From 865fa06b9860f9520d7aefde745384939edcbeb5 Mon Sep 17 00:00:00 2001
From: 徐潇宇
Date: Wed, 8 Jul 2020 11:37:53 +0800
Subject: [PATCH] add aec3 sources

---
 AEC3.sln | 62 +
 AEC3.vcxproj | 308 ++
 AEC3.vcxproj.filters | 483 +++
 AEC3.vcxproj.user | 4 +
 api.filters | 17 +
 api.vcxproj | 170 +
 api.vcxproj.filters | 33 +
 api/echo_canceller3_config.cc | 267 ++
 api/echo_canceller3_config.h | 222 ++
 api/echo_canceller3_config_json.cc | 673 ++++
 api/echo_canceller3_config_json.h | 45 +
 api/echo_canceller3_factory.cc | 31 +
 api/echo_canceller3_factory.h | 41 +
 api/echo_control.h | 68 +
 audio_processing/aec3/BUILD.gn | 238 ++
 audio_processing/aec3/adaptive_fir_filter.cc | 730 +++++
 audio_processing/aec3/adaptive_fir_filter.h | 176 ++
 .../aec3/adaptive_fir_filter_erl.cc | 100 +
 .../aec3/adaptive_fir_filter_erl.h | 50 +
 .../aec3/adaptive_fir_filter_erl_unittest.cc | 81 +
 .../aec3/adaptive_fir_filter_unittest.cc | 485 +++
 audio_processing/aec3/aec3_common.cc | 56 +
 audio_processing/aec3/aec3_common.h | 114 +
 audio_processing/aec3/aec3_fft.cc | 133 +
 audio_processing/aec3/aec3_fft.h | 74 +
 audio_processing/aec3/aec3_fft_unittest.cc | 213 ++
 audio_processing/aec3/aec_state.cc | 522 ++++
 audio_processing/aec3/aec_state.h | 322 ++
 audio_processing/aec3/aec_state_unittest.cc | 300 ++
 audio_processing/aec3/alignment_mixer.cc | 160 +
 audio_processing/aec3/alignment_mixer.h | 58 +
 .../aec3/alignment_mixer_unittest.cc | 196 ++
 .../aec3/api_call_jitter_metrics.cc | 121 +
 .../aec3/api_call_jitter_metrics.h | 60 +
 .../aec3/api_call_jitter_metrics_unittest.cc | 109 +
 audio_processing/aec3/block_buffer.cc | 39 +
 audio_processing/aec3/block_buffer.h | 62 +
 audio_processing/aec3/block_delay_buffer.cc | 47 +
 audio_processing/aec3/block_delay_buffer.h | 40 +
 .../aec3/block_delay_buffer_unittest.cc | 88 +
 audio_processing/aec3/block_framer.cc | 86 +
 audio_processing/aec3/block_framer.h | 48 +
 .../aec3/block_framer_unittest.cc | 384 +++
 audio_processing/aec3/block_processor.cc | 290 ++
 audio_processing/aec3/block_processor.h | 76 +
 .../aec3/block_processor_metrics.cc | 104 +
 .../aec3/block_processor_metrics.h | 47 +
 .../aec3/block_processor_metrics_unittest.cc | 34 +
 .../aec3/block_processor_unittest.cc | 304 ++
 audio_processing/aec3/clockdrift_detector.cc | 55 +
 audio_processing/aec3/clockdrift_detector.h | 38 +
 .../aec3/clockdrift_detector_unittest.cc | 57 +
 .../aec3/comfort_noise_generator.cc | 180 ++
 .../aec3/comfort_noise_generator.h | 76 +
 .../aec3/comfort_noise_generator_unittest.cc | 70 +
 audio_processing/aec3/decimator.cc | 91 +
 audio_processing/aec3/decimator.h | 41 +
 audio_processing/aec3/decimator_unittest.cc | 135 +
 audio_processing/aec3/delay_estimate.h | 31 +
 .../aec3/dominant_nearend_detector.cc | 75 +
 .../aec3/dominant_nearend_detector.h | 56 +
 .../aec3/downsampled_render_buffer.cc | 25 +
 .../aec3/downsampled_render_buffer.h | 58 +
 audio_processing/aec3/echo_audibility.cc | 118 +
 audio_processing/aec3/echo_audibility.h | 86 +
 audio_processing/aec3/echo_canceller3.cc | 511 ++++
 audio_processing/aec3/echo_canceller3.h | 190 ++
 .../aec3/echo_canceller3_unittest.cc | 714 +++++
 .../aec3/echo_path_delay_estimator.cc | 122 +
 .../aec3/echo_path_delay_estimator.h | 79 +
 .../echo_path_delay_estimator_unittest.cc | 198 ++
 .../aec3/echo_path_variability.cc | 22 +
 audio_processing/aec3/echo_path_variability.h | 39 +
 .../aec3/echo_path_variability_unittest.cc | 50 +
 audio_processing/aec3/echo_remover.cc | 495 +++
 audio_processing/aec3/echo_remover.h | 55 +
 audio_processing/aec3/echo_remover_metrics.cc | 329 ++
 audio_processing/aec3/echo_remover_metrics.h | 83 +
 .../aec3/echo_remover_metrics_unittest.cc | 156 +
 .../aec3/echo_remover_unittest.cc | 239 ++
 audio_processing/aec3/erl_estimator.cc | 146 +
 audio_processing/aec3/erl_estimator.h | 57 +
 .../aec3/erl_estimator_unittest.cc | 100 +
 audio_processing/aec3/erle_estimator.cc | 86 +
 audio_processing/aec3/erle_estimator.h | 99 +
 .../aec3/erle_estimator_unittest.cc | 278 ++
 audio_processing/aec3/fft_buffer.cc | 27 +
 audio_processing/aec3/fft_buffer.h | 60 +
 audio_processing/aec3/fft_data.h | 98 +
 audio_processing/aec3/fft_data_unittest.cc | 163 +
 audio_processing/aec3/filter_analyzer.cc | 277 ++
 audio_processing/aec3/filter_analyzer.h | 140 +
 .../aec3/filter_analyzer_unittest.cc | 33 +
 audio_processing/aec3/frame_blocker.cc | 88 +
 audio_processing/aec3/frame_blocker.h | 50 +
 .../aec3/frame_blocker_unittest.cc | 471 +++
 .../aec3/fullband_erle_estimator.cc | 201 ++
 .../aec3/fullband_erle_estimator.h | 118 +
 .../aec3/main_filter_update_gain.cc | 174 ++
 .../aec3/main_filter_update_gain.h | 87 +
 .../aec3/main_filter_update_gain_unittest.cc | 390 +++
 audio_processing/aec3/matched_filter.cc | 458 +++
 audio_processing/aec3/matched_filter.h | 138 +
 .../aec3/matched_filter_lag_aggregator.cc | 99 +
 .../aec3/matched_filter_lag_aggregator.h | 55 +
 .../matched_filter_lag_aggregator_unittest.cc | 156 +
 .../aec3/matched_filter_unittest.cc | 422 +++
 audio_processing/aec3/moving_average.cc | 60 +
 audio_processing/aec3/moving_average.h | 45 +
 .../aec3/moving_average_unittest.cc | 89 +
 audio_processing/aec3/nearend_detector.h | 42 +
 audio_processing/aec3/render_buffer.cc | 80 +
 audio_processing/aec3/render_buffer.h | 113 +
 .../aec3/render_buffer_unittest.cc | 46 +
 audio_processing/aec3/render_delay_buffer.cc | 505 +++
 audio_processing/aec3/render_delay_buffer.h | 83 +
 .../aec3/render_delay_buffer_unittest.cc | 156 +
 .../aec3/render_delay_controller.cc | 191 ++
 .../aec3/render_delay_controller.h | 50 +
 .../aec3/render_delay_controller_metrics.cc | 145 +
 .../aec3/render_delay_controller_metrics.h | 55 +
 ...ender_delay_controller_metrics_unittest.cc | 35 +
 .../aec3/render_delay_controller_unittest.cc | 363 +++
 .../aec3/render_signal_analyzer.cc | 156 +
 .../aec3/render_signal_analyzer.h | 62 +
 .../aec3/render_signal_analyzer_unittest.cc | 175 ++
 .../aec3/residual_echo_estimator.cc | 328 ++
 .../aec3/residual_echo_estimator.h | 70 +
 .../aec3/residual_echo_estimator_unittest.cc | 103 +
 .../aec3/reverb_decay_estimator.cc | 409 +++
 .../aec3/reverb_decay_estimator.h | 112 +
 .../aec3/reverb_frequency_response.cc | 98 +
 .../aec3/reverb_frequency_response.h | 53 +
 audio_processing/aec3/reverb_model.cc | 59 +
 audio_processing/aec3/reverb_model.h | 58 +
 .../aec3/reverb_model_estimator.cc | 54 +
 .../aec3/reverb_model_estimator.h | 67 +
 .../aec3/reverb_model_estimator_unittest.cc | 150 +
 .../aec3/shadow_filter_update_gain.cc | 103 +
 .../aec3/shadow_filter_update_gain.h | 74 +
 .../shadow_filter_update_gain_unittest.cc | 254 ++
 .../aec3/signal_dependent_erle_estimator.cc | 406 +++
 .../aec3/signal_dependent_erle_estimator.h | 98 +
 ...ignal_dependent_erle_estimator_unittest.cc | 205 ++
 audio_processing/aec3/spectrum_buffer.cc | 30 +
 audio_processing/aec3/spectrum_buffer.h | 62 +
 .../aec3/stationarity_estimator.cc | 243 ++
 .../aec3/stationarity_estimator.h | 122 +
 .../aec3/subband_erle_estimator.cc | 215 ++
 .../aec3/subband_erle_estimator.h | 93 +
 .../aec3/subband_nearend_detector.cc | 70 +
 .../aec3/subband_nearend_detector.h | 52 +
 audio_processing/aec3/subtractor.cc | 320 ++
 audio_processing/aec3/subtractor.h | 136 +
 audio_processing/aec3/subtractor_output.cc | 55 +
 audio_processing/aec3/subtractor_output.h | 52 +
 .../aec3/subtractor_output_analyzer.cc | 56 +
 .../aec3/subtractor_output_analyzer.h | 44 +
 audio_processing/aec3/subtractor_unittest.cc | 311 ++
 audio_processing/aec3/suppression_filter.cc | 179 ++
 audio_processing/aec3/suppression_filter.h | 50 +
 .../aec3/suppression_filter_unittest.cc | 249 ++
 audio_processing/aec3/suppression_gain.cc | 438 +++
 audio_processing/aec3/suppression_gain.h | 130 +
 .../aec3/suppression_gain_unittest.cc | 148 +
 audio_processing/aec3/vector_math.h | 215 ++
 audio_processing/aec3/vector_math_unittest.cc | 146 +
 audio_processing/audio_buffer.cc | 399 +++
 audio_processing/audio_buffer.h | 179 ++
 audio_processing/audio_frame.cc | 138 +
 audio_processing/audio_frame.h | 158 +
 audio_processing/channel_buffer.cc | 80 +
 audio_processing/channel_buffer.h | 184 ++
 audio_processing/channel_layout.cc | 282 ++
 audio_processing/channel_layout.h | 165 +
 audio_processing/high_pass_filter.cc | 115 +
 audio_processing/high_pass_filter.h | 42 +
 audio_processing/include/audio_processing.h | 83 +
 audio_processing/include/audio_util.h | 230 ++
 audio_processing/logging/apm_data_dumper.cc | 90 +
 audio_processing/logging/apm_data_dumper.h | 284 ++
 audio_processing/logging/wav_file.cc | 207 ++
 audio_processing/logging/wav_file.h | 106 +
 audio_processing/logging/wav_header.cc | 292 ++
 audio_processing/logging/wav_header.h | 65 +
 .../resampler/push_sinc_resampler.cc | 102 +
 .../resampler/push_sinc_resampler.h | 81 +
 audio_processing/resampler/sinc_resampler.cc | 376 +++
 audio_processing/resampler/sinc_resampler.h | 179 ++
 .../resampler/sinc_resampler_neon.cc | 52 +
 .../resampler/sinc_resampler_sse.cc | 63 +
 audio_processing/sparse_fir_filter.cc | 60 +
 audio_processing/sparse_fir_filter.h | 53 +
 audio_processing/splitting_filter.cc | 144 +
 audio_processing/splitting_filter.h | 72 +
 audio_processing/splitting_filter_c.c | 237 ++
 audio_processing/three_band_filter_bank.cc | 213 ++
 audio_processing/three_band_filter_bank.h | 69 +
 audio_processing/utility/BUILD.gn | 131 +
 audio_processing/utility/DEPS | 3 +
 .../utility/cascaded_biquad_filter.cc | 117 +
 .../utility/cascaded_biquad_filter.h | 80 +
 .../cascaded_biquad_filter_unittest.cc | 157 +
 audio_processing/utility/delay_estimator.cc | 700 +++++
 audio_processing/utility/delay_estimator.h | 253 ++
 .../utility/delay_estimator_internal.h | 47 +
 .../utility/delay_estimator_unittest.cc | 617 ++++
 .../utility/delay_estimator_wrapper.cc | 485 +++
 .../utility/delay_estimator_wrapper.h | 244 ++
 audio_processing/utility/ooura_fft.cc | 540 ++++
 audio_processing/utility/ooura_fft.h | 60 +
 audio_processing/utility/ooura_fft_mips.cc | 1245 ++++++++
 audio_processing/utility/ooura_fft_neon.cc | 353 +++
 audio_processing/utility/ooura_fft_sse2.cc | 439 +++
 .../utility/ooura_fft_tables_common.h | 54 +
 .../utility/ooura_fft_tables_neon_sse2.h | 98 +
 audio_processing/utility/pffft_wrapper.cc | 135 +
 audio_processing/utility/pffft_wrapper.h | 94 +
 .../utility/pffft_wrapper_unittest.cc | 181 ++
 base.vcxproj | 255 ++
 base.vcxproj.filters | 366 +++
 base.vcxproj.user | 4 +
 base/abseil/.clang-format | 4 +
 base/abseil/.gitignore | 15 +
 base/abseil/ABSEIL_ISSUE_TEMPLATE.md | 22 +
 base/abseil/AUTHORS | 6 +
 base/abseil/CMake/AbseilHelpers.cmake | 258 ++
 base/abseil/CMake/AbseilInstallDirs.cmake | 20 +
 .../abseil/CMake/Googletest/CMakeLists.txt.in | 15 +
 .../CMake/Googletest/DownloadGTest.cmake | 32 +
 base/abseil/CMake/README.md | 101 +
 base/abseil/CMake/abslConfig.cmake.in | 7 +
 .../CMake/install_test_project/CMakeLists.txt | 27 +
 .../CMake/install_test_project/simple.cc | 23 +
 .../abseil/CMake/install_test_project/test.sh | 144 +
 base/abseil/CMakeLists.txt | 160 +
 base/abseil/CONTRIBUTING.md | 138 +
 base/abseil/LICENSE | 203 ++
 base/abseil/LTS.md | 15 +
 base/abseil/README.md | 114 +
 base/abseil/UPGRADES.md | 17 +
 base/abseil/WORKSPACE | 45 +
 base/abseil/absl/BUILD.bazel | 59 +
 base/abseil/absl/CMakeLists.txt | 33 +
 base/abseil/absl/abseil.podspec.gen.py | 247 ++
 base/abseil/absl/algorithm/BUILD.bazel | 89 +
 base/abseil/absl/algorithm/CMakeLists.txt | 69 +
 base/abseil/absl/algorithm/algorithm.h | 159 +
 base/abseil/absl/algorithm/algorithm_test.cc | 182 ++
 base/abseil/absl/algorithm/container.h | 1727 +++++++++++
 base/abseil/absl/algorithm/container_test.cc | 1031 +++++++
 base/abseil/absl/algorithm/equal_benchmark.cc | 126 +
 base/abseil/absl/base/BUILD.bazel | 679 ++++
 base/abseil/absl/base/CMakeLists.txt | 617 ++++
 base/abseil/absl/base/attributes.h | 609 ++++
 base/abseil/absl/base/bit_cast_test.cc | 109 +
 base/abseil/absl/base/call_once.h | 226 ++
 base/abseil/absl/base/call_once_test.cc | 107 +
 base/abseil/absl/base/casts.h | 184 ++
 base/abseil/absl/base/config.h | 588 ++++
 base/abseil/absl/base/config_test.cc | 60 +
 base/abseil/absl/base/const_init.h | 76 +
 base/abseil/absl/base/dynamic_annotations.cc | 129 +
 base/abseil/absl/base/dynamic_annotations.h | 389 +++
 .../base/exception_safety_testing_test.cc | 958 ++++++
 base/abseil/absl/base/inline_variable_test.cc | 64 +
 .../absl/base/inline_variable_test_a.cc | 27 +
 .../absl/base/inline_variable_test_b.cc | 27 +
 base/abseil/absl/base/internal/atomic_hook.h | 179 ++
 .../absl/base/internal/atomic_hook_test.cc | 94 +
 .../base/internal/atomic_hook_test_helper.cc | 31 +
 .../base/internal/atomic_hook_test_helper.h | 34 +
 base/abseil/absl/base/internal/bits.h | 218 ++
 base/abseil/absl/base/internal/bits_test.cc | 97 +
 .../absl/base/internal/cmake_thread_test.cc | 22 +
 base/abseil/absl/base/internal/cycleclock.cc | 107 +
 base/abseil/absl/base/internal/cycleclock.h | 94 +
 base/abseil/absl/base/internal/direct_mmap.h | 161 +
 base/abseil/absl/base/internal/endian.h | 266 ++
 base/abseil/absl/base/internal/endian_test.cc | 265 ++
 .../base/internal/exception_safety_testing.cc | 79 +
 .../base/internal/exception_safety_testing.h | 1101 +++++++
 .../absl/base/internal/exception_testing.h | 42 +
 .../absl/base/internal/exponential_biased.cc | 93 +
 .../absl/base/internal/exponential_biased.h | 130 +
 .../base/internal/exponential_biased_test.cc | 199 ++
 base/abseil/absl/base/internal/hide_ptr.h | 51 +
 base/abseil/absl/base/internal/identity.h | 37 +
 .../absl/base/internal/inline_variable.h | 107 +
 .../base/internal/inline_variable_testing.h | 46 +
 base/abseil/absl/base/internal/invoke.h | 187 ++
 .../absl/base/internal/low_level_alloc.cc | 619 ++++
 .../absl/base/internal/low_level_alloc.h | 126 +
 .../base/internal/low_level_alloc_test.cc | 160 +
 .../absl/base/internal/low_level_scheduling.h | 107 +
 .../absl/base/internal/per_thread_tls.h | 52 +
 .../absl/base/internal/periodic_sampler.cc | 53 +
 .../absl/base/internal/periodic_sampler.h | 211 ++
 .../internal/periodic_sampler_benchmark.cc | 79 +
 .../base/internal/periodic_sampler_test.cc | 177 ++
 .../absl/base/internal/pretty_function.h | 33 +
 base/abseil/absl/base/internal/raw_logging.cc | 237 ++
 base/abseil/absl/base/internal/raw_logging.h | 179 ++
 .../absl/base/internal/scheduling_mode.h | 58 +
 .../absl/base/internal/scoped_set_env.cc | 81 +
 .../absl/base/internal/scoped_set_env.h | 45 +
 .../absl/base/internal/scoped_set_env_test.cc | 99 +
 base/abseil/absl/base/internal/spinlock.cc | 233 ++
 base/abseil/absl/base/internal/spinlock.h | 243 ++
 .../absl/base/internal/spinlock_akaros.inc | 35 +
 .../absl/base/internal/spinlock_benchmark.cc | 52 +
 .../absl/base/internal/spinlock_linux.inc | 67 +
 .../absl/base/internal/spinlock_posix.inc | 46 +
 .../absl/base/internal/spinlock_wait.cc | 81 +
 .../abseil/absl/base/internal/spinlock_wait.h | 93 +
 .../absl/base/internal/spinlock_win32.inc | 37 +
 base/abseil/absl/base/internal/sysinfo.cc | 414 +++
 base/abseil/absl/base/internal/sysinfo.h | 66 +
 .../abseil/absl/base/internal/sysinfo_test.cc | 100 +
 .../absl/base/internal/thread_annotations.h | 271 ++
 .../absl/base/internal/thread_identity.cc | 140 +
 .../absl/base/internal/thread_identity.h | 250 ++
 .../internal/thread_identity_benchmark.cc | 38 +
 .../base/internal/thread_identity_test.cc | 128 +
 .../absl/base/internal/throw_delegate.cc | 108 +
 .../absl/base/internal/throw_delegate.h | 75 +
 .../absl/base/internal/tsan_mutex_interface.h | 66 +
 .../absl/base/internal/unaligned_access.h | 158 +
 .../absl/base/internal/unscaledcycleclock.cc | 103 +
 .../absl/base/internal/unscaledcycleclock.h | 124 +
 base/abseil/absl/base/invoke_test.cc | 223 ++
 base/abseil/absl/base/log_severity.cc | 27 +
 base/abseil/absl/base/log_severity.h | 121 +
 base/abseil/absl/base/log_severity_test.cc | 199 ++
 base/abseil/absl/base/macros.h | 220 ++
 base/abseil/absl/base/optimization.h | 181 ++
 base/abseil/absl/base/options.h | 188 ++
 base/abseil/absl/base/policy_checks.h | 111 +
 base/abseil/absl/base/port.h | 26 +
 base/abseil/absl/base/raw_logging_test.cc | 79 +
 base/abseil/absl/base/spinlock_test_common.cc | 271 ++
 base/abseil/absl/base/thread_annotations.h | 280 ++
 base/abseil/absl/base/throw_delegate_test.cc | 107 +
 base/abseil/absl/compiler_config_setting.bzl | 38 +
 base/abseil/absl/container/BUILD.bazel | 876 ++++++
 base/abseil/absl/container/CMakeLists.txt | 887 ++++++
 base/abseil/absl/container/btree_map.h | 735 +++++
 base/abseil/absl/container/btree_set.h | 683 +++++
 base/abseil/absl/container/btree_test.cc | 2351 ++++++++++++++
 base/abseil/absl/container/btree_test.h | 155 +
 base/abseil/absl/container/fixed_array.h | 515 ++++
 .../absl/container/fixed_array_benchmark.cc | 67 +
 .../fixed_array_exception_safety_test.cc | 202 ++
 .../abseil/absl/container/fixed_array_test.cc | 883 ++++++
 base/abseil/absl/container/flat_hash_map.h | 591 ++++
 .../absl/container/flat_hash_map_test.cc | 256 ++
 base/abseil/absl/container/flat_hash_set.h | 495 +++
 .../absl/container/flat_hash_set_test.cc | 130 +
 base/abseil/absl/container/inlined_vector.h | 848 +++++
 .../container/inlined_vector_benchmark.cc | 807 +++++
 .../inlined_vector_exception_safety_test.cc | 508 +++
 .../absl/container/inlined_vector_test.cc | 1800 +++++++++++
 base/abseil/absl/container/internal/btree.h | 2613 ++++++++++++++++
 .../absl/container/internal/btree_container.h | 609 ++++
 base/abseil/absl/container/internal/common.h | 203 ++
 .../container/internal/compressed_tuple.h | 265 ++
 .../internal/compressed_tuple_test.cc | 413 +++
 .../container/internal/container_memory.h | 440 +++
 .../internal/container_memory_test.cc | 190 ++
 .../container/internal/counting_allocator.h | 83 +
 .../internal/hash_function_defaults.h | 146 +
 .../internal/hash_function_defaults_test.cc | 299 ++
 .../internal/hash_generator_testing.cc | 74 +
 .../internal/hash_generator_testing.h | 161 +
 .../container/internal/hash_policy_testing.h | 184 ++
 .../internal/hash_policy_testing_test.cc | 45 +
 .../container/internal/hash_policy_traits.h | 191 ++
 .../internal/hash_policy_traits_test.cc | 144 +
 .../absl/container/internal/hashtable_debug.h | 110 +
 .../internal/hashtable_debug_hooks.h | 85 +
 .../container/internal/hashtablez_sampler.cc | 270 ++
 .../container/internal/hashtablez_sampler.h | 288 ++
 ...ashtablez_sampler_force_weak_definition.cc | 30 +
 .../internal/hashtablez_sampler_test.cc | 359 +++
 .../abseil/absl/container/internal/have_sse.h | 49 +
 .../absl/container/internal/inlined_vector.h | 892 ++++++
 base/abseil/absl/container/internal/layout.h | 741 +++++
 .../absl/container/internal/layout_test.cc | 1567 ++++++++++
 .../container/internal/node_hash_policy.h | 92 +
 .../internal/node_hash_policy_test.cc | 69 +
 .../absl/container/internal/raw_hash_map.h | 197 ++
 .../absl/container/internal/raw_hash_set.cc | 48 +
 .../absl/container/internal/raw_hash_set.h | 1868 +++++++++++
 .../internal/raw_hash_set_allocator_test.cc | 430 +++
 .../container/internal/raw_hash_set_test.cc | 1918 ++++++++++++
 .../internal/test_instance_tracker.cc | 29 +
 .../internal/test_instance_tracker.h | 274 ++
 .../internal/test_instance_tracker_test.cc | 184 ++
 base/abseil/absl/container/internal/tracked.h | 83 +
 .../internal/unordered_map_constructor_test.h | 489 +++
 .../internal/unordered_map_lookup_test.h | 117 +
 .../internal/unordered_map_members_test.h | 87 +
 .../internal/unordered_map_modifiers_test.h | 316 ++
 .../container/internal/unordered_map_test.cc | 50 +
 .../internal/unordered_set_constructor_test.h | 496 +++
 .../internal/unordered_set_lookup_test.h | 91 +
 .../internal/unordered_set_members_test.h | 86 +
 .../internal/unordered_set_modifiers_test.h | 190 ++
 .../container/internal/unordered_set_test.cc | 41 +
 base/abseil/absl/container/node_hash_map.h | 588 ++++
 .../absl/container/node_hash_map_test.cc | 222 ++
 base/abseil/absl/container/node_hash_set.h | 490 +++
 .../absl/container/node_hash_set_test.cc | 107 +
 .../absl/copts/AbseilConfigureCopts.cmake | 67 +
 .../absl/copts/GENERATED_AbseilCopts.cmake | 213 ++
 base/abseil/absl/copts/GENERATED_copts.bzl | 214 ++
 base/abseil/absl/copts/configure_copts.bzl | 78 +
 base/abseil/absl/copts/copts.py | 200 ++
 base/abseil/absl/copts/generate_copts.py | 109 +
 base/abseil/absl/debugging/BUILD.bazel | 339 ++
 base/abseil/absl/debugging/CMakeLists.txt | 331 ++
 .../absl/debugging/failure_signal_handler.cc | 361 +++
 .../absl/debugging/failure_signal_handler.h | 121 +
 .../debugging/failure_signal_handler_test.cc | 159 +
 .../debugging/internal/address_is_readable.cc | 137 +
 .../debugging/internal/address_is_readable.h | 32 +
 .../absl/debugging/internal/demangle.cc | 1895 ++++++++++++
 .../abseil/absl/debugging/internal/demangle.h | 71 +
 .../absl/debugging/internal/demangle_test.cc | 195 ++
 .../absl/debugging/internal/elf_mem_image.cc | 382 +++
 .../absl/debugging/internal/elf_mem_image.h | 134 +
 .../absl/debugging/internal/examine_stack.cc | 155 +
 .../absl/debugging/internal/examine_stack.h | 42 +
 .../debugging/internal/stack_consumption.cc | 184 ++
 .../debugging/internal/stack_consumption.h | 49 +
 .../internal/stack_consumption_test.cc | 50 +
 .../internal/stacktrace_aarch64-inl.inc | 192 ++
 .../debugging/internal/stacktrace_arm-inl.inc | 125 +
 .../debugging/internal/stacktrace_config.h | 70 +
 .../internal/stacktrace_generic-inl.inc | 99 +
 .../internal/stacktrace_powerpc-inl.inc | 248 ++
 .../internal/stacktrace_unimplemented-inl.inc | 24 +
 .../internal/stacktrace_win32-inl.inc | 85 +
 .../debugging/internal/stacktrace_x86-inl.inc | 342 +++
 .../absl/debugging/internal/symbolize.h | 128 +
 .../absl/debugging/internal/vdso_support.cc | 194 ++
 .../absl/debugging/internal/vdso_support.h | 158 +
 base/abseil/absl/debugging/leak_check.cc | 53 +
 base/abseil/absl/debugging/leak_check.h | 113 +
 .../absl/debugging/leak_check_disable.cc | 20 +
 .../absl/debugging/leak_check_fail_test.cc | 41 +
 base/abseil/absl/debugging/leak_check_test.cc | 42 +
 base/abseil/absl/debugging/stacktrace.cc | 140 +
 base/abseil/absl/debugging/stacktrace.h | 231 ++
 base/abseil/absl/debugging/symbolize.cc | 25 +
 base/abseil/absl/debugging/symbolize.h | 99 +
 base/abseil/absl/debugging/symbolize_elf.inc | 1480 +++++++++
 base/abseil/absl/debugging/symbolize_test.cc | 549 ++++
 .../debugging/symbolize_unimplemented.inc | 40 +
 .../abseil/absl/debugging/symbolize_win32.inc | 81 +
 base/abseil/absl/flags/BUILD.bazel | 423 +++
 base/abseil/absl/flags/CMakeLists.txt | 384 +++
 base/abseil/absl/flags/config.h | 48 +
 base/abseil/absl/flags/config_test.cc | 61 +
 base/abseil/absl/flags/declare.h | 65 +
 base/abseil/absl/flags/flag.cc | 60 +
 base/abseil/absl/flags/flag.h | 402 +++
 base/abseil/absl/flags/flag_test.cc | 531 ++++
 base/abseil/absl/flags/flag_test_defs.cc | 24 +
 .../absl/flags/internal/commandlineflag.cc | 62 +
 .../absl/flags/internal/commandlineflag.h | 287 ++
 .../flags/internal/commandlineflag_test.cc | 218 ++
 base/abseil/absl/flags/internal/flag.cc | 401 +++
 base/abseil/absl/flags/internal/flag.h | 437 +++
 base/abseil/absl/flags/internal/parse.h | 50 +
 base/abseil/absl/flags/internal/path_util.h | 62 +
 .../absl/flags/internal/path_util_test.cc | 46 +
 .../absl/flags/internal/program_name.cc | 55 +
 .../abseil/absl/flags/internal/program_name.h | 49 +
 .../absl/flags/internal/program_name_test.cc | 60 +
 base/abseil/absl/flags/internal/registry.cc | 341 +++
 base/abseil/absl/flags/internal/registry.h | 122 +
 .../abseil/absl/flags/internal/type_erased.cc | 84 +
 base/abseil/absl/flags/internal/type_erased.h | 88 +
 .../absl/flags/internal/type_erased_test.cc | 147 +
 base/abseil/absl/flags/internal/usage.cc | 409 +++
 base/abseil/absl/flags/internal/usage.h | 80 +
 base/abseil/absl/flags/internal/usage_test.cc | 404 +++
 base/abseil/absl/flags/marshalling.cc | 231 ++
 base/abseil/absl/flags/marshalling.h | 263 ++
 base/abseil/absl/flags/marshalling_test.cc | 899 ++++++
 base/abseil/absl/flags/parse.cc | 755 +++++
 base/abseil/absl/flags/parse.h | 60 +
 base/abseil/absl/flags/parse_test.cc | 862 ++++++
 base/abseil/absl/flags/usage.cc | 58 +
 base/abseil/absl/flags/usage.h | 42 +
 base/abseil/absl/flags/usage_config.cc | 154 +
 base/abseil/absl/flags/usage_config.h | 133 +
 base/abseil/absl/flags/usage_config_test.cc | 198 +
 base/abseil/absl/functional/BUILD.bazel | 93 +
 base/abseil/absl/functional/bind_front.h | 166 +
 .../abseil/absl/functional/bind_front_test.cc | 231 ++
 base/abseil/absl/functional/function_ref.h | 139 +
 .../absl/functional/function_ref_benchmark.cc | 142 +
 .../absl/functional/function_ref_test.cc | 257 ++
 .../absl/functional/internal/front_binder.h | 95 +
 .../absl/functional/internal/function_ref.h | 106 +
 base/abseil/absl/hash/BUILD.bazel | 121 +
 base/abseil/absl/hash/CMakeLists.txt | 115 +
 base/abseil/absl/hash/hash.h | 324 ++
 base/abseil/absl/hash/hash_test.cc | 934 ++++++
 base/abseil/absl/hash/hash_testing.h | 378 +++
 base/abseil/absl/hash/internal/city.cc | 346 +++
 base/abseil/absl/hash/internal/city.h | 96 +
 base/abseil/absl/hash/internal/city_test.cc | 595 ++++
 base/abseil/absl/hash/internal/hash.cc | 55 +
 base/abseil/absl/hash/internal/hash.h | 987 ++++++
 .../absl/hash/internal/print_hash_of.cc | 23 +
 .../absl/hash/internal/spy_hash_state.h | 231 ++
 base/abseil/absl/memory/BUILD.bazel | 65 +
 base/abseil/absl/memory/CMakeLists.txt | 55 +
 base/abseil/absl/memory/memory.h | 695 +++++
 .../memory/memory_exception_safety_test.cc | 60 +
 base/abseil/absl/memory/memory_test.cc | 652 ++++
 base/abseil/absl/meta/BUILD.bazel | 48 +
 base/abseil/absl/meta/CMakeLists.txt | 50 +
 base/abseil/absl/meta/type_traits.h | 759 +++++
 base/abseil/absl/meta/type_traits_test.cc | 1368 +++++++++
 base/abseil/absl/numeric/BUILD.bazel | 73 +
 base/abseil/absl/numeric/CMakeLists.txt | 60 +
 base/abseil/absl/numeric/int128.cc | 404 +++
 base/abseil/absl/numeric/int128.h | 1091 +++++++
 base/abseil/absl/numeric/int128_benchmark.cc | 221 ++
 .../absl/numeric/int128_have_intrinsic.inc | 302 ++
 .../absl/numeric/int128_no_intrinsic.inc | 308 ++
 .../abseil/absl/numeric/int128_stream_test.cc | 1395 +++++++++
 base/abseil/absl/numeric/int128_test.cc | 1225 ++++++++
 base/abseil/absl/random/BUILD.bazel | 498 +++
 base/abseil/absl/random/CMakeLists.txt | 1207 ++++++++
 base/abseil/absl/random/benchmarks.cc | 383 +++
 .../absl/random/bernoulli_distribution.h | 200 ++
 .../random/bernoulli_distribution_test.cc | 213 ++
 base/abseil/absl/random/beta_distribution.h | 427 +++
 .../absl/random/beta_distribution_test.cc | 614 ++++
 base/abseil/absl/random/bit_gen_ref.h | 153 +
 base/abseil/absl/random/bit_gen_ref_test.cc | 101 +
 .../absl/random/discrete_distribution.cc | 98 +
 .../absl/random/discrete_distribution.h | 247 ++
 .../absl/random/discrete_distribution_test.cc | 246 ++
 .../absl/random/distribution_format_traits.h | 278 ++
 base/abseil/absl/random/distributions.h | 465 +++
 base/abseil/absl/random/distributions_test.cc | 489 +++
 base/abseil/absl/random/examples_test.cc | 99 +
 .../absl/random/exponential_distribution.h | 165 +
 .../random/exponential_distribution_test.cc | 426 +++
 .../absl/random/gaussian_distribution.cc | 104 +
 .../absl/random/gaussian_distribution.h | 274 ++
 .../absl/random/gaussian_distribution_test.cc | 573 ++++
 base/abseil/absl/random/generators_test.cc | 179 ++
 base/abseil/absl/random/internal/BUILD.bazel | 735 +++++
 .../abseil/absl/random/internal/chi_square.cc | 232 ++
 base/abseil/absl/random/internal/chi_square.h | 89 +
 .../absl/random/internal/chi_square_test.cc | 365 +++
 .../random/internal/distribution_caller.h | 60 +
 .../random/internal/distribution_test_util.cc | 418 +++
 .../random/internal/distribution_test_util.h | 113 +
 .../internal/distribution_test_util_test.cc | 193 ++
 .../absl/random/internal/distributions.h | 52 +
 .../absl/random/internal/explicit_seed_seq.h | 91 +
 .../random/internal/explicit_seed_seq_test.cc | 204 ++
 .../absl/random/internal/fast_uniform_bits.h | 264 ++
 .../random/internal/fast_uniform_bits_test.cc | 274 ++
 base/abseil/absl/random/internal/fastmath.h | 74 +
 .../absl/random/internal/fastmath_test.cc | 110 +
 .../gaussian_distribution_gentables.cc | 147 +
 .../absl/random/internal/generate_real.h | 146 +
 .../random/internal/generate_real_test.cc | 497 +++
 .../random/internal/iostream_state_saver.h | 245 ++
 .../internal/iostream_state_saver_test.cc | 371 +++
 .../absl/random/internal/mock_overload_set.h | 91 +
 .../random/internal/mocking_bit_gen_base.h | 120 +
 .../absl/random/internal/nanobenchmark.cc | 804 +++++
 .../absl/random/internal/nanobenchmark.h | 172 ++
 .../random/internal/nanobenchmark_test.cc | 77 +
 .../absl/random/internal/nonsecure_base.h | 150 +
 .../random/internal/nonsecure_base_test.cc | 245 ++
 base/abseil/absl/random/internal/pcg_engine.h | 307 ++
 .../absl/random/internal/pcg_engine_test.cc | 638 ++++
 base/abseil/absl/random/internal/platform.h | 171 ++
 base/abseil/absl/random/internal/pool_urbg.cc | 254 ++
 base/abseil/absl/random/internal/pool_urbg.h | 131 +
 .../absl/random/internal/pool_urbg_test.cc | 182 ++
 .../absl/random/internal/randen-keys.inc | 207 ++
 base/abseil/absl/random/internal/randen.cc | 91 +
 base/abseil/absl/random/internal/randen.h | 102 +
 .../absl/random/internal/randen_benchmarks.cc | 174 ++
 .../absl/random/internal/randen_detect.cc | 221 ++
 .../absl/random/internal/randen_detect.h | 33 +
 .../absl/random/internal/randen_engine.h | 230 ++
 .../random/internal/randen_engine_test.cc | 656 ++++
 .../absl/random/internal/randen_hwaes.cc | 638 ++++
 .../absl/random/internal/randen_hwaes.h | 50 +
 .../absl/random/internal/randen_hwaes_test.cc | 102 +
 .../absl/random/internal/randen_slow.cc | 506 +++
 .../abseil/absl/random/internal/randen_slow.h | 47 +
 .../absl/random/internal/randen_slow_test.cc | 61 +
 .../absl/random/internal/randen_test.cc | 70 +
 .../absl/random/internal/randen_traits.h | 63 +
 .../absl/random/internal/salted_seed_seq.h | 167 +
 .../random/internal/salted_seed_seq_test.cc | 168 +
 .../absl/random/internal/seed_material.cc | 219 ++
 .../absl/random/internal/seed_material.h | 104 +
 .../random/internal/seed_material_test.cc | 202 ++
 .../seed_salting_sequence_generator.cc | 30 +
 ...lting_sequence_generator_empty_sequence.cc | 30 +
 .../absl/random/internal/sequence_urbg.h | 60 +
 base/abseil/absl/random/internal/traits.h | 101 +
 .../absl/random/internal/traits_test.cc | 126 +
 .../absl/random/internal/uniform_helper.h | 180 ++
 .../absl/random/internal/wide_multiply.h | 111 +
 .../random/internal/wide_multiply_test.cc | 66 +
 .../random/log_uniform_int_distribution.h | 254 ++
 .../log_uniform_int_distribution_test.cc | 277 ++
 base/abseil/absl/random/mock_distributions.h | 261 ++
 .../absl/random/mock_distributions_test.cc | 72 +
 base/abseil/absl/random/mocking_bit_gen.cc | 30 +
 base/abseil/absl/random/mocking_bit_gen.h | 196 ++
 .../absl/random/mocking_bit_gen_test.cc | 347 +++
 .../abseil/absl/random/poisson_distribution.h | 258 ++
 .../absl/random/poisson_distribution_test.cc | 565 ++++
 base/abseil/absl/random/random.h | 189 ++
 base/abseil/absl/random/seed_gen_exception.cc | 46 +
 base/abseil/absl/random/seed_gen_exception.h | 55 +
 base/abseil/absl/random/seed_sequences.cc | 29 +
 base/abseil/absl/random/seed_sequences.h | 110 +
 .../abseil/absl/random/seed_sequences_test.cc | 127 +
 .../absl/random/uniform_int_distribution.h | 275 ++
 .../random/uniform_int_distribution_test.cc | 250 ++
 .../absl/random/uniform_real_distribution.h | 202 ++
 .../random/uniform_real_distribution_test.cc | 334 ++
 base/abseil/absl/random/zipf_distribution.h | 271 ++
 .../absl/random/zipf_distribution_test.cc | 423 +++
 base/abseil/absl/strings/BUILD.bazel | 688 +++++
 base/abseil/absl/strings/CMakeLists.txt | 525 ++++
 base/abseil/absl/strings/ascii.cc | 200 ++
 base/abseil/absl/strings/ascii.h | 241 ++
 base/abseil/absl/strings/ascii_benchmark.cc | 120 +
 base/abseil/absl/strings/ascii_test.cc | 361 +++
 base/abseil/absl/strings/charconv.cc | 985 ++++++
 base/abseil/absl/strings/charconv.h | 119 +
 .../abseil/absl/strings/charconv_benchmark.cc | 204 ++
 base/abseil/absl/strings/charconv_test.cc | 780 +++++
 base/abseil/absl/strings/escaping.cc | 1111 +++++++
 base/abseil/absl/strings/escaping.h | 164 +
 .../abseil/absl/strings/escaping_benchmark.cc | 94 +
 base/abseil/absl/strings/escaping_test.cc | 664 ++++
 base/abseil/absl/strings/internal/char_map.h | 156 +
 .../strings/internal/char_map_benchmark.cc | 61 +
 .../absl/strings/internal/char_map_test.cc | 172 ++
 .../absl/strings/internal/charconv_bigint.cc | 359 +++
 .../absl/strings/internal/charconv_bigint.h | 421 +++
 .../strings/internal/charconv_bigint_test.cc | 205 ++
 .../absl/strings/internal/charconv_parse.cc | 504 +++
 .../absl/strings/internal/charconv_parse.h | 99 +
 .../strings/internal/charconv_parse_test.cc | 357 +++
 .../strings/internal/escaping_test_common.h | 133 +
 base/abseil/absl/strings/internal/memutil.cc | 112 +
 base/abseil/absl/strings/internal/memutil.h | 148 +
 .../strings/internal/memutil_benchmark.cc | 323 ++
 .../absl/strings/internal/memutil_test.cc | 179 ++
 .../strings/internal/numbers_test_common.h | 184 ++
 .../absl/strings/internal/ostringstream.cc | 36 +
 .../absl/strings/internal/ostringstream.h | 89 +
 .../internal/ostringstream_benchmark.cc | 106 +
 .../strings/internal/ostringstream_test.cc | 102 +
 .../absl/strings/internal/pow10_helper.cc | 122 +
 .../absl/strings/internal/pow10_helper.h | 40 +
 .../strings/internal/pow10_helper_test.cc | 122 +
 .../strings/internal/resize_uninitialized.h | 73 +
 .../internal/resize_uninitialized_test.cc | 68 +
 .../absl/strings/internal/stl_type_traits.h | 248 ++
 .../absl/strings/internal/str_format/arg.cc | 391 +++
 .../absl/strings/internal/str_format/arg.h | 433 +++
 .../strings/internal/str_format/arg_test.cc | 113 +
 .../absl/strings/internal/str_format/bind.cc | 241 ++
 .../absl/strings/internal/str_format/bind.h | 209 ++
 .../strings/internal/str_format/bind_test.cc | 143 +
 .../strings/internal/str_format/checker.h | 326 ++
 .../internal/str_format/checker_test.cc | 152 +
 .../internal/str_format/convert_test.cc | 651 ++++
 .../strings/internal/str_format/extension.cc | 86 +
 .../strings/internal/str_format/extension.h | 413 +++
 .../internal/str_format/extension_test.cc | 66 +
 .../internal/str_format/float_conversion.cc | 487 +++
 .../internal/str_format/float_conversion.h | 23 +
 .../strings/internal/str_format/output.cc | 72 +
 .../absl/strings/internal/str_format/output.h | 104 +
 .../internal/str_format/output_test.cc | 73 +
 .../strings/internal/str_format/parser.cc | 305 ++
 .../absl/strings/internal/str_format/parser.h | 324 ++
 .../internal/str_format/parser_test.cc | 394 +++
 .../absl/strings/internal/str_join_internal.h | 314 ++
 .../strings/internal/str_split_internal.h | 455 +++
 base/abseil/absl/strings/internal/utf8.cc | 53 +
 base/abseil/absl/strings/internal/utf8.h | 50 +
 .../abseil/absl/strings/internal/utf8_test.cc | 66 +
 base/abseil/absl/strings/match.cc | 40 +
 base/abseil/absl/strings/match.h | 90 +
 base/abseil/absl/strings/match_test.cc | 110 +
 base/abseil/absl/strings/numbers.cc | 916 ++++++
 base/abseil/absl/strings/numbers.h | 263 ++
 base/abseil/absl/strings/numbers_benchmark.cc | 286 ++
 base/abseil/absl/strings/numbers_test.cc | 1277 ++++++++
 base/abseil/absl/strings/str_cat.cc | 246 ++
 base/abseil/absl/strings/str_cat.h | 408 +++
 base/abseil/absl/strings/str_cat_benchmark.cc | 140 +
 base/abseil/absl/strings/str_cat_test.cc | 610 ++++
 base/abseil/absl/strings/str_format.h | 537 ++++
 base/abseil/absl/strings/str_format_test.cc | 649 ++++
 base/abseil/absl/strings/str_join.h | 293 ++
 .../abseil/absl/strings/str_join_benchmark.cc | 97 +
 base/abseil/absl/strings/str_join_test.cc | 474 +++
 base/abseil/absl/strings/str_replace.cc | 82 +
 base/abseil/absl/strings/str_replace.h | 219 ++
 .../absl/strings/str_replace_benchmark.cc | 122 +
 base/abseil/absl/strings/str_replace_test.cc | 341 +++
 base/abseil/absl/strings/str_split.cc | 139 +
 base/abseil/absl/strings/str_split.h | 513 ++++
 .../absl/strings/str_split_benchmark.cc | 180 ++
 base/abseil/absl/strings/str_split_test.cc | 939 ++++++
 base/abseil/absl/strings/string_view.cc | 235 ++
 base/abseil/absl/strings/string_view.h | 615 ++++
 .../absl/strings/string_view_benchmark.cc | 347 +++
 base/abseil/absl/strings/string_view_test.cc | 1207 ++++++++
 base/abseil/absl/strings/strip.h | 91 +
 base/abseil/absl/strings/strip_test.cc | 198 ++
 base/abseil/absl/strings/substitute.cc | 171 ++
 base/abseil/absl/strings/substitute.h | 688 +++++
 base/abseil/absl/strings/substitute_test.cc | 204 ++
 .../absl/strings/testdata/getline-1.txt | 3 +
 .../absl/strings/testdata/getline-2.txt | 1 +
 base/abseil/absl/synchronization/BUILD.bazel | 285 ++
 .../absl/synchronization/CMakeLists.txt | 214 ++
 base/abseil/absl/synchronization/barrier.cc | 52 +
 base/abseil/absl/synchronization/barrier.h | 79 +
 .../absl/synchronization/barrier_test.cc | 75 +
 .../absl/synchronization/blocking_counter.cc | 57 +
 .../absl/synchronization/blocking_counter.h | 99 +
 .../synchronization/blocking_counter_test.cc | 68 +
 .../internal/create_thread_identity.cc | 140 +
 .../internal/create_thread_identity.h | 60 +
 .../synchronization/internal/graphcycles.cc | 697 +++++
 .../synchronization/internal/graphcycles.h | 141 +
 .../internal/graphcycles_benchmark.cc | 44 +
 .../internal/graphcycles_test.cc | 464 +++
 .../synchronization/internal/kernel_timeout.h | 155 +
 .../synchronization/internal/mutex_nonprod.cc | 320 ++
 .../internal/mutex_nonprod.inc | 261 ++
 .../internal/per_thread_sem.cc | 106 +
 .../synchronization/internal/per_thread_sem.h | 115 +
 .../internal/per_thread_sem_test.cc | 180 ++
 .../synchronization/internal/thread_pool.h | 93 +
 .../absl/synchronization/internal/waiter.cc | 486 +++
 .../absl/synchronization/internal/waiter.h | 164 +
 .../absl/synchronization/lifetime_test.cc | 181 ++
 base/abseil/absl/synchronization/mutex.cc | 2725 +++++++++++++++++
 base/abseil/absl/synchronization/mutex.h | 1056 +++++++
 .../absl/synchronization/mutex_benchmark.cc | 223 ++
 .../abseil/absl/synchronization/mutex_test.cc | 1675 ++++++++++
 .../absl/synchronization/notification.cc | 78 +
 .../absl/synchronization/notification.h | 123 +
 .../absl/synchronization/notification_test.cc | 133 +
 base/abseil/absl/time/BUILD.bazel | 124 +
 base/abseil/absl/time/CMakeLists.txt | 127 +
 base/abseil/absl/time/civil_time.cc | 175 ++
 base/abseil/absl/time/civil_time.h | 538 ++++
 base/abseil/absl/time/civil_time_benchmark.cc | 127 +
 base/abseil/absl/time/civil_time_test.cc | 1243 ++++++++
 base/abseil/absl/time/clock.cc | 569 ++++
 base/abseil/absl/time/clock.h | 74 +
 base/abseil/absl/time/clock_benchmark.cc | 74 +
 base/abseil/absl/time/clock_test.cc | 118 +
 base/abseil/absl/time/duration.cc | 922 ++++++
 base/abseil/absl/time/duration_benchmark.cc | 428 +++
 base/abseil/absl/time/duration_test.cc | 1808 +++++++++++
 base/abseil/absl/time/format.cc | 150 +
 base/abseil/absl/time/format_benchmark.cc | 64 +
 base/abseil/absl/time/format_test.cc | 441 +++
 .../absl/time/internal/cctz/BUILD.bazel | 166 +
 .../internal/cctz/include/cctz/civil_time.h | 332 ++
 .../cctz/include/cctz/civil_time_detail.h | 622 ++++
 .../internal/cctz/include/cctz/time_zone.h | 384 +++
 .../cctz/include/cctz/zone_info_source.h | 102 +
 .../time/internal/cctz/src/cctz_benchmark.cc | 1029 +++++++
 .../internal/cctz/src/civil_time_detail.cc | 94 +
 .../time/internal/cctz/src/civil_time_test.cc | 1056 +++++++
 .../time/internal/cctz/src/time_zone_fixed.cc | 140 +
 .../time/internal/cctz/src/time_zone_fixed.h | 52 +
 .../internal/cctz/src/time_zone_format.cc | 922 ++++++
 .../cctz/src/time_zone_format_test.cc | 1500 +++++++++
 .../time/internal/cctz/src/time_zone_if.cc | 45 +
 .../time/internal/cctz/src/time_zone_if.h | 76 +
 .../time/internal/cctz/src/time_zone_impl.cc | 121 +
 .../time/internal/cctz/src/time_zone_impl.h | 93 +
 .../time/internal/cctz/src/time_zone_info.cc | 958 ++++++
 .../time/internal/cctz/src/time_zone_info.h | 138 +
 .../time/internal/cctz/src/time_zone_libc.cc | 308 ++
 .../time/internal/cctz/src/time_zone_libc.h | 55 +
 .../internal/cctz/src/time_zone_lookup.cc | 187 ++
 .../cctz/src/time_zone_lookup_test.cc | 1436 +++++++++
 .../time/internal/cctz/src/time_zone_posix.cc | 159 +
 .../time/internal/cctz/src/time_zone_posix.h | 132 +
 .../absl/time/internal/cctz/src/tzfile.h | 122 +
 .../internal/cctz/src/zone_info_source.cc | 87 +
 .../internal/cctz/testdata/README.zoneinfo | 37 +
 .../absl/time/internal/cctz/testdata/version | 1 +
 .../cctz/testdata/zoneinfo/Africa/Abidjan | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/Africa/Accra | Bin 0 -> 816 bytes
 .../cctz/testdata/zoneinfo/Africa/Addis_Ababa | Bin 0 -> 251 bytes
 .../cctz/testdata/zoneinfo/Africa/Algiers | Bin 0 -> 735 bytes
 .../cctz/testdata/zoneinfo/Africa/Asmara | Bin 0 -> 251 bytes
 .../cctz/testdata/zoneinfo/Africa/Asmera | Bin 0 -> 251 bytes
 .../cctz/testdata/zoneinfo/Africa/Bamako | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/Africa/Bangui | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Banjul | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/Africa/Bissau | Bin 0 -> 194 bytes
 .../cctz/testdata/zoneinfo/Africa/Blantyre | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Brazzaville | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Bujumbura | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Cairo | Bin 0 -> 1955 bytes
 .../cctz/testdata/zoneinfo/Africa/Casablanca | Bin 0 -> 2429 bytes
 .../cctz/testdata/zoneinfo/Africa/Ceuta | Bin 0 -> 2036 bytes
 .../cctz/testdata/zoneinfo/Africa/Conakry | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/Africa/Dakar | Bin 0 -> 148 bytes
 .../testdata/zoneinfo/Africa/Dar_es_Salaam | Bin 0 -> 251 bytes
 .../cctz/testdata/zoneinfo/Africa/Djibouti | Bin 0 -> 251 bytes
 .../cctz/testdata/zoneinfo/Africa/Douala | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/El_Aaiun | Bin 0 -> 2295 bytes
 .../cctz/testdata/zoneinfo/Africa/Freetown | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/Africa/Gaborone | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Harare | Bin 0 -> 149 bytes
 .../testdata/zoneinfo/Africa/Johannesburg | Bin 0 -> 246 bytes
 .../cctz/testdata/zoneinfo/Africa/Juba | Bin 0 -> 653 bytes
 .../cctz/testdata/zoneinfo/Africa/Kampala | Bin 0 -> 251 bytes
 .../cctz/testdata/zoneinfo/Africa/Khartoum | Bin 0 -> 679 bytes
 .../cctz/testdata/zoneinfo/Africa/Kigali | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Kinshasa | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Lagos | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Libreville | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Lome | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/Africa/Luanda | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Lubumbashi | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Lusaka | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Malabo | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Maputo | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Maseru | Bin 0 -> 246 bytes
 .../cctz/testdata/zoneinfo/Africa/Mbabane | Bin 0 -> 246 bytes
 .../cctz/testdata/zoneinfo/Africa/Mogadishu | Bin 0 -> 251 bytes
 .../cctz/testdata/zoneinfo/Africa/Monrovia | Bin 0 -> 208 bytes
 .../cctz/testdata/zoneinfo/Africa/Nairobi | Bin 0 -> 251 bytes
 .../cctz/testdata/zoneinfo/Africa/Ndjamena | Bin 0 -> 199 bytes
 .../cctz/testdata/zoneinfo/Africa/Niamey | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Nouakchott | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/Africa/Ouagadougou | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/Africa/Porto-Novo | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Sao_Tome | Bin 0 -> 254 bytes
 .../cctz/testdata/zoneinfo/Africa/Timbuktu | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/Africa/Tripoli | Bin 0 -> 625 bytes
 .../cctz/testdata/zoneinfo/Africa/Tunis | Bin 0 -> 689 bytes
 .../cctz/testdata/zoneinfo/Africa/Windhoek | Bin 0 -> 955 bytes
 .../cctz/testdata/zoneinfo/America/Adak | Bin 0 -> 2356 bytes
 .../cctz/testdata/zoneinfo/America/Anchorage | Bin 0 -> 2371 bytes
 .../cctz/testdata/zoneinfo/America/Anguilla | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/Antigua | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/Araguaina | Bin 0 -> 884 bytes
 .../zoneinfo/America/Argentina/Buenos_Aires | Bin 0 -> 1076 bytes
 .../zoneinfo/America/Argentina/Catamarca | Bin 0 -> 1076 bytes
 .../zoneinfo/America/Argentina/ComodRivadavia | Bin 0 -> 1076 bytes
 .../zoneinfo/America/Argentina/Cordoba | Bin 0 -> 1076 bytes
 .../testdata/zoneinfo/America/Argentina/Jujuy | Bin 0 -> 1048 bytes
 .../zoneinfo/America/Argentina/La_Rioja | Bin 0 -> 1090 bytes
 .../zoneinfo/America/Argentina/Mendoza | Bin 0 -> 1076 bytes
 .../zoneinfo/America/Argentina/Rio_Gallegos | Bin 0 -> 1076 bytes
 .../testdata/zoneinfo/America/Argentina/Salta | Bin 0 -> 1048 bytes
 .../zoneinfo/America/Argentina/San_Juan | Bin 0 -> 1090 bytes
 .../zoneinfo/America/Argentina/San_Luis | Bin 0 -> 1102 bytes
 .../zoneinfo/America/Argentina/Tucuman | Bin 0 -> 1104 bytes
 .../zoneinfo/America/Argentina/Ushuaia | Bin 0 -> 1076 bytes
 .../cctz/testdata/zoneinfo/America/Aruba | Bin 0 -> 186 bytes
 .../cctz/testdata/zoneinfo/America/Asuncion | Bin 0 -> 2044 bytes
 .../cctz/testdata/zoneinfo/America/Atikokan | Bin 0 -> 336 bytes
 .../cctz/testdata/zoneinfo/America/Atka | Bin 0 -> 2356 bytes
 .../cctz/testdata/zoneinfo/America/Bahia | Bin 0 -> 1024 bytes
 .../testdata/zoneinfo/America/Bahia_Banderas | Bin 0 -> 1546 bytes
 .../cctz/testdata/zoneinfo/America/Barbados | Bin 0 -> 314 bytes
 .../cctz/testdata/zoneinfo/America/Belem | Bin 0 -> 576 bytes
 .../cctz/testdata/zoneinfo/America/Belize | Bin 0 -> 948 bytes
 .../testdata/zoneinfo/America/Blanc-Sablon | Bin 0 -> 298 bytes
 .../cctz/testdata/zoneinfo/America/Boa_Vista | Bin 0 -> 632 bytes
 .../cctz/testdata/zoneinfo/America/Bogota | Bin 0 -> 246 bytes
 .../cctz/testdata/zoneinfo/America/Boise | Bin 0 -> 2394 bytes
 .../testdata/zoneinfo/America/Buenos_Aires | Bin 0 -> 1076 bytes
 .../testdata/zoneinfo/America/Cambridge_Bay | Bin 0 -> 2084 bytes
 .../testdata/zoneinfo/America/Campo_Grande | Bin 0 -> 1444 bytes
 .../cctz/testdata/zoneinfo/America/Cancun | Bin 0 -> 782 bytes
 .../cctz/testdata/zoneinfo/America/Caracas | Bin 0 -> 264 bytes
 .../cctz/testdata/zoneinfo/America/Catamarca | Bin 0 -> 1076 bytes
 .../cctz/testdata/zoneinfo/America/Cayenne | Bin 0 -> 198 bytes
 .../cctz/testdata/zoneinfo/America/Cayman | Bin 0 -> 182 bytes
 .../cctz/testdata/zoneinfo/America/Chicago | Bin 0 -> 3576 bytes
 .../cctz/testdata/zoneinfo/America/Chihuahua | Bin 0 -> 1484 bytes
 .../testdata/zoneinfo/America/Coral_Harbour | Bin 0 -> 336 bytes
 .../cctz/testdata/zoneinfo/America/Cordoba | Bin 0 -> 1076 bytes
 .../cctz/testdata/zoneinfo/America/Costa_Rica | Bin 0 -> 316 bytes
 .../cctz/testdata/zoneinfo/America/Creston | Bin 0 -> 208 bytes
 .../cctz/testdata/zoneinfo/America/Cuiaba | Bin 0 -> 1416 bytes
 .../cctz/testdata/zoneinfo/America/Curacao | Bin 0 -> 186 bytes
 .../testdata/zoneinfo/America/Danmarkshavn | Bin 0 -> 698 bytes
 .../cctz/testdata/zoneinfo/America/Dawson | Bin 0 -> 2084 bytes
 .../testdata/zoneinfo/America/Dawson_Creek | Bin 0 -> 1050 bytes
 .../cctz/testdata/zoneinfo/America/Denver | Bin 0 -> 2444 bytes
 .../cctz/testdata/zoneinfo/America/Detroit | Bin 0 -> 2230 bytes
 .../cctz/testdata/zoneinfo/America/Dominica | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/Edmonton | Bin 0 -> 2332 bytes
 .../cctz/testdata/zoneinfo/America/Eirunepe | Bin 0 -> 656 bytes
 .../testdata/zoneinfo/America/El_Salvador | Bin 0 -> 224 bytes
 .../cctz/testdata/zoneinfo/America/Ensenada | Bin 0 -> 2342 bytes
 .../testdata/zoneinfo/America/Fort_Nelson | Bin 0 -> 2240 bytes
 .../cctz/testdata/zoneinfo/America/Fort_Wayne | Bin 0 -> 1666 bytes
 .../cctz/testdata/zoneinfo/America/Fortaleza | Bin 0 -> 716 bytes
 .../cctz/testdata/zoneinfo/America/Glace_Bay | Bin 0 -> 2192 bytes
 .../cctz/testdata/zoneinfo/America/Godthab | Bin 0 -> 1878 bytes
 .../cctz/testdata/zoneinfo/America/Goose_Bay | Bin 0 -> 3210 bytes
 .../cctz/testdata/zoneinfo/America/Grand_Turk | Bin 0 -> 1848 bytes
 .../cctz/testdata/zoneinfo/America/Grenada | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/Guadeloupe | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/Guatemala | Bin 0 -> 280 bytes
 .../cctz/testdata/zoneinfo/America/Guayaquil | Bin 0 -> 246 bytes
 .../cctz/testdata/zoneinfo/America/Guyana | Bin 0 -> 236 bytes
 .../cctz/testdata/zoneinfo/America/Halifax | Bin 0 -> 3424 bytes
 .../cctz/testdata/zoneinfo/America/Havana | Bin 0 -> 2416 bytes
 .../cctz/testdata/zoneinfo/America/Hermosillo | Bin 0 -> 416 bytes
 .../zoneinfo/America/Indiana/Indianapolis | Bin 0 -> 1666 bytes
 .../testdata/zoneinfo/America/Indiana/Knox | Bin 0 -> 2428 bytes
 .../testdata/zoneinfo/America/Indiana/Marengo | Bin 0 -> 1722 bytes
 .../zoneinfo/America/Indiana/Petersburg | Bin 0 -> 1904 bytes
 .../zoneinfo/America/Indiana/Tell_City | Bin 0 -> 1684 bytes
 .../testdata/zoneinfo/America/Indiana/Vevay | Bin 0 -> 1414 bytes
 .../zoneinfo/America/Indiana/Vincennes | Bin 0 -> 1694 bytes
 .../testdata/zoneinfo/America/Indiana/Winamac | Bin 0 -> 1778 bytes
 .../testdata/zoneinfo/America/Indianapolis | Bin 0 -> 1666 bytes
 .../cctz/testdata/zoneinfo/America/Inuvik | Bin 0 -> 1894 bytes
 .../cctz/testdata/zoneinfo/America/Iqaluit | Bin 0 -> 2032 bytes
 .../cctz/testdata/zoneinfo/America/Jamaica | Bin 0 -> 482 bytes
 .../cctz/testdata/zoneinfo/America/Jujuy | Bin 0 -> 1048 bytes
 .../cctz/testdata/zoneinfo/America/Juneau | Bin 0 -> 2353 bytes
 .../zoneinfo/America/Kentucky/Louisville | Bin 0 -> 2772 bytes
 .../zoneinfo/America/Kentucky/Monticello | Bin 0 -> 2352 bytes
 .../cctz/testdata/zoneinfo/America/Knox_IN | Bin 0 -> 2428 bytes
 .../cctz/testdata/zoneinfo/America/Kralendijk | Bin 0 -> 186 bytes
 .../cctz/testdata/zoneinfo/America/La_Paz | Bin 0 -> 232 bytes
 .../cctz/testdata/zoneinfo/America/Lima | Bin 0 -> 406 bytes
 .../testdata/zoneinfo/America/Los_Angeles | Bin 0 -> 2836 bytes
 .../cctz/testdata/zoneinfo/America/Louisville | Bin 0 -> 2772 bytes
 .../testdata/zoneinfo/America/Lower_Princes | Bin 0 -> 186 bytes
 .../cctz/testdata/zoneinfo/America/Maceio | Bin 0 -> 744 bytes
 .../cctz/testdata/zoneinfo/America/Managua | Bin 0 -> 430 bytes
 .../cctz/testdata/zoneinfo/America/Manaus | Bin 0 -> 604 bytes
 .../cctz/testdata/zoneinfo/America/Marigot | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/Martinique | Bin 0 -> 232 bytes
 .../cctz/testdata/zoneinfo/America/Matamoros | Bin 0 -> 1390 bytes
 .../cctz/testdata/zoneinfo/America/Mazatlan | Bin 0 -> 1526 bytes
 .../cctz/testdata/zoneinfo/America/Mendoza | Bin 0 -> 1076 bytes
 .../cctz/testdata/zoneinfo/America/Menominee | Bin 0 -> 2274 bytes
 .../cctz/testdata/zoneinfo/America/Merida | Bin 0 -> 1422 bytes
 .../cctz/testdata/zoneinfo/America/Metlakatla | Bin 0 -> 1423 bytes
 .../testdata/zoneinfo/America/Mexico_City | Bin 0 -> 1584 bytes
 .../cctz/testdata/zoneinfo/America/Miquelon | Bin 0 -> 1666 bytes
 .../cctz/testdata/zoneinfo/America/Moncton | Bin 0 -> 3154 bytes
 .../cctz/testdata/zoneinfo/America/Monterrey | Bin 0 -> 1390 bytes
 .../cctz/testdata/zoneinfo/America/Montevideo | Bin 0 -> 1510 bytes
 .../cctz/testdata/zoneinfo/America/Montreal | Bin 0 -> 3494 bytes
 .../cctz/testdata/zoneinfo/America/Montserrat | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/Nassau | Bin 0 -> 2258 bytes
 .../cctz/testdata/zoneinfo/America/New_York | Bin 0 -> 3536 bytes
 .../cctz/testdata/zoneinfo/America/Nipigon | Bin 0 -> 2122 bytes
 .../cctz/testdata/zoneinfo/America/Nome | Bin 0 -> 2367 bytes
 .../cctz/testdata/zoneinfo/America/Noronha | Bin 0 -> 716 bytes
 .../zoneinfo/America/North_Dakota/Beulah | Bin 0 -> 2380 bytes
 .../zoneinfo/America/North_Dakota/Center | Bin 0 -> 2380 bytes
 .../zoneinfo/America/North_Dakota/New_Salem | Bin 0 -> 2380 bytes
 .../cctz/testdata/zoneinfo/America/Ojinaga | Bin 0 -> 1484 bytes
 .../cctz/testdata/zoneinfo/America/Panama | Bin 0 -> 182 bytes
 .../testdata/zoneinfo/America/Pangnirtung | Bin 0 -> 2094 bytes
 .../cctz/testdata/zoneinfo/America/Paramaribo | Bin 0 -> 262 bytes
 .../cctz/testdata/zoneinfo/America/Phoenix | Bin 0 -> 328 bytes
 .../testdata/zoneinfo/America/Port-au-Prince | Bin 0 -> 1434 bytes
 .../testdata/zoneinfo/America/Port_of_Spain | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/Porto_Acre | Bin 0 -> 628 bytes
 .../testdata/zoneinfo/America/Porto_Velho | Bin 0 -> 576 bytes
 .../testdata/zoneinfo/America/Puerto_Rico | Bin 0 -> 246 bytes
 .../testdata/zoneinfo/America/Punta_Arenas | Bin 0 -> 1902 bytes
 .../testdata/zoneinfo/America/Rainy_River | Bin 0 -> 2122 bytes
 .../testdata/zoneinfo/America/Rankin_Inlet | Bin 0 -> 1892 bytes
 .../cctz/testdata/zoneinfo/America/Recife | Bin 0 -> 716 bytes
 .../cctz/testdata/zoneinfo/America/Regina | Bin 0 -> 980 bytes
 .../cctz/testdata/zoneinfo/America/Resolute | Bin 0 -> 1892 bytes
 .../cctz/testdata/zoneinfo/America/Rio_Branco | Bin 0 -> 628 bytes
 .../cctz/testdata/zoneinfo/America/Rosario | Bin 0 -> 1076 bytes
 .../testdata/zoneinfo/America/Santa_Isabel | Bin 0 -> 2342 bytes
 .../cctz/testdata/zoneinfo/America/Santarem | Bin 0 -> 602 bytes
 .../cctz/testdata/zoneinfo/America/Santiago | Bin 0 -> 2529 bytes
 .../testdata/zoneinfo/America/Santo_Domingo | Bin 0 -> 458 bytes
 .../cctz/testdata/zoneinfo/America/Sao_Paulo | Bin 0 -> 1444 bytes
 .../testdata/zoneinfo/America/Scoresbysund | Bin 0 -> 1916 bytes
 .../cctz/testdata/zoneinfo/America/Shiprock | Bin 0 -> 2444 bytes
 .../cctz/testdata/zoneinfo/America/Sitka | Bin 0 -> 2329 bytes
 .../testdata/zoneinfo/America/St_Barthelemy | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/St_Johns | Bin 0 -> 3655 bytes
 .../cctz/testdata/zoneinfo/America/St_Kitts | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/St_Lucia | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/St_Thomas | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/St_Vincent | Bin 0 -> 148 bytes
 .../testdata/zoneinfo/America/Swift_Current | Bin 0 -> 560 bytes
 .../testdata/zoneinfo/America/Tegucigalpa | Bin 0 -> 252 bytes
 .../cctz/testdata/zoneinfo/America/Thule | Bin 0 -> 1502 bytes
 .../testdata/zoneinfo/America/Thunder_Bay | Bin 0 -> 2202 bytes
 .../cctz/testdata/zoneinfo/America/Tijuana | Bin 0 -> 2342 bytes
 .../cctz/testdata/zoneinfo/America/Toronto | Bin 0 -> 3494 bytes
 .../cctz/testdata/zoneinfo/America/Tortola | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/Vancouver | Bin 0 -> 2892 bytes
 .../cctz/testdata/zoneinfo/America/Virgin | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/America/Whitehorse | Bin 0 -> 2084 bytes
 .../cctz/testdata/zoneinfo/America/Winnipeg | Bin 0 -> 2868 bytes
 .../cctz/testdata/zoneinfo/America/Yakutat | Bin 0 -> 2305 bytes
 .../testdata/zoneinfo/America/Yellowknife | Bin 0 -> 1966 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Casey | Bin 0 -> 297 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Davis | Bin 0 -> 297 bytes
 .../zoneinfo/Antarctica/DumontDUrville | Bin 0 -> 194 bytes
 .../testdata/zoneinfo/Antarctica/Macquarie | Bin 0 -> 1520 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Mawson | Bin 0 -> 199 bytes
 .../cctz/testdata/zoneinfo/Antarctica/McMurdo | Bin 0 -> 2437 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Palmer | Bin 0 -> 1418 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Rothera | Bin 0 -> 164 bytes
 .../testdata/zoneinfo/Antarctica/South_Pole | Bin 0 -> 2437 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Syowa | Bin 0 -> 165 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Troll | Bin 0 -> 1162 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Vostok | Bin 0 -> 165 bytes
 .../testdata/zoneinfo/Arctic/Longyearbyen | Bin 0 -> 2228 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Aden | Bin 0 -> 165 bytes
 .../cctz/testdata/zoneinfo/Asia/Almaty | Bin 0 -> 997 bytes
 .../cctz/testdata/zoneinfo/Asia/Amman | Bin 0 -> 1853 bytes
 .../cctz/testdata/zoneinfo/Asia/Anadyr | Bin 0 -> 1188 bytes
 .../cctz/testdata/zoneinfo/Asia/Aqtau | Bin 0 -> 983 bytes
 .../cctz/testdata/zoneinfo/Asia/Aqtobe | Bin 0 -> 1011 bytes
 .../cctz/testdata/zoneinfo/Asia/Ashgabat | Bin 0 -> 619 bytes
 .../cctz/testdata/zoneinfo/Asia/Ashkhabad | Bin 0 -> 619 bytes
 .../cctz/testdata/zoneinfo/Asia/Atyrau | Bin 0 -> 991 bytes
 .../cctz/testdata/zoneinfo/Asia/Baghdad | Bin 0 -> 983 bytes
 .../cctz/testdata/zoneinfo/Asia/Bahrain | Bin 0 -> 199 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Baku | Bin 0 -> 1227 bytes
 .../cctz/testdata/zoneinfo/Asia/Bangkok | Bin 0 -> 199 bytes
 .../cctz/testdata/zoneinfo/Asia/Barnaul | Bin 0 -> 1221 bytes
 .../cctz/testdata/zoneinfo/Asia/Beirut | Bin 0 -> 2154 bytes
 .../cctz/testdata/zoneinfo/Asia/Bishkek | Bin 0 -> 983 bytes
 .../cctz/testdata/zoneinfo/Asia/Brunei | Bin 0 -> 203 bytes
 .../cctz/testdata/zoneinfo/Asia/Calcutta | Bin 0 -> 285 bytes
 .../cctz/testdata/zoneinfo/Asia/Chita | Bin 0 -> 1221 bytes
 .../cctz/testdata/zoneinfo/Asia/Choibalsan | Bin 0 -> 949 bytes
 .../cctz/testdata/zoneinfo/Asia/Chongqing | Bin 0 -> 533 bytes
 .../cctz/testdata/zoneinfo/Asia/Chungking | Bin 0 -> 533 bytes
 .../cctz/testdata/zoneinfo/Asia/Colombo | Bin 0 -> 372 bytes
 .../cctz/testdata/zoneinfo/Asia/Dacca | Bin 0 -> 337 bytes
 .../cctz/testdata/zoneinfo/Asia/Damascus | Bin 0 -> 2294 bytes
 .../cctz/testdata/zoneinfo/Asia/Dhaka | Bin 0 -> 337 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Dili | Bin 0 -> 227 bytes
 .../cctz/testdata/zoneinfo/Asia/Dubai | Bin 0 -> 165 bytes
 .../cctz/testdata/zoneinfo/Asia/Dushanbe | Bin 0 -> 591 bytes
 .../cctz/testdata/zoneinfo/Asia/Famagusta | Bin 0 -> 2028 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Gaza | Bin 0 -> 2316 bytes
 .../cctz/testdata/zoneinfo/Asia/Harbin | Bin 0 -> 533 bytes
 .../cctz/testdata/zoneinfo/Asia/Hebron | Bin 0 -> 2344 bytes
 .../cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh | Bin 0 -> 351 bytes
 .../cctz/testdata/zoneinfo/Asia/Hong_Kong | Bin 0 -> 1203 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Hovd | Bin 0 -> 891 bytes
 .../cctz/testdata/zoneinfo/Asia/Irkutsk | Bin 0 -> 1243 bytes
 .../cctz/testdata/zoneinfo/Asia/Istanbul | Bin 0 -> 1947 bytes
 .../cctz/testdata/zoneinfo/Asia/Jakarta | Bin 0 -> 355 bytes
 .../cctz/testdata/zoneinfo/Asia/Jayapura | Bin 0 -> 221 bytes
 .../cctz/testdata/zoneinfo/Asia/Jerusalem | Bin 0 -> 2288 bytes
 .../cctz/testdata/zoneinfo/Asia/Kabul | Bin 0 -> 208 bytes
 .../cctz/testdata/zoneinfo/Asia/Kamchatka | Bin 0 -> 1166 bytes
 .../cctz/testdata/zoneinfo/Asia/Karachi | Bin 0 -> 379 bytes
 .../cctz/testdata/zoneinfo/Asia/Kashgar | Bin 0 -> 165 bytes
 .../cctz/testdata/zoneinfo/Asia/Kathmandu | Bin 0 -> 212 bytes
 .../cctz/testdata/zoneinfo/Asia/Katmandu | Bin 0 -> 212 bytes
 .../cctz/testdata/zoneinfo/Asia/Khandyga | Bin 0 -> 1271 bytes
 .../cctz/testdata/zoneinfo/Asia/Kolkata | Bin 0 -> 285 bytes
 .../cctz/testdata/zoneinfo/Asia/Krasnoyarsk | Bin 0 -> 1207 bytes
 .../cctz/testdata/zoneinfo/Asia/Kuala_Lumpur | Bin 0 -> 383 bytes
 .../cctz/testdata/zoneinfo/Asia/Kuching | Bin 0 -> 483 bytes
 .../cctz/testdata/zoneinfo/Asia/Kuwait | Bin 0 -> 165 bytes
 .../cctz/testdata/zoneinfo/Asia/Macao | Bin 0 -> 1227 bytes
 .../cctz/testdata/zoneinfo/Asia/Macau | Bin 0 -> 1227 bytes
 .../cctz/testdata/zoneinfo/Asia/Magadan | Bin 0 -> 1222 bytes
 .../cctz/testdata/zoneinfo/Asia/Makassar | Bin 0 -> 254 bytes
 .../cctz/testdata/zoneinfo/Asia/Manila | Bin 0 -> 328 bytes
 .../cctz/testdata/zoneinfo/Asia/Muscat | Bin 0 -> 165 bytes
 .../cctz/testdata/zoneinfo/Asia/Nicosia | Bin 0 -> 2002 bytes
 .../cctz/testdata/zoneinfo/Asia/Novokuznetsk | Bin 0 -> 1165 bytes
 .../cctz/testdata/zoneinfo/Asia/Novosibirsk | Bin 0 -> 1221 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Omsk | Bin 0 -> 1207 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Oral | Bin 0 -> 1005 bytes
 .../cctz/testdata/zoneinfo/Asia/Phnom_Penh | Bin 0 -> 199 bytes
 .../cctz/testdata/zoneinfo/Asia/Pontianak | Bin 0 -> 353 bytes
 .../cctz/testdata/zoneinfo/Asia/Pyongyang | Bin 0 -> 237 bytes
 .../cctz/testdata/zoneinfo/Asia/Qatar | Bin 0 -> 199 bytes
 .../cctz/testdata/zoneinfo/Asia/Qostanay | Bin 0 -> 1011 bytes
 .../cctz/testdata/zoneinfo/Asia/Qyzylorda | Bin 0 -> 1025 bytes
 .../cctz/testdata/zoneinfo/Asia/Rangoon | Bin 0 -> 268 bytes
 .../cctz/testdata/zoneinfo/Asia/Riyadh | Bin 0 -> 165 bytes
 .../cctz/testdata/zoneinfo/Asia/Saigon | Bin 0 -> 351 bytes
 .../cctz/testdata/zoneinfo/Asia/Sakhalin | Bin 0 -> 1202 bytes
 .../cctz/testdata/zoneinfo/Asia/Samarkand | Bin 0 -> 577 bytes
 .../cctz/testdata/zoneinfo/Asia/Seoul | Bin 0 -> 617 bytes
 .../cctz/testdata/zoneinfo/Asia/Shanghai | Bin 0 -> 533 bytes
 .../cctz/testdata/zoneinfo/Asia/Singapore | Bin 0 -> 383 bytes
 .../cctz/testdata/zoneinfo/Asia/Srednekolymsk | Bin 0 -> 1208 bytes
 .../cctz/testdata/zoneinfo/Asia/Taipei | Bin 0 -> 761 bytes
 .../cctz/testdata/zoneinfo/Asia/Tashkent | Bin 0 -> 591 bytes
 .../cctz/testdata/zoneinfo/Asia/Tbilisi | Bin 0 -> 1035 bytes
 .../cctz/testdata/zoneinfo/Asia/Tehran | Bin 0 -> 2582 bytes
 .../cctz/testdata/zoneinfo/Asia/Tel_Aviv | Bin 0 -> 2288 bytes
 .../cctz/testdata/zoneinfo/Asia/Thimbu | Bin 0 -> 203 bytes
 .../cctz/testdata/zoneinfo/Asia/Thimphu | Bin 0 -> 203 bytes
 .../cctz/testdata/zoneinfo/Asia/Tokyo | Bin 0 -> 309 bytes
 .../cctz/testdata/zoneinfo/Asia/Tomsk | Bin 0 -> 1221 bytes
 .../cctz/testdata/zoneinfo/Asia/Ujung_Pandang | Bin 0 -> 254 bytes
 .../cctz/testdata/zoneinfo/Asia/Ulaanbaatar | Bin 0 -> 891 bytes
 .../cctz/testdata/zoneinfo/Asia/Ulan_Bator | Bin 0 -> 891 bytes
 .../cctz/testdata/zoneinfo/Asia/Urumqi | Bin 0 -> 165 bytes
 .../cctz/testdata/zoneinfo/Asia/Ust-Nera | Bin 0 -> 1252 bytes
 .../cctz/testdata/zoneinfo/Asia/Vientiane | Bin 0 -> 199 bytes
 .../cctz/testdata/zoneinfo/Asia/Vladivostok | Bin 0 -> 1208 bytes
 .../cctz/testdata/zoneinfo/Asia/Yakutsk | Bin 0 -> 1207 bytes
 .../cctz/testdata/zoneinfo/Asia/Yangon | Bin 0 -> 268 bytes
 .../cctz/testdata/zoneinfo/Asia/Yekaterinburg | Bin 0 -> 1243 bytes
 .../cctz/testdata/zoneinfo/Asia/Yerevan | Bin 0 -> 1151 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Azores | Bin 0 -> 3484 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Bermuda | Bin 0 -> 1978 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Canary | Bin 0 -> 1897 bytes
 .../testdata/zoneinfo/Atlantic/Cape_Verde | Bin 0 -> 270 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Faeroe | Bin 0 -> 1815 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Faroe | Bin 0 -> 1815 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Jan_Mayen | Bin 0 -> 2228 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Madeira | Bin 0 -> 3475 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Reykjavik | Bin 0 -> 1162 bytes
 .../testdata/zoneinfo/Atlantic/South_Georgia | Bin 0 -> 164 bytes
 .../cctz/testdata/zoneinfo/Atlantic/St_Helena | Bin 0 -> 148 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Stanley | Bin 0 -> 1214 bytes
 .../cctz/testdata/zoneinfo/Australia/ACT | Bin 0 -> 2204 bytes
 .../cctz/testdata/zoneinfo/Australia/Adelaide | Bin 0 -> 2222 bytes
 .../cctz/testdata/zoneinfo/Australia/Brisbane | Bin 0 -> 433 bytes
 .../testdata/zoneinfo/Australia/Broken_Hill | Bin 0 -> 2243 bytes
 .../cctz/testdata/zoneinfo/Australia/Canberra | Bin 0 -> 2204 bytes
 .../cctz/testdata/zoneinfo/Australia/Currie | Bin 0 -> 2204 bytes
 .../cctz/testdata/zoneinfo/Australia/Darwin | Bin 0 -> 304 bytes
 .../cctz/testdata/zoneinfo/Australia/Eucla | Bin 0 -> 484 bytes
 .../cctz/testdata/zoneinfo/Australia/Hobart | Bin 0 -> 2316 bytes
 .../cctz/testdata/zoneinfo/Australia/LHI | Bin 0 -> 1860 bytes
 .../cctz/testdata/zoneinfo/Australia/Lindeman | Bin 0 -> 489 bytes
 .../testdata/zoneinfo/Australia/Lord_Howe | Bin 0 -> 1860 bytes
 .../testdata/zoneinfo/Australia/Melbourne | Bin 0 -> 2204 bytes
 .../cctz/testdata/zoneinfo/Australia/NSW | Bin 0 -> 2204 bytes
 .../cctz/testdata/zoneinfo/Australia/North | Bin 0 -> 304 bytes
 .../cctz/testdata/zoneinfo/Australia/Perth | Bin 0 -> 460 bytes
 .../testdata/zoneinfo/Australia/Queensland | Bin 0 -> 433 bytes
 .../cctz/testdata/zoneinfo/Australia/South | Bin 0 -> 2222 bytes
 .../cctz/testdata/zoneinfo/Australia/Sydney | Bin 0 -> 2204 bytes
 .../cctz/testdata/zoneinfo/Australia/Tasmania | Bin 0 -> 2316 bytes
 .../cctz/testdata/zoneinfo/Australia/Victoria | Bin 0 -> 2204 bytes
 .../cctz/testdata/zoneinfo/Australia/West | Bin 0 -> 460 bytes
 .../testdata/zoneinfo/Australia/Yancowinna | Bin 0 -> 2243 bytes
 .../cctz/testdata/zoneinfo/Brazil/Acre | Bin 0 -> 628 bytes
 .../cctz/testdata/zoneinfo/Brazil/DeNoronha | Bin 0 -> 716 bytes
 .../cctz/testdata/zoneinfo/Brazil/East | Bin 0 -> 1444 bytes
 .../cctz/testdata/zoneinfo/Brazil/West | Bin 0 -> 604 bytes
 .../time/internal/cctz/testdata/zoneinfo/CET | Bin 0 -> 2094 bytes
 .../internal/cctz/testdata/zoneinfo/CST6CDT | Bin 0 -> 2310 bytes
 .../cctz/testdata/zoneinfo/Canada/Atlantic | Bin 0 -> 3424 bytes
 .../cctz/testdata/zoneinfo/Canada/Central | Bin 0 -> 2868 bytes
 .../cctz/testdata/zoneinfo/Canada/Eastern | Bin 0 -> 3494 bytes
 .../cctz/testdata/zoneinfo/Canada/Mountain | Bin 0 -> 2332 bytes
 .../testdata/zoneinfo/Canada/Newfoundland | Bin 0 -> 3655 bytes
 .../cctz/testdata/zoneinfo/Canada/Pacific | Bin 0 -> 2892 bytes
 .../testdata/zoneinfo/Canada/Saskatchewan | Bin 0 -> 980 bytes
 .../cctz/testdata/zoneinfo/Canada/Yukon | Bin 0 -> 2084 bytes
 .../cctz/testdata/zoneinfo/Chile/Continental | Bin 0 -> 2529 bytes
 .../cctz/testdata/zoneinfo/Chile/EasterIsland | Bin 0 -> 2233 bytes
 .../time/internal/cctz/testdata/zoneinfo/Cuba | Bin 0 -> 2416 bytes
 .../time/internal/cctz/testdata/zoneinfo/EET | Bin 0 -> 1908 bytes
 .../time/internal/cctz/testdata/zoneinfo/EST | Bin 0 -> 114 bytes
 .../internal/cctz/testdata/zoneinfo/EST5EDT | Bin 0 -> 2310 bytes
 .../internal/cctz/testdata/zoneinfo/Egypt | Bin 0 -> 1955 bytes
 .../time/internal/cctz/testdata/zoneinfo/Eire | Bin 0 -> 3492 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT | Bin 0 -> 114 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT+0 | Bin 0 -> 114 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT+1 | Bin 0 -> 116 bytes
 .../cctz/testdata/zoneinfo/Etc/GMT+10 | Bin 0 -> 117 bytes
 .../cctz/testdata/zoneinfo/Etc/GMT+11 | Bin 0 -> 117 bytes
 .../cctz/testdata/zoneinfo/Etc/GMT+12 | Bin 0 -> 117 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT+2 | Bin 0 -> 116 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT+3 | Bin 0 -> 116 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT+4 | Bin 0 -> 116 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT+5 | Bin 0 -> 116 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT+6 | Bin 0 -> 116 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT+7 | Bin 0 -> 116 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT+8 | Bin 0 -> 116 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT+9 | Bin 0 -> 116 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT-0 | Bin 0 -> 114 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT-1 | Bin 0 -> 117 bytes
 .../cctz/testdata/zoneinfo/Etc/GMT-10 | Bin 0 -> 118 bytes
 .../cctz/testdata/zoneinfo/Etc/GMT-11 | Bin 0 -> 118 bytes
 .../cctz/testdata/zoneinfo/Etc/GMT-12 | Bin 0 -> 118 bytes
 .../cctz/testdata/zoneinfo/Etc/GMT-13 | Bin 0 -> 118 bytes
 .../cctz/testdata/zoneinfo/Etc/GMT-14 | Bin 0 -> 118 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT-2 | Bin 0 -> 117 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT-3 | Bin 0 -> 117 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT-4 | Bin 0 -> 117 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT-5 | Bin 0 -> 117 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT-6 | Bin 0 -> 117 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT-7 | Bin 0 -> 117 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT-8 | Bin 0 -> 117 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT-9 | Bin 0 -> 117 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/GMT0 | Bin 0 -> 114 bytes
 .../cctz/testdata/zoneinfo/Etc/Greenwich | Bin 0 -> 114 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/UCT | Bin 0 -> 114 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/UTC | Bin 0 -> 114 bytes
 .../cctz/testdata/zoneinfo/Etc/Universal | Bin 0 -> 114 bytes
 .../internal/cctz/testdata/zoneinfo/Etc/Zulu | Bin 0 -> 114 bytes
 .../cctz/testdata/zoneinfo/Europe/Amsterdam | Bin 0 -> 2910 bytes
 .../cctz/testdata/zoneinfo/Europe/Andorra | Bin 0 -> 1742 bytes
.../cctz/testdata/zoneinfo/Europe/Astrakhan | Bin 0 -> 1165 bytes .../cctz/testdata/zoneinfo/Europe/Athens | Bin 0 -> 2262 bytes .../cctz/testdata/zoneinfo/Europe/Belfast | Bin 0 -> 3648 bytes .../cctz/testdata/zoneinfo/Europe/Belgrade | Bin 0 -> 1920 bytes .../cctz/testdata/zoneinfo/Europe/Berlin | Bin 0 -> 2298 bytes .../cctz/testdata/zoneinfo/Europe/Bratislava | Bin 0 -> 2301 bytes .../cctz/testdata/zoneinfo/Europe/Brussels | Bin 0 -> 2933 bytes .../cctz/testdata/zoneinfo/Europe/Bucharest | Bin 0 -> 2184 bytes .../cctz/testdata/zoneinfo/Europe/Budapest | Bin 0 -> 2368 bytes .../cctz/testdata/zoneinfo/Europe/Busingen | Bin 0 -> 1909 bytes .../cctz/testdata/zoneinfo/Europe/Chisinau | Bin 0 -> 2390 bytes .../cctz/testdata/zoneinfo/Europe/Copenhagen | Bin 0 -> 2137 bytes .../cctz/testdata/zoneinfo/Europe/Dublin | Bin 0 -> 3492 bytes .../cctz/testdata/zoneinfo/Europe/Gibraltar | Bin 0 -> 3052 bytes .../cctz/testdata/zoneinfo/Europe/Guernsey | Bin 0 -> 3648 bytes .../cctz/testdata/zoneinfo/Europe/Helsinki | Bin 0 -> 1900 bytes .../cctz/testdata/zoneinfo/Europe/Isle_of_Man | Bin 0 -> 3648 bytes .../cctz/testdata/zoneinfo/Europe/Istanbul | Bin 0 -> 1947 bytes .../cctz/testdata/zoneinfo/Europe/Jersey | Bin 0 -> 3648 bytes .../cctz/testdata/zoneinfo/Europe/Kaliningrad | Bin 0 -> 1493 bytes .../cctz/testdata/zoneinfo/Europe/Kiev | Bin 0 -> 2088 bytes .../cctz/testdata/zoneinfo/Europe/Kirov | Bin 0 -> 1153 bytes .../cctz/testdata/zoneinfo/Europe/Lisbon | Bin 0 -> 3469 bytes .../cctz/testdata/zoneinfo/Europe/Ljubljana | Bin 0 -> 1920 bytes .../cctz/testdata/zoneinfo/Europe/London | Bin 0 -> 3648 bytes .../cctz/testdata/zoneinfo/Europe/Luxembourg | Bin 0 -> 2946 bytes .../cctz/testdata/zoneinfo/Europe/Madrid | Bin 0 -> 2614 bytes .../cctz/testdata/zoneinfo/Europe/Malta | Bin 0 -> 2620 bytes .../cctz/testdata/zoneinfo/Europe/Mariehamn | Bin 0 -> 1900 bytes .../cctz/testdata/zoneinfo/Europe/Minsk | Bin 0 -> 1321 bytes .../cctz/testdata/zoneinfo/Europe/Monaco | Bin 0 -> 2944 bytes .../cctz/testdata/zoneinfo/Europe/Moscow | Bin 0 -> 1535 bytes .../cctz/testdata/zoneinfo/Europe/Nicosia | Bin 0 -> 2002 bytes .../cctz/testdata/zoneinfo/Europe/Oslo | Bin 0 -> 2228 bytes .../cctz/testdata/zoneinfo/Europe/Paris | Bin 0 -> 2962 bytes .../cctz/testdata/zoneinfo/Europe/Podgorica | Bin 0 -> 1920 bytes .../cctz/testdata/zoneinfo/Europe/Prague | Bin 0 -> 2301 bytes .../cctz/testdata/zoneinfo/Europe/Riga | Bin 0 -> 2198 bytes .../cctz/testdata/zoneinfo/Europe/Rome | Bin 0 -> 2641 bytes .../cctz/testdata/zoneinfo/Europe/Samara | Bin 0 -> 1215 bytes .../cctz/testdata/zoneinfo/Europe/San_Marino | Bin 0 -> 2641 bytes .../cctz/testdata/zoneinfo/Europe/Sarajevo | Bin 0 -> 1920 bytes .../cctz/testdata/zoneinfo/Europe/Saratov | Bin 0 -> 1183 bytes .../cctz/testdata/zoneinfo/Europe/Simferopol | Bin 0 -> 1453 bytes .../cctz/testdata/zoneinfo/Europe/Skopje | Bin 0 -> 1920 bytes .../cctz/testdata/zoneinfo/Europe/Sofia | Bin 0 -> 2077 bytes .../cctz/testdata/zoneinfo/Europe/Stockholm | Bin 0 -> 1909 bytes .../cctz/testdata/zoneinfo/Europe/Tallinn | Bin 0 -> 2148 bytes .../cctz/testdata/zoneinfo/Europe/Tirane | Bin 0 -> 2084 bytes .../cctz/testdata/zoneinfo/Europe/Tiraspol | Bin 0 -> 2390 bytes .../cctz/testdata/zoneinfo/Europe/Ulyanovsk | Bin 0 -> 1267 bytes .../cctz/testdata/zoneinfo/Europe/Uzhgorod | Bin 0 -> 2050 bytes .../cctz/testdata/zoneinfo/Europe/Vaduz | Bin 0 -> 1909 bytes .../cctz/testdata/zoneinfo/Europe/Vatican | Bin 0 -> 2641 bytes .../cctz/testdata/zoneinfo/Europe/Vienna | Bin 0 -> 2200 bytes 
.../cctz/testdata/zoneinfo/Europe/Vilnius | Bin 0 -> 2162 bytes .../cctz/testdata/zoneinfo/Europe/Volgograd | Bin 0 -> 1165 bytes .../cctz/testdata/zoneinfo/Europe/Warsaw | Bin 0 -> 2654 bytes .../cctz/testdata/zoneinfo/Europe/Zagreb | Bin 0 -> 1920 bytes .../cctz/testdata/zoneinfo/Europe/Zaporozhye | Bin 0 -> 2106 bytes .../cctz/testdata/zoneinfo/Europe/Zurich | Bin 0 -> 1909 bytes .../internal/cctz/testdata/zoneinfo/Factory | Bin 0 -> 116 bytes .../time/internal/cctz/testdata/zoneinfo/GB | Bin 0 -> 3648 bytes .../internal/cctz/testdata/zoneinfo/GB-Eire | Bin 0 -> 3648 bytes .../time/internal/cctz/testdata/zoneinfo/GMT | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/GMT+0 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/GMT-0 | Bin 0 -> 114 bytes .../time/internal/cctz/testdata/zoneinfo/GMT0 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Greenwich | Bin 0 -> 114 bytes .../time/internal/cctz/testdata/zoneinfo/HST | Bin 0 -> 115 bytes .../internal/cctz/testdata/zoneinfo/Hongkong | Bin 0 -> 1203 bytes .../internal/cctz/testdata/zoneinfo/Iceland | Bin 0 -> 1162 bytes .../testdata/zoneinfo/Indian/Antananarivo | Bin 0 -> 251 bytes .../cctz/testdata/zoneinfo/Indian/Chagos | Bin 0 -> 199 bytes .../cctz/testdata/zoneinfo/Indian/Christmas | Bin 0 -> 165 bytes .../cctz/testdata/zoneinfo/Indian/Cocos | Bin 0 -> 174 bytes .../cctz/testdata/zoneinfo/Indian/Comoro | Bin 0 -> 251 bytes .../cctz/testdata/zoneinfo/Indian/Kerguelen | Bin 0 -> 165 bytes .../cctz/testdata/zoneinfo/Indian/Mahe | Bin 0 -> 165 bytes .../cctz/testdata/zoneinfo/Indian/Maldives | Bin 0 -> 199 bytes .../cctz/testdata/zoneinfo/Indian/Mauritius | Bin 0 -> 241 bytes .../cctz/testdata/zoneinfo/Indian/Mayotte | Bin 0 -> 251 bytes .../cctz/testdata/zoneinfo/Indian/Reunion | Bin 0 -> 165 bytes .../time/internal/cctz/testdata/zoneinfo/Iran | Bin 0 -> 2582 bytes .../internal/cctz/testdata/zoneinfo/Israel | Bin 0 -> 2288 bytes .../internal/cctz/testdata/zoneinfo/Jamaica | Bin 0 -> 482 bytes .../internal/cctz/testdata/zoneinfo/Japan | Bin 0 -> 309 bytes .../internal/cctz/testdata/zoneinfo/Kwajalein | Bin 0 -> 316 bytes .../internal/cctz/testdata/zoneinfo/Libya | Bin 0 -> 625 bytes .../time/internal/cctz/testdata/zoneinfo/MET | Bin 0 -> 2094 bytes .../time/internal/cctz/testdata/zoneinfo/MST | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/MST7MDT | Bin 0 -> 2310 bytes .../cctz/testdata/zoneinfo/Mexico/BajaNorte | Bin 0 -> 2342 bytes .../cctz/testdata/zoneinfo/Mexico/BajaSur | Bin 0 -> 1526 bytes .../cctz/testdata/zoneinfo/Mexico/General | Bin 0 -> 1584 bytes .../time/internal/cctz/testdata/zoneinfo/NZ | Bin 0 -> 2437 bytes .../internal/cctz/testdata/zoneinfo/NZ-CHAT | Bin 0 -> 2068 bytes .../internal/cctz/testdata/zoneinfo/Navajo | Bin 0 -> 2444 bytes .../time/internal/cctz/testdata/zoneinfo/PRC | Bin 0 -> 533 bytes .../internal/cctz/testdata/zoneinfo/PST8PDT | Bin 0 -> 2310 bytes .../cctz/testdata/zoneinfo/Pacific/Apia | Bin 0 -> 1097 bytes .../cctz/testdata/zoneinfo/Pacific/Auckland | Bin 0 -> 2437 bytes .../testdata/zoneinfo/Pacific/Bougainville | Bin 0 -> 268 bytes .../cctz/testdata/zoneinfo/Pacific/Chatham | Bin 0 -> 2068 bytes .../cctz/testdata/zoneinfo/Pacific/Chuuk | Bin 0 -> 269 bytes .../cctz/testdata/zoneinfo/Pacific/Easter | Bin 0 -> 2233 bytes .../cctz/testdata/zoneinfo/Pacific/Efate | Bin 0 -> 466 bytes .../cctz/testdata/zoneinfo/Pacific/Enderbury | Bin 0 -> 234 bytes .../cctz/testdata/zoneinfo/Pacific/Fakaofo | Bin 0 -> 200 bytes .../cctz/testdata/zoneinfo/Pacific/Fiji | Bin 0 -> 1077 bytes 
.../cctz/testdata/zoneinfo/Pacific/Funafuti | Bin 0 -> 166 bytes .../cctz/testdata/zoneinfo/Pacific/Galapagos | Bin 0 -> 238 bytes .../cctz/testdata/zoneinfo/Pacific/Gambier | Bin 0 -> 164 bytes .../testdata/zoneinfo/Pacific/Guadalcanal | Bin 0 -> 166 bytes .../cctz/testdata/zoneinfo/Pacific/Guam | Bin 0 -> 494 bytes .../cctz/testdata/zoneinfo/Pacific/Honolulu | Bin 0 -> 329 bytes .../cctz/testdata/zoneinfo/Pacific/Johnston | Bin 0 -> 329 bytes .../cctz/testdata/zoneinfo/Pacific/Kiritimati | Bin 0 -> 238 bytes .../cctz/testdata/zoneinfo/Pacific/Kosrae | Bin 0 -> 351 bytes .../cctz/testdata/zoneinfo/Pacific/Kwajalein | Bin 0 -> 316 bytes .../cctz/testdata/zoneinfo/Pacific/Majuro | Bin 0 -> 310 bytes .../cctz/testdata/zoneinfo/Pacific/Marquesas | Bin 0 -> 173 bytes .../cctz/testdata/zoneinfo/Pacific/Midway | Bin 0 -> 175 bytes .../cctz/testdata/zoneinfo/Pacific/Nauru | Bin 0 -> 252 bytes .../cctz/testdata/zoneinfo/Pacific/Niue | Bin 0 -> 241 bytes .../cctz/testdata/zoneinfo/Pacific/Norfolk | Bin 0 -> 880 bytes .../cctz/testdata/zoneinfo/Pacific/Noumea | Bin 0 -> 304 bytes .../cctz/testdata/zoneinfo/Pacific/Pago_Pago | Bin 0 -> 175 bytes .../cctz/testdata/zoneinfo/Pacific/Palau | Bin 0 -> 180 bytes .../cctz/testdata/zoneinfo/Pacific/Pitcairn | Bin 0 -> 202 bytes .../cctz/testdata/zoneinfo/Pacific/Pohnpei | Bin 0 -> 303 bytes .../cctz/testdata/zoneinfo/Pacific/Ponape | Bin 0 -> 303 bytes .../testdata/zoneinfo/Pacific/Port_Moresby | Bin 0 -> 186 bytes .../cctz/testdata/zoneinfo/Pacific/Rarotonga | Bin 0 -> 577 bytes .../cctz/testdata/zoneinfo/Pacific/Saipan | Bin 0 -> 494 bytes .../cctz/testdata/zoneinfo/Pacific/Samoa | Bin 0 -> 175 bytes .../cctz/testdata/zoneinfo/Pacific/Tahiti | Bin 0 -> 165 bytes .../cctz/testdata/zoneinfo/Pacific/Tarawa | Bin 0 -> 166 bytes .../cctz/testdata/zoneinfo/Pacific/Tongatapu | Bin 0 -> 372 bytes .../cctz/testdata/zoneinfo/Pacific/Truk | Bin 0 -> 269 bytes .../cctz/testdata/zoneinfo/Pacific/Wake | Bin 0 -> 166 bytes .../cctz/testdata/zoneinfo/Pacific/Wallis | Bin 0 -> 166 bytes .../cctz/testdata/zoneinfo/Pacific/Yap | Bin 0 -> 269 bytes .../internal/cctz/testdata/zoneinfo/Poland | Bin 0 -> 2654 bytes .../internal/cctz/testdata/zoneinfo/Portugal | Bin 0 -> 3469 bytes .../time/internal/cctz/testdata/zoneinfo/ROC | Bin 0 -> 761 bytes .../time/internal/cctz/testdata/zoneinfo/ROK | Bin 0 -> 617 bytes .../internal/cctz/testdata/zoneinfo/Singapore | Bin 0 -> 383 bytes .../internal/cctz/testdata/zoneinfo/Turkey | Bin 0 -> 1947 bytes .../time/internal/cctz/testdata/zoneinfo/UCT | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/US/Alaska | Bin 0 -> 2371 bytes .../cctz/testdata/zoneinfo/US/Aleutian | Bin 0 -> 2356 bytes .../cctz/testdata/zoneinfo/US/Arizona | Bin 0 -> 328 bytes .../cctz/testdata/zoneinfo/US/Central | Bin 0 -> 3576 bytes .../cctz/testdata/zoneinfo/US/East-Indiana | Bin 0 -> 1666 bytes .../cctz/testdata/zoneinfo/US/Eastern | Bin 0 -> 3536 bytes .../internal/cctz/testdata/zoneinfo/US/Hawaii | Bin 0 -> 329 bytes .../cctz/testdata/zoneinfo/US/Indiana-Starke | Bin 0 -> 2428 bytes .../cctz/testdata/zoneinfo/US/Michigan | Bin 0 -> 2230 bytes .../cctz/testdata/zoneinfo/US/Mountain | Bin 0 -> 2444 bytes .../cctz/testdata/zoneinfo/US/Pacific | Bin 0 -> 2836 bytes .../internal/cctz/testdata/zoneinfo/US/Samoa | Bin 0 -> 175 bytes .../time/internal/cctz/testdata/zoneinfo/UTC | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Universal | Bin 0 -> 114 bytes .../time/internal/cctz/testdata/zoneinfo/W-SU | Bin 0 -> 1535 bytes 
.../time/internal/cctz/testdata/zoneinfo/WET | Bin 0 -> 1905 bytes .../time/internal/cctz/testdata/zoneinfo/Zulu | Bin 0 -> 114 bytes .../cctz/testdata/zoneinfo/iso3166.tab | 274 ++ .../internal/cctz/testdata/zoneinfo/localtime | Bin 0 -> 148 bytes .../cctz/testdata/zoneinfo/zone1970.tab | 384 +++ .../time/internal/get_current_time_chrono.inc | 31 + .../time/internal/get_current_time_posix.inc | 24 + base/abseil/absl/time/internal/test_util.cc | 130 + base/abseil/absl/time/internal/test_util.h | 33 + base/abseil/absl/time/internal/zoneinfo.inc | 729 +++++ base/abseil/absl/time/time.cc | 499 +++ base/abseil/absl/time/time.h | 1581 ++++++++++ base/abseil/absl/time/time_benchmark.cc | 316 ++ base/abseil/absl/time/time_test.cc | 1274 ++++++++ base/abseil/absl/time/time_zone_test.cc | 97 + base/abseil/absl/types/BUILD.bazel | 332 ++ base/abseil/absl/types/CMakeLists.txt | 366 +++ base/abseil/absl/types/any.h | 547 ++++ .../absl/types/any_exception_safety_test.cc | 173 ++ base/abseil/absl/types/any_test.cc | 781 +++++ base/abseil/absl/types/bad_any_cast.cc | 46 + base/abseil/absl/types/bad_any_cast.h | 75 + base/abseil/absl/types/bad_optional_access.cc | 48 + base/abseil/absl/types/bad_optional_access.h | 78 + base/abseil/absl/types/bad_variant_access.cc | 64 + base/abseil/absl/types/bad_variant_access.h | 82 + base/abseil/absl/types/compare.h | 558 ++++ base/abseil/absl/types/compare_test.cc | 339 ++ .../absl/types/internal/conformance_aliases.h | 447 +++ .../types/internal/conformance_archetype.h | 978 ++++++ .../absl/types/internal/conformance_profile.h | 376 +++ .../internal/conformance_testing_test.cc | 1186 +++++++ base/abseil/absl/types/internal/optional.h | 396 +++ base/abseil/absl/types/internal/span.h | 128 + base/abseil/absl/types/internal/variant.h | 1646 ++++++++++ base/abseil/absl/types/optional.h | 776 +++++ .../types/optional_exception_safety_test.cc | 292 ++ base/abseil/absl/types/optional_test.cc | 1661 ++++++++++ base/abseil/absl/types/span.h | 713 +++++ base/abseil/absl/types/span_test.cc | 833 +++++ base/abseil/absl/types/variant.h | 861 ++++++ base/abseil/absl/types/variant_benchmark.cc | 222 ++ .../types/variant_exception_safety_test.cc | 532 ++++ base/abseil/absl/types/variant_test.cc | 2716 ++++++++++++++++ base/abseil/absl/utility/BUILD.bazel | 55 + base/abseil/absl/utility/CMakeLists.txt | 44 + base/abseil/absl/utility/utility.h | 350 +++ base/abseil/absl/utility/utility_test.cc | 376 +++ base/abseil/ci/cmake_install_test.sh | 32 + .../linux_clang-latest_libcxx_asan_bazel.sh | 93 + .../ci/linux_clang-latest_libcxx_bazel.sh | 83 + .../linux_clang-latest_libcxx_tsan_bazel.sh | 88 + .../ci/linux_clang-latest_libstdcxx_bazel.sh | 82 + .../ci/linux_gcc-4.9_libstdcxx_bazel.sh | 80 + .../ci/linux_gcc-latest_libstdcxx_bazel.sh | 80 + .../ci/linux_gcc-latest_libstdcxx_cmake.sh | 61 + base/abseil/ci/linux_gcc_alpine_cmake.sh | 63 + base/abseil/ci/macos_xcode_bazel.sh | 50 + base/abseil/ci/macos_xcode_cmake.sh | 43 + base/abseil/conanfile.py | 51 + base/jsoncpp/.gitignore | 10 + base/jsoncpp/.travis.yml | 23 + base/jsoncpp/AUTHORS | 1 + base/jsoncpp/CMakeLists.txt | 90 + base/jsoncpp/LICENSE | 55 + base/jsoncpp/NEWS.txt | 175 ++ base/jsoncpp/README.txt | 220 ++ base/jsoncpp/SConstruct | 248 ++ base/jsoncpp/amalgamate.py | 150 + base/jsoncpp/devtools/__init__.py | 1 + base/jsoncpp/devtools/agent_vmw7.json | 33 + base/jsoncpp/devtools/agent_vmxp.json | 26 + base/jsoncpp/devtools/antglob.py | 201 ++ base/jsoncpp/devtools/batchbuild.py | 280 ++ base/jsoncpp/devtools/fixeol.py | 63 + 
base/jsoncpp/devtools/licenseupdater.py | 93 + base/jsoncpp/devtools/tarball.py | 53 + base/jsoncpp/doc/doxyfile.in | 1534 ++++++++++ base/jsoncpp/doc/footer.html | 23 + base/jsoncpp/doc/header.html | 24 + base/jsoncpp/doc/jsoncpp.dox | 126 + base/jsoncpp/doc/readme.txt | 1 + base/jsoncpp/doc/roadmap.dox | 38 + base/jsoncpp/doxybuild.py | 169 + base/jsoncpp/include/CMakeLists.txt | 2 + base/jsoncpp/include/json/assertions.h | 32 + base/jsoncpp/include/json/autolink.h | 24 + base/jsoncpp/include/json/config.h | 108 + base/jsoncpp/include/json/features.h | 55 + base/jsoncpp/include/json/forwards.h | 44 + base/jsoncpp/include/json/json.h | 15 + base/jsoncpp/include/json/reader.h | 249 ++ base/jsoncpp/include/json/value.h | 1136 +++++++ base/jsoncpp/include/json/version.h | 14 + base/jsoncpp/include/json/writer.h | 203 ++ base/jsoncpp/makefiles/vs71/jsoncpp.sln | 46 + base/jsoncpp/makefiles/vs71/jsontest.vcproj | 119 + base/jsoncpp/makefiles/vs71/lib_json.vcproj | 214 ++ .../makefiles/vs71/test_lib_json.vcproj | 130 + base/jsoncpp/makerelease.py | 380 +++ base/jsoncpp/scons-tools/globtool.py | 53 + base/jsoncpp/scons-tools/srcdist.py | 179 ++ base/jsoncpp/scons-tools/substinfile.py | 79 + base/jsoncpp/scons-tools/targz.py | 82 + base/jsoncpp/src/CMakeLists.txt | 5 + .../jsoncpp/src/jsontestrunner/CMakeLists.txt | 23 + base/jsoncpp/src/jsontestrunner/main.cpp | 301 ++ base/jsoncpp/src/jsontestrunner/sconscript | 9 + base/jsoncpp/src/lib_json/CMakeLists.txt | 43 + .../src/lib_json/json_batchallocator.h | 128 + .../src/lib_json/json_internalarray.inl | 455 +++ .../jsoncpp/src/lib_json/json_internalmap.inl | 616 ++++ base/jsoncpp/src/lib_json/json_reader.cpp | 1021 ++++++ base/jsoncpp/src/lib_json/json_tool.h | 93 + base/jsoncpp/src/lib_json/json_value.cpp | 1980 ++++++++++++ .../src/lib_json/json_valueiterator.inl | 301 ++ base/jsoncpp/src/lib_json/json_writer.cpp | 842 +++++ base/jsoncpp/src/lib_json/sconscript | 8 + base/jsoncpp/src/lib_json/version.h.in | 14 + base/jsoncpp/src/test_lib_json/CMakeLists.txt | 22 + base/jsoncpp/src/test_lib_json/jsontest.cpp | 578 ++++ base/jsoncpp/src/test_lib_json/jsontest.h | 273 ++ base/jsoncpp/src/test_lib_json/main.cpp | 1658 ++++++++++ base/jsoncpp/src/test_lib_json/sconscript | 10 + base/jsoncpp/test/cleantests.py | 10 + .../jsoncpp/test/data/fail_test_array_01.json | 1 + base/jsoncpp/test/data/test_array_01.expected | 1 + base/jsoncpp/test/data/test_array_01.json | 1 + base/jsoncpp/test/data/test_array_02.expected | 2 + base/jsoncpp/test/data/test_array_02.json | 1 + base/jsoncpp/test/data/test_array_03.expected | 6 + base/jsoncpp/test/data/test_array_03.json | 1 + base/jsoncpp/test/data/test_array_04.expected | 5 + base/jsoncpp/test/data/test_array_04.json | 1 + base/jsoncpp/test/data/test_array_05.expected | 100 + base/jsoncpp/test/data/test_array_05.json | 1 + base/jsoncpp/test/data/test_array_06.expected | 5 + base/jsoncpp/test/data/test_array_06.json | 4 + base/jsoncpp/test/data/test_array_07.expected | 2122 +++++++++++++ base/jsoncpp/test/data/test_array_07.json | 2 + base/jsoncpp/test/data/test_basic_01.expected | 1 + base/jsoncpp/test/data/test_basic_01.json | 1 + base/jsoncpp/test/data/test_basic_02.expected | 1 + base/jsoncpp/test/data/test_basic_02.json | 1 + base/jsoncpp/test/data/test_basic_03.expected | 3 + base/jsoncpp/test/data/test_basic_03.json | 3 + base/jsoncpp/test/data/test_basic_04.expected | 2 + base/jsoncpp/test/data/test_basic_04.json | 2 + base/jsoncpp/test/data/test_basic_05.expected | 2 + 
base/jsoncpp/test/data/test_basic_05.json | 2 + base/jsoncpp/test/data/test_basic_06.expected | 2 + base/jsoncpp/test/data/test_basic_06.json | 2 + base/jsoncpp/test/data/test_basic_07.expected | 2 + base/jsoncpp/test/data/test_basic_07.json | 2 + base/jsoncpp/test/data/test_basic_08.expected | 2 + base/jsoncpp/test/data/test_basic_08.json | 3 + base/jsoncpp/test/data/test_basic_09.expected | 2 + base/jsoncpp/test/data/test_basic_09.json | 4 + .../test/data/test_comment_01.expected | 8 + base/jsoncpp/test/data/test_comment_01.json | 8 + .../test/data/test_comment_02.expected | 7 + base/jsoncpp/test/data/test_comment_02.json | 16 + .../test/data/test_complex_01.expected | 20 + base/jsoncpp/test/data/test_complex_01.json | 17 + .../test/data/test_integer_01.expected | 1 + base/jsoncpp/test/data/test_integer_01.json | 2 + .../test/data/test_integer_02.expected | 1 + base/jsoncpp/test/data/test_integer_02.json | 2 + .../test/data/test_integer_03.expected | 1 + base/jsoncpp/test/data/test_integer_03.json | 2 + .../test/data/test_integer_04.expected | 2 + base/jsoncpp/test/data/test_integer_04.json | 3 + .../test/data/test_integer_05.expected | 2 + base/jsoncpp/test/data/test_integer_05.json | 2 + .../test/data/test_integer_06_64bits.expected | 1 + .../test/data/test_integer_06_64bits.json | 2 + .../test/data/test_integer_07_64bits.expected | 1 + .../test/data/test_integer_07_64bits.json | 2 + .../test/data/test_integer_08_64bits.expected | 1 + .../test/data/test_integer_08_64bits.json | 2 + base/jsoncpp/test/data/test_large_01.expected | 2122 +++++++++++++ base/jsoncpp/test/data/test_large_01.json | 2 + .../jsoncpp/test/data/test_object_01.expected | 1 + base/jsoncpp/test/data/test_object_01.json | 1 + .../jsoncpp/test/data/test_object_02.expected | 2 + base/jsoncpp/test/data/test_object_02.json | 1 + .../jsoncpp/test/data/test_object_03.expected | 4 + base/jsoncpp/test/data/test_object_03.json | 5 + .../jsoncpp/test/data/test_object_04.expected | 2 + base/jsoncpp/test/data/test_object_04.json | 3 + .../data/test_preserve_comment_01.expected | 3 + .../test/data/test_preserve_comment_01.json | 14 + base/jsoncpp/test/data/test_real_01.expected | 2 + base/jsoncpp/test/data/test_real_01.json | 3 + base/jsoncpp/test/data/test_real_02.expected | 2 + base/jsoncpp/test/data/test_real_02.json | 3 + base/jsoncpp/test/data/test_real_03.expected | 2 + base/jsoncpp/test/data/test_real_03.json | 3 + base/jsoncpp/test/data/test_real_04.expected | 2 + base/jsoncpp/test/data/test_real_04.json | 3 + base/jsoncpp/test/data/test_real_05.expected | 3 + base/jsoncpp/test/data/test_real_05.json | 3 + base/jsoncpp/test/data/test_real_06.expected | 3 + base/jsoncpp/test/data/test_real_06.json | 3 + base/jsoncpp/test/data/test_real_07.expected | 3 + base/jsoncpp/test/data/test_real_07.json | 3 + base/jsoncpp/test/data/test_real_08.expected | 1 + base/jsoncpp/test/data/test_real_08.json | 4 + base/jsoncpp/test/data/test_real_09.expected | 1 + base/jsoncpp/test/data/test_real_09.json | 4 + base/jsoncpp/test/data/test_real_10.expected | 1 + base/jsoncpp/test/data/test_real_10.json | 4 + base/jsoncpp/test/data/test_real_11.expected | 1 + base/jsoncpp/test/data/test_real_11.json | 4 + base/jsoncpp/test/data/test_real_12.expected | 1 + base/jsoncpp/test/data/test_real_12.json | 2 + .../jsoncpp/test/data/test_string_01.expected | 1 + base/jsoncpp/test/data/test_string_01.json | 1 + .../jsoncpp/test/data/test_string_02.expected | 1 + base/jsoncpp/test/data/test_string_02.json | 1 + .../jsoncpp/test/data/test_string_03.expected 
| 1 + base/jsoncpp/test/data/test_string_03.json | 1 + .../jsoncpp/test/data/test_string_04.expected | 2 + base/jsoncpp/test/data/test_string_04.json | 2 + .../jsoncpp/test/data/test_string_05.expected | 2 + base/jsoncpp/test/data/test_string_05.json | 2 + .../test/data/test_string_unicode_01.expected | 1 + .../test/data/test_string_unicode_01.json | 1 + .../test/data/test_string_unicode_02.expected | 1 + .../test/data/test_string_unicode_02.json | 1 + .../test/data/test_string_unicode_03.expected | 1 + .../test/data/test_string_unicode_03.json | 1 + .../test/data/test_string_unicode_04.expected | 1 + .../test/data/test_string_unicode_04.json | 1 + .../test/data/test_string_unicode_05.expected | 2 + .../test/data/test_string_unicode_05.json | 1 + base/jsoncpp/test/generate_expected.py | 11 + base/jsoncpp/test/jsonchecker/fail1.json | 1 + base/jsoncpp/test/jsonchecker/fail10.json | 1 + base/jsoncpp/test/jsonchecker/fail11.json | 1 + base/jsoncpp/test/jsonchecker/fail12.json | 1 + base/jsoncpp/test/jsonchecker/fail13.json | 1 + base/jsoncpp/test/jsonchecker/fail14.json | 1 + base/jsoncpp/test/jsonchecker/fail15.json | 1 + base/jsoncpp/test/jsonchecker/fail16.json | 1 + base/jsoncpp/test/jsonchecker/fail17.json | 1 + base/jsoncpp/test/jsonchecker/fail18.json | 1 + base/jsoncpp/test/jsonchecker/fail19.json | 1 + base/jsoncpp/test/jsonchecker/fail2.json | 1 + base/jsoncpp/test/jsonchecker/fail20.json | 1 + base/jsoncpp/test/jsonchecker/fail21.json | 1 + base/jsoncpp/test/jsonchecker/fail22.json | 1 + base/jsoncpp/test/jsonchecker/fail23.json | 1 + base/jsoncpp/test/jsonchecker/fail24.json | 1 + base/jsoncpp/test/jsonchecker/fail25.json | 1 + base/jsoncpp/test/jsonchecker/fail26.json | 1 + base/jsoncpp/test/jsonchecker/fail27.json | 2 + base/jsoncpp/test/jsonchecker/fail28.json | 2 + base/jsoncpp/test/jsonchecker/fail29.json | 1 + base/jsoncpp/test/jsonchecker/fail3.json | 1 + base/jsoncpp/test/jsonchecker/fail30.json | 1 + base/jsoncpp/test/jsonchecker/fail31.json | 1 + base/jsoncpp/test/jsonchecker/fail32.json | 1 + base/jsoncpp/test/jsonchecker/fail33.json | 1 + base/jsoncpp/test/jsonchecker/fail4.json | 1 + base/jsoncpp/test/jsonchecker/fail5.json | 1 + base/jsoncpp/test/jsonchecker/fail6.json | 1 + base/jsoncpp/test/jsonchecker/fail7.json | 1 + base/jsoncpp/test/jsonchecker/fail8.json | 1 + base/jsoncpp/test/jsonchecker/fail9.json | 1 + base/jsoncpp/test/jsonchecker/pass1.json | 58 + base/jsoncpp/test/jsonchecker/pass2.json | 1 + base/jsoncpp/test/jsonchecker/pass3.json | 6 + base/jsoncpp/test/jsonchecker/readme.txt | 3 + base/jsoncpp/test/pyjsontestrunner.py | 64 + base/jsoncpp/test/runjsontests.py | 134 + base/jsoncpp/test/rununittests.py | 73 + base/jsoncpp/version | 1 + base/rtc_base/array_view.h | 307 ++ base/rtc_base/arraysize.h | 32 + base/rtc_base/atomic_ops.h | 79 + base/rtc_base/checks.cc | 207 ++ base/rtc_base/checks.h | 484 +++ base/rtc_base/constructor_magic.h | 34 + base/rtc_base/critical_section.cc | 249 ++ base/rtc_base/critical_section.h | 123 + base/rtc_base/deprecation.h | 45 + base/rtc_base/gtest_prod_util.h | 38 + base/rtc_base/logging.cc | 562 ++++ base/rtc_base/logging.h | 673 ++++ base/rtc_base/memory/aligned_malloc.cc | 98 + base/rtc_base/memory/aligned_malloc.h | 57 + base/rtc_base/numerics/safe_compare.h | 176 ++ base/rtc_base/numerics/safe_conversions.h | 76 + .../rtc_base/numerics/safe_conversions_impl.h | 177 ++ base/rtc_base/numerics/safe_minmax.h | 335 ++ base/rtc_base/platform_thread_types.cc | 87 + base/rtc_base/platform_thread_types.h | 62 + 
base/rtc_base/race_checker.cc | 54 + base/rtc_base/race_checker.h | 78 + base/rtc_base/sanitizer.h | 144 + base/rtc_base/string_encode.cc | 387 +++ base/rtc_base/string_encode.h | 154 + base/rtc_base/string_to_number.cc | 90 + base/rtc_base/string_to_number.h | 116 + base/rtc_base/string_utils.cc | 59 + base/rtc_base/string_utils.h | 95 + base/rtc_base/strings/json.cc | 294 ++ base/rtc_base/strings/json.h | 88 + base/rtc_base/strings/string_builder.cc | 141 + base/rtc_base/strings/string_builder.h | 175 ++ base/rtc_base/swap_queue.h | 247 ++ base/rtc_base/system/arch.h | 61 + base/rtc_base/system/file_wrapper.cc | 121 + base/rtc_base/system/file_wrapper.h | 103 + base/rtc_base/system/inline.h | 31 + base/rtc_base/system/rtc_export.h | 43 + base/rtc_base/system/unused.h | 39 + base/rtc_base/thread_annotations.h | 95 + base/rtc_base/time_utils.cc | 328 ++ base/rtc_base/time_utils.h | 170 + base/rtc_base/type_traits.h | 140 + .../include/cpu_features_wrapper.h | 48 + base/system_wrappers/include/field_trial.h | 102 + base/system_wrappers/include/metrics.h | 438 +++ base/system_wrappers/source/cpu_features.cc | 72 + .../source/cpu_features_android.c | 19 + .../source/cpu_features_linux.c | 92 + base/system_wrappers/source/field_trial.cc | 155 + base/system_wrappers/source/metrics.cc | 327 ++ build_config.h | 17 + demo.filters | 17 + demo.vcxproj | 174 ++ demo.vcxproj.filters | 12 + demo.vcxproj.user | 19 + demo/demo.cc | 170 + demo/wavreader.c | 196 ++ demo/wavreader.h | 37 + demo/wavwriter.c | 110 + demo/wavwriter.h | 35 + 1747 files changed, 259200 insertions(+) create mode 100644 AEC3.sln create mode 100644 AEC3.vcxproj create mode 100644 AEC3.vcxproj.filters create mode 100644 AEC3.vcxproj.user create mode 100644 api.filters create mode 100644 api.vcxproj create mode 100644 api.vcxproj.filters create mode 100644 api/echo_canceller3_config.cc create mode 100644 api/echo_canceller3_config.h create mode 100644 api/echo_canceller3_config_json.cc create mode 100644 api/echo_canceller3_config_json.h create mode 100644 api/echo_canceller3_factory.cc create mode 100644 api/echo_canceller3_factory.h create mode 100644 api/echo_control.h create mode 100644 audio_processing/aec3/BUILD.gn create mode 100644 audio_processing/aec3/adaptive_fir_filter.cc create mode 100644 audio_processing/aec3/adaptive_fir_filter.h create mode 100644 audio_processing/aec3/adaptive_fir_filter_erl.cc create mode 100644 audio_processing/aec3/adaptive_fir_filter_erl.h create mode 100644 audio_processing/aec3/adaptive_fir_filter_erl_unittest.cc create mode 100644 audio_processing/aec3/adaptive_fir_filter_unittest.cc create mode 100644 audio_processing/aec3/aec3_common.cc create mode 100644 audio_processing/aec3/aec3_common.h create mode 100644 audio_processing/aec3/aec3_fft.cc create mode 100644 audio_processing/aec3/aec3_fft.h create mode 100644 audio_processing/aec3/aec3_fft_unittest.cc create mode 100644 audio_processing/aec3/aec_state.cc create mode 100644 audio_processing/aec3/aec_state.h create mode 100644 audio_processing/aec3/aec_state_unittest.cc create mode 100644 audio_processing/aec3/alignment_mixer.cc create mode 100644 audio_processing/aec3/alignment_mixer.h create mode 100644 audio_processing/aec3/alignment_mixer_unittest.cc create mode 100644 audio_processing/aec3/api_call_jitter_metrics.cc create mode 100644 audio_processing/aec3/api_call_jitter_metrics.h create mode 100644 audio_processing/aec3/api_call_jitter_metrics_unittest.cc create mode 100644 audio_processing/aec3/block_buffer.cc create mode 100644 
audio_processing/aec3/block_buffer.h create mode 100644 audio_processing/aec3/block_delay_buffer.cc create mode 100644 audio_processing/aec3/block_delay_buffer.h create mode 100644 audio_processing/aec3/block_delay_buffer_unittest.cc create mode 100644 audio_processing/aec3/block_framer.cc create mode 100644 audio_processing/aec3/block_framer.h create mode 100644 audio_processing/aec3/block_framer_unittest.cc create mode 100644 audio_processing/aec3/block_processor.cc create mode 100644 audio_processing/aec3/block_processor.h create mode 100644 audio_processing/aec3/block_processor_metrics.cc create mode 100644 audio_processing/aec3/block_processor_metrics.h create mode 100644 audio_processing/aec3/block_processor_metrics_unittest.cc create mode 100644 audio_processing/aec3/block_processor_unittest.cc create mode 100644 audio_processing/aec3/clockdrift_detector.cc create mode 100644 audio_processing/aec3/clockdrift_detector.h create mode 100644 audio_processing/aec3/clockdrift_detector_unittest.cc create mode 100644 audio_processing/aec3/comfort_noise_generator.cc create mode 100644 audio_processing/aec3/comfort_noise_generator.h create mode 100644 audio_processing/aec3/comfort_noise_generator_unittest.cc create mode 100644 audio_processing/aec3/decimator.cc create mode 100644 audio_processing/aec3/decimator.h create mode 100644 audio_processing/aec3/decimator_unittest.cc create mode 100644 audio_processing/aec3/delay_estimate.h create mode 100644 audio_processing/aec3/dominant_nearend_detector.cc create mode 100644 audio_processing/aec3/dominant_nearend_detector.h create mode 100644 audio_processing/aec3/downsampled_render_buffer.cc create mode 100644 audio_processing/aec3/downsampled_render_buffer.h create mode 100644 audio_processing/aec3/echo_audibility.cc create mode 100644 audio_processing/aec3/echo_audibility.h create mode 100644 audio_processing/aec3/echo_canceller3.cc create mode 100644 audio_processing/aec3/echo_canceller3.h create mode 100644 audio_processing/aec3/echo_canceller3_unittest.cc create mode 100644 audio_processing/aec3/echo_path_delay_estimator.cc create mode 100644 audio_processing/aec3/echo_path_delay_estimator.h create mode 100644 audio_processing/aec3/echo_path_delay_estimator_unittest.cc create mode 100644 audio_processing/aec3/echo_path_variability.cc create mode 100644 audio_processing/aec3/echo_path_variability.h create mode 100644 audio_processing/aec3/echo_path_variability_unittest.cc create mode 100644 audio_processing/aec3/echo_remover.cc create mode 100644 audio_processing/aec3/echo_remover.h create mode 100644 audio_processing/aec3/echo_remover_metrics.cc create mode 100644 audio_processing/aec3/echo_remover_metrics.h create mode 100644 audio_processing/aec3/echo_remover_metrics_unittest.cc create mode 100644 audio_processing/aec3/echo_remover_unittest.cc create mode 100644 audio_processing/aec3/erl_estimator.cc create mode 100644 audio_processing/aec3/erl_estimator.h create mode 100644 audio_processing/aec3/erl_estimator_unittest.cc create mode 100644 audio_processing/aec3/erle_estimator.cc create mode 100644 audio_processing/aec3/erle_estimator.h create mode 100644 audio_processing/aec3/erle_estimator_unittest.cc create mode 100644 audio_processing/aec3/fft_buffer.cc create mode 100644 audio_processing/aec3/fft_buffer.h create mode 100644 audio_processing/aec3/fft_data.h create mode 100644 audio_processing/aec3/fft_data_unittest.cc create mode 100644 audio_processing/aec3/filter_analyzer.cc create mode 100644 audio_processing/aec3/filter_analyzer.h 
create mode 100644 audio_processing/aec3/filter_analyzer_unittest.cc create mode 100644 audio_processing/aec3/frame_blocker.cc create mode 100644 audio_processing/aec3/frame_blocker.h create mode 100644 audio_processing/aec3/frame_blocker_unittest.cc create mode 100644 audio_processing/aec3/fullband_erle_estimator.cc create mode 100644 audio_processing/aec3/fullband_erle_estimator.h create mode 100644 audio_processing/aec3/main_filter_update_gain.cc create mode 100644 audio_processing/aec3/main_filter_update_gain.h create mode 100644 audio_processing/aec3/main_filter_update_gain_unittest.cc create mode 100644 audio_processing/aec3/matched_filter.cc create mode 100644 audio_processing/aec3/matched_filter.h create mode 100644 audio_processing/aec3/matched_filter_lag_aggregator.cc create mode 100644 audio_processing/aec3/matched_filter_lag_aggregator.h create mode 100644 audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc create mode 100644 audio_processing/aec3/matched_filter_unittest.cc create mode 100644 audio_processing/aec3/moving_average.cc create mode 100644 audio_processing/aec3/moving_average.h create mode 100644 audio_processing/aec3/moving_average_unittest.cc create mode 100644 audio_processing/aec3/nearend_detector.h create mode 100644 audio_processing/aec3/render_buffer.cc create mode 100644 audio_processing/aec3/render_buffer.h create mode 100644 audio_processing/aec3/render_buffer_unittest.cc create mode 100644 audio_processing/aec3/render_delay_buffer.cc create mode 100644 audio_processing/aec3/render_delay_buffer.h create mode 100644 audio_processing/aec3/render_delay_buffer_unittest.cc create mode 100644 audio_processing/aec3/render_delay_controller.cc create mode 100644 audio_processing/aec3/render_delay_controller.h create mode 100644 audio_processing/aec3/render_delay_controller_metrics.cc create mode 100644 audio_processing/aec3/render_delay_controller_metrics.h create mode 100644 audio_processing/aec3/render_delay_controller_metrics_unittest.cc create mode 100644 audio_processing/aec3/render_delay_controller_unittest.cc create mode 100644 audio_processing/aec3/render_signal_analyzer.cc create mode 100644 audio_processing/aec3/render_signal_analyzer.h create mode 100644 audio_processing/aec3/render_signal_analyzer_unittest.cc create mode 100644 audio_processing/aec3/residual_echo_estimator.cc create mode 100644 audio_processing/aec3/residual_echo_estimator.h create mode 100644 audio_processing/aec3/residual_echo_estimator_unittest.cc create mode 100644 audio_processing/aec3/reverb_decay_estimator.cc create mode 100644 audio_processing/aec3/reverb_decay_estimator.h create mode 100644 audio_processing/aec3/reverb_frequency_response.cc create mode 100644 audio_processing/aec3/reverb_frequency_response.h create mode 100644 audio_processing/aec3/reverb_model.cc create mode 100644 audio_processing/aec3/reverb_model.h create mode 100644 audio_processing/aec3/reverb_model_estimator.cc create mode 100644 audio_processing/aec3/reverb_model_estimator.h create mode 100644 audio_processing/aec3/reverb_model_estimator_unittest.cc create mode 100644 audio_processing/aec3/shadow_filter_update_gain.cc create mode 100644 audio_processing/aec3/shadow_filter_update_gain.h create mode 100644 audio_processing/aec3/shadow_filter_update_gain_unittest.cc create mode 100644 audio_processing/aec3/signal_dependent_erle_estimator.cc create mode 100644 audio_processing/aec3/signal_dependent_erle_estimator.h create mode 100644 
audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc create mode 100644 audio_processing/aec3/spectrum_buffer.cc create mode 100644 audio_processing/aec3/spectrum_buffer.h create mode 100644 audio_processing/aec3/stationarity_estimator.cc create mode 100644 audio_processing/aec3/stationarity_estimator.h create mode 100644 audio_processing/aec3/subband_erle_estimator.cc create mode 100644 audio_processing/aec3/subband_erle_estimator.h create mode 100644 audio_processing/aec3/subband_nearend_detector.cc create mode 100644 audio_processing/aec3/subband_nearend_detector.h create mode 100644 audio_processing/aec3/subtractor.cc create mode 100644 audio_processing/aec3/subtractor.h create mode 100644 audio_processing/aec3/subtractor_output.cc create mode 100644 audio_processing/aec3/subtractor_output.h create mode 100644 audio_processing/aec3/subtractor_output_analyzer.cc create mode 100644 audio_processing/aec3/subtractor_output_analyzer.h create mode 100644 audio_processing/aec3/subtractor_unittest.cc create mode 100644 audio_processing/aec3/suppression_filter.cc create mode 100644 audio_processing/aec3/suppression_filter.h create mode 100644 audio_processing/aec3/suppression_filter_unittest.cc create mode 100644 audio_processing/aec3/suppression_gain.cc create mode 100644 audio_processing/aec3/suppression_gain.h create mode 100644 audio_processing/aec3/suppression_gain_unittest.cc create mode 100644 audio_processing/aec3/vector_math.h create mode 100644 audio_processing/aec3/vector_math_unittest.cc create mode 100644 audio_processing/audio_buffer.cc create mode 100644 audio_processing/audio_buffer.h create mode 100644 audio_processing/audio_frame.cc create mode 100644 audio_processing/audio_frame.h create mode 100644 audio_processing/channel_buffer.cc create mode 100644 audio_processing/channel_buffer.h create mode 100644 audio_processing/channel_layout.cc create mode 100644 audio_processing/channel_layout.h create mode 100644 audio_processing/high_pass_filter.cc create mode 100644 audio_processing/high_pass_filter.h create mode 100644 audio_processing/include/audio_processing.h create mode 100644 audio_processing/include/audio_util.h create mode 100644 audio_processing/logging/apm_data_dumper.cc create mode 100644 audio_processing/logging/apm_data_dumper.h create mode 100644 audio_processing/logging/wav_file.cc create mode 100644 audio_processing/logging/wav_file.h create mode 100644 audio_processing/logging/wav_header.cc create mode 100644 audio_processing/logging/wav_header.h create mode 100644 audio_processing/resampler/push_sinc_resampler.cc create mode 100644 audio_processing/resampler/push_sinc_resampler.h create mode 100644 audio_processing/resampler/sinc_resampler.cc create mode 100644 audio_processing/resampler/sinc_resampler.h create mode 100644 audio_processing/resampler/sinc_resampler_neon.cc create mode 100644 audio_processing/resampler/sinc_resampler_sse.cc create mode 100644 audio_processing/sparse_fir_filter.cc create mode 100644 audio_processing/sparse_fir_filter.h create mode 100644 audio_processing/splitting_filter.cc create mode 100644 audio_processing/splitting_filter.h create mode 100644 audio_processing/splitting_filter_c.c create mode 100644 audio_processing/three_band_filter_bank.cc create mode 100644 audio_processing/three_band_filter_bank.h create mode 100644 audio_processing/utility/BUILD.gn create mode 100644 audio_processing/utility/DEPS create mode 100644 audio_processing/utility/cascaded_biquad_filter.cc create mode 100644 
audio_processing/utility/cascaded_biquad_filter.h create mode 100644 audio_processing/utility/cascaded_biquad_filter_unittest.cc create mode 100644 audio_processing/utility/delay_estimator.cc create mode 100644 audio_processing/utility/delay_estimator.h create mode 100644 audio_processing/utility/delay_estimator_internal.h create mode 100644 audio_processing/utility/delay_estimator_unittest.cc create mode 100644 audio_processing/utility/delay_estimator_wrapper.cc create mode 100644 audio_processing/utility/delay_estimator_wrapper.h create mode 100644 audio_processing/utility/ooura_fft.cc create mode 100644 audio_processing/utility/ooura_fft.h create mode 100644 audio_processing/utility/ooura_fft_mips.cc create mode 100644 audio_processing/utility/ooura_fft_neon.cc create mode 100644 audio_processing/utility/ooura_fft_sse2.cc create mode 100644 audio_processing/utility/ooura_fft_tables_common.h create mode 100644 audio_processing/utility/ooura_fft_tables_neon_sse2.h create mode 100644 audio_processing/utility/pffft_wrapper.cc create mode 100644 audio_processing/utility/pffft_wrapper.h create mode 100644 audio_processing/utility/pffft_wrapper_unittest.cc create mode 100644 base.vcxproj create mode 100644 base.vcxproj.filters create mode 100644 base.vcxproj.user create mode 100644 base/abseil/.clang-format create mode 100644 base/abseil/.gitignore create mode 100644 base/abseil/ABSEIL_ISSUE_TEMPLATE.md create mode 100644 base/abseil/AUTHORS create mode 100644 base/abseil/CMake/AbseilHelpers.cmake create mode 100644 base/abseil/CMake/AbseilInstallDirs.cmake create mode 100644 base/abseil/CMake/Googletest/CMakeLists.txt.in create mode 100644 base/abseil/CMake/Googletest/DownloadGTest.cmake create mode 100644 base/abseil/CMake/README.md create mode 100644 base/abseil/CMake/abslConfig.cmake.in create mode 100644 base/abseil/CMake/install_test_project/CMakeLists.txt create mode 100644 base/abseil/CMake/install_test_project/simple.cc create mode 100644 base/abseil/CMake/install_test_project/test.sh create mode 100644 base/abseil/CMakeLists.txt create mode 100644 base/abseil/CONTRIBUTING.md create mode 100644 base/abseil/LICENSE create mode 100644 base/abseil/LTS.md create mode 100644 base/abseil/README.md create mode 100644 base/abseil/UPGRADES.md create mode 100644 base/abseil/WORKSPACE create mode 100644 base/abseil/absl/BUILD.bazel create mode 100644 base/abseil/absl/CMakeLists.txt create mode 100644 base/abseil/absl/abseil.podspec.gen.py create mode 100644 base/abseil/absl/algorithm/BUILD.bazel create mode 100644 base/abseil/absl/algorithm/CMakeLists.txt create mode 100644 base/abseil/absl/algorithm/algorithm.h create mode 100644 base/abseil/absl/algorithm/algorithm_test.cc create mode 100644 base/abseil/absl/algorithm/container.h create mode 100644 base/abseil/absl/algorithm/container_test.cc create mode 100644 base/abseil/absl/algorithm/equal_benchmark.cc create mode 100644 base/abseil/absl/base/BUILD.bazel create mode 100644 base/abseil/absl/base/CMakeLists.txt create mode 100644 base/abseil/absl/base/attributes.h create mode 100644 base/abseil/absl/base/bit_cast_test.cc create mode 100644 base/abseil/absl/base/call_once.h create mode 100644 base/abseil/absl/base/call_once_test.cc create mode 100644 base/abseil/absl/base/casts.h create mode 100644 base/abseil/absl/base/config.h create mode 100644 base/abseil/absl/base/config_test.cc create mode 100644 base/abseil/absl/base/const_init.h create mode 100644 base/abseil/absl/base/dynamic_annotations.cc create mode 100644 
base/abseil/absl/base/dynamic_annotations.h create mode 100644 base/abseil/absl/base/exception_safety_testing_test.cc create mode 100644 base/abseil/absl/base/inline_variable_test.cc create mode 100644 base/abseil/absl/base/inline_variable_test_a.cc create mode 100644 base/abseil/absl/base/inline_variable_test_b.cc create mode 100644 base/abseil/absl/base/internal/atomic_hook.h create mode 100644 base/abseil/absl/base/internal/atomic_hook_test.cc create mode 100644 base/abseil/absl/base/internal/atomic_hook_test_helper.cc create mode 100644 base/abseil/absl/base/internal/atomic_hook_test_helper.h create mode 100644 base/abseil/absl/base/internal/bits.h create mode 100644 base/abseil/absl/base/internal/bits_test.cc create mode 100644 base/abseil/absl/base/internal/cmake_thread_test.cc create mode 100644 base/abseil/absl/base/internal/cycleclock.cc create mode 100644 base/abseil/absl/base/internal/cycleclock.h create mode 100644 base/abseil/absl/base/internal/direct_mmap.h create mode 100644 base/abseil/absl/base/internal/endian.h create mode 100644 base/abseil/absl/base/internal/endian_test.cc create mode 100644 base/abseil/absl/base/internal/exception_safety_testing.cc create mode 100644 base/abseil/absl/base/internal/exception_safety_testing.h create mode 100644 base/abseil/absl/base/internal/exception_testing.h create mode 100644 base/abseil/absl/base/internal/exponential_biased.cc create mode 100644 base/abseil/absl/base/internal/exponential_biased.h create mode 100644 base/abseil/absl/base/internal/exponential_biased_test.cc create mode 100644 base/abseil/absl/base/internal/hide_ptr.h create mode 100644 base/abseil/absl/base/internal/identity.h create mode 100644 base/abseil/absl/base/internal/inline_variable.h create mode 100644 base/abseil/absl/base/internal/inline_variable_testing.h create mode 100644 base/abseil/absl/base/internal/invoke.h create mode 100644 base/abseil/absl/base/internal/low_level_alloc.cc create mode 100644 base/abseil/absl/base/internal/low_level_alloc.h create mode 100644 base/abseil/absl/base/internal/low_level_alloc_test.cc create mode 100644 base/abseil/absl/base/internal/low_level_scheduling.h create mode 100644 base/abseil/absl/base/internal/per_thread_tls.h create mode 100644 base/abseil/absl/base/internal/periodic_sampler.cc create mode 100644 base/abseil/absl/base/internal/periodic_sampler.h create mode 100644 base/abseil/absl/base/internal/periodic_sampler_benchmark.cc create mode 100644 base/abseil/absl/base/internal/periodic_sampler_test.cc create mode 100644 base/abseil/absl/base/internal/pretty_function.h create mode 100644 base/abseil/absl/base/internal/raw_logging.cc create mode 100644 base/abseil/absl/base/internal/raw_logging.h create mode 100644 base/abseil/absl/base/internal/scheduling_mode.h create mode 100644 base/abseil/absl/base/internal/scoped_set_env.cc create mode 100644 base/abseil/absl/base/internal/scoped_set_env.h create mode 100644 base/abseil/absl/base/internal/scoped_set_env_test.cc create mode 100644 base/abseil/absl/base/internal/spinlock.cc create mode 100644 base/abseil/absl/base/internal/spinlock.h create mode 100644 base/abseil/absl/base/internal/spinlock_akaros.inc create mode 100644 base/abseil/absl/base/internal/spinlock_benchmark.cc create mode 100644 base/abseil/absl/base/internal/spinlock_linux.inc create mode 100644 base/abseil/absl/base/internal/spinlock_posix.inc create mode 100644 base/abseil/absl/base/internal/spinlock_wait.cc create mode 100644 base/abseil/absl/base/internal/spinlock_wait.h create mode 100644 
base/abseil/absl/base/internal/spinlock_win32.inc
 create mode 100644 base/abseil/absl/base/internal/sysinfo.cc
 create mode 100644 base/abseil/absl/base/internal/sysinfo.h
 create mode 100644 base/abseil/absl/base/internal/sysinfo_test.cc
 create mode 100644 base/abseil/absl/base/internal/thread_annotations.h
 create mode 100644 base/abseil/absl/base/internal/thread_identity.cc
 create mode 100644 base/abseil/absl/base/internal/thread_identity.h
 create mode 100644 base/abseil/absl/base/internal/thread_identity_benchmark.cc
 create mode 100644 base/abseil/absl/base/internal/thread_identity_test.cc
 create mode 100644 base/abseil/absl/base/internal/throw_delegate.cc
 create mode 100644 base/abseil/absl/base/internal/throw_delegate.h
 create mode 100644 base/abseil/absl/base/internal/tsan_mutex_interface.h
 create mode 100644 base/abseil/absl/base/internal/unaligned_access.h
 create mode 100644 base/abseil/absl/base/internal/unscaledcycleclock.cc
 create mode 100644 base/abseil/absl/base/internal/unscaledcycleclock.h
 create mode 100644 base/abseil/absl/base/invoke_test.cc
 create mode 100644 base/abseil/absl/base/log_severity.cc
 create mode 100644 base/abseil/absl/base/log_severity.h
 create mode 100644 base/abseil/absl/base/log_severity_test.cc
 create mode 100644 base/abseil/absl/base/macros.h
 create mode 100644 base/abseil/absl/base/optimization.h
 create mode 100644 base/abseil/absl/base/options.h
 create mode 100644 base/abseil/absl/base/policy_checks.h
 create mode 100644 base/abseil/absl/base/port.h
 create mode 100644 base/abseil/absl/base/raw_logging_test.cc
 create mode 100644 base/abseil/absl/base/spinlock_test_common.cc
 create mode 100644 base/abseil/absl/base/thread_annotations.h
 create mode 100644 base/abseil/absl/base/throw_delegate_test.cc
 create mode 100644 base/abseil/absl/compiler_config_setting.bzl
 create mode 100644 base/abseil/absl/container/BUILD.bazel
 create mode 100644 base/abseil/absl/container/CMakeLists.txt
 create mode 100644 base/abseil/absl/container/btree_map.h
 create mode 100644 base/abseil/absl/container/btree_set.h
 create mode 100644 base/abseil/absl/container/btree_test.cc
 create mode 100644 base/abseil/absl/container/btree_test.h
 create mode 100644 base/abseil/absl/container/fixed_array.h
 create mode 100644 base/abseil/absl/container/fixed_array_benchmark.cc
 create mode 100644 base/abseil/absl/container/fixed_array_exception_safety_test.cc
 create mode 100644 base/abseil/absl/container/fixed_array_test.cc
 create mode 100644 base/abseil/absl/container/flat_hash_map.h
 create mode 100644 base/abseil/absl/container/flat_hash_map_test.cc
 create mode 100644 base/abseil/absl/container/flat_hash_set.h
 create mode 100644 base/abseil/absl/container/flat_hash_set_test.cc
 create mode 100644 base/abseil/absl/container/inlined_vector.h
 create mode 100644 base/abseil/absl/container/inlined_vector_benchmark.cc
 create mode 100644 base/abseil/absl/container/inlined_vector_exception_safety_test.cc
 create mode 100644 base/abseil/absl/container/inlined_vector_test.cc
 create mode 100644 base/abseil/absl/container/internal/btree.h
 create mode 100644 base/abseil/absl/container/internal/btree_container.h
 create mode 100644 base/abseil/absl/container/internal/common.h
 create mode 100644 base/abseil/absl/container/internal/compressed_tuple.h
 create mode 100644 base/abseil/absl/container/internal/compressed_tuple_test.cc
 create mode 100644 base/abseil/absl/container/internal/container_memory.h
 create mode 100644 base/abseil/absl/container/internal/container_memory_test.cc
 create mode 100644 base/abseil/absl/container/internal/counting_allocator.h
 create mode 100644 base/abseil/absl/container/internal/hash_function_defaults.h
 create mode 100644 base/abseil/absl/container/internal/hash_function_defaults_test.cc
 create mode 100644 base/abseil/absl/container/internal/hash_generator_testing.cc
 create mode 100644 base/abseil/absl/container/internal/hash_generator_testing.h
 create mode 100644 base/abseil/absl/container/internal/hash_policy_testing.h
 create mode 100644 base/abseil/absl/container/internal/hash_policy_testing_test.cc
 create mode 100644 base/abseil/absl/container/internal/hash_policy_traits.h
 create mode 100644 base/abseil/absl/container/internal/hash_policy_traits_test.cc
 create mode 100644 base/abseil/absl/container/internal/hashtable_debug.h
 create mode 100644 base/abseil/absl/container/internal/hashtable_debug_hooks.h
 create mode 100644 base/abseil/absl/container/internal/hashtablez_sampler.cc
 create mode 100644 base/abseil/absl/container/internal/hashtablez_sampler.h
 create mode 100644 base/abseil/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
 create mode 100644 base/abseil/absl/container/internal/hashtablez_sampler_test.cc
 create mode 100644 base/abseil/absl/container/internal/have_sse.h
 create mode 100644 base/abseil/absl/container/internal/inlined_vector.h
 create mode 100644 base/abseil/absl/container/internal/layout.h
 create mode 100644 base/abseil/absl/container/internal/layout_test.cc
 create mode 100644 base/abseil/absl/container/internal/node_hash_policy.h
 create mode 100644 base/abseil/absl/container/internal/node_hash_policy_test.cc
 create mode 100644 base/abseil/absl/container/internal/raw_hash_map.h
 create mode 100644 base/abseil/absl/container/internal/raw_hash_set.cc
 create mode 100644 base/abseil/absl/container/internal/raw_hash_set.h
 create mode 100644 base/abseil/absl/container/internal/raw_hash_set_allocator_test.cc
 create mode 100644 base/abseil/absl/container/internal/raw_hash_set_test.cc
 create mode 100644 base/abseil/absl/container/internal/test_instance_tracker.cc
 create mode 100644 base/abseil/absl/container/internal/test_instance_tracker.h
 create mode 100644 base/abseil/absl/container/internal/test_instance_tracker_test.cc
 create mode 100644 base/abseil/absl/container/internal/tracked.h
 create mode 100644 base/abseil/absl/container/internal/unordered_map_constructor_test.h
 create mode 100644 base/abseil/absl/container/internal/unordered_map_lookup_test.h
 create mode 100644 base/abseil/absl/container/internal/unordered_map_members_test.h
 create mode 100644 base/abseil/absl/container/internal/unordered_map_modifiers_test.h
 create mode 100644 base/abseil/absl/container/internal/unordered_map_test.cc
 create mode 100644 base/abseil/absl/container/internal/unordered_set_constructor_test.h
 create mode 100644 base/abseil/absl/container/internal/unordered_set_lookup_test.h
 create mode 100644 base/abseil/absl/container/internal/unordered_set_members_test.h
 create mode 100644 base/abseil/absl/container/internal/unordered_set_modifiers_test.h
 create mode 100644 base/abseil/absl/container/internal/unordered_set_test.cc
 create mode 100644 base/abseil/absl/container/node_hash_map.h
 create mode 100644 base/abseil/absl/container/node_hash_map_test.cc
 create mode 100644 base/abseil/absl/container/node_hash_set.h
 create mode 100644 base/abseil/absl/container/node_hash_set_test.cc
 create mode 100644 base/abseil/absl/copts/AbseilConfigureCopts.cmake
 create mode 100644 base/abseil/absl/copts/GENERATED_AbseilCopts.cmake
 create mode 100644 base/abseil/absl/copts/GENERATED_copts.bzl
 create mode 100644 base/abseil/absl/copts/configure_copts.bzl
 create mode 100644 base/abseil/absl/copts/copts.py
 create mode 100644 base/abseil/absl/copts/generate_copts.py
 create mode 100644 base/abseil/absl/debugging/BUILD.bazel
 create mode 100644 base/abseil/absl/debugging/CMakeLists.txt
 create mode 100644 base/abseil/absl/debugging/failure_signal_handler.cc
 create mode 100644 base/abseil/absl/debugging/failure_signal_handler.h
 create mode 100644 base/abseil/absl/debugging/failure_signal_handler_test.cc
 create mode 100644 base/abseil/absl/debugging/internal/address_is_readable.cc
 create mode 100644 base/abseil/absl/debugging/internal/address_is_readable.h
 create mode 100644 base/abseil/absl/debugging/internal/demangle.cc
 create mode 100644 base/abseil/absl/debugging/internal/demangle.h
 create mode 100644 base/abseil/absl/debugging/internal/demangle_test.cc
 create mode 100644 base/abseil/absl/debugging/internal/elf_mem_image.cc
 create mode 100644 base/abseil/absl/debugging/internal/elf_mem_image.h
 create mode 100644 base/abseil/absl/debugging/internal/examine_stack.cc
 create mode 100644 base/abseil/absl/debugging/internal/examine_stack.h
 create mode 100644 base/abseil/absl/debugging/internal/stack_consumption.cc
 create mode 100644 base/abseil/absl/debugging/internal/stack_consumption.h
 create mode 100644 base/abseil/absl/debugging/internal/stack_consumption_test.cc
 create mode 100644 base/abseil/absl/debugging/internal/stacktrace_aarch64-inl.inc
 create mode 100644 base/abseil/absl/debugging/internal/stacktrace_arm-inl.inc
 create mode 100644 base/abseil/absl/debugging/internal/stacktrace_config.h
 create mode 100644 base/abseil/absl/debugging/internal/stacktrace_generic-inl.inc
 create mode 100644 base/abseil/absl/debugging/internal/stacktrace_powerpc-inl.inc
 create mode 100644 base/abseil/absl/debugging/internal/stacktrace_unimplemented-inl.inc
 create mode 100644 base/abseil/absl/debugging/internal/stacktrace_win32-inl.inc
 create mode 100644 base/abseil/absl/debugging/internal/stacktrace_x86-inl.inc
 create mode 100644 base/abseil/absl/debugging/internal/symbolize.h
 create mode 100644 base/abseil/absl/debugging/internal/vdso_support.cc
 create mode 100644 base/abseil/absl/debugging/internal/vdso_support.h
 create mode 100644 base/abseil/absl/debugging/leak_check.cc
 create mode 100644 base/abseil/absl/debugging/leak_check.h
 create mode 100644 base/abseil/absl/debugging/leak_check_disable.cc
 create mode 100644 base/abseil/absl/debugging/leak_check_fail_test.cc
 create mode 100644 base/abseil/absl/debugging/leak_check_test.cc
 create mode 100644 base/abseil/absl/debugging/stacktrace.cc
 create mode 100644 base/abseil/absl/debugging/stacktrace.h
 create mode 100644 base/abseil/absl/debugging/symbolize.cc
 create mode 100644 base/abseil/absl/debugging/symbolize.h
 create mode 100644 base/abseil/absl/debugging/symbolize_elf.inc
 create mode 100644 base/abseil/absl/debugging/symbolize_test.cc
 create mode 100644 base/abseil/absl/debugging/symbolize_unimplemented.inc
 create mode 100644 base/abseil/absl/debugging/symbolize_win32.inc
 create mode 100644 base/abseil/absl/flags/BUILD.bazel
 create mode 100644 base/abseil/absl/flags/CMakeLists.txt
 create mode 100644 base/abseil/absl/flags/config.h
 create mode 100644 base/abseil/absl/flags/config_test.cc
 create mode 100644 base/abseil/absl/flags/declare.h
 create mode 100644 base/abseil/absl/flags/flag.cc
 create mode 100644 base/abseil/absl/flags/flag.h
 create mode 100644 base/abseil/absl/flags/flag_test.cc
 create mode 100644 base/abseil/absl/flags/flag_test_defs.cc
 create mode 100644 base/abseil/absl/flags/internal/commandlineflag.cc
 create mode 100644 base/abseil/absl/flags/internal/commandlineflag.h
 create mode 100644 base/abseil/absl/flags/internal/commandlineflag_test.cc
 create mode 100644 base/abseil/absl/flags/internal/flag.cc
 create mode 100644 base/abseil/absl/flags/internal/flag.h
 create mode 100644 base/abseil/absl/flags/internal/parse.h
 create mode 100644 base/abseil/absl/flags/internal/path_util.h
 create mode 100644 base/abseil/absl/flags/internal/path_util_test.cc
 create mode 100644 base/abseil/absl/flags/internal/program_name.cc
 create mode 100644 base/abseil/absl/flags/internal/program_name.h
 create mode 100644 base/abseil/absl/flags/internal/program_name_test.cc
 create mode 100644 base/abseil/absl/flags/internal/registry.cc
 create mode 100644 base/abseil/absl/flags/internal/registry.h
 create mode 100644 base/abseil/absl/flags/internal/type_erased.cc
 create mode 100644 base/abseil/absl/flags/internal/type_erased.h
 create mode 100644 base/abseil/absl/flags/internal/type_erased_test.cc
 create mode 100644 base/abseil/absl/flags/internal/usage.cc
 create mode 100644 base/abseil/absl/flags/internal/usage.h
 create mode 100644 base/abseil/absl/flags/internal/usage_test.cc
 create mode 100644 base/abseil/absl/flags/marshalling.cc
 create mode 100644 base/abseil/absl/flags/marshalling.h
 create mode 100644 base/abseil/absl/flags/marshalling_test.cc
 create mode 100644 base/abseil/absl/flags/parse.cc
 create mode 100644 base/abseil/absl/flags/parse.h
 create mode 100644 base/abseil/absl/flags/parse_test.cc
 create mode 100644 base/abseil/absl/flags/usage.cc
 create mode 100644 base/abseil/absl/flags/usage.h
 create mode 100644 base/abseil/absl/flags/usage_config.cc
 create mode 100644 base/abseil/absl/flags/usage_config.h
 create mode 100644 base/abseil/absl/flags/usage_config_test.cc
 create mode 100644 base/abseil/absl/functional/BUILD.bazel
 create mode 100644 base/abseil/absl/functional/bind_front.h
 create mode 100644 base/abseil/absl/functional/bind_front_test.cc
 create mode 100644 base/abseil/absl/functional/function_ref.h
 create mode 100644 base/abseil/absl/functional/function_ref_benchmark.cc
 create mode 100644 base/abseil/absl/functional/function_ref_test.cc
 create mode 100644 base/abseil/absl/functional/internal/front_binder.h
 create mode 100644 base/abseil/absl/functional/internal/function_ref.h
 create mode 100644 base/abseil/absl/hash/BUILD.bazel
 create mode 100644 base/abseil/absl/hash/CMakeLists.txt
 create mode 100644 base/abseil/absl/hash/hash.h
 create mode 100644 base/abseil/absl/hash/hash_test.cc
 create mode 100644 base/abseil/absl/hash/hash_testing.h
 create mode 100644 base/abseil/absl/hash/internal/city.cc
 create mode 100644 base/abseil/absl/hash/internal/city.h
 create mode 100644 base/abseil/absl/hash/internal/city_test.cc
 create mode 100644 base/abseil/absl/hash/internal/hash.cc
 create mode 100644 base/abseil/absl/hash/internal/hash.h
 create mode 100644 base/abseil/absl/hash/internal/print_hash_of.cc
 create mode 100644 base/abseil/absl/hash/internal/spy_hash_state.h
 create mode 100644 base/abseil/absl/memory/BUILD.bazel
 create mode 100644 base/abseil/absl/memory/CMakeLists.txt
 create mode 100644 base/abseil/absl/memory/memory.h
 create mode 100644 base/abseil/absl/memory/memory_exception_safety_test.cc
 create mode 100644 base/abseil/absl/memory/memory_test.cc
 create mode 100644 base/abseil/absl/meta/BUILD.bazel
 create mode 100644 base/abseil/absl/meta/CMakeLists.txt
 create mode 100644 base/abseil/absl/meta/type_traits.h
 create mode 100644 base/abseil/absl/meta/type_traits_test.cc
 create mode 100644 base/abseil/absl/numeric/BUILD.bazel
 create mode 100644 base/abseil/absl/numeric/CMakeLists.txt
 create mode 100644 base/abseil/absl/numeric/int128.cc
 create mode 100644 base/abseil/absl/numeric/int128.h
 create mode 100644 base/abseil/absl/numeric/int128_benchmark.cc
 create mode 100644 base/abseil/absl/numeric/int128_have_intrinsic.inc
 create mode 100644 base/abseil/absl/numeric/int128_no_intrinsic.inc
 create mode 100644 base/abseil/absl/numeric/int128_stream_test.cc
 create mode 100644 base/abseil/absl/numeric/int128_test.cc
 create mode 100644 base/abseil/absl/random/BUILD.bazel
 create mode 100644 base/abseil/absl/random/CMakeLists.txt
 create mode 100644 base/abseil/absl/random/benchmarks.cc
 create mode 100644 base/abseil/absl/random/bernoulli_distribution.h
 create mode 100644 base/abseil/absl/random/bernoulli_distribution_test.cc
 create mode 100644 base/abseil/absl/random/beta_distribution.h
 create mode 100644 base/abseil/absl/random/beta_distribution_test.cc
 create mode 100644 base/abseil/absl/random/bit_gen_ref.h
 create mode 100644 base/abseil/absl/random/bit_gen_ref_test.cc
 create mode 100644 base/abseil/absl/random/discrete_distribution.cc
 create mode 100644 base/abseil/absl/random/discrete_distribution.h
 create mode 100644 base/abseil/absl/random/discrete_distribution_test.cc
 create mode 100644 base/abseil/absl/random/distribution_format_traits.h
 create mode 100644 base/abseil/absl/random/distributions.h
 create mode 100644 base/abseil/absl/random/distributions_test.cc
 create mode 100644 base/abseil/absl/random/examples_test.cc
 create mode 100644 base/abseil/absl/random/exponential_distribution.h
 create mode 100644 base/abseil/absl/random/exponential_distribution_test.cc
 create mode 100644 base/abseil/absl/random/gaussian_distribution.cc
 create mode 100644 base/abseil/absl/random/gaussian_distribution.h
 create mode 100644 base/abseil/absl/random/gaussian_distribution_test.cc
 create mode 100644 base/abseil/absl/random/generators_test.cc
 create mode 100644 base/abseil/absl/random/internal/BUILD.bazel
 create mode 100644 base/abseil/absl/random/internal/chi_square.cc
 create mode 100644 base/abseil/absl/random/internal/chi_square.h
 create mode 100644 base/abseil/absl/random/internal/chi_square_test.cc
 create mode 100644 base/abseil/absl/random/internal/distribution_caller.h
 create mode 100644 base/abseil/absl/random/internal/distribution_test_util.cc
 create mode 100644 base/abseil/absl/random/internal/distribution_test_util.h
 create mode 100644 base/abseil/absl/random/internal/distribution_test_util_test.cc
 create mode 100644 base/abseil/absl/random/internal/distributions.h
 create mode 100644 base/abseil/absl/random/internal/explicit_seed_seq.h
 create mode 100644 base/abseil/absl/random/internal/explicit_seed_seq_test.cc
 create mode 100644 base/abseil/absl/random/internal/fast_uniform_bits.h
 create mode 100644 base/abseil/absl/random/internal/fast_uniform_bits_test.cc
 create mode 100644 base/abseil/absl/random/internal/fastmath.h
 create mode 100644 base/abseil/absl/random/internal/fastmath_test.cc
 create mode 100644 base/abseil/absl/random/internal/gaussian_distribution_gentables.cc
 create mode 100644 base/abseil/absl/random/internal/generate_real.h
 create mode 100644 base/abseil/absl/random/internal/generate_real_test.cc
 create mode 100644 base/abseil/absl/random/internal/iostream_state_saver.h
 create mode 100644 base/abseil/absl/random/internal/iostream_state_saver_test.cc
 create mode 100644 base/abseil/absl/random/internal/mock_overload_set.h
 create mode 100644 base/abseil/absl/random/internal/mocking_bit_gen_base.h
 create mode 100644 base/abseil/absl/random/internal/nanobenchmark.cc
 create mode 100644 base/abseil/absl/random/internal/nanobenchmark.h
 create mode 100644 base/abseil/absl/random/internal/nanobenchmark_test.cc
 create mode 100644 base/abseil/absl/random/internal/nonsecure_base.h
 create mode 100644 base/abseil/absl/random/internal/nonsecure_base_test.cc
 create mode 100644 base/abseil/absl/random/internal/pcg_engine.h
 create mode 100644 base/abseil/absl/random/internal/pcg_engine_test.cc
 create mode 100644 base/abseil/absl/random/internal/platform.h
 create mode 100644 base/abseil/absl/random/internal/pool_urbg.cc
 create mode 100644 base/abseil/absl/random/internal/pool_urbg.h
 create mode 100644 base/abseil/absl/random/internal/pool_urbg_test.cc
 create mode 100644 base/abseil/absl/random/internal/randen-keys.inc
 create mode 100644 base/abseil/absl/random/internal/randen.cc
 create mode 100644 base/abseil/absl/random/internal/randen.h
 create mode 100644 base/abseil/absl/random/internal/randen_benchmarks.cc
 create mode 100644 base/abseil/absl/random/internal/randen_detect.cc
 create mode 100644 base/abseil/absl/random/internal/randen_detect.h
 create mode 100644 base/abseil/absl/random/internal/randen_engine.h
 create mode 100644 base/abseil/absl/random/internal/randen_engine_test.cc
 create mode 100644 base/abseil/absl/random/internal/randen_hwaes.cc
 create mode 100644 base/abseil/absl/random/internal/randen_hwaes.h
 create mode 100644 base/abseil/absl/random/internal/randen_hwaes_test.cc
 create mode 100644 base/abseil/absl/random/internal/randen_slow.cc
 create mode 100644 base/abseil/absl/random/internal/randen_slow.h
 create mode 100644 base/abseil/absl/random/internal/randen_slow_test.cc
 create mode 100644 base/abseil/absl/random/internal/randen_test.cc
 create mode 100644 base/abseil/absl/random/internal/randen_traits.h
 create mode 100644 base/abseil/absl/random/internal/salted_seed_seq.h
 create mode 100644 base/abseil/absl/random/internal/salted_seed_seq_test.cc
 create mode 100644 base/abseil/absl/random/internal/seed_material.cc
 create mode 100644 base/abseil/absl/random/internal/seed_material.h
 create mode 100644 base/abseil/absl/random/internal/seed_material_test.cc
 create mode 100644 base/abseil/absl/random/internal/seed_salting_sequence_generator.cc
 create mode 100644 base/abseil/absl/random/internal/seed_salting_sequence_generator_empty_sequence.cc
 create mode 100644 base/abseil/absl/random/internal/sequence_urbg.h
 create mode 100644 base/abseil/absl/random/internal/traits.h
 create mode 100644 base/abseil/absl/random/internal/traits_test.cc
 create mode 100644 base/abseil/absl/random/internal/uniform_helper.h
 create mode 100644 base/abseil/absl/random/internal/wide_multiply.h
 create mode 100644 base/abseil/absl/random/internal/wide_multiply_test.cc
 create mode 100644 base/abseil/absl/random/log_uniform_int_distribution.h
 create mode 100644 base/abseil/absl/random/log_uniform_int_distribution_test.cc
 create mode 100644 base/abseil/absl/random/mock_distributions.h
 create mode 100644 base/abseil/absl/random/mock_distributions_test.cc
 create mode 100644 base/abseil/absl/random/mocking_bit_gen.cc
 create mode 100644 base/abseil/absl/random/mocking_bit_gen.h
 create mode 100644 base/abseil/absl/random/mocking_bit_gen_test.cc
 create mode 100644 base/abseil/absl/random/poisson_distribution.h
 create mode 100644 base/abseil/absl/random/poisson_distribution_test.cc
 create mode 100644 base/abseil/absl/random/random.h
 create mode 100644 base/abseil/absl/random/seed_gen_exception.cc
 create mode 100644 base/abseil/absl/random/seed_gen_exception.h
 create mode 100644 base/abseil/absl/random/seed_sequences.cc
 create mode 100644 base/abseil/absl/random/seed_sequences.h
 create mode 100644 base/abseil/absl/random/seed_sequences_test.cc
 create mode 100644 base/abseil/absl/random/uniform_int_distribution.h
 create mode 100644 base/abseil/absl/random/uniform_int_distribution_test.cc
 create mode 100644 base/abseil/absl/random/uniform_real_distribution.h
 create mode 100644 base/abseil/absl/random/uniform_real_distribution_test.cc
 create mode 100644 base/abseil/absl/random/zipf_distribution.h
 create mode 100644 base/abseil/absl/random/zipf_distribution_test.cc
 create mode 100644 base/abseil/absl/strings/BUILD.bazel
 create mode 100644 base/abseil/absl/strings/CMakeLists.txt
 create mode 100644 base/abseil/absl/strings/ascii.cc
 create mode 100644 base/abseil/absl/strings/ascii.h
 create mode 100644 base/abseil/absl/strings/ascii_benchmark.cc
 create mode 100644 base/abseil/absl/strings/ascii_test.cc
 create mode 100644 base/abseil/absl/strings/charconv.cc
 create mode 100644 base/abseil/absl/strings/charconv.h
 create mode 100644 base/abseil/absl/strings/charconv_benchmark.cc
 create mode 100644 base/abseil/absl/strings/charconv_test.cc
 create mode 100644 base/abseil/absl/strings/escaping.cc
 create mode 100644 base/abseil/absl/strings/escaping.h
 create mode 100644 base/abseil/absl/strings/escaping_benchmark.cc
 create mode 100644 base/abseil/absl/strings/escaping_test.cc
 create mode 100644 base/abseil/absl/strings/internal/char_map.h
 create mode 100644 base/abseil/absl/strings/internal/char_map_benchmark.cc
 create mode 100644 base/abseil/absl/strings/internal/char_map_test.cc
 create mode 100644 base/abseil/absl/strings/internal/charconv_bigint.cc
 create mode 100644 base/abseil/absl/strings/internal/charconv_bigint.h
 create mode 100644 base/abseil/absl/strings/internal/charconv_bigint_test.cc
 create mode 100644 base/abseil/absl/strings/internal/charconv_parse.cc
 create mode 100644 base/abseil/absl/strings/internal/charconv_parse.h
 create mode 100644 base/abseil/absl/strings/internal/charconv_parse_test.cc
 create mode 100644 base/abseil/absl/strings/internal/escaping_test_common.h
 create mode 100644 base/abseil/absl/strings/internal/memutil.cc
 create mode 100644 base/abseil/absl/strings/internal/memutil.h
 create mode 100644 base/abseil/absl/strings/internal/memutil_benchmark.cc
 create mode 100644 base/abseil/absl/strings/internal/memutil_test.cc
 create mode 100644 base/abseil/absl/strings/internal/numbers_test_common.h
 create mode 100644 base/abseil/absl/strings/internal/ostringstream.cc
 create mode 100644 base/abseil/absl/strings/internal/ostringstream.h
 create mode 100644 base/abseil/absl/strings/internal/ostringstream_benchmark.cc
 create mode 100644 base/abseil/absl/strings/internal/ostringstream_test.cc
 create mode 100644 base/abseil/absl/strings/internal/pow10_helper.cc
 create mode 100644 base/abseil/absl/strings/internal/pow10_helper.h
 create mode 100644 base/abseil/absl/strings/internal/pow10_helper_test.cc
 create mode 100644 base/abseil/absl/strings/internal/resize_uninitialized.h
 create mode 100644 base/abseil/absl/strings/internal/resize_uninitialized_test.cc
 create mode 100644 base/abseil/absl/strings/internal/stl_type_traits.h
 create mode 100644 base/abseil/absl/strings/internal/str_format/arg.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/arg.h
 create mode 100644 base/abseil/absl/strings/internal/str_format/arg_test.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/bind.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/bind.h
 create mode 100644 base/abseil/absl/strings/internal/str_format/bind_test.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/checker.h
 create mode 100644 base/abseil/absl/strings/internal/str_format/checker_test.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/convert_test.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/extension.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/extension.h
 create mode 100644 base/abseil/absl/strings/internal/str_format/extension_test.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/float_conversion.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/float_conversion.h
 create mode 100644 base/abseil/absl/strings/internal/str_format/output.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/output.h
 create mode 100644 base/abseil/absl/strings/internal/str_format/output_test.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/parser.cc
 create mode 100644 base/abseil/absl/strings/internal/str_format/parser.h
 create mode 100644 base/abseil/absl/strings/internal/str_format/parser_test.cc
 create mode 100644 base/abseil/absl/strings/internal/str_join_internal.h
 create mode 100644 base/abseil/absl/strings/internal/str_split_internal.h
 create mode 100644 base/abseil/absl/strings/internal/utf8.cc
 create mode 100644 base/abseil/absl/strings/internal/utf8.h
 create mode 100644 base/abseil/absl/strings/internal/utf8_test.cc
 create mode 100644 base/abseil/absl/strings/match.cc
 create mode 100644 base/abseil/absl/strings/match.h
 create mode 100644 base/abseil/absl/strings/match_test.cc
 create mode 100644 base/abseil/absl/strings/numbers.cc
 create mode 100644 base/abseil/absl/strings/numbers.h
 create mode 100644 base/abseil/absl/strings/numbers_benchmark.cc
 create mode 100644 base/abseil/absl/strings/numbers_test.cc
 create mode 100644 base/abseil/absl/strings/str_cat.cc
 create mode 100644 base/abseil/absl/strings/str_cat.h
 create mode 100644 base/abseil/absl/strings/str_cat_benchmark.cc
 create mode 100644 base/abseil/absl/strings/str_cat_test.cc
 create mode 100644 base/abseil/absl/strings/str_format.h
 create mode 100644 base/abseil/absl/strings/str_format_test.cc
 create mode 100644 base/abseil/absl/strings/str_join.h
 create mode 100644 base/abseil/absl/strings/str_join_benchmark.cc
 create mode 100644 base/abseil/absl/strings/str_join_test.cc
 create mode 100644 base/abseil/absl/strings/str_replace.cc
 create mode 100644 base/abseil/absl/strings/str_replace.h
 create mode 100644 base/abseil/absl/strings/str_replace_benchmark.cc
 create mode 100644 base/abseil/absl/strings/str_replace_test.cc
 create mode 100644 base/abseil/absl/strings/str_split.cc
 create mode 100644 base/abseil/absl/strings/str_split.h
 create mode 100644 base/abseil/absl/strings/str_split_benchmark.cc
 create mode 100644 base/abseil/absl/strings/str_split_test.cc
 create mode 100644 base/abseil/absl/strings/string_view.cc
 create mode 100644 base/abseil/absl/strings/string_view.h
 create mode 100644 base/abseil/absl/strings/string_view_benchmark.cc
 create mode 100644 base/abseil/absl/strings/string_view_test.cc
 create mode 100644 base/abseil/absl/strings/strip.h
 create mode 100644 base/abseil/absl/strings/strip_test.cc
 create mode 100644 base/abseil/absl/strings/substitute.cc
 create mode 100644 base/abseil/absl/strings/substitute.h
 create mode 100644 base/abseil/absl/strings/substitute_test.cc
 create mode 100644 base/abseil/absl/strings/testdata/getline-1.txt
 create mode 100644 base/abseil/absl/strings/testdata/getline-2.txt
 create mode 100644 base/abseil/absl/synchronization/BUILD.bazel
 create mode 100644 base/abseil/absl/synchronization/CMakeLists.txt
 create mode 100644 base/abseil/absl/synchronization/barrier.cc
 create mode 100644 base/abseil/absl/synchronization/barrier.h
 create mode 100644 base/abseil/absl/synchronization/barrier_test.cc
 create mode 100644 base/abseil/absl/synchronization/blocking_counter.cc
 create mode 100644 base/abseil/absl/synchronization/blocking_counter.h
 create mode 100644 base/abseil/absl/synchronization/blocking_counter_test.cc
 create mode 100644 base/abseil/absl/synchronization/internal/create_thread_identity.cc
 create mode 100644 base/abseil/absl/synchronization/internal/create_thread_identity.h
 create mode 100644 base/abseil/absl/synchronization/internal/graphcycles.cc
 create mode 100644 base/abseil/absl/synchronization/internal/graphcycles.h
 create mode 100644 base/abseil/absl/synchronization/internal/graphcycles_benchmark.cc
 create mode 100644 base/abseil/absl/synchronization/internal/graphcycles_test.cc
 create mode 100644 base/abseil/absl/synchronization/internal/kernel_timeout.h
 create mode 100644 base/abseil/absl/synchronization/internal/mutex_nonprod.cc
 create mode 100644 base/abseil/absl/synchronization/internal/mutex_nonprod.inc
 create mode 100644 base/abseil/absl/synchronization/internal/per_thread_sem.cc
 create mode 100644 base/abseil/absl/synchronization/internal/per_thread_sem.h
 create mode 100644 base/abseil/absl/synchronization/internal/per_thread_sem_test.cc
 create mode 100644 base/abseil/absl/synchronization/internal/thread_pool.h
 create mode 100644 base/abseil/absl/synchronization/internal/waiter.cc
 create mode 100644 base/abseil/absl/synchronization/internal/waiter.h
 create mode 100644 base/abseil/absl/synchronization/lifetime_test.cc
 create mode 100644 base/abseil/absl/synchronization/mutex.cc
 create mode 100644 base/abseil/absl/synchronization/mutex.h
 create mode 100644 base/abseil/absl/synchronization/mutex_benchmark.cc
 create mode 100644 base/abseil/absl/synchronization/mutex_test.cc
 create mode 100644 base/abseil/absl/synchronization/notification.cc
 create mode 100644 base/abseil/absl/synchronization/notification.h
 create mode 100644 base/abseil/absl/synchronization/notification_test.cc
 create mode 100644 base/abseil/absl/time/BUILD.bazel
 create mode 100644 base/abseil/absl/time/CMakeLists.txt
 create mode 100644 base/abseil/absl/time/civil_time.cc
 create mode 100644 base/abseil/absl/time/civil_time.h
 create mode 100644 base/abseil/absl/time/civil_time_benchmark.cc
 create mode 100644 base/abseil/absl/time/civil_time_test.cc
 create mode 100644 base/abseil/absl/time/clock.cc
 create mode 100644 base/abseil/absl/time/clock.h
 create mode 100644 base/abseil/absl/time/clock_benchmark.cc
 create mode 100644 base/abseil/absl/time/clock_test.cc
 create mode 100644 base/abseil/absl/time/duration.cc
 create mode 100644 base/abseil/absl/time/duration_benchmark.cc
 create mode 100644 base/abseil/absl/time/duration_test.cc
 create mode 100644 base/abseil/absl/time/format.cc
 create mode 100644 base/abseil/absl/time/format_benchmark.cc
 create mode 100644 base/abseil/absl/time/format_test.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/BUILD.bazel
 create mode 100644 base/abseil/absl/time/internal/cctz/include/cctz/civil_time.h
 create mode 100644 base/abseil/absl/time/internal/cctz/include/cctz/civil_time_detail.h
 create mode 100644 base/abseil/absl/time/internal/cctz/include/cctz/time_zone.h
 create mode 100644 base/abseil/absl/time/internal/cctz/include/cctz/zone_info_source.h
 create mode 100644 base/abseil/absl/time/internal/cctz/src/cctz_benchmark.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/civil_time_detail.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/civil_time_test.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_fixed.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_fixed.h
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_format.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_format_test.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_if.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_if.h
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_impl.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_impl.h
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_info.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_info.h
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_libc.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_libc.h
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_lookup.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_lookup_test.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_posix.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/src/time_zone_posix.h
 create mode 100644 base/abseil/absl/time/internal/cctz/src/tzfile.h
 create mode 100644 base/abseil/absl/time/internal/cctz/src/zone_info_source.cc
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/README.zoneinfo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/version
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Abidjan
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Accra
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Algiers
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bissau
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ceuta
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Johannesburg
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Juba
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Khartoum
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lagos
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maputo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Monrovia
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nairobi
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ndjamena
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Sao_Tome
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tripoli
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tunis
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Africa/Windhoek
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Adak
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Anchorage
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Araguaina
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Buenos_Aires
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Catamarca
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/ComodRivadavia
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Cordoba
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Jujuy
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/La_Rioja
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Mendoza
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Rio_Gallegos
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Salta
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Juan
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Luis
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Tucuman
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Ushuaia
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Asuncion
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Atikokan
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Atka
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Belem
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Belize
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Blanc-Sablon
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Boa_Vista
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Bogota
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Boise
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Buenos_Aires
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Cambridge_Bay
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Campo_Grande
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Cancun
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Caracas
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Catamarca
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Cayenne
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Chicago
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Coral_Harbour
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Cordoba
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Costa_Rica
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Creston
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Cuiaba
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Curacao
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Danmarkshavn
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson_Creek
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Denver
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Detroit
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Edmonton
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Eirunepe
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/El_Salvador
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Nelson
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Wayne
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Fortaleza
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Glace_Bay
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Goose_Bay
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Grand_Turk
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Guatemala
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Guayaquil
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Halifax
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Havana
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Indianapolis
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Knox
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Marengo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Petersburg
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Tell_City
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vevay
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vincennes
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Winamac
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Indianapolis
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Inuvik
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Iqaluit
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Jamaica
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Jujuy
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Juneau
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Louisville
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Monticello
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Knox_IN
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Kralendijk
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/La_Paz
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Lima
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Los_Angeles
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Louisville
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Lower_Princes
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Maceio
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Managua
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Manaus
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Marigot
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Martinique
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Mendoza
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Menominee
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Merida
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Metlakatla
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Moncton
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Montevideo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Nassau
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/New_York
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Nome
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Noronha
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Beulah
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Center
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/New_Salem
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Panama
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Pangnirtung
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Paramaribo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Phoenix
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Port-au-Prince
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Port_of_Spain
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Acre
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Velho
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Puerto_Rico
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Rankin_Inlet
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Recife
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Regina
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Resolute
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Rio_Branco
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Rosario
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Santarem
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Santo_Domingo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Sao_Paulo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Scoresbysund
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Shiprock
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Sitka
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/St_Barthelemy
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/St_Johns
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Swift_Current
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Tegucigalpa
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Thule
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Vancouver
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Whitehorse
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Winnipeg
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Yakutat
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Casey
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Davis
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/DumontDUrville
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Macquarie
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Mawson
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Palmer
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Rothera
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Syowa
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Troll
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Vostok
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Arctic/Longyearbyen
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Anadyr
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtau
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtobe
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashgabat
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashkhabad
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Atyrau
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baghdad
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baku
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bangkok
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Barnaul
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Beirut
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bishkek
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Brunei
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Calcutta
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chita
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Choibalsan
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chongqing
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chungking
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Colombo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dacca
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dhaka
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dili
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dubai
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dushanbe
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Famagusta
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Harbin
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hong_Kong
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hovd
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Irkutsk
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Istanbul
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jakarta
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jayapura
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jerusalem
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kabul
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kamchatka
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Karachi
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kashgar
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kathmandu
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Katmandu
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Khandyga
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kolkata
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Krasnoyarsk
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuala_Lumpur
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuching
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macao
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macau
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Magadan
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Makassar
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Manila
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Nicosia
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novokuznetsk
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novosibirsk
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Omsk
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Oral
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pontianak
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pyongyang
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qatar
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qyzylorda
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Rangoon
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Riyadh
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Sakhalin
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Samarkand
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Seoul
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Shanghai
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Singapore
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Srednekolymsk
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Taipei
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tashkent
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tbilisi
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tel_Aviv
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimbu
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimphu
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tokyo
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tomsk
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ujung_Pandang
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulaanbaatar
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulan_Bator
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Urumqi
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ust-Nera
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vladivostok
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yakutsk
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yangon
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yekaterinburg
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yerevan
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Bermuda
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Canary
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Cape_Verde
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faeroe
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faroe
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Jan_Mayen
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Reykjavik
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/South_Georgia
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Stanley
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/ACT
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Adelaide
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Brisbane
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Broken_Hill
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Canberra
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Currie
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Darwin
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Eucla
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Hobart
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/LHI
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lindeman
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lord_Howe
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Melbourne
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/NSW
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/North
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Perth
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Queensland
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/South
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Sydney
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Tasmania
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Victoria
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/West
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Australia/Yancowinna
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Brazil/Acre
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Brazil/DeNoronha
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Brazil/East
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Brazil/West
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/CET
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/CST6CDT
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Canada/Atlantic
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Canada/Central
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Canada/Mountain
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Canada/Newfoundland
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Canada/Pacific
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Canada/Saskatchewan
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Canada/Yukon
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Cuba
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/EET
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/EST
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/EST5EDT
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Egypt
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Eire
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+0
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+1
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+10
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+11
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+12
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+2
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+3
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+4
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+5
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+6
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+7
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+8
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+9
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-0
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-1
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-10
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-11
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-12
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-13
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-14
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-2
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-3
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-4
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-5
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-6
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-7
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-8
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-9
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT0
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/Greenwich
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/UCT
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/UTC
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/Universal
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Etc/Zulu
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Amsterdam
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Andorra
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Astrakhan
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Athens
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belfast
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belgrade
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Berlin
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bratislava
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Brussels
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bucharest
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Budapest
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Busingen
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Chisinau
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Copenhagen
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Gibraltar
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Helsinki
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Istanbul
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kaliningrad
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/London
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Luxembourg
 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Madrid
create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Malta create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Mariehamn create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Minsk create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Monaco create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Moscow create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Nicosia create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Oslo create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Paris create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Podgorica create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Prague create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Riga create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Rome create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Samara create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/San_Marino create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Saratov create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sofia create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Stockholm create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tallinn create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tirane create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tiraspol create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ulyanovsk create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vatican create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vienna create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vilnius create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Warsaw create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zurich create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Factory create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/GB create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/GB-Eire create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/GMT create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/GMT+0 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/GMT-0 create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/GMT0 create mode 100644 
base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Greenwich create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/HST create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Hongkong create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Iceland create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Chagos create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Christmas create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Cocos create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Kerguelen create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mahe create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Maldives create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mauritius create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Indian/Reunion create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Iran create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Israel create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Jamaica create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Japan create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Kwajalein create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Libya create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/MET create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/MST create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/MST7MDT create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/NZ create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/NZ-CHAT create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Navajo create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/PRC create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/PST8PDT create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Auckland create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Bougainville create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chatham create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chuuk create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Efate create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fakaofo create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji create mode 
100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Funafuti create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Galapagos create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Gambier create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guadalcanal create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guam create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Honolulu create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Johnston create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kiritimati create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kosrae create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kwajalein create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Majuro create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Marquesas create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Nauru create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Norfolk create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Noumea create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pago_Pago create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Palau create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pitcairn create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pohnpei create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Ponape create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Port_Moresby create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Samoa create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tahiti create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tarawa create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Truk create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wake create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wallis create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Yap create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Poland create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Portugal create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/ROC create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/ROK create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Singapore create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Turkey create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/UCT create mode 100644 
base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Alaska create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Aleutian create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Arizona create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Central create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/East-Indiana create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Eastern create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Hawaii create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Indiana-Starke create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Michigan create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Mountain create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Pacific create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/US/Samoa create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/UTC create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Universal create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/W-SU create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/WET create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/Zulu create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/localtime create mode 100644 base/abseil/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab create mode 100644 base/abseil/absl/time/internal/get_current_time_chrono.inc create mode 100644 base/abseil/absl/time/internal/get_current_time_posix.inc create mode 100644 base/abseil/absl/time/internal/test_util.cc create mode 100644 base/abseil/absl/time/internal/test_util.h create mode 100644 base/abseil/absl/time/internal/zoneinfo.inc create mode 100644 base/abseil/absl/time/time.cc create mode 100644 base/abseil/absl/time/time.h create mode 100644 base/abseil/absl/time/time_benchmark.cc create mode 100644 base/abseil/absl/time/time_test.cc create mode 100644 base/abseil/absl/time/time_zone_test.cc create mode 100644 base/abseil/absl/types/BUILD.bazel create mode 100644 base/abseil/absl/types/CMakeLists.txt create mode 100644 base/abseil/absl/types/any.h create mode 100644 base/abseil/absl/types/any_exception_safety_test.cc create mode 100644 base/abseil/absl/types/any_test.cc create mode 100644 base/abseil/absl/types/bad_any_cast.cc create mode 100644 base/abseil/absl/types/bad_any_cast.h create mode 100644 base/abseil/absl/types/bad_optional_access.cc create mode 100644 base/abseil/absl/types/bad_optional_access.h create mode 100644 base/abseil/absl/types/bad_variant_access.cc create mode 100644 base/abseil/absl/types/bad_variant_access.h create mode 100644 base/abseil/absl/types/compare.h create mode 100644 base/abseil/absl/types/compare_test.cc create mode 100644 base/abseil/absl/types/internal/conformance_aliases.h create mode 100644 base/abseil/absl/types/internal/conformance_archetype.h create mode 100644 base/abseil/absl/types/internal/conformance_profile.h create mode 100644 base/abseil/absl/types/internal/conformance_testing_test.cc create mode 100644 base/abseil/absl/types/internal/optional.h create mode 100644 base/abseil/absl/types/internal/span.h create mode 100644 base/abseil/absl/types/internal/variant.h create mode 100644 
base/abseil/absl/types/optional.h create mode 100644 base/abseil/absl/types/optional_exception_safety_test.cc create mode 100644 base/abseil/absl/types/optional_test.cc create mode 100644 base/abseil/absl/types/span.h create mode 100644 base/abseil/absl/types/span_test.cc create mode 100644 base/abseil/absl/types/variant.h create mode 100644 base/abseil/absl/types/variant_benchmark.cc create mode 100644 base/abseil/absl/types/variant_exception_safety_test.cc create mode 100644 base/abseil/absl/types/variant_test.cc create mode 100644 base/abseil/absl/utility/BUILD.bazel create mode 100644 base/abseil/absl/utility/CMakeLists.txt create mode 100644 base/abseil/absl/utility/utility.h create mode 100644 base/abseil/absl/utility/utility_test.cc create mode 100644 base/abseil/ci/cmake_install_test.sh create mode 100644 base/abseil/ci/linux_clang-latest_libcxx_asan_bazel.sh create mode 100644 base/abseil/ci/linux_clang-latest_libcxx_bazel.sh create mode 100644 base/abseil/ci/linux_clang-latest_libcxx_tsan_bazel.sh create mode 100644 base/abseil/ci/linux_clang-latest_libstdcxx_bazel.sh create mode 100644 base/abseil/ci/linux_gcc-4.9_libstdcxx_bazel.sh create mode 100644 base/abseil/ci/linux_gcc-latest_libstdcxx_bazel.sh create mode 100644 base/abseil/ci/linux_gcc-latest_libstdcxx_cmake.sh create mode 100644 base/abseil/ci/linux_gcc_alpine_cmake.sh create mode 100644 base/abseil/ci/macos_xcode_bazel.sh create mode 100644 base/abseil/ci/macos_xcode_cmake.sh create mode 100644 base/abseil/conanfile.py create mode 100644 base/jsoncpp/.gitignore create mode 100644 base/jsoncpp/.travis.yml create mode 100644 base/jsoncpp/AUTHORS create mode 100644 base/jsoncpp/CMakeLists.txt create mode 100644 base/jsoncpp/LICENSE create mode 100644 base/jsoncpp/NEWS.txt create mode 100644 base/jsoncpp/README.txt create mode 100644 base/jsoncpp/SConstruct create mode 100644 base/jsoncpp/amalgamate.py create mode 100644 base/jsoncpp/devtools/__init__.py create mode 100644 base/jsoncpp/devtools/agent_vmw7.json create mode 100644 base/jsoncpp/devtools/agent_vmxp.json create mode 100644 base/jsoncpp/devtools/antglob.py create mode 100644 base/jsoncpp/devtools/batchbuild.py create mode 100644 base/jsoncpp/devtools/fixeol.py create mode 100644 base/jsoncpp/devtools/licenseupdater.py create mode 100644 base/jsoncpp/devtools/tarball.py create mode 100644 base/jsoncpp/doc/doxyfile.in create mode 100644 base/jsoncpp/doc/footer.html create mode 100644 base/jsoncpp/doc/header.html create mode 100644 base/jsoncpp/doc/jsoncpp.dox create mode 100644 base/jsoncpp/doc/readme.txt create mode 100644 base/jsoncpp/doc/roadmap.dox create mode 100644 base/jsoncpp/doxybuild.py create mode 100644 base/jsoncpp/include/CMakeLists.txt create mode 100644 base/jsoncpp/include/json/assertions.h create mode 100644 base/jsoncpp/include/json/autolink.h create mode 100644 base/jsoncpp/include/json/config.h create mode 100644 base/jsoncpp/include/json/features.h create mode 100644 base/jsoncpp/include/json/forwards.h create mode 100644 base/jsoncpp/include/json/json.h create mode 100644 base/jsoncpp/include/json/reader.h create mode 100644 base/jsoncpp/include/json/value.h create mode 100644 base/jsoncpp/include/json/version.h create mode 100644 base/jsoncpp/include/json/writer.h create mode 100644 base/jsoncpp/makefiles/vs71/jsoncpp.sln create mode 100644 base/jsoncpp/makefiles/vs71/jsontest.vcproj create mode 100644 base/jsoncpp/makefiles/vs71/lib_json.vcproj create mode 100644 base/jsoncpp/makefiles/vs71/test_lib_json.vcproj create mode 100644 
base/jsoncpp/makerelease.py create mode 100644 base/jsoncpp/scons-tools/globtool.py create mode 100644 base/jsoncpp/scons-tools/srcdist.py create mode 100644 base/jsoncpp/scons-tools/substinfile.py create mode 100644 base/jsoncpp/scons-tools/targz.py create mode 100644 base/jsoncpp/src/CMakeLists.txt create mode 100644 base/jsoncpp/src/jsontestrunner/CMakeLists.txt create mode 100644 base/jsoncpp/src/jsontestrunner/main.cpp create mode 100644 base/jsoncpp/src/jsontestrunner/sconscript create mode 100644 base/jsoncpp/src/lib_json/CMakeLists.txt create mode 100644 base/jsoncpp/src/lib_json/json_batchallocator.h create mode 100644 base/jsoncpp/src/lib_json/json_internalarray.inl create mode 100644 base/jsoncpp/src/lib_json/json_internalmap.inl create mode 100644 base/jsoncpp/src/lib_json/json_reader.cpp create mode 100644 base/jsoncpp/src/lib_json/json_tool.h create mode 100644 base/jsoncpp/src/lib_json/json_value.cpp create mode 100644 base/jsoncpp/src/lib_json/json_valueiterator.inl create mode 100644 base/jsoncpp/src/lib_json/json_writer.cpp create mode 100644 base/jsoncpp/src/lib_json/sconscript create mode 100644 base/jsoncpp/src/lib_json/version.h.in create mode 100644 base/jsoncpp/src/test_lib_json/CMakeLists.txt create mode 100644 base/jsoncpp/src/test_lib_json/jsontest.cpp create mode 100644 base/jsoncpp/src/test_lib_json/jsontest.h create mode 100644 base/jsoncpp/src/test_lib_json/main.cpp create mode 100644 base/jsoncpp/src/test_lib_json/sconscript create mode 100644 base/jsoncpp/test/cleantests.py create mode 100644 base/jsoncpp/test/data/fail_test_array_01.json create mode 100644 base/jsoncpp/test/data/test_array_01.expected create mode 100644 base/jsoncpp/test/data/test_array_01.json create mode 100644 base/jsoncpp/test/data/test_array_02.expected create mode 100644 base/jsoncpp/test/data/test_array_02.json create mode 100644 base/jsoncpp/test/data/test_array_03.expected create mode 100644 base/jsoncpp/test/data/test_array_03.json create mode 100644 base/jsoncpp/test/data/test_array_04.expected create mode 100644 base/jsoncpp/test/data/test_array_04.json create mode 100644 base/jsoncpp/test/data/test_array_05.expected create mode 100644 base/jsoncpp/test/data/test_array_05.json create mode 100644 base/jsoncpp/test/data/test_array_06.expected create mode 100644 base/jsoncpp/test/data/test_array_06.json create mode 100644 base/jsoncpp/test/data/test_array_07.expected create mode 100644 base/jsoncpp/test/data/test_array_07.json create mode 100644 base/jsoncpp/test/data/test_basic_01.expected create mode 100644 base/jsoncpp/test/data/test_basic_01.json create mode 100644 base/jsoncpp/test/data/test_basic_02.expected create mode 100644 base/jsoncpp/test/data/test_basic_02.json create mode 100644 base/jsoncpp/test/data/test_basic_03.expected create mode 100644 base/jsoncpp/test/data/test_basic_03.json create mode 100644 base/jsoncpp/test/data/test_basic_04.expected create mode 100644 base/jsoncpp/test/data/test_basic_04.json create mode 100644 base/jsoncpp/test/data/test_basic_05.expected create mode 100644 base/jsoncpp/test/data/test_basic_05.json create mode 100644 base/jsoncpp/test/data/test_basic_06.expected create mode 100644 base/jsoncpp/test/data/test_basic_06.json create mode 100644 base/jsoncpp/test/data/test_basic_07.expected create mode 100644 base/jsoncpp/test/data/test_basic_07.json create mode 100644 base/jsoncpp/test/data/test_basic_08.expected create mode 100644 base/jsoncpp/test/data/test_basic_08.json create mode 100644 base/jsoncpp/test/data/test_basic_09.expected 
create mode 100644 base/jsoncpp/test/data/test_basic_09.json create mode 100644 base/jsoncpp/test/data/test_comment_01.expected create mode 100644 base/jsoncpp/test/data/test_comment_01.json create mode 100644 base/jsoncpp/test/data/test_comment_02.expected create mode 100644 base/jsoncpp/test/data/test_comment_02.json create mode 100644 base/jsoncpp/test/data/test_complex_01.expected create mode 100644 base/jsoncpp/test/data/test_complex_01.json create mode 100644 base/jsoncpp/test/data/test_integer_01.expected create mode 100644 base/jsoncpp/test/data/test_integer_01.json create mode 100644 base/jsoncpp/test/data/test_integer_02.expected create mode 100644 base/jsoncpp/test/data/test_integer_02.json create mode 100644 base/jsoncpp/test/data/test_integer_03.expected create mode 100644 base/jsoncpp/test/data/test_integer_03.json create mode 100644 base/jsoncpp/test/data/test_integer_04.expected create mode 100644 base/jsoncpp/test/data/test_integer_04.json create mode 100644 base/jsoncpp/test/data/test_integer_05.expected create mode 100644 base/jsoncpp/test/data/test_integer_05.json create mode 100644 base/jsoncpp/test/data/test_integer_06_64bits.expected create mode 100644 base/jsoncpp/test/data/test_integer_06_64bits.json create mode 100644 base/jsoncpp/test/data/test_integer_07_64bits.expected create mode 100644 base/jsoncpp/test/data/test_integer_07_64bits.json create mode 100644 base/jsoncpp/test/data/test_integer_08_64bits.expected create mode 100644 base/jsoncpp/test/data/test_integer_08_64bits.json create mode 100644 base/jsoncpp/test/data/test_large_01.expected create mode 100644 base/jsoncpp/test/data/test_large_01.json create mode 100644 base/jsoncpp/test/data/test_object_01.expected create mode 100644 base/jsoncpp/test/data/test_object_01.json create mode 100644 base/jsoncpp/test/data/test_object_02.expected create mode 100644 base/jsoncpp/test/data/test_object_02.json create mode 100644 base/jsoncpp/test/data/test_object_03.expected create mode 100644 base/jsoncpp/test/data/test_object_03.json create mode 100644 base/jsoncpp/test/data/test_object_04.expected create mode 100644 base/jsoncpp/test/data/test_object_04.json create mode 100644 base/jsoncpp/test/data/test_preserve_comment_01.expected create mode 100644 base/jsoncpp/test/data/test_preserve_comment_01.json create mode 100644 base/jsoncpp/test/data/test_real_01.expected create mode 100644 base/jsoncpp/test/data/test_real_01.json create mode 100644 base/jsoncpp/test/data/test_real_02.expected create mode 100644 base/jsoncpp/test/data/test_real_02.json create mode 100644 base/jsoncpp/test/data/test_real_03.expected create mode 100644 base/jsoncpp/test/data/test_real_03.json create mode 100644 base/jsoncpp/test/data/test_real_04.expected create mode 100644 base/jsoncpp/test/data/test_real_04.json create mode 100644 base/jsoncpp/test/data/test_real_05.expected create mode 100644 base/jsoncpp/test/data/test_real_05.json create mode 100644 base/jsoncpp/test/data/test_real_06.expected create mode 100644 base/jsoncpp/test/data/test_real_06.json create mode 100644 base/jsoncpp/test/data/test_real_07.expected create mode 100644 base/jsoncpp/test/data/test_real_07.json create mode 100644 base/jsoncpp/test/data/test_real_08.expected create mode 100644 base/jsoncpp/test/data/test_real_08.json create mode 100644 base/jsoncpp/test/data/test_real_09.expected create mode 100644 base/jsoncpp/test/data/test_real_09.json create mode 100644 base/jsoncpp/test/data/test_real_10.expected create mode 100644 
base/jsoncpp/test/data/test_real_10.json create mode 100644 base/jsoncpp/test/data/test_real_11.expected create mode 100644 base/jsoncpp/test/data/test_real_11.json create mode 100644 base/jsoncpp/test/data/test_real_12.expected create mode 100644 base/jsoncpp/test/data/test_real_12.json create mode 100644 base/jsoncpp/test/data/test_string_01.expected create mode 100644 base/jsoncpp/test/data/test_string_01.json create mode 100644 base/jsoncpp/test/data/test_string_02.expected create mode 100644 base/jsoncpp/test/data/test_string_02.json create mode 100644 base/jsoncpp/test/data/test_string_03.expected create mode 100644 base/jsoncpp/test/data/test_string_03.json create mode 100644 base/jsoncpp/test/data/test_string_04.expected create mode 100644 base/jsoncpp/test/data/test_string_04.json create mode 100644 base/jsoncpp/test/data/test_string_05.expected create mode 100644 base/jsoncpp/test/data/test_string_05.json create mode 100644 base/jsoncpp/test/data/test_string_unicode_01.expected create mode 100644 base/jsoncpp/test/data/test_string_unicode_01.json create mode 100644 base/jsoncpp/test/data/test_string_unicode_02.expected create mode 100644 base/jsoncpp/test/data/test_string_unicode_02.json create mode 100644 base/jsoncpp/test/data/test_string_unicode_03.expected create mode 100644 base/jsoncpp/test/data/test_string_unicode_03.json create mode 100644 base/jsoncpp/test/data/test_string_unicode_04.expected create mode 100644 base/jsoncpp/test/data/test_string_unicode_04.json create mode 100644 base/jsoncpp/test/data/test_string_unicode_05.expected create mode 100644 base/jsoncpp/test/data/test_string_unicode_05.json create mode 100644 base/jsoncpp/test/generate_expected.py create mode 100644 base/jsoncpp/test/jsonchecker/fail1.json create mode 100644 base/jsoncpp/test/jsonchecker/fail10.json create mode 100644 base/jsoncpp/test/jsonchecker/fail11.json create mode 100644 base/jsoncpp/test/jsonchecker/fail12.json create mode 100644 base/jsoncpp/test/jsonchecker/fail13.json create mode 100644 base/jsoncpp/test/jsonchecker/fail14.json create mode 100644 base/jsoncpp/test/jsonchecker/fail15.json create mode 100644 base/jsoncpp/test/jsonchecker/fail16.json create mode 100644 base/jsoncpp/test/jsonchecker/fail17.json create mode 100644 base/jsoncpp/test/jsonchecker/fail18.json create mode 100644 base/jsoncpp/test/jsonchecker/fail19.json create mode 100644 base/jsoncpp/test/jsonchecker/fail2.json create mode 100644 base/jsoncpp/test/jsonchecker/fail20.json create mode 100644 base/jsoncpp/test/jsonchecker/fail21.json create mode 100644 base/jsoncpp/test/jsonchecker/fail22.json create mode 100644 base/jsoncpp/test/jsonchecker/fail23.json create mode 100644 base/jsoncpp/test/jsonchecker/fail24.json create mode 100644 base/jsoncpp/test/jsonchecker/fail25.json create mode 100644 base/jsoncpp/test/jsonchecker/fail26.json create mode 100644 base/jsoncpp/test/jsonchecker/fail27.json create mode 100644 base/jsoncpp/test/jsonchecker/fail28.json create mode 100644 base/jsoncpp/test/jsonchecker/fail29.json create mode 100644 base/jsoncpp/test/jsonchecker/fail3.json create mode 100644 base/jsoncpp/test/jsonchecker/fail30.json create mode 100644 base/jsoncpp/test/jsonchecker/fail31.json create mode 100644 base/jsoncpp/test/jsonchecker/fail32.json create mode 100644 base/jsoncpp/test/jsonchecker/fail33.json create mode 100644 base/jsoncpp/test/jsonchecker/fail4.json create mode 100644 base/jsoncpp/test/jsonchecker/fail5.json create mode 100644 base/jsoncpp/test/jsonchecker/fail6.json create mode 100644 
base/jsoncpp/test/jsonchecker/fail7.json create mode 100644 base/jsoncpp/test/jsonchecker/fail8.json create mode 100644 base/jsoncpp/test/jsonchecker/fail9.json create mode 100644 base/jsoncpp/test/jsonchecker/pass1.json create mode 100644 base/jsoncpp/test/jsonchecker/pass2.json create mode 100644 base/jsoncpp/test/jsonchecker/pass3.json create mode 100644 base/jsoncpp/test/jsonchecker/readme.txt create mode 100644 base/jsoncpp/test/pyjsontestrunner.py create mode 100644 base/jsoncpp/test/runjsontests.py create mode 100644 base/jsoncpp/test/rununittests.py create mode 100644 base/jsoncpp/version create mode 100644 base/rtc_base/array_view.h create mode 100644 base/rtc_base/arraysize.h create mode 100644 base/rtc_base/atomic_ops.h create mode 100644 base/rtc_base/checks.cc create mode 100644 base/rtc_base/checks.h create mode 100644 base/rtc_base/constructor_magic.h create mode 100644 base/rtc_base/critical_section.cc create mode 100644 base/rtc_base/critical_section.h create mode 100644 base/rtc_base/deprecation.h create mode 100644 base/rtc_base/gtest_prod_util.h create mode 100644 base/rtc_base/logging.cc create mode 100644 base/rtc_base/logging.h create mode 100644 base/rtc_base/memory/aligned_malloc.cc create mode 100644 base/rtc_base/memory/aligned_malloc.h create mode 100644 base/rtc_base/numerics/safe_compare.h create mode 100644 base/rtc_base/numerics/safe_conversions.h create mode 100644 base/rtc_base/numerics/safe_conversions_impl.h create mode 100644 base/rtc_base/numerics/safe_minmax.h create mode 100644 base/rtc_base/platform_thread_types.cc create mode 100644 base/rtc_base/platform_thread_types.h create mode 100644 base/rtc_base/race_checker.cc create mode 100644 base/rtc_base/race_checker.h create mode 100644 base/rtc_base/sanitizer.h create mode 100644 base/rtc_base/string_encode.cc create mode 100644 base/rtc_base/string_encode.h create mode 100644 base/rtc_base/string_to_number.cc create mode 100644 base/rtc_base/string_to_number.h create mode 100644 base/rtc_base/string_utils.cc create mode 100644 base/rtc_base/string_utils.h create mode 100644 base/rtc_base/strings/json.cc create mode 100644 base/rtc_base/strings/json.h create mode 100644 base/rtc_base/strings/string_builder.cc create mode 100644 base/rtc_base/strings/string_builder.h create mode 100644 base/rtc_base/swap_queue.h create mode 100644 base/rtc_base/system/arch.h create mode 100644 base/rtc_base/system/file_wrapper.cc create mode 100644 base/rtc_base/system/file_wrapper.h create mode 100644 base/rtc_base/system/inline.h create mode 100644 base/rtc_base/system/rtc_export.h create mode 100644 base/rtc_base/system/unused.h create mode 100644 base/rtc_base/thread_annotations.h create mode 100644 base/rtc_base/time_utils.cc create mode 100644 base/rtc_base/time_utils.h create mode 100644 base/rtc_base/type_traits.h create mode 100644 base/system_wrappers/include/cpu_features_wrapper.h create mode 100644 base/system_wrappers/include/field_trial.h create mode 100644 base/system_wrappers/include/metrics.h create mode 100644 base/system_wrappers/source/cpu_features.cc create mode 100644 base/system_wrappers/source/cpu_features_android.c create mode 100644 base/system_wrappers/source/cpu_features_linux.c create mode 100644 base/system_wrappers/source/field_trial.cc create mode 100644 base/system_wrappers/source/metrics.cc create mode 100644 build_config.h create mode 100644 demo.filters create mode 100644 demo.vcxproj create mode 100644 demo.vcxproj.filters create mode 100644 demo.vcxproj.user create mode 100644 
demo/demo.cc create mode 100644 demo/wavreader.c create mode 100644 demo/wavreader.h create mode 100644 demo/wavwriter.c create mode 100644 demo/wavwriter.h diff --git a/AEC3.sln b/AEC3.sln new file mode 100644 index 0000000..4e76bc0 --- /dev/null +++ b/AEC3.sln @@ -0,0 +1,62 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "api", "api.vcxproj", "{40294AC8-1111-4B68-8666-D8ABFFC2DAEE}" + ProjectSection(ProjectDependencies) = postProject + {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F} = {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F} + {2D2114D9-800B-4C74-A770-119325A6A00B} = {2D2114D9-800B-4C74-A770-119325A6A00B} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "AEC3", "AEC3.vcxproj", "{2D2114D9-800B-4C74-A770-119325A6A00B}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "base", "base.vcxproj", "{C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "demo", "demo.vcxproj", "{DF302EC6-FC8F-4BBB-9807-E943E9D0AA90}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {40294AC8-1111-4B68-8666-D8ABFFC2DAEE}.Debug|x64.ActiveCfg = Debug|x64 + {40294AC8-1111-4B68-8666-D8ABFFC2DAEE}.Debug|x64.Build.0 = Debug|x64 + {40294AC8-1111-4B68-8666-D8ABFFC2DAEE}.Debug|x86.ActiveCfg = Debug|Win32 + {40294AC8-1111-4B68-8666-D8ABFFC2DAEE}.Debug|x86.Build.0 = Debug|Win32 + {40294AC8-1111-4B68-8666-D8ABFFC2DAEE}.Release|x64.ActiveCfg = Release|x64 + {40294AC8-1111-4B68-8666-D8ABFFC2DAEE}.Release|x64.Build.0 = Release|x64 + {40294AC8-1111-4B68-8666-D8ABFFC2DAEE}.Release|x86.ActiveCfg = Release|Win32 + {40294AC8-1111-4B68-8666-D8ABFFC2DAEE}.Release|x86.Build.0 = Release|Win32 + {2D2114D9-800B-4C74-A770-119325A6A00B}.Debug|x64.ActiveCfg = Debug|x64 + {2D2114D9-800B-4C74-A770-119325A6A00B}.Debug|x64.Build.0 = Debug|x64 + {2D2114D9-800B-4C74-A770-119325A6A00B}.Debug|x86.ActiveCfg = Debug|Win32 + {2D2114D9-800B-4C74-A770-119325A6A00B}.Debug|x86.Build.0 = Debug|Win32 + {2D2114D9-800B-4C74-A770-119325A6A00B}.Release|x64.ActiveCfg = Release|x64 + {2D2114D9-800B-4C74-A770-119325A6A00B}.Release|x64.Build.0 = Release|x64 + {2D2114D9-800B-4C74-A770-119325A6A00B}.Release|x86.ActiveCfg = Release|Win32 + {2D2114D9-800B-4C74-A770-119325A6A00B}.Release|x86.Build.0 = Release|Win32 + {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F}.Debug|x64.ActiveCfg = Debug|x64 + {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F}.Debug|x64.Build.0 = Debug|x64 + {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F}.Debug|x86.ActiveCfg = Debug|Win32 + {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F}.Debug|x86.Build.0 = Debug|Win32 + {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F}.Release|x64.ActiveCfg = Release|x64 + {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F}.Release|x64.Build.0 = Release|x64 + {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F}.Release|x86.ActiveCfg = Release|Win32 + {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F}.Release|x86.Build.0 = Release|Win32 + {DF302EC6-FC8F-4BBB-9807-E943E9D0AA90}.Debug|x64.ActiveCfg = Debug|x64 + {DF302EC6-FC8F-4BBB-9807-E943E9D0AA90}.Debug|x64.Build.0 = Debug|x64 + {DF302EC6-FC8F-4BBB-9807-E943E9D0AA90}.Debug|x86.ActiveCfg = Debug|Win32 + {DF302EC6-FC8F-4BBB-9807-E943E9D0AA90}.Debug|x86.Build.0 = Debug|Win32 + 
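The solution file above wires together four projects — api, AEC3, base, and demo — in Debug/Release configurations for x86 and x64, with api declaring build-order dependencies on the base and AEC3 projects. As a hedged sketch only (assuming Visual Studio 2015, i.e. the "Visual Studio 14" / v140 toolset the projects reference, is installed; this patch does not document a build command), a command-line build would typically be:

    msbuild AEC3.sln /p:Configuration=Release /p:Platform=x64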
diff --git a/AEC3.vcxproj b/AEC3.vcxproj
new file mode 100644
index 0000000..99745c8
--- /dev/null
+++ b/AEC3.vcxproj
@@ -0,0 +1,308 @@
+[308 added lines: Visual C++ project XML whose element markup was stripped during extraction; per-file item lists for the project's sources and headers are no longer recoverable. Surviving values: project GUID {2D2114D9-800B-4C74-A770-119325A6A00B}, name AEC3, Windows SDK 8.1; all four Debug/Release x86/x64 configurations build a StaticLibrary with PlatformToolset v140 and MultiByte character set; OutDir $(SolutionDir)output\$(Configuration)_$(Platform)\; include paths $(SolutionDir), $(SolutionDir)base, $(SolutionDir)base\abseil, $(SolutionDir)base\jsoncpp\include, $(SolutionDir)audio_processing, $(SolutionDir)audio_processing\aec3; preprocessor definitions WEBRTC_WIN;WIN32;_WINDOWS;_DEBUG (DEBUG in the Release sections);NOMINMAX;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;WIN32_LEAN_AND_MEAN; disabled warnings 4005;4068;4180;4244;4267;4503;4800; the x86 Debug configuration links rtc_base.lib from $(SolutionDir)libs\$(Configuration)_$(Platform)\.]
\ No newline at end of file
diff --git a/AEC3.vcxproj.filters b/AEC3.vcxproj.filters
new file mode 100644
index 0000000..874c90c
--- /dev/null
+++ b/AEC3.vcxproj.filters
@@ -0,0 +1,483 @@
+[483 added lines: filter XML whose element markup was stripped during extraction; per-file assignments are no longer recoverable. Surviving values: six filter-group GUIDs ({543ed4ce-93b1-4f37-9f97-85e493542a24}, {5bfa6298-ae53-4dd1-bc35-150a560206f6}, {345fbea9-6bc1-45cb-a86f-f5a95f9823de}, {84332578-0b17-4c77-92fd-e5c4d3e87793}, {45d9278e-7ca9-422e-9cb1-976693b3cd81}, {13b0466b-a3aa-482f-bf43-b4e06665ff62}) and repeated folder assignments under audio_processing, audio_processing\aec3, audio_processing\utility, audio_processing\logging, audio_processing\resampler, and audio_processing\include.]
\ No newline at end of file
diff --git a/AEC3.vcxproj.user b/AEC3.vcxproj.user
new file mode 100644
index 0000000..abe8dd8
--- /dev/null
+++ b/AEC3.vcxproj.user
@@ -0,0 +1,4 @@
+[4 added lines: an effectively empty per-user project settings file; XML markup stripped during extraction.]
\ No newline at end of file
diff --git a/api.filters b/api.filters
new file mode 100644
index 0000000..03c8628
--- /dev/null
+++ b/api.filters
@@ -0,0 +1,17 @@
+[17 added lines: the standard Visual Studio filter definitions; XML markup stripped during extraction. Surviving values: Source Files {4FC737F1-C7A5-4376-A066-2A32D752A2FF} with extensions cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx, Header Files {93995380-89BD-4b04-88EB-625FBE52EBFB} with extensions h;hh;hpp;hxx;hm;inl;inc;xsd, and Resource Files {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} with extensions rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms.]
\ No newline at end of file
diff --git a/api.vcxproj b/api.vcxproj
new file mode 100644
index 0000000..d87476c
--- /dev/null
+++ b/api.vcxproj
@@ -0,0 +1,170 @@
+[170 added lines: Visual C++ project XML whose element markup was stripped during extraction. Surviving values: project GUID {40294AC8-1111-4B68-8666-D8ABFFC2DAEE}, name field rtc_base, Windows SDK 8.1, project references to {2d2114d9-800b-4c74-a770-119325a6a00b} (AEC3) and {c0e01e32-a6a3-439d-a3d5-becb3efae67f} (base); all four configurations build a StaticLibrary with PlatformToolset v140 and MultiByte character set; OutDir $(SolutionDir)output\$(Configuration)_$(Platform)\; include paths $(SolutionDir), $(SolutionDir)base\, $(SolutionDir)base\abseil, $(SolutionDir)base\jsoncpp\include; the same WEBRTC_WIN;WIN32;_WINDOWS;_DEBUG (DEBUG in Release);NOMINMAX;... definitions and disabled warnings 4005;4068;4180;4244;4267;4503;4800 as AEC3.vcxproj; links winmm.lib in every configuration.]
\ No newline at end of file
diff --git a/api.vcxproj.filters b/api.vcxproj.filters
new file mode 100644
index 0000000..081099c
--- /dev/null
+++ b/api.vcxproj.filters
@@ -0,0 +1,33 @@
+[33 added lines: filter XML whose element markup was stripped during extraction. Surviving values: one filter-group GUID {3c7fd86a-aaf5-4b70-9a89-89ecf78fac3f} and "api" folder assignments for the project's three source files and four headers.]
\ No newline at end of file
diff --git a/api/echo_canceller3_config.cc b/api/echo_canceller3_config.cc
new file mode 100644
index 0000000..bde37b0
--- /dev/null
+++ b/api/echo_canceller3_config.cc
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "api/echo_canceller3_config.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+bool Limit(float* value, float min, float max) {
+  float clamped = rtc::SafeClamp(*value, min, max);
+  clamped = std::isfinite(clamped) ? clamped : min;
+  bool res = *value == clamped;
+  *value = clamped;
+  return res;
+}
+
+bool Limit(size_t* value, size_t min, size_t max) {
+  size_t clamped = rtc::SafeClamp(*value, min, max);
+  bool res = *value == clamped;
+  *value = clamped;
+  return res;
+}
+
+bool Limit(int* value, int min, int max) {
+  int clamped = rtc::SafeClamp(*value, min, max);
+  bool res = *value == clamped;
+  *value = clamped;
+  return res;
+}
+
+bool FloorLimit(size_t* value, size_t min) {
+  size_t clamped = *value >= min ? *value : min;
+  bool res = *value == clamped;
+  *value = clamped;
+  return res;
+}
+
+}  // namespace
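+
+// The Limit()/FloorLimit() helpers above clamp *value into the given range
+// and return true only when the incoming value was already valid, so they
+// repair the configuration as a side effect of checking it. Validate() below
+// deliberately accumulates their results with bitwise & rather than logical
+// && so that no call is short-circuited away: every field is sanitized even
+// after the first out-of-range field has been found.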
+
+EchoCanceller3Config::EchoCanceller3Config() = default;
+EchoCanceller3Config::EchoCanceller3Config(const EchoCanceller3Config& e) =
+    default;
+EchoCanceller3Config& EchoCanceller3Config::operator=(
+    const EchoCanceller3Config& e) = default;
+EchoCanceller3Config::Delay::Delay() = default;
+EchoCanceller3Config::Delay::Delay(const EchoCanceller3Config::Delay& e) =
+    default;
+EchoCanceller3Config::Delay& EchoCanceller3Config::Delay::operator=(
+    const Delay& e) = default;
+
+EchoCanceller3Config::EchoModel::EchoModel() = default;
+EchoCanceller3Config::EchoModel::EchoModel(
+    const EchoCanceller3Config::EchoModel& e) = default;
+EchoCanceller3Config::EchoModel& EchoCanceller3Config::EchoModel::operator=(
+    const EchoModel& e) = default;
+
+EchoCanceller3Config::Suppressor::Suppressor() = default;
+EchoCanceller3Config::Suppressor::Suppressor(
+    const EchoCanceller3Config::Suppressor& e) = default;
+EchoCanceller3Config::Suppressor& EchoCanceller3Config::Suppressor::operator=(
+    const Suppressor& e) = default;
+
+EchoCanceller3Config::Suppressor::MaskingThresholds::MaskingThresholds(
+    float enr_transparent,
+    float enr_suppress,
+    float emr_transparent)
+    : enr_transparent(enr_transparent),
+      enr_suppress(enr_suppress),
+      emr_transparent(emr_transparent) {}
+EchoCanceller3Config::Suppressor::MaskingThresholds::MaskingThresholds(
+    const EchoCanceller3Config::Suppressor::MaskingThresholds& e) = default;
+EchoCanceller3Config::Suppressor::MaskingThresholds&
+EchoCanceller3Config::Suppressor::MaskingThresholds::operator=(
+    const MaskingThresholds& e) = default;
+
+EchoCanceller3Config::Suppressor::Tuning::Tuning(MaskingThresholds mask_lf,
+                                                 MaskingThresholds mask_hf,
+                                                 float max_inc_factor,
+                                                 float max_dec_factor_lf)
+    : mask_lf(mask_lf),
+      mask_hf(mask_hf),
+      max_inc_factor(max_inc_factor),
+      max_dec_factor_lf(max_dec_factor_lf) {}
+EchoCanceller3Config::Suppressor::Tuning::Tuning(
+    const EchoCanceller3Config::Suppressor::Tuning& e) = default;
+EchoCanceller3Config::Suppressor::Tuning&
+EchoCanceller3Config::Suppressor::Tuning::operator=(const Tuning& e) = default;
+
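+// Validate() both checks and repairs: every out-of-range field is clamped
+// into its legal range in place, and the return value is true only if the
+// supplied config was already fully valid.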
FloorLimit(&c->filter.main_initial.length_blocks, 1); + res = res & Limit(&c->filter.main_initial.leakage_converged, 0.f, 1000.f); + res = res & Limit(&c->filter.main_initial.leakage_diverged, 0.f, 1000.f); + res = res & Limit(&c->filter.main_initial.error_floor, 0.f, 1000.f); + res = res & Limit(&c->filter.main_initial.error_ceil, 0.f, 100000000.f); + res = res & Limit(&c->filter.main_initial.noise_gate, 0.f, 100000000.f); + + if (c->filter.main.length_blocks < c->filter.main_initial.length_blocks) { + c->filter.main_initial.length_blocks = c->filter.main.length_blocks; + res = false; + } + + res = res & FloorLimit(&c->filter.shadow.length_blocks, 1); + res = res & Limit(&c->filter.shadow.rate, 0.f, 1.f); + res = res & Limit(&c->filter.shadow.noise_gate, 0.f, 100000000.f); + + res = res & FloorLimit(&c->filter.shadow_initial.length_blocks, 1); + res = res & Limit(&c->filter.shadow_initial.rate, 0.f, 1.f); + res = res & Limit(&c->filter.shadow_initial.noise_gate, 0.f, 100000000.f); + + if (c->filter.shadow.length_blocks < c->filter.shadow_initial.length_blocks) { + c->filter.shadow_initial.length_blocks = c->filter.shadow.length_blocks; + res = false; + } + + res = res & Limit(&c->filter.config_change_duration_blocks, 0, 100000); + res = res & Limit(&c->filter.initial_state_seconds, 0.f, 100.f); + + res = res & Limit(&c->erle.min, 1.f, 100000.f); + res = res & Limit(&c->erle.max_l, 1.f, 100000.f); + res = res & Limit(&c->erle.max_h, 1.f, 100000.f); + if (c->erle.min > c->erle.max_l || c->erle.min > c->erle.max_h) { + c->erle.min = std::min(c->erle.max_l, c->erle.max_h); + res = false; + } + res = res & Limit(&c->erle.num_sections, 1, c->filter.main.length_blocks); + + res = res & Limit(&c->ep_strength.default_gain, 0.f, 1000000.f); + res = res & Limit(&c->ep_strength.default_len, -1.f, 1.f); + + res = + res & Limit(&c->echo_audibility.low_render_limit, 0.f, 32768.f * 32768.f); + res = res & + Limit(&c->echo_audibility.normal_render_limit, 0.f, 32768.f * 32768.f); + res = res & Limit(&c->echo_audibility.floor_power, 0.f, 32768.f * 32768.f); + res = res & Limit(&c->echo_audibility.audibility_threshold_lf, 0.f, + 32768.f * 32768.f); + res = res & Limit(&c->echo_audibility.audibility_threshold_mf, 0.f, + 32768.f * 32768.f); + res = res & Limit(&c->echo_audibility.audibility_threshold_hf, 0.f, + 32768.f * 32768.f); + + res = res & + Limit(&c->render_levels.active_render_limit, 0.f, 32768.f * 32768.f); + res = res & Limit(&c->render_levels.poor_excitation_render_limit, 0.f, + 32768.f * 32768.f); + res = res & Limit(&c->render_levels.poor_excitation_render_limit_ds8, 0.f, + 32768.f * 32768.f); + + res = res & Limit(&c->echo_model.noise_floor_hold, 0, 1000); + res = res & Limit(&c->echo_model.min_noise_floor_power, 0, 2000000.f); + res = res & Limit(&c->echo_model.stationary_gate_slope, 0, 1000000.f); + res = res & Limit(&c->echo_model.noise_gate_power, 0, 1000000.f); + res = res & Limit(&c->echo_model.noise_gate_slope, 0, 1000000.f); + res = res & Limit(&c->echo_model.render_pre_window_size, 0, 100); + res = res & Limit(&c->echo_model.render_post_window_size, 0, 100); + + res = res & Limit(&c->suppressor.nearend_average_blocks, 1, 5000); + + res = res & + Limit(&c->suppressor.normal_tuning.mask_lf.enr_transparent, 0.f, 100.f); + res = res & + Limit(&c->suppressor.normal_tuning.mask_lf.enr_suppress, 0.f, 100.f); + res = res & + Limit(&c->suppressor.normal_tuning.mask_lf.emr_transparent, 0.f, 100.f); + res = res & + Limit(&c->suppressor.normal_tuning.mask_hf.enr_transparent, 0.f, 100.f); + res = 
res & + Limit(&c->suppressor.normal_tuning.mask_hf.enr_suppress, 0.f, 100.f); + res = res & + Limit(&c->suppressor.normal_tuning.mask_hf.emr_transparent, 0.f, 100.f); + res = res & Limit(&c->suppressor.normal_tuning.max_inc_factor, 0.f, 100.f); + res = res & Limit(&c->suppressor.normal_tuning.max_dec_factor_lf, 0.f, 100.f); + + res = res & Limit(&c->suppressor.nearend_tuning.mask_lf.enr_transparent, 0.f, + 100.f); + res = res & + Limit(&c->suppressor.nearend_tuning.mask_lf.enr_suppress, 0.f, 100.f); + res = res & Limit(&c->suppressor.nearend_tuning.mask_lf.emr_transparent, 0.f, + 100.f); + res = res & Limit(&c->suppressor.nearend_tuning.mask_hf.enr_transparent, 0.f, + 100.f); + res = res & + Limit(&c->suppressor.nearend_tuning.mask_hf.enr_suppress, 0.f, 100.f); + res = res & Limit(&c->suppressor.nearend_tuning.mask_hf.emr_transparent, 0.f, + 100.f); + res = res & Limit(&c->suppressor.nearend_tuning.max_inc_factor, 0.f, 100.f); + res = + res & Limit(&c->suppressor.nearend_tuning.max_dec_factor_lf, 0.f, 100.f); + + res = res & Limit(&c->suppressor.dominant_nearend_detection.enr_threshold, + 0.f, 1000000.f); + res = res & Limit(&c->suppressor.dominant_nearend_detection.snr_threshold, + 0.f, 1000000.f); + res = res & Limit(&c->suppressor.dominant_nearend_detection.hold_duration, 0, + 10000); + res = res & Limit(&c->suppressor.dominant_nearend_detection.trigger_threshold, + 0, 10000); + + res = res & + Limit(&c->suppressor.subband_nearend_detection.nearend_average_blocks, + 1, 1024); + res = + res & Limit(&c->suppressor.subband_nearend_detection.subband1.low, 0, 65); + res = res & Limit(&c->suppressor.subband_nearend_detection.subband1.high, + c->suppressor.subband_nearend_detection.subband1.low, 65); + res = + res & Limit(&c->suppressor.subband_nearend_detection.subband2.low, 0, 65); + res = res & Limit(&c->suppressor.subband_nearend_detection.subband2.high, + c->suppressor.subband_nearend_detection.subband2.low, 65); + res = res & Limit(&c->suppressor.subband_nearend_detection.nearend_threshold, + 0.f, 1.e24f); + res = res & Limit(&c->suppressor.subband_nearend_detection.snr_threshold, 0.f, + 1.e24f); + + res = res & Limit(&c->suppressor.high_bands_suppression.enr_threshold, 0.f, + 1000000.f); + res = res & Limit(&c->suppressor.high_bands_suppression.max_gain_during_echo, + 0.f, 1.f); + res = res & Limit(&c->suppressor.high_bands_suppression + .anti_howling_activation_threshold, + 0.f, 32768.f * 32768.f); + res = res & Limit(&c->suppressor.high_bands_suppression.anti_howling_gain, + 0.f, 1.f); + + res = res & Limit(&c->suppressor.floor_first_increase, 0.f, 1000000.f); + + return res; +} +} // namespace webrtc diff --git a/api/echo_canceller3_config.h b/api/echo_canceller3_config.h new file mode 100644 index 0000000..a63318f --- /dev/null +++ b/api/echo_canceller3_config.h @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#ifndef API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
+#define API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
+
+#include <stddef.h>  // size_t
+
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// Configuration struct for EchoCanceller3
+struct RTC_EXPORT EchoCanceller3Config {
+  // Checks and updates the config parameters to lie within (mostly) reasonable
+  // ranges. Returns true if and only if the config did not need to be changed.
+  static bool Validate(EchoCanceller3Config* config);
+
+  EchoCanceller3Config();
+  EchoCanceller3Config(const EchoCanceller3Config& e);
+  EchoCanceller3Config& operator=(const EchoCanceller3Config& other);
+
+  struct Buffering {
+    size_t excess_render_detection_interval_blocks = 250;
+    size_t max_allowed_excess_render_blocks = 8;
+  } buffering;
+
+  struct Delay {
+    Delay();
+    Delay(const Delay& e);
+    Delay& operator=(const Delay& e);
+    size_t default_delay = 5;
+    size_t down_sampling_factor = 4;
+    size_t num_filters = 5;
+    size_t delay_headroom_samples = 32;
+    size_t hysteresis_limit_blocks = 1;
+    size_t fixed_capture_delay_samples = 0;
+    float delay_estimate_smoothing = 0.7f;
+    float delay_candidate_detection_threshold = 0.2f;
+    struct DelaySelectionThresholds {
+      int initial;
+      int converged;
+    } delay_selection_thresholds = {5, 20};
+    bool use_external_delay_estimator = false;
+    bool log_warning_on_delay_changes = false;
+    struct AlignmentMixing {
+      bool downmix;
+      bool adaptive_selection;
+      float activity_power_threshold;
+      bool prefer_first_two_channels;
+    };
+    AlignmentMixing render_alignment_mixing = {false, true, 10000.f, true};
+    AlignmentMixing capture_alignment_mixing = {false, true, 10000.f, false};
+  } delay;
+
+  struct Filter {
+    struct MainConfiguration {
+      size_t length_blocks;
+      float leakage_converged;
+      float leakage_diverged;
+      float error_floor;
+      float error_ceil;
+      float noise_gate;
+    };
+
+    struct ShadowConfiguration {
+      size_t length_blocks;
+      float rate;
+      float noise_gate;
+    };
+
+    MainConfiguration main = {13, 0.00005f, 0.05f, 0.001f, 2.f, 20075344.f};
+    ShadowConfiguration shadow = {13, 0.7f, 20075344.f};
+
+    MainConfiguration main_initial = {12,     0.005f, 0.5f,
+                                      0.001f, 2.f,    20075344.f};
+    ShadowConfiguration shadow_initial = {12, 0.9f, 20075344.f};
+
+    size_t config_change_duration_blocks = 250;
+    float initial_state_seconds = 2.5f;
+    bool conservative_initial_phase = false;
+    bool enable_shadow_filter_output_usage = true;
+    bool use_linear_filter = true;
+    bool export_linear_aec_output = false;
+  } filter;
+
+  struct Erle {
+    float min = 1.f;
+    float max_l = 4.f;
+    float max_h = 1.5f;
+    bool onset_detection = true;
+    size_t num_sections = 1;
+    bool clamp_quality_estimate_to_zero = true;
+    bool clamp_quality_estimate_to_one = true;
+  } erle;
+
+  struct EpStrength {
+    float default_gain = 1.f;
+    float default_len = 0.83f;
+    bool echo_can_saturate = true;
+    bool bounded_erl = false;
+  } ep_strength;
+
+  struct EchoAudibility {
+    float low_render_limit = 4 * 64.f;
+    float normal_render_limit = 64.f;
+    float floor_power = 2 * 64.f;
+    float audibility_threshold_lf = 10;
+    float audibility_threshold_mf = 10;
+    float audibility_threshold_hf = 10;
+    bool use_stationarity_properties = false;
+    bool use_stationarity_properties_at_init = false;
+  } echo_audibility;
+
+  struct RenderLevels {
+    float active_render_limit = 100.f;
+    float poor_excitation_render_limit = 150.f;
+    float poor_excitation_render_limit_ds8 = 20.f;
+    float render_power_gain_db = 0.f;
+  } render_levels;
+
+  struct EchoRemovalControl {
+    bool has_clock_drift = false;
+    bool
linear_and_stable_echo_path = false; + } echo_removal_control; + + struct EchoModel { + EchoModel(); + EchoModel(const EchoModel& e); + EchoModel& operator=(const EchoModel& e); + size_t noise_floor_hold = 50; + float min_noise_floor_power = 1638400.f; + float stationary_gate_slope = 10.f; + float noise_gate_power = 27509.42f; + float noise_gate_slope = 0.3f; + size_t render_pre_window_size = 1; + size_t render_post_window_size = 1; + } echo_model; + + struct Suppressor { + Suppressor(); + Suppressor(const Suppressor& e); + Suppressor& operator=(const Suppressor& e); + + size_t nearend_average_blocks = 4; + + struct MaskingThresholds { + MaskingThresholds(float enr_transparent, + float enr_suppress, + float emr_transparent); + MaskingThresholds(const MaskingThresholds& e); + MaskingThresholds& operator=(const MaskingThresholds& e); + float enr_transparent; + float enr_suppress; + float emr_transparent; + }; + + struct Tuning { + Tuning(MaskingThresholds mask_lf, + MaskingThresholds mask_hf, + float max_inc_factor, + float max_dec_factor_lf); + Tuning(const Tuning& e); + Tuning& operator=(const Tuning& e); + MaskingThresholds mask_lf; + MaskingThresholds mask_hf; + float max_inc_factor; + float max_dec_factor_lf; + }; + + Tuning normal_tuning = Tuning(MaskingThresholds(.3f, .4f, .3f), + MaskingThresholds(.07f, .1f, .3f), + 2.0f, + 0.25f); + Tuning nearend_tuning = Tuning(MaskingThresholds(1.09f, 1.1f, .3f), + MaskingThresholds(.1f, .3f, .3f), + 2.0f, + 0.25f); + + struct DominantNearendDetection { + float enr_threshold = .25f; + float enr_exit_threshold = 10.f; + float snr_threshold = 30.f; + int hold_duration = 50; + int trigger_threshold = 12; + bool use_during_initial_phase = true; + } dominant_nearend_detection; + + struct SubbandNearendDetection { + size_t nearend_average_blocks = 1; + struct SubbandRegion { + size_t low; + size_t high; + }; + SubbandRegion subband1 = {1, 1}; + SubbandRegion subband2 = {1, 1}; + float nearend_threshold = 1.f; + float snr_threshold = 1.f; + } subband_nearend_detection; + + bool use_subband_nearend_detection = false; + + struct HighBandsSuppression { + float enr_threshold = 1.f; + float max_gain_during_echo = 1.f; + float anti_howling_activation_threshold = 25.f; + float anti_howling_gain = 0.01f; + } high_bands_suppression; + + float floor_first_increase = 0.00001f; + } suppressor; +}; +} // namespace webrtc + +#endif // API_AUDIO_ECHO_CANCELLER3_CONFIG_H_ diff --git a/api/echo_canceller3_config_json.cc b/api/echo_canceller3_config_json.cc new file mode 100644 index 0000000..500fab2 --- /dev/null +++ b/api/echo_canceller3_config_json.cc @@ -0,0 +1,673 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+#include "api/echo_canceller3_config_json.h"
+
+#include <stddef.h>
+
+#include <string>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/json.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace {
+void ReadParam(const Json::Value& root, std::string param_name, bool* param) {
+  RTC_DCHECK(param);
+  bool v;
+  if (rtc::GetBoolFromJsonObject(root, param_name, &v)) {
+    *param = v;
+  }
+}
+
+void ReadParam(const Json::Value& root, std::string param_name, size_t* param) {
+  RTC_DCHECK(param);
+  int v;
+  if (rtc::GetIntFromJsonObject(root, param_name, &v) && v >= 0) {
+    *param = v;
+  }
+}
+
+void ReadParam(const Json::Value& root, std::string param_name, int* param) {
+  RTC_DCHECK(param);
+  int v;
+  if (rtc::GetIntFromJsonObject(root, param_name, &v)) {
+    *param = v;
+  }
+}
+
+void ReadParam(const Json::Value& root, std::string param_name, float* param) {
+  RTC_DCHECK(param);
+  double v;
+  if (rtc::GetDoubleFromJsonObject(root, param_name, &v)) {
+    *param = static_cast<float>(v);
+  }
+}
+
+void ReadParam(const Json::Value& root,
+               std::string param_name,
+               EchoCanceller3Config::Filter::MainConfiguration* param) {
+  RTC_DCHECK(param);
+  Json::Value json_array;
+  if (rtc::GetValueFromJsonObject(root, param_name, &json_array)) {
+    std::vector<double> v;
+    rtc::JsonArrayToDoubleVector(json_array, &v);
+    if (v.size() != 6) {
+      RTC_LOG(LS_ERROR) << "Incorrect array size for " << param_name;
+      return;
+    }
+    param->length_blocks = static_cast<size_t>(v[0]);
+    param->leakage_converged = static_cast<float>(v[1]);
+    param->leakage_diverged = static_cast<float>(v[2]);
+    param->error_floor = static_cast<float>(v[3]);
+    param->error_ceil = static_cast<float>(v[4]);
+    param->noise_gate = static_cast<float>(v[5]);
+  }
+}
+
+void ReadParam(const Json::Value& root,
+               std::string param_name,
+               EchoCanceller3Config::Filter::ShadowConfiguration* param) {
+  RTC_DCHECK(param);
+  Json::Value json_array;
+  if (rtc::GetValueFromJsonObject(root, param_name, &json_array)) {
+    std::vector<double> v;
+    rtc::JsonArrayToDoubleVector(json_array, &v);
+    if (v.size() != 3) {
+      RTC_LOG(LS_ERROR) << "Incorrect array size for " << param_name;
+      return;
+    }
+    param->length_blocks = static_cast<size_t>(v[0]);
+    param->rate = static_cast<float>(v[1]);
+    param->noise_gate = static_cast<float>(v[2]);
+  }
+}
+
+void ReadParam(const Json::Value& root,
+               std::string param_name,
+               EchoCanceller3Config::Delay::AlignmentMixing* param) {
+  RTC_DCHECK(param);
+
+  Json::Value subsection;
+  if (rtc::GetValueFromJsonObject(root, param_name, &subsection)) {
+    ReadParam(subsection, "downmix", &param->downmix);
+    ReadParam(subsection, "adaptive_selection", &param->adaptive_selection);
+    ReadParam(subsection, "activity_power_threshold",
+              &param->activity_power_threshold);
+    ReadParam(subsection, "prefer_first_two_channels",
+              &param->prefer_first_two_channels);
+  }
+}
+
+void ReadParam(
+    const Json::Value& root,
+    std::string param_name,
+    EchoCanceller3Config::Suppressor::SubbandNearendDetection::SubbandRegion*
+        param) {
+  RTC_DCHECK(param);
+  Json::Value json_array;
+  if (rtc::GetValueFromJsonObject(root, param_name, &json_array)) {
+    std::vector<int> v;
+    rtc::JsonArrayToIntVector(json_array, &v);
+    if (v.size() != 2) {
+      RTC_LOG(LS_ERROR) << "Incorrect array size for " << param_name;
+      return;
+    }
+    param->low = static_cast<size_t>(v[0]);
+    param->high = static_cast<size_t>(v[1]);
+  }
+}
+
+void ReadParam(const Json::Value& root,
+               std::string param_name,
+               EchoCanceller3Config::Suppressor::MaskingThresholds* param) {
+  RTC_DCHECK(param);
+  Json::Value json_array;
+  if (rtc::GetValueFromJsonObject(root, param_name, &json_array)) {
+    std::vector<double> v;
+    rtc::JsonArrayToDoubleVector(json_array, &v);
+    if (v.size() != 3) {
+      RTC_LOG(LS_ERROR) << "Incorrect array size for " << param_name;
+      return;
+    }
+    param->enr_transparent = static_cast<float>(v[0]);
+    param->enr_suppress = static_cast<float>(v[1]);
+    param->emr_transparent = static_cast<float>(v[2]);
+  }
+}
+}  // namespace
+
+void Aec3ConfigFromJsonString(absl::string_view json_string,
+                              EchoCanceller3Config* config,
+                              bool* parsing_successful) {
+  RTC_DCHECK(config);
+  RTC_DCHECK(parsing_successful);
+  EchoCanceller3Config& cfg = *config;
+  cfg = EchoCanceller3Config();
+  *parsing_successful = true;
+
+  Json::Value root;
+  bool success = Json::Reader().parse(std::string(json_string), root);
+  if (!success) {
+    RTC_LOG(LS_ERROR) << "Incorrect JSON format: " << json_string;
+    *parsing_successful = false;
+    return;
+  }
+
+  Json::Value aec3_root;
+  success = rtc::GetValueFromJsonObject(root, "aec3", &aec3_root);
+  if (!success) {
+    RTC_LOG(LS_ERROR) << "Missing AEC3 config field: " << json_string;
+    *parsing_successful = false;
+    return;
+  }
+
+  Json::Value section;
+  if (rtc::GetValueFromJsonObject(aec3_root, "buffering", &section)) {
+    ReadParam(section, "excess_render_detection_interval_blocks",
+              &cfg.buffering.excess_render_detection_interval_blocks);
+    ReadParam(section, "max_allowed_excess_render_blocks",
+              &cfg.buffering.max_allowed_excess_render_blocks);
+  }
+
+  if (rtc::GetValueFromJsonObject(aec3_root, "delay", &section)) {
+    ReadParam(section, "default_delay", &cfg.delay.default_delay);
+    ReadParam(section, "down_sampling_factor",
+              &cfg.delay.down_sampling_factor);
+    ReadParam(section, "num_filters", &cfg.delay.num_filters);
+    ReadParam(section, "delay_headroom_samples",
+              &cfg.delay.delay_headroom_samples);
+    ReadParam(section, "hysteresis_limit_blocks",
+              &cfg.delay.hysteresis_limit_blocks);
+    ReadParam(section, "fixed_capture_delay_samples",
+              &cfg.delay.fixed_capture_delay_samples);
+    ReadParam(section, "delay_estimate_smoothing",
+              &cfg.delay.delay_estimate_smoothing);
+    ReadParam(section, "delay_candidate_detection_threshold",
+              &cfg.delay.delay_candidate_detection_threshold);
+
+    Json::Value subsection;
+    if (rtc::GetValueFromJsonObject(section, "delay_selection_thresholds",
+                                    &subsection)) {
+      ReadParam(subsection, "initial",
+                &cfg.delay.delay_selection_thresholds.initial);
+      ReadParam(subsection, "converged",
+                &cfg.delay.delay_selection_thresholds.converged);
+    }
+
+    ReadParam(section, "use_external_delay_estimator",
+              &cfg.delay.use_external_delay_estimator);
+    ReadParam(section, "log_warning_on_delay_changes",
+              &cfg.delay.log_warning_on_delay_changes);
+
+    ReadParam(section, "render_alignment_mixing",
+              &cfg.delay.render_alignment_mixing);
+    ReadParam(section, "capture_alignment_mixing",
+              &cfg.delay.capture_alignment_mixing);
+  }
+
+  if (rtc::GetValueFromJsonObject(aec3_root, "filter", &section)) {
+    ReadParam(section, "main", &cfg.filter.main);
+    ReadParam(section, "shadow", &cfg.filter.shadow);
+    ReadParam(section, "main_initial", &cfg.filter.main_initial);
+    ReadParam(section, "shadow_initial", &cfg.filter.shadow_initial);
+    ReadParam(section, "config_change_duration_blocks",
+              &cfg.filter.config_change_duration_blocks);
+    ReadParam(section, "initial_state_seconds",
+              &cfg.filter.initial_state_seconds);
+    ReadParam(section, "conservative_initial_phase",
+              &cfg.filter.conservative_initial_phase);
+    ReadParam(section, "enable_shadow_filter_output_usage",
+              &cfg.filter.enable_shadow_filter_output_usage);
+
ReadParam(section, "use_linear_filter", &cfg.filter.use_linear_filter); + ReadParam(section, "export_linear_aec_output", + &cfg.filter.export_linear_aec_output); + } + + if (rtc::GetValueFromJsonObject(aec3_root, "erle", §ion)) { + ReadParam(section, "min", &cfg.erle.min); + ReadParam(section, "max_l", &cfg.erle.max_l); + ReadParam(section, "max_h", &cfg.erle.max_h); + ReadParam(section, "onset_detection", &cfg.erle.onset_detection); + ReadParam(section, "num_sections", &cfg.erle.num_sections); + ReadParam(section, "clamp_quality_estimate_to_zero", + &cfg.erle.clamp_quality_estimate_to_zero); + ReadParam(section, "clamp_quality_estimate_to_one", + &cfg.erle.clamp_quality_estimate_to_one); + } + + if (rtc::GetValueFromJsonObject(aec3_root, "ep_strength", §ion)) { + ReadParam(section, "default_gain", &cfg.ep_strength.default_gain); + ReadParam(section, "default_len", &cfg.ep_strength.default_len); + ReadParam(section, "echo_can_saturate", &cfg.ep_strength.echo_can_saturate); + ReadParam(section, "bounded_erl", &cfg.ep_strength.bounded_erl); + } + + if (rtc::GetValueFromJsonObject(aec3_root, "echo_audibility", §ion)) { + ReadParam(section, "low_render_limit", + &cfg.echo_audibility.low_render_limit); + ReadParam(section, "normal_render_limit", + &cfg.echo_audibility.normal_render_limit); + + ReadParam(section, "floor_power", &cfg.echo_audibility.floor_power); + ReadParam(section, "audibility_threshold_lf", + &cfg.echo_audibility.audibility_threshold_lf); + ReadParam(section, "audibility_threshold_mf", + &cfg.echo_audibility.audibility_threshold_mf); + ReadParam(section, "audibility_threshold_hf", + &cfg.echo_audibility.audibility_threshold_hf); + ReadParam(section, "use_stationarity_properties", + &cfg.echo_audibility.use_stationarity_properties); + ReadParam(section, "use_stationarity_properties_at_init", + &cfg.echo_audibility.use_stationarity_properties_at_init); + } + + if (rtc::GetValueFromJsonObject(aec3_root, "render_levels", §ion)) { + ReadParam(section, "active_render_limit", + &cfg.render_levels.active_render_limit); + ReadParam(section, "poor_excitation_render_limit", + &cfg.render_levels.poor_excitation_render_limit); + ReadParam(section, "poor_excitation_render_limit_ds8", + &cfg.render_levels.poor_excitation_render_limit_ds8); + ReadParam(section, "render_power_gain_db", + &cfg.render_levels.render_power_gain_db); + } + + if (rtc::GetValueFromJsonObject(aec3_root, "echo_removal_control", + §ion)) { + ReadParam(section, "has_clock_drift", + &cfg.echo_removal_control.has_clock_drift); + ReadParam(section, "linear_and_stable_echo_path", + &cfg.echo_removal_control.linear_and_stable_echo_path); + } + + if (rtc::GetValueFromJsonObject(aec3_root, "echo_model", §ion)) { + Json::Value subsection; + ReadParam(section, "noise_floor_hold", &cfg.echo_model.noise_floor_hold); + ReadParam(section, "min_noise_floor_power", + &cfg.echo_model.min_noise_floor_power); + ReadParam(section, "stationary_gate_slope", + &cfg.echo_model.stationary_gate_slope); + ReadParam(section, "noise_gate_power", &cfg.echo_model.noise_gate_power); + ReadParam(section, "noise_gate_slope", &cfg.echo_model.noise_gate_slope); + ReadParam(section, "render_pre_window_size", + &cfg.echo_model.render_pre_window_size); + ReadParam(section, "render_post_window_size", + &cfg.echo_model.render_post_window_size); + } + + Json::Value subsection; + if (rtc::GetValueFromJsonObject(aec3_root, "suppressor", §ion)) { + ReadParam(section, "nearend_average_blocks", + &cfg.suppressor.nearend_average_blocks); + + if 
(rtc::GetValueFromJsonObject(section, "normal_tuning", &subsection)) { + ReadParam(subsection, "mask_lf", &cfg.suppressor.normal_tuning.mask_lf); + ReadParam(subsection, "mask_hf", &cfg.suppressor.normal_tuning.mask_hf); + ReadParam(subsection, "max_inc_factor", + &cfg.suppressor.normal_tuning.max_inc_factor); + ReadParam(subsection, "max_dec_factor_lf", + &cfg.suppressor.normal_tuning.max_dec_factor_lf); + } + + if (rtc::GetValueFromJsonObject(section, "nearend_tuning", &subsection)) { + ReadParam(subsection, "mask_lf", &cfg.suppressor.nearend_tuning.mask_lf); + ReadParam(subsection, "mask_hf", &cfg.suppressor.nearend_tuning.mask_hf); + ReadParam(subsection, "max_inc_factor", + &cfg.suppressor.nearend_tuning.max_inc_factor); + ReadParam(subsection, "max_dec_factor_lf", + &cfg.suppressor.nearend_tuning.max_dec_factor_lf); + } + + if (rtc::GetValueFromJsonObject(section, "dominant_nearend_detection", + &subsection)) { + ReadParam(subsection, "enr_threshold", + &cfg.suppressor.dominant_nearend_detection.enr_threshold); + ReadParam(subsection, "enr_exit_threshold", + &cfg.suppressor.dominant_nearend_detection.enr_exit_threshold); + ReadParam(subsection, "snr_threshold", + &cfg.suppressor.dominant_nearend_detection.snr_threshold); + ReadParam(subsection, "hold_duration", + &cfg.suppressor.dominant_nearend_detection.hold_duration); + ReadParam(subsection, "trigger_threshold", + &cfg.suppressor.dominant_nearend_detection.trigger_threshold); + ReadParam( + subsection, "use_during_initial_phase", + &cfg.suppressor.dominant_nearend_detection.use_during_initial_phase); + } + + if (rtc::GetValueFromJsonObject(section, "subband_nearend_detection", + &subsection)) { + ReadParam( + subsection, "nearend_average_blocks", + &cfg.suppressor.subband_nearend_detection.nearend_average_blocks); + ReadParam(subsection, "subband1", + &cfg.suppressor.subband_nearend_detection.subband1); + ReadParam(subsection, "subband2", + &cfg.suppressor.subband_nearend_detection.subband2); + ReadParam(subsection, "nearend_threshold", + &cfg.suppressor.subband_nearend_detection.nearend_threshold); + ReadParam(subsection, "snr_threshold", + &cfg.suppressor.subband_nearend_detection.snr_threshold); + } + + ReadParam(section, "use_subband_nearend_detection", + &cfg.suppressor.use_subband_nearend_detection); + + if (rtc::GetValueFromJsonObject(section, "high_bands_suppression", + &subsection)) { + ReadParam(subsection, "enr_threshold", + &cfg.suppressor.high_bands_suppression.enr_threshold); + ReadParam(subsection, "max_gain_during_echo", + &cfg.suppressor.high_bands_suppression.max_gain_during_echo); + ReadParam(subsection, "anti_howling_activation_threshold", + &cfg.suppressor.high_bands_suppression + .anti_howling_activation_threshold); + ReadParam(subsection, "anti_howling_gain", + &cfg.suppressor.high_bands_suppression.anti_howling_gain); + } + + ReadParam(section, "floor_first_increase", + &cfg.suppressor.floor_first_increase); + } +} + +EchoCanceller3Config Aec3ConfigFromJsonString(absl::string_view json_string) { + EchoCanceller3Config cfg; + bool not_used; + Aec3ConfigFromJsonString(json_string, &cfg, ¬_used); + return cfg; +} + +std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { + rtc::StringBuilder ost; + ost << "{"; + ost << "\"aec3\": {"; + ost << "\"buffering\": {"; + ost << "\"excess_render_detection_interval_blocks\": " + << config.buffering.excess_render_detection_interval_blocks << ","; + ost << "\"max_allowed_excess_render_blocks\": " + << config.buffering.max_allowed_excess_render_blocks; + 
ost << "},"; + + ost << "\"delay\": {"; + ost << "\"default_delay\": " << config.delay.default_delay << ","; + ost << "\"down_sampling_factor\": " << config.delay.down_sampling_factor + << ","; + ost << "\"num_filters\": " << config.delay.num_filters << ","; + ost << "\"delay_headroom_samples\": " << config.delay.delay_headroom_samples + << ","; + ost << "\"hysteresis_limit_blocks\": " << config.delay.hysteresis_limit_blocks + << ","; + ost << "\"fixed_capture_delay_samples\": " + << config.delay.fixed_capture_delay_samples << ","; + ost << "\"delay_estimate_smoothing\": " + << config.delay.delay_estimate_smoothing << ","; + ost << "\"delay_candidate_detection_threshold\": " + << config.delay.delay_candidate_detection_threshold << ","; + + ost << "\"delay_selection_thresholds\": {"; + ost << "\"initial\": " << config.delay.delay_selection_thresholds.initial + << ","; + ost << "\"converged\": " << config.delay.delay_selection_thresholds.converged; + ost << "},"; + + ost << "\"use_external_delay_estimator\": " + << (config.delay.use_external_delay_estimator ? "true" : "false") << ","; + ost << "\"log_warning_on_delay_changes\": " + << (config.delay.log_warning_on_delay_changes ? "true" : "false") << ","; + + ost << "\"render_alignment_mixing\": {"; + ost << "\"downmix\": " + << (config.delay.render_alignment_mixing.downmix ? "true" : "false") + << ","; + ost << "\"adaptive_selection\": " + << (config.delay.render_alignment_mixing.adaptive_selection ? "true" + : "false") + << ","; + ost << "\"activity_power_threshold\": " + << config.delay.render_alignment_mixing.activity_power_threshold << ","; + ost << "\"prefer_first_two_channels\": " + << (config.delay.render_alignment_mixing.prefer_first_two_channels + ? "true" + : "false"); + ost << "},"; + + ost << "\"capture_alignment_mixing\": {"; + ost << "\"downmix\": " + << (config.delay.capture_alignment_mixing.downmix ? "true" : "false") + << ","; + ost << "\"adaptive_selection\": " + << (config.delay.capture_alignment_mixing.adaptive_selection ? "true" + : "false") + << ","; + ost << "\"activity_power_threshold\": " + << config.delay.capture_alignment_mixing.activity_power_threshold << ","; + ost << "\"prefer_first_two_channels\": " + << (config.delay.capture_alignment_mixing.prefer_first_two_channels + ? 
"true" + : "false"); + ost << "}"; + ost << "},"; + + ost << "\"filter\": {"; + ost << "\"main\": ["; + ost << config.filter.main.length_blocks << ","; + ost << config.filter.main.leakage_converged << ","; + ost << config.filter.main.leakage_diverged << ","; + ost << config.filter.main.error_floor << ","; + ost << config.filter.main.error_ceil << ","; + ost << config.filter.main.noise_gate; + ost << "],"; + + ost << "\"shadow\": ["; + ost << config.filter.shadow.length_blocks << ","; + ost << config.filter.shadow.rate << ","; + ost << config.filter.shadow.noise_gate; + ost << "],"; + + ost << "\"main_initial\": ["; + ost << config.filter.main_initial.length_blocks << ","; + ost << config.filter.main_initial.leakage_converged << ","; + ost << config.filter.main_initial.leakage_diverged << ","; + ost << config.filter.main_initial.error_floor << ","; + ost << config.filter.main_initial.error_ceil << ","; + ost << config.filter.main_initial.noise_gate; + ost << "],"; + + ost << "\"shadow_initial\": ["; + ost << config.filter.shadow_initial.length_blocks << ","; + ost << config.filter.shadow_initial.rate << ","; + ost << config.filter.shadow_initial.noise_gate; + ost << "],"; + + ost << "\"config_change_duration_blocks\": " + << config.filter.config_change_duration_blocks << ","; + ost << "\"initial_state_seconds\": " << config.filter.initial_state_seconds + << ","; + ost << "\"conservative_initial_phase\": " + << (config.filter.conservative_initial_phase ? "true" : "false") << ","; + ost << "\"enable_shadow_filter_output_usage\": " + << (config.filter.enable_shadow_filter_output_usage ? "true" : "false") + << ","; + ost << "\"use_linear_filter\": " + << (config.filter.use_linear_filter ? "true" : "false") << ","; + ost << "\"export_linear_aec_output\": " + << (config.filter.export_linear_aec_output ? "true" : "false"); + + ost << "},"; + + ost << "\"erle\": {"; + ost << "\"min\": " << config.erle.min << ","; + ost << "\"max_l\": " << config.erle.max_l << ","; + ost << "\"max_h\": " << config.erle.max_h << ","; + ost << "\"onset_detection\": " + << (config.erle.onset_detection ? "true" : "false") << ","; + ost << "\"num_sections\": " << config.erle.num_sections << ","; + ost << "\"clamp_quality_estimate_to_zero\": " + << (config.erle.clamp_quality_estimate_to_zero ? "true" : "false") << ","; + ost << "\"clamp_quality_estimate_to_one\": " + << (config.erle.clamp_quality_estimate_to_one ? "true" : "false"); + ost << "},"; + + ost << "\"ep_strength\": {"; + ost << "\"default_gain\": " << config.ep_strength.default_gain << ","; + ost << "\"default_len\": " << config.ep_strength.default_len << ","; + ost << "\"echo_can_saturate\": " + << (config.ep_strength.echo_can_saturate ? "true" : "false") << ","; + ost << "\"bounded_erl\": " + << (config.ep_strength.bounded_erl ? "true" : "false"); + + ost << "},"; + + ost << "\"echo_audibility\": {"; + ost << "\"low_render_limit\": " << config.echo_audibility.low_render_limit + << ","; + ost << "\"normal_render_limit\": " + << config.echo_audibility.normal_render_limit << ","; + ost << "\"floor_power\": " << config.echo_audibility.floor_power << ","; + ost << "\"audibility_threshold_lf\": " + << config.echo_audibility.audibility_threshold_lf << ","; + ost << "\"audibility_threshold_mf\": " + << config.echo_audibility.audibility_threshold_mf << ","; + ost << "\"audibility_threshold_hf\": " + << config.echo_audibility.audibility_threshold_hf << ","; + ost << "\"use_stationarity_properties\": " + << (config.echo_audibility.use_stationarity_properties ? 
"true" : "false") + << ","; + ost << "\"use_stationarity_properties_at_init\": " + << (config.echo_audibility.use_stationarity_properties_at_init ? "true" + : "false"); + ost << "},"; + + ost << "\"render_levels\": {"; + ost << "\"active_render_limit\": " << config.render_levels.active_render_limit + << ","; + ost << "\"poor_excitation_render_limit\": " + << config.render_levels.poor_excitation_render_limit << ","; + ost << "\"poor_excitation_render_limit_ds8\": " + << config.render_levels.poor_excitation_render_limit_ds8 << ","; + ost << "\"render_power_gain_db\": " + << config.render_levels.render_power_gain_db; + ost << "},"; + + ost << "\"echo_removal_control\": {"; + ost << "\"has_clock_drift\": " + << (config.echo_removal_control.has_clock_drift ? "true" : "false") + << ","; + ost << "\"linear_and_stable_echo_path\": " + << (config.echo_removal_control.linear_and_stable_echo_path ? "true" + : "false"); + + ost << "},"; + + ost << "\"echo_model\": {"; + ost << "\"noise_floor_hold\": " << config.echo_model.noise_floor_hold << ","; + ost << "\"min_noise_floor_power\": " + << config.echo_model.min_noise_floor_power << ","; + ost << "\"stationary_gate_slope\": " + << config.echo_model.stationary_gate_slope << ","; + ost << "\"noise_gate_power\": " << config.echo_model.noise_gate_power << ","; + ost << "\"noise_gate_slope\": " << config.echo_model.noise_gate_slope << ","; + ost << "\"render_pre_window_size\": " + << config.echo_model.render_pre_window_size << ","; + ost << "\"render_post_window_size\": " + << config.echo_model.render_post_window_size; + ost << "},"; + + ost << "\"suppressor\": {"; + ost << "\"nearend_average_blocks\": " + << config.suppressor.nearend_average_blocks << ","; + ost << "\"normal_tuning\": {"; + ost << "\"mask_lf\": ["; + ost << config.suppressor.normal_tuning.mask_lf.enr_transparent << ","; + ost << config.suppressor.normal_tuning.mask_lf.enr_suppress << ","; + ost << config.suppressor.normal_tuning.mask_lf.emr_transparent; + ost << "],"; + ost << "\"mask_hf\": ["; + ost << config.suppressor.normal_tuning.mask_hf.enr_transparent << ","; + ost << config.suppressor.normal_tuning.mask_hf.enr_suppress << ","; + ost << config.suppressor.normal_tuning.mask_hf.emr_transparent; + ost << "],"; + ost << "\"max_inc_factor\": " + << config.suppressor.normal_tuning.max_inc_factor << ","; + ost << "\"max_dec_factor_lf\": " + << config.suppressor.normal_tuning.max_dec_factor_lf; + ost << "},"; + ost << "\"nearend_tuning\": {"; + ost << "\"mask_lf\": ["; + ost << config.suppressor.nearend_tuning.mask_lf.enr_transparent << ","; + ost << config.suppressor.nearend_tuning.mask_lf.enr_suppress << ","; + ost << config.suppressor.nearend_tuning.mask_lf.emr_transparent; + ost << "],"; + ost << "\"mask_hf\": ["; + ost << config.suppressor.nearend_tuning.mask_hf.enr_transparent << ","; + ost << config.suppressor.nearend_tuning.mask_hf.enr_suppress << ","; + ost << config.suppressor.nearend_tuning.mask_hf.emr_transparent; + ost << "],"; + ost << "\"max_inc_factor\": " + << config.suppressor.nearend_tuning.max_inc_factor << ","; + ost << "\"max_dec_factor_lf\": " + << config.suppressor.nearend_tuning.max_dec_factor_lf; + ost << "},"; + ost << "\"dominant_nearend_detection\": {"; + ost << "\"enr_threshold\": " + << config.suppressor.dominant_nearend_detection.enr_threshold << ","; + ost << "\"enr_exit_threshold\": " + << config.suppressor.dominant_nearend_detection.enr_exit_threshold << ","; + ost << "\"snr_threshold\": " + << config.suppressor.dominant_nearend_detection.snr_threshold << 
","; + ost << "\"hold_duration\": " + << config.suppressor.dominant_nearend_detection.hold_duration << ","; + ost << "\"trigger_threshold\": " + << config.suppressor.dominant_nearend_detection.trigger_threshold << ","; + ost << "\"use_during_initial_phase\": " + << config.suppressor.dominant_nearend_detection.use_during_initial_phase; + ost << "},"; + ost << "\"subband_nearend_detection\": {"; + ost << "\"nearend_average_blocks\": " + << config.suppressor.subband_nearend_detection.nearend_average_blocks + << ","; + ost << "\"subband1\": ["; + ost << config.suppressor.subband_nearend_detection.subband1.low << ","; + ost << config.suppressor.subband_nearend_detection.subband1.high; + ost << "],"; + ost << "\"subband2\": ["; + ost << config.suppressor.subband_nearend_detection.subband2.low << ","; + ost << config.suppressor.subband_nearend_detection.subband2.high; + ost << "],"; + ost << "\"nearend_threshold\": " + << config.suppressor.subband_nearend_detection.nearend_threshold << ","; + ost << "\"snr_threshold\": " + << config.suppressor.subband_nearend_detection.snr_threshold; + ost << "},"; + ost << "\"use_subband_nearend_detection\": " + << config.suppressor.use_subband_nearend_detection << ","; + ost << "\"high_bands_suppression\": {"; + ost << "\"enr_threshold\": " + << config.suppressor.high_bands_suppression.enr_threshold << ","; + ost << "\"max_gain_during_echo\": " + << config.suppressor.high_bands_suppression.max_gain_during_echo << ","; + ost << "\"anti_howling_activation_threshold\": " + << config.suppressor.high_bands_suppression + .anti_howling_activation_threshold + << ","; + ost << "\"anti_howling_gain\": " + << config.suppressor.high_bands_suppression.anti_howling_gain; + ost << "},"; + ost << "\"floor_first_increase\": " << config.suppressor.floor_first_increase; + ost << "}"; + ost << "}"; + ost << "}"; + + return ost.Release(); +} +} // namespace webrtc diff --git a/api/echo_canceller3_config_json.h b/api/echo_canceller3_config_json.h new file mode 100644 index 0000000..bb40ac9 --- /dev/null +++ b/api/echo_canceller3_config_json.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_AUDIO_ECHO_CANCELLER3_CONFIG_JSON_H_ +#define API_AUDIO_ECHO_CANCELLER3_CONFIG_JSON_H_ + +#include + +#include "absl/strings/string_view.h" +#include "api/echo_canceller3_config.h" +#include "rtc_base/system/rtc_export.h" + +namespace webrtc { +// Parses a JSON-encoded string into an Aec3 config. Fields corresponds to +// substruct names, with the addition that there must be a top-level node +// "aec3". Produces default config values for anything that cannot be parsed +// from the string. If any error was found in the parsing, parsing_successful is +// set to false. +RTC_EXPORT void Aec3ConfigFromJsonString(absl::string_view json_string, + EchoCanceller3Config* config, + bool* parsing_successful); + +// To be deprecated. +// Parses a JSON-encoded string into an Aec3 config. Fields corresponds to +// substruct names, with the addition that there must be a top-level node +// "aec3". Returns default config values for anything that cannot be parsed from +// the string. 
+RTC_EXPORT EchoCanceller3Config
+Aec3ConfigFromJsonString(absl::string_view json_string);
+
+// Encodes an Aec3 config in JSON format. Fields correspond to substruct
+// names, with the addition that the top-level node is named "aec3".
+RTC_EXPORT std::string Aec3ConfigToJsonString(
+    const EchoCanceller3Config& config);
+
+}  // namespace webrtc
+
+#endif  // API_AUDIO_ECHO_CANCELLER3_CONFIG_JSON_H_
diff --git a/api/echo_canceller3_factory.cc b/api/echo_canceller3_factory.cc
new file mode 100644
index 0000000..4046d3e
--- /dev/null
+++ b/api/echo_canceller3_factory.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "api/echo_canceller3_factory.h"
+
+#include <memory>
+
+#include "audio_processing/aec3/echo_canceller3.h"
+
+namespace webrtc {
+
+EchoCanceller3Factory::EchoCanceller3Factory() {}
+
+EchoCanceller3Factory::EchoCanceller3Factory(const EchoCanceller3Config& config)
+    : config_(config) {}
+
+std::unique_ptr<EchoControl> EchoCanceller3Factory::Create(
+    int sample_rate_hz,
+    int num_render_channels,
+    int num_capture_channels) {
+  return std::make_unique<EchoCanceller3>(
+      config_, sample_rate_hz, num_render_channels, num_capture_channels);
+}
+
+}  // namespace webrtc
diff --git a/api/echo_canceller3_factory.h b/api/echo_canceller3_factory.h
new file mode 100644
index 0000000..740f34f
--- /dev/null
+++ b/api/echo_canceller3_factory.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_ECHO_CANCELLER3_FACTORY_H_
+#define API_AUDIO_ECHO_CANCELLER3_FACTORY_H_
+
+#include <memory>
+
+#include "api/echo_canceller3_config.h"
+#include "api/echo_control.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+class RTC_EXPORT EchoCanceller3Factory : public EchoControlFactory {
+ public:
+  // Factory producing EchoCanceller3 instances with the default configuration.
+  EchoCanceller3Factory();
+
+  // Factory producing EchoCanceller3 instances with the specified
+  // configuration.
+  explicit EchoCanceller3Factory(const EchoCanceller3Config& config);
+
+  // Creates an EchoCanceller3 with a specified channel count and sampling
+  // rate.
+  std::unique_ptr<EchoControl> Create(int sample_rate_hz,
+                                      int num_render_channels,
+                                      int num_capture_channels) override;
+
+ private:
+  const EchoCanceller3Config config_;
+};
+}  // namespace webrtc
+
+#endif  // API_AUDIO_ECHO_CANCELLER3_FACTORY_H_
diff --git a/api/echo_control.h b/api/echo_control.h
new file mode 100644
index 0000000..8d567bf
--- /dev/null
+++ b/api/echo_control.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_ECHO_CONTROL_H_
+#define API_AUDIO_ECHO_CONTROL_H_
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
+// Interface for an acoustic echo cancellation (AEC) submodule.
+class EchoControl {
+ public:
+  // Analysis (not changing) of the render signal.
+  virtual void AnalyzeRender(AudioBuffer* render) = 0;
+
+  // Analysis (not changing) of the capture signal.
+  virtual void AnalyzeCapture(AudioBuffer* capture) = 0;
+
+  // Processes the capture signal in order to remove the echo.
+  virtual void ProcessCapture(AudioBuffer* capture, bool level_change) = 0;
+
+  // As above, but also returns the linear filter output.
+  virtual void ProcessCapture(AudioBuffer* capture,
+                              AudioBuffer* linear_output,
+                              bool level_change) = 0;
+
+  struct Metrics {
+    double echo_return_loss;
+    double echo_return_loss_enhancement;
+    int delay_ms;
+  };
+
+  // Collects current metrics from the echo controller.
+  virtual Metrics GetMetrics() const = 0;
+
+  // Provides an optional external estimate of the audio buffer delay.
+  virtual void SetAudioBufferDelay(int delay_ms) = 0;
+
+  // Returns whether the signal is altered.
+  virtual bool ActiveProcessing() const = 0;
+
+  virtual ~EchoControl() {}
+};
+
+// Interface for a factory that creates EchoControllers.
+class EchoControlFactory {
+ public:
+  virtual std::unique_ptr<EchoControl> Create(int sample_rate_hz,
+                                              int num_render_channels,
+                                              int num_capture_channels) = 0;
+
+  virtual ~EchoControlFactory() = default;
+};
+}  // namespace webrtc
+
+#endif  // API_AUDIO_ECHO_CONTROL_H_
diff --git a/audio_processing/aec3/BUILD.gn b/audio_processing/aec3/BUILD.gn
new file mode 100644
index 0000000..909d49e
--- /dev/null
+++ b/audio_processing/aec3/BUILD.gn
@@ -0,0 +1,238 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
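The EchoControl/EchoControlFactory interfaces in api/echo_control.h above, together with EchoCanceller3Factory, form the public surface added by this patch. A minimal sketch of a client driving them (illustrative only: RunAec3Once is a hypothetical helper, not part of the patch; it assumes 48 kHz audio, stereo render, mono capture, and AudioBuffer instances created elsewhere):

// Sketch only; all names below except the included APIs are assumptions.
#include <memory>

#include "api/echo_canceller3_config.h"
#include "api/echo_canceller3_factory.h"
#include "api/echo_control.h"

void RunAec3Once(webrtc::AudioBuffer* render, webrtc::AudioBuffer* capture) {
  webrtc::EchoCanceller3Config cfg;
  // Clamps any out-of-range fields; returns false if something was adjusted.
  webrtc::EchoCanceller3Config::Validate(&cfg);

  webrtc::EchoCanceller3Factory factory(cfg);
  std::unique_ptr<webrtc::EchoControl> aec =
      factory.Create(/*sample_rate_hz=*/48000, /*num_render_channels=*/2,
                     /*num_capture_channels=*/1);

  // Render analysis precedes capture processing for each frame pair.
  aec->AnalyzeRender(render);
  aec->AnalyzeCapture(capture);
  aec->ProcessCapture(capture, /*level_change=*/false);
}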
+ +import("../../../webrtc.gni") + +rtc_library("aec3") { + visibility = [ "*" ] + configs += [ "..:apm_debug_dump" ] + sources = [ + "adaptive_fir_filter.cc", + "adaptive_fir_filter.h", + "adaptive_fir_filter_erl.cc", + "adaptive_fir_filter_erl.h", + "aec3_common.cc", + "aec3_common.h", + "aec3_fft.cc", + "aec3_fft.h", + "aec_state.cc", + "aec_state.h", + "alignment_mixer.cc", + "alignment_mixer.h", + "api_call_jitter_metrics.cc", + "api_call_jitter_metrics.h", + "block_buffer.cc", + "block_buffer.h", + "block_delay_buffer.cc", + "block_delay_buffer.h", + "block_framer.cc", + "block_framer.h", + "block_processor.cc", + "block_processor.h", + "block_processor_metrics.cc", + "block_processor_metrics.h", + "clockdrift_detector.cc", + "clockdrift_detector.h", + "comfort_noise_generator.cc", + "comfort_noise_generator.h", + "decimator.cc", + "decimator.h", + "delay_estimate.h", + "dominant_nearend_detector.cc", + "dominant_nearend_detector.h", + "downsampled_render_buffer.cc", + "downsampled_render_buffer.h", + "echo_audibility.cc", + "echo_audibility.h", + "echo_canceller3.cc", + "echo_canceller3.h", + "echo_path_delay_estimator.cc", + "echo_path_delay_estimator.h", + "echo_path_variability.cc", + "echo_path_variability.h", + "echo_remover.cc", + "echo_remover.h", + "echo_remover_metrics.cc", + "echo_remover_metrics.h", + "erl_estimator.cc", + "erl_estimator.h", + "erle_estimator.cc", + "erle_estimator.h", + "fft_buffer.cc", + "fft_buffer.h", + "fft_data.h", + "filter_analyzer.cc", + "filter_analyzer.h", + "frame_blocker.cc", + "frame_blocker.h", + "fullband_erle_estimator.cc", + "fullband_erle_estimator.h", + "main_filter_update_gain.cc", + "main_filter_update_gain.h", + "matched_filter.cc", + "matched_filter.h", + "matched_filter_lag_aggregator.cc", + "matched_filter_lag_aggregator.h", + "moving_average.cc", + "moving_average.h", + "nearend_detector.h", + "render_buffer.cc", + "render_buffer.h", + "render_delay_buffer.cc", + "render_delay_buffer.h", + "render_delay_controller.cc", + "render_delay_controller.h", + "render_delay_controller_metrics.cc", + "render_delay_controller_metrics.h", + "render_signal_analyzer.cc", + "render_signal_analyzer.h", + "residual_echo_estimator.cc", + "residual_echo_estimator.h", + "reverb_decay_estimator.cc", + "reverb_decay_estimator.h", + "reverb_frequency_response.cc", + "reverb_frequency_response.h", + "reverb_model.cc", + "reverb_model.h", + "reverb_model_estimator.cc", + "reverb_model_estimator.h", + "shadow_filter_update_gain.cc", + "shadow_filter_update_gain.h", + "signal_dependent_erle_estimator.cc", + "signal_dependent_erle_estimator.h", + "spectrum_buffer.cc", + "spectrum_buffer.h", + "stationarity_estimator.cc", + "stationarity_estimator.h", + "subband_erle_estimator.cc", + "subband_erle_estimator.h", + "subband_nearend_detector.cc", + "subband_nearend_detector.h", + "subtractor.cc", + "subtractor.h", + "subtractor_output.cc", + "subtractor_output.h", + "subtractor_output_analyzer.cc", + "subtractor_output_analyzer.h", + "suppression_filter.cc", + "suppression_filter.h", + "suppression_gain.cc", + "suppression_gain.h", + "vector_math.h", + ] + + defines = [] + if (rtc_build_with_neon && current_cpu != "arm64") { + suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ] + cflags = [ "-mfpu=neon" ] + } + + deps = [ + "..:apm_logging", + "..:audio_buffer", + "..:high_pass_filter", + "../../../api:array_view", + "../../../api/audio:aec3_config", + "../../../api/audio:echo_control", + "../../../common_audio:common_audio_c", + 
"../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../../../rtc_base:safe_minmax", + "../../../rtc_base/system:arch", + "../../../system_wrappers:cpu_features_api", + "../../../system_wrappers:field_trial", + "../../../system_wrappers:metrics", + "../utility:cascaded_biquad_filter", + "../utility:ooura_fft", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +if (rtc_include_tests) { + rtc_library("aec3_unittests") { + testonly = true + + configs += [ "..:apm_debug_dump" ] + sources = [ + "mock/mock_block_processor.cc", + "mock/mock_block_processor.h", + "mock/mock_echo_remover.cc", + "mock/mock_echo_remover.h", + "mock/mock_render_delay_buffer.cc", + "mock/mock_render_delay_buffer.h", + "mock/mock_render_delay_controller.cc", + "mock/mock_render_delay_controller.h", + ] + + deps = [ + ":aec3", + "..:apm_logging", + "..:audio_buffer", + "..:audio_processing", + "..:audio_processing_unittests", + "..:high_pass_filter", + "../../../api:array_view", + "../../../api/audio:aec3_config", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../../../rtc_base:safe_minmax", + "../../../rtc_base/system:arch", + "../../../system_wrappers:cpu_features_api", + "../../../test:test_support", + "../utility:cascaded_biquad_filter", + "//third_party/abseil-cpp/absl/types:optional", + ] + + defines = [] + + if (rtc_enable_protobuf) { + sources += [ + "adaptive_fir_filter_erl_unittest.cc", + "adaptive_fir_filter_unittest.cc", + "aec3_fft_unittest.cc", + "aec_state_unittest.cc", + "alignment_mixer_unittest.cc", + "api_call_jitter_metrics_unittest.cc", + "block_delay_buffer_unittest.cc", + "block_framer_unittest.cc", + "block_processor_metrics_unittest.cc", + "block_processor_unittest.cc", + "clockdrift_detector_unittest.cc", + "comfort_noise_generator_unittest.cc", + "decimator_unittest.cc", + "echo_canceller3_unittest.cc", + "echo_path_delay_estimator_unittest.cc", + "echo_path_variability_unittest.cc", + "echo_remover_metrics_unittest.cc", + "echo_remover_unittest.cc", + "erl_estimator_unittest.cc", + "erle_estimator_unittest.cc", + "fft_data_unittest.cc", + "filter_analyzer_unittest.cc", + "frame_blocker_unittest.cc", + "main_filter_update_gain_unittest.cc", + "matched_filter_lag_aggregator_unittest.cc", + "matched_filter_unittest.cc", + "moving_average_unittest.cc", + "render_buffer_unittest.cc", + "render_delay_buffer_unittest.cc", + "render_delay_controller_metrics_unittest.cc", + "render_delay_controller_unittest.cc", + "render_signal_analyzer_unittest.cc", + "residual_echo_estimator_unittest.cc", + "reverb_model_estimator_unittest.cc", + "shadow_filter_update_gain_unittest.cc", + "signal_dependent_erle_estimator_unittest.cc", + "subtractor_unittest.cc", + "suppression_filter_unittest.cc", + "suppression_gain_unittest.cc", + "vector_math_unittest.cc", + ] + } + } +} diff --git a/audio_processing/aec3/adaptive_fir_filter.cc b/audio_processing/aec3/adaptive_fir_filter.cc new file mode 100644 index 0000000..c7afa4b --- /dev/null +++ b/audio_processing/aec3/adaptive_fir_filter.cc @@ -0,0 +1,730 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "audio_processing/aec3/adaptive_fir_filter.h"
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <math.h>
+
+#include <algorithm>
+#include <functional>
+
+#include "audio_processing/aec3/fft_data.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace aec3 {
+
+// Computes and stores the frequency response of the filter.
+void ComputeFrequencyResponse(
+    size_t num_partitions,
+    const std::vector<std::vector<FftData>>& H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
+  for (auto& H2_ch : *H2) {
+    H2_ch.fill(0.f);
+  }
+
+  const size_t num_render_channels = H[0].size();
+  RTC_DCHECK_EQ(H.size(), H2->capacity());
+  for (size_t p = 0; p < num_partitions; ++p) {
+    RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size());
+    for (size_t ch = 0; ch < num_render_channels; ++ch) {
+      for (size_t j = 0; j < kFftLengthBy2Plus1; ++j) {
+        float tmp =
+            H[p][ch].re[j] * H[p][ch].re[j] + H[p][ch].im[j] * H[p][ch].im[j];
+        (*H2)[p][j] = std::max((*H2)[p][j], tmp);
+      }
+    }
+  }
+}
+
+#if defined(WEBRTC_HAS_NEON)
+// Computes and stores the frequency response of the filter.
+void ComputeFrequencyResponse_Neon(
+    size_t num_partitions,
+    const std::vector<std::vector<FftData>>& H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
+  for (auto& H2_ch : *H2) {
+    H2_ch.fill(0.f);
+  }
+
+  const size_t num_render_channels = H[0].size();
+  RTC_DCHECK_EQ(H.size(), H2->capacity());
+  for (size_t p = 0; p < num_partitions; ++p) {
+    RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size());
+    for (size_t ch = 0; ch < num_render_channels; ++ch) {
+      for (size_t j = 0; j < kFftLengthBy2; j += 4) {
+        const float32x4_t re = vld1q_f32(&H[p][ch].re[j]);
+        const float32x4_t im = vld1q_f32(&H[p][ch].im[j]);
+        float32x4_t H2_new = vmulq_f32(re, re);
+        H2_new = vmlaq_f32(H2_new, im, im);
+        float32x4_t H2_p_j = vld1q_f32(&(*H2)[p][j]);
+        H2_p_j = vmaxq_f32(H2_p_j, H2_new);
+        vst1q_f32(&(*H2)[p][j], H2_p_j);
+      }
+      float H2_new = H[p][ch].re[kFftLengthBy2] * H[p][ch].re[kFftLengthBy2] +
+                     H[p][ch].im[kFftLengthBy2] * H[p][ch].im[kFftLengthBy2];
+      (*H2)[p][kFftLengthBy2] = std::max((*H2)[p][kFftLengthBy2], H2_new);
+    }
+  }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Computes and stores the frequency response of the filter.
+void ComputeFrequencyResponse_Sse2(
+    size_t num_partitions,
+    const std::vector<std::vector<FftData>>& H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
+  for (auto& H2_ch : *H2) {
+    H2_ch.fill(0.f);
+  }
+
+  const size_t num_render_channels = H[0].size();
+  RTC_DCHECK_EQ(H.size(), H2->capacity());
+  // constexpr __mmmask8 kMaxMask = static_cast<__mmmask8>(256u);
+  for (size_t p = 0; p < num_partitions; ++p) {
+    RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size());
+    for (size_t ch = 0; ch < num_render_channels; ++ch) {
+      for (size_t j = 0; j < kFftLengthBy2; j += 4) {
+        const __m128 re = _mm_loadu_ps(&H[p][ch].re[j]);
+        const __m128 re2 = _mm_mul_ps(re, re);
+        const __m128 im = _mm_loadu_ps(&H[p][ch].im[j]);
+        const __m128 im2 = _mm_mul_ps(im, im);
+        const __m128 H2_new = _mm_add_ps(re2, im2);
+        __m128 H2_k_j = _mm_loadu_ps(&(*H2)[p][j]);
+        H2_k_j = _mm_max_ps(H2_k_j, H2_new);
+        _mm_storeu_ps(&(*H2)[p][j], H2_k_j);
+      }
+      float H2_new = H[p][ch].re[kFftLengthBy2] * H[p][ch].re[kFftLengthBy2] +
+                     H[p][ch].im[kFftLengthBy2] * H[p][ch].im[kFftLengthBy2];
+      (*H2)[p][kFftLengthBy2] = std::max((*H2)[p][kFftLengthBy2], H2_new);
+    }
+  }
+}
+#endif
+
+// Adapts the filter partitions as H(t+1)=H(t)+G(t)*conj(X(t)).
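+// Expanded per frequency bin k, as implemented below for each partition p and
+// render channel ch (X is the buffered render spectrum, G the update gain):
+//   H[p][ch].re[k] += X.re[k] * G.re[k] + X.im[k] * G.im[k];
+//   H[p][ch].im[k] += X.re[k] * G.im[k] - X.im[k] * G.re[k];
+// i.e. each partition accumulates the complex product conj(X) * G.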
+void AdaptPartitions(const RenderBuffer& render_buffer,
+                     const FftData& G,
+                     size_t num_partitions,
+                     std::vector<std::vector<FftData>>* H) {
+  rtc::ArrayView<const std::vector<FftData>> render_buffer_data =
+      render_buffer.GetFftBuffer();
+  size_t index = render_buffer.Position();
+  const size_t num_render_channels = render_buffer_data[index].size();
+  for (size_t p = 0; p < num_partitions; ++p) {
+    for (size_t ch = 0; ch < num_render_channels; ++ch) {
+      const FftData& X_p_ch = render_buffer_data[index][ch];
+      FftData& H_p_ch = (*H)[p][ch];
+      for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+        H_p_ch.re[k] += X_p_ch.re[k] * G.re[k] + X_p_ch.im[k] * G.im[k];
+        H_p_ch.im[k] += X_p_ch.re[k] * G.im[k] - X_p_ch.im[k] * G.re[k];
+      }
+    }
+    index = index < (render_buffer_data.size() - 1) ? index + 1 : 0;
+  }
+}
+
+#if defined(WEBRTC_HAS_NEON)
+// Adapts the filter partitions. (Neon variant)
+void AdaptPartitions_Neon(const RenderBuffer& render_buffer,
+                          const FftData& G,
+                          size_t num_partitions,
+                          std::vector<std::vector<FftData>>* H) {
+  rtc::ArrayView<const std::vector<FftData>> render_buffer_data =
+      render_buffer.GetFftBuffer();
+  const size_t num_render_channels = render_buffer_data[0].size();
+  const size_t lim1 = std::min(
+      render_buffer_data.size() - render_buffer.Position(), num_partitions);
+  const size_t lim2 = num_partitions;
+  constexpr size_t kNumFourBinBands = kFftLengthBy2 / 4;
+
+  size_t X_partition = render_buffer.Position();
+  size_t limit = lim1;
+  size_t p = 0;
+  do {
+    for (; p < limit; ++p, ++X_partition) {
+      for (size_t ch = 0; ch < num_render_channels; ++ch) {
+        FftData& H_p_ch = (*H)[p][ch];
+        const FftData& X = render_buffer_data[X_partition][ch];
+        for (size_t k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) {
+          const float32x4_t G_re = vld1q_f32(&G.re[k]);
+          const float32x4_t G_im = vld1q_f32(&G.im[k]);
+          const float32x4_t X_re = vld1q_f32(&X.re[k]);
+          const float32x4_t X_im = vld1q_f32(&X.im[k]);
+          const float32x4_t H_re = vld1q_f32(&H_p_ch.re[k]);
+          const float32x4_t H_im = vld1q_f32(&H_p_ch.im[k]);
+          const float32x4_t a = vmulq_f32(X_re, G_re);
+          const float32x4_t e = vmlaq_f32(a, X_im, G_im);
+          const float32x4_t c = vmulq_f32(X_re, G_im);
+          const float32x4_t f = vmlsq_f32(c, X_im, G_re);
+          const float32x4_t g = vaddq_f32(H_re, e);
+          const float32x4_t h = vaddq_f32(H_im, f);
+          vst1q_f32(&H_p_ch.re[k], g);
+          vst1q_f32(&H_p_ch.im[k], h);
+        }
+      }
+    }
+
+    X_partition = 0;
+    limit = lim2;
+  } while (p < lim2);
+
+  X_partition = render_buffer.Position();
+  limit = lim1;
+  p = 0;
+  do {
+    for (; p < limit; ++p, ++X_partition) {
+      for (size_t ch = 0; ch < num_render_channels; ++ch) {
+        FftData& H_p_ch = (*H)[p][ch];
+        const FftData& X = render_buffer_data[X_partition][ch];
+
+        H_p_ch.re[kFftLengthBy2] += X.re[kFftLengthBy2] * G.re[kFftLengthBy2] +
+                                    X.im[kFftLengthBy2] * G.im[kFftLengthBy2];
+        H_p_ch.im[kFftLengthBy2] += X.re[kFftLengthBy2] * G.im[kFftLengthBy2] -
+                                    X.im[kFftLengthBy2] * G.re[kFftLengthBy2];
+      }
+    }
+    X_partition = 0;
+    limit = lim2;
+  } while (p < lim2);
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Adapts the filter partitions. (SSE2 variant)
(SSE2 variant) +void AdaptPartitions_Sse2(const RenderBuffer& render_buffer, + const FftData& G, + size_t num_partitions, + std::vector>* H) { + rtc::ArrayView> render_buffer_data = + render_buffer.GetFftBuffer(); + const size_t num_render_channels = render_buffer_data[0].size(); + const size_t lim1 = std::min( + render_buffer_data.size() - render_buffer.Position(), num_partitions); + const size_t lim2 = num_partitions; + constexpr size_t kNumFourBinBands = kFftLengthBy2 / 4; + + size_t X_partition = render_buffer.Position(); + size_t limit = lim1; + size_t p = 0; + do { + for (; p < limit; ++p, ++X_partition) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + FftData& H_p_ch = (*H)[p][ch]; + const FftData& X = render_buffer_data[X_partition][ch]; + + for (size_t k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) { + const __m128 G_re = _mm_loadu_ps(&G.re[k]); + const __m128 G_im = _mm_loadu_ps(&G.im[k]); + const __m128 X_re = _mm_loadu_ps(&X.re[k]); + const __m128 X_im = _mm_loadu_ps(&X.im[k]); + const __m128 H_re = _mm_loadu_ps(&H_p_ch.re[k]); + const __m128 H_im = _mm_loadu_ps(&H_p_ch.im[k]); + const __m128 a = _mm_mul_ps(X_re, G_re); + const __m128 b = _mm_mul_ps(X_im, G_im); + const __m128 c = _mm_mul_ps(X_re, G_im); + const __m128 d = _mm_mul_ps(X_im, G_re); + const __m128 e = _mm_add_ps(a, b); + const __m128 f = _mm_sub_ps(c, d); + const __m128 g = _mm_add_ps(H_re, e); + const __m128 h = _mm_add_ps(H_im, f); + _mm_storeu_ps(&H_p_ch.re[k], g); + _mm_storeu_ps(&H_p_ch.im[k], h); + } + } + } + X_partition = 0; + limit = lim2; + } while (p < lim2); + + X_partition = render_buffer.Position(); + limit = lim1; + p = 0; + do { + for (; p < limit; ++p, ++X_partition) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + FftData& H_p_ch = (*H)[p][ch]; + const FftData& X = render_buffer_data[X_partition][ch]; + + H_p_ch.re[kFftLengthBy2] += X.re[kFftLengthBy2] * G.re[kFftLengthBy2] + + X.im[kFftLengthBy2] * G.im[kFftLengthBy2]; + H_p_ch.im[kFftLengthBy2] += X.re[kFftLengthBy2] * G.im[kFftLengthBy2] - + X.im[kFftLengthBy2] * G.re[kFftLengthBy2]; + } + } + + X_partition = 0; + limit = lim2; + } while (p < lim2); +} +#endif + +// Produces the filter output. +void ApplyFilter(const RenderBuffer& render_buffer, + size_t num_partitions, + const std::vector>& H, + FftData* S) { + S->re.fill(0.f); + S->im.fill(0.f); + + rtc::ArrayView> render_buffer_data = + render_buffer.GetFftBuffer(); + size_t index = render_buffer.Position(); + const size_t num_render_channels = render_buffer_data[index].size(); + for (size_t p = 0; p < num_partitions; ++p) { + RTC_DCHECK_EQ(num_render_channels, H[p].size()); + for (size_t ch = 0; ch < num_render_channels; ++ch) { + const FftData& X_p_ch = render_buffer_data[index][ch]; + const FftData& H_p_ch = H[p][ch]; + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + S->re[k] += X_p_ch.re[k] * H_p_ch.re[k] - X_p_ch.im[k] * H_p_ch.im[k]; + S->im[k] += X_p_ch.re[k] * H_p_ch.im[k] + X_p_ch.im[k] * H_p_ch.re[k]; + } + } + index = index < (render_buffer_data.size() - 1) ? index + 1 : 0; + } +} + +#if defined(WEBRTC_HAS_NEON) +// Produces the filter output (Neon variant). 
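+// Note on the NEON intrinsics used below: vmlaq_f32(a, b, c) returns
+// a + b * c and vmlsq_f32(a, b, c) returns a - b * c, so each complex
+// multiply-accumulate takes two fused operations per component instead of
+// four separate multiplies and adds.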
+void ApplyFilter_Neon(const RenderBuffer& render_buffer, + size_t num_partitions, + const std::vector>& H, + FftData* S) { + // const RenderBuffer& render_buffer, + // rtc::ArrayView H, + // FftData* S) { + RTC_DCHECK_GE(H.size(), H.size() - 1); + S->Clear(); + + rtc::ArrayView> render_buffer_data = + render_buffer.GetFftBuffer(); + const size_t num_render_channels = render_buffer_data[0].size(); + const size_t lim1 = std::min( + render_buffer_data.size() - render_buffer.Position(), num_partitions); + const size_t lim2 = num_partitions; + constexpr size_t kNumFourBinBands = kFftLengthBy2 / 4; + + size_t X_partition = render_buffer.Position(); + size_t p = 0; + size_t limit = lim1; + do { + for (; p < limit; ++p, ++X_partition) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + const FftData& H_p_ch = H[p][ch]; + const FftData& X = render_buffer_data[X_partition][ch]; + for (size_t k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) { + const float32x4_t X_re = vld1q_f32(&X.re[k]); + const float32x4_t X_im = vld1q_f32(&X.im[k]); + const float32x4_t H_re = vld1q_f32(&H_p_ch.re[k]); + const float32x4_t H_im = vld1q_f32(&H_p_ch.im[k]); + const float32x4_t S_re = vld1q_f32(&S->re[k]); + const float32x4_t S_im = vld1q_f32(&S->im[k]); + const float32x4_t a = vmulq_f32(X_re, H_re); + const float32x4_t e = vmlsq_f32(a, X_im, H_im); + const float32x4_t c = vmulq_f32(X_re, H_im); + const float32x4_t f = vmlaq_f32(c, X_im, H_re); + const float32x4_t g = vaddq_f32(S_re, e); + const float32x4_t h = vaddq_f32(S_im, f); + vst1q_f32(&S->re[k], g); + vst1q_f32(&S->im[k], h); + } + } + } + limit = lim2; + X_partition = 0; + } while (p < lim2); + + X_partition = render_buffer.Position(); + p = 0; + limit = lim1; + do { + for (; p < limit; ++p, ++X_partition) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + const FftData& H_p_ch = H[p][ch]; + const FftData& X = render_buffer_data[X_partition][ch]; + S->re[kFftLengthBy2] += X.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] - + X.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2]; + S->im[kFftLengthBy2] += X.re[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2] + + X.im[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2]; + } + } + limit = lim2; + X_partition = 0; + } while (p < lim2); +} +#endif + +#if defined(WEBRTC_ARCH_X86_FAMILY) +// Produces the filter output (SSE2 variant). 
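+// As in the other vectorized variants, the circular render buffer is
+// traversed in two linear runs: first from Position() towards the end of the
+// buffer (lim1 partitions), then from index 0 for the remaining partitions
+// (up to lim2). This removes the per-partition wrap-around branch that the
+// reference implementation uses.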
+void ApplyFilter_Sse2(const RenderBuffer& render_buffer, + size_t num_partitions, + const std::vector>& H, + FftData* S) { + // const RenderBuffer& render_buffer, + // rtc::ArrayView H, + // FftData* S) { + RTC_DCHECK_GE(H.size(), H.size() - 1); + S->re.fill(0.f); + S->im.fill(0.f); + + rtc::ArrayView> render_buffer_data = + render_buffer.GetFftBuffer(); + const size_t num_render_channels = render_buffer_data[0].size(); + const size_t lim1 = std::min( + render_buffer_data.size() - render_buffer.Position(), num_partitions); + const size_t lim2 = num_partitions; + constexpr size_t kNumFourBinBands = kFftLengthBy2 / 4; + + size_t X_partition = render_buffer.Position(); + size_t p = 0; + size_t limit = lim1; + do { + for (; p < limit; ++p, ++X_partition) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + const FftData& H_p_ch = H[p][ch]; + const FftData& X = render_buffer_data[X_partition][ch]; + for (size_t k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) { + const __m128 X_re = _mm_loadu_ps(&X.re[k]); + const __m128 X_im = _mm_loadu_ps(&X.im[k]); + const __m128 H_re = _mm_loadu_ps(&H_p_ch.re[k]); + const __m128 H_im = _mm_loadu_ps(&H_p_ch.im[k]); + const __m128 S_re = _mm_loadu_ps(&S->re[k]); + const __m128 S_im = _mm_loadu_ps(&S->im[k]); + const __m128 a = _mm_mul_ps(X_re, H_re); + const __m128 b = _mm_mul_ps(X_im, H_im); + const __m128 c = _mm_mul_ps(X_re, H_im); + const __m128 d = _mm_mul_ps(X_im, H_re); + const __m128 e = _mm_sub_ps(a, b); + const __m128 f = _mm_add_ps(c, d); + const __m128 g = _mm_add_ps(S_re, e); + const __m128 h = _mm_add_ps(S_im, f); + _mm_storeu_ps(&S->re[k], g); + _mm_storeu_ps(&S->im[k], h); + } + } + } + limit = lim2; + X_partition = 0; + } while (p < lim2); + + X_partition = render_buffer.Position(); + p = 0; + limit = lim1; + do { + for (; p < limit; ++p, ++X_partition) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + const FftData& H_p_ch = H[p][ch]; + const FftData& X = render_buffer_data[X_partition][ch]; + S->re[kFftLengthBy2] += X.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] - + X.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2]; + S->im[kFftLengthBy2] += X.re[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2] + + X.im[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2]; + } + } + limit = lim2; + X_partition = 0; + } while (p < lim2); +} +#endif + +} // namespace aec3 + +namespace { + +// Ensures that the newly added filter partitions after a size increase are set +// to zero. 
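+// For example, when the filter grows from 10 to 12 partitions, this clears
+// H[10] and H[11] for every render channel so that the new taps start the
+// adaptation from zero.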
+void ZeroFilter(size_t old_size, + size_t new_size, + std::vector>* H) { + RTC_DCHECK_GE(H->size(), old_size); + RTC_DCHECK_GE(H->size(), new_size); + + for (size_t p = old_size; p < new_size; ++p) { + RTC_DCHECK_EQ((*H)[p].size(), (*H)[0].size()); + for (size_t ch = 0; ch < (*H)[0].size(); ++ch) { + (*H)[p][ch].Clear(); + } + } +} + +} // namespace + +AdaptiveFirFilter::AdaptiveFirFilter(size_t max_size_partitions, + size_t initial_size_partitions, + size_t size_change_duration_blocks, + size_t num_render_channels, + Aec3Optimization optimization, + ApmDataDumper* data_dumper) + : data_dumper_(data_dumper), + fft_(), + optimization_(optimization), + num_render_channels_(num_render_channels), + max_size_partitions_(max_size_partitions), + size_change_duration_blocks_( + static_cast(size_change_duration_blocks)), + current_size_partitions_(initial_size_partitions), + target_size_partitions_(initial_size_partitions), + old_target_size_partitions_(initial_size_partitions), + H_(max_size_partitions_, std::vector(num_render_channels_)) { + RTC_DCHECK(data_dumper_); + RTC_DCHECK_GE(max_size_partitions, initial_size_partitions); + + RTC_DCHECK_LT(0, size_change_duration_blocks_); + one_by_size_change_duration_blocks_ = 1.f / size_change_duration_blocks_; + + ZeroFilter(0, max_size_partitions_, &H_); + + SetSizePartitions(current_size_partitions_, true); +} + +AdaptiveFirFilter::~AdaptiveFirFilter() = default; + +void AdaptiveFirFilter::HandleEchoPathChange() { + // TODO(peah): Check the value and purpose of the code below. + ZeroFilter(current_size_partitions_, max_size_partitions_, &H_); +} + +void AdaptiveFirFilter::SetSizePartitions(size_t size, bool immediate_effect) { + RTC_DCHECK_EQ(max_size_partitions_, H_.capacity()); + RTC_DCHECK_LE(size, max_size_partitions_); + + target_size_partitions_ = std::min(max_size_partitions_, size); + if (immediate_effect) { + size_t old_size_partitions_ = current_size_partitions_; + current_size_partitions_ = old_target_size_partitions_ = + target_size_partitions_; + ZeroFilter(old_size_partitions_, current_size_partitions_, &H_); + + partition_to_constrain_ = + std::min(partition_to_constrain_, current_size_partitions_ - 1); + size_change_counter_ = 0; + } else { + size_change_counter_ = size_change_duration_blocks_; + } +} + +void AdaptiveFirFilter::UpdateSize() { + RTC_DCHECK_GE(size_change_duration_blocks_, size_change_counter_); + size_t old_size_partitions_ = current_size_partitions_; + if (size_change_counter_ > 0) { + --size_change_counter_; + + auto average = [](float from, float to, float from_weight) { + return from * from_weight + to * (1.f - from_weight); + }; + + float change_factor = + size_change_counter_ * one_by_size_change_duration_blocks_; + + current_size_partitions_ = average(old_target_size_partitions_, + target_size_partitions_, change_factor); + + partition_to_constrain_ = + std::min(partition_to_constrain_, current_size_partitions_ - 1); + } else { + current_size_partitions_ = old_target_size_partitions_ = + target_size_partitions_; + } + ZeroFilter(old_size_partitions_, current_size_partitions_, &H_); + RTC_DCHECK_LE(0, size_change_counter_); +} + +void AdaptiveFirFilter::Filter(const RenderBuffer& render_buffer, + FftData* S) const { + RTC_DCHECK(S); + switch (optimization_) { +#if defined(WEBRTC_ARCH_X86_FAMILY) + case Aec3Optimization::kSse2: + aec3::ApplyFilter_Sse2(render_buffer, current_size_partitions_, H_, S); + break; +#endif +#if defined(WEBRTC_HAS_NEON) + case Aec3Optimization::kNeon: + 
aec3::ApplyFilter_Neon(render_buffer, current_size_partitions_, H_, S); + break; +#endif + default: + aec3::ApplyFilter(render_buffer, current_size_partitions_, H_, S); + } +} + +void AdaptiveFirFilter::Adapt(const RenderBuffer& render_buffer, + const FftData& G) { + // Adapt the filter and update the filter size. + AdaptAndUpdateSize(render_buffer, G); + + // Constrain the filter partitions in a cyclic manner. + Constrain(); +} + +void AdaptiveFirFilter::Adapt(const RenderBuffer& render_buffer, + const FftData& G, + std::vector* impulse_response) { + // Adapt the filter and update the filter size. + AdaptAndUpdateSize(render_buffer, G); + + // Constrain the filter partitions in a cyclic manner. + ConstrainAndUpdateImpulseResponse(impulse_response); +} + +void AdaptiveFirFilter::ComputeFrequencyResponse( + std::vector>* H2) const { + RTC_DCHECK_GE(max_size_partitions_, H2->capacity()); + + H2->resize(current_size_partitions_); + + switch (optimization_) { +#if defined(WEBRTC_ARCH_X86_FAMILY) + case Aec3Optimization::kSse2: + aec3::ComputeFrequencyResponse_Sse2(current_size_partitions_, H_, H2); + break; +#endif +#if defined(WEBRTC_HAS_NEON) + case Aec3Optimization::kNeon: + aec3::ComputeFrequencyResponse_Neon(current_size_partitions_, H_, H2); + break; +#endif + default: + aec3::ComputeFrequencyResponse(current_size_partitions_, H_, H2); + } +} + +void AdaptiveFirFilter::AdaptAndUpdateSize(const RenderBuffer& render_buffer, + const FftData& G) { + // Update the filter size if needed. + UpdateSize(); + + // Adapt the filter. + switch (optimization_) { +#if defined(WEBRTC_ARCH_X86_FAMILY) + case Aec3Optimization::kSse2: + aec3::AdaptPartitions_Sse2(render_buffer, G, current_size_partitions_, + &H_); + break; +#endif +#if defined(WEBRTC_HAS_NEON) + case Aec3Optimization::kNeon: + aec3::AdaptPartitions_Neon(render_buffer, G, current_size_partitions_, + &H_); + break; +#endif + default: + aec3::AdaptPartitions(render_buffer, G, current_size_partitions_, &H_); + } +} + +// Constrains the partition of the frequency domain filter to be limited in +// time via setting the relevant time-domain coefficients to zero and updates +// the corresponding values in an externally stored impulse response estimate. 
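+// Zeroing the second half of the inverse transform confines each partition to
+// a causal 64-tap time-domain segment, which keeps the partitioned
+// overlap-save filtering linear rather than circular. Only one partition is
+// constrained per call, cycling through partition_to_constrain_, so the cost
+// of the IFFT/FFT pair is amortized over the block processing.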
+void AdaptiveFirFilter::ConstrainAndUpdateImpulseResponse(
+    std::vector<float>* impulse_response) {
+  RTC_DCHECK_EQ(GetTimeDomainLength(max_size_partitions_),
+                impulse_response->capacity());
+  impulse_response->resize(GetTimeDomainLength(current_size_partitions_));
+  std::array<float, kFftLength> h;
+  std::fill(
+      impulse_response->begin() + partition_to_constrain_ * kFftLengthBy2,
+      impulse_response->begin() + (partition_to_constrain_ + 1) * kFftLengthBy2,
+      0.f);
+
+  for (size_t ch = 0; ch < num_render_channels_; ++ch) {
+    fft_.Ifft(H_[partition_to_constrain_][ch], &h);
+
+    static constexpr float kScale = 1.0f / kFftLengthBy2;
+    std::for_each(h.begin(), h.begin() + kFftLengthBy2,
+                  [](float& a) { a *= kScale; });
+    std::fill(h.begin() + kFftLengthBy2, h.end(), 0.f);
+
+    if (ch == 0) {
+      std::copy(
+          h.begin(), h.begin() + kFftLengthBy2,
+          impulse_response->begin() + partition_to_constrain_ * kFftLengthBy2);
+    } else {
+      for (size_t k = 0, j = partition_to_constrain_ * kFftLengthBy2;
+           k < kFftLengthBy2; ++k, ++j) {
+        if (fabsf((*impulse_response)[j]) < fabsf(h[k])) {
+          (*impulse_response)[j] = h[k];
+        }
+      }
+    }
+
+    fft_.Fft(&h, &H_[partition_to_constrain_][ch]);
+  }
+
+  partition_to_constrain_ =
+      partition_to_constrain_ < (current_size_partitions_ - 1)
+          ? partition_to_constrain_ + 1
+          : 0;
+}
+
+// Constrains a partition of the frequency domain filter to be limited in
+// time via setting the relevant time-domain coefficients to zero.
+void AdaptiveFirFilter::Constrain() {
+  std::array<float, kFftLength> h;
+  for (size_t ch = 0; ch < num_render_channels_; ++ch) {
+    fft_.Ifft(H_[partition_to_constrain_][ch], &h);
+
+    static constexpr float kScale = 1.0f / kFftLengthBy2;
+    std::for_each(h.begin(), h.begin() + kFftLengthBy2,
+                  [](float& a) { a *= kScale; });
+    std::fill(h.begin() + kFftLengthBy2, h.end(), 0.f);
+
+    fft_.Fft(&h, &H_[partition_to_constrain_][ch]);
+  }
+
+  partition_to_constrain_ =
+      partition_to_constrain_ < (current_size_partitions_ - 1)
+          ? partition_to_constrain_ + 1
+          : 0;
+}
+
+void AdaptiveFirFilter::ScaleFilter(float factor) {
+  for (auto& H_p : H_) {
+    for (auto& H_p_ch : H_p) {
+      for (auto& re : H_p_ch.re) {
+        re *= factor;
+      }
+      for (auto& im : H_p_ch.im) {
+        im *= factor;
+      }
+    }
+  }
+}
+
+// Set the filter coefficients.
+void AdaptiveFirFilter::SetFilter(size_t num_partitions,
+                                  const std::vector<std::vector<FftData>>& H) {
+  const size_t min_num_partitions =
+      std::min(current_size_partitions_, num_partitions);
+  for (size_t p = 0; p < min_num_partitions; ++p) {
+    RTC_DCHECK_EQ(H_[p].size(), H[p].size());
+    RTC_DCHECK_EQ(num_render_channels_, H_[p].size());
+
+    for (size_t ch = 0; ch < num_render_channels_; ++ch) {
+      std::copy(H[p][ch].re.begin(), H[p][ch].re.end(), H_[p][ch].re.begin());
+      std::copy(H[p][ch].im.begin(), H[p][ch].im.end(), H_[p][ch].im.begin());
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/adaptive_fir_filter.h b/audio_processing/aec3/adaptive_fir_filter.h
new file mode 100644
index 0000000..6ff36ca
--- /dev/null
+++ b/audio_processing/aec3/adaptive_fir_filter.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/aec3_fft.h"
+#include "audio_processing/aec3/fft_data.h"
+#include "audio_processing/aec3/render_buffer.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+namespace aec3 {
+// Computes and stores the frequency response of the filter.
+void ComputeFrequencyResponse(
+    size_t num_partitions,
+    const std::vector<std::vector<FftData>>& H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2);
+#if defined(WEBRTC_HAS_NEON)
+void ComputeFrequencyResponse_Neon(
+    size_t num_partitions,
+    const std::vector<std::vector<FftData>>& H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void ComputeFrequencyResponse_Sse2(
+    size_t num_partitions,
+    const std::vector<std::vector<FftData>>& H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2);
+#endif
+
+// Adapts the filter partitions.
+void AdaptPartitions(const RenderBuffer& render_buffer,
+                     const FftData& G,
+                     size_t num_partitions,
+                     std::vector<std::vector<FftData>>* H);
+#if defined(WEBRTC_HAS_NEON)
+void AdaptPartitions_Neon(const RenderBuffer& render_buffer,
+                          const FftData& G,
+                          size_t num_partitions,
+                          std::vector<std::vector<FftData>>* H);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void AdaptPartitions_Sse2(const RenderBuffer& render_buffer,
+                          const FftData& G,
+                          size_t num_partitions,
+                          std::vector<std::vector<FftData>>* H);
+#endif
+
+// Produces the filter output.
+void ApplyFilter(const RenderBuffer& render_buffer,
+                 size_t num_partitions,
+                 const std::vector<std::vector<FftData>>& H,
+                 FftData* S);
+#if defined(WEBRTC_HAS_NEON)
+void ApplyFilter_Neon(const RenderBuffer& render_buffer,
+                      size_t num_partitions,
+                      const std::vector<std::vector<FftData>>& H,
+                      FftData* S);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void ApplyFilter_Sse2(const RenderBuffer& render_buffer,
+                      size_t num_partitions,
+                      const std::vector<std::vector<FftData>>& H,
+                      FftData* S);
+#endif
+
+}  // namespace aec3
+
+// Provides a frequency domain adaptive filter functionality.
+class AdaptiveFirFilter {
+ public:
+  AdaptiveFirFilter(size_t max_size_partitions,
+                    size_t initial_size_partitions,
+                    size_t size_change_duration_blocks,
+                    size_t num_render_channels,
+                    Aec3Optimization optimization,
+                    ApmDataDumper* data_dumper);
+
+  ~AdaptiveFirFilter();
+
+  AdaptiveFirFilter(const AdaptiveFirFilter&) = delete;
+  AdaptiveFirFilter& operator=(const AdaptiveFirFilter&) = delete;
+
+  // Produces the output of the filter.
+  void Filter(const RenderBuffer& render_buffer, FftData* S) const;
+
+  // Adapts the filter and updates an externally stored impulse response
+  // estimate.
+  void Adapt(const RenderBuffer& render_buffer,
+             const FftData& G,
+             std::vector<float>* impulse_response);
+
+  // Adapts the filter.
+  void Adapt(const RenderBuffer& render_buffer, const FftData& G);
+
+  // Receives reports that known echo path changes have occurred and adjusts
+  // the filter adaptation accordingly.
+  void HandleEchoPathChange();
+
+  // Returns the filter size.
+  size_t SizePartitions() const { return current_size_partitions_; }
+
+  // Sets the filter size.
+  void SetSizePartitions(size_t size, bool immediate_effect);
+
+  // Computes the frequency responses for the filter partitions.
+  void ComputeFrequencyResponse(
+      std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) const;
+
+  // Returns the maximum number of partitions for the filter.
+ size_t max_filter_size_partitions() const { return max_size_partitions_; } + + void DumpFilter(const char* name_frequency_domain) { + for (size_t p = 0; p < max_size_partitions_; ++p) { + data_dumper_->DumpRaw(name_frequency_domain, H_[p][0].re); + data_dumper_->DumpRaw(name_frequency_domain, H_[p][0].im); + } + } + + // Scale the filter impulse response and spectrum by a factor. + void ScaleFilter(float factor); + + // Set the filter coefficients. + void SetFilter(size_t num_partitions, + const std::vector>& H); + + // Gets the filter coefficients. + const std::vector>& GetFilter() const { return H_; } + + private: + // Adapts the filter and updates the filter size. + void AdaptAndUpdateSize(const RenderBuffer& render_buffer, const FftData& G); + + // Constrain the filter partitions in a cyclic manner. + void Constrain(); + // Constrains the filter in a cyclic manner and updates the corresponding + // values in the supplied impulse response. + void ConstrainAndUpdateImpulseResponse(std::vector* impulse_response); + + // Gradually Updates the current filter size towards the target size. + void UpdateSize(); + + ApmDataDumper* const data_dumper_; + const Aec3Fft fft_; + const Aec3Optimization optimization_; + const size_t num_render_channels_; + const size_t max_size_partitions_; + const int size_change_duration_blocks_; + float one_by_size_change_duration_blocks_; + size_t current_size_partitions_; + size_t target_size_partitions_; + size_t old_target_size_partitions_; + int size_change_counter_ = 0; + std::vector> H_; + size_t partition_to_constrain_ = 0; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_H_ diff --git a/audio_processing/aec3/adaptive_fir_filter_erl.cc b/audio_processing/aec3/adaptive_fir_filter_erl.cc new file mode 100644 index 0000000..648348d --- /dev/null +++ b/audio_processing/aec3/adaptive_fir_filter_erl.cc @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/aec3/adaptive_fir_filter_erl.h" + +#include +#include + +#if defined(WEBRTC_HAS_NEON) +#include +#endif +#if defined(WEBRTC_ARCH_X86_FAMILY) +#include +#endif + +namespace webrtc { + +namespace aec3 { + +// Computes and stores the echo return loss estimate of the filter, which is the +// sum of the partition frequency responses. +void ErlComputer(const std::vector>& H2, + rtc::ArrayView erl) { + std::fill(erl.begin(), erl.end(), 0.f); + for (auto& H2_j : H2) { + std::transform(H2_j.begin(), H2_j.end(), erl.begin(), erl.begin(), + std::plus()); + } +} + +#if defined(WEBRTC_HAS_NEON) +// Computes and stores the echo return loss estimate of the filter, which is the +// sum of the partition frequency responses. 
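+// The vectorized variants below process four bins per iteration and handle
+// the last bin (kFftLengthBy2) separately, since the kFftLengthBy2Plus1 = 65
+// bins of the half spectrum are one more than a multiple of four.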
+void ErlComputer_NEON( + const std::vector>& H2, + rtc::ArrayView erl) { + std::fill(erl.begin(), erl.end(), 0.f); + for (auto& H2_j : H2) { + for (size_t k = 0; k < kFftLengthBy2; k += 4) { + const float32x4_t H2_j_k = vld1q_f32(&H2_j[k]); + float32x4_t erl_k = vld1q_f32(&erl[k]); + erl_k = vaddq_f32(erl_k, H2_j_k); + vst1q_f32(&erl[k], erl_k); + } + erl[kFftLengthBy2] += H2_j[kFftLengthBy2]; + } +} +#endif + +#if defined(WEBRTC_ARCH_X86_FAMILY) +// Computes and stores the echo return loss estimate of the filter, which is the +// sum of the partition frequency responses. +void ErlComputer_SSE2( + const std::vector>& H2, + rtc::ArrayView erl) { + std::fill(erl.begin(), erl.end(), 0.f); + for (auto& H2_j : H2) { + for (size_t k = 0; k < kFftLengthBy2; k += 4) { + const __m128 H2_j_k = _mm_loadu_ps(&H2_j[k]); + __m128 erl_k = _mm_loadu_ps(&erl[k]); + erl_k = _mm_add_ps(erl_k, H2_j_k); + _mm_storeu_ps(&erl[k], erl_k); + } + erl[kFftLengthBy2] += H2_j[kFftLengthBy2]; + } +} +#endif + +} // namespace aec3 + +void ComputeErl(const Aec3Optimization& optimization, + const std::vector>& H2, + rtc::ArrayView erl) { + RTC_DCHECK_EQ(kFftLengthBy2Plus1, erl.size()); + // Update the frequency response and echo return loss for the filter. + switch (optimization) { +#if defined(WEBRTC_ARCH_X86_FAMILY) + case Aec3Optimization::kSse2: + aec3::ErlComputer_SSE2(H2, erl); + break; +#endif +#if defined(WEBRTC_HAS_NEON) + case Aec3Optimization::kNeon: + + aec3::ErlComputer_NEON(H2, erl); + break; +#endif + default: + aec3::ErlComputer(H2, erl); + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/adaptive_fir_filter_erl.h b/audio_processing/aec3/adaptive_fir_filter_erl.h new file mode 100644 index 0000000..879cefb --- /dev/null +++ b/audio_processing/aec3/adaptive_fir_filter_erl.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_ERL_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_ERL_H_ + +#include + +#include +#include + +#include "rtc_base/array_view.h" +#include "audio_processing/aec3/aec3_common.h" +#include "rtc_base/system/arch.h" + +namespace webrtc { +namespace aec3 { + +// Computes and stores the echo return loss estimate of the filter, which is the +// sum of the partition frequency responses. +void ErlComputer(const std::vector>& H2, + rtc::ArrayView erl); +#if defined(WEBRTC_HAS_NEON) +void ErlComputer_NEON( + const std::vector>& H2, + rtc::ArrayView erl); +#endif +#if defined(WEBRTC_ARCH_X86_FAMILY) +void ErlComputer_SSE2( + const std::vector>& H2, + rtc::ArrayView erl); +#endif + +} // namespace aec3 + +// Computes the echo return loss based on a frequency response. 
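+// Here erl[k] is the sum over all filter partitions p of the partition power
+// responses H2[p][k], i.e. the accumulated power response of the linear
+// filter at frequency bin k.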
+void ComputeErl(const Aec3Optimization& optimization, + const std::vector>& H2, + rtc::ArrayView erl); + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_ERL_H_ diff --git a/audio_processing/aec3/adaptive_fir_filter_erl_unittest.cc b/audio_processing/aec3/adaptive_fir_filter_erl_unittest.cc new file mode 100644 index 0000000..069fc9f --- /dev/null +++ b/audio_processing/aec3/adaptive_fir_filter_erl_unittest.cc @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/adaptive_fir_filter_erl.h" + +#include +#include + +#include "rtc_base/system/arch.h" +#if defined(WEBRTC_ARCH_X86_FAMILY) +#include +#endif + +#include "system_wrappers/include/cpu_features_wrapper.h" +#include "test/gtest.h" + +namespace webrtc { +namespace aec3 { + +#if defined(WEBRTC_HAS_NEON) +// Verifies that the optimized method for echo return loss computation is +// bitexact to the reference counterpart. +TEST(AdaptiveFirFilter, UpdateErlNeonOptimization) { + const size_t kNumPartitions = 12; + std::vector> H2(kNumPartitions); + std::array erl; + std::array erl_NEON; + + for (size_t j = 0; j < H2.size(); ++j) { + for (size_t k = 0; k < H2[j].size(); ++k) { + H2[j][k] = k + j / 3.f; + } + } + + ErlComputer(H2, erl); + ErlComputer_NEON(H2, erl_NEON); + + for (size_t j = 0; j < erl.size(); ++j) { + EXPECT_FLOAT_EQ(erl[j], erl_NEON[j]); + } +} + +#endif + +#if defined(WEBRTC_ARCH_X86_FAMILY) +// Verifies that the optimized method for echo return loss computation is +// bitexact to the reference counterpart. +TEST(AdaptiveFirFilter, UpdateErlSse2Optimization) { + bool use_sse2 = (WebRtc_GetCPUInfo(kSSE2) != 0); + if (use_sse2) { + const size_t kNumPartitions = 12; + std::vector> H2(kNumPartitions); + std::array erl; + std::array erl_SSE2; + + for (size_t j = 0; j < H2.size(); ++j) { + for (size_t k = 0; k < H2[j].size(); ++k) { + H2[j][k] = k + j / 3.f; + } + } + + ErlComputer(H2, erl); + ErlComputer_SSE2(H2, erl_SSE2); + + for (size_t j = 0; j < erl.size(); ++j) { + EXPECT_FLOAT_EQ(erl[j], erl_SSE2[j]); + } + } +} + +#endif + +} // namespace aec3 +} // namespace webrtc diff --git a/audio_processing/aec3/adaptive_fir_filter_unittest.cc b/audio_processing/aec3/adaptive_fir_filter_unittest.cc new file mode 100644 index 0000000..e99ff2a --- /dev/null +++ b/audio_processing/aec3/adaptive_fir_filter_unittest.cc @@ -0,0 +1,485 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/adaptive_fir_filter.h" + +// Defines WEBRTC_ARCH_X86_FAMILY, used below. 
+#include + +#include +#include +#include + +#include "rtc_base/system/arch.h" +#if defined(WEBRTC_ARCH_X86_FAMILY) +#include +#endif + +#include "modules/audio_processing/aec3/adaptive_fir_filter_erl.h" +#include "modules/audio_processing/aec3/aec3_fft.h" +#include "modules/audio_processing/aec3/aec_state.h" +#include "modules/audio_processing/aec3/render_delay_buffer.h" +#include "modules/audio_processing/aec3/render_signal_analyzer.h" +#include "modules/audio_processing/aec3/shadow_filter_update_gain.h" +#include "modules/audio_processing/logging/apm_data_dumper.h" +#include "modules/audio_processing/test/echo_canceller_test_tools.h" +#include "modules/audio_processing/utility/cascaded_biquad_filter.h" +#include "rtc_base/arraysize.h" +#include "rtc_base/numerics/safe_minmax.h" +#include "rtc_base/random.h" +#include "rtc_base/strings/string_builder.h" +#include "system_wrappers/include/cpu_features_wrapper.h" +#include "test/gtest.h" + +namespace webrtc { +namespace aec3 { +namespace { + +std::string ProduceDebugText(size_t num_render_channels, size_t delay) { + rtc::StringBuilder ss; + ss << "delay: " << delay << ", "; + ss << "num_render_channels:" << num_render_channels; + return ss.Release(); +} + +} // namespace + +#if defined(WEBRTC_HAS_NEON) +// Verifies that the optimized methods for filter adaptation are similar to +// their reference counterparts. +TEST(AdaptiveFirFilter, FilterAdaptationNeonOptimizations) { + for (size_t num_partitions : {2, 5, 12, 30, 50}) { + for (size_t num_render_channels : {1, 2, 4, 8}) { + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(EchoCanceller3Config(), kSampleRateHz, + num_render_channels)); + Random random_generator(42U); + std::vector>> x( + kNumBands, + std::vector>(num_render_channels, + std::vector(kBlockSize, 0.f))); + FftData S_C; + FftData S_Neon; + FftData G; + Aec3Fft fft; + std::vector> H_C( + num_partitions, std::vector(num_render_channels)); + std::vector> H_Neon( + num_partitions, std::vector(num_render_channels)); + for (size_t p = 0; p < num_partitions; ++p) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + H_C[p][ch].Clear(); + H_Neon[p][ch].Clear(); + } + } + + for (size_t k = 0; k < 30; ++k) { + for (size_t band = 0; band < x.size(); ++band) { + for (size_t ch = 0; ch < x[band].size(); ++ch) { + RandomizeSampleVector(&random_generator, x[band][ch]); + } + } + render_delay_buffer->Insert(x); + if (k == 0) { + render_delay_buffer->Reset(); + } + render_delay_buffer->PrepareCaptureProcessing(); + } + auto* const render_buffer = render_delay_buffer->GetRenderBuffer(); + + for (size_t j = 0; j < G.re.size(); ++j) { + G.re[j] = j / 10001.f; + } + for (size_t j = 1; j < G.im.size() - 1; ++j) { + G.im[j] = j / 20001.f; + } + G.im[0] = 0.f; + G.im[G.im.size() - 1] = 0.f; + + AdaptPartitions_Neon(*render_buffer, G, num_partitions, &H_Neon); + AdaptPartitions(*render_buffer, G, num_partitions, &H_C); + AdaptPartitions_Neon(*render_buffer, G, num_partitions, &H_Neon); + AdaptPartitions(*render_buffer, G, num_partitions, &H_C); + + for (size_t p = 0; p < num_partitions; ++p) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + for (size_t j = 0; j < H_C[p][ch].re.size(); ++j) { + EXPECT_FLOAT_EQ(H_C[p][ch].re[j], H_Neon[p][ch].re[j]); + EXPECT_FLOAT_EQ(H_C[p][ch].im[j], H_Neon[p][ch].im[j]); + } + } + } + + ApplyFilter_Neon(*render_buffer, num_partitions, H_Neon, &S_Neon); + ApplyFilter(*render_buffer, 
num_partitions, H_C, &S_C); + for (size_t j = 0; j < S_C.re.size(); ++j) { + EXPECT_NEAR(S_C.re[j], S_Neon.re[j], fabs(S_C.re[j] * 0.00001f)); + EXPECT_NEAR(S_C.im[j], S_Neon.im[j], fabs(S_C.re[j] * 0.00001f)); + } + } + } +} + +// Verifies that the optimized method for frequency response computation is +// bitexact to the reference counterpart. +TEST(AdaptiveFirFilter, ComputeFrequencyResponseNeonOptimization) { + for (size_t num_partitions : {2, 5, 12, 30, 50}) { + for (size_t num_render_channels : {1, 2, 4, 8}) { + std::vector> H( + num_partitions, std::vector(num_render_channels)); + std::vector> H2(num_partitions); + std::vector> H2_Neon( + num_partitions); + + for (size_t p = 0; p < num_partitions; ++p) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + for (size_t k = 0; k < H[p][ch].re.size(); ++k) { + H[p][ch].re[k] = k + p / 3.f + ch; + H[p][ch].im[k] = p + k / 7.f - ch; + } + } + } + + ComputeFrequencyResponse(num_partitions, H, &H2); + ComputeFrequencyResponse_Neon(num_partitions, H, &H2_Neon); + + for (size_t p = 0; p < num_partitions; ++p) { + for (size_t k = 0; k < H2[p].size(); ++k) { + EXPECT_FLOAT_EQ(H2[p][k], H2_Neon[p][k]); + } + } + } + } +} +#endif + +#if defined(WEBRTC_ARCH_X86_FAMILY) +// Verifies that the optimized methods for filter adaptation are bitexact to +// their reference counterparts. +TEST(AdaptiveFirFilter, FilterAdaptationSse2Optimizations) { + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + + bool use_sse2 = (WebRtc_GetCPUInfo(kSSE2) != 0); + if (use_sse2) { + for (size_t num_partitions : {2, 5, 12, 30, 50}) { + for (size_t num_render_channels : {1, 2, 4, 8}) { + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(EchoCanceller3Config(), kSampleRateHz, + num_render_channels)); + Random random_generator(42U); + std::vector>> x( + kNumBands, + std::vector>( + num_render_channels, std::vector(kBlockSize, 0.f))); + FftData S_C; + FftData S_Sse2; + FftData G; + Aec3Fft fft; + std::vector> H_C( + num_partitions, std::vector(num_render_channels)); + std::vector> H_Sse2( + num_partitions, std::vector(num_render_channels)); + for (size_t p = 0; p < num_partitions; ++p) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + H_C[p][ch].Clear(); + H_Sse2[p][ch].Clear(); + } + } + + for (size_t k = 0; k < 500; ++k) { + for (size_t band = 0; band < x.size(); ++band) { + for (size_t ch = 0; ch < x[band].size(); ++ch) { + RandomizeSampleVector(&random_generator, x[band][ch]); + } + } + render_delay_buffer->Insert(x); + if (k == 0) { + render_delay_buffer->Reset(); + } + render_delay_buffer->PrepareCaptureProcessing(); + auto* const render_buffer = render_delay_buffer->GetRenderBuffer(); + + ApplyFilter_Sse2(*render_buffer, num_partitions, H_Sse2, &S_Sse2); + ApplyFilter(*render_buffer, num_partitions, H_C, &S_C); + for (size_t j = 0; j < S_C.re.size(); ++j) { + EXPECT_FLOAT_EQ(S_C.re[j], S_Sse2.re[j]); + EXPECT_FLOAT_EQ(S_C.im[j], S_Sse2.im[j]); + } + + std::for_each(G.re.begin(), G.re.end(), + [&](float& a) { a = random_generator.Rand(); }); + std::for_each(G.im.begin(), G.im.end(), + [&](float& a) { a = random_generator.Rand(); }); + + AdaptPartitions_Sse2(*render_buffer, G, num_partitions, &H_Sse2); + AdaptPartitions(*render_buffer, G, num_partitions, &H_C); + + for (size_t p = 0; p < num_partitions; ++p) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + for (size_t j = 0; j < H_C[p][ch].re.size(); ++j) { + EXPECT_FLOAT_EQ(H_C[p][ch].re[j], H_Sse2[p][ch].re[j]); + 
EXPECT_FLOAT_EQ(H_C[p][ch].im[j], H_Sse2[p][ch].im[j]);
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+// Verifies that the optimized method for frequency response computation is
+// bitexact to the reference counterpart.
+TEST(AdaptiveFirFilter, ComputeFrequencyResponseSse2Optimization) {
+  bool use_sse2 = (WebRtc_GetCPUInfo(kSSE2) != 0);
+  if (use_sse2) {
+    for (size_t num_partitions : {2, 5, 12, 30, 50}) {
+      for (size_t num_render_channels : {1, 2, 4, 8}) {
+        std::vector<std::vector<FftData>> H(
+            num_partitions, std::vector<FftData>(num_render_channels));
+        std::vector<std::array<float, kFftLengthBy2Plus1>> H2(num_partitions);
+        std::vector<std::array<float, kFftLengthBy2Plus1>> H2_Sse2(
+            num_partitions);
+
+        for (size_t p = 0; p < num_partitions; ++p) {
+          for (size_t ch = 0; ch < num_render_channels; ++ch) {
+            for (size_t k = 0; k < H[p][ch].re.size(); ++k) {
+              H[p][ch].re[k] = k + p / 3.f + ch;
+              H[p][ch].im[k] = p + k / 7.f - ch;
+            }
+          }
+        }
+
+        ComputeFrequencyResponse(num_partitions, H, &H2);
+        ComputeFrequencyResponse_Sse2(num_partitions, H, &H2_Sse2);
+
+        for (size_t p = 0; p < num_partitions; ++p) {
+          for (size_t k = 0; k < H2[p].size(); ++k) {
+            EXPECT_FLOAT_EQ(H2[p][k], H2_Sse2[p][k]);
+          }
+        }
+      }
+    }
+  }
+}
+
+#endif
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the check for non-null data dumper works.
+TEST(AdaptiveFirFilter, NullDataDumper) {
+  EXPECT_DEATH(AdaptiveFirFilter(9, 9, 250, 1, DetectOptimization(), nullptr),
+               "");
+}
+
+// Verifies that the check for non-null filter output works.
+TEST(AdaptiveFirFilter, NullFilterOutput) {
+  ApmDataDumper data_dumper(42);
+  AdaptiveFirFilter filter(9, 9, 250, 1, DetectOptimization(), &data_dumper);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(EchoCanceller3Config(), 48000, 1));
+  EXPECT_DEATH(filter.Filter(*render_delay_buffer->GetRenderBuffer(), nullptr),
+               "");
+}
+
+#endif
+
+// Verifies that the filter statistics can be accessed when filter statistics
+// are turned on.
+TEST(AdaptiveFirFilter, FilterStatisticsAccess) {
+  ApmDataDumper data_dumper(42);
+  Aec3Optimization optimization = DetectOptimization();
+  AdaptiveFirFilter filter(9, 9, 250, 1, optimization, &data_dumper);
+  std::vector<std::array<float, kFftLengthBy2Plus1>> H2(
+      filter.max_filter_size_partitions(),
+      std::array<float, kFftLengthBy2Plus1>());
+  for (auto& H2_k : H2) {
+    H2_k.fill(0.f);
+  }
+
+  std::array<float, kFftLengthBy2Plus1> erl;
+  ComputeErl(optimization, H2, erl);
+  filter.ComputeFrequencyResponse(&H2);
+}
+
+// Verifies that the filter size is correctly reported.
+TEST(AdaptiveFirFilter, FilterSize) {
+  ApmDataDumper data_dumper(42);
+  for (size_t filter_size = 1; filter_size < 5; ++filter_size) {
+    AdaptiveFirFilter filter(filter_size, filter_size, 250, 1,
+                             DetectOptimization(), &data_dumper);
+    EXPECT_EQ(filter_size, filter.SizePartitions());
+  }
+}
+
+// Verifies that the filter is able to properly filter a signal and to adapt
+// its coefficients.
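+// The test feeds a delayed average of the render channels, plus a small
+// amount of noise (scaled by 1/100), into the capture path, runs the
+// filter/adapt/constrain loop, and finally requires the residual error energy
+// to be at least a factor 1000 below the capture energy, i.e. roughly 30 dB
+// of echo attenuation since 10 * log10(1000) = 30 dB.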
+TEST(AdaptiveFirFilter, FilterAndAdapt) { + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + constexpr size_t kNumBlocksToProcessPerRenderChannel = 1000; + + for (size_t num_capture_channels : {1, 2, 4}) { + for (size_t num_render_channels : {1, 2, 3, 6, 8}) { + ApmDataDumper data_dumper(42); + EchoCanceller3Config config; + + if (num_render_channels == 33) { + config.filter.main = {13, 0.00005f, 0.0005f, 0.0001f, 2.f, 20075344.f}; + config.filter.shadow = {13, 0.1f, 20075344.f}; + config.filter.main_initial = {12, 0.005f, 0.5f, + 0.001f, 2.f, 20075344.f}; + config.filter.shadow_initial = {12, 0.7f, 20075344.f}; + } + + AdaptiveFirFilter filter( + config.filter.main.length_blocks, config.filter.main.length_blocks, + config.filter.config_change_duration_blocks, num_render_channels, + DetectOptimization(), &data_dumper); + std::vector>> H2( + num_capture_channels, + std::vector>( + filter.max_filter_size_partitions(), + std::array())); + std::vector> h( + num_capture_channels, + std::vector( + GetTimeDomainLength(filter.max_filter_size_partitions()), 0.f)); + Aec3Fft fft; + config.delay.default_delay = 1; + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, kSampleRateHz, + num_render_channels)); + ShadowFilterUpdateGain gain(config.filter.shadow, + config.filter.config_change_duration_blocks); + Random random_generator(42U); + std::vector>> x( + kNumBands, + std::vector>(num_render_channels, + std::vector(kBlockSize, 0.f))); + std::vector n(kBlockSize, 0.f); + std::vector y(kBlockSize, 0.f); + AecState aec_state(EchoCanceller3Config{}, num_capture_channels); + RenderSignalAnalyzer render_signal_analyzer(config); + absl::optional delay_estimate; + std::vector e(kBlockSize, 0.f); + std::array s_scratch; + std::vector output(num_capture_channels); + FftData S; + FftData G; + FftData E; + std::vector> Y2( + num_capture_channels); + std::vector> E2_main( + num_capture_channels); + std::array E2_shadow; + // [B,A] = butter(2,100/8000,'high') + constexpr CascadedBiQuadFilter::BiQuadCoefficients + kHighPassFilterCoefficients = {{0.97261f, -1.94523f, 0.97261f}, + {-1.94448f, 0.94598f}}; + for (auto& Y2_ch : Y2) { + Y2_ch.fill(0.f); + } + for (auto& E2_main_ch : E2_main) { + E2_main_ch.fill(0.f); + } + E2_shadow.fill(0.f); + for (auto& subtractor_output : output) { + subtractor_output.Reset(); + } + + constexpr float kScale = 1.0f / kFftLengthBy2; + + for (size_t delay_samples : {0, 64, 150, 200, 301}) { + std::vector> delay_buffer( + num_render_channels, DelayBuffer(delay_samples)); + std::vector> x_hp_filter( + num_render_channels); + for (size_t ch = 0; ch < num_render_channels; ++ch) { + x_hp_filter[ch] = std::make_unique( + kHighPassFilterCoefficients, 1); + } + CascadedBiQuadFilter y_hp_filter(kHighPassFilterCoefficients, 1); + + SCOPED_TRACE(ProduceDebugText(num_render_channels, delay_samples)); + const size_t num_blocks_to_process = + kNumBlocksToProcessPerRenderChannel * num_render_channels; + for (size_t j = 0; j < num_blocks_to_process; ++j) { + std::fill(y.begin(), y.end(), 0.f); + for (size_t ch = 0; ch < num_render_channels; ++ch) { + RandomizeSampleVector(&random_generator, x[0][ch]); + std::array y_channel; + delay_buffer[ch].Delay(x[0][ch], y_channel); + for (size_t k = 0; k < y.size(); ++k) { + y[k] += y_channel[k] / num_render_channels; + } + } + + RandomizeSampleVector(&random_generator, n); + const float noise_scaling = 1.f / 100.f / num_render_channels; + for (size_t k = 0; k < y.size(); ++k) { + y[k] += 
n[k] * noise_scaling; + } + + for (size_t ch = 0; ch < num_render_channels; ++ch) { + x_hp_filter[ch]->Process(x[0][ch]); + } + y_hp_filter.Process(y); + + render_delay_buffer->Insert(x); + if (j == 0) { + render_delay_buffer->Reset(); + } + render_delay_buffer->PrepareCaptureProcessing(); + auto* const render_buffer = render_delay_buffer->GetRenderBuffer(); + + render_signal_analyzer.Update(*render_buffer, + aec_state.MinDirectPathFilterDelay()); + + filter.Filter(*render_buffer, &S); + fft.Ifft(S, &s_scratch); + std::transform(y.begin(), y.end(), s_scratch.begin() + kFftLengthBy2, + e.begin(), + [&](float a, float b) { return a - b * kScale; }); + std::for_each(e.begin(), e.end(), [](float& a) { + a = rtc::SafeClamp(a, -32768.f, 32767.f); + }); + fft.ZeroPaddedFft(e, Aec3Fft::Window::kRectangular, &E); + for (auto& o : output) { + for (size_t k = 0; k < kBlockSize; ++k) { + o.s_main[k] = kScale * s_scratch[k + kFftLengthBy2]; + } + } + + std::array render_power; + render_buffer->SpectralSum(filter.SizePartitions(), &render_power); + gain.Compute(render_power, render_signal_analyzer, E, + filter.SizePartitions(), false, &G); + filter.Adapt(*render_buffer, G, &h[0]); + aec_state.HandleEchoPathChange(EchoPathVariability( + false, EchoPathVariability::DelayAdjustment::kNone, false)); + + filter.ComputeFrequencyResponse(&H2[0]); + aec_state.Update(delay_estimate, H2, h, *render_buffer, E2_main, Y2, + output); + } + // Verify that the filter is able to perform well. + EXPECT_LT(1000 * std::inner_product(e.begin(), e.end(), e.begin(), 0.f), + std::inner_product(y.begin(), y.end(), y.begin(), 0.f)); + } + } + } +} +} // namespace aec3 +} // namespace webrtc diff --git a/audio_processing/aec3/aec3_common.cc b/audio_processing/aec3/aec3_common.cc new file mode 100644 index 0000000..95f114a --- /dev/null +++ b/audio_processing/aec3/aec3_common.cc @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/aec3/aec3_common.h" + +#include + +#include "rtc_base/checks.h" +#include "rtc_base/system/arch.h" +#include "system_wrappers/include/cpu_features_wrapper.h" + +namespace webrtc { + +Aec3Optimization DetectOptimization() { +#if defined(WEBRTC_ARCH_X86_FAMILY) + if (WebRtc_GetCPUInfo(kSSE2) != 0) { + return Aec3Optimization::kSse2; + } +#endif + +#if defined(WEBRTC_HAS_NEON) + return Aec3Optimization::kNeon; +#endif + + return Aec3Optimization::kNone; +} + +float FastApproxLog2f(const float in) { + RTC_DCHECK_GT(in, .0f); + // Read and interpret float as uint32_t and then cast to float. + // This is done to extract the exponent (bits 30 - 23). + // "Right shift" of the exponent is then performed by multiplying + // with the constant (1/2^23). Finally, we subtract a constant to + // remove the bias (https://en.wikipedia.org/wiki/Exponent_bias). + union { + float dummy; + uint32_t a; + } x = {in}; + float out = x.a; + out *= 1.1920929e-7f; // 1/2^23 + out -= 126.942695f; // Remove bias. 
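+  // Example: in = 1.0f has the bit pattern 0x3F800000 = 1065353216, so
+  // out = 1065353216 * 2^-23 - 126.942695 = 127.0 - 126.942695 ~= 0.0573
+  // rather than the exact log2(1.0f) = 0; the offset from 127 trades
+  // exactness at powers of two for a smaller error within each octave.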
+  return out;
+}
+
+float Log2TodB(const float in_log2) {
+  return 3.0102999566398121 * in_log2;
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/aec3_common.h b/audio_processing/aec3/aec3_common.h
new file mode 100644
index 0000000..ed28c88
--- /dev/null
+++ b/audio_processing/aec3/aec3_common.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_AEC3_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_AEC3_COMMON_H_
+
+#include <stddef.h>
+
+namespace webrtc {
+
+#ifdef _MSC_VER /* visual c++ */
+#define ALIGN16_BEG __declspec(align(16))
+#define ALIGN16_END
+#else /* gcc or icc */
+#define ALIGN16_BEG
+#define ALIGN16_END __attribute__((aligned(16)))
+#endif
+
+enum class Aec3Optimization { kNone, kSse2, kNeon };
+
+constexpr int kNumBlocksPerSecond = 250;
+
+constexpr int kMetricsReportingIntervalBlocks = 10 * kNumBlocksPerSecond;
+constexpr int kMetricsComputationBlocks = 11;
+constexpr int kMetricsCollectionBlocks =
+    kMetricsReportingIntervalBlocks - kMetricsComputationBlocks;
+
+constexpr size_t kFftLengthBy2 = 64;
+constexpr size_t kFftLengthBy2Plus1 = kFftLengthBy2 + 1;
+constexpr size_t kFftLengthBy2Minus1 = kFftLengthBy2 - 1;
+constexpr size_t kFftLength = 2 * kFftLengthBy2;
+constexpr size_t kFftLengthBy2Log2 = 6;
+
+constexpr int kRenderTransferQueueSizeFrames = 100;
+
+constexpr size_t kMaxNumBands = 3;
+constexpr size_t kFrameSize = 160;
+constexpr size_t kSubFrameLength = kFrameSize / 2;
+
+constexpr size_t kBlockSize = kFftLengthBy2;
+constexpr size_t kBlockSizeLog2 = kFftLengthBy2Log2;
+
+constexpr size_t kExtendedBlockSize = 2 * kFftLengthBy2;
+constexpr size_t kMatchedFilterWindowSizeSubBlocks = 32;
+constexpr size_t kMatchedFilterAlignmentShiftSizeSubBlocks =
+    kMatchedFilterWindowSizeSubBlocks * 3 / 4;
+
+// TODO(peah): Integrate this with how it is done inside audio_processing_impl.
+constexpr size_t NumBandsForRate(int sample_rate_hz) {
+  return static_cast<size_t>(sample_rate_hz / 16000);
+}
+
+constexpr bool ValidFullBandRate(int sample_rate_hz) {
+  return sample_rate_hz == 16000 || sample_rate_hz == 32000 ||
+         sample_rate_hz == 48000;
+}
+
+constexpr int GetTimeDomainLength(int filter_length_blocks) {
+  return filter_length_blocks * kFftLengthBy2;
+}
+
+constexpr size_t GetDownSampledBufferSize(size_t down_sampling_factor,
+                                          size_t num_matched_filters) {
+  return kBlockSize / down_sampling_factor *
+         (kMatchedFilterAlignmentShiftSizeSubBlocks * num_matched_filters +
+          kMatchedFilterWindowSizeSubBlocks + 1);
+}
+
+constexpr size_t GetRenderDelayBufferSize(size_t down_sampling_factor,
+                                          size_t num_matched_filters,
+                                          size_t filter_length_blocks) {
+  return GetDownSampledBufferSize(down_sampling_factor, num_matched_filters) /
+             (kBlockSize / down_sampling_factor) +
+         filter_length_blocks + 1;
+}
+
+// Detects what kind of optimizations to use for the code.
+Aec3Optimization DetectOptimization();
+
+// Computes the log2 of the input in a fast and approximate manner.
+float FastApproxLog2f(const float in);
+
+// Returns dB from a power quantity expressed in log2.
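+// The conversion factor is 10 * log10(2) ~= 3.0103: for example, a power
+// ratio of 2^10 corresponds to 10 * 3.0103 ~= 30.1 dB.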
+float Log2TodB(const float in_log2); + +static_assert(1 << kBlockSizeLog2 == kBlockSize, + "Proper number of shifts for blocksize"); + +static_assert(1 << kFftLengthBy2Log2 == kFftLengthBy2, + "Proper number of shifts for the fft length"); + +static_assert(1 == NumBandsForRate(16000), "Number of bands for 16 kHz"); +static_assert(2 == NumBandsForRate(32000), "Number of bands for 32 kHz"); +static_assert(3 == NumBandsForRate(48000), "Number of bands for 48 kHz"); + +static_assert(ValidFullBandRate(16000), + "Test that 16 kHz is a valid sample rate"); +static_assert(ValidFullBandRate(32000), + "Test that 32 kHz is a valid sample rate"); +static_assert(ValidFullBandRate(48000), + "Test that 48 kHz is a valid sample rate"); +static_assert(!ValidFullBandRate(8001), + "Test that 8001 Hz is not a valid sample rate"); + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_AEC3_COMMON_H_ diff --git a/audio_processing/aec3/aec3_fft.cc b/audio_processing/aec3/aec3_fft.cc new file mode 100644 index 0000000..ba1477e --- /dev/null +++ b/audio_processing/aec3/aec3_fft.cc @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/aec3/aec3_fft.h" + +#include +#include +#include + +#include "rtc_base/checks.h" + +namespace webrtc { + +namespace { + +const float kHanning64[kFftLengthBy2] = { + 0.f, 0.00248461f, 0.00991376f, 0.0222136f, 0.03926189f, + 0.06088921f, 0.08688061f, 0.11697778f, 0.15088159f, 0.1882551f, + 0.22872687f, 0.27189467f, 0.31732949f, 0.36457977f, 0.41317591f, + 0.46263495f, 0.51246535f, 0.56217185f, 0.61126047f, 0.65924333f, + 0.70564355f, 0.75f, 0.79187184f, 0.83084292f, 0.86652594f, + 0.89856625f, 0.92664544f, 0.95048443f, 0.96984631f, 0.98453864f, + 0.99441541f, 0.99937846f, 0.99937846f, 0.99441541f, 0.98453864f, + 0.96984631f, 0.95048443f, 0.92664544f, 0.89856625f, 0.86652594f, + 0.83084292f, 0.79187184f, 0.75f, 0.70564355f, 0.65924333f, + 0.61126047f, 0.56217185f, 0.51246535f, 0.46263495f, 0.41317591f, + 0.36457977f, 0.31732949f, 0.27189467f, 0.22872687f, 0.1882551f, + 0.15088159f, 0.11697778f, 0.08688061f, 0.06088921f, 0.03926189f, + 0.0222136f, 0.00991376f, 0.00248461f, 0.f}; + +// Hanning window from Matlab command win = sqrt(hanning(128)). 
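+// With 50% overlap, the squares of this window at offsets n and n + 64 sum
+// to (approximately) one, so applying it on both analysis and synthesis
+// allows near-perfect overlap-add reconstruction.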
+const float kSqrtHanning128[kFftLength] = { + 0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f, + 0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f, + 0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f, + 0.29028467725446f, 0.31368174039889f, 0.33688985339222f, 0.35989503653499f, + 0.38268343236509f, 0.40524131400499f, 0.42755509343028f, 0.44961132965461f, + 0.47139673682600f, 0.49289819222978f, 0.51410274419322f, 0.53499761988710f, + 0.55557023301960f, 0.57580819141785f, 0.59569930449243f, 0.61523159058063f, + 0.63439328416365f, 0.65317284295378f, 0.67155895484702f, 0.68954054473707f, + 0.70710678118655f, 0.72424708295147f, 0.74095112535496f, 0.75720884650648f, + 0.77301045336274f, 0.78834642762661f, 0.80320753148064f, 0.81758481315158f, + 0.83146961230255f, 0.84485356524971f, 0.85772861000027f, 0.87008699110871f, + 0.88192126434835f, 0.89322430119552f, 0.90398929312344f, 0.91420975570353f, + 0.92387953251129f, 0.93299279883474f, 0.94154406518302f, 0.94952818059304f, + 0.95694033573221f, 0.96377606579544f, 0.97003125319454f, 0.97570213003853f, + 0.98078528040323f, 0.98527764238894f, 0.98917650996478f, 0.99247953459871f, + 0.99518472667220f, 0.99729045667869f, 0.99879545620517f, 0.99969881869620f, + 1.00000000000000f, 0.99969881869620f, 0.99879545620517f, 0.99729045667869f, + 0.99518472667220f, 0.99247953459871f, 0.98917650996478f, 0.98527764238894f, + 0.98078528040323f, 0.97570213003853f, 0.97003125319454f, 0.96377606579544f, + 0.95694033573221f, 0.94952818059304f, 0.94154406518302f, 0.93299279883474f, + 0.92387953251129f, 0.91420975570353f, 0.90398929312344f, 0.89322430119552f, + 0.88192126434835f, 0.87008699110871f, 0.85772861000027f, 0.84485356524971f, + 0.83146961230255f, 0.81758481315158f, 0.80320753148064f, 0.78834642762661f, + 0.77301045336274f, 0.75720884650648f, 0.74095112535496f, 0.72424708295147f, + 0.70710678118655f, 0.68954054473707f, 0.67155895484702f, 0.65317284295378f, + 0.63439328416365f, 0.61523159058063f, 0.59569930449243f, 0.57580819141785f, + 0.55557023301960f, 0.53499761988710f, 0.51410274419322f, 0.49289819222978f, + 0.47139673682600f, 0.44961132965461f, 0.42755509343028f, 0.40524131400499f, + 0.38268343236509f, 0.35989503653499f, 0.33688985339222f, 0.31368174039889f, + 0.29028467725446f, 0.26671275747490f, 0.24298017990326f, 0.21910124015687f, + 0.19509032201613f, 0.17096188876030f, 0.14673047445536f, 0.12241067519922f, + 0.09801714032956f, 0.07356456359967f, 0.04906767432742f, 0.02454122852291f}; + +} // namespace + +// TODO(peah): Change x to be std::array once the rest of the code allows this. 
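+// Note that the 64 input samples are placed in the upper half of the
+// 128-point buffer, with zeros in the lower half; this matches the
+// overlap-save block layout used throughout AEC3, where time-domain output
+// blocks are taken from the upper half of the inverse transforms.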
+void Aec3Fft::ZeroPaddedFft(rtc::ArrayView x, + Window window, + FftData* X) const { + RTC_DCHECK(X); + RTC_DCHECK_EQ(kFftLengthBy2, x.size()); + std::array fft; + std::fill(fft.begin(), fft.begin() + kFftLengthBy2, 0.f); + switch (window) { + case Window::kRectangular: + std::copy(x.begin(), x.end(), fft.begin() + kFftLengthBy2); + break; + case Window::kHanning: + std::transform(x.begin(), x.end(), std::begin(kHanning64), + fft.begin() + kFftLengthBy2, + [](float a, float b) { return a * b; }); + break; + case Window::kSqrtHanning: + RTC_NOTREACHED(); + break; + default: + RTC_NOTREACHED(); + } + + Fft(&fft, X); +} + +void Aec3Fft::PaddedFft(rtc::ArrayView x, + rtc::ArrayView x_old, + Window window, + FftData* X) const { + RTC_DCHECK(X); + RTC_DCHECK_EQ(kFftLengthBy2, x.size()); + RTC_DCHECK_EQ(kFftLengthBy2, x_old.size()); + std::array fft; + + switch (window) { + case Window::kRectangular: + std::copy(x_old.begin(), x_old.end(), fft.begin()); + std::copy(x.begin(), x.end(), fft.begin() + x_old.size()); + break; + case Window::kHanning: + RTC_NOTREACHED(); + break; + case Window::kSqrtHanning: + std::transform(x_old.begin(), x_old.end(), std::begin(kSqrtHanning128), + fft.begin(), std::multiplies()); + std::transform(x.begin(), x.end(), + std::begin(kSqrtHanning128) + x_old.size(), + fft.begin() + x_old.size(), std::multiplies()); + break; + default: + RTC_NOTREACHED(); + } + + Fft(&fft, X); +} + +} // namespace webrtc diff --git a/audio_processing/aec3/aec3_fft.h b/audio_processing/aec3/aec3_fft.h new file mode 100644 index 0000000..5c14c1a --- /dev/null +++ b/audio_processing/aec3/aec3_fft.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_ + +#include + +#include "rtc_base/array_view.h" +#include "audio_processing/aec3/aec3_common.h" +#include "audio_processing/aec3/fft_data.h" +#include "audio_processing/utility/ooura_fft.h" +#include "rtc_base/checks.h" +#include "rtc_base/constructor_magic.h" + +namespace webrtc { + +// Wrapper class that provides 128 point real valued FFT functionality with the +// FftData type. +class Aec3Fft { + public: + enum class Window { kRectangular, kHanning, kSqrtHanning }; + + Aec3Fft() = default; + // Computes the FFT. Note that both the input and output are modified. + void Fft(std::array* x, FftData* X) const { + RTC_DCHECK(x); + RTC_DCHECK(X); + ooura_fft_.Fft(x->data()); + X->CopyFromPackedArray(*x); + } + // Computes the inverse Fft. + void Ifft(const FftData& X, std::array* x) const { + RTC_DCHECK(x); + X.CopyToPackedArray(x); + ooura_fft_.InverseFft(x->data()); + } + + // Windows the input using a Hanning window, and then adds padding of + // kFftLengthBy2 initial zeros before computing the Fft. + void ZeroPaddedFft(rtc::ArrayView x, + Window window, + FftData* X) const; + + // Concatenates the kFftLengthBy2 values long x and x_old before computing the + // Fft. After that, x is copied to x_old. 
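+  // Note: x_old is passed as a read-only view, so this implementation does
+  // not perform that copy itself; the caller is expected to copy x into
+  // x_old afterwards (the PaddedFft unit test does this explicitly).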
diff --git a/audio_processing/aec3/aec3_fft.h b/audio_processing/aec3/aec3_fft.h
new file mode 100644
index 0000000..5c14c1a
--- /dev/null
+++ b/audio_processing/aec3/aec3_fft.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_
+
+#include <array>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/fft_data.h"
+#include "audio_processing/utility/ooura_fft.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+// Wrapper class that provides 128 point real valued FFT functionality with the
+// FftData type.
+class Aec3Fft {
+ public:
+  enum class Window { kRectangular, kHanning, kSqrtHanning };
+
+  Aec3Fft() = default;
+  // Computes the FFT. Note that both the input and output are modified.
+  void Fft(std::array<float, kFftLength>* x, FftData* X) const {
+    RTC_DCHECK(x);
+    RTC_DCHECK(X);
+    ooura_fft_.Fft(x->data());
+    X->CopyFromPackedArray(*x);
+  }
+  // Computes the inverse Fft.
+  void Ifft(const FftData& X, std::array<float, kFftLength>* x) const {
+    RTC_DCHECK(x);
+    X.CopyToPackedArray(x);
+    ooura_fft_.InverseFft(x->data());
+  }
+
+  // Windows the input using a Hanning window, and then adds padding of
+  // kFftLengthBy2 initial zeros before computing the Fft.
+  void ZeroPaddedFft(rtc::ArrayView<const float> x,
+                     Window window,
+                     FftData* X) const;
+
+  // Concatenates the kFftLengthBy2 values long x and x_old before computing
+  // the Fft. After that, x is copied to x_old.
+  void PaddedFft(rtc::ArrayView<const float> x,
+                 rtc::ArrayView<const float> x_old,
+                 FftData* X) const {
+    PaddedFft(x, x_old, Window::kRectangular, X);
+  }
+
+  // Padded Fft using a time-domain window.
+  void PaddedFft(rtc::ArrayView<const float> x,
+                 rtc::ArrayView<const float> x_old,
+                 Window window,
+                 FftData* X) const;
+
+ private:
+  const OouraFft ooura_fft_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Aec3Fft);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_
diff --git a/audio_processing/aec3/aec3_fft_unittest.cc b/audio_processing/aec3/aec3_fft_unittest.cc
new file mode 100644
index 0000000..82d6e76
--- /dev/null
+++ b/audio_processing/aec3/aec3_fft_unittest.cc
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec3_fft.h"
+
+#include <algorithm>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null input in Fft works.
+TEST(Aec3Fft, NullFftInput) {
+  Aec3Fft fft;
+  FftData X;
+  EXPECT_DEATH(fft.Fft(nullptr, &X), "");
+}
+
+// Verifies that the check for non-null output in Fft works.
+TEST(Aec3Fft, NullFftOutput) {
+  Aec3Fft fft;
+  std::array<float, kFftLength> x;
+  EXPECT_DEATH(fft.Fft(&x, nullptr), "");
+}
+
+// Verifies that the check for non-null output in Ifft works.
+TEST(Aec3Fft, NullIfftOutput) {
+  Aec3Fft fft;
+  FftData X;
+  EXPECT_DEATH(fft.Ifft(X, nullptr), "");
+}
+
+// Verifies that the check for non-null output in ZeroPaddedFft works.
+TEST(Aec3Fft, NullZeroPaddedFftOutput) {
+  Aec3Fft fft;
+  std::array<float, kFftLengthBy2> x;
+  EXPECT_DEATH(fft.ZeroPaddedFft(x, Aec3Fft::Window::kRectangular, nullptr),
+               "");
+}
+
+// Verifies that the check for input length in ZeroPaddedFft works.
+TEST(Aec3Fft, ZeroPaddedFftWrongInputLength) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLengthBy2 - 1> x;
+  EXPECT_DEATH(fft.ZeroPaddedFft(x, Aec3Fft::Window::kRectangular, &X), "");
+}
+
+// Verifies that the check for non-null output in PaddedFft works.
+TEST(Aec3Fft, NullPaddedFftOutput) {
+  Aec3Fft fft;
+  std::array<float, kFftLengthBy2> x;
+  std::array<float, kFftLengthBy2> x_old;
+  EXPECT_DEATH(fft.PaddedFft(x, x_old, nullptr), "");
+}
+
+// Verifies that the check for input length in PaddedFft works.
+TEST(Aec3Fft, PaddedFftWrongInputLength) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLengthBy2 - 1> x;
+  std::array<float, kFftLengthBy2> x_old;
+  EXPECT_DEATH(fft.PaddedFft(x, x_old, &X), "");
+}
+
+// Verifies that the check for length in the old value in PaddedFft works.
+TEST(Aec3Fft, PaddedFftWrongOldValuesLength) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLengthBy2> x;
+  std::array<float, kFftLengthBy2 - 1> x_old;
+  EXPECT_DEATH(fft.PaddedFft(x, x_old, &X), "");
+}
+
+#endif
+
+// Verifies that Fft works as intended.
+TEST(Aec3Fft, Fft) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLength> x;
+  x.fill(0.f);
+  fft.Fft(&x, &X);
+  EXPECT_THAT(X.re, ::testing::Each(0.f));
+  EXPECT_THAT(X.im, ::testing::Each(0.f));
+
+  x.fill(0.f);
+  x[0] = 1.f;
+  fft.Fft(&x, &X);
+  EXPECT_THAT(X.re, ::testing::Each(1.f));
+  EXPECT_THAT(X.im, ::testing::Each(0.f));
+
+  x.fill(1.f);
+  fft.Fft(&x, &X);
+  EXPECT_EQ(128.f, X.re[0]);
+  std::for_each(X.re.begin() + 1, X.re.end(),
+                [](float a) { EXPECT_EQ(0.f, a); });
+  EXPECT_THAT(X.im, ::testing::Each(0.f));
+}
+
+// Verifies that Ifft works as intended.
+TEST(Aec3Fft, Ifft) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLength> x;
+
+  X.re.fill(0.f);
+  X.im.fill(0.f);
+  fft.Ifft(X, &x);
+  EXPECT_THAT(x, ::testing::Each(0.f));
+
+  X.re.fill(1.f);
+  X.im.fill(0.f);
+  fft.Ifft(X, &x);
+  EXPECT_EQ(64.f, x[0]);
+  std::for_each(x.begin() + 1, x.end(), [](float a) { EXPECT_EQ(0.f, a); });
+
+  X.re.fill(0.f);
+  X.re[0] = 128;
+  X.im.fill(0.f);
+  fft.Ifft(X, &x);
+  EXPECT_THAT(x, ::testing::Each(64.f));
+}
+
+// Verifies that Fft and Ifft work together as intended.
+TEST(Aec3Fft, FftAndIfft) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLength> x;
+  std::array<float, kFftLength> x_ref;
+
+  int v = 0;
+  for (int k = 0; k < 20; ++k) {
+    for (size_t j = 0; j < x.size(); ++j) {
+      x[j] = v++;
+      x_ref[j] = x[j] * 64.f;
+    }
+    fft.Fft(&x, &X);
+    fft.Ifft(X, &x);
+    for (size_t j = 0; j < x.size(); ++j) {
+      EXPECT_NEAR(x_ref[j], x[j], 0.001f);
+    }
+  }
+}
+
+// Verifies that ZeroPaddedFft works as intended.
+TEST(Aec3Fft, ZeroPaddedFft) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLengthBy2> x_in;
+  std::array<float, kFftLength> x_ref;
+  std::array<float, kFftLength> x_out;
+
+  int v = 0;
+  x_ref.fill(0.f);
+  for (int k = 0; k < 20; ++k) {
+    for (size_t j = 0; j < x_in.size(); ++j) {
+      x_in[j] = v++;
+      x_ref[j + kFftLengthBy2] = x_in[j] * 64.f;
+    }
+    fft.ZeroPaddedFft(x_in, Aec3Fft::Window::kRectangular, &X);
+    fft.Ifft(X, &x_out);
+    for (size_t j = 0; j < x_out.size(); ++j) {
+      EXPECT_NEAR(x_ref[j], x_out[j], 0.1f);
+    }
+  }
+}
+
+// Verifies that PaddedFft works as intended.
+TEST(Aec3Fft, PaddedFft) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLengthBy2> x_in;
+  std::array<float, kFftLength> x_out;
+  std::array<float, kFftLengthBy2> x_old;
+  std::array<float, kFftLengthBy2> x_old_ref;
+  std::array<float, kFftLength> x_ref;
+
+  int v = 0;
+  x_old.fill(0.f);
+  for (int k = 0; k < 20; ++k) {
+    for (size_t j = 0; j < x_in.size(); ++j) {
+      x_in[j] = v++;
+    }
+
+    std::copy(x_old.begin(), x_old.end(), x_ref.begin());
+    std::copy(x_in.begin(), x_in.end(), x_ref.begin() + kFftLengthBy2);
+    std::copy(x_in.begin(), x_in.end(), x_old_ref.begin());
+    std::for_each(x_ref.begin(), x_ref.end(), [](float& a) { a *= 64.f; });
+
+    fft.PaddedFft(x_in, x_old, &X);
+    std::copy(x_in.begin(), x_in.end(), x_old.begin());
+    fft.Ifft(X, &x_out);
+
+    for (size_t j = 0; j < x_out.size(); ++j) {
+      EXPECT_NEAR(x_ref[j], x_out[j], 0.1f);
+    }
+
+    EXPECT_EQ(x_old_ref, x_old);
+  }
+}
+
+}  // namespace webrtc
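+
+// Note on the expected magnitudes above (illustrative): the Ooura FFT used
+// here is unnormalized, so a forward Fft followed by an Ifft scales the
+// signal by kFftLengthBy2 (64). That is why the round-trip tests compare
+// against x_ref[j] = x[j] * 64.f rather than the original input.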
diff --git a/audio_processing/aec3/aec_state.cc b/audio_processing/aec3/aec_state.cc
new file mode 100644
index 0000000..d9338d0
--- /dev/null
+++ b/audio_processing/aec3/aec_state.cc
@@ -0,0 +1,522 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/aec_state.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <numeric>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomic_ops.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kBlocksSinceConvergencedFilterInit = 10000;
+constexpr size_t kBlocksSinceConsistentEstimateInit = 10000;
+
+void ComputeAvgRenderReverb(
+    const SpectrumBuffer& spectrum_buffer,
+    int delay_blocks,
+    float reverb_decay,
+    ReverbModel* reverb_model,
+    rtc::ArrayView<float, kFftLengthBy2Plus1> reverb_power_spectrum) {
+  RTC_DCHECK(reverb_model);
+  const size_t num_render_channels = spectrum_buffer.buffer[0].size();
+  int idx_at_delay =
+      spectrum_buffer.OffsetIndex(spectrum_buffer.read, delay_blocks);
+  int idx_past = spectrum_buffer.IncIndex(idx_at_delay);
+
+  std::array<float, kFftLengthBy2Plus1> X2_data;
+  rtc::ArrayView<const float> X2;
+  if (num_render_channels > 1) {
+    auto average_channels =
+        [](size_t num_render_channels,
+           rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+               spectrum_band_0,
+           rtc::ArrayView<float, kFftLengthBy2Plus1> render_power) {
+          std::fill(render_power.begin(), render_power.end(), 0.f);
+          for (size_t ch = 0; ch < num_render_channels; ++ch) {
+            for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+              render_power[k] += spectrum_band_0[ch][k];
+            }
+          }
+          const float normalizer = 1.f / num_render_channels;
+          for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+            render_power[k] *= normalizer;
+          }
+        };
+    average_channels(num_render_channels, spectrum_buffer.buffer[idx_past],
+                     X2_data);
+    reverb_model->UpdateReverbNoFreqShaping(
+        X2_data, /*power_spectrum_scaling=*/1.0f, reverb_decay);
+
+    average_channels(num_render_channels, spectrum_buffer.buffer[idx_at_delay],
+                     X2_data);
+    X2 = X2_data;
+  } else {
+    reverb_model->UpdateReverbNoFreqShaping(
+        spectrum_buffer.buffer[idx_past][/*channel=*/0],
+        /*power_spectrum_scaling=*/1.0f, reverb_decay);
+
+    X2 = spectrum_buffer.buffer[idx_at_delay][/*channel=*/0];
+  }
+
+  rtc::ArrayView<const float> reverb_power = reverb_model->reverb();
+  for (size_t k = 0; k < X2.size(); ++k) {
+    reverb_power_spectrum[k] = X2[k] + reverb_power[k];
+  }
+}
+
+}  // namespace
+
+int AecState::instance_count_ = 0;
+
+void AecState::GetResidualEchoScaling(
+    rtc::ArrayView<float> residual_scaling) const {
+  bool filter_has_had_time_to_converge;
+  if (config_.filter.conservative_initial_phase) {
+    filter_has_had_time_to_converge =
+        strong_not_saturated_render_blocks_ >= 1.5f * kNumBlocksPerSecond;
+  } else {
+    filter_has_had_time_to_converge =
+        strong_not_saturated_render_blocks_ >= 0.8f * kNumBlocksPerSecond;
+  }
+  echo_audibility_.GetResidualEchoScaling(filter_has_had_time_to_converge,
+                                          residual_scaling);
+}
+
+absl::optional<float> AecState::ErleUncertainty() const {
+  if (SaturatedEcho()) {
+    return 1.f;
+  }
+
+  return absl::nullopt;
+}
+
+AecState::AecState(const EchoCanceller3Config& config,
+                   size_t num_capture_channels)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      config_(config),
+      num_capture_channels_(num_capture_channels),
+      initial_state_(config_),
+      delay_state_(config_, num_capture_channels_),
+      transparent_state_(config_),
+      filter_quality_state_(config_, num_capture_channels_),
+      erl_estimator_(2 * kNumBlocksPerSecond),
+      erle_estimator_(2 * kNumBlocksPerSecond, config_, num_capture_channels_),
+      filter_analyzer_(config_, num_capture_channels_),
+      echo_audibility_(
+          config_.echo_audibility.use_stationarity_properties_at_init),
+      reverb_model_estimator_(config_, num_capture_channels_),
+      subtractor_output_analyzer_(num_capture_channels_) {}
+
+AecState::~AecState() = default;
+
+void AecState::HandleEchoPathChange(
+    const EchoPathVariability& echo_path_variability) {
+  const auto full_reset = [&]() {
+    filter_analyzer_.Reset();
+    capture_signal_saturation_ = false;
+    strong_not_saturated_render_blocks_ = 0;
+    blocks_with_active_render_ = 0;
+    initial_state_.Reset();
+    transparent_state_.Reset();
+    erle_estimator_.Reset(true);
+    erl_estimator_.Reset();
+    filter_quality_state_.Reset();
+  };
+
+  // TODO(peah): Refine the reset scheme according to the type of gain and
+  // delay adjustment.
+
+  if (echo_path_variability.delay_change !=
+      EchoPathVariability::DelayAdjustment::kNone) {
+    full_reset();
+  } else if (echo_path_variability.gain_change) {
+    erle_estimator_.Reset(false);
+  }
+  subtractor_output_analyzer_.HandleEchoPathChange();
+}
+
+void AecState::Update(
+    const absl::optional<DelayEstimate>& external_delay,
+    rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+        adaptive_filter_frequency_responses,
+    rtc::ArrayView<const std::vector<float>>
+        adaptive_filter_impulse_responses,
+    const RenderBuffer& render_buffer,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2_main,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+    rtc::ArrayView<const SubtractorOutput> subtractor_output) {
+  RTC_DCHECK_EQ(num_capture_channels_, Y2.size());
+  RTC_DCHECK_EQ(num_capture_channels_, subtractor_output.size());
+  RTC_DCHECK_EQ(num_capture_channels_,
+                adaptive_filter_frequency_responses.size());
+  RTC_DCHECK_EQ(num_capture_channels_,
+                adaptive_filter_impulse_responses.size());
+
+  bool any_filter_converged;
+  bool all_filters_diverged;
+  subtractor_output_analyzer_.Update(subtractor_output, &any_filter_converged,
+                                     &all_filters_diverged);
+
+  bool any_filter_consistent;
+  float max_echo_path_gain;
+  filter_analyzer_.Update(adaptive_filter_impulse_responses, render_buffer,
+                          &any_filter_consistent, &max_echo_path_gain);
+
+  if (config_.filter.use_linear_filter) {
+    delay_state_.Update(filter_analyzer_.FilterDelaysBlocks(), external_delay,
+                        strong_not_saturated_render_blocks_);
+  }
+
+  const std::vector<std::vector<float>>& aligned_render_block =
+      render_buffer.Block(-delay_state_.MinDirectPathFilterDelay())[0];
+
+  bool active_render = false;
+  for (size_t ch = 0; ch < aligned_render_block.size(); ++ch) {
+    const float render_energy = std::inner_product(
+        aligned_render_block[ch].begin(), aligned_render_block[ch].end(),
+        aligned_render_block[ch].begin(), 0.f);
+    if (render_energy > (config_.render_levels.active_render_limit *
+                         config_.render_levels.active_render_limit) *
+                            kFftLengthBy2) {
+      active_render = true;
+      break;
+    }
+  }
+  blocks_with_active_render_ += active_render ? 1 : 0;
+  strong_not_saturated_render_blocks_ +=
+      active_render && !SaturatedCapture() ? 1 : 0;
+
+  std::array<float, kFftLengthBy2Plus1> avg_render_spectrum_with_reverb;
+
+  ComputeAvgRenderReverb(render_buffer.GetSpectrumBuffer(),
+                         delay_state_.MinDirectPathFilterDelay(), ReverbDecay(),
+                         &avg_render_reverb_, avg_render_spectrum_with_reverb);
+
+  if (config_.echo_audibility.use_stationarity_properties) {
+    // Update the echo audibility evaluator.
+    echo_audibility_.Update(render_buffer, avg_render_reverb_.reverb(),
+                            delay_state_.MinDirectPathFilterDelay(),
+                            delay_state_.ExternalDelayReported());
+  }
+
+  if (initial_state_.TransitionTriggered()) {
+    erle_estimator_.Reset(false);
+  }
+
+  erle_estimator_.Update(render_buffer, adaptive_filter_frequency_responses,
+                         avg_render_spectrum_with_reverb, Y2, E2_main,
+                         subtractor_output_analyzer_.ConvergedFilters());
+
+  erl_estimator_.Update(
+      subtractor_output_analyzer_.ConvergedFilters(),
+      render_buffer.Spectrum(delay_state_.MinDirectPathFilterDelay()), Y2);
+
+  saturation_detector_.Update(aligned_render_block, SaturatedCapture(),
+                              UsableLinearEstimate(), subtractor_output,
+                              max_echo_path_gain);
+
+  initial_state_.Update(active_render, SaturatedCapture());
+
+  transparent_state_.Update(delay_state_.MinDirectPathFilterDelay(),
+                            any_filter_consistent, any_filter_converged,
+                            all_filters_diverged, active_render,
+                            SaturatedCapture());
+
+  filter_quality_state_.Update(active_render, TransparentMode(),
+                               SaturatedCapture(), external_delay,
+                               any_filter_converged);
+
+  const bool stationary_block =
+      config_.echo_audibility.use_stationarity_properties &&
+      echo_audibility_.IsBlockStationary();
+
+  reverb_model_estimator_.Update(
+      filter_analyzer_.GetAdjustedFilters(),
+      adaptive_filter_frequency_responses,
+      erle_estimator_.GetInstLinearQualityEstimates(),
+      delay_state_.DirectPathFilterDelays(),
+      filter_quality_state_.UsableLinearFilterOutputs(), stationary_block);
+
+  erle_estimator_.Dump(data_dumper_);
+  reverb_model_estimator_.Dump(data_dumper_.get());
+  data_dumper_->DumpRaw("aec3_erl", Erl());
+  data_dumper_->DumpRaw("aec3_erl_time_domain", ErlTimeDomain());
+  data_dumper_->DumpRaw("aec3_erle", Erle()[0]);
+  data_dumper_->DumpRaw("aec3_usable_linear_estimate", UsableLinearEstimate());
+  data_dumper_->DumpRaw("aec3_transparent_mode", TransparentMode());
+  data_dumper_->DumpRaw("aec3_filter_delay",
+                        filter_analyzer_.MinFilterDelayBlocks());
+
+  data_dumper_->DumpRaw("aec3_any_filter_consistent", any_filter_consistent);
+  data_dumper_->DumpRaw("aec3_initial_state",
+                        initial_state_.InitialStateActive());
+  data_dumper_->DumpRaw("aec3_capture_saturation", SaturatedCapture());
+  data_dumper_->DumpRaw("aec3_echo_saturation", SaturatedEcho());
+  data_dumper_->DumpRaw("aec3_any_filter_converged", any_filter_converged);
+  data_dumper_->DumpRaw("aec3_all_filters_diverged", all_filters_diverged);
+
+  data_dumper_->DumpRaw("aec3_external_delay_avaliable",
+                        external_delay ? 1 : 0);
+  data_dumper_->DumpRaw("aec3_filter_tail_freq_resp_est",
+                        GetReverbFrequencyResponse());
+}
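+
+// Reading guide for Update() above (illustrative summary): per processed
+// block it (1) analyzes the subtractor output for convergence/divergence,
+// (2) analyzes the adaptive filters and refreshes the delay state,
+// (3) detects active render and accumulates reverb-aware render power,
+// (4) updates the ERLE/ERL estimators and the saturation detector, and
+// (5) advances the initial-state, transparent-mode, filter-quality and
+// reverb-model sub-states before dumping debug data.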
+
+AecState::InitialState::InitialState(const EchoCanceller3Config& config)
+    : conservative_initial_phase_(config.filter.conservative_initial_phase),
+      initial_state_seconds_(config.filter.initial_state_seconds) {
+  Reset();
+}
+void AecState::InitialState::InitialState::Reset() {
+  initial_state_ = true;
+  strong_not_saturated_render_blocks_ = 0;
+}
+void AecState::InitialState::InitialState::Update(bool active_render,
+                                                  bool saturated_capture) {
+  strong_not_saturated_render_blocks_ +=
+      active_render && !saturated_capture ? 1 : 0;
+
+  // Flag whether the initial state is still active.
+  bool prev_initial_state = initial_state_;
+  if (conservative_initial_phase_) {
+    initial_state_ =
+        strong_not_saturated_render_blocks_ < 5 * kNumBlocksPerSecond;
+  } else {
+    initial_state_ = strong_not_saturated_render_blocks_ <
+                     initial_state_seconds_ * kNumBlocksPerSecond;
+  }
+
+  // Flag whether the transition from the initial state has started.
+  transition_triggered_ = !initial_state_ && prev_initial_state;
+}
+
+AecState::FilterDelay::FilterDelay(const EchoCanceller3Config& config,
+                                   size_t num_capture_channels)
+    : delay_headroom_samples_(config.delay.delay_headroom_samples),
+      filter_delays_blocks_(num_capture_channels, 0) {}
+
+void AecState::FilterDelay::Update(
+    rtc::ArrayView<const int> analyzer_filter_delay_estimates_blocks,
+    const absl::optional<DelayEstimate>& external_delay,
+    size_t blocks_with_proper_filter_adaptation) {
+  // Update the delay based on the external delay.
+  if (external_delay &&
+      (!external_delay_ || external_delay_->delay != external_delay->delay)) {
+    external_delay_ = external_delay;
+    external_delay_reported_ = true;
+  }
+
+  // Override the estimated delay if it is not certain that the filter has had
+  // time to converge.
+  const bool delay_estimator_may_not_have_converged =
+      blocks_with_proper_filter_adaptation < 2 * kNumBlocksPerSecond;
+  if (delay_estimator_may_not_have_converged && external_delay_) {
+    int delay_guess = delay_headroom_samples_ / kBlockSize;
+    std::fill(filter_delays_blocks_.begin(), filter_delays_blocks_.end(),
+              delay_guess);
+  } else {
+    RTC_DCHECK_EQ(filter_delays_blocks_.size(),
+                  analyzer_filter_delay_estimates_blocks.size());
+    std::copy(analyzer_filter_delay_estimates_blocks.begin(),
+              analyzer_filter_delay_estimates_blocks.end(),
+              filter_delays_blocks_.begin());
+  }
+
+  min_filter_delay_ = *std::min_element(filter_delays_blocks_.begin(),
+                                        filter_delays_blocks_.end());
+}
+
+AecState::TransparentMode::TransparentMode(const EchoCanceller3Config& config)
+    : bounded_erl_(config.ep_strength.bounded_erl),
+      linear_and_stable_echo_path_(
+          config.echo_removal_control.linear_and_stable_echo_path),
+      active_blocks_since_sane_filter_(kBlocksSinceConsistentEstimateInit),
+      non_converged_sequence_size_(kBlocksSinceConvergencedFilterInit) {}
+
+void AecState::TransparentMode::Reset() {
+  non_converged_sequence_size_ = kBlocksSinceConvergencedFilterInit;
+  diverged_sequence_size_ = 0;
+  strong_not_saturated_render_blocks_ = 0;
+  if (linear_and_stable_echo_path_) {
+    recent_convergence_during_activity_ = false;
+  }
+}
+
+void AecState::TransparentMode::Update(int filter_delay_blocks,
+                                       bool any_filter_consistent,
+                                       bool any_filter_converged,
+                                       bool all_filters_diverged,
+                                       bool active_render,
+                                       bool saturated_capture) {
+  ++capture_block_counter_;
+  strong_not_saturated_render_blocks_ +=
+      active_render && !saturated_capture ? 1 : 0;
+
+  if (any_filter_consistent && filter_delay_blocks < 5) {
+    sane_filter_observed_ = true;
+    active_blocks_since_sane_filter_ = 0;
+  } else if (active_render) {
+    ++active_blocks_since_sane_filter_;
+  }
+
+  bool sane_filter_recently_seen;
+  if (!sane_filter_observed_) {
+    sane_filter_recently_seen =
+        capture_block_counter_ <= 5 * kNumBlocksPerSecond;
+  } else {
+    sane_filter_recently_seen =
+        active_blocks_since_sane_filter_ <= 30 * kNumBlocksPerSecond;
+  }
+
+  if (any_filter_converged) {
+    recent_convergence_during_activity_ = true;
+    active_non_converged_sequence_size_ = 0;
+    non_converged_sequence_size_ = 0;
+    ++num_converged_blocks_;
+  } else {
+    if (++non_converged_sequence_size_ > 20 * kNumBlocksPerSecond) {
+      num_converged_blocks_ = 0;
+    }
+    if (active_render &&
+        ++active_non_converged_sequence_size_ > 60 * kNumBlocksPerSecond) {
+      recent_convergence_during_activity_ = false;
+    }
+  }
+
+  if (!all_filters_diverged) {
+    diverged_sequence_size_ = 0;
+  } else if (++diverged_sequence_size_ >= 60) {
+    // TODO(peah): Change these lines to ensure proper triggering of usable
+    // filter.
+    non_converged_sequence_size_ = kBlocksSinceConvergencedFilterInit;
+  }
+
+  if (active_non_converged_sequence_size_ > 60 * kNumBlocksPerSecond) {
+    finite_erl_recently_detected_ = false;
+  }
+
+  if (num_converged_blocks_ > 50) {
+    finite_erl_recently_detected_ = true;
+  }
+
+  if (bounded_erl_) {
+    transparency_activated_ = false;
+  } else if (finite_erl_recently_detected_) {
+    transparency_activated_ = false;
+  } else if (sane_filter_recently_seen &&
+             recent_convergence_during_activity_) {
+    transparency_activated_ = false;
+  } else {
+    const bool filter_should_have_converged =
+        strong_not_saturated_render_blocks_ > 6 * kNumBlocksPerSecond;
+    transparency_activated_ = filter_should_have_converged;
+  }
+}
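+
+// In short (illustrative summary of the decision above): transparency is only
+// activated when a bounded ERL is not assumed, no finite ERL has recently
+// been detected, no sane and recently converging filter has been seen, and
+// more than 6 seconds of strong, unsaturated render have passed without the
+// filter converging.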
+
+AecState::FilteringQualityAnalyzer::FilteringQualityAnalyzer(
+    const EchoCanceller3Config& config,
+    size_t num_capture_channels)
+    : use_linear_filter_(config.filter.use_linear_filter),
+      usable_linear_filter_estimates_(num_capture_channels, false) {}
+
+void AecState::FilteringQualityAnalyzer::Reset() {
+  std::fill(usable_linear_filter_estimates_.begin(),
+            usable_linear_filter_estimates_.end(), false);
+  overall_usable_linear_estimates_ = false;
+  filter_update_blocks_since_reset_ = 0;
+}
+
+void AecState::FilteringQualityAnalyzer::Update(
+    bool active_render,
+    bool transparent_mode,
+    bool saturated_capture,
+    const absl::optional<DelayEstimate>& external_delay,
+    bool any_filter_converged) {
+  // Update blocks counter.
+  const bool filter_update = active_render && !saturated_capture;
+  filter_update_blocks_since_reset_ += filter_update ? 1 : 0;
+  filter_update_blocks_since_start_ += filter_update ? 1 : 0;
+
+  // Store convergence flag when observed.
+  convergence_seen_ = convergence_seen_ || any_filter_converged;
+
+  // Verify requirements for achieving a decent filter. The requirements for
+  // filter adaptation at call startup are more restrictive than after an
+  // in-call reset.
+  const bool sufficient_data_to_converge_at_startup =
+      filter_update_blocks_since_start_ > kNumBlocksPerSecond * 0.4f;
+  const bool sufficient_data_to_converge_at_reset =
+      sufficient_data_to_converge_at_startup &&
+      filter_update_blocks_since_reset_ > kNumBlocksPerSecond * 0.2f;
+
+  // The linear filter can only be used if it has had time to converge.
+  overall_usable_linear_estimates_ = sufficient_data_to_converge_at_startup &&
+                                     sufficient_data_to_converge_at_reset;
+
+  // The linear filter can only be used if an external delay or convergence
+  // has been identified.
+  overall_usable_linear_estimates_ =
+      overall_usable_linear_estimates_ && (external_delay || convergence_seen_);
+
+  // If transparent mode is on, deactivate using the linear filter.
+  overall_usable_linear_estimates_ =
+      overall_usable_linear_estimates_ && !transparent_mode;
+
+  if (use_linear_filter_) {
+    std::fill(usable_linear_filter_estimates_.begin(),
+              usable_linear_filter_estimates_.end(),
+              overall_usable_linear_estimates_);
+  }
+}
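+
+// Worked example for the non-linear branch of the saturation detector below
+// (illustrative): with a render peak of 20000 and an estimated echo path gain
+// of 0.2, the predicted peak echo is 20000 * 0.2 * 10 = 40000 > 32000, i.e.
+// close enough to int16 full scale to flag the echo as saturated; a gain of
+// 0.1 predicts 20000 and the echo is not flagged.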
+
+void AecState::SaturationDetector::Update(
+    rtc::ArrayView<const std::vector<float>> x,
+    bool saturated_capture,
+    bool usable_linear_estimate,
+    rtc::ArrayView<const SubtractorOutput> subtractor_output,
+    float echo_path_gain) {
+  saturated_echo_ = false;
+  if (!saturated_capture) {
+    return;
+  }
+
+  if (usable_linear_estimate) {
+    constexpr float kSaturationThreshold = 20000.f;
+    for (size_t ch = 0; ch < subtractor_output.size(); ++ch) {
+      saturated_echo_ =
+          saturated_echo_ ||
+          (subtractor_output[ch].s_main_max_abs > kSaturationThreshold ||
+           subtractor_output[ch].s_shadow_max_abs > kSaturationThreshold);
+    }
+  } else {
+    float max_sample = 0.f;
+    for (auto& channel : x) {
+      for (float sample : channel) {
+        max_sample = std::max(max_sample, fabsf(sample));
+      }
+    }
+
+    const float kMargin = 10.f;
+    float peak_echo_amplitude = max_sample * echo_path_gain * kMargin;
+    saturated_echo_ = saturated_echo_ || peak_echo_amplitude > 32000;
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/aec_state.h b/audio_processing/aec3/aec_state.h
new file mode 100644
index 0000000..1a5e5fc
--- /dev/null
+++ b/audio_processing/aec3/aec_state.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_AEC_STATE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_AEC_STATE_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/delay_estimate.h"
+#include "audio_processing/aec3/echo_audibility.h"
+#include "audio_processing/aec3/echo_path_variability.h"
+#include "audio_processing/aec3/erl_estimator.h"
+#include "audio_processing/aec3/erle_estimator.h"
+#include "audio_processing/aec3/filter_analyzer.h"
+#include "audio_processing/aec3/render_buffer.h"
+#include "audio_processing/aec3/reverb_model_estimator.h"
+#include "audio_processing/aec3/subtractor_output.h"
+#include "audio_processing/aec3/subtractor_output_analyzer.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// Handles the state and the conditions for the echo removal functionality.
+class AecState {
+ public:
+  AecState(const EchoCanceller3Config& config, size_t num_capture_channels);
+  ~AecState();
+
+  // Returns whether the echo subtractor can be used to determine the residual
+  // echo.
+  bool UsableLinearEstimate() const {
+    return filter_quality_state_.LinearFilterUsable() &&
+           config_.filter.use_linear_filter;
+  }
+
+  // Returns whether the echo subtractor output should be used as output.
+  bool UseLinearFilterOutput() const {
+    return filter_quality_state_.LinearFilterUsable() &&
+           config_.filter.use_linear_filter;
+  }
+
+  // Returns whether the render signal is currently active.
+  bool ActiveRender() const { return blocks_with_active_render_ > 200; }
+
+  // Returns the appropriate scaling of the residual echo to match the
+  // audibility.
+  void GetResidualEchoScaling(rtc::ArrayView<float> residual_scaling) const;
+
+  // Returns whether the stationary properties of the signals are used in the
+  // aec.
+  bool UseStationarityProperties() const {
+    return config_.echo_audibility.use_stationarity_properties;
+  }
+
+  // Returns the ERLE.
+  rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Erle() const {
+    return erle_estimator_.Erle();
+  }
+
+  // Returns an offset to apply to the estimation of the residual echo
+  // computation. Returning nullopt means that no offset should be used, while
+  // any other value will be applied as a multiplier to the estimated residual
+  // echo.
+  absl::optional<float> ErleUncertainty() const;
+
+  // Returns the fullband ERLE estimate in log2 units.
+  float FullBandErleLog2() const { return erle_estimator_.FullbandErleLog2(); }
+
+  // Returns the ERL.
+  const std::array<float, kFftLengthBy2Plus1>& Erl() const {
+    return erl_estimator_.Erl();
+  }
+
+  // Returns the time-domain ERL.
+  float ErlTimeDomain() const { return erl_estimator_.ErlTimeDomain(); }
+
+  // Returns the delay estimate based on the linear filter.
+  int MinDirectPathFilterDelay() const {
+    return delay_state_.MinDirectPathFilterDelay();
+  }
+
+  // Returns whether the capture signal is saturated.
+  bool SaturatedCapture() const { return capture_signal_saturation_; }
+
+  // Returns whether the echo signal is saturated.
+  bool SaturatedEcho() const { return saturation_detector_.SaturatedEcho(); }
+
+  // Updates the capture signal saturation.
+  void UpdateCaptureSaturation(bool capture_signal_saturation) {
+    capture_signal_saturation_ = capture_signal_saturation;
+  }
+
+  // Returns whether the transparent mode is active.
+  bool TransparentMode() const { return transparent_state_.Active(); }
+
+  // Takes appropriate action at an echo path change.
+  void HandleEchoPathChange(const EchoPathVariability& echo_path_variability);
+
+  // Returns the decay factor for the echo reverberation.
+  float ReverbDecay() const { return reverb_model_estimator_.ReverbDecay(); }
+
+  // Return the frequency response of the reverberant echo.
+  rtc::ArrayView<const float> GetReverbFrequencyResponse() const {
+    return reverb_model_estimator_.GetReverbFrequencyResponse();
+  }
+
+  // Returns whether the transition for going out of the initial state has
+  // been triggered.
+  bool TransitionTriggered() const {
+    return initial_state_.TransitionTriggered();
+  }
+
+  // Updates the aec state.
+  // TODO(bugs.webrtc.org/10913): Compute multi-channel ERL.
+  void Update(
+      const absl::optional<DelayEstimate>& external_delay,
+      rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+          adaptive_filter_frequency_responses,
+      rtc::ArrayView<const std::vector<float>>
+          adaptive_filter_impulse_responses,
+      const RenderBuffer& render_buffer,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2_main,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+      rtc::ArrayView<const SubtractorOutput> subtractor_output);
+
+  // Returns filter length in blocks.
+  int FilterLengthBlocks() const {
+    // All filters have the same length, so arbitrarily return channel 0
+    // length.
+    return filter_analyzer_.FilterLengthBlocks();
+  }
+
+ private:
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const EchoCanceller3Config config_;
+  const size_t num_capture_channels_;
+
+  // Class for controlling the transition from the initial state, which in
+  // turn controls when the filter parameters for the initial state should be
+  // used.
+  class InitialState {
+   public:
+    explicit InitialState(const EchoCanceller3Config& config);
+    // Resets the state to again begin in the initial state.
+    void Reset();
+
+    // Updates the state based on new data.
+    void Update(bool active_render, bool saturated_capture);
+
+    // Returns whether the initial state is active or not.
+    bool InitialStateActive() const { return initial_state_; }
+
+    // Returns whether the transition from the initial state has started.
+    bool TransitionTriggered() const { return transition_triggered_; }
+
+   private:
+    const bool conservative_initial_phase_;
+    const float initial_state_seconds_;
+    bool transition_triggered_ = false;
+    bool initial_state_ = true;
+    size_t strong_not_saturated_render_blocks_ = 0;
+  } initial_state_;
+
+  // Class for choosing the direct-path delay relative to the beginning of the
+  // filter, as well as any other data related to the delay used within
+  // AecState.
+  class FilterDelay {
+   public:
+    FilterDelay(const EchoCanceller3Config& config,
+                size_t num_capture_channels);
+
+    // Returns whether an external delay has been reported to the AecState
+    // (from the delay estimator).
+    bool ExternalDelayReported() const { return external_delay_reported_; }
+
+    // Returns the delay in blocks relative to the beginning of the filter
+    // that corresponds to the direct path of the echo.
+    rtc::ArrayView<const int> DirectPathFilterDelays() const {
+      return filter_delays_blocks_;
+    }
+
+    // Returns the minimum delay among the direct path delays relative to the
+    // beginning of the filter.
+    int MinDirectPathFilterDelay() const { return min_filter_delay_; }
+
+    // Updates the delay estimates based on new data.
+    void Update(
+        rtc::ArrayView<const int> analyzer_filter_delay_estimates_blocks,
+        const absl::optional<DelayEstimate>& external_delay,
+        size_t blocks_with_proper_filter_adaptation);
+
+   private:
+    const int delay_headroom_samples_;
+    bool external_delay_reported_ = false;
+    std::vector<int> filter_delays_blocks_;
+    int min_filter_delay_ = 0;
+    absl::optional<DelayEstimate> external_delay_;
+  } delay_state_;
+
+  // Class for detecting and toggling the transparent mode which causes the
+  // suppressor to apply no suppression.
+  class TransparentMode {
+   public:
+    explicit TransparentMode(const EchoCanceller3Config& config);
+
+    // Returns whether the transparent mode should be active.
+    bool Active() const { return transparency_activated_; }
+
+    // Resets the state of the detector.
+    void Reset();
+
+    // Updates the detection decision based on new data.
+    void Update(int filter_delay_blocks,
+                bool any_filter_consistent,
+                bool any_filter_converged,
+                bool all_filters_diverged,
+                bool active_render,
+                bool saturated_capture);
+
+   private:
+    const bool bounded_erl_;
+    const bool linear_and_stable_echo_path_;
+    size_t capture_block_counter_ = 0;
+    bool transparency_activated_ = false;
+    size_t active_blocks_since_sane_filter_;
+    bool sane_filter_observed_ = false;
+    bool finite_erl_recently_detected_ = false;
+    size_t non_converged_sequence_size_;
+    size_t diverged_sequence_size_ = 0;
+    size_t active_non_converged_sequence_size_ = 0;
+    size_t num_converged_blocks_ = 0;
+    bool recent_convergence_during_activity_ = false;
+    size_t strong_not_saturated_render_blocks_ = 0;
+  } transparent_state_;
+
+  // Class for analyzing how well the linear filter is, and can be expected
+  // to, perform on the current signals. The purpose of this is to select the
+  // echo suppression functionality as well as the input to the echo
+  // suppressor.
+  class FilteringQualityAnalyzer {
+   public:
+    FilteringQualityAnalyzer(const EchoCanceller3Config& config,
+                             size_t num_capture_channels);
+
+    // Returns whether the linear filter can be used for the echo
+    // canceller output.
+    bool LinearFilterUsable() const { return overall_usable_linear_estimates_; }
+
+    // Returns whether an individual filter output can be used for the echo
+    // canceller output.
+    const std::vector<bool>& UsableLinearFilterOutputs() const {
+      return usable_linear_filter_estimates_;
+    }
+
+    // Resets the state of the analyzer.
+    void Reset();
+
+    // Updates the analysis based on new data.
+    void Update(bool active_render,
+                bool transparent_mode,
+                bool saturated_capture,
+                const absl::optional<DelayEstimate>& external_delay,
+                bool any_filter_converged);
+
+   private:
+    const bool use_linear_filter_;
+    bool overall_usable_linear_estimates_ = false;
+    size_t filter_update_blocks_since_reset_ = 0;
+    size_t filter_update_blocks_since_start_ = 0;
+    bool convergence_seen_ = false;
+    std::vector<bool> usable_linear_filter_estimates_;
+  } filter_quality_state_;
+
+  // Class for detecting whether the echo is to be considered to be
+  // saturated.
+  class SaturationDetector {
+   public:
+    // Returns whether the echo is to be considered saturated.
+    bool SaturatedEcho() const { return saturated_echo_; }
+
+    // Updates the detection decision based on new data.
+    void Update(rtc::ArrayView<const std::vector<float>> x,
+                bool saturated_capture,
+                bool usable_linear_estimate,
+                rtc::ArrayView<const SubtractorOutput> subtractor_output,
+                float echo_path_gain);
+
+   private:
+    bool saturated_echo_ = false;
+  } saturation_detector_;
+
+  ErlEstimator erl_estimator_;
+  ErleEstimator erle_estimator_;
+  size_t strong_not_saturated_render_blocks_ = 0;
+  size_t blocks_with_active_render_ = 0;
+  bool capture_signal_saturation_ = false;
+  FilterAnalyzer filter_analyzer_;
+  absl::optional<DelayEstimate> external_delay_;
+  EchoAudibility echo_audibility_;
+  ReverbModelEstimator reverb_model_estimator_;
+  ReverbModel avg_render_reverb_;
+  SubtractorOutputAnalyzer subtractor_output_analyzer_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_AEC_STATE_H_
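+
+// Usage sketch (illustrative): the echo remover drives AecState once per
+// block and then queries it to steer suppression.
+//
+//   AecState aec_state(EchoCanceller3Config(), /*num_capture_channels=*/1);
+//   aec_state.Update(external_delay, filter_frequency_responses,
+//                    filter_impulse_responses, render_buffer, E2_main, Y2,
+//                    subtractor_output);
+//   if (aec_state.UsableLinearEstimate()) {
+//     // Use the subtractor output and ERLE-based residual echo estimation.
+//   }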
diff --git a/audio_processing/aec3/aec_state_unittest.cc b/audio_processing/aec3/aec_state_unittest.cc
new file mode 100644
index 0000000..c068b6e
--- /dev/null
+++ b/audio_processing/aec3/aec_state_unittest.cc
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec_state.h"
+
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+std::string ProduceDebugText(size_t num_render_channels,
+                             size_t num_capture_channels) {
+  rtc::StringBuilder ss;
+  ss << "Render channels: " << num_render_channels;
+  ss << ", Capture channels: " << num_capture_channels;
+  return ss.Release();
+}
+
+void RunNormalUsageTest(size_t num_render_channels,
+                        size_t num_capture_channels) {
+  // TODO(bugs.webrtc.org/10913): Test with different content in different
+  // channels.
+  constexpr int kSampleRateHz = 48000;
+  constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+  ApmDataDumper data_dumper(42);
+  EchoCanceller3Config config;
+  AecState state(config, num_capture_channels);
+  absl::optional<DelayEstimate> delay_estimate =
+      DelayEstimate(DelayEstimate::Quality::kRefined, 10);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels));
+  std::vector<std::array<float, kFftLengthBy2Plus1>> E2_main(
+      num_capture_channels);
+  std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(num_capture_channels);
+  std::vector<std::vector<std::vector<float>>> x(
+      kNumBands, std::vector<std::vector<float>>(
+                     num_render_channels, std::vector<float>(kBlockSize, 0.f)));
+  EchoPathVariability echo_path_variability(
+      false, EchoPathVariability::DelayAdjustment::kNone, false);
+  std::vector<std::array<float, kBlockSize>> y(num_capture_channels);
+  std::vector<SubtractorOutput> subtractor_output(num_capture_channels);
+  for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+    subtractor_output[ch].Reset();
+    subtractor_output[ch].s_main.fill(100.f);
+    subtractor_output[ch].e_main.fill(100.f);
+    y[ch].fill(1000.f);
+    E2_main[ch].fill(0.f);
+    Y2[ch].fill(0.f);
+  }
+  Aec3Fft fft;
+  std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+      converged_filter_frequency_response(
+          num_capture_channels,
+          std::vector<std::array<float, kFftLengthBy2Plus1>>(10));
+  for (auto& v_ch : converged_filter_frequency_response) {
+    for (auto& v : v_ch) {
+      v.fill(0.01f);
+    }
+  }
+  std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+      diverged_filter_frequency_response = converged_filter_frequency_response;
+  converged_filter_frequency_response[0][2].fill(100.f);
+  converged_filter_frequency_response[0][2][0] = 1.f;
+  std::vector<std::vector<float>> impulse_response(
+      num_capture_channels,
+      std::vector<float>(GetTimeDomainLength(config.filter.main.length_blocks),
+                         0.f));
+
+  // Verify that linear AEC usability is true when the filter is converged.
+  for (size_t band = 0; band < kNumBands; ++band) {
+    for (size_t ch = 0; ch < num_render_channels; ++ch) {
+      std::fill(x[band][ch].begin(), x[band][ch].end(), 101.f);
+    }
+  }
+  for (int k = 0; k < 3000; ++k) {
+    render_delay_buffer->Insert(x);
+    for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+      subtractor_output[ch].ComputeMetrics(y[ch]);
+    }
+    state.Update(delay_estimate, converged_filter_frequency_response,
+                 impulse_response, *render_delay_buffer->GetRenderBuffer(),
+                 E2_main, Y2, subtractor_output);
+  }
+  EXPECT_TRUE(state.UsableLinearEstimate());
+
+  // Verify that linear AEC usability becomes false after an echo path
+  // change is reported.
+  for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+    subtractor_output[ch].ComputeMetrics(y[ch]);
+  }
+  state.HandleEchoPathChange(EchoPathVariability(
+      false, EchoPathVariability::DelayAdjustment::kBufferReadjustment, false));
+  state.Update(delay_estimate, converged_filter_frequency_response,
+               impulse_response, *render_delay_buffer->GetRenderBuffer(),
+               E2_main, Y2, subtractor_output);
+  EXPECT_FALSE(state.UsableLinearEstimate());
+
+  // Verify that the active render detection works as intended.
+  for (size_t ch = 0; ch < num_render_channels; ++ch) {
+    std::fill(x[0][ch].begin(), x[0][ch].end(), 101.f);
+  }
+  render_delay_buffer->Insert(x);
+  for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+    subtractor_output[ch].ComputeMetrics(y[ch]);
+  }
+  state.HandleEchoPathChange(EchoPathVariability(
+      true, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, false));
+  state.Update(delay_estimate, converged_filter_frequency_response,
+               impulse_response, *render_delay_buffer->GetRenderBuffer(),
+               E2_main, Y2, subtractor_output);
+  EXPECT_FALSE(state.ActiveRender());
+
+  for (int k = 0; k < 1000; ++k) {
+    render_delay_buffer->Insert(x);
+    for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+      subtractor_output[ch].ComputeMetrics(y[ch]);
+    }
+    state.Update(delay_estimate, converged_filter_frequency_response,
+                 impulse_response, *render_delay_buffer->GetRenderBuffer(),
+                 E2_main, Y2, subtractor_output);
+  }
+  EXPECT_TRUE(state.ActiveRender());
+
+  // Verify that the ERL is properly estimated.
+  for (auto& band : x) {
+    for (auto& channel : band) {
+      channel = std::vector<float>(kBlockSize, 0.f);
+    }
+  }
+
+  for (size_t ch = 0; ch < num_render_channels; ++ch) {
+    x[0][ch][0] = 5000.f;
+  }
+  for (size_t k = 0;
+       k < render_delay_buffer->GetRenderBuffer()->GetFftBuffer().size(); ++k) {
+    render_delay_buffer->Insert(x);
+    if (k == 0) {
+      render_delay_buffer->Reset();
+    }
+    render_delay_buffer->PrepareCaptureProcessing();
+  }
+
+  for (auto& Y2_ch : Y2) {
+    Y2_ch.fill(10.f * 10000.f * 10000.f);
+  }
+  for (size_t k = 0; k < 1000; ++k) {
+    for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+      subtractor_output[ch].ComputeMetrics(y[ch]);
+    }
+    state.Update(delay_estimate, converged_filter_frequency_response,
+                 impulse_response, *render_delay_buffer->GetRenderBuffer(),
+                 E2_main, Y2, subtractor_output);
+  }
+
+  ASSERT_TRUE(state.UsableLinearEstimate());
+  const std::array<float, kFftLengthBy2Plus1>& erl = state.Erl();
+  EXPECT_EQ(erl[0], erl[1]);
+  for (size_t k = 1; k < erl.size() - 1; ++k) {
+    EXPECT_NEAR(k % 2 == 0 ? 10.f : 1000.f, erl[k], 0.1);
+  }
+  EXPECT_EQ(erl[erl.size() - 2], erl[erl.size() - 1]);
+
+  // Verify that the ERLE is properly estimated.
+  for (auto& E2_main_ch : E2_main) {
+    E2_main_ch.fill(1.f * 10000.f * 10000.f);
+  }
+  for (auto& Y2_ch : Y2) {
+    Y2_ch.fill(10.f * E2_main[0][0]);
+  }
+  for (size_t k = 0; k < 1000; ++k) {
+    for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+      subtractor_output[ch].ComputeMetrics(y[ch]);
+    }
+    state.Update(delay_estimate, converged_filter_frequency_response,
+                 impulse_response, *render_delay_buffer->GetRenderBuffer(),
+                 E2_main, Y2, subtractor_output);
+  }
+  ASSERT_TRUE(state.UsableLinearEstimate());
+  {
+    // Note that the render spectrum is built so it does not have energy in
+    // the odd bands but just in the even bands.
+    const auto& erle = state.Erle()[0];
+    EXPECT_EQ(erle[0], erle[1]);
+    constexpr size_t kLowFrequencyLimit = 32;
+    for (size_t k = 2; k < kLowFrequencyLimit; k = k + 2) {
+      EXPECT_NEAR(4.f, erle[k], 0.1);
+    }
+    for (size_t k = kLowFrequencyLimit; k < erle.size() - 1; k = k + 2) {
+      EXPECT_NEAR(1.5f, erle[k], 0.1);
+    }
+    EXPECT_EQ(erle[erle.size() - 2], erle[erle.size() - 1]);
+  }
+  for (auto& E2_main_ch : E2_main) {
+    E2_main_ch.fill(1.f * 10000.f * 10000.f);
+  }
+  for (auto& Y2_ch : Y2) {
+    Y2_ch.fill(5.f * E2_main[0][0]);
+  }
+  for (size_t k = 0; k < 1000; ++k) {
+    for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+      subtractor_output[ch].ComputeMetrics(y[ch]);
+    }
+    state.Update(delay_estimate, converged_filter_frequency_response,
+                 impulse_response, *render_delay_buffer->GetRenderBuffer(),
+                 E2_main, Y2, subtractor_output);
+  }
+
+  ASSERT_TRUE(state.UsableLinearEstimate());
+  {
+    const auto& erle = state.Erle()[0];
+    EXPECT_EQ(erle[0], erle[1]);
+    constexpr size_t kLowFrequencyLimit = 32;
+    for (size_t k = 1; k < kLowFrequencyLimit; ++k) {
+      EXPECT_NEAR(k % 2 == 0 ? 4.f : 1.f, erle[k], 0.1);
+    }
+    for (size_t k = kLowFrequencyLimit; k < erle.size() - 1; ++k) {
+      EXPECT_NEAR(k % 2 == 0 ? 1.5f : 1.f, erle[k], 0.1);
+    }
+    EXPECT_EQ(erle[erle.size() - 2], erle[erle.size() - 1]);
+  }
+}
+
+}  // namespace
+
+// Verify the general functionality of AecState.
+TEST(AecState, NormalUsage) {
+  for (size_t num_render_channels : {1, 2, 8}) {
+    for (size_t num_capture_channels : {1, 2, 8}) {
+      SCOPED_TRACE(ProduceDebugText(num_render_channels, num_capture_channels));
+      RunNormalUsageTest(num_render_channels, num_capture_channels);
+    }
+  }
+}
+
+// Verifies the delay for a converged filter is correctly identified.
+TEST(AecState, ConvergedFilterDelay) {
+  constexpr int kFilterLengthBlocks = 10;
+  constexpr size_t kNumCaptureChannels = 1;
+  EchoCanceller3Config config;
+  AecState state(config, kNumCaptureChannels);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 48000, 1));
+  absl::optional<DelayEstimate> delay_estimate;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> E2_main(
+      kNumCaptureChannels);
+  std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(kNumCaptureChannels);
+  std::array<float, kBlockSize> x;
+  EchoPathVariability echo_path_variability(
+      false, EchoPathVariability::DelayAdjustment::kNone, false);
+  std::vector<SubtractorOutput> subtractor_output(kNumCaptureChannels);
+  for (auto& output : subtractor_output) {
+    output.Reset();
+    output.s_main.fill(100.f);
+  }
+  std::array<float, kBlockSize> y;
+  x.fill(0.f);
+  y.fill(0.f);
+
+  std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+      frequency_response(
+          kNumCaptureChannels,
+          std::vector<std::array<float, kFftLengthBy2Plus1>>(
+              kFilterLengthBlocks));
+  for (auto& v_ch : frequency_response) {
+    for (auto& v : v_ch) {
+      v.fill(0.01f);
+    }
+  }
+
+  std::vector<std::vector<float>> impulse_response(
+      kNumCaptureChannels,
+      std::vector<float>(GetTimeDomainLength(config.filter.main.length_blocks),
+                         0.f));
+
+  // Verify that the filter delay for a converged filter is properly
+  // identified.
+  for (int k = 0; k < kFilterLengthBlocks; ++k) {
+    for (auto& ir : impulse_response) {
+      std::fill(ir.begin(), ir.end(), 0.f);
+      ir[k * kBlockSize + 1] = 1.f;
+    }
+
+    state.HandleEchoPathChange(echo_path_variability);
+    subtractor_output[0].ComputeMetrics(y);
+    state.Update(delay_estimate, frequency_response, impulse_response,
+                 *render_delay_buffer->GetRenderBuffer(), E2_main, Y2,
+                 subtractor_output);
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/alignment_mixer.cc b/audio_processing/aec3/alignment_mixer.cc
new file mode 100644
index 0000000..516f8be
--- /dev/null
+++ b/audio_processing/aec3/alignment_mixer.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "audio_processing/aec3/alignment_mixer.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+AlignmentMixer::MixingVariant ChooseMixingVariant(bool downmix,
+                                                  bool adaptive_selection,
+                                                  int num_channels) {
+  RTC_DCHECK(!(adaptive_selection && downmix));
+  RTC_DCHECK_LT(0, num_channels);
+
+  if (num_channels == 1) {
+    return AlignmentMixer::MixingVariant::kFixed;
+  }
+  if (downmix) {
+    return AlignmentMixer::MixingVariant::kDownmix;
+  }
+  if (adaptive_selection) {
+    return AlignmentMixer::MixingVariant::kAdaptive;
+  }
+  return AlignmentMixer::MixingVariant::kFixed;
+}
+
+}  // namespace
+
+AlignmentMixer::AlignmentMixer(
+    size_t num_channels,
+    const EchoCanceller3Config::Delay::AlignmentMixing& config)
+    : AlignmentMixer(num_channels,
+                     config.downmix,
+                     config.adaptive_selection,
+                     config.activity_power_threshold,
+                     config.prefer_first_two_channels) {}
+
+AlignmentMixer::AlignmentMixer(size_t num_channels,
+                               bool downmix,
+                               bool adaptive_selection,
+                               float activity_power_threshold,
+                               bool prefer_first_two_channels)
+    : num_channels_(num_channels),
+      one_by_num_channels_(1.f / num_channels_),
+      excitation_energy_threshold_(kBlockSize * activity_power_threshold),
+      prefer_first_two_channels_(prefer_first_two_channels),
+      selection_variant_(
+          ChooseMixingVariant(downmix, adaptive_selection, num_channels_)) {
+  if (selection_variant_ == MixingVariant::kAdaptive) {
+    std::fill(strong_block_counters_.begin(), strong_block_counters_.end(), 0);
+    cumulative_energies_.resize(num_channels_);
+    std::fill(cumulative_energies_.begin(), cumulative_energies_.end(), 0.f);
+  }
+}
+
+void AlignmentMixer::ProduceOutput(rtc::ArrayView<const std::vector<float>> x,
+                                   rtc::ArrayView<float, kBlockSize> y) {
+  RTC_DCHECK_EQ(x.size(), num_channels_);
+  if (selection_variant_ == MixingVariant::kDownmix) {
+    Downmix(x, y);
+    return;
+  }
+
+  int ch = selection_variant_ == MixingVariant::kFixed ? 0 : SelectChannel(x);
+
+  RTC_DCHECK_GE(x.size(), ch);
+  std::copy(x[ch].begin(), x[ch].end(), y.begin());
+}
+
+void AlignmentMixer::Downmix(rtc::ArrayView<const std::vector<float>> x,
+                             rtc::ArrayView<float, kBlockSize> y) const {
+  RTC_DCHECK_EQ(x.size(), num_channels_);
+  RTC_DCHECK_GE(num_channels_, 2);
+  std::copy(x[0].begin(), x[0].end(), y.begin());
+  for (size_t ch = 1; ch < num_channels_; ++ch) {
+    for (size_t i = 0; i < kBlockSize; ++i) {
+      y[i] += x[ch][i];
+    }
+  }
+
+  for (size_t i = 0; i < kBlockSize; ++i) {
+    y[i] *= one_by_num_channels_;
+  }
+}
+
+int AlignmentMixer::SelectChannel(rtc::ArrayView<const std::vector<float>> x) {
+  RTC_DCHECK_EQ(x.size(), num_channels_);
+  RTC_DCHECK_GE(num_channels_, 2);
+  RTC_DCHECK_EQ(cumulative_energies_.size(), num_channels_);
+
+  constexpr size_t kBlocksToChooseLeftOrRight =
+      static_cast<size_t>(0.5f * kNumBlocksPerSecond);
+  const bool good_signal_in_left_or_right =
+      prefer_first_two_channels_ &&
+      (strong_block_counters_[0] > kBlocksToChooseLeftOrRight ||
+       strong_block_counters_[1] > kBlocksToChooseLeftOrRight);
+
+  const int num_ch_to_analyze =
+      good_signal_in_left_or_right ? 2 : num_channels_;
+
+  constexpr int kNumBlocksBeforeEnergySmoothing = 60 * kNumBlocksPerSecond;
+  ++block_counter_;
+
+  for (int ch = 0; ch < num_ch_to_analyze; ++ch) {
+    RTC_DCHECK_EQ(x[ch].size(), kBlockSize);
+    float x2_sum = 0.f;
+    for (size_t i = 0; i < kBlockSize; ++i) {
+      x2_sum += x[ch][i] * x[ch][i];
+    }
+
+    if (ch < 2 && x2_sum > excitation_energy_threshold_) {
+      ++strong_block_counters_[ch];
+    }
+
+    if (block_counter_ <= kNumBlocksBeforeEnergySmoothing) {
+      cumulative_energies_[ch] += x2_sum;
+    } else {
+      constexpr float kSmoothing = 1.f / (10 * kNumBlocksPerSecond);
+      cumulative_energies_[ch] +=
+          kSmoothing * (x2_sum - cumulative_energies_[ch]);
+    }
+  }
+
+  // Normalize the energies so that the energy computations from now on can
+  // be based on smoothing.
+  if (block_counter_ == kNumBlocksBeforeEnergySmoothing) {
+    constexpr float kOneByNumBlocksBeforeEnergySmoothing =
+        1.f / kNumBlocksBeforeEnergySmoothing;
+    for (int ch = 0; ch < num_ch_to_analyze; ++ch) {
+      cumulative_energies_[ch] *= kOneByNumBlocksBeforeEnergySmoothing;
+    }
+  }
+
+  int strongest_ch = 0;
+  for (int ch = 0; ch < num_ch_to_analyze; ++ch) {
+    if (cumulative_energies_[ch] > cumulative_energies_[strongest_ch]) {
+      strongest_ch = ch;
+    }
+  }
+
+  if ((good_signal_in_left_or_right && selected_channel_ > 1) ||
+      cumulative_energies_[strongest_ch] >
+          2.f * cumulative_energies_[selected_channel_]) {
+    selected_channel_ = strongest_ch;
+  }
+
+  return selected_channel_;
+}
+
+}  // namespace webrtc
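+
+// Usage sketch (illustrative; the threshold value is chosen arbitrarily for
+// the sketch): producing the mono alignment signal for one block of stereo
+// render audio.
+//
+//   AlignmentMixer mixer(/*num_channels=*/2, /*downmix=*/false,
+//                        /*adaptive_selection=*/true,
+//                        /*activity_power_threshold=*/10000.f,
+//                        /*prefer_first_two_channels=*/true);
+//   std::vector<std::vector<float>> x(2, std::vector<float>(kBlockSize, 0.f));
+//   std::array<float, kBlockSize> y;
+//   mixer.ProduceOutput(x, y);  // y now holds the selected/mixed channel.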
diff --git a/audio_processing/aec3/alignment_mixer.h b/audio_processing/aec3/alignment_mixer.h
new file mode 100644
index 0000000..53d21dd
--- /dev/null
+++ b/audio_processing/aec3/alignment_mixer.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ALIGNMENT_MIXER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ALIGNMENT_MIXER_H_
+
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// Performs channel conversion to mono for the purpose of providing a decent
+// mono input for the delay estimation. This is achieved by analyzing all
+// incoming channels and producing a single-channel output.
+class AlignmentMixer {
+ public:
+  AlignmentMixer(size_t num_channels,
+                 const EchoCanceller3Config::Delay::AlignmentMixing& config);
+
+  AlignmentMixer(size_t num_channels,
+                 bool downmix,
+                 bool adaptive_selection,
+                 float excitation_limit,
+                 bool prefer_first_two_channels);
+
+  void ProduceOutput(rtc::ArrayView<const std::vector<float>> x,
+                     rtc::ArrayView<float, kBlockSize> y);
+
+  enum class MixingVariant { kDownmix, kAdaptive, kFixed };
+
+ private:
+  const size_t num_channels_;
+  const float one_by_num_channels_;
+  const float excitation_energy_threshold_;
+  const bool prefer_first_two_channels_;
+  const MixingVariant selection_variant_;
+  std::array<size_t, 2> strong_block_counters_;
+  std::vector<float> cumulative_energies_;
+  int selected_channel_ = 0;
+  size_t block_counter_ = 0;
+
+  void Downmix(const rtc::ArrayView<const std::vector<float>> x,
+               rtc::ArrayView<float, kBlockSize> y) const;
+  int SelectChannel(rtc::ArrayView<const std::vector<float>> x);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ALIGNMENT_MIXER_H_
diff --git a/audio_processing/aec3/alignment_mixer_unittest.cc b/audio_processing/aec3/alignment_mixer_unittest.cc
new file mode 100644
index 0000000..832e4ea
--- /dev/null
+++ b/audio_processing/aec3/alignment_mixer_unittest.cc
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/alignment_mixer.h"
+
+#include <string>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::AllOf;
+using ::testing::Each;
+
+namespace webrtc {
+namespace {
+std::string ProduceDebugText(bool initial_silence,
+                             bool huge_activity_threshold,
+                             bool prefer_first_two_channels,
+                             int num_channels,
+                             int strongest_ch) {
+  rtc::StringBuilder ss;
+  ss << ", Initial silence: " << initial_silence;
+  ss << ", Huge activity threshold: " << huge_activity_threshold;
+  ss << ", Prefer first two channels: " << prefer_first_two_channels;
+  ss << ", Number of channels: " << num_channels;
+  ss << ", Strongest channel: " << strongest_ch;
+  return ss.Release();
+}
+
+}  // namespace
+
+TEST(AlignmentMixer, GeneralAdaptiveMode) {
+  constexpr int kChannelOffset = 100;
+  constexpr int kMaxChannelsToTest = 8;
+  constexpr float kStrongestSignalScaling =
+      kMaxChannelsToTest * kChannelOffset * 100;
+
+  for (bool initial_silence : {false, true}) {
+    for (bool huge_activity_threshold : {false, true}) {
+      for (bool prefer_first_two_channels : {false, true}) {
+        for (int num_channels = 2; num_channels < 8; ++num_channels) {
+          for (int strongest_ch = 0; strongest_ch < num_channels;
+               ++strongest_ch) {
+            SCOPED_TRACE(ProduceDebugText(
+                initial_silence, huge_activity_threshold,
+                prefer_first_two_channels, num_channels, strongest_ch));
+            const float excitation_limit =
+                huge_activity_threshold ? 1000000000.f : 0.001f;
+            AlignmentMixer am(num_channels, /*downmix*/ false,
+                              /*adaptive_selection*/ true, excitation_limit,
+                              prefer_first_two_channels);
+
+            std::vector<std::vector<float>> x(
+                num_channels, std::vector<float>(kBlockSize, 0.f));
+            if (initial_silence) {
+              for (int ch = 0; ch < num_channels; ++ch) {
+                std::fill(x[ch].begin(), x[ch].end(), 0.f);
+              }
+              std::array<float, kBlockSize> y;
+              for (int frame = 0; frame < 10 * kNumBlocksPerSecond; ++frame) {
+                am.ProduceOutput(x, y);
+              }
+            }
+
+            for (int frame = 0; frame < 2 * kNumBlocksPerSecond; ++frame) {
+              const auto channel_value = [&](int frame_index,
+                                             int channel_index) {
+                return static_cast<float>(frame_index +
+                                          channel_index * kChannelOffset);
+              };
+
+              for (int ch = 0; ch < num_channels; ++ch) {
+                float scaling =
+                    ch == strongest_ch ? kStrongestSignalScaling : 1.f;
+                std::fill(x[ch].begin(), x[ch].end(),
+                          channel_value(frame, ch) * scaling);
+              }
+
+              std::array<float, kBlockSize> y;
+              y.fill(-1.f);
+              am.ProduceOutput(x, y);
+
+              if (frame > 1 * kNumBlocksPerSecond) {
+                if (!prefer_first_two_channels || huge_activity_threshold) {
+                  EXPECT_THAT(y, AllOf(Each(x[strongest_ch][0])));
+                } else {
+                  bool left_or_right_chosen;
+                  for (int ch = 0; ch < 2; ++ch) {
+                    left_or_right_chosen = true;
+                    for (size_t k = 0; k < kBlockSize; ++k) {
+                      if (y[k] != x[ch][k]) {
+                        left_or_right_chosen = false;
+                        break;
+                      }
+                    }
+                    if (left_or_right_chosen) {
+                      break;
+                    }
+                  }
+                  EXPECT_TRUE(left_or_right_chosen);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+TEST(AlignmentMixer, DownmixMode) {
+  for (int num_channels = 1; num_channels < 8; ++num_channels) {
+    AlignmentMixer am(num_channels, /*downmix*/ true,
+                      /*adaptive_selection*/ false, /*excitation_limit*/ 1.f,
+                      /*prefer_first_two_channels*/ false);
+
+    std::vector<std::vector<float>> x(num_channels,
+                                      std::vector<float>(kBlockSize, 0.f));
+    const auto channel_value = [](int frame_index, int channel_index) {
+      return static_cast<float>(frame_index + channel_index);
+    };
+    for (int frame = 0; frame < 10; ++frame) {
+      for (int ch = 0; ch < num_channels; ++ch) {
+        std::fill(x[ch].begin(), x[ch].end(), channel_value(frame, ch));
+      }
+
+      std::array<float, kBlockSize> y;
+      y.fill(-1.f);
+      am.ProduceOutput(x, y);
+
+      float expected_mixed_value = 0.f;
+      for (int ch = 0; ch < num_channels; ++ch) {
+        expected_mixed_value += channel_value(frame, ch);
+      }
+      expected_mixed_value *= 1.f / num_channels;
+
+      EXPECT_THAT(y, AllOf(Each(expected_mixed_value)));
+    }
+  }
+}
+
+TEST(AlignmentMixer, FixedMode) {
+  for (int num_channels = 1; num_channels < 8; ++num_channels) {
+    AlignmentMixer am(num_channels, /*downmix*/ false,
+                      /*adaptive_selection*/ false, /*excitation_limit*/ 1.f,
+                      /*prefer_first_two_channels*/ false);
+
+    std::vector<std::vector<float>> x(num_channels,
+                                      std::vector<float>(kBlockSize, 0.f));
+    const auto channel_value = [](int frame_index, int channel_index) {
+      return static_cast<float>(frame_index + channel_index);
+    };
+    for (int frame = 0; frame < 10; ++frame) {
+      for (int ch = 0; ch < num_channels; ++ch) {
+        std::fill(x[ch].begin(), x[ch].end(), channel_value(frame, ch));
+      }
+
+      std::array<float, kBlockSize> y;
+      y.fill(-1.f);
+      am.ProduceOutput(x, y);
+      EXPECT_THAT(y, AllOf(Each(x[0][0])));
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST(AlignmentMixer, ZeroNumChannels) {
+  EXPECT_DEATH(
+      AlignmentMixer(/*num_channels*/ 0, /*downmix*/ false,
+                     /*adaptive_selection*/ false, /*excitation_limit*/ 1.f,
+                     /*prefer_first_two_channels*/ false);
+      , "");
+}
+
+TEST(AlignmentMixer, IncorrectVariant) {
+  EXPECT_DEATH(
+      AlignmentMixer(/*num_channels*/ 1, /*downmix*/ true,
+                     /*adaptive_selection*/ true, /*excitation_limit*/ 1.f,
+                     /*prefer_first_two_channels*/ false);
+      , "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/api_call_jitter_metrics.cc b/audio_processing/aec3/api_call_jitter_metrics.cc
new file mode 100644
index 0000000..9118c02
--- /dev/null
+++ b/audio_processing/aec3/api_call_jitter_metrics.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/api_call_jitter_metrics.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "audio_processing/aec3/aec3_common.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+
+bool TimeToReportMetrics(int frames_since_last_report) {
+  constexpr int kNumFramesPerSecond = 100;
+  constexpr int kReportingIntervalFrames = 10 * kNumFramesPerSecond;
+  return frames_since_last_report == kReportingIntervalFrames;
+}
+
+}  // namespace
+
+ApiCallJitterMetrics::Jitter::Jitter()
+    : max_(0), min_(std::numeric_limits<int>::max()) {}
+
+void ApiCallJitterMetrics::Jitter::Update(int num_api_calls_in_a_row) {
+  min_ = std::min(min_, num_api_calls_in_a_row);
+  max_ = std::max(max_, num_api_calls_in_a_row);
+}
+
+void ApiCallJitterMetrics::Jitter::Reset() {
+  min_ = std::numeric_limits<int>::max();
+  max_ = 0;
+}
+
+void ApiCallJitterMetrics::Reset() {
+  render_jitter_.Reset();
+  capture_jitter_.Reset();
+  num_api_calls_in_a_row_ = 0;
+  frames_since_last_report_ = 0;
+  last_call_was_render_ = false;
+  proper_call_observed_ = false;
+}
+
+void ApiCallJitterMetrics::ReportRenderCall() {
+  if (!last_call_was_render_) {
+    // If the previous call was a capture call and a proper call (containing
+    // both render and capture data) has been observed, store the last number
+    // of capture calls in a row into the metrics.
+    if (proper_call_observed_) {
+      capture_jitter_.Update(num_api_calls_in_a_row_);
+    }
+
+    // Reset the call counter to start counting render calls.
+    num_api_calls_in_a_row_ = 0;
+  }
+  ++num_api_calls_in_a_row_;
+  last_call_was_render_ = true;
+}
+
+void ApiCallJitterMetrics::ReportCaptureCall() {
+  if (last_call_was_render_) {
+    // If the previous call was a render call and a proper call (containing
+    // both render and capture data) has been observed, store the last number
+    // of render calls in a row into the metrics.
+    if (proper_call_observed_) {
+      render_jitter_.Update(num_api_calls_in_a_row_);
+    }
+    // Reset the call counter to start counting capture calls.
+    num_api_calls_in_a_row_ = 0;
+
+    // If this statement is reached, at least one render and one capture call
+    // have been observed.
+    proper_call_observed_ = true;
+  }
+  ++num_api_calls_in_a_row_;
+  last_call_was_render_ = false;
+
+  // Only report and update the jitter metrics once a proper call, containing
+  // both render and capture data, has been observed.
+  if (proper_call_observed_ &&
+      TimeToReportMetrics(++frames_since_last_report_)) {
+    // Report jitter, where the basic unit is frames.
+    constexpr int kMaxJitterToReport = 50;
+
+    // Report the max and min jitter for render and capture, in frames.
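+    // RTC_HISTOGRAM_COUNTS_LINEAR takes (name, sample, min, max,
+    // bucket_count), so with (1, kMaxJitterToReport, kMaxJitterToReport) each
+    // histogram below uses one linear bucket per frame count in [1, 50];
+    // samples are clamped to kMaxJitterToReport via std::min before being
+    // recorded.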
+ RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.MaxRenderJitter", + std::min(kMaxJitterToReport, render_jitter().max()), 1, + kMaxJitterToReport, kMaxJitterToReport); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.MinRenderJitter", + std::min(kMaxJitterToReport, render_jitter().min()), 1, + kMaxJitterToReport, kMaxJitterToReport); + + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.MaxCaptureJitter", + std::min(kMaxJitterToReport, capture_jitter().max()), 1, + kMaxJitterToReport, kMaxJitterToReport); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.MinCaptureJitter", + std::min(kMaxJitterToReport, capture_jitter().min()), 1, + kMaxJitterToReport, kMaxJitterToReport); + + frames_since_last_report_ = 0; + Reset(); + } +} + +bool ApiCallJitterMetrics::WillReportMetricsAtNextCapture() const { + return TimeToReportMetrics(frames_since_last_report_ + 1); +} + +} // namespace webrtc diff --git a/audio_processing/aec3/api_call_jitter_metrics.h b/audio_processing/aec3/api_call_jitter_metrics.h new file mode 100644 index 0000000..dd1fa82 --- /dev/null +++ b/audio_processing/aec3/api_call_jitter_metrics.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_ + +namespace webrtc { + +// Stores data for reporting metrics on the API call jitter. +class ApiCallJitterMetrics { + public: + class Jitter { + public: + Jitter(); + void Update(int num_api_calls_in_a_row); + void Reset(); + + int min() const { return min_; } + int max() const { return max_; } + + private: + int max_; + int min_; + }; + + ApiCallJitterMetrics() { Reset(); } + + // Update metrics for render API call. + void ReportRenderCall(); + + // Update and periodically report metrics for capture API call. + void ReportCaptureCall(); + + // Methods used only for testing. + const Jitter& render_jitter() const { return render_jitter_; } + const Jitter& capture_jitter() const { return capture_jitter_; } + bool WillReportMetricsAtNextCapture() const; + + private: + void Reset(); + + Jitter render_jitter_; + Jitter capture_jitter_; + + int num_api_calls_in_a_row_ = 0; + int frames_since_last_report_ = 0; + bool last_call_was_render_ = false; + bool proper_call_observed_ = false; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_ diff --git a/audio_processing/aec3/api_call_jitter_metrics_unittest.cc b/audio_processing/aec3/api_call_jitter_metrics_unittest.cc new file mode 100644 index 0000000..b902487 --- /dev/null +++ b/audio_processing/aec3/api_call_jitter_metrics_unittest.cc @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/audio_processing/aec3/api_call_jitter_metrics.h" + +#include "modules/audio_processing/aec3/aec3_common.h" +#include "test/gtest.h" + +namespace webrtc { + +// Verify constant jitter. +TEST(ApiCallJitterMetrics, ConstantJitter) { + for (int jitter = 1; jitter < 20; ++jitter) { + ApiCallJitterMetrics metrics; + for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) { + for (int j = 0; j < jitter; ++j) { + metrics.ReportRenderCall(); + } + + for (int j = 0; j < jitter; ++j) { + metrics.ReportCaptureCall(); + + if (metrics.WillReportMetricsAtNextCapture()) { + EXPECT_EQ(jitter, metrics.render_jitter().min()); + EXPECT_EQ(jitter, metrics.render_jitter().max()); + EXPECT_EQ(jitter, metrics.capture_jitter().min()); + EXPECT_EQ(jitter, metrics.capture_jitter().max()); + } + } + } + } +} + +// Verify peaky jitter for the render. +TEST(ApiCallJitterMetrics, JitterPeakRender) { + constexpr int kMinJitter = 2; + constexpr int kJitterPeak = 10; + constexpr int kPeakInterval = 100; + + ApiCallJitterMetrics metrics; + int render_surplus = 0; + + for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) { + const int num_render_calls = + k % kPeakInterval == 0 ? kJitterPeak : kMinJitter; + for (int j = 0; j < num_render_calls; ++j) { + metrics.ReportRenderCall(); + ++render_surplus; + } + + ASSERT_LE(kMinJitter, render_surplus); + const int num_capture_calls = + render_surplus == kMinJitter ? kMinJitter : kMinJitter + 1; + for (int j = 0; j < num_capture_calls; ++j) { + metrics.ReportCaptureCall(); + + if (metrics.WillReportMetricsAtNextCapture()) { + EXPECT_EQ(kMinJitter, metrics.render_jitter().min()); + EXPECT_EQ(kJitterPeak, metrics.render_jitter().max()); + EXPECT_EQ(kMinJitter, metrics.capture_jitter().min()); + EXPECT_EQ(kMinJitter + 1, metrics.capture_jitter().max()); + } + --render_surplus; + } + } +} + +// Verify peaky jitter for the capture. +TEST(ApiCallJitterMetrics, JitterPeakCapture) { + constexpr int kMinJitter = 2; + constexpr int kJitterPeak = 10; + constexpr int kPeakInterval = 100; + + ApiCallJitterMetrics metrics; + int capture_surplus = kMinJitter; + + for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) { + ASSERT_LE(kMinJitter, capture_surplus); + const int num_render_calls = + capture_surplus == kMinJitter ? kMinJitter : kMinJitter + 1; + for (int j = 0; j < num_render_calls; ++j) { + metrics.ReportRenderCall(); + --capture_surplus; + } + + const int num_capture_calls = + k % kPeakInterval == 0 ? kJitterPeak : kMinJitter; + for (int j = 0; j < num_capture_calls; ++j) { + metrics.ReportCaptureCall(); + + if (metrics.WillReportMetricsAtNextCapture()) { + EXPECT_EQ(kMinJitter, metrics.render_jitter().min()); + EXPECT_EQ(kMinJitter + 1, metrics.render_jitter().max()); + EXPECT_EQ(kMinJitter, metrics.capture_jitter().min()); + EXPECT_EQ(kJitterPeak, metrics.capture_jitter().max()); + } + ++capture_surplus; + } + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/block_buffer.cc b/audio_processing/aec3/block_buffer.cc new file mode 100644 index 0000000..3dad07a --- /dev/null +++ b/audio_processing/aec3/block_buffer.cc @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "audio_processing/aec3/block_buffer.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+BlockBuffer::BlockBuffer(size_t size,
+                         size_t num_bands,
+                         size_t num_channels,
+                         size_t frame_length)
+    : size(static_cast<int>(size)),
+      buffer(size,
+             std::vector<std::vector<std::vector<float>>>(
+                 num_bands,
+                 std::vector<std::vector<float>>(
+                     num_channels,
+                     std::vector<float>(frame_length, 0.f)))) {
+  for (auto& block : buffer) {
+    for (auto& band : block) {
+      for (auto& channel : band) {
+        std::fill(channel.begin(), channel.end(), 0.f);
+      }
+    }
+  }
+}
+
+BlockBuffer::~BlockBuffer() = default;
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/block_buffer.h b/audio_processing/aec3/block_buffer.h
new file mode 100644
index 0000000..b28d659
--- /dev/null
+++ b/audio_processing/aec3/block_buffer.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Struct for bundling a circular buffer of multiband, multichannel blocks
+// together with the read and write indices.
+struct BlockBuffer {
+  BlockBuffer(size_t size,
+              size_t num_bands,
+              size_t num_channels,
+              size_t frame_length);
+  ~BlockBuffer();
+
+  int IncIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index < size - 1 ? index + 1 : 0;
+  }
+
+  int DecIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index > 0 ? index - 1 : size - 1;
+  }
+
+  int OffsetIndex(int index, int offset) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    RTC_DCHECK_GE(size, offset);
+    return (size + index + offset) % size;
+  }
+
+  void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+  void IncWriteIndex() { write = IncIndex(write); }
+  void DecWriteIndex() { write = DecIndex(write); }
+  void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+  void IncReadIndex() { read = IncIndex(read); }
+  void DecReadIndex() { read = DecIndex(read); }
+
+  const int size;
+  std::vector<std::vector<std::vector<std::vector<float>>>> buffer;
+  int write = 0;
+  int read = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_BUFFER_H_
diff --git a/audio_processing/aec3/block_delay_buffer.cc b/audio_processing/aec3/block_delay_buffer.cc
new file mode 100644
index 0000000..8d6d84f
--- /dev/null
+++ b/audio_processing/aec3/block_delay_buffer.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "audio_processing/aec3/block_delay_buffer.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+BlockDelayBuffer::BlockDelayBuffer(size_t num_bands,
+                                   size_t frame_length,
+                                   size_t delay_samples)
+    : frame_length_(frame_length),
+      delay_(delay_samples),
+      buf_(num_bands, std::vector<float>(delay_, 0.f)) {}
+
+BlockDelayBuffer::~BlockDelayBuffer() = default;
+
+void BlockDelayBuffer::DelaySignal(AudioBuffer* frame) {
+  RTC_DCHECK_EQ(1, frame->num_channels());
+  RTC_DCHECK_EQ(buf_.size(), frame->num_bands());
+  if (delay_ == 0) {
+    return;
+  }
+
+  const size_t i_start = last_insert_;
+  size_t i = 0;
+  for (size_t j = 0; j < buf_.size(); ++j) {
+    i = i_start;
+    for (size_t k = 0; k < frame_length_; ++k) {
+      const float tmp = buf_[j][i];
+      buf_[j][i] = frame->split_bands(0)[j][k];
+      frame->split_bands(0)[j][k] = tmp;
+      i = i < buf_[0].size() - 1 ? i + 1 : 0;
+    }
+  }
+
+  last_insert_ = i;
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/block_delay_buffer.h b/audio_processing/aec3/block_delay_buffer.h
new file mode 100644
index 0000000..0af25b6
--- /dev/null
+++ b/audio_processing/aec3/block_delay_buffer.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_DELAY_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_DELAY_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "audio_processing/audio_buffer.h"
+
+namespace webrtc {
+
+// Class for applying a fixed delay to the samples in a signal partitioned
+// using the AudioBuffer band-splitting scheme.
+class BlockDelayBuffer {
+ public:
+  BlockDelayBuffer(size_t num_bands,
+                   size_t frame_length,
+                   size_t delay_samples);
+  ~BlockDelayBuffer();
+
+  // Delays the samples by the specified delay.
+  void DelaySignal(AudioBuffer* frame);
+
+ private:
+  const size_t frame_length_;
+  const size_t delay_;
+  std::vector<std::vector<float>> buf_;
+  size_t last_insert_ = 0;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_DELAY_BUFFER_H_
diff --git a/audio_processing/aec3/block_delay_buffer_unittest.cc b/audio_processing/aec3/block_delay_buffer_unittest.cc
new file mode 100644
index 0000000..bda1821
--- /dev/null
+++ b/audio_processing/aec3/block_delay_buffer_unittest.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_delay_buffer.h"
+
+#include <string>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+float SampleValue(size_t sample_index) {
+  return sample_index % 32768;
+}
+
+// Populates the frame with linearly increasing sample values for each band.
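+// The values wrap at 32768 (see SampleValue) so that they stay exactly
+// representable as floats and the equality checks against the delayed signal
+// below remain exact.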
+void PopulateInputFrame(size_t frame_length,
+                        size_t num_bands,
+                        size_t first_sample_index,
+                        float* const* frame) {
+  for (size_t k = 0; k < num_bands; ++k) {
+    for (size_t i = 0; i < frame_length; ++i) {
+      frame[k][i] = SampleValue(first_sample_index + i);
+    }
+  }
+}
+
+std::string ProduceDebugText(int sample_rate_hz, size_t delay) {
+  char log_stream_buffer[8 * 1024];
+  rtc::SimpleStringBuilder ss(log_stream_buffer);
+  ss << "Sample rate: " << sample_rate_hz;
+  ss << ", Delay: " << delay;
+  return ss.str();
+}
+
+}  // namespace
+
+// Verifies that the correct signal delay is achieved.
+TEST(BlockDelayBuffer, CorrectDelayApplied) {
+  for (size_t delay : {0, 1, 27, 160, 4321, 7021}) {
+    for (auto rate : {16000, 32000, 48000}) {
+      SCOPED_TRACE(ProduceDebugText(rate, delay));
+      size_t num_bands = NumBandsForRate(rate);
+      size_t subband_frame_length = 160;
+
+      BlockDelayBuffer delay_buffer(num_bands, subband_frame_length, delay);
+
+      static constexpr size_t kNumFramesToProcess = 20;
+      for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+           ++frame_index) {
+        AudioBuffer audio_buffer(rate, 1, rate, 1, rate, 1);
+        if (rate > 16000) {
+          audio_buffer.SplitIntoFrequencyBands();
+        }
+        size_t first_sample_index = frame_index * subband_frame_length;
+        PopulateInputFrame(subband_frame_length, num_bands, first_sample_index,
+                           &audio_buffer.split_bands(0)[0]);
+        delay_buffer.DelaySignal(&audio_buffer);
+
+        for (size_t k = 0; k < num_bands; ++k) {
+          size_t sample_index = first_sample_index;
+          for (size_t i = 0; i < subband_frame_length; ++i, ++sample_index) {
+            if (sample_index < delay) {
+              EXPECT_EQ(0.f, audio_buffer.split_bands(0)[k][i]);
+            } else {
+              EXPECT_EQ(SampleValue(sample_index - delay),
+                        audio_buffer.split_bands(0)[k][i]);
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/block_framer.cc b/audio_processing/aec3/block_framer.cc
new file mode 100644
index 0000000..e9e99ea
--- /dev/null
+++ b/audio_processing/aec3/block_framer.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/block_framer.h"
+
+#include <algorithm>
+
+#include "audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+BlockFramer::BlockFramer(size_t num_bands, size_t num_channels)
+    : num_bands_(num_bands),
+      num_channels_(num_channels),
+      buffer_(num_bands_,
+              std::vector<std::vector<float>>(
+                  num_channels,
+                  std::vector<float>(kBlockSize, 0.f))) {
+  RTC_DCHECK_LT(0, num_bands);
+  RTC_DCHECK_LT(0, num_channels);
+}
+
+BlockFramer::~BlockFramer() = default;
+
+// All the constants are chosen so that the buffer is either empty or has
+// enough samples for InsertBlockAndExtractSubFrame to produce a frame. In
+// order to achieve this, the InsertBlockAndExtractSubFrame and InsertBlock
+// methods need to be called in the correct order.
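+//
+// As an illustration of one valid call sequence: the buffer is constructed
+// holding one block of zeros (64 samples per channel), and each
+// InsertBlockAndExtractSubFrame call drains 16 more samples than it inserts
+// (64 in, 80 out), stepping the buffered amount through 64, 48, 32, 16, 0;
+// at that point a single InsertBlock call must refill the buffer, since
+// 4 * 80 extracted samples equal 5 * 64 inserted samples.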
+void BlockFramer::InsertBlock(
+    const std::vector<std::vector<std::vector<float>>>& block) {
+  RTC_DCHECK_EQ(num_bands_, block.size());
+  for (size_t band = 0; band < num_bands_; ++band) {
+    RTC_DCHECK_EQ(num_channels_, block[band].size());
+    for (size_t channel = 0; channel < num_channels_; ++channel) {
+      RTC_DCHECK_EQ(kBlockSize, block[band][channel].size());
+      RTC_DCHECK_EQ(0, buffer_[band][channel].size());
+
+      buffer_[band][channel].insert(buffer_[band][channel].begin(),
+                                    block[band][channel].begin(),
+                                    block[band][channel].end());
+    }
+  }
+}
+
+void BlockFramer::InsertBlockAndExtractSubFrame(
+    const std::vector<std::vector<std::vector<float>>>& block,
+    std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame) {
+  RTC_DCHECK(sub_frame);
+  RTC_DCHECK_EQ(num_bands_, block.size());
+  RTC_DCHECK_EQ(num_bands_, sub_frame->size());
+  for (size_t band = 0; band < num_bands_; ++band) {
+    RTC_DCHECK_EQ(num_channels_, block[band].size());
+    RTC_DCHECK_EQ(num_channels_, (*sub_frame)[0].size());
+    for (size_t channel = 0; channel < num_channels_; ++channel) {
+      RTC_DCHECK_LE(kSubFrameLength,
+                    buffer_[band][channel].size() + kBlockSize);
+      RTC_DCHECK_EQ(kBlockSize, block[band][channel].size());
+      RTC_DCHECK_GE(kBlockSize, buffer_[band][channel].size());
+      RTC_DCHECK_EQ(kSubFrameLength, (*sub_frame)[band][channel].size());
+
+      const int samples_to_frame =
+          kSubFrameLength - buffer_[band][channel].size();
+      std::copy(buffer_[band][channel].begin(), buffer_[band][channel].end(),
+                (*sub_frame)[band][channel].begin());
+      std::copy(
+          block[band][channel].begin(),
+          block[band][channel].begin() + samples_to_frame,
+          (*sub_frame)[band][channel].begin() + buffer_[band][channel].size());
+      buffer_[band][channel].clear();
+      buffer_[band][channel].insert(
+          buffer_[band][channel].begin(),
+          block[band][channel].begin() + samples_to_frame,
+          block[band][channel].end());
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/block_framer.h b/audio_processing/aec3/block_framer.h
new file mode 100644
index 0000000..e3d2eb5
--- /dev/null
+++ b/audio_processing/aec3/block_framer.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_FRAMER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_FRAMER_H_
+
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// Class for producing frames consisting of 2 subframes of 80 samples each
+// from 64 sample blocks. The class is designed to work together with the
+// FrameBlocker class, which performs the reverse conversion. Used together
+// with that class, this class produces output frames at the same rate as
+// frames are received by the FrameBlocker class. Note that the internal
+// buffers will overrun if any other rate of packet insertion is used.
+class BlockFramer {
+ public:
+  BlockFramer(size_t num_bands, size_t num_channels);
+  ~BlockFramer();
+  BlockFramer(const BlockFramer&) = delete;
+  BlockFramer& operator=(const BlockFramer&) = delete;
+
+  // Adds a 64 sample block into the data that will form the next output
+  // frame.
+  void InsertBlock(const std::vector<std::vector<std::vector<float>>>& block);
+  // Adds a 64 sample block and extracts an 80 sample subframe.
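+  // The 16 sample shortfall of each such call is covered by the block of
+  // zeros buffered at construction time, which is why the produced output
+  // lags the inserted blocks by 64 samples.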
+  void InsertBlockAndExtractSubFrame(
+      const std::vector<std::vector<std::vector<float>>>& block,
+      std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame);
+
+ private:
+  const size_t num_bands_;
+  const size_t num_channels_;
+  std::vector<std::vector<std::vector<float>>> buffer_;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_FRAMER_H_
diff --git a/audio_processing/aec3/block_framer_unittest.cc b/audio_processing/aec3/block_framer_unittest.cc
new file mode 100644
index 0000000..e9a16d0
--- /dev/null
+++ b/audio_processing/aec3/block_framer_unittest.cc
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_framer.h"
+
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+void SetupSubFrameView(
+    std::vector<std::vector<std::vector<float>>>* sub_frame,
+    std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame_view) {
+  for (size_t band = 0; band < sub_frame_view->size(); ++band) {
+    for (size_t channel = 0; channel < (*sub_frame_view)[band].size();
+         ++channel) {
+      (*sub_frame_view)[band][channel] =
+          rtc::ArrayView<float>((*sub_frame)[band][channel].data(),
+                                (*sub_frame)[band][channel].size());
+    }
+  }
+}
+
+float ComputeSampleValue(size_t chunk_counter,
+                         size_t chunk_size,
+                         size_t band,
+                         size_t channel,
+                         size_t sample_index,
+                         int offset) {
+  float value = static_cast<float>(100 + chunk_counter * chunk_size +
+                                   sample_index + channel) +
+                offset;
+  return 5000 * band + value;
+}
+
+bool VerifySubFrame(
+    size_t sub_frame_counter,
+    int offset,
+    const std::vector<std::vector<rtc::ArrayView<float>>>& sub_frame_view) {
+  for (size_t band = 0; band < sub_frame_view.size(); ++band) {
+    for (size_t channel = 0; channel < sub_frame_view[band].size();
+         ++channel) {
+      for (size_t sample = 0; sample < sub_frame_view[band][channel].size();
+           ++sample) {
+        const float reference_value = ComputeSampleValue(
+            sub_frame_counter, kSubFrameLength, band, channel, sample, offset);
+        if (reference_value != sub_frame_view[band][channel][sample]) {
+          return false;
+        }
+      }
+    }
+  }
+  return true;
+}
+
+void FillBlock(size_t block_counter,
+               std::vector<std::vector<std::vector<float>>>* block) {
+  for (size_t band = 0; band < block->size(); ++band) {
+    for (size_t channel = 0; channel < (*block)[band].size(); ++channel) {
+      for (size_t sample = 0; sample < (*block)[band][channel].size();
+           ++sample) {
+        (*block)[band][channel][sample] = ComputeSampleValue(
+            block_counter, kBlockSize, band, channel, sample, 0);
+      }
+    }
+  }
+}
+
+// Verifies that the BlockFramer is able to produce the expected frame content.
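+// One block is fed per extracted subframe, plus an extra block via
+// InsertBlock after every fourth subframe to match the 80/64 sample ratio;
+// the output is checked against ComputeSampleValue with an offset of -64,
+// i.e. a one block delay.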
+void RunFramerTest(int sample_rate_hz, size_t num_channels) {
+  constexpr size_t kNumSubFramesToProcess = 10;
+  const size_t num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<std::vector<float>>> block(
+      num_bands, std::vector<std::vector<float>>(
+                     num_channels, std::vector<float>(kBlockSize, 0.f)));
+  std::vector<std::vector<std::vector<float>>> output_sub_frame(
+      num_bands, std::vector<std::vector<float>>(
+                     num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+  std::vector<std::vector<rtc::ArrayView<float>>> output_sub_frame_view(
+      num_bands, std::vector<rtc::ArrayView<float>>(num_channels));
+  SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+  BlockFramer framer(num_bands, num_channels);
+
+  size_t block_index = 0;
+  for (size_t sub_frame_index = 0; sub_frame_index < kNumSubFramesToProcess;
+       ++sub_frame_index) {
+    FillBlock(block_index++, &block);
+    framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view);
+    if (sub_frame_index > 1) {
+      EXPECT_TRUE(VerifySubFrame(sub_frame_index, -64, output_sub_frame_view));
+    }
+
+    if ((sub_frame_index + 1) % 4 == 0) {
+      FillBlock(block_index++, &block);
+      framer.InsertBlock(block);
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the BlockFramer crashes if the InsertBlockAndExtractSubFrame
+// method is called for inputs with the wrong number of bands or band lengths.
+void RunWronglySizedInsertAndExtractParametersTest(
+    int sample_rate_hz,
+    size_t correct_num_channels,
+    size_t num_block_bands,
+    size_t num_block_channels,
+    size_t block_length,
+    size_t num_sub_frame_bands,
+    size_t num_sub_frame_channels,
+    size_t sub_frame_length) {
+  const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<std::vector<float>>> block(
+      num_block_bands,
+      std::vector<std::vector<float>>(num_block_channels,
+                                      std::vector<float>(block_length, 0.f)));
+  std::vector<std::vector<std::vector<float>>> output_sub_frame(
+      num_sub_frame_bands,
+      std::vector<std::vector<float>>(
+          num_sub_frame_channels, std::vector<float>(sub_frame_length, 0.f)));
+  std::vector<std::vector<rtc::ArrayView<float>>> output_sub_frame_view(
+      output_sub_frame.size(),
+      std::vector<rtc::ArrayView<float>>(num_sub_frame_channels));
+  SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+  BlockFramer framer(correct_num_bands, correct_num_channels);
+  EXPECT_DEATH(
+      framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view), "");
+}
+
+// Verifies that the BlockFramer crashes if the InsertBlock method is called
+// for inputs with the wrong number of bands or band lengths.
+void RunWronglySizedInsertParameterTest(int sample_rate_hz,
+                                        size_t correct_num_channels,
+                                        size_t num_block_bands,
+                                        size_t num_block_channels,
+                                        size_t block_length) {
+  const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<std::vector<float>>> correct_block(
+      correct_num_bands,
+      std::vector<std::vector<float>>(correct_num_channels,
+                                      std::vector<float>(kBlockSize, 0.f)));
+  std::vector<std::vector<std::vector<float>>> wrong_block(
+      num_block_bands,
+      std::vector<std::vector<float>>(num_block_channels,
+                                      std::vector<float>(block_length, 0.f)));
+  std::vector<std::vector<std::vector<float>>> output_sub_frame(
+      correct_num_bands,
+      std::vector<std::vector<float>>(
+          correct_num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+  std::vector<std::vector<rtc::ArrayView<float>>> output_sub_frame_view(
+      output_sub_frame.size(),
+      std::vector<rtc::ArrayView<float>>(correct_num_channels));
+  SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+  BlockFramer framer(correct_num_bands, correct_num_channels);
+  framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+  framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+  framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+  framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+
+  EXPECT_DEATH(framer.InsertBlock(wrong_block), "");
+}
+
+// Verifies that the BlockFramer crashes if the InsertBlock method is called
+// after a wrong number of previous InsertBlockAndExtractSubFrame method calls
+// have been made.
+void RunWronglyInsertOrderTest(int sample_rate_hz,
+                               size_t num_channels,
+                               size_t num_preceeding_api_calls) {
+  const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<std::vector<float>>> block(
+      correct_num_bands,
+      std::vector<std::vector<float>>(num_channels,
+                                      std::vector<float>(kBlockSize, 0.f)));
+  std::vector<std::vector<std::vector<float>>> output_sub_frame(
+      correct_num_bands,
+      std::vector<std::vector<float>>(
+          num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+  std::vector<std::vector<rtc::ArrayView<float>>> output_sub_frame_view(
+      output_sub_frame.size(),
+      std::vector<rtc::ArrayView<float>>(num_channels));
+  SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+  BlockFramer framer(correct_num_bands, num_channels);
+  for (size_t k = 0; k < num_preceeding_api_calls; ++k) {
+    framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view);
+  }
+
+  EXPECT_DEATH(framer.InsertBlock(block), "");
+}
+#endif
+
+std::string ProduceDebugText(int sample_rate_hz, size_t num_channels) {
+  rtc::StringBuilder ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  ss << ", number of channels: " << num_channels;
+  return ss.Release();
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(BlockFramer, WrongNumberOfBandsInBlockForInsertBlockAndExtractSubFrame) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (auto correct_num_channels : {1, 2, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+      RunWronglySizedInsertAndExtractParametersTest(
+          rate, correct_num_channels, wrong_num_bands, correct_num_channels,
+          kBlockSize, correct_num_bands, correct_num_channels,
+          kSubFrameLength);
+    }
+  }
+}
+
+TEST(BlockFramer,
+     WrongNumberOfChannelsInBlockForInsertBlockAndExtractSubFrame) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (auto correct_num_channels : {1, 2, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      const size_t wrong_num_channels = correct_num_channels + 1;
+      RunWronglySizedInsertAndExtractParametersTest(
+          rate, correct_num_channels, correct_num_bands,
wrong_num_channels, + kBlockSize, correct_num_bands, correct_num_channels, kSubFrameLength); + } + } +} + +TEST(BlockFramer, + WrongNumberOfBandsInSubFrameForInsertBlockAndExtractSubFrame) { + for (auto rate : {16000, 32000, 48000}) { + for (auto correct_num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels)); + const size_t correct_num_bands = NumBandsForRate(rate); + const size_t wrong_num_bands = (correct_num_bands % 3) + 1; + RunWronglySizedInsertAndExtractParametersTest( + rate, correct_num_channels, correct_num_bands, correct_num_channels, + kBlockSize, wrong_num_bands, correct_num_channels, kSubFrameLength); + } + } +} + +TEST(BlockFramer, + WrongNumberOfChannelsInSubFrameForInsertBlockAndExtractSubFrame) { + for (auto rate : {16000, 32000, 48000}) { + for (auto correct_num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels)); + const size_t correct_num_bands = NumBandsForRate(rate); + const size_t wrong_num_channels = correct_num_channels + 1; + RunWronglySizedInsertAndExtractParametersTest( + rate, correct_num_channels, correct_num_bands, correct_num_channels, + kBlockSize, correct_num_bands, wrong_num_channels, kSubFrameLength); + } + } +} + +TEST(BlockFramer, WrongNumberOfSamplesInBlockForInsertBlockAndExtractSubFrame) { + for (auto rate : {16000, 32000, 48000}) { + for (auto correct_num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels)); + const size_t correct_num_bands = NumBandsForRate(rate); + RunWronglySizedInsertAndExtractParametersTest( + rate, correct_num_channels, correct_num_bands, correct_num_channels, + kBlockSize - 1, correct_num_bands, correct_num_channels, + kSubFrameLength); + } + } +} + +TEST(BlockFramer, + WrongNumberOfSamplesInSubFrameForInsertBlockAndExtractSubFrame) { + const size_t correct_num_channels = 1; + for (auto rate : {16000, 32000, 48000}) { + SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels)); + const size_t correct_num_bands = NumBandsForRate(rate); + RunWronglySizedInsertAndExtractParametersTest( + rate, correct_num_channels, correct_num_bands, correct_num_channels, + kBlockSize, correct_num_bands, correct_num_channels, + kSubFrameLength - 1); + } +} + +TEST(BlockFramer, WrongNumberOfBandsInBlockForInsertBlock) { + for (auto rate : {16000, 32000, 48000}) { + for (auto correct_num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels)); + const size_t correct_num_bands = NumBandsForRate(rate); + const size_t wrong_num_bands = (correct_num_bands % 3) + 1; + RunWronglySizedInsertParameterTest(rate, correct_num_channels, + wrong_num_bands, correct_num_channels, + kBlockSize); + } + } +} + +TEST(BlockFramer, WrongNumberOfChannelsInBlockForInsertBlock) { + for (auto rate : {16000, 32000, 48000}) { + for (auto correct_num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels)); + const size_t correct_num_bands = NumBandsForRate(rate); + const size_t wrong_num_channels = correct_num_channels + 1; + RunWronglySizedInsertParameterTest(rate, correct_num_channels, + correct_num_bands, wrong_num_channels, + kBlockSize); + } + } +} + +TEST(BlockFramer, WrongNumberOfSamplesInBlockForInsertBlock) { + for (auto rate : {16000, 32000, 48000}) { + for (auto correct_num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels)); + const size_t correct_num_bands = NumBandsForRate(rate); + RunWronglySizedInsertParameterTest(rate, correct_num_channels, + correct_num_bands, + 
+                                         correct_num_channels,
+                                         kBlockSize - 1);
+    }
+  }
+}
+
+TEST(BlockFramer, WrongNumberOfPreceedingApiCallsForInsertBlock) {
+  for (size_t num_channels : {1, 2, 8}) {
+    for (auto rate : {16000, 32000, 48000}) {
+      for (size_t num_calls = 0; num_calls < 4; ++num_calls) {
+        rtc::StringBuilder ss;
+        ss << "Sample rate: " << rate;
+        ss << ", Num channels: " << num_channels;
+        ss << ", Num preceeding InsertBlockAndExtractSubFrame calls: "
+           << num_calls;
+
+        SCOPED_TRACE(ss.str());
+        RunWronglyInsertOrderTest(rate, num_channels, num_calls);
+      }
+    }
+  }
+}
+
+// Verifies that the check for a zero number of channels works.
+TEST(BlockFramer, ZeroNumberOfChannelsParameter) {
+  EXPECT_DEATH(BlockFramer(16000, 0), "");
+}
+
+// Verifies that the check for a zero number of bands works.
+TEST(BlockFramer, ZeroNumberOfBandsParameter) {
+  EXPECT_DEATH(BlockFramer(0, 1), "");
+}
+
+// Verifies that the check for a null sub_frame pointer works.
+TEST(BlockFramer, NullSubFrameParameter) {
+  EXPECT_DEATH(
+      BlockFramer(1, 1).InsertBlockAndExtractSubFrame(
+          std::vector<std::vector<std::vector<float>>>(
+              1, std::vector<std::vector<float>>(
+                     1, std::vector<float>(kBlockSize, 0.f))),
+          nullptr),
+      "");
+}
+
+#endif
+
+TEST(BlockFramer, FrameBitexactness) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (auto num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, num_channels));
+      RunFramerTest(rate, num_channels);
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/block_processor.cc b/audio_processing/aec3/block_processor.cc
new file mode 100644
index 0000000..670e4b0
--- /dev/null
+++ b/audio_processing/aec3/block_processor.cc
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "audio_processing/aec3/block_processor.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/echo_canceller3_config.h"
+#include "api/echo_control.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/block_processor_metrics.h"
+#include "audio_processing/aec3/delay_estimate.h"
+#include "audio_processing/aec3/echo_path_variability.h"
+#include "audio_processing/aec3/echo_remover.h"
+#include "audio_processing/aec3/render_delay_buffer.h"
+#include "audio_processing/aec3/render_delay_controller.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomic_ops.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+enum class BlockProcessorApiCall { kCapture, kRender };
+
+class BlockProcessorImpl final : public BlockProcessor {
+ public:
+  BlockProcessorImpl(const EchoCanceller3Config& config,
+                     int sample_rate_hz,
+                     size_t num_render_channels,
+                     size_t num_capture_channels,
+                     std::unique_ptr<RenderDelayBuffer> render_buffer,
+                     std::unique_ptr<RenderDelayController> delay_controller,
+                     std::unique_ptr<EchoRemover> echo_remover);
+
+  BlockProcessorImpl() = delete;
+
+  ~BlockProcessorImpl() override;
+
+  void ProcessCapture(
+      bool echo_path_gain_change,
+      bool capture_signal_saturation,
+      std::vector<std::vector<std::vector<float>>>* linear_output,
+      std::vector<std::vector<std::vector<float>>>* capture_block) override;
+
+  void BufferRender(
+      const std::vector<std::vector<std::vector<float>>>& block) override;
+
+  void UpdateEchoLeakageStatus(bool leakage_detected) override;
+
+  void GetMetrics(EchoControl::Metrics* metrics) const override;
+
+  void SetAudioBufferDelay(int delay_ms) override;
+
+ private:
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const EchoCanceller3Config config_;
+  bool capture_properly_started_ = false;
+  bool render_properly_started_ = false;
+  const size_t sample_rate_hz_;
+  std::unique_ptr<RenderDelayBuffer> render_buffer_;
+  std::unique_ptr<RenderDelayController> delay_controller_;
+  std::unique_ptr<EchoRemover> echo_remover_;
+  BlockProcessorMetrics metrics_;
+  RenderDelayBuffer::BufferingEvent render_event_;
+  size_t capture_call_counter_ = 0;
+  absl::optional<DelayEstimate> estimated_delay_;
+};
+
+int BlockProcessorImpl::instance_count_ = 0;
+
+BlockProcessorImpl::BlockProcessorImpl(
+    const EchoCanceller3Config& config,
+    int sample_rate_hz,
+    size_t num_render_channels,
+    size_t num_capture_channels,
+    std::unique_ptr<RenderDelayBuffer> render_buffer,
+    std::unique_ptr<RenderDelayController> delay_controller,
+    std::unique_ptr<EchoRemover> echo_remover)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      config_(config),
+      sample_rate_hz_(sample_rate_hz),
+      render_buffer_(std::move(render_buffer)),
+      delay_controller_(std::move(delay_controller)),
+      echo_remover_(std::move(echo_remover)),
+      render_event_(RenderDelayBuffer::BufferingEvent::kNone) {
+  RTC_DCHECK(ValidFullBandRate(sample_rate_hz_));
+}
+
+BlockProcessorImpl::~BlockProcessorImpl() = default;
+
+void BlockProcessorImpl::ProcessCapture(
+    bool echo_path_gain_change,
+    bool capture_signal_saturation,
+    std::vector<std::vector<std::vector<float>>>* linear_output,
+    std::vector<std::vector<std::vector<float>>>* capture_block) {
+  RTC_DCHECK(capture_block);
+  RTC_DCHECK_EQ(NumBandsForRate(sample_rate_hz_), capture_block->size());
+  RTC_DCHECK_EQ(kBlockSize, (*capture_block)[0][0].size());
+
+  capture_call_counter_++;
+
+  data_dumper_->DumpRaw("aec3_processblock_call_order",
+                        static_cast<int>(BlockProcessorApiCall::kCapture));
+  data_dumper_->DumpWav("aec3_processblock_capture_input", kBlockSize,
+                        &(*capture_block)[0][0][0], 16000, 1);
+
+  if (render_properly_started_) {
+    if (!capture_properly_started_) {
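+      // This is the first capture call after render data has started
+      // arriving: clear any stale render data and restart the delay
+      // estimation from scratch.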
capture_properly_started_ = true; + render_buffer_->Reset(); + if (delay_controller_) + delay_controller_->Reset(true); + } + } else { + return; + } + + EchoPathVariability echo_path_variability( + echo_path_gain_change, EchoPathVariability::DelayAdjustment::kNone, + false); + + if (render_event_ == RenderDelayBuffer::BufferingEvent::kRenderOverrun && + render_properly_started_) { + echo_path_variability.delay_change = + EchoPathVariability::DelayAdjustment::kBufferFlush; + if (delay_controller_) + delay_controller_->Reset(true); + RTC_LOG(LS_WARNING) << "Reset due to render buffer overrun at block " + << capture_call_counter_; + } + render_event_ = RenderDelayBuffer::BufferingEvent::kNone; + + // Update the render buffers with any newly arrived render blocks and prepare + // the render buffers for reading the render data corresponding to the current + // capture block. + RenderDelayBuffer::BufferingEvent buffer_event = + render_buffer_->PrepareCaptureProcessing(); + // Reset the delay controller at render buffer underrun. + if (buffer_event == RenderDelayBuffer::BufferingEvent::kRenderUnderrun) { + if (delay_controller_) + delay_controller_->Reset(false); + } + + data_dumper_->DumpWav("aec3_processblock_capture_input2", kBlockSize, + &(*capture_block)[0][0][0], 16000, 1); + + bool has_delay_estimator = !config_.delay.use_external_delay_estimator; + if (has_delay_estimator) { + RTC_DCHECK(delay_controller_); + // Compute and apply the render delay required to achieve proper signal + // alignment. + estimated_delay_ = delay_controller_->GetDelay( + render_buffer_->GetDownsampledRenderBuffer(), render_buffer_->Delay(), + (*capture_block)[0]); + + if (estimated_delay_) { + bool delay_change = + render_buffer_->AlignFromDelay(estimated_delay_->delay); + if (delay_change) { + rtc::LoggingSeverity log_level = + config_.delay.log_warning_on_delay_changes ? rtc::LS_WARNING + : rtc::LS_INFO; + RTC_LOG_V(log_level) << "Delay changed to " << estimated_delay_->delay + << " at block " << capture_call_counter_; + echo_path_variability.delay_change = + EchoPathVariability::DelayAdjustment::kNewDetectedDelay; + } + } + + echo_path_variability.clock_drift = delay_controller_->HasClockdrift(); + + } else { + render_buffer_->AlignFromExternalDelay(); + } + + // Remove the echo from the capture signal. + if (has_delay_estimator || render_buffer_->HasReceivedBufferDelay()) { + echo_remover_->ProcessCapture( + echo_path_variability, capture_signal_saturation, estimated_delay_, + render_buffer_->GetRenderBuffer(), linear_output, capture_block); + } + + // Update the metrics. 
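+  // The underrun flag is always false here; render buffer underruns are
+  // instead handled above by resetting the delay controller.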
+  metrics_.UpdateCapture(false);
+}
+
+void BlockProcessorImpl::BufferRender(
+    const std::vector<std::vector<std::vector<float>>>& block) {
+  RTC_DCHECK_EQ(NumBandsForRate(sample_rate_hz_), block.size());
+  RTC_DCHECK_EQ(kBlockSize, block[0][0].size());
+  data_dumper_->DumpRaw("aec3_processblock_call_order",
+                        static_cast<int>(BlockProcessorApiCall::kRender));
+  data_dumper_->DumpWav("aec3_processblock_render_input", kBlockSize,
+                        &block[0][0][0], 16000, 1);
+  data_dumper_->DumpWav("aec3_processblock_render_input2", kBlockSize,
+                        &block[0][0][0], 16000, 1);
+
+  render_event_ = render_buffer_->Insert(block);
+
+  metrics_.UpdateRender(render_event_ !=
+                        RenderDelayBuffer::BufferingEvent::kNone);
+
+  render_properly_started_ = true;
+  if (delay_controller_)
+    delay_controller_->LogRenderCall();
+}
+
+void BlockProcessorImpl::UpdateEchoLeakageStatus(bool leakage_detected) {
+  echo_remover_->UpdateEchoLeakageStatus(leakage_detected);
+}
+
+void BlockProcessorImpl::GetMetrics(EchoControl::Metrics* metrics) const {
+  echo_remover_->GetMetrics(metrics);
+  constexpr int block_size_ms = 4;
+  absl::optional<size_t> delay = render_buffer_->Delay();
+  metrics->delay_ms = delay ? static_cast<int>(*delay) * block_size_ms : 0;
+}
+
+void BlockProcessorImpl::SetAudioBufferDelay(int delay_ms) {
+  render_buffer_->SetAudioBufferDelay(delay_ms);
+}
+
+}  // namespace
+
+BlockProcessor* BlockProcessor::Create(const EchoCanceller3Config& config,
+                                       int sample_rate_hz,
+                                       size_t num_render_channels,
+                                       size_t num_capture_channels) {
+  std::unique_ptr<RenderDelayBuffer> render_buffer(
+      RenderDelayBuffer::Create(config, sample_rate_hz, num_render_channels));
+  std::unique_ptr<RenderDelayController> delay_controller;
+  if (!config.delay.use_external_delay_estimator) {
+    delay_controller.reset(RenderDelayController::Create(
+        config, sample_rate_hz, num_capture_channels));
+  }
+  std::unique_ptr<EchoRemover> echo_remover(EchoRemover::Create(
+      config, sample_rate_hz, num_render_channels, num_capture_channels));
+  return Create(config, sample_rate_hz, num_render_channels,
+                num_capture_channels, std::move(render_buffer),
+                std::move(delay_controller), std::move(echo_remover));
+}
+
+BlockProcessor* BlockProcessor::Create(
+    const EchoCanceller3Config& config,
+    int sample_rate_hz,
+    size_t num_render_channels,
+    size_t num_capture_channels,
+    std::unique_ptr<RenderDelayBuffer> render_buffer) {
+  std::unique_ptr<RenderDelayController> delay_controller;
+  if (!config.delay.use_external_delay_estimator) {
+    delay_controller.reset(RenderDelayController::Create(
+        config, sample_rate_hz, num_capture_channels));
+  }
+  std::unique_ptr<EchoRemover> echo_remover(EchoRemover::Create(
+      config, sample_rate_hz, num_render_channels, num_capture_channels));
+  return Create(config, sample_rate_hz, num_render_channels,
+                num_capture_channels, std::move(render_buffer),
+                std::move(delay_controller), std::move(echo_remover));
+}
+
+BlockProcessor* BlockProcessor::Create(
+    const EchoCanceller3Config& config,
+    int sample_rate_hz,
+    size_t num_render_channels,
+    size_t num_capture_channels,
+    std::unique_ptr<RenderDelayBuffer> render_buffer,
+    std::unique_ptr<RenderDelayController> delay_controller,
+    std::unique_ptr<EchoRemover> echo_remover) {
+  return new BlockProcessorImpl(config, sample_rate_hz, num_render_channels,
+                                num_capture_channels, std::move(render_buffer),
+                                std::move(delay_controller),
+                                std::move(echo_remover));
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/block_processor.h b/audio_processing/aec3/block_processor.h
new file mode 100644
index 0000000..863394f
--- /dev/null
+++ b/audio_processing/aec3/block_processor.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/echo_canceller3_config.h"
+#include "api/echo_control.h"
+#include "audio_processing/aec3/echo_remover.h"
+#include "audio_processing/aec3/render_delay_buffer.h"
+#include "audio_processing/aec3/render_delay_controller.h"
+
+namespace webrtc {
+
+// Class for performing echo cancellation on 64 sample blocks of audio data.
+class BlockProcessor {
+ public:
+  static BlockProcessor* Create(const EchoCanceller3Config& config,
+                                int sample_rate_hz,
+                                size_t num_render_channels,
+                                size_t num_capture_channels);
+  // Only used for testing purposes.
+  static BlockProcessor* Create(
+      const EchoCanceller3Config& config,
+      int sample_rate_hz,
+      size_t num_render_channels,
+      size_t num_capture_channels,
+      std::unique_ptr<RenderDelayBuffer> render_buffer);
+  static BlockProcessor* Create(
+      const EchoCanceller3Config& config,
+      int sample_rate_hz,
+      size_t num_render_channels,
+      size_t num_capture_channels,
+      std::unique_ptr<RenderDelayBuffer> render_buffer,
+      std::unique_ptr<RenderDelayController> delay_controller,
+      std::unique_ptr<EchoRemover> echo_remover);
+
+  virtual ~BlockProcessor() = default;
+
+  // Get current metrics.
+  virtual void GetMetrics(EchoControl::Metrics* metrics) const = 0;
+
+  // Provides an optional external estimate of the audio buffer delay.
+  virtual void SetAudioBufferDelay(int delay_ms) = 0;
+
+  // Processes a block of capture data.
+  virtual void ProcessCapture(
+      bool echo_path_gain_change,
+      bool capture_signal_saturation,
+      std::vector<std::vector<std::vector<float>>>* linear_output,
+      std::vector<std::vector<std::vector<float>>>* capture_block) = 0;
+
+  // Buffers a block of render data supplied by a FrameBlocker object.
+  virtual void BufferRender(
+      const std::vector<std::vector<std::vector<float>>>& render_block) = 0;
+
+  // Reports whether echo leakage has been detected in the echo canceller
+  // output.
+  virtual void UpdateEchoLeakageStatus(bool leakage_detected) = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_H_
diff --git a/audio_processing/aec3/block_processor_metrics.cc b/audio_processing/aec3/block_processor_metrics.cc
new file mode 100644
index 0000000..cf47554
--- /dev/null
+++ b/audio_processing/aec3/block_processor_metrics.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/block_processor_metrics.h"
+
+#include "audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+enum class RenderUnderrunCategory {
+  kNone,
+  kFew,
+  kSeveral,
+  kMany,
+  kConstant,
+  kNumCategories
+};
+
+enum class RenderOverrunCategory {
+  kNone,
+  kFew,
+  kSeveral,
+  kMany,
+  kConstant,
+  kNumCategories
+};
+
+}  // namespace
+
+void BlockProcessorMetrics::UpdateCapture(bool underrun) {
+  ++capture_block_counter_;
+  if (underrun) {
+    ++render_buffer_underruns_;
+  }
+
+  if (capture_block_counter_ == kMetricsReportingIntervalBlocks) {
+    metrics_reported_ = true;
+
+    RenderUnderrunCategory underrun_category;
+    if (render_buffer_underruns_ == 0) {
+      underrun_category = RenderUnderrunCategory::kNone;
+    } else if (render_buffer_underruns_ > (capture_block_counter_ >> 1)) {
+      underrun_category = RenderUnderrunCategory::kConstant;
+    } else if (render_buffer_underruns_ > 100) {
+      underrun_category = RenderUnderrunCategory::kMany;
+    } else if (render_buffer_underruns_ > 10) {
+      underrun_category = RenderUnderrunCategory::kSeveral;
+    } else {
+      underrun_category = RenderUnderrunCategory::kFew;
+    }
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.RenderUnderruns",
+        static_cast<int>(underrun_category),
+        static_cast<int>(RenderUnderrunCategory::kNumCategories));
+
+    RenderOverrunCategory overrun_category;
+    if (render_buffer_overruns_ == 0) {
+      overrun_category = RenderOverrunCategory::kNone;
+    } else if (render_buffer_overruns_ > (buffer_render_calls_ >> 1)) {
+      overrun_category = RenderOverrunCategory::kConstant;
+    } else if (render_buffer_overruns_ > 100) {
+      overrun_category = RenderOverrunCategory::kMany;
+    } else if (render_buffer_overruns_ > 10) {
+      overrun_category = RenderOverrunCategory::kSeveral;
+    } else {
+      overrun_category = RenderOverrunCategory::kFew;
+    }
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.RenderOverruns",
+        static_cast<int>(overrun_category),
+        static_cast<int>(RenderOverrunCategory::kNumCategories));
+
+    ResetMetrics();
+    capture_block_counter_ = 0;
+  } else {
+    metrics_reported_ = false;
+  }
+}
+
+void BlockProcessorMetrics::UpdateRender(bool overrun) {
+  ++buffer_render_calls_;
+  if (overrun) {
+    ++render_buffer_overruns_;
+  }
+}
+
+void BlockProcessorMetrics::ResetMetrics() {
+  render_buffer_underruns_ = 0;
+  render_buffer_overruns_ = 0;
+  buffer_render_calls_ = 0;
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/block_processor_metrics.h b/audio_processing/aec3/block_processor_metrics.h
new file mode 100644
index 0000000..4ba0536
--- /dev/null
+++ b/audio_processing/aec3/block_processor_metrics.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_
+
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+// Handles the reporting of metrics for the block_processor.
+class BlockProcessorMetrics {
+ public:
+  BlockProcessorMetrics() = default;
+
+  // Updates the metric with new capture data.
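+  // Reports the accumulated underrun/overrun histograms once every
+  // kMetricsReportingIntervalBlocks calls and then resets the counters.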
+ void UpdateCapture(bool underrun); + + // Updates the metric with new render data. + void UpdateRender(bool overrun); + + // Returns true if the metrics have just been reported, otherwise false. + bool MetricsReported() { return metrics_reported_; } + + private: + // Resets the metrics. + void ResetMetrics(); + + int capture_block_counter_ = 0; + bool metrics_reported_ = false; + int render_buffer_underruns_ = 0; + int render_buffer_overruns_ = 0; + int buffer_render_calls_ = 0; + + RTC_DISALLOW_COPY_AND_ASSIGN(BlockProcessorMetrics); +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_ diff --git a/audio_processing/aec3/block_processor_metrics_unittest.cc b/audio_processing/aec3/block_processor_metrics_unittest.cc new file mode 100644 index 0000000..3e23c24 --- /dev/null +++ b/audio_processing/aec3/block_processor_metrics_unittest.cc @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/block_processor_metrics.h" + +#include "modules/audio_processing/aec3/aec3_common.h" +#include "test/gtest.h" + +namespace webrtc { + +// Verify the general functionality of BlockProcessorMetrics. +TEST(BlockProcessorMetrics, NormalUsage) { + BlockProcessorMetrics metrics; + + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) { + metrics.UpdateRender(false); + metrics.UpdateRender(false); + metrics.UpdateCapture(false); + EXPECT_FALSE(metrics.MetricsReported()); + } + metrics.UpdateCapture(false); + EXPECT_TRUE(metrics.MetricsReported()); + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/block_processor_unittest.cc b/audio_processing/aec3/block_processor_unittest.cc new file mode 100644 index 0000000..2b928e8 --- /dev/null +++ b/audio_processing/aec3/block_processor_unittest.cc @@ -0,0 +1,304 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/block_processor.h" + +#include +#include +#include + +#include "modules/audio_processing/aec3/aec3_common.h" +#include "modules/audio_processing/aec3/mock/mock_echo_remover.h" +#include "modules/audio_processing/aec3/mock/mock_render_delay_buffer.h" +#include "modules/audio_processing/aec3/mock/mock_render_delay_controller.h" +#include "modules/audio_processing/test/echo_canceller_test_tools.h" +#include "rtc_base/checks.h" +#include "rtc_base/random.h" +#include "rtc_base/strings/string_builder.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +using ::testing::_; +using ::testing::AtLeast; +using ::testing::Return; +using ::testing::StrictMock; + +// Verifies that the basic BlockProcessor functionality works and that the API +// methods are callable. 
+void RunBasicSetupAndApiCallTest(int sample_rate_hz, int num_iterations) {
+  constexpr size_t kNumRenderChannels = 1;
+  constexpr size_t kNumCaptureChannels = 1;
+
+  std::unique_ptr<BlockProcessor> block_processor(
+      BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz,
+                             kNumRenderChannels, kNumCaptureChannels));
+  std::vector<std::vector<std::vector<float>>> block(
+      NumBandsForRate(sample_rate_hz),
+      std::vector<std::vector<float>>(kNumRenderChannels,
+                                      std::vector<float>(kBlockSize, 1000.f)));
+  for (int k = 0; k < num_iterations; ++k) {
+    block_processor->BufferRender(block);
+    block_processor->ProcessCapture(false, false, nullptr, &block);
+    block_processor->UpdateEchoLeakageStatus(false);
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+void RunRenderBlockSizeVerificationTest(int sample_rate_hz) {
+  constexpr size_t kNumRenderChannels = 1;
+  constexpr size_t kNumCaptureChannels = 1;
+
+  std::unique_ptr<BlockProcessor> block_processor(
+      BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz,
+                             kNumRenderChannels, kNumCaptureChannels));
+  std::vector<std::vector<std::vector<float>>> block(
+      NumBandsForRate(sample_rate_hz),
+      std::vector<std::vector<float>>(
+          kNumRenderChannels, std::vector<float>(kBlockSize - 1, 0.f)));
+
+  EXPECT_DEATH(block_processor->BufferRender(block), "");
+}
+
+void RunCaptureBlockSizeVerificationTest(int sample_rate_hz) {
+  constexpr size_t kNumRenderChannels = 1;
+  constexpr size_t kNumCaptureChannels = 1;
+
+  std::unique_ptr<BlockProcessor> block_processor(
+      BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz,
+                             kNumRenderChannels, kNumCaptureChannels));
+  std::vector<std::vector<std::vector<float>>> block(
+      NumBandsForRate(sample_rate_hz),
+      std::vector<std::vector<float>>(
+          kNumRenderChannels, std::vector<float>(kBlockSize - 1, 0.f)));
+
+  EXPECT_DEATH(block_processor->ProcessCapture(false, false, nullptr, &block),
+               "");
+}
+
+void RunRenderNumBandsVerificationTest(int sample_rate_hz) {
+  constexpr size_t kNumRenderChannels = 1;
+  constexpr size_t kNumCaptureChannels = 1;
+
+  const size_t wrong_num_bands = NumBandsForRate(sample_rate_hz) < 3
+                                     ? NumBandsForRate(sample_rate_hz) + 1
+                                     : 1;
+  std::unique_ptr<BlockProcessor> block_processor(
+      BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz,
+                             kNumRenderChannels, kNumCaptureChannels));
+  std::vector<std::vector<std::vector<float>>> block(
+      wrong_num_bands,
+      std::vector<std::vector<float>>(kNumRenderChannels,
+                                      std::vector<float>(kBlockSize, 0.f)));
+
+  EXPECT_DEATH(block_processor->BufferRender(block), "");
+}
+
+void RunCaptureNumBandsVerificationTest(int sample_rate_hz) {
+  constexpr size_t kNumRenderChannels = 1;
+  constexpr size_t kNumCaptureChannels = 1;
+
+  const size_t wrong_num_bands = NumBandsForRate(sample_rate_hz) < 3
+                                     ? NumBandsForRate(sample_rate_hz) + 1
+                                     : 1;
+  std::unique_ptr<BlockProcessor> block_processor(
+      BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz,
+                             kNumRenderChannels, kNumCaptureChannels));
+  std::vector<std::vector<std::vector<float>>> block(
+      wrong_num_bands,
+      std::vector<std::vector<float>>(kNumRenderChannels,
+                                      std::vector<float>(kBlockSize, 0.f)));
+
+  EXPECT_DEATH(block_processor->ProcessCapture(false, false, nullptr, &block),
+               "");
+}
+#endif
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  rtc::StringBuilder ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.Release();
+}
+
+}  // namespace
+
+// Verifies that the delay controller functionality is properly integrated with
+// the render delay buffer inside block processor.
+// TODO(peah): Activate the unittest once the required code has been landed.
+TEST(BlockProcessor, DISABLED_DelayControllerIntegration) {
+  constexpr size_t kNumRenderChannels = 1;
+  constexpr size_t kNumCaptureChannels = 1;
+  constexpr size_t kNumBlocks = 310;
+  constexpr size_t kDelayInSamples = 640;
+  constexpr size_t kDelayHeadroom = 1;
+  constexpr size_t kDelayInBlocks =
+      kDelayInSamples / kBlockSize - kDelayHeadroom;
+  Random random_generator(42U);
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<testing::StrictMock<webrtc::test::MockRenderDelayBuffer>>
+        render_delay_buffer_mock(
+            new StrictMock<webrtc::test::MockRenderDelayBuffer>(rate, 1));
+    EXPECT_CALL(*render_delay_buffer_mock, Insert(_))
+        .Times(kNumBlocks)
+        .WillRepeatedly(Return(RenderDelayBuffer::BufferingEvent::kNone));
+    EXPECT_CALL(*render_delay_buffer_mock, AlignFromDelay(kDelayInBlocks))
+        .Times(AtLeast(1));
+    EXPECT_CALL(*render_delay_buffer_mock, MaxDelay()).WillOnce(Return(30));
+    EXPECT_CALL(*render_delay_buffer_mock, Delay())
+        .Times(kNumBlocks + 1)
+        .WillRepeatedly(Return(0));
+    std::unique_ptr<BlockProcessor> block_processor(BlockProcessor::Create(
+        EchoCanceller3Config(), rate, kNumRenderChannels, kNumCaptureChannels,
+        std::move(render_delay_buffer_mock)));
+
+    std::vector<std::vector<std::vector<float>>> render_block(
+        NumBandsForRate(rate),
+        std::vector<std::vector<float>>(kNumRenderChannels,
+                                        std::vector<float>(kBlockSize, 0.f)));
+    std::vector<std::vector<std::vector<float>>> capture_block(
+        NumBandsForRate(rate),
+        std::vector<std::vector<float>>(kNumCaptureChannels,
+                                        std::vector<float>(kBlockSize, 0.f)));
+    DelayBuffer<float> signal_delay_buffer(kDelayInSamples);
+    for (size_t k = 0; k < kNumBlocks; ++k) {
+      RandomizeSampleVector(&random_generator, render_block[0][0]);
+      signal_delay_buffer.Delay(render_block[0][0], capture_block[0][0]);
+      block_processor->BufferRender(render_block);
+      block_processor->ProcessCapture(false, false, nullptr, &capture_block);
+    }
+  }
+}
+
+// Verifies that BlockProcessor submodules are called in a proper manner.
+TEST(BlockProcessor, DISABLED_SubmoduleIntegration) {
+  constexpr size_t kNumBlocks = 310;
+  constexpr size_t kNumRenderChannels = 1;
+  constexpr size_t kNumCaptureChannels = 1;
+
+  Random random_generator(42U);
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<testing::StrictMock<webrtc::test::MockRenderDelayBuffer>>
+        render_delay_buffer_mock(
+            new StrictMock<webrtc::test::MockRenderDelayBuffer>(rate, 1));
+    std::unique_ptr<
+        ::testing::StrictMock<webrtc::test::MockRenderDelayController>>
+        render_delay_controller_mock(
+            new StrictMock<webrtc::test::MockRenderDelayController>());
+    std::unique_ptr<testing::StrictMock<webrtc::test::MockEchoRemover>>
+        echo_remover_mock(new StrictMock<webrtc::test::MockEchoRemover>());
+
+    EXPECT_CALL(*render_delay_buffer_mock, Insert(_))
+        .Times(kNumBlocks - 1)
+        .WillRepeatedly(Return(RenderDelayBuffer::BufferingEvent::kNone));
+    EXPECT_CALL(*render_delay_buffer_mock, PrepareCaptureProcessing())
+        .Times(kNumBlocks);
+    EXPECT_CALL(*render_delay_buffer_mock, AlignFromDelay(9)).Times(AtLeast(1));
+    EXPECT_CALL(*render_delay_buffer_mock, Delay())
+        .Times(kNumBlocks)
+        .WillRepeatedly(Return(0));
+    EXPECT_CALL(*render_delay_controller_mock, GetDelay(_, _, _))
+        .Times(kNumBlocks);
+    EXPECT_CALL(*echo_remover_mock, ProcessCapture(_, _, _, _, _, _))
+        .Times(kNumBlocks);
+    EXPECT_CALL(*echo_remover_mock, UpdateEchoLeakageStatus(_))
+        .Times(kNumBlocks);
+
+    std::unique_ptr<BlockProcessor> block_processor(BlockProcessor::Create(
+        EchoCanceller3Config(), rate, kNumRenderChannels, kNumCaptureChannels,
+        std::move(render_delay_buffer_mock),
+        std::move(render_delay_controller_mock), std::move(echo_remover_mock)));
+
+    std::vector<std::vector<std::vector<float>>> render_block(
+        NumBandsForRate(rate),
+        std::vector<std::vector<float>>(kNumRenderChannels,
+                                        std::vector<float>(kBlockSize, 0.f)));
+    std::vector<std::vector<std::vector<float>>> capture_block(
+        NumBandsForRate(rate),
+        std::vector<std::vector<float>>(kNumCaptureChannels,
+                                        std::vector<float>(kBlockSize, 0.f)));
+    DelayBuffer<float> signal_delay_buffer(640);
+    for (size_t k = 0; k < kNumBlocks; ++k) {
+      RandomizeSampleVector(&random_generator, render_block[0][0]);
+      signal_delay_buffer.Delay(render_block[0][0], capture_block[0][0]);
+      block_processor->BufferRender(render_block);
+      block_processor->ProcessCapture(false, false, nullptr, &capture_block);
+      block_processor->UpdateEchoLeakageStatus(false);
+    }
+  }
+}
+
+TEST(BlockProcessor, BasicSetupAndApiCalls) {
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunBasicSetupAndApiCallTest(rate, 1);
+  }
+}
+
+TEST(BlockProcessor, TestLongerCall) {
+  RunBasicSetupAndApiCallTest(16000, 20 * kNumBlocksPerSecond);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// TODO(gustaf): Re-enable the test once the issue with memory leaks during
+// DEATH tests on test bots has been fixed.
+TEST(BlockProcessor, DISABLED_VerifyRenderBlockSizeCheck) {
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunRenderBlockSizeVerificationTest(rate);
+  }
+}
+
+TEST(BlockProcessor, VerifyCaptureBlockSizeCheck) {
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunCaptureBlockSizeVerificationTest(rate);
+  }
+}
+
+TEST(BlockProcessor, VerifyRenderNumBandsCheck) {
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunRenderNumBandsVerificationTest(rate);
+  }
+}
+
+// TODO(peah): Verify the check for correct number of bands in the capture
+// signal.
+TEST(BlockProcessor, VerifyCaptureNumBandsCheck) {
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunCaptureNumBandsVerificationTest(rate);
+  }
+}
+
+// Verifies that the verification for null ProcessCapture input works.
+TEST(BlockProcessor, NullProcessCaptureParameter) {
+  EXPECT_DEATH(std::unique_ptr<BlockProcessor>(
+                   BlockProcessor::Create(EchoCanceller3Config(), 16000, 1, 1))
+                   ->ProcessCapture(false, false, nullptr, nullptr),
+               "");
+}
+
+// Verifies the check for correct sample rate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(BlockProcessor, DISABLED_WrongSampleRate) {
+  EXPECT_DEATH(std::unique_ptr<BlockProcessor>(
+                   BlockProcessor::Create(EchoCanceller3Config(), 8001, 1, 1)),
+               "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/clockdrift_detector.cc b/audio_processing/aec3/clockdrift_detector.cc
new file mode 100644
index 0000000..d2c523c
--- /dev/null
+++ b/audio_processing/aec3/clockdrift_detector.cc
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "audio_processing/aec3/clockdrift_detector.h"
+
+namespace webrtc {
+
+ClockdriftDetector::ClockdriftDetector()
+    : level_(Level::kNone), stability_counter_(0) {
+  delay_history_.fill(0);
+}
+
+ClockdriftDetector::~ClockdriftDetector() = default;
+
+void ClockdriftDetector::Update(int delay_estimate) {
+  if (delay_estimate == delay_history_[0]) {
+    if (++stability_counter_ > 7500)
+      level_ = Level::kNone;
+    return;
+  }
+
+  stability_counter_ = 0;
+  const int d1 = delay_history_[0] - delay_estimate;
+  const int d2 = delay_history_[1] - delay_estimate;
+  const int d3 = delay_history_[2] - delay_estimate;
+
+  // Patterns recognized as positive clockdrift:
+  const bool probable_drift_up =
+      (d1 == -1 && d2 == -2) || (d1 == -2 && d2 == -1);
+  const bool drift_up = probable_drift_up && d3 == -3;
+
+  // Patterns recognized as negative clockdrift:
+  const bool probable_drift_down = (d1 == 1 && d2 == 2) || (d1 == 2 && d2 == 1);
+  const bool drift_down = probable_drift_down && d3 == 3;
+
+  // Set clockdrift level.
+  if (drift_up || drift_down) {
+    level_ = Level::kVerified;
+  } else if ((probable_drift_up || probable_drift_down) &&
+             level_ == Level::kNone) {
+    level_ = Level::kProbable;
+  }
+
+  // Shift delay history one step.
+  delay_history_[2] = delay_history_[1];
+  delay_history_[1] = delay_history_[0];
+  delay_history_[0] = delay_estimate;
+}
+}  // namespace webrtc
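The detector's contract in brief: Update() receives one delay estimate per
block, and the level escalates only when the most recent estimates form a
monotone one-step pattern. A minimal sketch with invented delay values:

    // Sketch: a steadily decreasing delay estimate walks the detector from
    // kNone through kProbable (after the third update, once d1 == 1 and
    // d2 == 2) to kVerified (after the fourth, once d3 == 3 as well).
    webrtc::ClockdriftDetector detector;
    for (int delay : {1000, 999, 998, 997}) {
      detector.Update(delay);
    }
    // detector.ClockdriftLevel() is now Level::kVerified.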
diff --git a/audio_processing/aec3/clockdrift_detector.h b/audio_processing/aec3/clockdrift_detector.h
new file mode 100644
index 0000000..22528c9
--- /dev/null
+++ b/audio_processing/aec3/clockdrift_detector.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
+
+#include <array>
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct DownsampledRenderBuffer;
+struct EchoCanceller3Config;
+
+// Detects clockdrift by analyzing the estimated delay.
+class ClockdriftDetector {
+ public:
+  enum class Level { kNone, kProbable, kVerified, kNumCategories };
+  ClockdriftDetector();
+  ~ClockdriftDetector();
+  void Update(int delay_estimate);
+  Level ClockdriftLevel() const { return level_; }
+
+ private:
+  std::array<int, 3> delay_history_;
+  Level level_;
+  size_t stability_counter_;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
diff --git a/audio_processing/aec3/clockdrift_detector_unittest.cc b/audio_processing/aec3/clockdrift_detector_unittest.cc
new file mode 100644
index 0000000..0f98b01
--- /dev/null
+++ b/audio_processing/aec3/clockdrift_detector_unittest.cc
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+TEST(ClockdriftDetector, ClockdriftDetector) {
+  ClockdriftDetector c;
+  // No clockdrift at start.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+
+  // Monotonically increasing delay.
+  for (int i = 0; i < 100; i++)
+    c.Update(1000);
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+  for (int i = 0; i < 100; i++)
+    c.Update(1001);
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+  for (int i = 0; i < 100; i++)
+    c.Update(1002);
+  // Probable clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kProbable);
+  for (int i = 0; i < 100; i++)
+    c.Update(1003);
+  // Verified clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kVerified);
+
+  // Stable delay.
+  for (int i = 0; i < 10000; i++)
+    c.Update(1003);
+  // No clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+
+  // Decreasing delay.
+  for (int i = 0; i < 100; i++)
+    c.Update(1001);
+  for (int i = 0; i < 100; i++)
+    c.Update(999);
+  // Probable clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kProbable);
+  for (int i = 0; i < 100; i++)
+    c.Update(1000);
+  for (int i = 0; i < 100; i++)
+    c.Update(998);
+  // Verified clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kVerified);
+}
+}  // namespace webrtc
diff --git a/audio_processing/aec3/comfort_noise_generator.cc b/audio_processing/aec3/comfort_noise_generator.cc
new file mode 100644
index 0000000..0916c75
--- /dev/null
+++ b/audio_processing/aec3/comfort_noise_generator.cc
@@ -0,0 +1,180 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/comfort_noise_generator.h"
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstdint>
+#include <functional>
+#include <numeric>
+
+//#include "signal_processing/include/signal_processing_library.h"
+#include "audio_processing/aec3/vector_math.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// Table of sqrt(2) * sin(2*pi*i/32).
+constexpr float kSqrt2Sin[32] = {
+    +0.0000000f, +0.2758994f, +0.5411961f, +0.7856950f, +1.0000000f,
+    +1.1758756f, +1.3065630f, +1.3870398f, +1.4142136f, +1.3870398f,
+    +1.3065630f, +1.1758756f, +1.0000000f, +0.7856950f, +0.5411961f,
+    +0.2758994f, +0.0000000f, -0.2758994f, -0.5411961f, -0.7856950f,
+    -1.0000000f, -1.1758756f, -1.3065630f, -1.3870398f, -1.4142136f,
+    -1.3870398f, -1.3065630f, -1.1758756f, -1.0000000f, -0.7856950f,
+    -0.5411961f, -0.2758994f};
+
+void GenerateComfortNoise(Aec3Optimization optimization,
+                          const std::array<float, kFftLengthBy2Plus1>& N2,
+                          uint32_t* seed,
+                          FftData* lower_band_noise,
+                          FftData* upper_band_noise) {
+  FftData* N_low = lower_band_noise;
+  FftData* N_high = upper_band_noise;
+
+  // Compute square root spectrum.
+  std::array<float, kFftLengthBy2Plus1> N;
+  std::copy(N2.begin(), N2.end(), N.begin());
+  aec3::VectorMath(optimization).Sqrt(N);
+
+  // Compute the noise level for the upper bands.
+  constexpr float kOneByNumBands = 1.f / (kFftLengthBy2Plus1 / 2 + 1);
+  constexpr int kFftLengthBy2Plus1By2 = kFftLengthBy2Plus1 / 2;
+  const float high_band_noise_level =
+      std::accumulate(N.begin() + kFftLengthBy2Plus1By2, N.end(), 0.f) *
+      kOneByNumBands;
+
+  // The analysis and synthesis windowing cause loss of power when
+  // cross-fading the noise where frames are completely uncorrelated
+  // (generated with random phase), hence the factor sqrt(2).
+  // This is not the case for the speech signal where the input is overlapping
+  // (strong correlation).
+  N_low->re[0] = N_low->re[kFftLengthBy2] = N_high->re[0] =
+      N_high->re[kFftLengthBy2] = 0.f;
+  for (size_t k = 1; k < kFftLengthBy2; k++) {
+    constexpr int kIndexMask = 32 - 1;
+    // Generate a random 31-bit integer.
+    seed[0] = (seed[0] * 69069 + 1) & (0x80000000 - 1);
+    // Convert to a 5-bit index.
+    int i = seed[0] >> 26;
+
+    // y = sqrt(2) * sin(a)
+    const float x = kSqrt2Sin[i];
+    // x = sqrt(2) * cos(a) = sqrt(2) * sin(a + pi/2)
+    const float y = kSqrt2Sin[(i + 8) & kIndexMask];
+
+    // Form low-frequency noise via spectral shaping.
+    N_low->re[k] = N[k] * x;
+    N_low->im[k] = N[k] * y;
+
+    // Form the high-frequency noise via simple levelling.
+    N_high->re[k] = high_band_noise_level * x;
+    N_high->im[k] = high_band_noise_level * y;
+  }
+}
+
+}  // namespace
+
+ComfortNoiseGenerator::ComfortNoiseGenerator(Aec3Optimization optimization,
+                                             size_t num_capture_channels)
+    : optimization_(optimization),
+      seed_(42),
+      num_capture_channels_(num_capture_channels),
+      N2_initial_(
+          std::make_unique<std::vector<std::array<float, kFftLengthBy2Plus1>>>(
+              num_capture_channels_)),
+      Y2_smoothed_(num_capture_channels_),
+      N2_(num_capture_channels_) {
+  for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+    (*N2_initial_)[ch].fill(0.f);
+    Y2_smoothed_[ch].fill(0.f);
+    N2_[ch].fill(1.0e6f);
+  }
+}
+
+ComfortNoiseGenerator::~ComfortNoiseGenerator() = default;
+
+void ComfortNoiseGenerator::Compute(
+    bool saturated_capture,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+        capture_spectrum,
+    rtc::ArrayView<FftData> lower_band_noise,
+    rtc::ArrayView<FftData> upper_band_noise) {
+  const auto& Y2 = capture_spectrum;
+
+  if (!saturated_capture) {
+    // Smooth Y2.
+    for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+      std::transform(Y2_smoothed_[ch].begin(), Y2_smoothed_[ch].end(),
+                     Y2[ch].begin(), Y2_smoothed_[ch].begin(),
+                     [](float a, float b) { return a + 0.1f * (b - a); });
+    }
+
+    if (N2_counter_ > 50) {
+      // Update N2 from Y2_smoothed.
+      for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+        std::transform(N2_[ch].begin(), N2_[ch].end(), Y2_smoothed_[ch].begin(),
+                       N2_[ch].begin(), [](float a, float b) {
+                         return b < a ? (0.9f * b + 0.1f * a) * 1.0002f
+                                      : a * 1.0002f;
+                       });
+      }
+    }
+
+    if (N2_initial_) {
+      if (++N2_counter_ == 1000) {
+        N2_initial_.reset();
+      } else {
+        // Compute the N2_initial from N2.
+        for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+          std::transform(N2_[ch].begin(), N2_[ch].end(),
+                         (*N2_initial_)[ch].begin(), (*N2_initial_)[ch].begin(),
+                         [](float a, float b) {
+                           return a > b ? b + 0.001f * (a - b) : a;
+                         });
+        }
+      }
+    }
+
+    // Limit the noise to a floor matching a WGN input of -96 dBFS.
+    constexpr float kNoiseFloor = 17.1267f;
+
+    for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+      for (auto& n : N2_[ch]) {
+        n = std::max(n, kNoiseFloor);
+      }
+      if (N2_initial_) {
+        for (auto& n : (*N2_initial_)[ch]) {
+          n = std::max(n, kNoiseFloor);
+        }
+      }
+    }
+  }
+
+  // Choose N2 estimate to use.
+  const auto& N2 = N2_initial_ ? (*N2_initial_) : N2_;
+
+  for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+    GenerateComfortNoise(optimization_, N2[ch], &seed_, &lower_band_noise[ch],
+                         &upper_band_noise[ch]);
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/comfort_noise_generator.h b/audio_processing/aec3/comfort_noise_generator.h
new file mode 100644
index 0000000..034234c
--- /dev/null
+++ b/audio_processing/aec3/comfort_noise_generator.h
@@ -0,0 +1,76 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_COMFORT_NOISE_GENERATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_COMFORT_NOISE_GENERATOR_H_
+
+#include <stdint.h>
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/aec_state.h"
+#include "audio_processing/aec3/fft_data.h"
+#include "rtc_base/constructor_magic.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+namespace aec3 {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+void EstimateComfortNoise_SSE2(const std::array<float, kFftLengthBy2Plus1>& N2,
+                               uint32_t* seed,
+                               FftData* lower_band_noise,
+                               FftData* upper_band_noise);
+#endif
+void EstimateComfortNoise(const std::array<float, kFftLengthBy2Plus1>& N2,
+                          uint32_t* seed,
+                          FftData* lower_band_noise,
+                          FftData* upper_band_noise);
+
+}  // namespace aec3
+
+// Generates the comfort noise.
+class ComfortNoiseGenerator {
+ public:
+  ComfortNoiseGenerator(Aec3Optimization optimization,
+                        size_t num_capture_channels);
+  ComfortNoiseGenerator() = delete;
+  ~ComfortNoiseGenerator();
+  ComfortNoiseGenerator(const ComfortNoiseGenerator&) = delete;
+
+  // Computes the comfort noise.
+  void Compute(bool saturated_capture,
+               rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+                   capture_spectrum,
+               rtc::ArrayView<FftData> lower_band_noise,
+               rtc::ArrayView<FftData> upper_band_noise);
+
+  // Returns the estimate of the background noise spectrum.
+  rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> NoiseSpectrum()
+      const {
+    return N2_;
+  }
+
+ private:
+  const Aec3Optimization optimization_;
+  uint32_t seed_;
+  const size_t num_capture_channels_;
+  std::unique_ptr<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+      N2_initial_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> Y2_smoothed_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> N2_;
+  int N2_counter_ = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_COMFORT_NOISE_GENERATOR_H_
diff --git a/audio_processing/aec3/comfort_noise_generator_unittest.cc b/audio_processing/aec3/comfort_noise_generator_unittest.cc
new file mode 100644
index 0000000..02c26cc
--- /dev/null
+++ b/audio_processing/aec3/comfort_noise_generator_unittest.cc
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/comfort_noise_generator.h"
+
+#include <algorithm>
+#include <numeric>
+
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "rtc_base/random.h"
+#include "rtc_base/system/arch.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+namespace {
+
+float Power(const FftData& N) {
+  std::array<float, kFftLengthBy2Plus1> N2;
+  N.Spectrum(Aec3Optimization::kNone, N2);
+  return std::accumulate(N2.begin(), N2.end(), 0.f) / N2.size();
+}
+
+}  // namespace
+
+TEST(ComfortNoiseGenerator, CorrectLevel) {
+  constexpr size_t kNumChannels = 5;
+  ComfortNoiseGenerator cng(DetectOptimization(), kNumChannels);
+  AecState aec_state(EchoCanceller3Config{}, kNumChannels);
+
+  std::vector<std::array<float, kFftLengthBy2Plus1>> N2(kNumChannels);
+  std::vector<FftData> n_lower(kNumChannels);
+  std::vector<FftData> n_upper(kNumChannels);
+
+  for (size_t ch = 0; ch < kNumChannels; ++ch) {
+    N2[ch].fill(1000.f * 1000.f / (ch + 1));
+    n_lower[ch].re.fill(0.f);
+    n_lower[ch].im.fill(0.f);
+    n_upper[ch].re.fill(0.f);
+    n_upper[ch].im.fill(0.f);
+  }
+
+  // Ensure instantaneous update to nonzero noise.
+  cng.Compute(false, N2, n_lower, n_upper);
+
+  for (size_t ch = 0; ch < kNumChannels; ++ch) {
+    EXPECT_LT(0.f, Power(n_lower[ch]));
+    EXPECT_LT(0.f, Power(n_upper[ch]));
+  }
+
+  for (int k = 0; k < 10000; ++k) {
+    cng.Compute(false, N2, n_lower, n_upper);
+  }
+
+  for (size_t ch = 0; ch < kNumChannels; ++ch) {
+    EXPECT_NEAR(2.f * N2[ch][0], Power(n_lower[ch]), N2[ch][0] / 10.f);
+    EXPECT_NEAR(2.f * N2[ch][0], Power(n_upper[ch]), N2[ch][0] / 10.f);
+  }
+}
+
+}  // namespace aec3
+}  // namespace webrtc
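A note on the noise synthesis above: each low-band bin keeps its estimated
magnitude but receives a random phase drawn from the 32-entry quantized sine
table, scaled by sqrt(2) to compensate the cross-fade power loss. The core
step in isolation, as a sketch reusing the kSqrt2Sin table and LCG constants
from comfort_noise_generator.cc:

    // Sketch: one iteration of the phase randomization.
    uint32_t seed = 42;
    seed = (seed * 69069 + 1) & (0x80000000 - 1);  // Advance the 31-bit LCG.
    const int i = seed >> 26;                      // Top 5 bits: table index.
    const float x = kSqrt2Sin[i];                  // sqrt(2) * sin(a)
    const float y = kSqrt2Sin[(i + 8) & 31];       // sqrt(2) * cos(a)
    // A bin of magnitude m becomes re = m * x, im = m * y: a random phase a
    // with energy 2 * m^2, offsetting the windowing loss described above.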
diff --git a/audio_processing/aec3/decimator.cc b/audio_processing/aec3/decimator.cc
new file mode 100644
index 0000000..4c1bef3
--- /dev/null
+++ b/audio_processing/aec3/decimator.cc
@@ -0,0 +1,91 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "audio_processing/aec3/decimator.h"
+
+#include <array>
+#include <vector>
+
+#include "audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// signal.butter(2, 3400/8000.0, 'lowpass', analog=False)
+const std::vector<CascadedBiQuadFilter::BiQuadParam> GetLowPassFilterDS2() {
+  return std::vector<CascadedBiQuadFilter::BiQuadParam>{
+      {{-1.f, 0.f}, {0.13833231f, 0.40743176f}, 0.22711796393486466f},
+      {{-1.f, 0.f}, {0.13833231f, 0.40743176f}, 0.22711796393486466f},
+      {{-1.f, 0.f}, {0.13833231f, 0.40743176f}, 0.22711796393486466f}};
+}
+
+// signal.ellip(6, 1, 40, 1800/8000, btype='lowpass', analog=False)
+const std::vector<CascadedBiQuadFilter::BiQuadParam> GetLowPassFilterDS4() {
+  return std::vector<CascadedBiQuadFilter::BiQuadParam>{
+      {{-0.08873842f, 0.99605496f}, {0.75916227f, 0.23841065f}, 0.26250696827f},
+      {{0.62273832f, 0.78243018f}, {0.74892112f, 0.5410152f}, 0.26250696827f},
+      {{0.71107693f, 0.70311421f}, {0.74895534f, 0.63924616f}, 0.26250696827f}};
+}
+
+// signal.cheby1(1, 6, [1000/8000, 2000/8000], btype='bandpass', analog=False)
+const std::vector<CascadedBiQuadFilter::BiQuadParam> GetBandPassFilterDS8() {
+  return std::vector<CascadedBiQuadFilter::BiQuadParam>{
+      {{1.f, 0.f}, {0.7601815f, 0.46423542f}, 0.10330478266505948f, true},
+      {{1.f, 0.f}, {0.7601815f, 0.46423542f}, 0.10330478266505948f, true},
+      {{1.f, 0.f}, {0.7601815f, 0.46423542f}, 0.10330478266505948f, true},
+      {{1.f, 0.f}, {0.7601815f, 0.46423542f}, 0.10330478266505948f, true},
+      {{1.f, 0.f}, {0.7601815f, 0.46423542f}, 0.10330478266505948f, true}};
+}
+
+// signal.butter(2, 1000/8000.0, 'highpass', analog=False)
+const std::vector<CascadedBiQuadFilter::BiQuadParam> GetHighPassFilter() {
+  return std::vector<CascadedBiQuadFilter::BiQuadParam>{
+      {{1.f, 0.f}, {0.72712179f, 0.21296904f}, 0.7570763753338849f}};
+}
+
+const std::vector<CascadedBiQuadFilter::BiQuadParam> GetPassThroughFilter() {
+  return std::vector<CascadedBiQuadFilter::BiQuadParam>{};
+}
+}  // namespace
+
+Decimator::Decimator(size_t down_sampling_factor)
+    : down_sampling_factor_(down_sampling_factor),
+      anti_aliasing_filter_(down_sampling_factor_ == 4
+                                ? GetLowPassFilterDS4()
+                                : (down_sampling_factor_ == 8
+                                       ? GetBandPassFilterDS8()
+                                       : GetLowPassFilterDS2())),
+      noise_reduction_filter_(down_sampling_factor_ == 8
+                                  ? GetPassThroughFilter()
+                                  : GetHighPassFilter()) {
+  RTC_DCHECK(down_sampling_factor_ == 2 || down_sampling_factor_ == 4 ||
+             down_sampling_factor_ == 8);
+}
+
+void Decimator::Decimate(rtc::ArrayView<const float> in,
+                         rtc::ArrayView<float> out) {
+  RTC_DCHECK_EQ(kBlockSize, in.size());
+  RTC_DCHECK_EQ(kBlockSize / down_sampling_factor_, out.size());
+  std::array<float, kBlockSize> x;
+
+  // Limit the frequency content of the signal to avoid aliasing.
+  anti_aliasing_filter_.Process(in, x);
+
+  // Reduce the impact of near-end noise.
+  noise_reduction_filter_.Process(x);
+
+  // Downsample the signal.
+  for (size_t j = 0, k = 0; j < out.size(); ++j, k += down_sampling_factor_) {
+    RTC_DCHECK_GT(kBlockSize, k);
+    out[j] = x[k];
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/decimator.h b/audio_processing/aec3/decimator.h
new file mode 100644
index 0000000..771cada
--- /dev/null
+++ b/audio_processing/aec3/decimator.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DECIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DECIMATOR_H_
+
+#include <array>
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/utility/cascaded_biquad_filter.h"
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+// Provides functionality for decimating a signal.
+class Decimator {
+ public:
+  explicit Decimator(size_t down_sampling_factor);
+
+  // Downsamples the signal.
+  void Decimate(rtc::ArrayView<const float> in, rtc::ArrayView<float> out);
+
+ private:
+  const size_t down_sampling_factor_;
+  CascadedBiQuadFilter anti_aliasing_filter_;
+  CascadedBiQuadFilter noise_reduction_filter_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Decimator);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_DECIMATOR_H_
diff --git a/audio_processing/aec3/decimator_unittest.cc b/audio_processing/aec3/decimator_unittest.cc
new file mode 100644
index 0000000..1e279ce
--- /dev/null
+++ b/audio_processing/aec3/decimator_unittest.cc
@@ -0,0 +1,135 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/decimator.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  rtc::StringBuilder ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.Release();
+}
+
+constexpr size_t kDownSamplingFactors[] = {2, 4, 8};
+constexpr float kPi = 3.141592f;
+constexpr size_t kNumStartupBlocks = 50;
+constexpr size_t kNumBlocks = 1000;
+
+void ProduceDecimatedSinusoidalOutputPower(int sample_rate_hz,
+                                           size_t down_sampling_factor,
+                                           float sinusoidal_frequency_hz,
+                                           float* input_power,
+                                           float* output_power) {
+  float input[kBlockSize * kNumBlocks];
+  const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+  // Produce a sinusoid of the specified frequency.
+  for (size_t k = 0; k < kBlockSize * kNumBlocks; ++k) {
+    input[k] = 32767.f * std::sin(2.f * kPi * sinusoidal_frequency_hz * k /
+                                  sample_rate_hz);
+  }
+
+  Decimator decimator(down_sampling_factor);
+  std::vector<float> output(sub_block_size * kNumBlocks);
+
+  for (size_t k = 0; k < kNumBlocks; ++k) {
+    std::vector<float> sub_block(sub_block_size);
+    decimator.Decimate(
+        rtc::ArrayView<const float>(&input[k * kBlockSize], kBlockSize),
+        sub_block);
+
+    std::copy(sub_block.begin(), sub_block.end(),
+              output.begin() + k * sub_block_size);
+  }
+
+  ASSERT_GT(kNumBlocks, kNumStartupBlocks);
+  rtc::ArrayView<const float> input_to_evaluate(
+      &input[kNumStartupBlocks * kBlockSize],
+      (kNumBlocks - kNumStartupBlocks) * kBlockSize);
+  rtc::ArrayView<const float> output_to_evaluate(
+      &output[kNumStartupBlocks * sub_block_size],
+      (kNumBlocks - kNumStartupBlocks) * sub_block_size);
+  *input_power =
+      std::inner_product(input_to_evaluate.begin(), input_to_evaluate.end(),
+                         input_to_evaluate.begin(), 0.f) /
+      input_to_evaluate.size();
+  *output_power =
+      std::inner_product(output_to_evaluate.begin(), output_to_evaluate.end(),
+                         output_to_evaluate.begin(), 0.f) /
+      output_to_evaluate.size();
+}
+
+}  // namespace
+
+// Verifies that there is little aliasing from upper frequencies in the
+// downsampling.
+TEST(Decimator, NoLeakageFromUpperFrequencies) {
+  float input_power;
+  float output_power;
+  for (auto rate : {16000, 32000, 48000}) {
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      ProduceDebugText(rate);
+      ProduceDecimatedSinusoidalOutputPower(rate, down_sampling_factor,
+                                            3.f / 8.f * rate, &input_power,
+                                            &output_power);
+      EXPECT_GT(0.0001f * input_power, output_power);
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies the check for the input size.
+TEST(Decimator, WrongInputSize) {
+  Decimator decimator(4);
+  std::vector<float> x(kBlockSize - 1, 0.f);
+  std::array<float, kBlockSize / 4> x_downsampled;
+  EXPECT_DEATH(decimator.Decimate(x, x_downsampled), "");
+}
+
+// Verifies the check for non-null output parameter.
+TEST(Decimator, NullOutput) {
+  Decimator decimator(4);
+  std::vector<float> x(kBlockSize, 0.f);
+  EXPECT_DEATH(decimator.Decimate(x, nullptr), "");
+}
+
+// Verifies the check for the output size.
+TEST(Decimator, WrongOutputSize) {
+  Decimator decimator(4);
+  std::vector<float> x(kBlockSize, 0.f);
+  std::array<float, kBlockSize / 4 - 1> x_downsampled;
+  EXPECT_DEATH(decimator.Decimate(x, x_downsampled), "");
+}
+
+// Verifies the check for the correct downsampling factor.
+TEST(Decimator, CorrectDownSamplingFactor) {
+  EXPECT_DEATH(Decimator(3), "");
+}
+
+#endif
+
+}  // namespace webrtc
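Typical use of the class under test, as a minimal sketch (kBlockSize is the
64-sample block length from aec3_common.h; the buffers here are zero-filled
placeholders):

    // Sketch: downsample one full-rate AEC3 block by a factor of 4.
    webrtc::Decimator decimator(/*down_sampling_factor=*/4);
    std::array<float, webrtc::kBlockSize> in{};       // Full-rate block.
    std::array<float, webrtc::kBlockSize / 4> out{};  // 16 output samples.
    decimator.Decimate(in, out);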
diff --git a/audio_processing/aec3/delay_estimate.h b/audio_processing/aec3/delay_estimate.h
new file mode 100644
index 0000000..ea5dd27
--- /dev/null
+++ b/audio_processing/aec3/delay_estimate.h
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DELAY_ESTIMATE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DELAY_ESTIMATE_H_
+
+#include <stddef.h>
+
+namespace webrtc {
+
+// Stores delay_estimates.
+struct DelayEstimate {
+  enum class Quality { kCoarse, kRefined };
+
+  DelayEstimate(Quality quality, size_t delay)
+      : quality(quality), delay(delay) {}
+
+  Quality quality;
+  size_t delay;
+  size_t blocks_since_last_change = 0;
+  size_t blocks_since_last_update = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_DELAY_ESTIMATE_H_
diff --git a/audio_processing/aec3/dominant_nearend_detector.cc b/audio_processing/aec3/dominant_nearend_detector.cc
new file mode 100644
index 0000000..8f4f589
--- /dev/null
+++ b/audio_processing/aec3/dominant_nearend_detector.cc
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/dominant_nearend_detector.h"
+
+#include <numeric>
+
+namespace webrtc {
+DominantNearendDetector::DominantNearendDetector(
+    const EchoCanceller3Config::Suppressor::DominantNearendDetection& config,
+    size_t num_capture_channels)
+    : enr_threshold_(config.enr_threshold),
+      enr_exit_threshold_(config.enr_exit_threshold),
+      snr_threshold_(config.snr_threshold),
+      hold_duration_(config.hold_duration),
+      trigger_threshold_(config.trigger_threshold),
+      use_during_initial_phase_(config.use_during_initial_phase),
+      num_capture_channels_(num_capture_channels),
+      trigger_counters_(num_capture_channels_),
+      hold_counters_(num_capture_channels_) {}
+
+void DominantNearendDetector::Update(
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+        nearend_spectrum,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+        residual_echo_spectrum,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+        comfort_noise_spectrum,
+    bool initial_state) {
+  nearend_state_ = false;
+
+  auto low_frequency_energy = [](rtc::ArrayView<const float> spectrum) {
+    RTC_DCHECK_LE(16, spectrum.size());
+    return std::accumulate(spectrum.begin() + 1, spectrum.begin() + 16, 0.f);
+  };
+
+  for (size_t ch = 0; ch < num_capture_channels_; ++ch) {
+    const float ne_sum = low_frequency_energy(nearend_spectrum[ch]);
+    const float echo_sum = low_frequency_energy(residual_echo_spectrum[ch]);
+    const float noise_sum = low_frequency_energy(comfort_noise_spectrum[ch]);
+
+    // Detect strong active nearend if the nearend is sufficiently stronger
+    // than the echo and the nearend noise.
+    if ((!initial_state || use_during_initial_phase_) &&
+        echo_sum < enr_threshold_ * ne_sum &&
+        ne_sum > snr_threshold_ * noise_sum) {
+      if (++trigger_counters_[ch] >= trigger_threshold_) {
+        // After a period of strong active nearend activity, flag nearend mode.
+        hold_counters_[ch] = hold_duration_;
+        trigger_counters_[ch] = trigger_threshold_;
+      }
+    } else {
+      // Forget previously detected strong active nearend activity.
+      trigger_counters_[ch] = std::max(0, trigger_counters_[ch] - 1);
+    }
+
+    // Exit nearend-state early at strong echo.
+    if (echo_sum > enr_exit_threshold_ * ne_sum &&
+        echo_sum > snr_threshold_ * noise_sum) {
+      hold_counters_[ch] = 0;
+    }
+
+    // Remain in any nearend mode for a certain duration.
+    hold_counters_[ch] = std::max(0, hold_counters_[ch] - 1);
+    nearend_state_ = nearend_state_ || hold_counters_[ch] > 0;
+  }
+}
+}  // namespace webrtc
diff --git a/audio_processing/aec3/dominant_nearend_detector.h b/audio_processing/aec3/dominant_nearend_detector.h
new file mode 100644
index 0000000..7859459
--- /dev/null
+++ b/audio_processing/aec3/dominant_nearend_detector.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DOMINANT_NEAREND_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DOMINANT_NEAREND_DETECTOR_H_
+
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/nearend_detector.h"
+
+namespace webrtc {
+// Class for selecting whether the suppressor is in the nearend or echo state.
+class DominantNearendDetector : public NearendDetector {
+ public:
+  DominantNearendDetector(
+      const EchoCanceller3Config::Suppressor::DominantNearendDetection& config,
+      size_t num_capture_channels);
+
+  // Returns whether the current state is the nearend state.
+  bool IsNearendState() const override { return nearend_state_; }
+
+  // Updates the state selection based on latest spectral estimates.
+  void Update(rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+                  nearend_spectrum,
+              rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+                  residual_echo_spectrum,
+              rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+                  comfort_noise_spectrum,
+              bool initial_state) override;
+
+ private:
+  const float enr_threshold_;
+  const float enr_exit_threshold_;
+  const float snr_threshold_;
+  const int hold_duration_;
+  const int trigger_threshold_;
+  const bool use_during_initial_phase_;
+  const size_t num_capture_channels_;
+
+  bool nearend_state_ = false;
+  std::vector<int> trigger_counters_;
+  std::vector<int> hold_counters_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_DOMINANT_NEAREND_DETECTOR_H_
diff --git a/audio_processing/aec3/downsampled_render_buffer.cc b/audio_processing/aec3/downsampled_render_buffer.cc
new file mode 100644
index 0000000..a4490e2
--- /dev/null
+++ b/audio_processing/aec3/downsampled_render_buffer.cc
@@ -0,0 +1,25 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/downsampled_render_buffer.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+DownsampledRenderBuffer::DownsampledRenderBuffer(
+    size_t downsampled_buffer_size)
+    : size(static_cast<int>(downsampled_buffer_size)),
+      buffer(downsampled_buffer_size, 0.f) {
+  std::fill(buffer.begin(), buffer.end(), 0.f);
+}
+
+DownsampledRenderBuffer::~DownsampledRenderBuffer() = default;
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/downsampled_render_buffer.h b/audio_processing/aec3/downsampled_render_buffer.h
new file mode 100644
index 0000000..fbdc9b4
--- /dev/null
+++ b/audio_processing/aec3/downsampled_render_buffer.h
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DOWNSAMPLED_RENDER_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DOWNSAMPLED_RENDER_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Holds the circular buffer of the downsampled render data.
+struct DownsampledRenderBuffer {
+  explicit DownsampledRenderBuffer(size_t downsampled_buffer_size);
+  ~DownsampledRenderBuffer();
+
+  int IncIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index < size - 1 ? index + 1 : 0;
+  }
+
+  int DecIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index > 0 ? index - 1 : size - 1;
+  }
+
+  int OffsetIndex(int index, int offset) const {
+    RTC_DCHECK_GE(buffer.size(), offset);
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return (size + index + offset) % size;
+  }
+
+  void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+  void IncWriteIndex() { write = IncIndex(write); }
+  void DecWriteIndex() { write = DecIndex(write); }
+  void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+  void IncReadIndex() { read = IncIndex(read); }
+  void DecReadIndex() { read = DecIndex(read); }
+
+  const int size;
+  std::vector<float> buffer;
+  int write = 0;
+  int read = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_DOWNSAMPLED_RENDER_BUFFER_H_
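The index helpers above keep read and write inside [0, size) with explicit
wraparound rather than relying on signed modulo. A small sketch of the
arithmetic, assuming a buffer of size 8:

    webrtc::DownsampledRenderBuffer buf(/*downsampled_buffer_size=*/8);
    buf.DecWriteIndex();      // write: 0 -> 7 (wraps backwards).
    buf.UpdateWriteIndex(3);  // write: 7 -> (8 + 7 + 3) % 8 = 2.
    buf.IncReadIndex();       // read:  0 -> 1.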
diff --git a/audio_processing/aec3/echo_audibility.cc b/audio_processing/aec3/echo_audibility.cc
new file mode 100644
index 0000000..2b8f96d
--- /dev/null
+++ b/audio_processing/aec3/echo_audibility.cc
@@ -0,0 +1,118 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/echo_audibility.h"
+
+#include <algorithm>
+#include <cmath>
+#include <utility>
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/block_buffer.h"
+#include "audio_processing/aec3/spectrum_buffer.h"
+#include "audio_processing/aec3/stationarity_estimator.h"
+
+namespace webrtc {
+
+EchoAudibility::EchoAudibility(bool use_render_stationarity_at_init)
+    : use_render_stationarity_at_init_(use_render_stationarity_at_init) {
+  Reset();
+}
+
+EchoAudibility::~EchoAudibility() = default;
+
+void EchoAudibility::Update(const RenderBuffer& render_buffer,
+                            rtc::ArrayView<const float> average_reverb,
+                            int delay_blocks,
+                            bool external_delay_seen) {
+  UpdateRenderNoiseEstimator(render_buffer.GetSpectrumBuffer(),
+                             render_buffer.GetBlockBuffer(),
+                             external_delay_seen);
+
+  if (external_delay_seen || use_render_stationarity_at_init_) {
+    UpdateRenderStationarityFlags(render_buffer, average_reverb, delay_blocks);
+  }
+}
+
+void EchoAudibility::Reset() {
+  render_stationarity_.Reset();
+  non_zero_render_seen_ = false;
+  render_spectrum_write_prev_ = absl::nullopt;
+}
+
+void EchoAudibility::UpdateRenderStationarityFlags(
+    const RenderBuffer& render_buffer,
+    rtc::ArrayView<const float> average_reverb,
+    int min_channel_delay_blocks) {
+  const SpectrumBuffer& spectrum_buffer = render_buffer.GetSpectrumBuffer();
+  int idx_at_delay = spectrum_buffer.OffsetIndex(spectrum_buffer.read,
+                                                 min_channel_delay_blocks);
+
+  int num_lookahead = render_buffer.Headroom() - min_channel_delay_blocks + 1;
+  num_lookahead = std::max(0, num_lookahead);
+
+  render_stationarity_.UpdateStationarityFlags(spectrum_buffer, average_reverb,
+                                               idx_at_delay, num_lookahead);
+}
+
+void EchoAudibility::UpdateRenderNoiseEstimator(
+    const SpectrumBuffer& spectrum_buffer,
+    const BlockBuffer& block_buffer,
+    bool external_delay_seen) {
+  if (!render_spectrum_write_prev_) {
+    render_spectrum_write_prev_ = spectrum_buffer.write;
+    render_block_write_prev_ = block_buffer.write;
+    return;
+  }
+  int render_spectrum_write_current = spectrum_buffer.write;
+  if (!non_zero_render_seen_ && !external_delay_seen) {
+    non_zero_render_seen_ = !IsRenderTooLow(block_buffer);
+  }
+  if (non_zero_render_seen_) {
+    for (int idx = render_spectrum_write_prev_.value();
+         idx != render_spectrum_write_current;
+         idx = spectrum_buffer.DecIndex(idx)) {
+      render_stationarity_.UpdateNoiseEstimator(spectrum_buffer.buffer[idx]);
+    }
+  }
+  render_spectrum_write_prev_ = render_spectrum_write_current;
+}
+
+bool EchoAudibility::IsRenderTooLow(const BlockBuffer& block_buffer) {
+  const int num_render_channels =
+      static_cast<int>(block_buffer.buffer[0][0].size());
+  bool too_low = false;
+  const int render_block_write_current = block_buffer.write;
+  if (render_block_write_current == render_block_write_prev_) {
+    too_low = true;
+  } else {
+    for (int idx = render_block_write_prev_; idx != render_block_write_current;
+         idx = block_buffer.IncIndex(idx)) {
+      float max_abs_over_channels = 0.f;
+      for (int ch = 0; ch < num_render_channels; ++ch) {
+        auto block = block_buffer.buffer[idx][0][ch];
+        auto r = std::minmax_element(block.cbegin(), block.cend());
+        float max_abs_channel =
+            std::max(std::fabs(*r.first), std::fabs(*r.second));
+        max_abs_over_channels =
+            std::max(max_abs_over_channels, max_abs_channel);
+      }
+      if (max_abs_over_channels < 10.f) {
+        too_low = true;  // Discards all blocks if one of them is too low.
+        break;
+      }
+    }
+  }
+  render_block_write_prev_ = render_block_write_current;
+  return too_low;
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/echo_audibility.h b/audio_processing/aec3/echo_audibility.h
new file mode 100644
index 0000000..306d3cf
--- /dev/null
+++ b/audio_processing/aec3/echo_audibility.h
@@ -0,0 +1,86 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_AUDIBILITY_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_AUDIBILITY_H_
+
+#include <stddef.h>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/block_buffer.h"
+#include "audio_processing/aec3/render_buffer.h"
+#include "audio_processing/aec3/spectrum_buffer.h"
+#include "audio_processing/aec3/stationarity_estimator.h"
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+class EchoAudibility {
+ public:
+  explicit EchoAudibility(bool use_render_stationarity_at_init);
+  ~EchoAudibility();
+
+  EchoAudibility(const EchoAudibility&) = delete;
+  EchoAudibility& operator=(const EchoAudibility&) = delete;
+
+  // Feed new render data to the echo audibility estimator.
+  void Update(const RenderBuffer& render_buffer,
+              rtc::ArrayView<const float> average_reverb,
+              int min_channel_delay_blocks,
+              bool external_delay_seen);
+
+  // Get the residual echo scaling.
+  void GetResidualEchoScaling(bool filter_has_had_time_to_converge,
+                              rtc::ArrayView<float> residual_scaling) const {
+    for (size_t band = 0; band < residual_scaling.size(); ++band) {
+      if (render_stationarity_.IsBandStationary(band) &&
+          (filter_has_had_time_to_converge ||
+           use_render_stationarity_at_init_)) {
+        residual_scaling[band] = 0.f;
+      } else {
+        residual_scaling[band] = 1.0f;
+      }
+    }
+  }
+
+  // Returns true if the current render block is estimated as stationary.
+  bool IsBlockStationary() const {
+    return render_stationarity_.IsBlockStationary();
+  }
+
+ private:
+  // Reset the EchoAudibility class.
+  void Reset();
+
+  // Updates the render stationarity flags for the current frame.
+  void UpdateRenderStationarityFlags(const RenderBuffer& render_buffer,
+                                     rtc::ArrayView<const float> average_reverb,
+                                     int delay_blocks);
+
+  // Updates the noise estimator with the new render data since the previous
+  // call to this method.
+  void UpdateRenderNoiseEstimator(const SpectrumBuffer& spectrum_buffer,
+                                  const BlockBuffer& block_buffer,
+                                  bool external_delay_seen);
+
+  // Returns a bool being true if the render signal contains just close to zero
+  // values.
+  bool IsRenderTooLow(const BlockBuffer& block_buffer);
+
+  absl::optional<int> render_spectrum_write_prev_;
+  int render_block_write_prev_;
+  bool non_zero_render_seen_;
+  const bool use_render_stationarity_at_init_;
+  StationarityEstimator render_stationarity_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ECHO_AUDIBILITY_H_
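How a caller consumes the scaling, as a sketch; the kFftLengthBy2Plus1-sized
array matches the band count used throughout AEC3, and which bands scale to
0.f depends on the stationarity flags accumulated via Update():

    webrtc::EchoAudibility audibility(
        /*use_render_stationarity_at_init=*/false);
    std::array<float, webrtc::kFftLengthBy2Plus1> scaling;
    audibility.GetResidualEchoScaling(
        /*filter_has_had_time_to_converge=*/true, scaling);
    // scaling[band] is 0.f for bands whose render signal is classified as
    // stationary, and 1.f otherwise.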
diff --git a/audio_processing/aec3/echo_canceller3.cc b/audio_processing/aec3/echo_canceller3.cc
new file mode 100644
index 0000000..d888cb4
--- /dev/null
+++ b/audio_processing/aec3/echo_canceller3.cc
@@ -0,0 +1,511 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "audio_processing/aec3/echo_canceller3.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/high_pass_filter.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomic_ops.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+
+enum class EchoCanceller3ApiCall { kCapture, kRender };
+
+bool DetectSaturation(rtc::ArrayView<const float> y) {
+  for (auto y_k : y) {
+    if (y_k >= 32700.0f || y_k <= -32700.0f) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Method for adjusting config parameter dependencies.
+EchoCanceller3Config AdjustConfig(const EchoCanceller3Config& config) {
+  EchoCanceller3Config adjusted_cfg = config;
+
+  if (field_trial::IsEnabled("WebRTC-Aec3ShortHeadroomKillSwitch")) {
+    // Two blocks headroom.
+    adjusted_cfg.delay.delay_headroom_samples = kBlockSize * 2;
+  }
+
+  if (field_trial::IsEnabled("WebRTC-Aec3ClampInstQualityToZeroKillSwitch")) {
+    adjusted_cfg.erle.clamp_quality_estimate_to_zero = false;
+  }
+
+  if (field_trial::IsEnabled("WebRTC-Aec3ClampInstQualityToOneKillSwitch")) {
+    adjusted_cfg.erle.clamp_quality_estimate_to_one = false;
+  }
+
+  if (field_trial::IsEnabled(
+          "WebRTC-Aec3EnforceRenderDelayEstimationDownmixing")) {
+    adjusted_cfg.delay.render_alignment_mixing.downmix = true;
+    adjusted_cfg.delay.render_alignment_mixing.adaptive_selection = false;
+  }
+
+  if (field_trial::IsEnabled(
+          "WebRTC-Aec3EnforceCaptureDelayEstimationDownmixing")) {
+    adjusted_cfg.delay.capture_alignment_mixing.downmix = true;
+    adjusted_cfg.delay.capture_alignment_mixing.adaptive_selection = false;
+  }
+
+  if (field_trial::IsEnabled(
+          "WebRTC-Aec3EnforceCaptureDelayEstimationLeftRightPrioritization")) {
+    adjusted_cfg.delay.capture_alignment_mixing.prefer_first_two_channels =
+        true;
+  }
+
+  if (field_trial::IsEnabled(
+          "WebRTC-"
+          "Aec3RenderDelayEstimationLeftRightPrioritizationKillSwitch")) {
+    adjusted_cfg.delay.capture_alignment_mixing.prefer_first_two_channels =
+        false;
+  }
+
+  return adjusted_cfg;
+}
+
+void FillSubFrameView(
+    AudioBuffer* frame,
+    size_t sub_frame_index,
+    std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame_view) {
+  RTC_DCHECK_GE(1, sub_frame_index);
+  RTC_DCHECK_LE(0, sub_frame_index);
+  RTC_DCHECK_EQ(frame->num_bands(), sub_frame_view->size());
+  RTC_DCHECK_EQ(frame->num_channels(), (*sub_frame_view)[0].size());
+  for (size_t band = 0; band < sub_frame_view->size(); ++band) {
+    for (size_t channel = 0; channel < (*sub_frame_view)[0].size(); ++channel) {
+      (*sub_frame_view)[band][channel] = rtc::ArrayView<float>(
+          &frame->split_bands(channel)[band][sub_frame_index * kSubFrameLength],
+          kSubFrameLength);
+    }
+  }
+}
+
+void FillSubFrameView(
+    std::vector<std::vector<std::vector<float>>>* frame,
+    size_t sub_frame_index,
+    std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame_view) {
+  RTC_DCHECK_GE(1, sub_frame_index);
+  RTC_DCHECK_EQ(frame->size(), sub_frame_view->size());
+  RTC_DCHECK_EQ((*frame)[0].size(), (*sub_frame_view)[0].size());
+  for (size_t band = 0; band < frame->size(); ++band) {
+    for (size_t channel = 0; channel < (*frame)[band].size(); ++channel) {
+      (*sub_frame_view)[band][channel] = rtc::ArrayView<float>(
+          &(*frame)[band][channel][sub_frame_index * kSubFrameLength],
+          kSubFrameLength);
+    }
+  }
+}
+
+void ProcessCaptureFrameContent(
+    AudioBuffer* linear_output,
+    AudioBuffer* capture,
+    bool level_change,
+    bool saturated_microphone_signal,
+    size_t sub_frame_index,
+    FrameBlocker* capture_blocker,
+    BlockFramer* linear_output_framer,
+    BlockFramer* output_framer,
+    BlockProcessor* block_processor,
+    std::vector<std::vector<std::vector<float>>>* linear_output_block,
+    std::vector<std::vector<rtc::ArrayView<float>>>*
+        linear_output_sub_frame_view,
+    std::vector<std::vector<std::vector<float>>>* capture_block,
+    std::vector<std::vector<rtc::ArrayView<float>>>* capture_sub_frame_view) {
+  FillSubFrameView(capture, sub_frame_index, capture_sub_frame_view);
+
+  if (linear_output) {
+    RTC_DCHECK(linear_output_framer);
+    RTC_DCHECK(linear_output_block);
+    RTC_DCHECK(linear_output_sub_frame_view);
+    FillSubFrameView(linear_output, sub_frame_index,
+                     linear_output_sub_frame_view);
+  }
+
+  capture_blocker->InsertSubFrameAndExtractBlock(*capture_sub_frame_view,
+                                                 capture_block);
+  block_processor->ProcessCapture(level_change, saturated_microphone_signal,
+                                  linear_output_block, capture_block);
+  output_framer->InsertBlockAndExtractSubFrame(*capture_block,
+                                               capture_sub_frame_view);
+
+  if (linear_output) {
+    RTC_DCHECK(linear_output_framer);
+    linear_output_framer->InsertBlockAndExtractSubFrame(
+        *linear_output_block, linear_output_sub_frame_view);
+  }
+}
+
+void ProcessRemainingCaptureFrameContent(
+    bool level_change,
+    bool saturated_microphone_signal,
+    FrameBlocker* capture_blocker,
+    BlockFramer* linear_output_framer,
+    BlockFramer* output_framer,
+    BlockProcessor* block_processor,
+    std::vector<std::vector<std::vector<float>>>* linear_output_block,
+    std::vector<std::vector<std::vector<float>>>* block) {
+  if (!capture_blocker->IsBlockAvailable()) {
+    return;
+  }
+
+  capture_blocker->ExtractBlock(block);
+  block_processor->ProcessCapture(level_change, saturated_microphone_signal,
+                                  linear_output_block, block);
+  output_framer->InsertBlock(*block);
+
+  if (linear_output_framer) {
+    RTC_DCHECK(linear_output_block);
+    linear_output_framer->InsertBlock(*linear_output_block);
+  }
+}
+
+void BufferRenderFrameContent(
+    std::vector<std::vector<std::vector<float>>>* render_frame,
+    size_t sub_frame_index,
+    FrameBlocker* render_blocker,
+    BlockProcessor* block_processor,
+    std::vector<std::vector<std::vector<float>>>* block,
+    std::vector<std::vector<rtc::ArrayView<float>>>* sub_frame_view) {
+  FillSubFrameView(render_frame, sub_frame_index, sub_frame_view);
+  render_blocker->InsertSubFrameAndExtractBlock(*sub_frame_view, block);
+  block_processor->BufferRender(*block);
+}
+
+void BufferRemainingRenderFrameContent(
+    FrameBlocker* render_blocker,
+    BlockProcessor* block_processor,
+    std::vector<std::vector<std::vector<float>>>* block) {
+  if (!render_blocker->IsBlockAvailable()) {
+    return;
+  }
+  render_blocker->ExtractBlock(block);
+  block_processor->BufferRender(*block);
+}
+
+void CopyBufferIntoFrame(const AudioBuffer& buffer,
+                         size_t num_bands,
+                         size_t num_channels,
+                         std::vector<std::vector<std::vector<float>>>* frame) {
+  RTC_DCHECK_EQ(num_bands, frame->size());
+  RTC_DCHECK_EQ(num_channels, (*frame)[0].size());
+  RTC_DCHECK_EQ(AudioBuffer::kSplitBandSize, (*frame)[0][0].size());
+  for (size_t band = 0; band < num_bands; ++band) {
+    for (size_t channel = 0; channel < num_channels; ++channel) {
+      rtc::ArrayView<const float> buffer_view(
+          &buffer.split_bands_const(channel)[band][0],
+          AudioBuffer::kSplitBandSize);
+      std::copy(buffer_view.begin(), buffer_view.end(),
+                (*frame)[band][channel].begin());
+    }
+  }
+}
+
+}  // namespace
+
+class EchoCanceller3::RenderWriter {
+ public:
+  RenderWriter(ApmDataDumper* data_dumper,
+               SwapQueue<std::vector<std::vector<std::vector<float>>>,
+                         Aec3RenderQueueItemVerifier>* render_transfer_queue,
+               size_t num_bands,
+               size_t num_channels);
+  ~RenderWriter();
+  void Insert(const AudioBuffer& input);
+
+ private: + ApmDataDumper* data_dumper_; + const size_t num_bands_; + const size_t num_channels_; + HighPassFilter high_pass_filter_; + std::vector>> render_queue_input_frame_; + SwapQueue>>, + Aec3RenderQueueItemVerifier>* render_transfer_queue_; + RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RenderWriter); +}; + +EchoCanceller3::RenderWriter::RenderWriter( + ApmDataDumper* data_dumper, + SwapQueue>>, + Aec3RenderQueueItemVerifier>* render_transfer_queue, + size_t num_bands, + size_t num_channels) + : data_dumper_(data_dumper), + num_bands_(num_bands), + num_channels_(num_channels), + high_pass_filter_(16000, num_channels), + render_queue_input_frame_( + num_bands_, + std::vector>( + num_channels_, + std::vector(AudioBuffer::kSplitBandSize, 0.f))), + render_transfer_queue_(render_transfer_queue) { + RTC_DCHECK(data_dumper); +} + +EchoCanceller3::RenderWriter::~RenderWriter() = default; + +void EchoCanceller3::RenderWriter::Insert(const AudioBuffer& input) { + RTC_DCHECK_EQ(AudioBuffer::kSplitBandSize, input.num_frames_per_band()); + RTC_DCHECK_EQ(num_bands_, input.num_bands()); + RTC_DCHECK_EQ(num_channels_, input.num_channels()); + + // TODO(bugs.webrtc.org/8759) Temporary work-around. + if (num_bands_ != input.num_bands()) + return; + + data_dumper_->DumpWav("aec3_render_input", AudioBuffer::kSplitBandSize, + &input.split_bands_const(0)[0][0], 16000, 1); + + CopyBufferIntoFrame(input, num_bands_, num_channels_, + &render_queue_input_frame_); + high_pass_filter_.Process(&render_queue_input_frame_[0]); + + static_cast(render_transfer_queue_->Insert(&render_queue_input_frame_)); +} + +int EchoCanceller3::instance_count_ = 0; + +EchoCanceller3::EchoCanceller3(const EchoCanceller3Config& config, + int sample_rate_hz, + size_t num_render_channels, + size_t num_capture_channels) + : EchoCanceller3(AdjustConfig(config), + sample_rate_hz, + num_render_channels, + num_capture_channels, + std::unique_ptr( + BlockProcessor::Create(AdjustConfig(config), + sample_rate_hz, + num_render_channels, + num_capture_channels))) {} +EchoCanceller3::EchoCanceller3(const EchoCanceller3Config& config, + int sample_rate_hz, + size_t num_render_channels, + size_t num_capture_channels, + std::unique_ptr block_processor) + : data_dumper_( + new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))), + config_(config), + sample_rate_hz_(sample_rate_hz), + num_bands_(NumBandsForRate(sample_rate_hz_)), + num_render_channels_(num_render_channels), + num_capture_channels_(num_capture_channels), + output_framer_(num_bands_, num_capture_channels_), + capture_blocker_(num_bands_, num_capture_channels_), + render_blocker_(num_bands_, num_render_channels_), + render_transfer_queue_( + kRenderTransferQueueSizeFrames, + std::vector>>( + num_bands_, + std::vector>( + num_render_channels_, + std::vector(AudioBuffer::kSplitBandSize, 0.f))), + Aec3RenderQueueItemVerifier(num_bands_, + num_render_channels_, + AudioBuffer::kSplitBandSize)), + block_processor_(std::move(block_processor)), + render_queue_output_frame_( + num_bands_, + std::vector>( + num_render_channels_, + std::vector(AudioBuffer::kSplitBandSize, 0.f))), + render_block_( + num_bands_, + std::vector>(num_render_channels_, + std::vector(kBlockSize, 0.f))), + capture_block_( + num_bands_, + std::vector>(num_capture_channels_, + std::vector(kBlockSize, 0.f))), + render_sub_frame_view_( + num_bands_, + std::vector>(num_render_channels_)), + capture_sub_frame_view_( + num_bands_, + std::vector>(num_capture_channels_)), + block_delay_buffer_(num_bands_, + 
AudioBuffer::kSplitBandSize, + config_.delay.fixed_capture_delay_samples) { + RTC_DCHECK(ValidFullBandRate(sample_rate_hz_)); + + render_writer_.reset(new RenderWriter(data_dumper_.get(), + &render_transfer_queue_, num_bands_, + num_render_channels_)); + + RTC_DCHECK_EQ(num_bands_, std::max(sample_rate_hz_, 16000) / 16000); + RTC_DCHECK_GE(kMaxNumBands, num_bands_); + + if (config_.filter.export_linear_aec_output) { + linear_output_framer_.reset(new BlockFramer(1, num_capture_channels_)); + linear_output_block_ = + std::make_unique>>>( + 1, std::vector>( + num_capture_channels_, std::vector(kBlockSize, 0.f))); + linear_output_sub_frame_view_ = + std::vector>>( + 1, std::vector>(num_capture_channels_)); + } +} + +EchoCanceller3::~EchoCanceller3() = default; + +void EchoCanceller3::AnalyzeRender(const AudioBuffer& render) { + RTC_DCHECK_RUNS_SERIALIZED(&render_race_checker_); + + RTC_DCHECK_EQ(render.num_channels(), num_render_channels_); + data_dumper_->DumpRaw("aec3_call_order", + static_cast(EchoCanceller3ApiCall::kRender)); + + return render_writer_->Insert(render); +} + +void EchoCanceller3::AnalyzeCapture(const AudioBuffer& capture) { + RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_); + data_dumper_->DumpWav("aec3_capture_analyze_input", capture.num_frames(), + capture.channels_const()[0], sample_rate_hz_, 1); + saturated_microphone_signal_ = false; + for (size_t channel = 0; channel < capture.num_channels(); ++channel) { + saturated_microphone_signal_ |= + DetectSaturation(rtc::ArrayView( + capture.channels_const()[channel], capture.num_frames())); + if (saturated_microphone_signal_) { + break; + } + } +} + +void EchoCanceller3::ProcessCapture(AudioBuffer* capture, bool level_change) { + ProcessCapture(capture, nullptr, level_change); +} + +void EchoCanceller3::ProcessCapture(AudioBuffer* capture, + AudioBuffer* linear_output, + bool level_change) { + RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_); + RTC_DCHECK(capture); + RTC_DCHECK_EQ(num_bands_, capture->num_bands()); + RTC_DCHECK_EQ(AudioBuffer::kSplitBandSize, capture->num_frames_per_band()); + RTC_DCHECK_EQ(capture->num_channels(), num_capture_channels_); + data_dumper_->DumpRaw("aec3_call_order", + static_cast(EchoCanceller3ApiCall::kCapture)); + + if (linear_output && !linear_output_framer_) { + RTC_LOG(LS_ERROR) << "Trying to retrieve the linear AEC output without " + "properly configuring AEC3."; + RTC_NOTREACHED(); + } + + // Report capture call in the metrics and periodically update API call + // metrics. + api_call_metrics_.ReportCaptureCall(); + + // Optionally delay the capture signal. 
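A note on the threading model used above: AnalyzeRender() only high-pass filters the frame and pushes it into the render SwapQueue, while ProcessCapture() later drains that queue on the capture thread via EmptyRenderQueue(). The sketch below illustrates the assumed swap semantics with a simplified, hypothetical queue (the real rtc::SwapQueue differs in detail; this is only a model of the hand-off, not the actual implementation). The optional capture delay flagged in the comment above is applied immediately below.

#include <cstddef>
#include <mutex>
#include <utility>
#include <vector>

template <typename T>
class TinySwapQueue {
 public:
  // All slots are preallocated, so neither thread allocates after start-up.
  TinySwapQueue(size_t capacity, T prototype) : slots_(capacity, prototype) {}

  // Producer side: swaps the caller's filled buffer with a recycled slot.
  bool Insert(T* item) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (count_ == slots_.size()) return false;  // Queue full: frame dropped.
    std::swap(slots_[(begin_ + count_) % slots_.size()], *item);
    ++count_;
    return true;
  }

  // Consumer side: swaps the oldest filled slot into the caller's buffer.
  bool Remove(T* item) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (count_ == 0) return false;
    std::swap(slots_[begin_], *item);
    begin_ = (begin_ + 1) % slots_.size();
    --count_;
    return true;
  }

 private:
  std::mutex mutex_;
  std::vector<T> slots_;
  size_t begin_ = 0;
  size_t count_ = 0;
};

Swapping preallocated buffers instead of copying or allocating is what makes the hand-off safe to call from real-time audio threads.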
+  if (config_.delay.fixed_capture_delay_samples > 0) {
+    block_delay_buffer_.DelaySignal(capture);
+  }
+
+  rtc::ArrayView<float> capture_lower_band = rtc::ArrayView<float>(
+      &capture->split_bands(0)[0][0], AudioBuffer::kSplitBandSize);
+
+  data_dumper_->DumpWav("aec3_capture_input", capture_lower_band, 16000, 1);
+
+  EmptyRenderQueue();
+
+  ProcessCaptureFrameContent(linear_output, capture, level_change,
+                             saturated_microphone_signal_, 0,
+                             &capture_blocker_, linear_output_framer_.get(),
+                             &output_framer_, block_processor_.get(),
+                             linear_output_block_.get(),
+                             &linear_output_sub_frame_view_, &capture_block_,
+                             &capture_sub_frame_view_);
+
+  ProcessCaptureFrameContent(linear_output, capture, level_change,
+                             saturated_microphone_signal_, 1,
+                             &capture_blocker_, linear_output_framer_.get(),
+                             &output_framer_, block_processor_.get(),
+                             linear_output_block_.get(),
+                             &linear_output_sub_frame_view_, &capture_block_,
+                             &capture_sub_frame_view_);
+
+  ProcessRemainingCaptureFrameContent(
+      level_change, saturated_microphone_signal_, &capture_blocker_,
+      linear_output_framer_.get(), &output_framer_, block_processor_.get(),
+      linear_output_block_.get(), &capture_block_);
+
+  data_dumper_->DumpWav("aec3_capture_output", AudioBuffer::kSplitBandSize,
+                        &capture->split_bands(0)[0][0], 16000, 1);
+}
+
+EchoControl::Metrics EchoCanceller3::GetMetrics() const {
+  RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+  Metrics metrics;
+  block_processor_->GetMetrics(&metrics);
+  return metrics;
+}
+
+void EchoCanceller3::SetAudioBufferDelay(int delay_ms) {
+  RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+  block_processor_->SetAudioBufferDelay(delay_ms);
+}
+
+bool EchoCanceller3::ActiveProcessing() const {
+  return true;
+}
+
+EchoCanceller3Config EchoCanceller3::CreateDefaultConfig(
+    size_t num_render_channels,
+    size_t num_capture_channels) {
+  EchoCanceller3Config cfg;
+  if (num_render_channels > 1) {
+    // Use a shorter and more rapidly adapting shadow filter to compensate for
+    // the increased number of total filter parameters to adapt.
+    cfg.filter.shadow.length_blocks = 11;
+    cfg.filter.shadow.rate = 0.95f;
+    cfg.filter.shadow_initial.length_blocks = 11;
+    cfg.filter.shadow_initial.rate = 0.95f;
+
+    // Use a more conservative suppressor behavior for non-nearend speech.
+    cfg.suppressor.normal_tuning.max_dec_factor_lf = 0.35f;
+    cfg.suppressor.normal_tuning.max_inc_factor = 1.5f;
+  }
+  return cfg;
+}
+
+void EchoCanceller3::EmptyRenderQueue() {
+  RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+  bool frame_to_buffer =
+      render_transfer_queue_.Remove(&render_queue_output_frame_);
+  while (frame_to_buffer) {
+    // Report render call in the metrics.
+    api_call_metrics_.ReportRenderCall();
+
+    BufferRenderFrameContent(&render_queue_output_frame_, 0, &render_blocker_,
+                             block_processor_.get(), &render_block_,
+                             &render_sub_frame_view_);
+
+    BufferRenderFrameContent(&render_queue_output_frame_, 1, &render_blocker_,
+                             block_processor_.get(), &render_block_,
+                             &render_sub_frame_view_);
+
+    BufferRemainingRenderFrameContent(&render_blocker_, block_processor_.get(),
+                                      &render_block_);
+
+    frame_to_buffer =
+        render_transfer_queue_.Remove(&render_queue_output_frame_);
+  }
+}
+}  // namespace webrtc
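EmptyRenderQueue() above feeds each 160-sample frame to the blocker as two 80-sample sub-frames plus an occasional remainder block, mirroring the capture path. A minimal sketch of that rate conversion, assuming the AEC3 constants kSubFrameLength == 80 and kBlockSize == 64:

#include <cstdio>

int main() {
  constexpr int kSubFrameLength = 80;
  constexpr int kBlockSize = 64;
  int buffered = 0;  // Samples currently held by the FrameBlocker.
  for (int frame = 0; frame < 4; ++frame) {
    for (int sub_frame = 0; sub_frame < 2; ++sub_frame) {
      buffered += kSubFrameLength;  // InsertSubFrameAndExtractBlock()...
      buffered -= kBlockSize;       // ...always extracts exactly one block.
    }
    if (buffered >= kBlockSize) {  // The "remaining content" pass.
      buffered -= kBlockSize;
    }
    std::printf("frame %d: %d samples left in the blocker\n", frame, buffered);
  }
  return 0;
}

The remainder pass fires every other frame, so on average 2.5 blocks are produced per frame, which matches 160 / 64.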
diff --git a/audio_processing/aec3/echo_canceller3.h b/audio_processing/aec3/echo_canceller3.h
new file mode 100644
index 0000000..057d0b2
--- /dev/null
+++ b/audio_processing/aec3/echo_canceller3.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_CANCELLER3_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_CANCELLER3_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "api/echo_control.h"
+#include "audio_processing/aec3/api_call_jitter_metrics.h"
+#include "audio_processing/aec3/block_delay_buffer.h"
+#include "audio_processing/aec3/block_framer.h"
+#include "audio_processing/aec3/block_processor.h"
+#include "audio_processing/aec3/frame_blocker.h"
+#include "audio_processing/audio_buffer.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/swap_queue.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// Functor for verifying the invariants of the frames being put into the
+// render queue.
+class Aec3RenderQueueItemVerifier {
+ public:
+  Aec3RenderQueueItemVerifier(size_t num_bands,
+                              size_t num_channels,
+                              size_t frame_length)
+      : num_bands_(num_bands),
+        num_channels_(num_channels),
+        frame_length_(frame_length) {}
+
+  bool operator()(
+      const std::vector<std::vector<std::vector<float>>>& v) const {
+    if (v.size() != num_bands_) {
+      return false;
+    }
+    for (const auto& band : v) {
+      if (band.size() != num_channels_) {
+        return false;
+      }
+      for (const auto& channel : band) {
+        if (channel.size() != frame_length_) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+ private:
+  const size_t num_bands_;
+  const size_t num_channels_;
+  const size_t frame_length_;
+};
+
+// Main class for the echo canceller3.
+// It does three things:
+// -Receives 10 ms frames of band-split audio.
+// -Provides the lower level echo canceller functionality with
+// blocks of 64 samples of audio data.
+// -Partially handles the jitter in the render and capture API
+// call sequence.
+//
+// The class is supposed to be used in a non-concurrent manner apart from the
+// AnalyzeRender call which can be called concurrently with the other methods.
+class EchoCanceller3 : public EchoControl {
+ public:
+  // Normal c-tor to use.
+  EchoCanceller3(const EchoCanceller3Config& config,
+                 int sample_rate_hz,
+                 size_t num_render_channels,
+                 size_t num_capture_channels);
+  // C-tor for testing, which allows injecting a custom block processor.
+  EchoCanceller3(const EchoCanceller3Config& config,
+                 int sample_rate_hz,
+                 size_t num_render_channels,
+                 size_t num_capture_channels,
+                 std::unique_ptr<BlockProcessor> block_processor);
+  ~EchoCanceller3() override;
+  EchoCanceller3(const EchoCanceller3&) = delete;
+  EchoCanceller3& operator=(const EchoCanceller3&) = delete;
+
+  // Analyzes and stores an internal copy of the split-band domain render
+  // signal.
+  void AnalyzeRender(AudioBuffer* render) override { AnalyzeRender(*render); }
+  // Analyzes the full-band domain capture signal to detect signal saturation.
+  void AnalyzeCapture(AudioBuffer* capture) override {
+    AnalyzeCapture(*capture);
+  }
+  // Processes the split-band domain capture signal in order to remove any echo
+  // present in the signal.
+  void ProcessCapture(AudioBuffer* capture, bool level_change) override;
+  // As above, but also returns the linear filter output.
+  void ProcessCapture(AudioBuffer* capture,
+                      AudioBuffer* linear_output,
+                      bool level_change) override;
+  // Collects current metrics from the echo canceller.
+  Metrics GetMetrics() const override;
+  // Provides an optional external estimate of the audio buffer delay.
+  void SetAudioBufferDelay(int delay_ms) override;
+
+  bool ActiveProcessing() const override;
+
+  // Signals whether an external detector has detected echo leakage from the
+  // echo canceller.
+  // Note that in the case echo leakage has been flagged, it should be
+  // unflagged once it is no longer occurring.
+  void UpdateEchoLeakageStatus(bool leakage_detected) {
+    RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+    block_processor_->UpdateEchoLeakageStatus(leakage_detected);
+  }
+
+  // Produces a default configuration that is suitable for a certain
+  // combination of render and capture channels.
+  static EchoCanceller3Config CreateDefaultConfig(size_t num_render_channels,
+                                                  size_t num_capture_channels);
+
+ private:
+  class RenderWriter;
+
+  // Empties the render SwapQueue.
+  void EmptyRenderQueue();
+
+  // Analyzes and stores an internal copy of the split-band domain render
+  // signal.
+  void AnalyzeRender(const AudioBuffer& render);
+  // Analyzes the full-band domain capture signal to detect signal saturation.
+  void AnalyzeCapture(const AudioBuffer& capture);
+
+  rtc::RaceChecker capture_race_checker_;
+  rtc::RaceChecker render_race_checker_;
+
+  // State that is accessed by the AnalyzeRender call.
+  std::unique_ptr<RenderWriter> render_writer_
+      RTC_GUARDED_BY(render_race_checker_);
+
+  // State that may be accessed by the capture thread.
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const EchoCanceller3Config config_;
+  const int sample_rate_hz_;
+  const int num_bands_;
+  const size_t num_render_channels_;
+  const size_t num_capture_channels_;
+  std::unique_ptr<BlockFramer> linear_output_framer_
+      RTC_GUARDED_BY(capture_race_checker_);
+  BlockFramer output_framer_ RTC_GUARDED_BY(capture_race_checker_);
+  FrameBlocker capture_blocker_ RTC_GUARDED_BY(capture_race_checker_);
+  FrameBlocker render_blocker_ RTC_GUARDED_BY(capture_race_checker_);
+  SwapQueue<std::vector<std::vector<std::vector<float>>>,
+            Aec3RenderQueueItemVerifier>
+      render_transfer_queue_;
+  std::unique_ptr<BlockProcessor> block_processor_
+      RTC_GUARDED_BY(capture_race_checker_);
+  std::vector<std::vector<std::vector<float>>> render_queue_output_frame_
+      RTC_GUARDED_BY(capture_race_checker_);
+  bool saturated_microphone_signal_ RTC_GUARDED_BY(capture_race_checker_) =
+      false;
+  std::vector<std::vector<std::vector<float>>> render_block_
+      RTC_GUARDED_BY(capture_race_checker_);
+  std::unique_ptr<std::vector<std::vector<std::vector<float>>>>
+      linear_output_block_ RTC_GUARDED_BY(capture_race_checker_);
+  std::vector<std::vector<std::vector<float>>> capture_block_
+      RTC_GUARDED_BY(capture_race_checker_);
+  std::vector<std::vector<rtc::ArrayView<float>>> render_sub_frame_view_
+      RTC_GUARDED_BY(capture_race_checker_);
+  std::vector<std::vector<rtc::ArrayView<float>>> linear_output_sub_frame_view_
+      RTC_GUARDED_BY(capture_race_checker_);
+  std::vector<std::vector<rtc::ArrayView<float>>> capture_sub_frame_view_
+      RTC_GUARDED_BY(capture_race_checker_);
+  BlockDelayBuffer block_delay_buffer_ RTC_GUARDED_BY(capture_race_checker_);
+  ApiCallJitterMetrics api_call_metrics_ RTC_GUARDED_BY(capture_race_checker_);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ECHO_CANCELLER3_H_
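Before the unit tests, a sketch of the per-10 ms call pattern the header above documents. This is a simplified, single-threaded illustration; in practice AnalyzeRender() typically runs on the render thread while the other calls run on the capture thread:

#include "audio_processing/audio_buffer.h"
#include "audio_processing/aec3/echo_canceller3.h"

// One 10 ms iteration. Both buffers are assumed to already be in the
// split-band domain for sample rates above 16 kHz (see
// AudioBuffer::SplitIntoFrequencyBands()).
void ProcessOneFrame(webrtc::EchoCanceller3& aec3,
                     webrtc::AudioBuffer& render,
                     webrtc::AudioBuffer& capture,
                     bool level_change) {
  aec3.AnalyzeRender(&render);    // Queue the far-end (loudspeaker) frame.
  aec3.AnalyzeCapture(&capture);  // Full-band saturation detection.
  aec3.ProcessCapture(&capture, level_change);  // Echo removal, in place.
}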
diff --git a/audio_processing/aec3/echo_canceller3_unittest.cc b/audio_processing/aec3/echo_canceller3_unittest.cc
new file mode 100644
index 0000000..4fc68ff
--- /dev/null
+++ b/audio_processing/aec3/echo_canceller3_unittest.cc
@@ -0,0 +1,714 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_canceller3.h"
+
+#include <deque>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_processor.h"
+#include "modules/audio_processing/aec3/frame_blocker.h"
+#include "modules/audio_processing/aec3/mock/mock_block_processor.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/high_pass_filter.h"
+#include "modules/audio_processing/utility/cascaded_biquad_filter.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::StrictMock;
+
+// Populates the frame with linearly increasing sample values for each band,
+// with a band-specific offset, in order to allow simple bitexactness
+// verification for each band.
+void PopulateInputFrame(size_t frame_length,
+                        size_t num_bands,
+                        size_t frame_index,
+                        float* const* frame,
+                        int offset) {
+  for (size_t k = 0; k < num_bands; ++k) {
+    for (size_t i = 0; i < frame_length; ++i) {
+      float value = static_cast<float>(frame_index * frame_length + i) + offset;
+      frame[k][i] = (value > 0 ? 5000 * k + value : 0);
+    }
+  }
+}
+
+// Populates the frame with linearly increasing sample values.
+void PopulateInputFrame(size_t frame_length,
+                        size_t frame_index,
+                        float* frame,
+                        int offset) {
+  for (size_t i = 0; i < frame_length; ++i) {
+    float value = static_cast<float>(frame_index * frame_length + i) + offset;
+    frame[i] = std::max(value, 0.f);
+  }
+}
+
+// Verifies that the samples in the output frame are identical to the samples
+// that were produced for the input frame, with an offset in order to
+// compensate for buffering delays.
+bool VerifyOutputFrameBitexactness(size_t frame_length,
+                                   size_t num_bands,
+                                   size_t frame_index,
+                                   const float* const* frame,
+                                   int offset) {
+  float reference_frame_data[kMaxNumBands][2 * kSubFrameLength];
+  float* reference_frame[kMaxNumBands];
+  for (size_t k = 0; k < num_bands; ++k) {
+    reference_frame[k] = &reference_frame_data[k][0];
+  }
+
+  PopulateInputFrame(frame_length, num_bands, frame_index, reference_frame,
+                     offset);
+  for (size_t k = 0; k < num_bands; ++k) {
+    for (size_t i = 0; i < frame_length; ++i) {
+      if (reference_frame[k][i] != frame[k][i]) {
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+bool VerifyOutputFrameBitexactness(rtc::ArrayView<const float> reference,
+                                   rtc::ArrayView<const float> frame,
+                                   int offset) {
+  for (size_t k = 0; k < frame.size(); ++k) {
+    int reference_index = static_cast<int>(k) + offset;
+    if (reference_index >= 0) {
+      if (reference[reference_index] != frame[k]) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+// Class for testing that the capture data is properly received by the block
+// processor and that the processor data is properly passed to the
+// EchoCanceller3 output.
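The -64 offsets used with these helpers account for the latency of the FrameBlocker/BlockFramer pair: under the assumption that the output framer starts one 64-sample block ahead (primed with zeros), output sample n equals input sample n - 64. A toy model of that net effect:

#include <cstdio>
#include <deque>

int main() {
  std::deque<float> fifo(64, 0.f);  // Primed with one block of zeros.
  for (int n = 0; n < 160; ++n) {
    fifo.push_back(static_cast<float>(n));  // Input sample n enters...
    float out = fifo.front();               // ...and the oldest sample leaves.
    fifo.pop_front();
    if (n == 80) {
      std::printf("out[%d] = %.0f (== in[%d])\n", n, out, n - 64);
    }
  }
  return 0;
}

With that latency in mind, the first test double follows.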
+class CaptureTransportVerificationProcessor : public BlockProcessor { + public: + explicit CaptureTransportVerificationProcessor(size_t num_bands) {} + ~CaptureTransportVerificationProcessor() override = default; + + void ProcessCapture( + bool level_change, + bool saturated_microphone_signal, + std::vector>>* linear_output, + std::vector>>* capture_block) override {} + + void BufferRender( + const std::vector>>& block) override {} + + void UpdateEchoLeakageStatus(bool leakage_detected) override {} + + void GetMetrics(EchoControl::Metrics* metrics) const override {} + + void SetAudioBufferDelay(int delay_ms) override {} + + private: + RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(CaptureTransportVerificationProcessor); +}; + +// Class for testing that the render data is properly received by the block +// processor. +class RenderTransportVerificationProcessor : public BlockProcessor { + public: + explicit RenderTransportVerificationProcessor(size_t num_bands) {} + ~RenderTransportVerificationProcessor() override = default; + + void ProcessCapture( + bool level_change, + bool saturated_microphone_signal, + std::vector>>* linear_output, + std::vector>>* capture_block) override { + std::vector>> render_block = + received_render_blocks_.front(); + received_render_blocks_.pop_front(); + capture_block->swap(render_block); + } + + void BufferRender( + const std::vector>>& block) override { + received_render_blocks_.push_back(block); + } + + void UpdateEchoLeakageStatus(bool leakage_detected) override {} + + void GetMetrics(EchoControl::Metrics* metrics) const override {} + + void SetAudioBufferDelay(int delay_ms) override {} + + private: + std::deque>>> + received_render_blocks_; + RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RenderTransportVerificationProcessor); +}; + +class EchoCanceller3Tester { + public: + explicit EchoCanceller3Tester(int sample_rate_hz) + : sample_rate_hz_(sample_rate_hz), + num_bands_(NumBandsForRate(sample_rate_hz_)), + frame_length_(160), + fullband_frame_length_(rtc::CheckedDivExact(sample_rate_hz_, 100)), + capture_buffer_(fullband_frame_length_ * 100, + 1, + fullband_frame_length_ * 100, + 1, + fullband_frame_length_ * 100, + 1), + render_buffer_(fullband_frame_length_ * 100, + 1, + fullband_frame_length_ * 100, + 1, + fullband_frame_length_ * 100, + 1) {} + + // Verifies that the capture data is properly received by the block processor + // and that the processor data is properly passed to the EchoCanceller3 + // output. + void RunCaptureTransportVerificationTest() { + EchoCanceller3 aec3( + EchoCanceller3Config(), sample_rate_hz_, 1, 1, + std::unique_ptr( + new CaptureTransportVerificationProcessor(num_bands_))); + + for (size_t frame_index = 0; frame_index < kNumFramesToProcess; + ++frame_index) { + aec3.AnalyzeCapture(&capture_buffer_); + OptionalBandSplit(); + PopulateInputFrame(frame_length_, num_bands_, frame_index, + &capture_buffer_.split_bands(0)[0], 0); + PopulateInputFrame(frame_length_, frame_index, + &render_buffer_.channels()[0][0], 0); + + aec3.AnalyzeRender(&render_buffer_); + aec3.ProcessCapture(&capture_buffer_, false); + EXPECT_TRUE(VerifyOutputFrameBitexactness( + frame_length_, num_bands_, frame_index, + &capture_buffer_.split_bands(0)[0], -64)); + } + } + + // Test method for testing that the render data is properly received by the + // block processor. 
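RenderTransportVerificationProcessor above effectively turns the canceller into a loopback: each ProcessCapture() call swaps in the oldest buffered render block, so the capture output should reproduce the high-pass-filtered render input, delayed only by the framing latency. A reduced sketch of that queue discipline (illustrative only, not the test code itself):

#include <cassert>
#include <deque>
#include <vector>

int main() {
  std::deque<std::vector<float>> received_render;  // Mirrors the test double.
  for (int b = 0; b < 3; ++b) {
    received_render.push_back(std::vector<float>(64, static_cast<float>(b)));
    // "ProcessCapture": hand back the oldest render block as capture output.
    std::vector<float> capture_out = received_render.front();
    received_render.pop_front();
    assert(capture_out[0] == static_cast<float>(b));
  }
  return 0;
}

The test method below exercises exactly this, comparing the capture output against the high-pass-filtered render input.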
+ void RunRenderTransportVerificationTest() { + EchoCanceller3 aec3( + EchoCanceller3Config(), sample_rate_hz_, 1, 1, + std::unique_ptr( + new RenderTransportVerificationProcessor(num_bands_))); + + std::vector> render_input(1); + std::vector capture_output; + for (size_t frame_index = 0; frame_index < kNumFramesToProcess; + ++frame_index) { + aec3.AnalyzeCapture(&capture_buffer_); + OptionalBandSplit(); + PopulateInputFrame(frame_length_, num_bands_, frame_index, + &capture_buffer_.split_bands(0)[0], 100); + PopulateInputFrame(frame_length_, num_bands_, frame_index, + &render_buffer_.split_bands(0)[0], 0); + + for (size_t k = 0; k < frame_length_; ++k) { + render_input[0].push_back(render_buffer_.split_bands(0)[0][k]); + } + aec3.AnalyzeRender(&render_buffer_); + aec3.ProcessCapture(&capture_buffer_, false); + for (size_t k = 0; k < frame_length_; ++k) { + capture_output.push_back(capture_buffer_.split_bands(0)[0][k]); + } + } + HighPassFilter hp_filter(16000, 1); + hp_filter.Process(&render_input); + + EXPECT_TRUE( + VerifyOutputFrameBitexactness(render_input[0], capture_output, -64)); + } + + // Verifies that information about echo path changes are properly propagated + // to the block processor. + // The cases tested are: + // -That no set echo path change flags are received when there is no echo path + // change. + // -That set echo path change flags are received and continues to be received + // as long as echo path changes are flagged. + // -That set echo path change flags are no longer received when echo path + // change events stop being flagged. + enum class EchoPathChangeTestVariant { kNone, kOneSticky, kOneNonSticky }; + + void RunEchoPathChangeVerificationTest( + EchoPathChangeTestVariant echo_path_change_test_variant) { + constexpr size_t kNumFullBlocksPerFrame = 160 / kBlockSize; + constexpr size_t kExpectedNumBlocksToProcess = + (kNumFramesToProcess * 160) / kBlockSize; + std::unique_ptr> + block_processor_mock( + new StrictMock()); + EXPECT_CALL(*block_processor_mock, BufferRender(_)) + .Times(kExpectedNumBlocksToProcess); + EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(_)).Times(0); + + switch (echo_path_change_test_variant) { + case EchoPathChangeTestVariant::kNone: + EXPECT_CALL(*block_processor_mock, ProcessCapture(false, _, _, _)) + .Times(kExpectedNumBlocksToProcess); + break; + case EchoPathChangeTestVariant::kOneSticky: + EXPECT_CALL(*block_processor_mock, ProcessCapture(true, _, _, _)) + .Times(kExpectedNumBlocksToProcess); + break; + case EchoPathChangeTestVariant::kOneNonSticky: + EXPECT_CALL(*block_processor_mock, ProcessCapture(true, _, _, _)) + .Times(kNumFullBlocksPerFrame); + EXPECT_CALL(*block_processor_mock, ProcessCapture(false, _, _, _)) + .Times(kExpectedNumBlocksToProcess - kNumFullBlocksPerFrame); + break; + } + + EchoCanceller3 aec3(EchoCanceller3Config(), sample_rate_hz_, 1, 1, + std::move(block_processor_mock)); + + for (size_t frame_index = 0; frame_index < kNumFramesToProcess; + ++frame_index) { + bool echo_path_change = false; + switch (echo_path_change_test_variant) { + case EchoPathChangeTestVariant::kNone: + break; + case EchoPathChangeTestVariant::kOneSticky: + echo_path_change = true; + break; + case EchoPathChangeTestVariant::kOneNonSticky: + if (frame_index == 0) { + echo_path_change = true; + } + break; + } + + aec3.AnalyzeCapture(&capture_buffer_); + OptionalBandSplit(); + + PopulateInputFrame(frame_length_, num_bands_, frame_index, + &capture_buffer_.split_bands(0)[0], 0); + PopulateInputFrame(frame_length_, frame_index, 
+ &render_buffer_.channels()[0][0], 0); + + aec3.AnalyzeRender(&render_buffer_); + aec3.ProcessCapture(&capture_buffer_, echo_path_change); + } + } + + // Test for verifying that echo leakage information is being properly passed + // to the processor. + // The cases tested are: + // -That no method calls are received when they should not. + // -That false values are received each time they are flagged. + // -That true values are received each time they are flagged. + // -That a false value is received when flagged after a true value has been + // flagged. + enum class EchoLeakageTestVariant { + kNone, + kFalseSticky, + kTrueSticky, + kTrueNonSticky + }; + + void RunEchoLeakageVerificationTest( + EchoLeakageTestVariant leakage_report_variant) { + constexpr size_t kExpectedNumBlocksToProcess = + (kNumFramesToProcess * 160) / kBlockSize; + std::unique_ptr> + block_processor_mock( + new StrictMock()); + EXPECT_CALL(*block_processor_mock, BufferRender(_)) + .Times(kExpectedNumBlocksToProcess); + EXPECT_CALL(*block_processor_mock, ProcessCapture(_, _, _, _)) + .Times(kExpectedNumBlocksToProcess); + + switch (leakage_report_variant) { + case EchoLeakageTestVariant::kNone: + EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(_)).Times(0); + break; + case EchoLeakageTestVariant::kFalseSticky: + EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(false)) + .Times(1); + break; + case EchoLeakageTestVariant::kTrueSticky: + EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(true)) + .Times(1); + break; + case EchoLeakageTestVariant::kTrueNonSticky: { + ::testing::InSequence s; + EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(true)) + .Times(1); + EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(false)) + .Times(kNumFramesToProcess - 1); + } break; + } + + EchoCanceller3 aec3(EchoCanceller3Config(), sample_rate_hz_, 1, 1, + std::move(block_processor_mock)); + + for (size_t frame_index = 0; frame_index < kNumFramesToProcess; + ++frame_index) { + switch (leakage_report_variant) { + case EchoLeakageTestVariant::kNone: + break; + case EchoLeakageTestVariant::kFalseSticky: + if (frame_index == 0) { + aec3.UpdateEchoLeakageStatus(false); + } + break; + case EchoLeakageTestVariant::kTrueSticky: + if (frame_index == 0) { + aec3.UpdateEchoLeakageStatus(true); + } + break; + case EchoLeakageTestVariant::kTrueNonSticky: + if (frame_index == 0) { + aec3.UpdateEchoLeakageStatus(true); + } else { + aec3.UpdateEchoLeakageStatus(false); + } + break; + } + + aec3.AnalyzeCapture(&capture_buffer_); + OptionalBandSplit(); + + PopulateInputFrame(frame_length_, num_bands_, frame_index, + &capture_buffer_.split_bands(0)[0], 0); + PopulateInputFrame(frame_length_, frame_index, + &render_buffer_.channels()[0][0], 0); + + aec3.AnalyzeRender(&render_buffer_); + aec3.ProcessCapture(&capture_buffer_, false); + } + } + + // This verifies that saturation information is properly passed to the + // BlockProcessor. + // The cases tested are: + // -That no saturation event is passed to the processor if there is no + // saturation. + // -That one frame with one negative saturated sample value is reported to be + // saturated and that following non-saturated frames are properly reported as + // not being saturated. + // -That one frame with one positive saturated sample value is reported to be + // saturated and that following non-saturated frames are properly reported as + // not being saturated. 
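The saturation path hinges on a simple per-sample predicate applied in AnalyzeCapture(). A sketch of the assumed behavior (the exact threshold used by DetectSaturation() in aec3_common.h may sit slightly inside full scale; the value below is an assumption for illustration):

#include <cstddef>

// Assumed predicate: flag the frame once any sample reaches the 16-bit
// full-scale range.
bool DetectSaturationSketch(const float* samples, size_t num_samples) {
  for (size_t i = 0; i < num_samples; ++i) {
    if (samples[i] >= 32767.f || samples[i] <= -32768.f) {
      return true;
    }
  }
  return false;
}

The enum and test method that follow drive exactly those two saturating sample values, one negative and one positive, at sample 10 of the first frame.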
+ enum class SaturationTestVariant { kNone, kOneNegative, kOnePositive }; + + void RunCaptureSaturationVerificationTest( + SaturationTestVariant saturation_variant) { + const size_t kNumFullBlocksPerFrame = 160 / kBlockSize; + const size_t kExpectedNumBlocksToProcess = + (kNumFramesToProcess * 160) / kBlockSize; + std::unique_ptr> + block_processor_mock( + new StrictMock()); + EXPECT_CALL(*block_processor_mock, BufferRender(_)) + .Times(kExpectedNumBlocksToProcess); + EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(_)).Times(0); + + switch (saturation_variant) { + case SaturationTestVariant::kNone: + EXPECT_CALL(*block_processor_mock, ProcessCapture(_, false, _, _)) + .Times(kExpectedNumBlocksToProcess); + break; + case SaturationTestVariant::kOneNegative: { + ::testing::InSequence s; + EXPECT_CALL(*block_processor_mock, ProcessCapture(_, true, _, _)) + .Times(kNumFullBlocksPerFrame); + EXPECT_CALL(*block_processor_mock, ProcessCapture(_, false, _, _)) + .Times(kExpectedNumBlocksToProcess - kNumFullBlocksPerFrame); + } break; + case SaturationTestVariant::kOnePositive: { + ::testing::InSequence s; + EXPECT_CALL(*block_processor_mock, ProcessCapture(_, true, _, _)) + .Times(kNumFullBlocksPerFrame); + EXPECT_CALL(*block_processor_mock, ProcessCapture(_, false, _, _)) + .Times(kExpectedNumBlocksToProcess - kNumFullBlocksPerFrame); + } break; + } + + EchoCanceller3 aec3(EchoCanceller3Config(), sample_rate_hz_, 1, 1, + std::move(block_processor_mock)); + for (size_t frame_index = 0; frame_index < kNumFramesToProcess; + ++frame_index) { + for (int k = 0; k < fullband_frame_length_; ++k) { + capture_buffer_.channels()[0][k] = 0.f; + } + switch (saturation_variant) { + case SaturationTestVariant::kNone: + break; + case SaturationTestVariant::kOneNegative: + if (frame_index == 0) { + capture_buffer_.channels()[0][10] = -32768.f; + } + break; + case SaturationTestVariant::kOnePositive: + if (frame_index == 0) { + capture_buffer_.channels()[0][10] = 32767.f; + } + break; + } + + aec3.AnalyzeCapture(&capture_buffer_); + OptionalBandSplit(); + + PopulateInputFrame(frame_length_, num_bands_, frame_index, + &capture_buffer_.split_bands(0)[0], 0); + PopulateInputFrame(frame_length_, num_bands_, frame_index, + &render_buffer_.split_bands(0)[0], 0); + + aec3.AnalyzeRender(&render_buffer_); + aec3.ProcessCapture(&capture_buffer_, false); + } + } + + // This test verifies that the swapqueue is able to handle jitter in the + // capture and render API calls. 
+  void RunRenderSwapQueueVerificationTest() {
+    const EchoCanceller3Config config;
+    EchoCanceller3 aec3(
+        config, sample_rate_hz_, 1, 1,
+        std::unique_ptr<BlockProcessor>(
+            new RenderTransportVerificationProcessor(num_bands_)));
+
+    std::vector<std::vector<float>> render_input(1);
+    std::vector<float> capture_output;
+
+    for (size_t frame_index = 0; frame_index < kRenderTransferQueueSizeFrames;
+         ++frame_index) {
+      if (sample_rate_hz_ > 16000) {
+        render_buffer_.SplitIntoFrequencyBands();
+      }
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &render_buffer_.split_bands(0)[0], 0);
+
+      if (sample_rate_hz_ > 16000) {
+        render_buffer_.SplitIntoFrequencyBands();
+      }
+
+      for (size_t k = 0; k < frame_length_; ++k) {
+        render_input[0].push_back(render_buffer_.split_bands(0)[0][k]);
+      }
+      aec3.AnalyzeRender(&render_buffer_);
+    }
+
+    for (size_t frame_index = 0; frame_index < kRenderTransferQueueSizeFrames;
+         ++frame_index) {
+      aec3.AnalyzeCapture(&capture_buffer_);
+      if (sample_rate_hz_ > 16000) {
+        capture_buffer_.SplitIntoFrequencyBands();
+      }
+
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &capture_buffer_.split_bands(0)[0], 0);
+
+      aec3.ProcessCapture(&capture_buffer_, false);
+      for (size_t k = 0; k < frame_length_; ++k) {
+        capture_output.push_back(capture_buffer_.split_bands(0)[0][k]);
+      }
+    }
+    HighPassFilter hp_filter(16000, 1);
+    hp_filter.Process(&render_input);
+
+    EXPECT_TRUE(
+        VerifyOutputFrameBitexactness(render_input[0], capture_output, -64));
+  }
+
+  // This test verifies that a buffer overrun in the render swapqueue is
+  // properly reported.
+  void RunRenderPipelineSwapQueueOverrunReturnValueTest() {
+    EchoCanceller3 aec3(EchoCanceller3Config(), sample_rate_hz_, 1, 1);
+
+    constexpr size_t kRenderTransferQueueSize = 30;
+    for (size_t k = 0; k < 2; ++k) {
+      for (size_t frame_index = 0; frame_index < kRenderTransferQueueSize;
+           ++frame_index) {
+        if (sample_rate_hz_ > 16000) {
+          render_buffer_.SplitIntoFrequencyBands();
+        }
+        PopulateInputFrame(frame_length_, frame_index,
+                           &render_buffer_.channels()[0][0], 0);
+
+        aec3.AnalyzeRender(&render_buffer_);
+      }
+    }
+  }
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+  // Verifies that the check for the number of bands in the AnalyzeRender
+  // input is correct by adjusting the sample rates of EchoCanceller3 and the
+  // input AudioBuffer to have a different number of bands.
+  void RunAnalyzeRenderNumBandsCheckVerification() {
+    // Set aec3_sample_rate_hz to be different from sample_rate_hz_ in such a
+    // way that the number of bands for the rates are different.
+    const int aec3_sample_rate_hz = sample_rate_hz_ == 48000 ? 32000 : 48000;
+    EchoCanceller3 aec3(EchoCanceller3Config(), aec3_sample_rate_hz, 1, 1);
+    PopulateInputFrame(frame_length_, 0, &render_buffer_.channels_f()[0][0], 0);
+
+    EXPECT_DEATH(aec3.AnalyzeRender(&render_buffer_), "");
+  }
+
+  // Verifies that the check for the number of bands in the ProcessCapture
+  // input is correct by adjusting the sample rates of EchoCanceller3 and the
+  // input AudioBuffer to have a different number of bands.
+  void RunProcessCaptureNumBandsCheckVerification() {
+    // Set aec3_sample_rate_hz to be different from sample_rate_hz_ in such a
+    // way that the number of bands for the rates are different.
+    const int aec3_sample_rate_hz = sample_rate_hz_ == 48000 ?
32000 : 48000; + EchoCanceller3 aec3(EchoCanceller3Config(), aec3_sample_rate_hz, 1, 1); + PopulateInputFrame(frame_length_, num_bands_, 0, + &capture_buffer_.split_bands_f(0)[0], 100); + EXPECT_DEATH(aec3.ProcessCapture(&capture_buffer_, false), ""); + } + +#endif + + private: + void OptionalBandSplit() { + if (sample_rate_hz_ > 16000) { + capture_buffer_.SplitIntoFrequencyBands(); + render_buffer_.SplitIntoFrequencyBands(); + } + } + + static constexpr size_t kNumFramesToProcess = 20; + const int sample_rate_hz_; + const size_t num_bands_; + const size_t frame_length_; + const int fullband_frame_length_; + AudioBuffer capture_buffer_; + AudioBuffer render_buffer_; + + RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EchoCanceller3Tester); +}; + +std::string ProduceDebugText(int sample_rate_hz) { + rtc::StringBuilder ss; + ss << "Sample rate: " << sample_rate_hz; + return ss.Release(); +} + +std::string ProduceDebugText(int sample_rate_hz, int variant) { + rtc::StringBuilder ss; + ss << "Sample rate: " << sample_rate_hz << ", variant: " << variant; + return ss.Release(); +} + +} // namespace + +TEST(EchoCanceller3Buffering, CaptureBitexactness) { + for (auto rate : {16000, 32000, 48000}) { + SCOPED_TRACE(ProduceDebugText(rate)); + EchoCanceller3Tester(rate).RunCaptureTransportVerificationTest(); + } +} + +TEST(EchoCanceller3Buffering, RenderBitexactness) { + for (auto rate : {16000, 32000, 48000}) { + SCOPED_TRACE(ProduceDebugText(rate)); + EchoCanceller3Tester(rate).RunRenderTransportVerificationTest(); + } +} + +TEST(EchoCanceller3Buffering, RenderSwapQueue) { + EchoCanceller3Tester(16000).RunRenderSwapQueueVerificationTest(); +} + +TEST(EchoCanceller3Buffering, RenderSwapQueueOverrunReturnValue) { + for (auto rate : {16000, 32000, 48000}) { + SCOPED_TRACE(ProduceDebugText(rate)); + EchoCanceller3Tester(rate) + .RunRenderPipelineSwapQueueOverrunReturnValueTest(); + } +} + +TEST(EchoCanceller3Messaging, CaptureSaturation) { + auto variants = {EchoCanceller3Tester::SaturationTestVariant::kNone, + EchoCanceller3Tester::SaturationTestVariant::kOneNegative, + EchoCanceller3Tester::SaturationTestVariant::kOnePositive}; + for (auto rate : {16000, 32000, 48000}) { + for (auto variant : variants) { + SCOPED_TRACE(ProduceDebugText(rate, static_cast(variant))); + EchoCanceller3Tester(rate).RunCaptureSaturationVerificationTest(variant); + } + } +} + +TEST(EchoCanceller3Messaging, EchoPathChange) { + auto variants = { + EchoCanceller3Tester::EchoPathChangeTestVariant::kNone, + EchoCanceller3Tester::EchoPathChangeTestVariant::kOneSticky, + EchoCanceller3Tester::EchoPathChangeTestVariant::kOneNonSticky}; + for (auto rate : {16000, 32000, 48000}) { + for (auto variant : variants) { + SCOPED_TRACE(ProduceDebugText(rate, static_cast(variant))); + EchoCanceller3Tester(rate).RunEchoPathChangeVerificationTest(variant); + } + } +} + +TEST(EchoCanceller3Messaging, EchoLeakage) { + auto variants = { + EchoCanceller3Tester::EchoLeakageTestVariant::kNone, + EchoCanceller3Tester::EchoLeakageTestVariant::kFalseSticky, + EchoCanceller3Tester::EchoLeakageTestVariant::kTrueSticky, + EchoCanceller3Tester::EchoLeakageTestVariant::kTrueNonSticky}; + for (auto rate : {16000, 32000, 48000}) { + for (auto variant : variants) { + SCOPED_TRACE(ProduceDebugText(rate, static_cast(variant))); + EchoCanceller3Tester(rate).RunEchoLeakageVerificationTest(variant); + } + } +} + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) + +TEST(EchoCanceller3InputCheck, WrongCaptureNumBandsCheckVerification) { + for (auto rate : 
{16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    EchoCanceller3Tester(rate).RunProcessCaptureNumBandsCheckVerification();
+  }
+}
+
+// Verifies that the verification of null input to the capture processing API
+// call works.
+TEST(EchoCanceller3InputCheck, NullCaptureProcessingParameter) {
+  EXPECT_DEATH(EchoCanceller3(EchoCanceller3Config(), 16000, 1, 1)
+                   .ProcessCapture(nullptr, false),
+               "");
+}
+
+// Verifies the check for correct sample rate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoCanceller3InputCheck, DISABLED_WrongSampleRate) {
+  ApmDataDumper data_dumper(0);
+  EXPECT_DEATH(EchoCanceller3(EchoCanceller3Config(), 8001, 1, 1), "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/echo_path_delay_estimator.cc b/audio_processing/aec3/echo_path_delay_estimator.cc
new file mode 100644
index 0000000..5b0f095
--- /dev/null
+++ b/audio_processing/aec3/echo_path_delay_estimator.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "audio_processing/aec3/echo_path_delay_estimator.h"
+
+#include <array>
+
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/downsampled_render_buffer.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+EchoPathDelayEstimator::EchoPathDelayEstimator(
+    ApmDataDumper* data_dumper,
+    const EchoCanceller3Config& config,
+    size_t num_capture_channels)
+    : data_dumper_(data_dumper),
+      down_sampling_factor_(config.delay.down_sampling_factor),
+      sub_block_size_(down_sampling_factor_ != 0
+                          ? kBlockSize / down_sampling_factor_
+                          : kBlockSize),
+      capture_mixer_(num_capture_channels,
+                     config.delay.capture_alignment_mixing),
+      capture_decimator_(down_sampling_factor_),
+      matched_filter_(
+          data_dumper_,
+          DetectOptimization(),
+          sub_block_size_,
+          kMatchedFilterWindowSizeSubBlocks,
+          config.delay.num_filters,
+          kMatchedFilterAlignmentShiftSizeSubBlocks,
+          config.delay.down_sampling_factor == 8
+              ? config.render_levels.poor_excitation_render_limit_ds8
+              : config.render_levels.poor_excitation_render_limit,
+          config.delay.delay_estimate_smoothing,
+          config.delay.delay_candidate_detection_threshold),
+      matched_filter_lag_aggregator_(data_dumper_,
+                                     matched_filter_.GetMaxFilterLag(),
+                                     config.delay.delay_selection_thresholds) {
+  RTC_DCHECK(data_dumper);
+  RTC_DCHECK(down_sampling_factor_ > 0);
+}
+
+EchoPathDelayEstimator::~EchoPathDelayEstimator() = default;
+
+void EchoPathDelayEstimator::Reset(bool reset_delay_confidence) {
+  Reset(true, reset_delay_confidence);
+}
+
+absl::optional<DelayEstimate> EchoPathDelayEstimator::EstimateDelay(
+    const DownsampledRenderBuffer& render_buffer,
+    const std::vector<std::vector<float>>& capture) {
+  RTC_DCHECK_EQ(kBlockSize, capture[0].size());
+
+  std::array<float, kBlockSize> downsampled_capture_data;
+  rtc::ArrayView<float> downsampled_capture(downsampled_capture_data.data(),
+                                            sub_block_size_);
+
+  std::array<float, kBlockSize> downmixed_capture;
+  capture_mixer_.ProduceOutput(capture, downmixed_capture);
+  capture_decimator_.Decimate(downmixed_capture, downsampled_capture);
+  data_dumper_->DumpWav("aec3_capture_decimator_output",
+                        downsampled_capture.size(), downsampled_capture.data(),
+                        16000 / down_sampling_factor_, 1);
+  matched_filter_.Update(render_buffer, downsampled_capture);
+
+  absl::optional<DelayEstimate> aggregated_matched_filter_lag =
+      matched_filter_lag_aggregator_.Aggregate(
+          matched_filter_.GetLagEstimates());
+
+  if (aggregated_matched_filter_lag &&
+      (*aggregated_matched_filter_lag).quality ==
+          DelayEstimate::Quality::kRefined)
+    clockdrift_detector_.Update((*aggregated_matched_filter_lag).delay);
+
+  // TODO(peah): Move this logging outside of this class once EchoCanceller3
+  // development is done.
+  data_dumper_->DumpRaw(
+      "aec3_echo_path_delay_estimator_delay",
+      aggregated_matched_filter_lag
+          ? static_cast<int>(aggregated_matched_filter_lag->delay *
+                             down_sampling_factor_)
+          : -1);
+
+  if (aggregated_matched_filter_lag) {
+    aggregated_matched_filter_lag->delay *= down_sampling_factor_;
+  }
+
+  if (old_aggregated_lag_ && aggregated_matched_filter_lag &&
+      old_aggregated_lag_->delay == aggregated_matched_filter_lag->delay) {
+    ++consistent_estimate_counter_;
+  } else {
+    consistent_estimate_counter_ = 0;
+  }
+  old_aggregated_lag_ = aggregated_matched_filter_lag;
+  constexpr size_t kNumBlocksPerSecondBy2 = kNumBlocksPerSecond / 2;
+  if (consistent_estimate_counter_ > kNumBlocksPerSecondBy2) {
+    Reset(false, false);
+  }
+
+  return aggregated_matched_filter_lag;
+}
+
+void EchoPathDelayEstimator::Reset(bool reset_lag_aggregator,
+                                   bool reset_delay_confidence) {
+  if (reset_lag_aggregator) {
+    matched_filter_lag_aggregator_.Reset(reset_delay_confidence);
+  }
+  matched_filter_.Reset();
+  old_aggregated_lag_ = absl::nullopt;
+  consistent_estimate_counter_ = 0;
+}
+}  // namespace webrtc
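Two pieces of arithmetic in EstimateDelay() above are worth spelling out: the matched filter operates on sub-blocks of kBlockSize / down_sampling_factor samples, and its lag is scaled back by the same factor before being returned. A compile-time check of both, assuming kBlockSize == 64 as in aec3_common.h:

#include <cstddef>

constexpr size_t kBlockSize = 64;

constexpr size_t SubBlockSize(size_t down_sampling_factor) {
  return down_sampling_factor != 0 ? kBlockSize / down_sampling_factor
                                   : kBlockSize;
}

static_assert(SubBlockSize(4) == 16, "64-sample blocks shrink to 16 samples");
static_assert(SubBlockSize(8) == 8, "and to 8 samples for factor 8");

// A lag of 25 down-sampled samples at factor 4 corresponds to 100 samples at
// the 16 kHz band rate, i.e. 6.25 ms.
static_assert(25 * 4 == 100, "lag scales by the decimation factor");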
diff --git a/audio_processing/aec3/echo_path_delay_estimator.h b/audio_processing/aec3/echo_path_delay_estimator.h
new file mode 100644
index 0000000..b999fd6
--- /dev/null
+++ b/audio_processing/aec3/echo_path_delay_estimator.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_DELAY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_DELAY_ESTIMATOR_H_
+
+#include <stddef.h>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/alignment_mixer.h"
+#include "audio_processing/aec3/clockdrift_detector.h"
+#include "audio_processing/aec3/decimator.h"
+#include "audio_processing/aec3/delay_estimate.h"
+#include "audio_processing/aec3/matched_filter.h"
+#include "audio_processing/aec3/matched_filter_lag_aggregator.h"
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct DownsampledRenderBuffer;
+struct EchoCanceller3Config;
+
+// Estimates the delay of the echo path.
+class EchoPathDelayEstimator {
+ public:
+  EchoPathDelayEstimator(ApmDataDumper* data_dumper,
+                         const EchoCanceller3Config& config,
+                         size_t num_capture_channels);
+  ~EchoPathDelayEstimator();
+
+  // Resets the estimation. If the delay confidence is reset, the reset
+  // behavior is as if the call is restarted.
+  void Reset(bool reset_delay_confidence);
+
+  // Produces a delay estimate if one is available.
+  absl::optional<DelayEstimate> EstimateDelay(
+      const DownsampledRenderBuffer& render_buffer,
+      const std::vector<std::vector<float>>& capture);
+
+  // Logs delay estimator properties.
+  void LogDelayEstimationProperties(int sample_rate_hz, size_t shift) const {
+    matched_filter_.LogFilterProperties(sample_rate_hz, shift,
+                                        down_sampling_factor_);
+  }
+
+  // Returns the level of detected clockdrift.
+  ClockdriftDetector::Level Clockdrift() const {
+    return clockdrift_detector_.ClockdriftLevel();
+  }
+
+ private:
+  ApmDataDumper* const data_dumper_;
+  const size_t down_sampling_factor_;
+  const size_t sub_block_size_;
+  AlignmentMixer capture_mixer_;
+  Decimator capture_decimator_;
+  MatchedFilter matched_filter_;
+  MatchedFilterLagAggregator matched_filter_lag_aggregator_;
+  absl::optional<DelayEstimate> old_aggregated_lag_;
+  size_t consistent_estimate_counter_ = 0;
+  ClockdriftDetector clockdrift_detector_;
+
+  // Internal reset method with more granularity.
+  void Reset(bool reset_lag_aggregator, bool reset_delay_confidence);
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(EchoPathDelayEstimator);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_DELAY_ESTIMATOR_H_
diff --git a/audio_processing/aec3/echo_path_delay_estimator_unittest.cc b/audio_processing/aec3/echo_path_delay_estimator_unittest.cc
new file mode 100644
index 0000000..ec64533
--- /dev/null
+++ b/audio_processing/aec3/echo_path_delay_estimator_unittest.cc
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */ + +#include "modules/audio_processing/aec3/echo_path_delay_estimator.h" + +#include +#include + +#include "api/audio/echo_canceller3_config.h" +#include "modules/audio_processing/aec3/aec3_common.h" +#include "modules/audio_processing/aec3/render_delay_buffer.h" +#include "modules/audio_processing/logging/apm_data_dumper.h" +#include "modules/audio_processing/test/echo_canceller_test_tools.h" +#include "rtc_base/random.h" +#include "rtc_base/strings/string_builder.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +std::string ProduceDebugText(size_t delay, size_t down_sampling_factor) { + rtc::StringBuilder ss; + ss << "Delay: " << delay; + ss << ", Down sampling factor: " << down_sampling_factor; + return ss.Release(); +} + +} // namespace + +// Verifies that the basic API calls work. +TEST(EchoPathDelayEstimator, BasicApiCalls) { + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + for (size_t num_capture_channels : {1, 2, 4}) { + for (size_t num_render_channels : {1, 2, 3, 6, 8}) { + ApmDataDumper data_dumper(0); + EchoCanceller3Config config; + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, kSampleRateHz, + num_render_channels)); + EchoPathDelayEstimator estimator(&data_dumper, config, + num_capture_channels); + std::vector>> render( + kNumBands, std::vector>( + num_render_channels, std::vector(kBlockSize))); + std::vector> capture(num_capture_channels, + std::vector(kBlockSize)); + for (size_t k = 0; k < 100; ++k) { + render_delay_buffer->Insert(render); + estimator.EstimateDelay( + render_delay_buffer->GetDownsampledRenderBuffer(), capture); + } + } + } +} + +// Verifies that the delay estimator produces correct delay for artificially +// delayed signals. +TEST(EchoPathDelayEstimator, DelayEstimation) { + constexpr size_t kNumRenderChannels = 1; + constexpr size_t kNumCaptureChannels = 1; + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + + Random random_generator(42U); + std::vector>> render( + kNumBands, std::vector>( + kNumRenderChannels, std::vector(kBlockSize))); + std::vector> capture(kNumCaptureChannels, + std::vector(kBlockSize)); + ApmDataDumper data_dumper(0); + constexpr size_t kDownSamplingFactors[] = {2, 4, 8}; + for (auto down_sampling_factor : kDownSamplingFactors) { + EchoCanceller3Config config; + config.delay.down_sampling_factor = down_sampling_factor; + config.delay.num_filters = 10; + for (size_t delay_samples : {30, 64, 150, 200, 800, 4000}) { + SCOPED_TRACE(ProduceDebugText(delay_samples, down_sampling_factor)); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, kSampleRateHz, kNumRenderChannels)); + DelayBuffer signal_delay_buffer(delay_samples); + EchoPathDelayEstimator estimator(&data_dumper, config, + kNumCaptureChannels); + + absl::optional estimated_delay_samples; + for (size_t k = 0; k < (500 + (delay_samples) / kBlockSize); ++k) { + RandomizeSampleVector(&random_generator, render[0][0]); + signal_delay_buffer.Delay(render[0][0], capture[0]); + render_delay_buffer->Insert(render); + + if (k == 0) { + render_delay_buffer->Reset(); + } + + render_delay_buffer->PrepareCaptureProcessing(); + + auto estimate = estimator.EstimateDelay( + render_delay_buffer->GetDownsampledRenderBuffer(), capture); + + if (estimate) { + estimated_delay_samples = estimate; + } + } + + if (estimated_delay_samples) { + // Allow estimated delay to be off by one sample in the down-sampled + // domain. 
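Concretely: for delay_samples == 150 and a down-sampling factor of 8 (hypothetical values of the loop variables above), the true delay maps to 150 / 8 == 18 in the down-sampled domain, while an estimate of 152 samples maps to 19, so the one-sample tolerance absorbs the quantization introduced by the decimation. A quick check of that rounding:

static_assert(150 / 8 == 18, "true delay, in the down-sampled domain");
static_assert(152 / 8 == 19, "a slightly-off estimate lands one sample away");
static_assert(152 / 8 - 150 / 8 == 1, "within the one-sample tolerance");

The comparison code follows.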
+ size_t delay_ds = delay_samples / down_sampling_factor; + size_t estimated_delay_ds = + estimated_delay_samples->delay / down_sampling_factor; + EXPECT_NEAR(delay_ds, estimated_delay_ds, 1); + } else { + ADD_FAILURE(); + } + } + } +} + +// Verifies that the delay estimator does not produce delay estimates for render +// signals of low level. +TEST(EchoPathDelayEstimator, NoDelayEstimatesForLowLevelRenderSignals) { + constexpr size_t kNumRenderChannels = 1; + constexpr size_t kNumCaptureChannels = 1; + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + Random random_generator(42U); + EchoCanceller3Config config; + std::vector>> render( + kNumBands, std::vector>( + kNumRenderChannels, std::vector(kBlockSize))); + std::vector> capture(kNumCaptureChannels, + std::vector(kBlockSize)); + ApmDataDumper data_dumper(0); + EchoPathDelayEstimator estimator(&data_dumper, config, kNumCaptureChannels); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(EchoCanceller3Config(), kSampleRateHz, + kNumRenderChannels)); + for (size_t k = 0; k < 100; ++k) { + RandomizeSampleVector(&random_generator, render[0][0]); + for (auto& render_k : render[0][0]) { + render_k *= 100.f / 32767.f; + } + std::copy(render[0][0].begin(), render[0][0].end(), capture[0].begin()); + render_delay_buffer->Insert(render); + render_delay_buffer->PrepareCaptureProcessing(); + EXPECT_FALSE(estimator.EstimateDelay( + render_delay_buffer->GetDownsampledRenderBuffer(), capture)); + } +} + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) + +// Verifies the check for the render blocksize. +// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH +// tests on test bots has been fixed. +TEST(EchoPathDelayEstimator, DISABLED_WrongRenderBlockSize) { + ApmDataDumper data_dumper(0); + EchoCanceller3Config config; + EchoPathDelayEstimator estimator(&data_dumper, config, 1); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, 48000, 1)); + std::vector> capture(1, std::vector(kBlockSize)); + EXPECT_DEATH(estimator.EstimateDelay( + render_delay_buffer->GetDownsampledRenderBuffer(), capture), + ""); +} + +// Verifies the check for the capture blocksize. +// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH +// tests on test bots has been fixed. +TEST(EchoPathDelayEstimator, WrongCaptureBlockSize) { + ApmDataDumper data_dumper(0); + EchoCanceller3Config config; + EchoPathDelayEstimator estimator(&data_dumper, config, 1); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, 48000, 1)); + std::vector> capture(1, + std::vector(kBlockSize - 1)); + EXPECT_DEATH(estimator.EstimateDelay( + render_delay_buffer->GetDownsampledRenderBuffer(), capture), + ""); +} + +// Verifies the check for non-null data dumper. +TEST(EchoPathDelayEstimator, NullDataDumper) { + EXPECT_DEATH(EchoPathDelayEstimator(nullptr, EchoCanceller3Config(), 1), ""); +} + +#endif + +} // namespace webrtc diff --git a/audio_processing/aec3/echo_path_variability.cc b/audio_processing/aec3/echo_path_variability.cc new file mode 100644 index 0000000..5c0e4ba --- /dev/null +++ b/audio_processing/aec3/echo_path_variability.cc @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/aec3/echo_path_variability.h" + +namespace webrtc { + +EchoPathVariability::EchoPathVariability(bool gain_change, + DelayAdjustment delay_change, + bool clock_drift) + : gain_change(gain_change), + delay_change(delay_change), + clock_drift(clock_drift) {} + +} // namespace webrtc diff --git a/audio_processing/aec3/echo_path_variability.h b/audio_processing/aec3/echo_path_variability.h new file mode 100644 index 0000000..adf0d7a --- /dev/null +++ b/audio_processing/aec3/echo_path_variability.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_VARIABILITY_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_VARIABILITY_H_ + +namespace webrtc { + +struct EchoPathVariability { + enum class DelayAdjustment { + kNone, + kBufferReadjustment, + kBufferFlush, + kDelayReset, + kNewDetectedDelay + }; + + EchoPathVariability(bool gain_change, + DelayAdjustment delay_change, + bool clock_drift); + + bool AudioPathChanged() const { + return gain_change || delay_change != DelayAdjustment::kNone; + } + bool gain_change; + DelayAdjustment delay_change; + bool clock_drift; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_VARIABILITY_H_ diff --git a/audio_processing/aec3/echo_path_variability_unittest.cc b/audio_processing/aec3/echo_path_variability_unittest.cc new file mode 100644 index 0000000..0f10f95 --- /dev/null +++ b/audio_processing/aec3/echo_path_variability_unittest.cc @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/echo_path_variability.h" + +#include "test/gtest.h" + +namespace webrtc { + +TEST(EchoPathVariability, CorrectBehavior) { + // Test correct passing and reporting of the gain change information. 
+  EchoPathVariability v(
+      true, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, false);
+  EXPECT_TRUE(v.gain_change);
+  EXPECT_TRUE(v.delay_change ==
+              EchoPathVariability::DelayAdjustment::kNewDetectedDelay);
+  EXPECT_TRUE(v.AudioPathChanged());
+  EXPECT_FALSE(v.clock_drift);
+
+  v = EchoPathVariability(true, EchoPathVariability::DelayAdjustment::kNone,
+                          false);
+  EXPECT_TRUE(v.gain_change);
+  EXPECT_TRUE(v.delay_change == EchoPathVariability::DelayAdjustment::kNone);
+  EXPECT_TRUE(v.AudioPathChanged());
+  EXPECT_FALSE(v.clock_drift);
+
+  v = EchoPathVariability(
+      false, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, false);
+  EXPECT_FALSE(v.gain_change);
+  EXPECT_TRUE(v.delay_change ==
+              EchoPathVariability::DelayAdjustment::kNewDetectedDelay);
+  EXPECT_TRUE(v.AudioPathChanged());
+  EXPECT_FALSE(v.clock_drift);
+
+  v = EchoPathVariability(false, EchoPathVariability::DelayAdjustment::kNone,
+                          false);
+  EXPECT_FALSE(v.gain_change);
+  EXPECT_TRUE(v.delay_change == EchoPathVariability::DelayAdjustment::kNone);
+  EXPECT_FALSE(v.AudioPathChanged());
+  EXPECT_FALSE(v.clock_drift);
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/echo_remover.cc b/audio_processing/aec3/echo_remover.cc
new file mode 100644
index 0000000..b37587f
--- /dev/null
+++ b/audio_processing/aec3/echo_remover.cc
@@ -0,0 +1,495 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "audio_processing/aec3/echo_remover.h"
+
+#include <math.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <memory>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/aec3_fft.h"
+#include "audio_processing/aec3/aec_state.h"
+#include "audio_processing/aec3/comfort_noise_generator.h"
+#include "audio_processing/aec3/echo_path_variability.h"
+#include "audio_processing/aec3/echo_remover_metrics.h"
+#include "audio_processing/aec3/fft_data.h"
+#include "audio_processing/aec3/render_buffer.h"
+#include "audio_processing/aec3/render_signal_analyzer.h"
+#include "audio_processing/aec3/residual_echo_estimator.h"
+#include "audio_processing/aec3/subtractor.h"
+#include "audio_processing/aec3/subtractor_output.h"
+#include "audio_processing/aec3/suppression_filter.h"
+#include "audio_processing/aec3/suppression_gain.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomic_ops.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+// Maximum number of channels for which the capture channel data is stored on
+// the stack. If the number of channels is larger than this, the data is
+// stored using scratch memory that is pre-allocated on the heap. This
+// partitioning avoids wasting heap space for the common low channel counts,
+// while at the same time not limiting the number of supported capture
+// channels to a fixed stack-imposed maximum.
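+// Illustrative sketch (an editorial addition, not present in the original
+// sources): with kMaxNumChannelsOnStack = 2, a stereo capture uses only the
+// stack-allocated scratch arrays, while e.g. an 8-channel capture rebinds the
+// working views onto the pre-allocated heap vectors, as done further below:
+//
+//   rtc::ArrayView<FftData> E(E_stack.data(), num_capture_channels_);
+//   if (NumChannelsOnHeap(num_capture_channels_) > 0) {
+//     E = rtc::ArrayView<FftData>(E_heap_.data(), num_capture_channels_);
+//   }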
+constexpr size_t kMaxNumChannelsOnStack = 2;
+
+// Returns the number of channels to allocate on the heap; this is nonzero
+// only when the number of capture channels exceeds the pre-defined number of
+// channels that fit on the stack.
+size_t NumChannelsOnHeap(size_t num_capture_channels) {
+  return num_capture_channels > kMaxNumChannelsOnStack ? num_capture_channels
+                                                       : 0;
+}
+
+void LinearEchoPower(const FftData& E,
+                     const FftData& Y,
+                     std::array<float, kFftLengthBy2Plus1>* S2) {
+  for (size_t k = 0; k < E.re.size(); ++k) {
+    (*S2)[k] = (Y.re[k] - E.re[k]) * (Y.re[k] - E.re[k]) +
+               (Y.im[k] - E.im[k]) * (Y.im[k] - E.im[k]);
+  }
+}
+
+// Fades between two input signals using a fixed-size transition.
+void SignalTransition(rtc::ArrayView<const float> from,
+                      rtc::ArrayView<const float> to,
+                      rtc::ArrayView<float> out) {
+  if (from == to) {
+    RTC_DCHECK_EQ(to.size(), out.size());
+    std::copy(to.begin(), to.end(), out.begin());
+  } else {
+    constexpr size_t kTransitionSize = 30;
+    constexpr float kOneByTransitionSizePlusOne = 1.f / (kTransitionSize + 1);
+
+    RTC_DCHECK_EQ(from.size(), to.size());
+    RTC_DCHECK_EQ(from.size(), out.size());
+    RTC_DCHECK_LE(kTransitionSize, out.size());
+
+    for (size_t k = 0; k < kTransitionSize; ++k) {
+      float a = (k + 1) * kOneByTransitionSizePlusOne;
+      out[k] = a * to[k] + (1.f - a) * from[k];
+    }
+
+    std::copy(to.begin() + kTransitionSize, to.end(),
+              out.begin() + kTransitionSize);
+  }
+}
+
+// Computes a windowed (square root Hanning) padded FFT and updates the
+// related memory.
+void WindowedPaddedFft(const Aec3Fft& fft,
+                       rtc::ArrayView<const float> v,
+                       rtc::ArrayView<float> v_old,
+                       FftData* V) {
+  fft.PaddedFft(v, v_old, Aec3Fft::Window::kSqrtHanning, V);
+  std::copy(v.begin(), v.end(), v_old.begin());
+}
+
+// Class for removing the echo from the capture signal.
+class EchoRemoverImpl final : public EchoRemover {
+ public:
+  EchoRemoverImpl(const EchoCanceller3Config& config,
+                  int sample_rate_hz,
+                  size_t num_render_channels,
+                  size_t num_capture_channels);
+  ~EchoRemoverImpl() override;
+  EchoRemoverImpl(const EchoRemoverImpl&) = delete;
+  EchoRemoverImpl& operator=(const EchoRemoverImpl&) = delete;
+
+  void GetMetrics(EchoControl::Metrics* metrics) const override;
+
+  // Removes the echo from a block of samples from the capture signal. The
+  // supplied render signal is assumed to be pre-aligned with the capture
+  // signal.
+  void ProcessCapture(
+      EchoPathVariability echo_path_variability,
+      bool capture_signal_saturation,
+      const absl::optional<DelayEstimate>& external_delay,
+      RenderBuffer* render_buffer,
+      std::vector<std::vector<std::vector<float>>>* linear_output,
+      std::vector<std::vector<std::vector<float>>>* capture) override;
+
+  // Updates the status of whether echo leakage is detected in the output of
+  // the echo remover.
+  void UpdateEchoLeakageStatus(bool leakage_detected) override {
+    echo_leakage_detected_ = leakage_detected;
+  }
+
+ private:
+  // Selects whichever of the shadow and main linear filter outputs is most
+  // appropriate to pass to the suppressor, and forms the linear filter output
+  // by smoothly transitioning between the two.
+  void FormLinearFilterOutput(const SubtractorOutput& subtractor_output,
+                              rtc::ArrayView<float> output);
+
+  static int instance_count_;
+  const EchoCanceller3Config config_;
+  const Aec3Fft fft_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const Aec3Optimization optimization_;
+  const int sample_rate_hz_;
+  const size_t num_render_channels_;
+  const size_t num_capture_channels_;
+  const bool use_shadow_filter_output_;
+  Subtractor subtractor_;
+  SuppressionGain suppression_gain_;
+  ComfortNoiseGenerator cng_;
+  SuppressionFilter suppression_filter_;
+  RenderSignalAnalyzer render_signal_analyzer_;
+  ResidualEchoEstimator residual_echo_estimator_;
+  bool echo_leakage_detected_ = false;
+  AecState aec_state_;
+  EchoRemoverMetrics metrics_;
+  std::vector<std::array<float, kBlockSize>> e_old_;
+  std::vector<std::array<float, kBlockSize>> y_old_;
+  size_t block_counter_ = 0;
+  int gain_change_hangover_ = 0;
+  bool main_filter_output_last_selected_ = true;
+
+  std::vector<std::array<float, kBlockSize>> e_heap_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> Y2_heap_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> E2_heap_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> R2_heap_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> S2_linear_heap_;
+  std::vector<FftData> Y_heap_;
+  std::vector<FftData> E_heap_;
+  std::vector<FftData> comfort_noise_heap_;
+  std::vector<FftData> high_band_comfort_noise_heap_;
+  std::vector<SubtractorOutput> subtractor_output_heap_;
+};
+
+int EchoRemoverImpl::instance_count_ = 0;
+
+EchoRemoverImpl::EchoRemoverImpl(const EchoCanceller3Config& config,
+                                 int sample_rate_hz,
+                                 size_t num_render_channels,
+                                 size_t num_capture_channels)
+    : config_(config),
+      fft_(),
+      data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      optimization_(DetectOptimization()),
+      sample_rate_hz_(sample_rate_hz),
+      num_render_channels_(num_render_channels),
+      num_capture_channels_(num_capture_channels),
+      use_shadow_filter_output_(
+          config_.filter.enable_shadow_filter_output_usage),
+      subtractor_(config,
+                  num_render_channels_,
+                  num_capture_channels_,
+                  data_dumper_.get(),
+                  optimization_),
+      suppression_gain_(config_,
+                        optimization_,
+                        sample_rate_hz,
+                        num_capture_channels),
+      cng_(optimization_, num_capture_channels_),
+      suppression_filter_(optimization_,
+                          sample_rate_hz_,
+                          num_capture_channels_),
+      render_signal_analyzer_(config_),
+      residual_echo_estimator_(config_, num_render_channels),
+      aec_state_(config_, num_capture_channels_),
+      e_old_(num_capture_channels_, {0.f}),
+      y_old_(num_capture_channels_, {0.f}),
+      e_heap_(NumChannelsOnHeap(num_capture_channels_), {0.f}),
+      Y2_heap_(NumChannelsOnHeap(num_capture_channels_)),
+      E2_heap_(NumChannelsOnHeap(num_capture_channels_)),
+      R2_heap_(NumChannelsOnHeap(num_capture_channels_)),
+      S2_linear_heap_(NumChannelsOnHeap(num_capture_channels_)),
+      Y_heap_(NumChannelsOnHeap(num_capture_channels_)),
+      E_heap_(NumChannelsOnHeap(num_capture_channels_)),
+      comfort_noise_heap_(NumChannelsOnHeap(num_capture_channels_)),
+      high_band_comfort_noise_heap_(NumChannelsOnHeap(num_capture_channels_)),
+      subtractor_output_heap_(NumChannelsOnHeap(num_capture_channels_)) {
+  RTC_DCHECK(ValidFullBandRate(sample_rate_hz));
+}
+
+EchoRemoverImpl::~EchoRemoverImpl() = default;
+
+void EchoRemoverImpl::GetMetrics(EchoControl::Metrics* metrics) const {
+  // Echo return loss (ERL) is inverted to go from gain to attenuation.
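+  // For example, an ERL power gain of 0.01 (the echo carries 1/100th of the
+  // render power) maps to -10 * log10(0.01) = 20 dB of echo return loss.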
+ metrics->echo_return_loss = -10.0 * std::log10(aec_state_.ErlTimeDomain()); + metrics->echo_return_loss_enhancement = + Log2TodB(aec_state_.FullBandErleLog2()); +} + +void EchoRemoverImpl::ProcessCapture( + EchoPathVariability echo_path_variability, + bool capture_signal_saturation, + const absl::optional& external_delay, + RenderBuffer* render_buffer, + std::vector>>* linear_output, + std::vector>>* capture) { + ++block_counter_; + const std::vector>>& x = + render_buffer->Block(0); + std::vector>>* y = capture; + RTC_DCHECK(render_buffer); + RTC_DCHECK(y); + RTC_DCHECK_EQ(x.size(), NumBandsForRate(sample_rate_hz_)); + RTC_DCHECK_EQ(y->size(), NumBandsForRate(sample_rate_hz_)); + RTC_DCHECK_EQ(x[0].size(), num_render_channels_); + RTC_DCHECK_EQ((*y)[0].size(), num_capture_channels_); + RTC_DCHECK_EQ(x[0][0].size(), kBlockSize); + RTC_DCHECK_EQ((*y)[0][0].size(), kBlockSize); + + // Stack allocated data to use when the number of channels is low. + std::array, kMaxNumChannelsOnStack> e_stack; + std::array, kMaxNumChannelsOnStack> + Y2_stack; + std::array, kMaxNumChannelsOnStack> + E2_stack; + std::array, kMaxNumChannelsOnStack> + R2_stack; + std::array, kMaxNumChannelsOnStack> + S2_linear_stack; + std::array Y_stack; + std::array E_stack; + std::array comfort_noise_stack; + std::array high_band_comfort_noise_stack; + std::array subtractor_output_stack; + + rtc::ArrayView> e(e_stack.data(), + num_capture_channels_); + rtc::ArrayView> Y2( + Y2_stack.data(), num_capture_channels_); + rtc::ArrayView> E2( + E2_stack.data(), num_capture_channels_); + rtc::ArrayView> R2( + R2_stack.data(), num_capture_channels_); + rtc::ArrayView> S2_linear( + S2_linear_stack.data(), num_capture_channels_); + rtc::ArrayView Y(Y_stack.data(), num_capture_channels_); + rtc::ArrayView E(E_stack.data(), num_capture_channels_); + rtc::ArrayView comfort_noise(comfort_noise_stack.data(), + num_capture_channels_); + rtc::ArrayView high_band_comfort_noise( + high_band_comfort_noise_stack.data(), num_capture_channels_); + rtc::ArrayView subtractor_output( + subtractor_output_stack.data(), num_capture_channels_); + if (NumChannelsOnHeap(num_capture_channels_) > 0) { + // If the stack-allocated space is too small, use the heap for storing the + // microphone data. 
+ e = rtc::ArrayView>(e_heap_.data(), + num_capture_channels_); + Y2 = rtc::ArrayView>( + Y2_heap_.data(), num_capture_channels_); + E2 = rtc::ArrayView>( + E2_heap_.data(), num_capture_channels_); + R2 = rtc::ArrayView>( + R2_heap_.data(), num_capture_channels_); + S2_linear = rtc::ArrayView>( + S2_linear_heap_.data(), num_capture_channels_); + Y = rtc::ArrayView(Y_heap_.data(), num_capture_channels_); + E = rtc::ArrayView(E_heap_.data(), num_capture_channels_); + comfort_noise = rtc::ArrayView(comfort_noise_heap_.data(), + num_capture_channels_); + high_band_comfort_noise = rtc::ArrayView( + high_band_comfort_noise_heap_.data(), num_capture_channels_); + subtractor_output = rtc::ArrayView( + subtractor_output_heap_.data(), num_capture_channels_); + } + + data_dumper_->DumpWav("aec3_echo_remover_capture_input", kBlockSize, + &(*y)[0][0][0], 16000, 1); + data_dumper_->DumpWav("aec3_echo_remover_render_input", kBlockSize, + &x[0][0][0], 16000, 1); + data_dumper_->DumpRaw("aec3_echo_remover_capture_input", (*y)[0][0]); + data_dumper_->DumpRaw("aec3_echo_remover_render_input", x[0][0]); + + aec_state_.UpdateCaptureSaturation(capture_signal_saturation); + + if (echo_path_variability.AudioPathChanged()) { + // Ensure that the gain change is only acted on once per frame. + if (echo_path_variability.gain_change) { + if (gain_change_hangover_ == 0) { + constexpr int kMaxBlocksPerFrame = 3; + gain_change_hangover_ = kMaxBlocksPerFrame; + rtc::LoggingSeverity log_level = + config_.delay.log_warning_on_delay_changes ? rtc::LS_WARNING + : rtc::LS_INFO; + RTC_LOG_V(log_level) + << "Gain change detected at block " << block_counter_; + } else { + echo_path_variability.gain_change = false; + } + } + + subtractor_.HandleEchoPathChange(echo_path_variability); + aec_state_.HandleEchoPathChange(echo_path_variability); + + if (echo_path_variability.delay_change != + EchoPathVariability::DelayAdjustment::kNone) { + suppression_gain_.SetInitialState(true); + } + } + if (gain_change_hangover_ > 0) { + --gain_change_hangover_; + } + + // Analyze the render signal. + render_signal_analyzer_.Update(*render_buffer, + aec_state_.MinDirectPathFilterDelay()); + + // State transition. + if (aec_state_.TransitionTriggered()) { + subtractor_.ExitInitialState(); + suppression_gain_.SetInitialState(false); + } + + // Perform linear echo cancellation. + subtractor_.Process(*render_buffer, (*y)[0], render_signal_analyzer_, + aec_state_, subtractor_output); + + // Compute spectra. + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + FormLinearFilterOutput(subtractor_output[ch], e[ch]); + WindowedPaddedFft(fft_, (*y)[0][ch], y_old_[ch], &Y[ch]); + WindowedPaddedFft(fft_, e[ch], e_old_[ch], &E[ch]); + LinearEchoPower(E[ch], Y[ch], &S2_linear[ch]); + Y[ch].Spectrum(optimization_, Y2[ch]); + E[ch].Spectrum(optimization_, E2[ch]); + } + + // Optionally return the linear filter output. + if (linear_output) { + RTC_DCHECK_GE(1, linear_output->size()); + RTC_DCHECK_EQ(num_capture_channels_, linear_output[0].size()); + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + RTC_DCHECK_EQ(kBlockSize, (*linear_output)[0][ch].size()); + std::copy(e[ch].begin(), e[ch].end(), (*linear_output)[0][ch].begin()); + } + } + + // Update the AEC state information. + aec_state_.Update(external_delay, subtractor_.FilterFrequencyResponses(), + subtractor_.FilterImpulseResponses(), *render_buffer, E2, + Y2, subtractor_output); + + const auto& Y_fft = aec_state_.UseLinearFilterOutput() ? 
E : Y; + + data_dumper_->DumpWav("aec3_output_linear", kBlockSize, &(*y)[0][0][0], 16000, + 1); + data_dumper_->DumpWav("aec3_output_linear2", kBlockSize, &e[0][0], 16000, 1); + + // Estimate the residual echo power. + residual_echo_estimator_.Estimate(aec_state_, *render_buffer, S2_linear, Y2, + R2); + + cng_.Compute(aec_state_.SaturatedCapture(), Y2, comfort_noise, + high_band_comfort_noise); + + if (aec_state_.UsableLinearEstimate()) { + // E2 is bound by Y2. + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + std::transform(E2[ch].begin(), E2[ch].end(), Y2[ch].begin(), + E2[ch].begin(), + [](float a, float b) { return std::min(a, b); }); + } + } + + const auto& nearend_spectrum = aec_state_.UsableLinearEstimate() ? E2 : Y2; + + const auto& echo_spectrum = + aec_state_.UsableLinearEstimate() ? S2_linear : R2; + + float high_bands_gain; + std::array G; + suppression_gain_.GetGain(nearend_spectrum, echo_spectrum, R2, + cng_.NoiseSpectrum(), render_signal_analyzer_, + aec_state_, x, &high_bands_gain, &G); + + suppression_filter_.ApplyGain(comfort_noise, high_band_comfort_noise, G, + high_bands_gain, Y_fft, y); + + // Update the metrics. + metrics_.Update(aec_state_, cng_.NoiseSpectrum()[0], G); + + // Debug outputs for the purpose of development and analysis. + data_dumper_->DumpWav("aec3_echo_estimate", kBlockSize, + &subtractor_output[0].s_main[0], 16000, 1); + data_dumper_->DumpRaw("aec3_output", (*y)[0][0]); + data_dumper_->DumpRaw("aec3_narrow_render", + render_signal_analyzer_.NarrowPeakBand() ? 1 : 0); + data_dumper_->DumpRaw("aec3_N2", cng_.NoiseSpectrum()[0]); + data_dumper_->DumpRaw("aec3_suppressor_gain", G); + data_dumper_->DumpWav("aec3_output", + rtc::ArrayView(&(*y)[0][0][0], kBlockSize), + 16000, 1); + data_dumper_->DumpRaw("aec3_using_subtractor_output[0]", + aec_state_.UseLinearFilterOutput() ? 1 : 0); + data_dumper_->DumpRaw("aec3_E2", E2[0]); + data_dumper_->DumpRaw("aec3_S2_linear", S2_linear[0]); + data_dumper_->DumpRaw("aec3_Y2", Y2[0]); + data_dumper_->DumpRaw( + "aec3_X2", render_buffer->Spectrum( + aec_state_.MinDirectPathFilterDelay())[/*channel=*/0]); + data_dumper_->DumpRaw("aec3_R2", R2[0]); + data_dumper_->DumpRaw("aec3_filter_delay", + aec_state_.MinDirectPathFilterDelay()); + data_dumper_->DumpRaw("aec3_capture_saturation", + aec_state_.SaturatedCapture() ? 1 : 0); +} + +void EchoRemoverImpl::FormLinearFilterOutput( + const SubtractorOutput& subtractor_output, + rtc::ArrayView output) { + RTC_DCHECK_EQ(subtractor_output.e_main.size(), output.size()); + RTC_DCHECK_EQ(subtractor_output.e_shadow.size(), output.size()); + bool use_main_output = true; + if (use_shadow_filter_output_) { + // As the output of the main adaptive filter generally should be better + // than the shadow filter output, add a margin and threshold for when + // choosing the shadow filter output. + if (subtractor_output.e2_shadow < 0.9f * subtractor_output.e2_main && + subtractor_output.y2 > 30.f * 30.f * kBlockSize && + (subtractor_output.s2_main > 60.f * 60.f * kBlockSize || + subtractor_output.s2_shadow > 60.f * 60.f * kBlockSize)) { + use_main_output = false; + } else { + // If the main filter is diverged, choose the filter output that has the + // lowest power. + if (subtractor_output.e2_shadow < subtractor_output.e2_main && + subtractor_output.y2 < subtractor_output.e2_main) { + use_main_output = false; + } + } + } + + SignalTransition( + main_filter_output_last_selected_ ? subtractor_output.e_main + : subtractor_output.e_shadow, + use_main_output ? 
subtractor_output.e_main : subtractor_output.e_shadow, + output); + main_filter_output_last_selected_ = use_main_output; +} + +} // namespace + +EchoRemover* EchoRemover::Create(const EchoCanceller3Config& config, + int sample_rate_hz, + size_t num_render_channels, + size_t num_capture_channels) { + return new EchoRemoverImpl(config, sample_rate_hz, num_render_channels, + num_capture_channels); +} + +} // namespace webrtc diff --git a/audio_processing/aec3/echo_remover.h b/audio_processing/aec3/echo_remover.h new file mode 100644 index 0000000..00c03e1 --- /dev/null +++ b/audio_processing/aec3/echo_remover.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_H_ + +#include + +#include "absl/types/optional.h" +#include "api/echo_canceller3_config.h" +#include "api/echo_control.h" +#include "audio_processing/aec3/delay_estimate.h" +#include "audio_processing/aec3/echo_path_variability.h" +#include "audio_processing/aec3/render_buffer.h" + +namespace webrtc { + +// Class for removing the echo from the capture signal. +class EchoRemover { + public: + static EchoRemover* Create(const EchoCanceller3Config& config, + int sample_rate_hz, + size_t num_render_channels, + size_t num_capture_channels); + virtual ~EchoRemover() = default; + + // Get current metrics. + virtual void GetMetrics(EchoControl::Metrics* metrics) const = 0; + + // Removes the echo from a block of samples from the capture signal. The + // supplied render signal is assumed to be pre-aligned with the capture + // signal. + virtual void ProcessCapture( + EchoPathVariability echo_path_variability, + bool capture_signal_saturation, + const absl::optional& external_delay, + RenderBuffer* render_buffer, + std::vector>>* linear_output, + std::vector>>* capture) = 0; + + // Updates the status on whether echo leakage is detected in the output of the + // echo remover. + virtual void UpdateEchoLeakageStatus(bool leakage_detected) = 0; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_H_ diff --git a/audio_processing/aec3/echo_remover_metrics.cc b/audio_processing/aec3/echo_remover_metrics.cc new file mode 100644 index 0000000..83e5f54 --- /dev/null +++ b/audio_processing/aec3/echo_remover_metrics.cc @@ -0,0 +1,329 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "audio_processing/aec3/echo_remover_metrics.h"
+
+#include <math.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr float kOneByMetricsCollectionBlocks = 1.f / kMetricsCollectionBlocks;
+
+}  // namespace
+
+EchoRemoverMetrics::DbMetric::DbMetric() : DbMetric(0.f, 0.f, 0.f) {}
+EchoRemoverMetrics::DbMetric::DbMetric(float sum_value,
+                                       float floor_value,
+                                       float ceil_value)
+    : sum_value(sum_value), floor_value(floor_value), ceil_value(ceil_value) {}
+
+void EchoRemoverMetrics::DbMetric::Update(float value) {
+  sum_value += value;
+  floor_value = std::min(floor_value, value);
+  ceil_value = std::max(ceil_value, value);
+}
+
+void EchoRemoverMetrics::DbMetric::UpdateInstant(float value) {
+  sum_value = value;
+  floor_value = std::min(floor_value, value);
+  ceil_value = std::max(ceil_value, value);
+}
+
+EchoRemoverMetrics::EchoRemoverMetrics() {
+  ResetMetrics();
+}
+
+void EchoRemoverMetrics::ResetMetrics() {
+  erl_.fill(DbMetric(0.f, 10000.f, 0.000f));
+  erl_time_domain_ = DbMetric(0.f, 10000.f, 0.000f);
+  erle_.fill(DbMetric(0.f, 0.f, 1000.f));
+  erle_time_domain_ = DbMetric(0.f, 0.f, 1000.f);
+  comfort_noise_.fill(DbMetric(0.f, 100000000.f, 0.f));
+  suppressor_gain_.fill(DbMetric(0.f, 1.f, 0.f));
+  active_render_count_ = 0;
+  saturated_capture_ = false;
+}
+
+void EchoRemoverMetrics::Update(
+    const AecState& aec_state,
+    const std::array<float, kFftLengthBy2Plus1>& comfort_noise_spectrum,
+    const std::array<float, kFftLengthBy2Plus1>& suppressor_gain) {
+  metrics_reported_ = false;
+  if (++block_counter_ <= kMetricsCollectionBlocks) {
+    aec3::UpdateDbMetric(aec_state.Erl(), &erl_);
+    erl_time_domain_.UpdateInstant(aec_state.ErlTimeDomain());
+    aec3::UpdateDbMetric(aec_state.Erle()[0], &erle_);
+    erle_time_domain_.UpdateInstant(aec_state.FullBandErleLog2());
+    aec3::UpdateDbMetric(comfort_noise_spectrum, &comfort_noise_);
+    aec3::UpdateDbMetric(suppressor_gain, &suppressor_gain_);
+    active_render_count_ += (aec_state.ActiveRender() ? 1 : 0);
+    saturated_capture_ = saturated_capture_ || aec_state.SaturatedCapture();
+  } else {
+    // Spread the reporting of the metrics over several blocks to limit the
+    // per-block cost of the logarithms involved.
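+    // Sketch of the schedule implemented by the switch below: with data
+    // collected over the first kMetricsCollectionBlocks blocks, block N + 1
+    // reports the band-0 ERLE histograms, N + 2 band-1 ERLE, N + 3 and N + 4
+    // the ERL bands, later blocks the comfort noise and suppressor gain
+    // bands, and N + 10 and N + 11 the fullband ERL and ERLE, after which the
+    // counters reset and collection restarts. Only a handful of logarithms
+    // thus run in any single block.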
+ constexpr int kMetricsCollectionBlocksBy2 = kMetricsCollectionBlocks / 2; + constexpr float kComfortNoiseScaling = 1.f / (kBlockSize * kBlockSize); + switch (block_counter_) { + case kMetricsCollectionBlocks + 1: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErleBand0.Average", + aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f, + kOneByMetricsCollectionBlocks, + erle_[0].sum_value), + 0, 19, 20); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErleBand0.Max", + aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f, 1.f, + erle_[0].ceil_value), + 0, 19, 20); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErleBand0.Min", + aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f, 1.f, + erle_[0].floor_value), + 0, 19, 20); + break; + case kMetricsCollectionBlocks + 2: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErleBand1.Average", + aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f, + kOneByMetricsCollectionBlocks, + erle_[1].sum_value), + 0, 19, 20); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErleBand1.Max", + aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f, 1.f, + erle_[1].ceil_value), + 0, 19, 20); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErleBand1.Min", + aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f, 1.f, + erle_[1].floor_value), + 0, 19, 20); + break; + case kMetricsCollectionBlocks + 3: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErlBand0.Average", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, + kOneByMetricsCollectionBlocks, + erl_[0].sum_value), + 0, 59, 30); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErlBand0.Max", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f, + erl_[0].ceil_value), + 0, 59, 30); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErlBand0.Min", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f, + erl_[0].floor_value), + 0, 59, 30); + break; + case kMetricsCollectionBlocks + 4: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErlBand1.Average", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, + kOneByMetricsCollectionBlocks, + erl_[1].sum_value), + 0, 59, 30); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErlBand1.Max", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f, + erl_[1].ceil_value), + 0, 59, 30); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ErlBand1.Min", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f, + erl_[1].floor_value), + 0, 59, 30); + break; + case kMetricsCollectionBlocks + 5: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ComfortNoiseBand0.Average", + aec3::TransformDbMetricForReporting( + true, 0.f, 89.f, -90.3f, + kComfortNoiseScaling * kOneByMetricsCollectionBlocks, + comfort_noise_[0].sum_value), + 0, 89, 45); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ComfortNoiseBand0.Max", + aec3::TransformDbMetricForReporting(true, 0.f, 89.f, -90.3f, + kComfortNoiseScaling, + comfort_noise_[0].ceil_value), + 0, 89, 45); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ComfortNoiseBand0.Min", + aec3::TransformDbMetricForReporting(true, 0.f, 89.f, -90.3f, + kComfortNoiseScaling, + comfort_noise_[0].floor_value), + 0, 89, 45); + break; + case kMetricsCollectionBlocks + 6: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ComfortNoiseBand1.Average", + aec3::TransformDbMetricForReporting( + 
true, 0.f, 89.f, -90.3f, + kComfortNoiseScaling * kOneByMetricsCollectionBlocks, + comfort_noise_[1].sum_value), + 0, 89, 45); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ComfortNoiseBand1.Max", + aec3::TransformDbMetricForReporting(true, 0.f, 89.f, -90.3f, + kComfortNoiseScaling, + comfort_noise_[1].ceil_value), + 0, 89, 45); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.ComfortNoiseBand1.Min", + aec3::TransformDbMetricForReporting(true, 0.f, 89.f, -90.3f, + kComfortNoiseScaling, + comfort_noise_[1].floor_value), + 0, 89, 45); + break; + case kMetricsCollectionBlocks + 7: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.SuppressorGainBand0.Average", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 0.f, + kOneByMetricsCollectionBlocks, + suppressor_gain_[0].sum_value), + 0, 59, 30); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.SuppressorGainBand0.Max", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 0.f, 1.f, + suppressor_gain_[0].ceil_value), + 0, 59, 30); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.SuppressorGainBand0.Min", + aec3::TransformDbMetricForReporting( + true, 0.f, 59.f, 0.f, 1.f, suppressor_gain_[0].floor_value), + 0, 59, 30); + break; + case kMetricsCollectionBlocks + 8: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.SuppressorGainBand1.Average", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 0.f, + kOneByMetricsCollectionBlocks, + suppressor_gain_[1].sum_value), + 0, 59, 30); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.SuppressorGainBand1.Max", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 0.f, 1.f, + suppressor_gain_[1].ceil_value), + 0, 59, 30); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.SuppressorGainBand1.Min", + aec3::TransformDbMetricForReporting( + true, 0.f, 59.f, 0.f, 1.f, suppressor_gain_[1].floor_value), + 0, 59, 30); + break; + case kMetricsCollectionBlocks + 9: + RTC_HISTOGRAM_BOOLEAN( + "WebRTC.Audio.EchoCanceller.UsableLinearEstimate", + static_cast(aec_state.UsableLinearEstimate() ? 1 : 0)); + RTC_HISTOGRAM_BOOLEAN( + "WebRTC.Audio.EchoCanceller.ActiveRender", + static_cast( + active_render_count_ > kMetricsCollectionBlocksBy2 ? 1 : 0)); + RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.FilterDelay", + aec_state.MinDirectPathFilterDelay(), 0, 30, + 31); + RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.EchoCanceller.CaptureSaturation", + static_cast(saturated_capture_ ? 
1 : 0)); + break; + case kMetricsCollectionBlocks + 10: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.Erl.Value", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f, + erl_time_domain_.sum_value), + 0, 59, 30); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.Erl.Max", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f, + erl_time_domain_.ceil_value), + 0, 59, 30); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.Erl.Min", + aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f, + erl_time_domain_.floor_value), + 0, 59, 30); + break; + case kMetricsCollectionBlocks + 11: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.Erle.Value", + aec3::TransformDbMetricForReporting(false, 0.f, 19.f, 0.f, 1.f, + erle_time_domain_.sum_value), + 0, 19, 20); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.Erle.Max", + aec3::TransformDbMetricForReporting(false, 0.f, 19.f, 0.f, 1.f, + erle_time_domain_.ceil_value), + 0, 19, 20); + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.Audio.EchoCanceller.Erle.Min", + aec3::TransformDbMetricForReporting(false, 0.f, 19.f, 0.f, 1.f, + erle_time_domain_.floor_value), + 0, 19, 20); + metrics_reported_ = true; + RTC_DCHECK_EQ(kMetricsReportingIntervalBlocks, block_counter_); + block_counter_ = 0; + ResetMetrics(); + break; + default: + RTC_NOTREACHED(); + break; + } + } +} + +namespace aec3 { + +void UpdateDbMetric(const std::array& value, + std::array* statistic) { + RTC_DCHECK(statistic); + // Truncation is intended in the band width computation. + constexpr int kNumBands = 2; + constexpr int kBandWidth = 65 / kNumBands; + constexpr float kOneByBandWidth = 1.f / kBandWidth; + RTC_DCHECK_EQ(kNumBands, statistic->size()); + RTC_DCHECK_EQ(65, value.size()); + for (size_t k = 0; k < statistic->size(); ++k) { + float average_band = + std::accumulate(value.begin() + kBandWidth * k, + value.begin() + kBandWidth * (k + 1), 0.f) * + kOneByBandWidth; + (*statistic)[k].Update(average_band); + } +} + +int TransformDbMetricForReporting(bool negate, + float min_value, + float max_value, + float offset, + float scaling, + float value) { + float new_value = 10.f * std::log10(value * scaling + 1e-10f) + offset; + if (negate) { + new_value = -new_value; + } + return static_cast(rtc::SafeClamp(new_value, min_value, max_value)); +} + +} // namespace aec3 + +} // namespace webrtc diff --git a/audio_processing/aec3/echo_remover_metrics.h b/audio_processing/aec3/echo_remover_metrics.h new file mode 100644 index 0000000..80b36b9 --- /dev/null +++ b/audio_processing/aec3/echo_remover_metrics.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_METRICS_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_METRICS_H_ + +#include + +#include "audio_processing/aec3/aec3_common.h" +#include "audio_processing/aec3/aec_state.h" +#include "rtc_base/constructor_magic.h" + +namespace webrtc { + +// Handles the reporting of metrics for the echo remover. 
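+// Typical usage (a sketch; this call pattern is exercised in the unit tests):
+// call Update() once per processed capture block. After
+// kMetricsReportingIntervalBlocks calls, the accumulated statistics are
+// flushed to the RTC histograms, MetricsReported() returns true for that
+// block, and collection restarts.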
+class EchoRemoverMetrics {
+ public:
+  struct DbMetric {
+    DbMetric();
+    DbMetric(float sum_value, float floor_value, float ceil_value);
+    void Update(float value);
+    void UpdateInstant(float value);
+    float sum_value;
+    float floor_value;
+    float ceil_value;
+  };
+
+  EchoRemoverMetrics();
+
+  // Updates the metrics with new data.
+  void Update(
+      const AecState& aec_state,
+      const std::array<float, kFftLengthBy2Plus1>& comfort_noise_spectrum,
+      const std::array<float, kFftLengthBy2Plus1>& suppressor_gain);
+
+  // Returns true if the metrics have just been reported, otherwise false.
+  bool MetricsReported() { return metrics_reported_; }
+
+ private:
+  // Resets the metrics.
+  void ResetMetrics();
+
+  int block_counter_ = 0;
+  std::array<DbMetric, 2> erl_;
+  DbMetric erl_time_domain_;
+  std::array<DbMetric, 2> erle_;
+  DbMetric erle_time_domain_;
+  std::array<DbMetric, 2> comfort_noise_;
+  std::array<DbMetric, 2> suppressor_gain_;
+  int active_render_count_ = 0;
+  bool saturated_capture_ = false;
+  bool metrics_reported_ = false;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(EchoRemoverMetrics);
+};
+
+namespace aec3 {
+
+// Updates a banded metric of type DbMetric with the values in the supplied
+// array.
+void UpdateDbMetric(const std::array<float, kFftLengthBy2Plus1>& value,
+                    std::array<EchoRemoverMetrics::DbMetric, 2>* statistic);
+
+// Transforms a DbMetric from the linear domain into the logarithmic domain.
+int TransformDbMetricForReporting(bool negate,
+                                  float min_value,
+                                  float max_value,
+                                  float offset,
+                                  float scaling,
+                                  float value);
+
+}  // namespace aec3
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_METRICS_H_
diff --git a/audio_processing/aec3/echo_remover_metrics_unittest.cc b/audio_processing/aec3/echo_remover_metrics_unittest.cc
new file mode 100644
index 0000000..30c6611
--- /dev/null
+++ b/audio_processing/aec3/echo_remover_metrics_unittest.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_remover_metrics.h"
+
+#include <math.h>
+
+#include <array>
+
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for non-null input.
+TEST(UpdateDbMetric, NullValue) {
+  std::array<float, kFftLengthBy2Plus1> value;
+  value.fill(0.f);
+  EXPECT_DEATH(aec3::UpdateDbMetric(value, nullptr), "");
+}
+
+#endif
+
+// Verifies the updating functionality of UpdateDbMetric.
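+// Note on the band layout assumed below: UpdateDbMetric splits the 65-bin
+// spectrum into two bands of width 65 / 2 = 32 bins (truncating integer
+// division), so bins [0, 32) feed statistic[0], bins [32, 64) feed
+// statistic[1], and the last bin (64) is ignored.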
+TEST(UpdateDbMetric, Updating) { + std::array value; + std::array statistic; + statistic.fill(EchoRemoverMetrics::DbMetric(0.f, 100.f, -100.f)); + constexpr float kValue0 = 10.f; + constexpr float kValue1 = 20.f; + std::fill(value.begin(), value.begin() + 32, kValue0); + std::fill(value.begin() + 32, value.begin() + 64, kValue1); + + aec3::UpdateDbMetric(value, &statistic); + EXPECT_FLOAT_EQ(kValue0, statistic[0].sum_value); + EXPECT_FLOAT_EQ(kValue0, statistic[0].ceil_value); + EXPECT_FLOAT_EQ(kValue0, statistic[0].floor_value); + EXPECT_FLOAT_EQ(kValue1, statistic[1].sum_value); + EXPECT_FLOAT_EQ(kValue1, statistic[1].ceil_value); + EXPECT_FLOAT_EQ(kValue1, statistic[1].floor_value); + + aec3::UpdateDbMetric(value, &statistic); + EXPECT_FLOAT_EQ(2.f * kValue0, statistic[0].sum_value); + EXPECT_FLOAT_EQ(kValue0, statistic[0].ceil_value); + EXPECT_FLOAT_EQ(kValue0, statistic[0].floor_value); + EXPECT_FLOAT_EQ(2.f * kValue1, statistic[1].sum_value); + EXPECT_FLOAT_EQ(kValue1, statistic[1].ceil_value); + EXPECT_FLOAT_EQ(kValue1, statistic[1].floor_value); +} + +// Verifies that the TransformDbMetricForReporting method produces the desired +// output for values for dBFS. +TEST(TransformDbMetricForReporting, DbFsScaling) { + std::array x; + FftData X; + std::array X2; + Aec3Fft fft; + x.fill(1000.f); + fft.ZeroPaddedFft(x, Aec3Fft::Window::kRectangular, &X); + X.Spectrum(Aec3Optimization::kNone, X2); + + float offset = -10.f * std::log10(32768.f * 32768.f); + EXPECT_NEAR(offset, -90.3f, 0.1f); + EXPECT_EQ( + static_cast(30.3f), + aec3::TransformDbMetricForReporting( + true, 0.f, 90.f, offset, 1.f / (kBlockSize * kBlockSize), X2[0])); +} + +// Verifies that the TransformDbMetricForReporting method is able to properly +// limit the output. +TEST(TransformDbMetricForReporting, Limits) { + EXPECT_EQ(0, aec3::TransformDbMetricForReporting(false, 0.f, 10.f, 0.f, 1.f, + 0.001f)); + EXPECT_EQ(10, aec3::TransformDbMetricForReporting(false, 0.f, 10.f, 0.f, 1.f, + 100.f)); +} + +// Verifies that the TransformDbMetricForReporting method is able to properly +// negate output. +TEST(TransformDbMetricForReporting, Negate) { + EXPECT_EQ(10, aec3::TransformDbMetricForReporting(true, -20.f, 20.f, 0.f, 1.f, + 0.1f)); + EXPECT_EQ(-10, aec3::TransformDbMetricForReporting(true, -20.f, 20.f, 0.f, + 1.f, 10.f)); +} + +// Verify the Update functionality of DbMetric. +TEST(DbMetric, Update) { + EchoRemoverMetrics::DbMetric metric(0.f, 20.f, -20.f); + constexpr int kNumValues = 100; + constexpr float kValue = 10.f; + for (int k = 0; k < kNumValues; ++k) { + metric.Update(kValue); + } + EXPECT_FLOAT_EQ(kValue * kNumValues, metric.sum_value); + EXPECT_FLOAT_EQ(kValue, metric.ceil_value); + EXPECT_FLOAT_EQ(kValue, metric.floor_value); +} + +// Verify the Update functionality of DbMetric. +TEST(DbMetric, UpdateInstant) { + EchoRemoverMetrics::DbMetric metric(0.f, 20.f, -20.f); + constexpr float kMinValue = -77.f; + constexpr float kMaxValue = 33.f; + constexpr float kLastValue = (kMinValue + kMaxValue) / 2.0f; + for (float value = kMinValue; value <= kMaxValue; value++) + metric.UpdateInstant(value); + metric.UpdateInstant(kLastValue); + EXPECT_FLOAT_EQ(kLastValue, metric.sum_value); + EXPECT_FLOAT_EQ(kMaxValue, metric.ceil_value); + EXPECT_FLOAT_EQ(kMinValue, metric.floor_value); +} + +// Verify the constructor functionality of DbMetric. 
+TEST(DbMetric, Constructor) { + EchoRemoverMetrics::DbMetric metric; + EXPECT_FLOAT_EQ(0.f, metric.sum_value); + EXPECT_FLOAT_EQ(0.f, metric.ceil_value); + EXPECT_FLOAT_EQ(0.f, metric.floor_value); + + metric = EchoRemoverMetrics::DbMetric(1.f, 2.f, 3.f); + EXPECT_FLOAT_EQ(1.f, metric.sum_value); + EXPECT_FLOAT_EQ(2.f, metric.floor_value); + EXPECT_FLOAT_EQ(3.f, metric.ceil_value); +} + +// Verify the general functionality of EchoRemoverMetrics. +TEST(EchoRemoverMetrics, NormalUsage) { + EchoRemoverMetrics metrics; + AecState aec_state(EchoCanceller3Config{}, 1); + std::array comfort_noise_spectrum; + std::array suppressor_gain; + comfort_noise_spectrum.fill(10.f); + suppressor_gain.fill(1.f); + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) { + metrics.Update(aec_state, comfort_noise_spectrum, suppressor_gain); + EXPECT_FALSE(metrics.MetricsReported()); + } + metrics.Update(aec_state, comfort_noise_spectrum, suppressor_gain); + EXPECT_TRUE(metrics.MetricsReported()); + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/echo_remover_unittest.cc b/audio_processing/aec3/echo_remover_unittest.cc new file mode 100644 index 0000000..d79993a --- /dev/null +++ b/audio_processing/aec3/echo_remover_unittest.cc @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/echo_remover.h" + +#include +#include +#include +#include + +#include "modules/audio_processing/aec3/aec3_common.h" +#include "modules/audio_processing/aec3/render_buffer.h" +#include "modules/audio_processing/aec3/render_delay_buffer.h" +#include "modules/audio_processing/logging/apm_data_dumper.h" +#include "modules/audio_processing/test/echo_canceller_test_tools.h" +#include "rtc_base/random.h" +#include "rtc_base/strings/string_builder.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +std::string ProduceDebugText(int sample_rate_hz) { + rtc::StringBuilder ss; + ss << "Sample rate: " << sample_rate_hz; + return ss.Release(); +} + +std::string ProduceDebugText(int sample_rate_hz, int delay) { + rtc::StringBuilder ss(ProduceDebugText(sample_rate_hz)); + ss << ", Delay: " << delay; + return ss.Release(); +} + +} // namespace + +// Verifies the basic API call sequence +TEST(EchoRemover, BasicApiCalls) { + absl::optional delay_estimate; + for (auto rate : {16000, 32000, 48000}) { + for (size_t num_render_channels : {1, 2, 8}) { + for (size_t num_capture_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(rate)); + std::unique_ptr remover( + EchoRemover::Create(EchoCanceller3Config(), rate, + num_render_channels, num_capture_channels)); + std::unique_ptr render_buffer( + RenderDelayBuffer::Create(EchoCanceller3Config(), rate, + num_render_channels)); + + std::vector>> render( + NumBandsForRate(rate), + std::vector>( + num_render_channels, std::vector(kBlockSize, 0.f))); + std::vector>> capture( + NumBandsForRate(rate), + std::vector>( + num_capture_channels, std::vector(kBlockSize, 0.f))); + for (size_t k = 0; k < 100; ++k) { + EchoPathVariability echo_path_variability( + k % 3 == 0 ? true : false, + k % 5 == 0 + ? 
EchoPathVariability::DelayAdjustment::kNewDetectedDelay
+                : EchoPathVariability::DelayAdjustment::kNone,
+            false);
+        render_buffer->Insert(render);
+        render_buffer->PrepareCaptureProcessing();
+
+        remover->ProcessCapture(
+            echo_path_variability, k % 2 == 0 ? true : false, delay_estimate,
+            render_buffer->GetRenderBuffer(), nullptr, &capture);
+      }
+      }
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for the samplerate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoRemover, DISABLED_WrongSampleRate) {
+  EXPECT_DEATH(std::unique_ptr<EchoRemover>(
+                   EchoRemover::Create(EchoCanceller3Config(), 8001, 1, 1)),
+               "");
+}
+
+// Verifies the check for the capture block size.
+TEST(EchoRemover, WrongCaptureBlockSize) {
+  absl::optional<DelayEstimate> delay_estimate;
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<EchoRemover> remover(
+        EchoRemover::Create(EchoCanceller3Config(), rate, 1, 1));
+    std::unique_ptr<RenderDelayBuffer> render_buffer(
+        RenderDelayBuffer::Create(EchoCanceller3Config(), rate, 1));
+    std::vector<std::vector<std::vector<float>>> capture(
+        NumBandsForRate(rate),
+        std::vector<std::vector<float>>(
+            1, std::vector<float>(kBlockSize - 1, 0.f)));
+    EchoPathVariability echo_path_variability(
+        false, EchoPathVariability::DelayAdjustment::kNone, false);
+    EXPECT_DEATH(remover->ProcessCapture(
+                     echo_path_variability, false, delay_estimate,
+                     render_buffer->GetRenderBuffer(), nullptr, &capture),
+                 "");
+  }
+}
+
+// Verifies the check for the number of capture bands.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoRemover, DISABLED_WrongCaptureNumBands) {
+  absl::optional<DelayEstimate> delay_estimate;
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<EchoRemover> remover(
+        EchoRemover::Create(EchoCanceller3Config(), rate, 1, 1));
+    std::unique_ptr<RenderDelayBuffer> render_buffer(
+        RenderDelayBuffer::Create(EchoCanceller3Config(), rate, 1));
+    std::vector<std::vector<std::vector<float>>> capture(
+        NumBandsForRate(rate == 48000 ? 16000 : rate + 16000),
+        std::vector<std::vector<float>>(1,
+                                        std::vector<float>(kBlockSize, 0.f)));
+    EchoPathVariability echo_path_variability(
+        false, EchoPathVariability::DelayAdjustment::kNone, false);
+    EXPECT_DEATH(remover->ProcessCapture(
+                     echo_path_variability, false, delay_estimate,
+                     render_buffer->GetRenderBuffer(), nullptr, &capture),
+                 "");
+  }
+}
+
+// Verifies the check for non-null capture block.
+TEST(EchoRemover, NullCapture) {
+  absl::optional<DelayEstimate> delay_estimate;
+  std::unique_ptr<EchoRemover> remover(
+      EchoRemover::Create(EchoCanceller3Config(), 16000, 1, 1));
+  std::unique_ptr<RenderDelayBuffer> render_buffer(
+      RenderDelayBuffer::Create(EchoCanceller3Config(), 16000, 1));
+  EchoPathVariability echo_path_variability(
+      false, EchoPathVariability::DelayAdjustment::kNone, false);
+  EXPECT_DEATH(remover->ProcessCapture(
+                   echo_path_variability, false, delay_estimate,
+                   render_buffer->GetRenderBuffer(), nullptr, nullptr),
+               "");
+}
+
+#endif
+
+// Performs a sanity check that the echo_remover is able to properly remove
+// echoes.
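+// The pass criterion used below,
+// EXPECT_GT(input_energy, 10.f * output_energy), corresponds to requiring at
+// least 10 * log10(10) = 10 dB of echo suppression over the second half of
+// the processed blocks.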
+TEST(EchoRemover, BasicEchoRemoval) { + constexpr int kNumBlocksToProcess = 500; + Random random_generator(42U); + absl::optional delay_estimate; + for (size_t num_channels : {1, 2, 4}) { + for (auto rate : {16000, 32000, 48000}) { + std::vector>> x( + NumBandsForRate(rate), + std::vector>(num_channels, + std::vector(kBlockSize, 0.f))); + std::vector>> y( + NumBandsForRate(rate), + std::vector>(num_channels, + std::vector(kBlockSize, 0.f))); + EchoPathVariability echo_path_variability( + false, EchoPathVariability::DelayAdjustment::kNone, false); + for (size_t delay_samples : {0, 64, 150, 200, 301}) { + SCOPED_TRACE(ProduceDebugText(rate, delay_samples)); + EchoCanceller3Config config; + std::unique_ptr remover( + EchoRemover::Create(config, rate, num_channels, num_channels)); + std::unique_ptr render_buffer( + RenderDelayBuffer::Create(config, rate, num_channels)); + render_buffer->AlignFromDelay(delay_samples / kBlockSize); + + std::vector>>> + delay_buffers(x.size()); + for (size_t band = 0; band < delay_buffers.size(); ++band) { + delay_buffers[band].resize(x[0].size()); + } + + for (size_t band = 0; band < x.size(); ++band) { + for (size_t channel = 0; channel < x[0].size(); ++channel) { + delay_buffers[band][channel].reset( + new DelayBuffer(delay_samples)); + } + } + + float input_energy = 0.f; + float output_energy = 0.f; + for (int k = 0; k < kNumBlocksToProcess; ++k) { + const bool silence = k < 100 || (k % 100 >= 10); + + for (size_t band = 0; band < x.size(); ++band) { + for (size_t channel = 0; channel < x[0].size(); ++channel) { + if (silence) { + std::fill(x[band][channel].begin(), x[band][channel].end(), + 0.f); + } else { + RandomizeSampleVector(&random_generator, x[band][channel]); + } + delay_buffers[band][channel]->Delay(x[band][channel], + y[band][channel]); + } + } + + if (k > kNumBlocksToProcess / 2) { + input_energy = std::inner_product(y[0][0].begin(), y[0][0].end(), + y[0][0].begin(), input_energy); + } + + render_buffer->Insert(x); + render_buffer->PrepareCaptureProcessing(); + + remover->ProcessCapture(echo_path_variability, false, delay_estimate, + render_buffer->GetRenderBuffer(), nullptr, + &y); + + if (k > kNumBlocksToProcess / 2) { + output_energy = std::inner_product(y[0][0].begin(), y[0][0].end(), + y[0][0].begin(), output_energy); + } + } + EXPECT_GT(input_energy, 10.f * output_energy); + } + } + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/erl_estimator.cc b/audio_processing/aec3/erl_estimator.cc new file mode 100644 index 0000000..192de47 --- /dev/null +++ b/audio_processing/aec3/erl_estimator.cc @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "audio_processing/aec3/erl_estimator.h" + +#include +#include + +#include "rtc_base/checks.h" + +namespace webrtc { + +namespace { + +constexpr float kMinErl = 0.01f; +constexpr float kMaxErl = 1000.f; + +} // namespace + +ErlEstimator::ErlEstimator(size_t startup_phase_length_blocks_) + : startup_phase_length_blocks__(startup_phase_length_blocks_) { + erl_.fill(kMaxErl); + hold_counters_.fill(0); + erl_time_domain_ = kMaxErl; + hold_counter_time_domain_ = 0; +} + +ErlEstimator::~ErlEstimator() = default; + +void ErlEstimator::Reset() { + blocks_since_reset_ = 0; +} + +void ErlEstimator::Update( + const std::vector& converged_filters, + rtc::ArrayView> render_spectra, + rtc::ArrayView> + capture_spectra) { + const size_t num_capture_channels = converged_filters.size(); + RTC_DCHECK_EQ(capture_spectra.size(), num_capture_channels); + + // Corresponds to WGN of power -46 dBFS. + constexpr float kX2Min = 44015068.0f; + + const auto first_converged_iter = + std::find(converged_filters.begin(), converged_filters.end(), true); + const bool any_filter_converged = + first_converged_iter != converged_filters.end(); + + if (++blocks_since_reset_ < startup_phase_length_blocks__ || + !any_filter_converged) { + return; + } + + // Use the maximum spectrum across capture and the maximum across render. + std::array max_capture_spectrum_data; + std::array max_capture_spectrum = + capture_spectra[/*channel=*/0]; + if (num_capture_channels > 1) { + // Initialize using the first channel with a converged filter. + const size_t first_converged = + std::distance(converged_filters.begin(), first_converged_iter); + RTC_DCHECK_GE(first_converged, 0); + RTC_DCHECK_LT(first_converged, num_capture_channels); + max_capture_spectrum_data = capture_spectra[first_converged]; + + for (size_t ch = first_converged + 1; ch < num_capture_channels; ++ch) { + if (!converged_filters[ch]) { + continue; + } + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + max_capture_spectrum_data[k] = + std::max(max_capture_spectrum_data[k], capture_spectra[ch][k]); + } + } + max_capture_spectrum = max_capture_spectrum_data; + } + + const size_t num_render_channels = render_spectra.size(); + std::array max_render_spectrum_data; + rtc::ArrayView max_render_spectrum = + render_spectra[/*channel=*/0]; + if (num_render_channels > 1) { + std::copy(render_spectra[0].begin(), render_spectra[0].end(), + max_render_spectrum_data.begin()); + for (size_t ch = 1; ch < num_render_channels; ++ch) { + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + max_render_spectrum_data[k] = + std::max(max_render_spectrum_data[k], render_spectra[ch][k]); + } + } + max_render_spectrum = max_render_spectrum_data; + } + + const auto& X2 = max_render_spectrum; + const auto& Y2 = max_capture_spectrum; + + // Update the estimates in a maximum statistics manner. + for (size_t k = 1; k < kFftLengthBy2; ++k) { + if (X2[k] > kX2Min) { + const float new_erl = Y2[k] / X2[k]; + if (new_erl < erl_[k]) { + hold_counters_[k - 1] = 1000; + erl_[k] += 0.1f * (new_erl - erl_[k]); + erl_[k] = std::max(erl_[k], kMinErl); + } + } + } + + std::for_each(hold_counters_.begin(), hold_counters_.end(), + [](int& a) { --a; }); + std::transform(hold_counters_.begin(), hold_counters_.end(), erl_.begin() + 1, + erl_.begin() + 1, [](int a, float b) { + return a > 0 ? b : std::min(kMaxErl, 2.f * b); + }); + + erl_[0] = erl_[1]; + erl_[kFftLengthBy2] = erl_[kFftLengthBy2 - 1]; + + // Compute ERL over all frequency bins. 
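+  // A sketch of the recursion applied below to the band-summed spectra: new,
+  // lower ERL values are tracked quickly via the 0.1 * (new_erl - erl)
+  // smoothing, while upward corrections are held back for 1000 blocks and
+  // then doubled once per block, i.e. the estimate rises by 3 dB per block
+  // until a lower estimate is observed again.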
+ const float X2_sum = std::accumulate(X2.begin(), X2.end(), 0.0f); + + if (X2_sum > kX2Min * X2.size()) { + const float Y2_sum = std::accumulate(Y2.begin(), Y2.end(), 0.0f); + const float new_erl = Y2_sum / X2_sum; + if (new_erl < erl_time_domain_) { + hold_counter_time_domain_ = 1000; + erl_time_domain_ += 0.1f * (new_erl - erl_time_domain_); + erl_time_domain_ = std::max(erl_time_domain_, kMinErl); + } + } + + --hold_counter_time_domain_; + erl_time_domain_ = (hold_counter_time_domain_ > 0) + ? erl_time_domain_ + : std::min(kMaxErl, 2.f * erl_time_domain_); +} + +} // namespace webrtc diff --git a/audio_processing/aec3/erl_estimator.h b/audio_processing/aec3/erl_estimator.h new file mode 100644 index 0000000..6a1a332 --- /dev/null +++ b/audio_processing/aec3/erl_estimator.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_ERL_ESTIMATOR_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_ERL_ESTIMATOR_H_ + +#include + +#include +#include + +#include "rtc_base/array_view.h" +#include "audio_processing/aec3/aec3_common.h" +#include "rtc_base/constructor_magic.h" + +namespace webrtc { + +// Estimates the echo return loss based on the signal spectra. +class ErlEstimator { + public: + explicit ErlEstimator(size_t startup_phase_length_blocks_); + ~ErlEstimator(); + + // Resets the ERL estimation. + void Reset(); + + // Updates the ERL estimate. + void Update(const std::vector& converged_filters, + rtc::ArrayView> + render_spectra, + rtc::ArrayView> + capture_spectra); + + // Returns the most recent ERL estimate. + const std::array& Erl() const { return erl_; } + float ErlTimeDomain() const { return erl_time_domain_; } + + private: + const size_t startup_phase_length_blocks__; + std::array erl_; + std::array hold_counters_; + float erl_time_domain_; + int hold_counter_time_domain_; + size_t blocks_since_reset_ = 0; + RTC_DISALLOW_COPY_AND_ASSIGN(ErlEstimator); +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_ERL_ESTIMATOR_H_ diff --git a/audio_processing/aec3/erl_estimator_unittest.cc b/audio_processing/aec3/erl_estimator_unittest.cc new file mode 100644 index 0000000..344551d --- /dev/null +++ b/audio_processing/aec3/erl_estimator_unittest.cc @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "modules/audio_processing/aec3/erl_estimator.h"
+
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+std::string ProduceDebugText(size_t num_render_channels,
+                             size_t num_capture_channels) {
+  rtc::StringBuilder ss;
+  ss << "Render channels: " << num_render_channels;
+  ss << ", Capture channels: " << num_capture_channels;
+  return ss.Release();
+}
+
+void VerifyErl(const std::array<float, kFftLengthBy2Plus1>& erl,
+               float erl_time_domain,
+               float reference) {
+  std::for_each(erl.begin(), erl.end(),
+                [reference](float a) { EXPECT_NEAR(reference, a, 0.001); });
+  EXPECT_NEAR(reference, erl_time_domain, 0.001);
+}
+
+}  // namespace
+
+// Verifies that the correct ERL estimates are achieved.
+TEST(ErlEstimator, Estimates) {
+  for (size_t num_render_channels : {1, 2, 8}) {
+    for (size_t num_capture_channels : {1, 2, 8}) {
+      SCOPED_TRACE(
+          ProduceDebugText(num_render_channels, num_capture_channels));
+      std::vector<std::array<float, kFftLengthBy2Plus1>> X2(
+          num_render_channels);
+      for (auto& X2_ch : X2) {
+        X2_ch.fill(0.f);
+      }
+      std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(
+          num_capture_channels);
+      for (auto& Y2_ch : Y2) {
+        Y2_ch.fill(0.f);
+      }
+      std::vector<bool> converged_filters(num_capture_channels, false);
+      const size_t converged_idx = num_capture_channels - 1;
+      converged_filters[converged_idx] = true;
+
+      ErlEstimator estimator(0);
+
+      // Verifies that the ERL estimate is properly reduced to lower values.
+      for (auto& X2_ch : X2) {
+        X2_ch.fill(500 * 1000.f * 1000.f);
+      }
+      Y2[converged_idx].fill(10 * X2[0][0]);
+      for (size_t k = 0; k < 200; ++k) {
+        estimator.Update(converged_filters, X2, Y2);
+      }
+      VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 10.f);
+
+      // Verifies that the ERL is not immediately increased when the ERL in
+      // the data increases.
+      Y2[converged_idx].fill(10000 * X2[0][0]);
+      for (size_t k = 0; k < 998; ++k) {
+        estimator.Update(converged_filters, X2, Y2);
+      }
+      VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 10.f);
+
+      // Verifies that the rate of increase is 3 dB.
+      estimator.Update(converged_filters, X2, Y2);
+      VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 20.f);
+
+      // Verifies that the maximum ERL is reached when no low ERL estimates
+      // have been observed for a long period.
+      for (size_t k = 0; k < 1000; ++k) {
+        estimator.Update(converged_filters, X2, Y2);
+      }
+      VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 1000.f);
+
+      // Verifies that the ERL estimate is not updated for low-level signals.
+      for (auto& X2_ch : X2) {
+        X2_ch.fill(1000.f * 1000.f);
+      }
+      Y2[converged_idx].fill(10 * X2[0][0]);
+      for (size_t k = 0; k < 200; ++k) {
+        estimator.Update(converged_filters, X2, Y2);
+      }
+      VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 1000.f);
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/erle_estimator.cc b/audio_processing/aec3/erle_estimator.cc
new file mode 100644
index 0000000..412cbb5
--- /dev/null
+++ b/audio_processing/aec3/erle_estimator.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */ + +#include "audio_processing/aec3/erle_estimator.h" + +#include "audio_processing/aec3/aec3_common.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +ErleEstimator::ErleEstimator(size_t startup_phase_length_blocks, + const EchoCanceller3Config& config, + size_t num_capture_channels) + : startup_phase_length_blocks_(startup_phase_length_blocks), + fullband_erle_estimator_(config.erle, num_capture_channels), + subband_erle_estimator_(config, num_capture_channels) { + if (config.erle.num_sections > 1) { + signal_dependent_erle_estimator_ = + std::make_unique(config, + num_capture_channels); + } + Reset(true); +} + +ErleEstimator::~ErleEstimator() = default; + +void ErleEstimator::Reset(bool delay_change) { + fullband_erle_estimator_.Reset(); + subband_erle_estimator_.Reset(); + if (signal_dependent_erle_estimator_) { + signal_dependent_erle_estimator_->Reset(); + } + if (delay_change) { + blocks_since_reset_ = 0; + } +} + +void ErleEstimator::Update( + const RenderBuffer& render_buffer, + rtc::ArrayView>> + filter_frequency_responses, + rtc::ArrayView + avg_render_spectrum_with_reverb, + rtc::ArrayView> capture_spectra, + rtc::ArrayView> + subtractor_spectra, + const std::vector& converged_filters) { + RTC_DCHECK_EQ(subband_erle_estimator_.Erle().size(), capture_spectra.size()); + RTC_DCHECK_EQ(subband_erle_estimator_.Erle().size(), + subtractor_spectra.size()); + const auto& X2_reverb = avg_render_spectrum_with_reverb; + const auto& Y2 = capture_spectra; + const auto& E2 = subtractor_spectra; + + if (++blocks_since_reset_ < startup_phase_length_blocks_) { + return; + } + + subband_erle_estimator_.Update(X2_reverb, Y2, E2, converged_filters); + + if (signal_dependent_erle_estimator_) { + signal_dependent_erle_estimator_->Update( + render_buffer, filter_frequency_responses, X2_reverb, Y2, E2, + subband_erle_estimator_.Erle(), converged_filters); + } + + fullband_erle_estimator_.Update(X2_reverb, Y2, E2, converged_filters); +} + +void ErleEstimator::Dump( + const std::unique_ptr& data_dumper) const { + fullband_erle_estimator_.Dump(data_dumper); + subband_erle_estimator_.Dump(data_dumper); + if (signal_dependent_erle_estimator_) { + signal_dependent_erle_estimator_->Dump(data_dumper); + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/erle_estimator.h b/audio_processing/aec3/erle_estimator.h new file mode 100644 index 0000000..d9b40c5 --- /dev/null +++ b/audio_processing/aec3/erle_estimator.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
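+// Note: ErleEstimator::Update() above is an orchestrator: after the startup
+// gate (nothing is estimated for the first startup_phase_length_blocks_
+// blocks following a delay change), it feeds the same spectra to the
+// subband estimator, the optional signal-dependent estimator (created only
+// when config.erle.num_sections > 1), and the fullband estimator.
+// Construction sketch, mirroring the unit tests:
+//
+//   EchoCanceller3Config config;  // library defaults
+//   ErleEstimator erle(/*startup_phase_length_blocks=*/0, config,
+//                      /*num_capture_channels=*/1);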
+ */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_ERLE_ESTIMATOR_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_ERLE_ESTIMATOR_H_ + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "rtc_base/array_view.h" +#include "api/echo_canceller3_config.h" +#include "audio_processing/aec3/aec3_common.h" +#include "audio_processing/aec3/fullband_erle_estimator.h" +#include "audio_processing/aec3/render_buffer.h" +#include "audio_processing/aec3/signal_dependent_erle_estimator.h" +#include "audio_processing/aec3/subband_erle_estimator.h" +#include "audio_processing/logging/apm_data_dumper.h" + +namespace webrtc { + +// Estimates the echo return loss enhancement. One estimate is done per subband +// and another one is done using the aggreation of energy over all the subbands. +class ErleEstimator { + public: + ErleEstimator(size_t startup_phase_length_blocks, + const EchoCanceller3Config& config, + size_t num_capture_channels); + ~ErleEstimator(); + + // Resets the fullband ERLE estimator and the subbands ERLE estimators. + void Reset(bool delay_change); + + // Updates the ERLE estimates. + void Update( + const RenderBuffer& render_buffer, + rtc::ArrayView>> + filter_frequency_responses, + rtc::ArrayView + avg_render_spectrum_with_reverb, + rtc::ArrayView> + capture_spectra, + rtc::ArrayView> + subtractor_spectra, + const std::vector& converged_filters); + + // Returns the most recent subband ERLE estimates. + rtc::ArrayView> Erle() const { + return signal_dependent_erle_estimator_ + ? signal_dependent_erle_estimator_->Erle() + : subband_erle_estimator_.Erle(); + } + + // Returns the subband ERLE that are estimated during onsets (only used for + // testing). + rtc::ArrayView> ErleOnsets() + const { + return subband_erle_estimator_.ErleOnsets(); + } + + // Returns the fullband ERLE estimate. + float FullbandErleLog2() const { + return fullband_erle_estimator_.FullbandErleLog2(); + } + + // Returns an estimation of the current linear filter quality based on the + // current and past fullband ERLE estimates. The returned value is a float + // vector with content between 0 and 1 where 1 indicates that, at this current + // time instant, the linear filter is reaching its maximum subtraction + // performance. + rtc::ArrayView> GetInstLinearQualityEstimates() + const { + return fullband_erle_estimator_.GetInstLinearQualityEstimates(); + } + + void Dump(const std::unique_ptr& data_dumper) const; + + private: + const size_t startup_phase_length_blocks_; + FullBandErleEstimator fullband_erle_estimator_; + SubbandErleEstimator subband_erle_estimator_; + std::unique_ptr + signal_dependent_erle_estimator_; + size_t blocks_since_reset_ = 0; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_ERLE_ESTIMATOR_H_ diff --git a/audio_processing/aec3/erle_estimator_unittest.cc b/audio_processing/aec3/erle_estimator_unittest.cc new file mode 100644 index 0000000..48a6d6c --- /dev/null +++ b/audio_processing/aec3/erle_estimator_unittest.cc @@ -0,0 +1,278 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
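+// Note: two readout conventions coexist in the interface above: Erle()
+// returns per-bin linear ERLE (from the signal-dependent estimator when it
+// exists, otherwise from the plain subband estimator), whereas
+// FullbandErleLog2() is in log2 units, so the linear fullband value is
+// recovered as in the unit tests:
+//
+//   const float fullband_erle = std::pow(2.f, estimator.FullbandErleLog2());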
+ */ + +#include "modules/audio_processing/aec3/erle_estimator.h" + +#include + +#include "api/array_view.h" +#include "modules/audio_processing/aec3/render_delay_buffer.h" +#include "modules/audio_processing/aec3/spectrum_buffer.h" +#include "rtc_base/random.h" +#include "test/gtest.h" + +namespace webrtc { + +namespace { + +constexpr int kLowFrequencyLimit = kFftLengthBy2 / 2; +constexpr float kTrueErle = 10.f; +constexpr float kTrueErleOnsets = 1.0f; +constexpr float kEchoPathGain = 3.f; + +void VerifyErleBands( + rtc::ArrayView> erle, + float reference_lf, + float reference_hf) { + for (size_t ch = 0; ch < erle.size(); ++ch) { + std::for_each( + erle[ch].begin(), erle[ch].begin() + kLowFrequencyLimit, + [reference_lf](float a) { EXPECT_NEAR(reference_lf, a, 0.001); }); + std::for_each( + erle[ch].begin() + kLowFrequencyLimit, erle[ch].end(), + [reference_hf](float a) { EXPECT_NEAR(reference_hf, a, 0.001); }); + } +} + +void VerifyErle( + rtc::ArrayView> erle, + float erle_time_domain, + float reference_lf, + float reference_hf) { + VerifyErleBands(erle, reference_lf, reference_hf); + EXPECT_NEAR(reference_lf, erle_time_domain, 0.5); +} + +void FormFarendTimeFrame(std::vector>>* x) { + const std::array frame = { + 7459.88, 17209.6, 17383, 20768.9, 16816.7, 18386.3, 4492.83, 9675.85, + 6665.52, 14808.6, 9342.3, 7483.28, 19261.7, 4145.98, 1622.18, 13475.2, + 7166.32, 6856.61, 21937, 7263.14, 9569.07, 14919, 8413.32, 7551.89, + 7848.65, 6011.27, 13080.6, 15865.2, 12656, 17459.6, 4263.93, 4503.03, + 9311.79, 21095.8, 12657.9, 13906.6, 19267.2, 11338.1, 16828.9, 11501.6, + 11405, 15031.4, 14541.6, 19765.5, 18346.3, 19350.2, 3157.47, 18095.8, + 1743.68, 21328.2, 19727.5, 7295.16, 10332.4, 11055.5, 20107.4, 14708.4, + 12416.2, 16434, 2454.69, 9840.8, 6867.23, 1615.75, 6059.9, 8394.19}; + for (size_t band = 0; band < x->size(); ++band) { + for (size_t channel = 0; channel < (*x)[band].size(); ++channel) { + RTC_DCHECK_GE((*x)[band][channel].size(), frame.size()); + std::copy(frame.begin(), frame.end(), (*x)[band][channel].begin()); + } + } +} + +void FormFarendFrame(const RenderBuffer& render_buffer, + float erle, + std::array* X2, + rtc::ArrayView> E2, + rtc::ArrayView> Y2) { + const auto& spectrum_buffer = render_buffer.GetSpectrumBuffer(); + const int num_render_channels = spectrum_buffer.buffer[0].size(); + const int num_capture_channels = Y2.size(); + + X2->fill(0.f); + for (int ch = 0; ch < num_render_channels; ++ch) { + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + (*X2)[k] += spectrum_buffer.buffer[spectrum_buffer.write][ch][k] / + num_render_channels; + } + } + + for (int ch = 0; ch < num_capture_channels; ++ch) { + std::transform(X2->begin(), X2->end(), Y2[ch].begin(), + [](float a) { return a * kEchoPathGain * kEchoPathGain; }); + std::transform(Y2[ch].begin(), Y2[ch].end(), E2[ch].begin(), + [erle](float a) { return a / erle; }); + } +} + +void FormNearendFrame( + std::vector>>* x, + std::array* X2, + rtc::ArrayView> E2, + rtc::ArrayView> Y2) { + for (size_t band = 0; band < x->size(); ++band) { + for (size_t ch = 0; ch < (*x)[band].size(); ++ch) { + std::fill((*x)[band][ch].begin(), (*x)[band][ch].end(), 0.f); + } + } + + X2->fill(0.f); + for (size_t ch = 0; ch < Y2.size(); ++ch) { + Y2[ch].fill(500.f * 1000.f * 1000.f); + E2[ch].fill(Y2[ch][0]); + } +} + +void GetFilterFreq( + size_t delay_headroom_samples, + rtc::ArrayView>> + filter_frequency_response) { + const size_t delay_headroom_blocks = delay_headroom_samples / kBlockSize; + for (size_t ch = 0; ch < 
filter_frequency_response[0].size(); ++ch) { + for (auto& block_freq_resp : filter_frequency_response) { + block_freq_resp[ch].fill(0.f); + } + + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + filter_frequency_response[delay_headroom_blocks][ch][k] = kEchoPathGain; + } + } +} + +} // namespace + +TEST(ErleEstimator, VerifyErleIncreaseAndHold) { + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + + for (size_t num_render_channels : {1, 2, 4, 8}) { + for (size_t num_capture_channels : {1, 2, 4}) { + std::array X2; + std::vector> E2( + num_capture_channels); + std::vector> Y2( + num_capture_channels); + std::vector converged_filters(num_capture_channels, true); + + EchoCanceller3Config config; + config.erle.onset_detection = true; + + std::vector>> x( + kNumBands, + std::vector>(num_render_channels, + std::vector(kBlockSize, 0.f))); + std::vector>> + filter_frequency_response( + config.filter.main.length_blocks, + std::vector>( + num_capture_channels)); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, kSampleRateHz, + num_render_channels)); + + GetFilterFreq(config.delay.delay_headroom_samples, + filter_frequency_response); + + ErleEstimator estimator(0, config, num_capture_channels); + + FormFarendTimeFrame(&x); + render_delay_buffer->Insert(x); + render_delay_buffer->PrepareCaptureProcessing(); + // Verifies that the ERLE estimate is properly increased to higher values. + FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), kTrueErle, &X2, + E2, Y2); + for (size_t k = 0; k < 200; ++k) { + render_delay_buffer->Insert(x); + render_delay_buffer->PrepareCaptureProcessing(); + estimator.Update(*render_delay_buffer->GetRenderBuffer(), + filter_frequency_response, X2, Y2, E2, + converged_filters); + } + VerifyErle(estimator.Erle(), std::pow(2.f, estimator.FullbandErleLog2()), + config.erle.max_l, config.erle.max_h); + + FormNearendFrame(&x, &X2, E2, Y2); + // Verifies that the ERLE is not immediately decreased during nearend + // activity. 
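+      // Note: FormNearendFrame() zeroes the render signal and fills
+      // Y2 == E2 with a strong nearend spectrum, i.e. no echo and no
+      // subtraction gain; the check after this loop asserts that 50 such
+      // blocks hold the ERLE at its previous maximum instead of decaying it.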
+ for (size_t k = 0; k < 50; ++k) { + render_delay_buffer->Insert(x); + render_delay_buffer->PrepareCaptureProcessing(); + estimator.Update(*render_delay_buffer->GetRenderBuffer(), + filter_frequency_response, X2, Y2, E2, + converged_filters); + } + VerifyErle(estimator.Erle(), std::pow(2.f, estimator.FullbandErleLog2()), + config.erle.max_l, config.erle.max_h); + } + } +} + +TEST(ErleEstimator, VerifyErleTrackingOnOnsets) { + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + + for (size_t num_render_channels : {1, 2, 4, 8}) { + for (size_t num_capture_channels : {1, 2, 4}) { + std::array X2; + std::vector> E2( + num_capture_channels); + std::vector> Y2( + num_capture_channels); + std::vector converged_filters(num_capture_channels, true); + EchoCanceller3Config config; + config.erle.onset_detection = true; + std::vector>> x( + kNumBands, + std::vector>(num_render_channels, + std::vector(kBlockSize, 0.f))); + std::vector>> + filter_frequency_response( + config.filter.main.length_blocks, + std::vector>( + num_capture_channels)); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, kSampleRateHz, + num_render_channels)); + + GetFilterFreq(config.delay.delay_headroom_samples, + filter_frequency_response); + + ErleEstimator estimator(/*startup_phase_length_blocks=*/0, config, + num_capture_channels); + + FormFarendTimeFrame(&x); + render_delay_buffer->Insert(x); + render_delay_buffer->PrepareCaptureProcessing(); + + for (size_t burst = 0; burst < 20; ++burst) { + FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), + kTrueErleOnsets, &X2, E2, Y2); + for (size_t k = 0; k < 10; ++k) { + render_delay_buffer->Insert(x); + render_delay_buffer->PrepareCaptureProcessing(); + estimator.Update(*render_delay_buffer->GetRenderBuffer(), + filter_frequency_response, X2, Y2, E2, + converged_filters); + } + FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), kTrueErle, &X2, + E2, Y2); + for (size_t k = 0; k < 200; ++k) { + render_delay_buffer->Insert(x); + render_delay_buffer->PrepareCaptureProcessing(); + estimator.Update(*render_delay_buffer->GetRenderBuffer(), + filter_frequency_response, X2, Y2, E2, + converged_filters); + } + FormNearendFrame(&x, &X2, E2, Y2); + for (size_t k = 0; k < 300; ++k) { + render_delay_buffer->Insert(x); + render_delay_buffer->PrepareCaptureProcessing(); + estimator.Update(*render_delay_buffer->GetRenderBuffer(), + filter_frequency_response, X2, Y2, E2, + converged_filters); + } + } + VerifyErleBands(estimator.ErleOnsets(), config.erle.min, config.erle.min); + FormNearendFrame(&x, &X2, E2, Y2); + for (size_t k = 0; k < 1000; k++) { + estimator.Update(*render_delay_buffer->GetRenderBuffer(), + filter_frequency_response, X2, Y2, E2, + converged_filters); + } + // Verifies that during ne activity, Erle converges to the Erle for + // onsets. + VerifyErle(estimator.Erle(), std::pow(2.f, estimator.FullbandErleLog2()), + config.erle.min, config.erle.min); + } + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/fft_buffer.cc b/audio_processing/aec3/fft_buffer.cc new file mode 100644 index 0000000..253c5eb --- /dev/null +++ b/audio_processing/aec3/fft_buffer.cc @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/fft_buffer.h"
+
+namespace webrtc {
+
+FftBuffer::FftBuffer(size_t size, size_t num_channels)
+    : size(static_cast<int>(size)),
+      buffer(size, std::vector<FftData>(num_channels)) {
+  for (auto& block : buffer) {
+    for (auto& channel_fft_data : block) {
+      channel_fft_data.Clear();
+    }
+  }
+}
+
+FftBuffer::~FftBuffer() = default;
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/fft_buffer.h b/audio_processing/aec3/fft_buffer.h
new file mode 100644
index 0000000..3ad7298
--- /dev/null
+++ b/audio_processing/aec3/fft_buffer.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FFT_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FFT_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "audio_processing/aec3/fft_data.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Struct for bundling a circular buffer of FftData objects together with the
+// read and write indices.
+struct FftBuffer {
+  FftBuffer(size_t size, size_t num_channels);
+  ~FftBuffer();
+
+  int IncIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index < size - 1 ? index + 1 : 0;
+  }
+
+  int DecIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index > 0 ? index - 1 : size - 1;
+  }
+
+  int OffsetIndex(int index, int offset) const {
+    RTC_DCHECK_GE(buffer.size(), offset);
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return (size + index + offset) % size;
+  }
+
+  void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+  void IncWriteIndex() { write = IncIndex(write); }
+  void DecWriteIndex() { write = DecIndex(write); }
+  void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+  void IncReadIndex() { read = IncIndex(read); }
+  void DecReadIndex() { read = DecIndex(read); }
+
+  const int size;
+  std::vector<std::vector<FftData>> buffer;
+  int write = 0;
+  int read = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_FFT_BUFFER_H_
diff --git a/audio_processing/aec3/fft_data.h b/audio_processing/aec3/fft_data.h
new file mode 100644
index 0000000..8f0ebc9
--- /dev/null
+++ b/audio_processing/aec3/fft_data.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FFT_DATA_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FFT_DATA_H_
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <array>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// Struct that holds the real and imaginary parts produced by 128 point
+// real-valued FFTs.
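+// Note: in the struct below, re and im each hold kFftLengthBy2Plus1 (65)
+// values covering the non-redundant half of a 128-point real FFT; the DC
+// (bin 0) and Nyquist (bin kFftLengthBy2) components are purely real, so
+// im[0] and im[kFftLengthBy2] are pinned to zero by Assign() and
+// CopyFromPackedArray(). Power-spectrum usage sketch:
+//
+//   FftData X;
+//   X.Clear();
+//   std::array<float, kFftLengthBy2Plus1> X2;
+//   X.Spectrum(Aec3Optimization::kNone, X2);  // X2[k] = re[k]^2 + im[k]^2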
+struct FftData { + // Copies the data in src. + void Assign(const FftData& src) { + std::copy(src.re.begin(), src.re.end(), re.begin()); + std::copy(src.im.begin(), src.im.end(), im.begin()); + im[0] = im[kFftLengthBy2] = 0; + } + + // Clears all the imaginary. + void Clear() { + re.fill(0.f); + im.fill(0.f); + } + + // Computes the power spectrum of the data. + void Spectrum(Aec3Optimization optimization, + rtc::ArrayView power_spectrum) const { + RTC_DCHECK_EQ(kFftLengthBy2Plus1, power_spectrum.size()); + switch (optimization) { +#if defined(WEBRTC_ARCH_X86_FAMILY) + case Aec3Optimization::kSse2: { + constexpr int kNumFourBinBands = kFftLengthBy2 / 4; + constexpr int kLimit = kNumFourBinBands * 4; + for (size_t k = 0; k < kLimit; k += 4) { + const __m128 r = _mm_loadu_ps(&re[k]); + const __m128 i = _mm_loadu_ps(&im[k]); + const __m128 ii = _mm_mul_ps(i, i); + const __m128 rr = _mm_mul_ps(r, r); + const __m128 rrii = _mm_add_ps(rr, ii); + _mm_storeu_ps(&power_spectrum[k], rrii); + } + power_spectrum[kFftLengthBy2] = re[kFftLengthBy2] * re[kFftLengthBy2] + + im[kFftLengthBy2] * im[kFftLengthBy2]; + } break; +#endif + default: + std::transform(re.begin(), re.end(), im.begin(), power_spectrum.begin(), + [](float a, float b) { return a * a + b * b; }); + } + } + + // Copy the data from an interleaved array. + void CopyFromPackedArray(const std::array& v) { + re[0] = v[0]; + re[kFftLengthBy2] = v[1]; + im[0] = im[kFftLengthBy2] = 0; + for (size_t k = 1, j = 2; k < kFftLengthBy2; ++k) { + re[k] = v[j++]; + im[k] = v[j++]; + } + } + + // Copies the data into an interleaved array. + void CopyToPackedArray(std::array* v) const { + RTC_DCHECK(v); + (*v)[0] = re[0]; + (*v)[1] = re[kFftLengthBy2]; + for (size_t k = 1, j = 2; k < kFftLengthBy2; ++k) { + (*v)[j++] = re[k]; + (*v)[j++] = im[k]; + } + } + + std::array re; + std::array im; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_FFT_DATA_H_ diff --git a/audio_processing/aec3/fft_data_unittest.cc b/audio_processing/aec3/fft_data_unittest.cc new file mode 100644 index 0000000..0812fd6 --- /dev/null +++ b/audio_processing/aec3/fft_data_unittest.cc @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/fft_data.h" + +#include "rtc_base/system/arch.h" +#include "system_wrappers/include/cpu_features_wrapper.h" +#include "test/gtest.h" + +namespace webrtc { + +#if defined(WEBRTC_ARCH_X86_FAMILY) +// Verifies that the optimized methods are bitexact to their reference +// counterparts. +TEST(FftData, TestOptimizations) { + if (WebRtc_GetCPUInfo(kSSE2) != 0) { + FftData x; + + for (size_t k = 0; k < x.re.size(); ++k) { + x.re[k] = k + 1; + } + + x.im[0] = x.im[x.im.size() - 1] = 0.f; + for (size_t k = 1; k < x.im.size() - 1; ++k) { + x.im[k] = 2.f * (k + 1); + } + + std::array spectrum; + std::array spectrum_sse2; + x.Spectrum(Aec3Optimization::kNone, spectrum); + x.Spectrum(Aec3Optimization::kSse2, spectrum_sse2); + EXPECT_EQ(spectrum, spectrum_sse2); + } +} +#endif + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) + +// Verifies the check for null output in CopyToPackedArray. 
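+// Note: TestOptimizations above pins down the SIMD contract: the SSE2
+// Spectrum() path computes re * re + im * im four bins at a time with
+// _mm_mul_ps/_mm_add_ps and must stay bitexact with the scalar
+// std::transform fallback, which is why the test compares the two spectra
+// with EXPECT_EQ rather than EXPECT_NEAR.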
+TEST(FftData, NonNullCopyToPackedArrayOutput) { + EXPECT_DEATH(FftData().CopyToPackedArray(nullptr), ""); +} + +// Verifies the check for null output in Spectrum. +TEST(FftData, NonNullSpectrumOutput) { + EXPECT_DEATH(FftData().Spectrum(Aec3Optimization::kNone, nullptr), ""); +} + +#endif + +// Verifies that the Assign method properly copies the data from the source and +// ensures that the imaginary components for the DC and Nyquist bins are 0. +TEST(FftData, Assign) { + FftData x; + FftData y; + + x.re.fill(1.f); + x.im.fill(2.f); + y.Assign(x); + EXPECT_EQ(x.re, y.re); + EXPECT_EQ(0.f, y.im[0]); + EXPECT_EQ(0.f, y.im[x.im.size() - 1]); + for (size_t k = 1; k < x.im.size() - 1; ++k) { + EXPECT_EQ(x.im[k], y.im[k]); + } +} + +// Verifies that the Clear method properly clears all the data. +TEST(FftData, Clear) { + FftData x_ref; + FftData x; + + x_ref.re.fill(0.f); + x_ref.im.fill(0.f); + + x.re.fill(1.f); + x.im.fill(2.f); + x.Clear(); + + EXPECT_EQ(x_ref.re, x.re); + EXPECT_EQ(x_ref.im, x.im); +} + +// Verifies that the spectrum is correctly computed. +TEST(FftData, Spectrum) { + FftData x; + + for (size_t k = 0; k < x.re.size(); ++k) { + x.re[k] = k + 1; + } + + x.im[0] = x.im[x.im.size() - 1] = 0.f; + for (size_t k = 1; k < x.im.size() - 1; ++k) { + x.im[k] = 2.f * (k + 1); + } + + std::array spectrum; + x.Spectrum(Aec3Optimization::kNone, spectrum); + + EXPECT_EQ(x.re[0] * x.re[0], spectrum[0]); + EXPECT_EQ(x.re[spectrum.size() - 1] * x.re[spectrum.size() - 1], + spectrum[spectrum.size() - 1]); + for (size_t k = 1; k < spectrum.size() - 1; ++k) { + EXPECT_EQ(x.re[k] * x.re[k] + x.im[k] * x.im[k], spectrum[k]); + } +} + +// Verifies that the functionality in CopyToPackedArray works as intended. +TEST(FftData, CopyToPackedArray) { + FftData x; + std::array x_packed; + + for (size_t k = 0; k < x.re.size(); ++k) { + x.re[k] = k + 1; + } + + x.im[0] = x.im[x.im.size() - 1] = 0.f; + for (size_t k = 1; k < x.im.size() - 1; ++k) { + x.im[k] = 2.f * (k + 1); + } + + x.CopyToPackedArray(&x_packed); + + EXPECT_EQ(x.re[0], x_packed[0]); + EXPECT_EQ(x.re[x.re.size() - 1], x_packed[1]); + for (size_t k = 1; k < x_packed.size() / 2; ++k) { + EXPECT_EQ(x.re[k], x_packed[2 * k]); + EXPECT_EQ(x.im[k], x_packed[2 * k + 1]); + } +} + +// Verifies that the functionality in CopyFromPackedArray works as intended +// (relies on that the functionality in CopyToPackedArray has been verified in +// the test above). +TEST(FftData, CopyFromPackedArray) { + FftData x_ref; + FftData x; + std::array x_packed; + + for (size_t k = 0; k < x_ref.re.size(); ++k) { + x_ref.re[k] = k + 1; + } + + x_ref.im[0] = x_ref.im[x_ref.im.size() - 1] = 0.f; + for (size_t k = 1; k < x_ref.im.size() - 1; ++k) { + x_ref.im[k] = 2.f * (k + 1); + } + + x_ref.CopyToPackedArray(&x_packed); + x.CopyFromPackedArray(x_packed); + + EXPECT_EQ(x_ref.re, x.re); + EXPECT_EQ(x_ref.im, x.im); +} + +} // namespace webrtc diff --git a/audio_processing/aec3/filter_analyzer.cc b/audio_processing/aec3/filter_analyzer.cc new file mode 100644 index 0000000..3879942 --- /dev/null +++ b/audio_processing/aec3/filter_analyzer.cc @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
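+// Note: the packed layout round-tripped by the two tests above follows the
+// usual real-FFT packing convention: element 0 carries the DC real part,
+// element 1 the Nyquist real part (both bins have zero imaginary parts),
+// and elements 2..127 hold interleaved re/im pairs for bins 1..63, fitting
+// a 128-point real FFT into exactly kFftLength floats.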
+ */ + +#include "audio_processing/aec3/filter_analyzer.h" + +#include + +#include +#include +#include + +#include "audio_processing/aec3/aec3_common.h" +#include "audio_processing/aec3/render_buffer.h" +#include "audio_processing/logging/apm_data_dumper.h" +#include "rtc_base/atomic_ops.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace { + +size_t FindPeakIndex(rtc::ArrayView filter_time_domain, + size_t peak_index_in, + size_t start_sample, + size_t end_sample) { + size_t peak_index_out = peak_index_in; + float max_h2 = + filter_time_domain[peak_index_out] * filter_time_domain[peak_index_out]; + for (size_t k = start_sample; k <= end_sample; ++k) { + float tmp = filter_time_domain[k] * filter_time_domain[k]; + if (tmp > max_h2) { + peak_index_out = k; + max_h2 = tmp; + } + } + + return peak_index_out; +} + +} // namespace + +int FilterAnalyzer::instance_count_ = 0; + +FilterAnalyzer::FilterAnalyzer(const EchoCanceller3Config& config, + size_t num_capture_channels) + : data_dumper_( + new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))), + bounded_erl_(config.ep_strength.bounded_erl), + default_gain_(config.ep_strength.default_gain), + h_highpass_(num_capture_channels, + std::vector( + GetTimeDomainLength(config.filter.main.length_blocks), + 0.f)), + filter_analysis_states_(num_capture_channels, + FilterAnalysisState(config)), + filter_delays_blocks_(num_capture_channels, 0) { + Reset(); +} + +FilterAnalyzer::~FilterAnalyzer() = default; + +void FilterAnalyzer::Reset() { + blocks_since_reset_ = 0; + ResetRegion(); + for (auto& state : filter_analysis_states_) { + state.peak_index = 0; + state.gain = default_gain_; + state.consistent_filter_detector.Reset(); + } + std::fill(filter_delays_blocks_.begin(), filter_delays_blocks_.end(), 0); +} + +void FilterAnalyzer::Update( + rtc::ArrayView> filters_time_domain, + const RenderBuffer& render_buffer, + bool* any_filter_consistent, + float* max_echo_path_gain) { + RTC_DCHECK(any_filter_consistent); + RTC_DCHECK(max_echo_path_gain); + RTC_DCHECK_EQ(filters_time_domain.size(), filter_analysis_states_.size()); + RTC_DCHECK_EQ(filters_time_domain.size(), h_highpass_.size()); + + ++blocks_since_reset_; + SetRegionToAnalyze(filters_time_domain[0].size()); + AnalyzeRegion(filters_time_domain, render_buffer); + + // Aggregate the results for all capture channels. + auto& st_ch0 = filter_analysis_states_[0]; + *any_filter_consistent = st_ch0.consistent_estimate; + *max_echo_path_gain = st_ch0.gain; + min_filter_delay_blocks_ = filter_delays_blocks_[0]; + for (size_t ch = 1; ch < filters_time_domain.size(); ++ch) { + auto& st_ch = filter_analysis_states_[ch]; + *any_filter_consistent = + *any_filter_consistent || st_ch.consistent_estimate; + *max_echo_path_gain = std::max(*max_echo_path_gain, st_ch.gain); + min_filter_delay_blocks_ = + std::min(min_filter_delay_blocks_, filter_delays_blocks_[ch]); + } +} + +void FilterAnalyzer::AnalyzeRegion( + rtc::ArrayView> filters_time_domain, + const RenderBuffer& render_buffer) { + // Preprocess the filter to avoid issues with low-frequency components in the + // filter. 
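+  // Note: after the high-pass preprocessing below, each channel's echo-path
+  // delay is read directly off the impulse-response peak:
+  // filter_delays_blocks_[ch] = peak_index >> kBlockSizeLog2, i.e. the peak
+  // sample divided by the 64-sample block size (a peak at sample 130 maps
+  // to a 2-block delay, 8 ms at the 16 kHz band rate).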
+ PreProcessFilters(filters_time_domain); + data_dumper_->DumpRaw("aec3_linear_filter_processed_td", h_highpass_[0]); + + constexpr float kOneByBlockSize = 1.f / kBlockSize; + for (size_t ch = 0; ch < filters_time_domain.size(); ++ch) { + RTC_DCHECK_LT(region_.start_sample_, filters_time_domain[ch].size()); + RTC_DCHECK_LT(filter_analysis_states_[ch].peak_index, + filters_time_domain[0].size()); + RTC_DCHECK_LT(region_.end_sample_, filters_time_domain[ch].size()); + + auto& st_ch = filter_analysis_states_[ch]; + RTC_DCHECK_EQ(h_highpass_[ch].size(), filters_time_domain[ch].size()); + st_ch.peak_index = + FindPeakIndex(h_highpass_[ch], st_ch.peak_index, region_.start_sample_, + region_.end_sample_); + filter_delays_blocks_[ch] = st_ch.peak_index >> kBlockSizeLog2; + UpdateFilterGain(h_highpass_[ch], &st_ch); + st_ch.filter_length_blocks = + filters_time_domain[ch].size() * kOneByBlockSize; + st_ch.consistent_estimate = st_ch.consistent_filter_detector.Detect( + h_highpass_[ch], region_, + render_buffer.Block(-filter_delays_blocks_[ch])[0], st_ch.peak_index, + filter_delays_blocks_[ch]); + } +} + +void FilterAnalyzer::UpdateFilterGain( + rtc::ArrayView filter_time_domain, + FilterAnalysisState* st) { + bool sufficient_time_to_converge = + blocks_since_reset_ > 5 * kNumBlocksPerSecond; + if (sufficient_time_to_converge && st->consistent_estimate) { + st->gain = fabsf(filter_time_domain[st->peak_index]); + } else { + // TODO(peah): Verify whether this check against a float is ok. + if (st->gain) { + st->gain = std::max(st->gain, fabsf(filter_time_domain[st->peak_index])); + } + } + + if (bounded_erl_ && st->gain) { + st->gain = std::max(st->gain, 0.01f); + } +} + +void FilterAnalyzer::PreProcessFilters( + rtc::ArrayView> filters_time_domain) { + for (size_t ch = 0; ch < filters_time_domain.size(); ++ch) { + RTC_DCHECK_LT(region_.start_sample_, filters_time_domain[ch].size()); + RTC_DCHECK_LT(region_.end_sample_, filters_time_domain[ch].size()); + + RTC_DCHECK_GE(h_highpass_[ch].capacity(), filters_time_domain[ch].size()); + h_highpass_[ch].resize(filters_time_domain[ch].size()); + // Minimum phase high-pass filter with cutoff frequency at about 600 Hz. + constexpr std::array h = { + {0.7929742f, -0.36072128f, -0.47047766f}}; + + std::fill(h_highpass_[ch].begin() + region_.start_sample_, + h_highpass_[ch].begin() + region_.end_sample_ + 1, 0.f); + for (size_t k = std::max(h.size() - 1, region_.start_sample_); + k <= region_.end_sample_; ++k) { + for (size_t j = 0; j < h.size(); ++j) { + h_highpass_[ch][k] += filters_time_domain[ch][k - j] * h[j]; + } + } + } +} + +void FilterAnalyzer::ResetRegion() { + region_.start_sample_ = 0; + region_.end_sample_ = 0; +} + +void FilterAnalyzer::SetRegionToAnalyze(size_t filter_size) { + constexpr size_t kNumberBlocksToUpdate = 1; + auto& r = region_; + r.start_sample_ = r.end_sample_ >= filter_size - 1 ? 0 : r.end_sample_ + 1; + r.end_sample_ = + std::min(r.start_sample_ + kNumberBlocksToUpdate * kBlockSize - 1, + filter_size - 1); + + // Check range. 
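+  // Note: the region logic above slides a kNumberBlocksToUpdate * kBlockSize
+  // = 64-sample window across the filter and wraps to sample 0 past the
+  // end, so a filter of N blocks is fully re-examined once every N calls;
+  // for filter_size == 128 the schedule is [0, 63], [64, 127], [0, 63], ...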
+ RTC_DCHECK_LT(r.start_sample_, filter_size); + RTC_DCHECK_LT(r.end_sample_, filter_size); + RTC_DCHECK_LE(r.start_sample_, r.end_sample_); +} + +FilterAnalyzer::ConsistentFilterDetector::ConsistentFilterDetector( + const EchoCanceller3Config& config) + : active_render_threshold_(config.render_levels.active_render_limit * + config.render_levels.active_render_limit * + kFftLengthBy2) {} + +void FilterAnalyzer::ConsistentFilterDetector::Reset() { + significant_peak_ = false; + filter_floor_accum_ = 0.f; + filter_secondary_peak_ = 0.f; + filter_floor_low_limit_ = 0; + filter_floor_high_limit_ = 0; + consistent_estimate_counter_ = 0; + consistent_delay_reference_ = -10; +} + +bool FilterAnalyzer::ConsistentFilterDetector::Detect( + rtc::ArrayView filter_to_analyze, + const FilterRegion& region, + rtc::ArrayView> x_block, + size_t peak_index, + int delay_blocks) { + if (region.start_sample_ == 0) { + filter_floor_accum_ = 0.f; + filter_secondary_peak_ = 0.f; + filter_floor_low_limit_ = peak_index < 64 ? 0 : peak_index - 64; + filter_floor_high_limit_ = + peak_index > filter_to_analyze.size() - 129 ? 0 : peak_index + 128; + } + + for (size_t k = region.start_sample_; + k < std::min(region.end_sample_ + 1, filter_floor_low_limit_); ++k) { + float abs_h = fabsf(filter_to_analyze[k]); + filter_floor_accum_ += abs_h; + filter_secondary_peak_ = std::max(filter_secondary_peak_, abs_h); + } + + for (size_t k = std::max(filter_floor_high_limit_, region.start_sample_); + k <= region.end_sample_; ++k) { + float abs_h = fabsf(filter_to_analyze[k]); + filter_floor_accum_ += abs_h; + filter_secondary_peak_ = std::max(filter_secondary_peak_, abs_h); + } + + if (region.end_sample_ == filter_to_analyze.size() - 1) { + float filter_floor = filter_floor_accum_ / + (filter_floor_low_limit_ + filter_to_analyze.size() - + filter_floor_high_limit_); + float abs_peak = fabsf(filter_to_analyze[peak_index]); + significant_peak_ = abs_peak > 10.f * filter_floor && + abs_peak > 2.f * filter_secondary_peak_; + } + + if (significant_peak_) { + bool active_render_block = false; + for (auto& x_channel : x_block) { + const float x_energy = std::inner_product( + x_channel.begin(), x_channel.end(), x_channel.begin(), 0.f); + if (x_energy > active_render_threshold_) { + active_render_block = true; + break; + } + } + + if (consistent_delay_reference_ == delay_blocks) { + if (active_render_block) { + ++consistent_estimate_counter_; + } + } else { + consistent_estimate_counter_ = 0; + consistent_delay_reference_ = delay_blocks; + } + } + + return consistent_estimate_counter_ > 1.5f * kNumBlocksPerSecond; +} + +} // namespace webrtc diff --git a/audio_processing/aec3/filter_analyzer.h b/audio_processing/aec3/filter_analyzer.h new file mode 100644 index 0000000..2faa29e --- /dev/null +++ b/audio_processing/aec3/filter_analyzer.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
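+// Note: ConsistentFilterDetector::Detect() above accepts a peak as
+// significant only when it clears two margins over the rest of the impulse
+// response (the floor excludes a window of 64 samples before and 128 after
+// the peak):
+//
+//   significant = |h[peak]| > 10.f * filter_floor           // ~20 dB margin
+//              && |h[peak]| > 2.f * filter_secondary_peak;  // ~6 dB margin
+//
+// and it reports consistency only after the same block delay has been seen
+// with active render for more than 1.5 * kNumBlocksPerSecond blocks (1.5 s).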
+ */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_FILTER_ANALYZER_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_FILTER_ANALYZER_H_ + +#include + +#include +#include +#include + +#include "rtc_base/array_view.h" +#include "api/echo_canceller3_config.h" +#include "audio_processing/aec3/aec3_common.h" +#include "rtc_base/constructor_magic.h" + +namespace webrtc { + +class ApmDataDumper; +class RenderBuffer; + +// Class for analyzing the properties of an adaptive filter. +class FilterAnalyzer { + public: + FilterAnalyzer(const EchoCanceller3Config& config, + size_t num_capture_channels); + ~FilterAnalyzer(); + + FilterAnalyzer(const FilterAnalyzer&) = delete; + FilterAnalyzer& operator=(const FilterAnalyzer&) = delete; + + // Resets the analysis. + void Reset(); + + // Updates the estimates with new input data. + void Update(rtc::ArrayView> filters_time_domain, + const RenderBuffer& render_buffer, + bool* any_filter_consistent, + float* max_echo_path_gain); + + // Returns the delay in blocks for each filter. + rtc::ArrayView FilterDelaysBlocks() const { + return filter_delays_blocks_; + } + + // Returns the minimum delay of all filters in terms of blocks. + int MinFilterDelayBlocks() const { return min_filter_delay_blocks_; } + + // Returns the number of blocks for the current used filter. + int FilterLengthBlocks() const { + return filter_analysis_states_[0].filter_length_blocks; + } + + // Returns the preprocessed filter. + rtc::ArrayView> GetAdjustedFilters() const { + return h_highpass_; + } + + // Public for testing purposes only. + void SetRegionToAnalyze(size_t filter_size); + + private: + struct FilterAnalysisState; + + void AnalyzeRegion( + rtc::ArrayView> filters_time_domain, + const RenderBuffer& render_buffer); + + void UpdateFilterGain(rtc::ArrayView filters_time_domain, + FilterAnalysisState* st); + void PreProcessFilters( + rtc::ArrayView> filters_time_domain); + + void ResetRegion(); + + struct FilterRegion { + size_t start_sample_; + size_t end_sample_; + }; + + // This class checks whether the shape of the impulse response has been + // consistent over time. 
+ class ConsistentFilterDetector { + public: + explicit ConsistentFilterDetector(const EchoCanceller3Config& config); + void Reset(); + bool Detect(rtc::ArrayView filter_to_analyze, + const FilterRegion& region, + rtc::ArrayView> x_block, + size_t peak_index, + int delay_blocks); + + private: + bool significant_peak_; + float filter_floor_accum_; + float filter_secondary_peak_; + size_t filter_floor_low_limit_; + size_t filter_floor_high_limit_; + const float active_render_threshold_; + size_t consistent_estimate_counter_ = 0; + int consistent_delay_reference_ = -10; + }; + + struct FilterAnalysisState { + explicit FilterAnalysisState(const EchoCanceller3Config& config) + : filter_length_blocks(config.filter.main_initial.length_blocks), + consistent_filter_detector(config) {} + float gain; + size_t peak_index; + int filter_length_blocks; + bool consistent_estimate = false; + ConsistentFilterDetector consistent_filter_detector; + }; + + static int instance_count_; + std::unique_ptr data_dumper_; + const bool bounded_erl_; + const float default_gain_; + std::vector> h_highpass_; + + size_t blocks_since_reset_ = 0; + FilterRegion region_; + + std::vector filter_analysis_states_; + std::vector filter_delays_blocks_; + + int min_filter_delay_blocks_ = 0; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_FILTER_ANALYZER_H_ diff --git a/audio_processing/aec3/filter_analyzer_unittest.cc b/audio_processing/aec3/filter_analyzer_unittest.cc new file mode 100644 index 0000000..f1e2e4c --- /dev/null +++ b/audio_processing/aec3/filter_analyzer_unittest.cc @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/filter_analyzer.h" + +#include + +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { + +// Verifies that the filter analyzer handles filter resizes properly. +TEST(FilterAnalyzer, FilterResize) { + EchoCanceller3Config c; + std::vector filter(65, 0.f); + for (size_t num_capture_channels : {1, 2, 4}) { + FilterAnalyzer fa(c, num_capture_channels); + fa.SetRegionToAnalyze(filter.size()); + fa.SetRegionToAnalyze(filter.size()); + filter.resize(32); + fa.SetRegionToAnalyze(filter.size()); + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/frame_blocker.cc b/audio_processing/aec3/frame_blocker.cc new file mode 100644 index 0000000..aa0e9d5 --- /dev/null +++ b/audio_processing/aec3/frame_blocker.cc @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "audio_processing/aec3/frame_blocker.h" + +#include "audio_processing/aec3/aec3_common.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +FrameBlocker::FrameBlocker(size_t num_bands, size_t num_channels) + : num_bands_(num_bands), + num_channels_(num_channels), + buffer_(num_bands_, std::vector>(num_channels)) { + RTC_DCHECK_LT(0, num_bands); + RTC_DCHECK_LT(0, num_channels); + for (auto& band : buffer_) { + for (auto& channel : band) { + channel.reserve(kBlockSize); + RTC_DCHECK(channel.empty()); + } + } +} + +FrameBlocker::~FrameBlocker() = default; + +void FrameBlocker::InsertSubFrameAndExtractBlock( + const std::vector>>& sub_frame, + std::vector>>* block) { + RTC_DCHECK(block); + RTC_DCHECK_EQ(num_bands_, block->size()); + RTC_DCHECK_EQ(num_bands_, sub_frame.size()); + for (size_t band = 0; band < num_bands_; ++band) { + RTC_DCHECK_EQ(num_channels_, (*block)[band].size()); + RTC_DCHECK_EQ(num_channels_, sub_frame[band].size()); + for (size_t channel = 0; channel < num_channels_; ++channel) { + RTC_DCHECK_GE(kBlockSize - 16, buffer_[band][channel].size()); + RTC_DCHECK_EQ(kBlockSize, (*block)[band][channel].size()); + RTC_DCHECK_EQ(kSubFrameLength, sub_frame[band][channel].size()); + const int samples_to_block = kBlockSize - buffer_[band][channel].size(); + (*block)[band][channel].clear(); + (*block)[band][channel].insert((*block)[band][channel].begin(), + buffer_[band][channel].begin(), + buffer_[band][channel].end()); + (*block)[band][channel].insert( + (*block)[band][channel].begin() + buffer_[band][channel].size(), + sub_frame[band][channel].begin(), + sub_frame[band][channel].begin() + samples_to_block); + buffer_[band][channel].clear(); + buffer_[band][channel].insert( + buffer_[band][channel].begin(), + sub_frame[band][channel].begin() + samples_to_block, + sub_frame[band][channel].end()); + } + } +} + +bool FrameBlocker::IsBlockAvailable() const { + return kBlockSize == buffer_[0][0].size(); +} + +void FrameBlocker::ExtractBlock( + std::vector>>* block) { + RTC_DCHECK(block); + RTC_DCHECK_EQ(num_bands_, block->size()); + RTC_DCHECK(IsBlockAvailable()); + for (size_t band = 0; band < num_bands_; ++band) { + RTC_DCHECK_EQ(num_channels_, (*block)[band].size()); + for (size_t channel = 0; channel < num_channels_; ++channel) { + RTC_DCHECK_EQ(kBlockSize, buffer_[band][channel].size()); + RTC_DCHECK_EQ(kBlockSize, (*block)[band][channel].size()); + (*block)[band][channel].clear(); + (*block)[band][channel].insert((*block)[band][channel].begin(), + buffer_[band][channel].begin(), + buffer_[band][channel].end()); + buffer_[band][channel].clear(); + } + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/frame_blocker.h b/audio_processing/aec3/frame_blocker.h new file mode 100644 index 0000000..0883dc6 --- /dev/null +++ b/audio_processing/aec3/frame_blocker.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
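+// Note: the rebuffering arithmetic behind the FrameBlocker declared below:
+// sub-frames are 80 samples and blocks are 64, and 4 * 80 = 320 = 5 * 64,
+// so every fourth InsertSubFrameAndExtractBlock() call leaves a fifth,
+// complete block in the internal buffer; IsBlockAvailable() flags it and
+// the caller must drain it. Steady-state sketch (ProcessBlock is a
+// hypothetical consumer):
+//
+//   blocker.InsertSubFrameAndExtractBlock(sub_frame_view, &block);
+//   ProcessBlock(block);
+//   if (blocker.IsBlockAvailable()) {
+//     blocker.ExtractBlock(&block);
+//     ProcessBlock(block);
+//   }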
+ */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_FRAME_BLOCKER_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_FRAME_BLOCKER_H_ + +#include + +#include + +#include "rtc_base/array_view.h" +#include "audio_processing/aec3/aec3_common.h" + +namespace webrtc { + +// Class for producing 64 sample multiband blocks from frames consisting of 2 +// subframes of 80 samples. +class FrameBlocker { + public: + FrameBlocker(size_t num_bands, size_t num_channels); + ~FrameBlocker(); + FrameBlocker(const FrameBlocker&) = delete; + FrameBlocker& operator=(const FrameBlocker&) = delete; + + // Inserts one 80 sample multiband subframe from the multiband frame and + // extracts one 64 sample multiband block. + void InsertSubFrameAndExtractBlock( + const std::vector>>& sub_frame, + std::vector>>* block); + // Reports whether a multiband block of 64 samples is available for + // extraction. + bool IsBlockAvailable() const; + // Extracts a multiband block of 64 samples. + void ExtractBlock(std::vector>>* block); + + private: + const size_t num_bands_; + const size_t num_channels_; + std::vector>> buffer_; +}; +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_FRAME_BLOCKER_H_ diff --git a/audio_processing/aec3/frame_blocker_unittest.cc b/audio_processing/aec3/frame_blocker_unittest.cc new file mode 100644 index 0000000..e907608 --- /dev/null +++ b/audio_processing/aec3/frame_blocker_unittest.cc @@ -0,0 +1,471 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/frame_blocker.h" + +#include +#include + +#include "modules/audio_processing/aec3/aec3_common.h" +#include "modules/audio_processing/aec3/block_framer.h" +#include "rtc_base/strings/string_builder.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +float ComputeSampleValue(size_t chunk_counter, + size_t chunk_size, + size_t band, + size_t channel, + size_t sample_index, + int offset) { + float value = + static_cast(chunk_counter * chunk_size + sample_index + channel) + + offset; + return value > 0 ? 
5000 * band + value : 0; +} + +void FillSubFrame(size_t sub_frame_counter, + int offset, + std::vector>>* sub_frame) { + for (size_t band = 0; band < sub_frame->size(); ++band) { + for (size_t channel = 0; channel < (*sub_frame)[band].size(); ++channel) { + for (size_t sample = 0; sample < (*sub_frame)[band][channel].size(); + ++sample) { + (*sub_frame)[band][channel][sample] = ComputeSampleValue( + sub_frame_counter, kSubFrameLength, band, channel, sample, offset); + } + } + } +} + +void FillSubFrameView( + size_t sub_frame_counter, + int offset, + std::vector>>* sub_frame, + std::vector>>* sub_frame_view) { + FillSubFrame(sub_frame_counter, offset, sub_frame); + for (size_t band = 0; band < sub_frame_view->size(); ++band) { + for (size_t channel = 0; channel < (*sub_frame_view)[band].size(); + ++channel) { + (*sub_frame_view)[band][channel] = rtc::ArrayView( + &(*sub_frame)[band][channel][0], (*sub_frame)[band][channel].size()); + } + } +} + +bool VerifySubFrame( + size_t sub_frame_counter, + int offset, + const std::vector>>& sub_frame_view) { + std::vector>> reference_sub_frame( + sub_frame_view.size(), + std::vector>( + sub_frame_view[0].size(), + std::vector(sub_frame_view[0][0].size(), 0.f))); + FillSubFrame(sub_frame_counter, offset, &reference_sub_frame); + for (size_t band = 0; band < sub_frame_view.size(); ++band) { + for (size_t channel = 0; channel < sub_frame_view[band].size(); ++channel) { + for (size_t sample = 0; sample < sub_frame_view[band][channel].size(); + ++sample) { + if (reference_sub_frame[band][channel][sample] != + sub_frame_view[band][channel][sample]) { + return false; + } + } + } + } + return true; +} + +bool VerifyBlock(size_t block_counter, + int offset, + const std::vector>>& block) { + for (size_t band = 0; band < block.size(); ++band) { + for (size_t channel = 0; channel < block[band].size(); ++channel) { + for (size_t sample = 0; sample < block[band][channel].size(); ++sample) { + const float reference_value = ComputeSampleValue( + block_counter, kBlockSize, band, channel, sample, offset); + if (reference_value != block[band][channel][sample]) { + return false; + } + } + } + } + return true; +} + +// Verifies that the FrameBlocker properly forms blocks out of the frames. +void RunBlockerTest(int sample_rate_hz, size_t num_channels) { + constexpr size_t kNumSubFramesToProcess = 20; + const size_t num_bands = NumBandsForRate(sample_rate_hz); + + std::vector>> block( + num_bands, std::vector>( + num_channels, std::vector(kBlockSize, 0.f))); + std::vector>> input_sub_frame( + num_bands, std::vector>( + num_channels, std::vector(kSubFrameLength, 0.f))); + std::vector>> input_sub_frame_view( + num_bands, std::vector>(num_channels)); + FrameBlocker blocker(num_bands, num_channels); + + size_t block_counter = 0; + for (size_t sub_frame_index = 0; sub_frame_index < kNumSubFramesToProcess; + ++sub_frame_index) { + FillSubFrameView(sub_frame_index, 0, &input_sub_frame, + &input_sub_frame_view); + + blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block); + VerifyBlock(block_counter++, 0, block); + + if ((sub_frame_index + 1) % 4 == 0) { + EXPECT_TRUE(blocker.IsBlockAvailable()); + } else { + EXPECT_FALSE(blocker.IsBlockAvailable()); + } + if (blocker.IsBlockAvailable()) { + blocker.ExtractBlock(&block); + VerifyBlock(block_counter++, 0, block); + } + } +} + +// Verifies that the FrameBlocker and BlockFramer work well together and produce +// the expected output. 
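+// Note: the -64 offset passed to VerifySubFrame() below encodes the
+// pipeline latency: chaining FrameBlocker and BlockFramer delays the audio
+// by exactly one 64-sample block, so from the third sub-frame onwards the
+// output equals the input shifted by kBlockSize samples.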
+void RunBlockerAndFramerTest(int sample_rate_hz, size_t num_channels) { + const size_t kNumSubFramesToProcess = 20; + const size_t num_bands = NumBandsForRate(sample_rate_hz); + + std::vector>> block( + num_bands, std::vector>( + num_channels, std::vector(kBlockSize, 0.f))); + std::vector>> input_sub_frame( + num_bands, std::vector>( + num_channels, std::vector(kSubFrameLength, 0.f))); + std::vector>> output_sub_frame( + num_bands, std::vector>( + num_channels, std::vector(kSubFrameLength, 0.f))); + std::vector>> output_sub_frame_view( + num_bands, std::vector>(num_channels)); + std::vector>> input_sub_frame_view( + num_bands, std::vector>(num_channels)); + FrameBlocker blocker(num_bands, num_channels); + BlockFramer framer(num_bands, num_channels); + + for (size_t sub_frame_index = 0; sub_frame_index < kNumSubFramesToProcess; + ++sub_frame_index) { + FillSubFrameView(sub_frame_index, 0, &input_sub_frame, + &input_sub_frame_view); + FillSubFrameView(sub_frame_index, 0, &output_sub_frame, + &output_sub_frame_view); + + blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block); + framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view); + + if ((sub_frame_index + 1) % 4 == 0) { + EXPECT_TRUE(blocker.IsBlockAvailable()); + } else { + EXPECT_FALSE(blocker.IsBlockAvailable()); + } + if (blocker.IsBlockAvailable()) { + blocker.ExtractBlock(&block); + framer.InsertBlock(block); + } + if (sub_frame_index > 1) { + EXPECT_TRUE(VerifySubFrame(sub_frame_index, -64, output_sub_frame_view)); + } + } +} + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) +// Verifies that the FrameBlocker crashes if the InsertSubFrameAndExtractBlock +// method is called for inputs with the wrong number of bands or band lengths. +void RunWronglySizedInsertAndExtractParametersTest( + int sample_rate_hz, + size_t correct_num_channels, + size_t num_block_bands, + size_t num_block_channels, + size_t block_length, + size_t num_sub_frame_bands, + size_t num_sub_frame_channels, + size_t sub_frame_length) { + const size_t correct_num_bands = NumBandsForRate(sample_rate_hz); + + std::vector>> block( + num_block_bands, + std::vector>(num_block_channels, + std::vector(block_length, 0.f))); + std::vector>> input_sub_frame( + num_sub_frame_bands, + std::vector>( + num_sub_frame_channels, std::vector(sub_frame_length, 0.f))); + std::vector>> input_sub_frame_view( + input_sub_frame.size(), + std::vector>(num_sub_frame_channels)); + FillSubFrameView(0, 0, &input_sub_frame, &input_sub_frame_view); + FrameBlocker blocker(correct_num_bands, correct_num_channels); + EXPECT_DEATH( + blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block), ""); +} + +// Verifies that the FrameBlocker crashes if the ExtractBlock method is called +// for inputs with the wrong number of bands or band lengths. 
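+// Note: the EXPECT_DEATH tests here and below rely on the RTC_DCHECK size
+// checks inside FrameBlocker, so they are compiled only when
+// RTC_DCHECK_IS_ON and death tests are available (see the #if guard above);
+// in release builds those checks vanish and mis-sized inputs are simply
+// undefined behaviour rather than a guaranteed crash.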
+void RunWronglySizedExtractParameterTest(int sample_rate_hz,
+                                         size_t correct_num_channels,
+                                         size_t num_block_bands,
+                                         size_t num_block_channels,
+                                         size_t block_length) {
+  const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<std::vector<float>>> correct_block(
+      correct_num_bands,
+      std::vector<std::vector<float>>(correct_num_channels,
+                                      std::vector<float>(kBlockSize, 0.f)));
+  std::vector<std::vector<std::vector<float>>> wrong_block(
+      num_block_bands,
+      std::vector<std::vector<float>>(num_block_channels,
+                                      std::vector<float>(block_length, 0.f)));
+  std::vector<std::vector<std::vector<float>>> input_sub_frame(
+      correct_num_bands,
+      std::vector<std::vector<float>>(
+          correct_num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+  std::vector<std::vector<rtc::ArrayView<float>>> input_sub_frame_view(
+      input_sub_frame.size(),
+      std::vector<rtc::ArrayView<float>>(correct_num_channels));
+  FillSubFrameView(0, 0, &input_sub_frame, &input_sub_frame_view);
+  FrameBlocker blocker(correct_num_bands, correct_num_channels);
+  blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+  blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+  blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+  blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+
+  EXPECT_DEATH(blocker.ExtractBlock(&wrong_block), "");
+}
+
+// Verifies that the FrameBlocker crashes if the ExtractBlock method is called
+// after a wrong number of previous InsertSubFrameAndExtractBlock method calls
+// have been made.
+void RunWrongExtractOrderTest(int sample_rate_hz,
+                              size_t num_channels,
+                              size_t num_preceding_api_calls) {
+  const size_t num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<std::vector<float>>> block(
+      num_bands, std::vector<std::vector<float>>(
+                     num_channels, std::vector<float>(kBlockSize, 0.f)));
+  std::vector<std::vector<std::vector<float>>> input_sub_frame(
+      num_bands, std::vector<std::vector<float>>(
+                     num_channels, std::vector<float>(kSubFrameLength, 0.f)));
+  std::vector<std::vector<rtc::ArrayView<float>>> input_sub_frame_view(
+      input_sub_frame.size(),
+      std::vector<rtc::ArrayView<float>>(num_channels));
+  FillSubFrameView(0, 0, &input_sub_frame, &input_sub_frame_view);
+  FrameBlocker blocker(num_bands, num_channels);
+  for (size_t k = 0; k < num_preceding_api_calls; ++k) {
+    blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block);
+  }
+
+  EXPECT_DEATH(blocker.ExtractBlock(&block), "");
+}
+#endif
+
+std::string ProduceDebugText(int sample_rate_hz, size_t num_channels) {
+  rtc::StringBuilder ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  ss << ", number of channels: " << num_channels;
+  return ss.Release();
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(FrameBlocker, WrongNumberOfBandsInBlockForInsertSubFrameAndExtractBlock) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t correct_num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+      RunWronglySizedInsertAndExtractParametersTest(
+          rate, correct_num_channels, wrong_num_bands, correct_num_channels,
+          kBlockSize, correct_num_bands, correct_num_channels, kSubFrameLength);
+    }
+  }
+}
+
+TEST(FrameBlocker,
+     WrongNumberOfChannelsInBlockForInsertSubFrameAndExtractBlock) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t correct_num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      const size_t wrong_num_channels = correct_num_channels + 1;
+      RunWronglySizedInsertAndExtractParametersTest(
+          rate, correct_num_channels, correct_num_bands, wrong_num_channels,
+          kBlockSize, correct_num_bands, correct_num_channels, kSubFrameLength);
+    }
+  }
+}
+
+TEST(FrameBlocker,
+     WrongNumberOfBandsInSubFrameForInsertSubFrameAndExtractBlock) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t correct_num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+      RunWronglySizedInsertAndExtractParametersTest(
+          rate, correct_num_channels, correct_num_bands, correct_num_channels,
+          kBlockSize, wrong_num_bands, correct_num_channels, kSubFrameLength);
+    }
+  }
+}
+
+TEST(FrameBlocker,
+     WrongNumberOfChannelsInSubFrameForInsertSubFrameAndExtractBlock) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t correct_num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      const size_t wrong_num_channels = correct_num_channels + 1;
+      RunWronglySizedInsertAndExtractParametersTest(
+          rate, correct_num_channels, correct_num_bands, wrong_num_channels,
+          kBlockSize, correct_num_bands, wrong_num_channels, kSubFrameLength);
+    }
+  }
+}
+
+TEST(FrameBlocker,
+     WrongNumberOfSamplesInBlockForInsertSubFrameAndExtractBlock) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t correct_num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      RunWronglySizedInsertAndExtractParametersTest(
+          rate, correct_num_channels, correct_num_bands, correct_num_channels,
+          kBlockSize - 1, correct_num_bands, correct_num_channels,
+          kSubFrameLength);
+    }
+  }
+}
+
+TEST(FrameBlocker,
+     WrongNumberOfSamplesInSubFrameForInsertSubFrameAndExtractBlock) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t correct_num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      RunWronglySizedInsertAndExtractParametersTest(
+          rate, correct_num_channels, correct_num_bands, correct_num_channels,
+          kBlockSize, correct_num_bands, correct_num_channels,
+          kSubFrameLength - 1);
+    }
+  }
+}
+
+TEST(FrameBlocker, WrongNumberOfBandsInBlockForExtractBlock) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t correct_num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+      RunWronglySizedExtractParameterTest(rate, correct_num_channels,
+                                          wrong_num_bands,
+                                          correct_num_channels, kBlockSize);
+    }
+  }
+}
+
+TEST(FrameBlocker, WrongNumberOfChannelsInBlockForExtractBlock) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t correct_num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      const size_t wrong_num_channels = correct_num_channels + 1;
+      RunWronglySizedExtractParameterTest(rate, correct_num_channels,
+                                          correct_num_bands,
+                                          wrong_num_channels, kBlockSize);
+    }
+  }
+}
+
+TEST(FrameBlocker, WrongNumberOfSamplesInBlockForExtractBlock) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t correct_num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, correct_num_channels));
+      const size_t correct_num_bands = NumBandsForRate(rate);
+      RunWronglySizedExtractParameterTest(rate, correct_num_channels,
+                                          correct_num_bands,
+                                          correct_num_channels, kBlockSize - 1);
+    }
+  }
+}
+
+TEST(FrameBlocker, WrongNumberOfPrecedingApiCallsForExtractBlock) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t num_channels : {1, 2, 4, 8}) {
+      for (size_t num_calls = 0; num_calls < 4; ++num_calls) {
+        rtc::StringBuilder ss;
+        ss << "Sample rate: " << rate;
+        ss << ", Num channels: " << num_channels;
+        ss << ", Num preceding InsertSubFrameAndExtractBlock calls: "
+           << num_calls;
+
+        SCOPED_TRACE(ss.str());
+        RunWrongExtractOrderTest(rate, num_channels, num_calls);
+      }
+    }
+  }
+}
+
+// Verifies that the verification for 0 number of channels works.
+TEST(FrameBlocker, ZeroNumberOfChannelsParameter) {
+  EXPECT_DEATH(FrameBlocker(16000, 0), "");
+}
+
+// Verifies that the verification for 0 number of bands works.
+TEST(FrameBlocker, ZeroNumberOfBandsParameter) {
+  EXPECT_DEATH(FrameBlocker(0, 1), "");
+}
+
+// Verifies that the verification for null sub_frame pointer works.
+TEST(FrameBlocker, NullBlockParameter) {
+  std::vector<std::vector<std::vector<float>>> sub_frame(
+      1, std::vector<std::vector<float>>(
+             1, std::vector<float>(kSubFrameLength, 0.f)));
+  std::vector<std::vector<rtc::ArrayView<float>>> sub_frame_view(
+      sub_frame.size());
+  FillSubFrameView(0, 0, &sub_frame, &sub_frame_view);
+  EXPECT_DEATH(
+      FrameBlocker(1, 1).InsertSubFrameAndExtractBlock(sub_frame_view, nullptr),
+      "");
+}
+
+#endif
+
+TEST(FrameBlocker, BlockBitexactness) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, num_channels));
+      RunBlockerTest(rate, num_channels);
+    }
+  }
+}
+
+TEST(FrameBlocker, BlockerAndFramer) {
+  for (auto rate : {16000, 32000, 48000}) {
+    for (size_t num_channels : {1, 2, 4, 8}) {
+      SCOPED_TRACE(ProduceDebugText(rate, num_channels));
+      RunBlockerAndFramerTest(rate, num_channels);
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/fullband_erle_estimator.cc b/audio_processing/aec3/fullband_erle_estimator.cc
new file mode 100644
index 0000000..cdb8cd6
--- /dev/null
+++ b/audio_processing/aec3/fullband_erle_estimator.cc
@@ -0,0 +1,201 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/fullband_erle_estimator.h"
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace {
+constexpr float kEpsilon = 1e-3f;
+constexpr float kX2BandEnergyThreshold = 44015068.0f;
+constexpr int kBlocksToHoldErle = 100;
+constexpr int kPointsToAccumulate = 6;
+}  // namespace
+
+FullBandErleEstimator::FullBandErleEstimator(
+    const EchoCanceller3Config::Erle& config,
+    size_t num_capture_channels)
+    : min_erle_log2_(FastApproxLog2f(config.min + kEpsilon)),
+      max_erle_lf_log2(FastApproxLog2f(config.max_l + kEpsilon)),
+      hold_counters_time_domain_(num_capture_channels, 0),
+      erle_time_domain_log2_(num_capture_channels, min_erle_log2_),
+      instantaneous_erle_(num_capture_channels, ErleInstantaneous(config)),
+      linear_filters_qualities_(num_capture_channels) {
+  Reset();
+}
+
+FullBandErleEstimator::~FullBandErleEstimator() = default;
+
+void FullBandErleEstimator::Reset() {
+  for (auto& instantaneous_erle_ch : instantaneous_erle_) {
+    instantaneous_erle_ch.Reset();
+  }
+
+  UpdateQualityEstimates();
+  std::fill(erle_time_domain_log2_.begin(), erle_time_domain_log2_.end(),
+            min_erle_log2_);
+  std::fill(hold_counters_time_domain_.begin(),
+            hold_counters_time_domain_.end(), 0);
+}
+
+void FullBandErleEstimator::Update(
+    rtc::ArrayView<const float> X2,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+    const std::vector<bool>& converged_filters) {
+  for (size_t ch = 0; ch < Y2.size(); ++ch) {
+    if (converged_filters[ch]) {
+      // Computes the fullband ERLE.
+      const float X2_sum = std::accumulate(X2.begin(), X2.end(), 0.0f);
+      if (X2_sum > kX2BandEnergyThreshold * X2.size()) {
+        const float Y2_sum =
+            std::accumulate(Y2[ch].begin(), Y2[ch].end(), 0.0f);
+        const float E2_sum =
+            std::accumulate(E2[ch].begin(), E2[ch].end(), 0.0f);
+        if (instantaneous_erle_[ch].Update(Y2_sum, E2_sum)) {
+          hold_counters_time_domain_[ch] = kBlocksToHoldErle;
+          erle_time_domain_log2_[ch] +=
+              0.1f * ((instantaneous_erle_[ch].GetInstErleLog2().value()) -
+                      erle_time_domain_log2_[ch]);
+          erle_time_domain_log2_[ch] = rtc::SafeClamp(
+              erle_time_domain_log2_[ch], min_erle_log2_, max_erle_lf_log2);
+        }
+      }
+    }
+    --hold_counters_time_domain_[ch];
+    if (hold_counters_time_domain_[ch] <= 0) {
+      erle_time_domain_log2_[ch] =
+          std::max(min_erle_log2_, erle_time_domain_log2_[ch] - 0.044f);
+    }
+    if (hold_counters_time_domain_[ch] == 0) {
+      instantaneous_erle_[ch].ResetAccumulators();
+    }
+  }
+
+  UpdateQualityEstimates();
+}
+
+void FullBandErleEstimator::Dump(
+    const std::unique_ptr<ApmDataDumper>& data_dumper) const {
+  data_dumper->DumpRaw("aec3_fullband_erle_log2", FullbandErleLog2());
+  instantaneous_erle_[0].Dump(data_dumper);
+}
+
+void FullBandErleEstimator::UpdateQualityEstimates() {
+  for (size_t ch = 0; ch < instantaneous_erle_.size(); ++ch) {
+    linear_filters_qualities_[ch] =
+        instantaneous_erle_[ch].GetQualityEstimate();
+  }
+}
+
+FullBandErleEstimator::ErleInstantaneous::ErleInstantaneous(
+    const EchoCanceller3Config::Erle& config)
+    : clamp_inst_quality_to_zero_(config.clamp_quality_estimate_to_zero),
+      clamp_inst_quality_to_one_(config.clamp_quality_estimate_to_one) {
+  Reset();
+}
+
+FullBandErleEstimator::ErleInstantaneous::~ErleInstantaneous() = default;
+
+bool FullBandErleEstimator::ErleInstantaneous::Update(const float Y2_sum,
+                                                      const float E2_sum) {
+  bool update_estimates = false;
+  E2_acum_ += E2_sum;
+  Y2_acum_ += Y2_sum;
+  num_points_++;
+  if (num_points_ == kPointsToAccumulate) {
+    if (E2_acum_ > 0.f) {
+      update_estimates = true;
+      erle_log2_ = FastApproxLog2f(Y2_acum_ / E2_acum_ + kEpsilon);
+    }
+    num_points_ = 0;
+    E2_acum_ = 0.f;
+    Y2_acum_ = 0.f;
+  }
+
+  if (update_estimates) {
+    UpdateMaxMin();
+    UpdateQualityEstimate();
+  }
+  return update_estimates;
+}
+
+void FullBandErleEstimator::ErleInstantaneous::Reset() {
+  ResetAccumulators();
+  max_erle_log2_ = -10.f;  // -30 dB.
+  min_erle_log2_ = 33.f;   // 100 dB.
+  inst_quality_estimate_ = 0.f;
+}
+
+void FullBandErleEstimator::ErleInstantaneous::ResetAccumulators() {
+  erle_log2_ = absl::nullopt;
+  inst_quality_estimate_ = 0.f;
+  num_points_ = 0;
+  E2_acum_ = 0.f;
+  Y2_acum_ = 0.f;
+}
+
+void FullBandErleEstimator::ErleInstantaneous::Dump(
+    const std::unique_ptr<ApmDataDumper>& data_dumper) const {
+  data_dumper->DumpRaw("aec3_fullband_erle_inst_log2",
+                       erle_log2_ ? *erle_log2_ : -10.f);
+  data_dumper->DumpRaw(
+      "aec3_erle_instantaneous_quality",
+      GetQualityEstimate() ? GetQualityEstimate().value() : 0.f);
+  data_dumper->DumpRaw("aec3_fullband_erle_max_log2", max_erle_log2_);
+  data_dumper->DumpRaw("aec3_fullband_erle_min_log2", min_erle_log2_);
+}
+
+void FullBandErleEstimator::ErleInstantaneous::UpdateMaxMin() {
+  RTC_DCHECK(erle_log2_);
+  if (erle_log2_.value() > max_erle_log2_) {
+    max_erle_log2_ = erle_log2_.value();
+  } else {
+    max_erle_log2_ -= 0.0004;  // Forget factor, approx 1 dB every 3 sec.
+  }
+
+  if (erle_log2_.value() < min_erle_log2_) {
+    min_erle_log2_ = erle_log2_.value();
+  } else {
+    min_erle_log2_ += 0.0004;  // Forget factor, approx 1 dB every 3 sec.
+  }
+}
+
+void FullBandErleEstimator::ErleInstantaneous::UpdateQualityEstimate() {
+  const float alpha = 0.07f;
+  float quality_estimate = 0.f;
+  RTC_DCHECK(erle_log2_);
+  // TODO(peah): Currently, the estimate can become less than 0; this should
+  // be corrected.
+  if (max_erle_log2_ > min_erle_log2_) {
+    quality_estimate = (erle_log2_.value() - min_erle_log2_) /
+                       (max_erle_log2_ - min_erle_log2_);
+  }
+
+  if (quality_estimate > inst_quality_estimate_) {
+    inst_quality_estimate_ = quality_estimate;
+  } else {
+    inst_quality_estimate_ +=
+        alpha * (quality_estimate - inst_quality_estimate_);
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/fullband_erle_estimator.h b/audio_processing/aec3/fullband_erle_estimator.h
new file mode 100644
index 0000000..2ec215b
--- /dev/null
+++ b/audio_processing/aec3/fullband_erle_estimator.h
@@ -0,0 +1,118 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FULLBAND_ERLE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FULLBAND_ERLE_ESTIMATOR_H_
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+// Estimates the echo return loss enhancement using the energy of all the
+// frequency bands.
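+// The estimate is tracked in log2 units: an ERLE that reduces the echo energy
+// by a factor of 8 (roughly 9 dB) is stored as log2(8) = 3.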
+class FullBandErleEstimator {
+ public:
+  FullBandErleEstimator(const EchoCanceller3Config::Erle& config,
+                        size_t num_capture_channels);
+  ~FullBandErleEstimator();
+  // Resets the ERLE estimator.
+  void Reset();
+
+  // Updates the ERLE estimator.
+  void Update(rtc::ArrayView<const float> X2,
+              rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+              rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+              const std::vector<bool>& converged_filters);
+
+  // Returns the fullband ERLE estimate in log2 units.
+  float FullbandErleLog2() const {
+    float min_erle = erle_time_domain_log2_[0];
+    for (size_t ch = 1; ch < erle_time_domain_log2_.size(); ++ch) {
+      min_erle = std::min(min_erle, erle_time_domain_log2_[ch]);
+    }
+    return min_erle;
+  }
+
+  // Returns an estimate of the current linear filter quality as a float
+  // between 0 and 1, where 1 maps to the highest possible quality.
+  rtc::ArrayView<const absl::optional<float>> GetInstLinearQualityEstimates()
+      const {
+    return linear_filters_qualities_;
+  }
+
+  void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
+
+ private:
+  void UpdateQualityEstimates();
+
+  class ErleInstantaneous {
+   public:
+    explicit ErleInstantaneous(const EchoCanceller3Config::Erle& config);
+    ~ErleInstantaneous();
+
+    // Updates the estimator with a new point; returns true if the
+    // instantaneous ERLE was updated due to having accumulated enough points
+    // for performing the estimate.
+    bool Update(const float Y2_sum, const float E2_sum);
+    // Resets the instantaneous ERLE estimator to its initial state.
+    void Reset();
+    // Resets the members related to an instantaneous estimate.
+    void ResetAccumulators();
+    // Returns the instantaneous ERLE in log2 units.
+    absl::optional<float> GetInstErleLog2() const { return erle_log2_; }
+    // Gets an indication between 0 and 1 of the performance of the linear
+    // filter for the current time instant.
+    absl::optional<float> GetQualityEstimate() const {
+      if (erle_log2_) {
+        float value = inst_quality_estimate_;
+        if (clamp_inst_quality_to_zero_) {
+          value = std::max(0.f, value);
+        }
+        if (clamp_inst_quality_to_one_) {
+          value = std::min(1.f, value);
+        }
+        return absl::optional<float>(value);
+      }
+      return absl::nullopt;
+    }
+    void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
+
+   private:
+    void UpdateMaxMin();
+    void UpdateQualityEstimate();
+    const bool clamp_inst_quality_to_zero_;
+    const bool clamp_inst_quality_to_one_;
+    absl::optional<float> erle_log2_;
+    float inst_quality_estimate_;
+    float max_erle_log2_;
+    float min_erle_log2_;
+    float Y2_acum_;
+    float E2_acum_;
+    int num_points_;
+  };
+
+  const float min_erle_log2_;
+  const float max_erle_lf_log2;
+  std::vector<int> hold_counters_time_domain_;
+  std::vector<float> erle_time_domain_log2_;
+  std::vector<ErleInstantaneous> instantaneous_erle_;
+  std::vector<absl::optional<float>> linear_filters_qualities_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_FULLBAND_ERLE_ESTIMATOR_H_
diff --git a/audio_processing/aec3/main_filter_update_gain.cc b/audio_processing/aec3/main_filter_update_gain.cc
new file mode 100644
index 0000000..50d4759
--- /dev/null
+++ b/audio_processing/aec3/main_filter_update_gain.cc
@@ -0,0 +1,174 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */ + +#include "audio_processing/aec3/main_filter_update_gain.h" + +#include +#include + +#include "audio_processing/aec3/adaptive_fir_filter.h" +#include "audio_processing/aec3/aec3_common.h" +#include "audio_processing/aec3/echo_path_variability.h" +#include "audio_processing/aec3/fft_data.h" +#include "audio_processing/aec3/render_signal_analyzer.h" +#include "audio_processing/aec3/subtractor_output.h" +#include "audio_processing/logging/apm_data_dumper.h" +#include "rtc_base/atomic_ops.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace { + +constexpr float kHErrorInitial = 10000.f; +constexpr int kPoorExcitationCounterInitial = 1000; + +} // namespace + +int MainFilterUpdateGain::instance_count_ = 0; + +MainFilterUpdateGain::MainFilterUpdateGain( + const EchoCanceller3Config::Filter::MainConfiguration& config, + size_t config_change_duration_blocks) + : data_dumper_( + new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))), + config_change_duration_blocks_( + static_cast(config_change_duration_blocks)), + poor_excitation_counter_(kPoorExcitationCounterInitial) { + SetConfig(config, true); + H_error_.fill(kHErrorInitial); + RTC_DCHECK_LT(0, config_change_duration_blocks_); + one_by_config_change_duration_blocks_ = 1.f / config_change_duration_blocks_; +} + +MainFilterUpdateGain::~MainFilterUpdateGain() {} + +void MainFilterUpdateGain::HandleEchoPathChange( + const EchoPathVariability& echo_path_variability) { + if (echo_path_variability.gain_change) { + // TODO(bugs.webrtc.org/9526) Handle gain changes. + } + + if (echo_path_variability.delay_change != + EchoPathVariability::DelayAdjustment::kNone) { + H_error_.fill(kHErrorInitial); + } + + if (!echo_path_variability.gain_change) { + poor_excitation_counter_ = kPoorExcitationCounterInitial; + call_counter_ = 0; + } +} + +void MainFilterUpdateGain::Compute( + const std::array& render_power, + const RenderSignalAnalyzer& render_signal_analyzer, + const SubtractorOutput& subtractor_output, + rtc::ArrayView erl, + size_t size_partitions, + bool saturated_capture_signal, + FftData* gain_fft) { + RTC_DCHECK(gain_fft); + // Introducing shorter notation to improve readability. + const FftData& E_main = subtractor_output.E_main; + const auto& E2_main = subtractor_output.E2_main; + const auto& E2_shadow = subtractor_output.E2_shadow; + FftData* G = gain_fft; + const auto& X2 = render_power; + + ++call_counter_; + + UpdateCurrentConfig(); + + if (render_signal_analyzer.PoorSignalExcitation()) { + poor_excitation_counter_ = 0; + } + + // Do not update the filter if the render is not sufficiently excited. + if (++poor_excitation_counter_ < size_partitions || + saturated_capture_signal || call_counter_ <= size_partitions) { + G->re.fill(0.f); + G->im.fill(0.f); + } else { + // Corresponds to WGN of power -39 dBFS. + std::array mu; + // mu = H_error / (0.5* H_error* X2 + n * E2). + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + if (X2[k] >= current_config_.noise_gate) { + mu[k] = H_error_[k] / + (0.5f * H_error_[k] * X2[k] + size_partitions * E2_main[k]); + } else { + mu[k] = 0.f; + } + } + + // Avoid updating the filter close to narrow bands in the render signals. + render_signal_analyzer.MaskRegionsAroundNarrowBands(&mu); + + // H_error = H_error - 0.5 * mu * X2 * H_error. + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + H_error_[k] -= 0.5f * mu[k] * X2[k] * H_error_[k]; + } + + // G = mu * E. 
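+    // (The gain is thus the per-bin NLMS step size applied to the error
+    // spectrum: mu is large while the filter error estimate H_error dominates
+    // the denominator and decays towards zero as the filter converges.)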
+ for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + G->re[k] = mu[k] * E_main.re[k]; + G->im[k] = mu[k] * E_main.im[k]; + } + } + + // H_error = H_error + factor * erl. + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + if (E2_shadow[k] >= E2_main[k]) { + H_error_[k] += current_config_.leakage_converged * erl[k]; + } else { + H_error_[k] += current_config_.leakage_diverged * erl[k]; + } + + H_error_[k] = std::max(H_error_[k], current_config_.error_floor); + H_error_[k] = std::min(H_error_[k], current_config_.error_ceil); + } + + data_dumper_->DumpRaw("aec3_main_gain_H_error", H_error_); +} + +void MainFilterUpdateGain::UpdateCurrentConfig() { + RTC_DCHECK_GE(config_change_duration_blocks_, config_change_counter_); + if (config_change_counter_ > 0) { + if (--config_change_counter_ > 0) { + auto average = [](float from, float to, float from_weight) { + return from * from_weight + to * (1.f - from_weight); + }; + + float change_factor = + config_change_counter_ * one_by_config_change_duration_blocks_; + + current_config_.leakage_converged = + average(old_target_config_.leakage_converged, + target_config_.leakage_converged, change_factor); + current_config_.leakage_diverged = + average(old_target_config_.leakage_diverged, + target_config_.leakage_diverged, change_factor); + current_config_.error_floor = + average(old_target_config_.error_floor, target_config_.error_floor, + change_factor); + current_config_.error_ceil = + average(old_target_config_.error_ceil, target_config_.error_ceil, + change_factor); + current_config_.noise_gate = + average(old_target_config_.noise_gate, target_config_.noise_gate, + change_factor); + } else { + current_config_ = old_target_config_ = target_config_; + } + } + RTC_DCHECK_LE(0, config_change_counter_); +} + +} // namespace webrtc diff --git a/audio_processing/aec3/main_filter_update_gain.h b/audio_processing/aec3/main_filter_update_gain.h new file mode 100644 index 0000000..ea46196 --- /dev/null +++ b/audio_processing/aec3/main_filter_update_gain.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_MAIN_FILTER_UPDATE_GAIN_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_MAIN_FILTER_UPDATE_GAIN_H_ + +#include + +#include +#include + +#include "rtc_base/array_view.h" +#include "api/echo_canceller3_config.h" +#include "audio_processing/aec3/aec3_common.h" + +namespace webrtc { + +class AdaptiveFirFilter; +class ApmDataDumper; +struct EchoPathVariability; +struct FftData; +class RenderSignalAnalyzer; +struct SubtractorOutput; + +// Provides functionality for computing the adaptive gain for the main filter. +class MainFilterUpdateGain { + public: + MainFilterUpdateGain( + const EchoCanceller3Config::Filter::MainConfiguration& config, + size_t config_change_duration_blocks); + ~MainFilterUpdateGain(); + + MainFilterUpdateGain(const MainFilterUpdateGain&) = delete; + MainFilterUpdateGain& operator=(const MainFilterUpdateGain&) = delete; + + // Takes action in the case of a known echo path change. + void HandleEchoPathChange(const EchoPathVariability& echo_path_variability); + + // Computes the gain. 
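+  // A sketch of the typical call sequence, mirroring the unit tests (the
+  // names of the surrounding objects are illustrative):
+  //   std::array<float, kFftLengthBy2Plus1> X2;
+  //   render_buffer.SpectralSum(filter.SizePartitions(), &X2);
+  //   FftData G;
+  //   gain.Compute(X2, render_signal_analyzer, subtractor_output, erl,
+  //                filter.SizePartitions(), saturated_capture, &G);
+  //   filter.Adapt(render_buffer, G, &h);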
+ void Compute(const std::array& render_power, + const RenderSignalAnalyzer& render_signal_analyzer, + const SubtractorOutput& subtractor_output, + rtc::ArrayView erl, + size_t size_partitions, + bool saturated_capture_signal, + FftData* gain_fft); + + // Sets a new config. + void SetConfig(const EchoCanceller3Config::Filter::MainConfiguration& config, + bool immediate_effect) { + if (immediate_effect) { + old_target_config_ = current_config_ = target_config_ = config; + config_change_counter_ = 0; + } else { + old_target_config_ = current_config_; + target_config_ = config; + config_change_counter_ = config_change_duration_blocks_; + } + } + + private: + static int instance_count_; + std::unique_ptr data_dumper_; + const int config_change_duration_blocks_; + float one_by_config_change_duration_blocks_; + EchoCanceller3Config::Filter::MainConfiguration current_config_; + EchoCanceller3Config::Filter::MainConfiguration target_config_; + EchoCanceller3Config::Filter::MainConfiguration old_target_config_; + std::array H_error_; + size_t poor_excitation_counter_; + size_t call_counter_ = 0; + int config_change_counter_ = 0; + + // Updates the current config towards the target config. + void UpdateCurrentConfig(); +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_MAIN_FILTER_UPDATE_GAIN_H_ diff --git a/audio_processing/aec3/main_filter_update_gain_unittest.cc b/audio_processing/aec3/main_filter_update_gain_unittest.cc new file mode 100644 index 0000000..f79b2d6 --- /dev/null +++ b/audio_processing/aec3/main_filter_update_gain_unittest.cc @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/main_filter_update_gain.h" + +#include +#include +#include + +#include "modules/audio_processing/aec3/adaptive_fir_filter.h" +#include "modules/audio_processing/aec3/adaptive_fir_filter_erl.h" +#include "modules/audio_processing/aec3/aec_state.h" +#include "modules/audio_processing/aec3/render_delay_buffer.h" +#include "modules/audio_processing/aec3/render_signal_analyzer.h" +#include "modules/audio_processing/aec3/shadow_filter_update_gain.h" +#include "modules/audio_processing/aec3/subtractor_output.h" +#include "modules/audio_processing/logging/apm_data_dumper.h" +#include "modules/audio_processing/test/echo_canceller_test_tools.h" +#include "rtc_base/numerics/safe_minmax.h" +#include "rtc_base/random.h" +#include "rtc_base/strings/string_builder.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +// Method for performing the simulations needed to test the main filter update +// gain functionality. 
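+// The simulation feeds random render blocks through a render delay buffer,
+// forms the capture signal as a delayed copy of the render, adapts both the
+// shadow and the main filter, and returns the error, capture and gain of the
+// last processed block for inspection.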
+void RunFilterUpdateTest(int num_blocks_to_process, + size_t delay_samples, + int filter_length_blocks, + const std::vector& blocks_with_echo_path_changes, + const std::vector& blocks_with_saturation, + bool use_silent_render_in_second_half, + std::array* e_last_block, + std::array* y_last_block, + FftData* G_last_block) { + ApmDataDumper data_dumper(42); + Aec3Optimization optimization = DetectOptimization(); + constexpr size_t kNumRenderChannels = 1; + constexpr size_t kNumCaptureChannels = 1; + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + + EchoCanceller3Config config; + config.filter.main.length_blocks = filter_length_blocks; + config.filter.shadow.length_blocks = filter_length_blocks; + AdaptiveFirFilter main_filter(config.filter.main.length_blocks, + config.filter.main.length_blocks, + config.filter.config_change_duration_blocks, + kNumRenderChannels, optimization, &data_dumper); + AdaptiveFirFilter shadow_filter( + config.filter.shadow.length_blocks, config.filter.shadow.length_blocks, + config.filter.config_change_duration_blocks, kNumRenderChannels, + optimization, &data_dumper); + std::vector>> H2( + kNumCaptureChannels, std::vector>( + main_filter.max_filter_size_partitions(), + std::array())); + for (auto& H2_ch : H2) { + for (auto& H2_k : H2_ch) { + H2_k.fill(0.f); + } + } + std::vector> h( + kNumCaptureChannels, + std::vector( + GetTimeDomainLength(main_filter.max_filter_size_partitions()), 0.f)); + + Aec3Fft fft; + std::array x_old; + x_old.fill(0.f); + ShadowFilterUpdateGain shadow_gain( + config.filter.shadow, config.filter.config_change_duration_blocks); + MainFilterUpdateGain main_gain(config.filter.main, + config.filter.config_change_duration_blocks); + Random random_generator(42U); + std::vector>> x( + kNumBands, std::vector>( + kNumRenderChannels, std::vector(kBlockSize, 0.f))); + std::vector y(kBlockSize, 0.f); + config.delay.default_delay = 1; + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, kSampleRateHz, kNumRenderChannels)); + AecState aec_state(config, kNumCaptureChannels); + RenderSignalAnalyzer render_signal_analyzer(config); + absl::optional delay_estimate; + std::array s_scratch; + std::array s; + FftData S; + FftData G; + std::vector output(kNumCaptureChannels); + for (auto& subtractor_output : output) { + subtractor_output.Reset(); + } + FftData& E_main = output[0].E_main; + FftData E_shadow; + std::vector> Y2(kNumCaptureChannels); + std::vector> E2_main( + kNumCaptureChannels); + std::array& e_main = output[0].e_main; + std::array& e_shadow = output[0].e_shadow; + for (auto& Y2_ch : Y2) { + Y2_ch.fill(0.f); + } + + constexpr float kScale = 1.0f / kFftLengthBy2; + + DelayBuffer delay_buffer(delay_samples); + for (int k = 0; k < num_blocks_to_process; ++k) { + // Handle echo path changes. + if (std::find(blocks_with_echo_path_changes.begin(), + blocks_with_echo_path_changes.end(), + k) != blocks_with_echo_path_changes.end()) { + main_filter.HandleEchoPathChange(); + } + + // Handle saturation. + const bool saturation = + std::find(blocks_with_saturation.begin(), blocks_with_saturation.end(), + k) != blocks_with_saturation.end(); + + // Create the render signal. 
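+    // White noise is used as excitation; when use_silent_render_in_second_half
+    // is set, the second half of the simulation instead uses a zeroed render
+    // signal so that the adaptation is starved of excitation.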
+ if (use_silent_render_in_second_half && k > num_blocks_to_process / 2) { + for (size_t band = 0; band < x.size(); ++band) { + for (size_t channel = 0; channel < x[band].size(); ++channel) { + std::fill(x[band][channel].begin(), x[band][channel].end(), 0.f); + } + } + } else { + for (size_t band = 0; band < x.size(); ++band) { + for (size_t channel = 0; channel < x[band].size(); ++channel) { + RandomizeSampleVector(&random_generator, x[band][channel]); + } + } + } + delay_buffer.Delay(x[0][0], y); + + render_delay_buffer->Insert(x); + if (k == 0) { + render_delay_buffer->Reset(); + } + render_delay_buffer->PrepareCaptureProcessing(); + + render_signal_analyzer.Update(*render_delay_buffer->GetRenderBuffer(), + aec_state.MinDirectPathFilterDelay()); + + // Apply the main filter. + main_filter.Filter(*render_delay_buffer->GetRenderBuffer(), &S); + fft.Ifft(S, &s_scratch); + std::transform(y.begin(), y.end(), s_scratch.begin() + kFftLengthBy2, + e_main.begin(), + [&](float a, float b) { return a - b * kScale; }); + std::for_each(e_main.begin(), e_main.end(), + [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); }); + fft.ZeroPaddedFft(e_main, Aec3Fft::Window::kRectangular, &E_main); + for (size_t k = 0; k < kBlockSize; ++k) { + s[k] = kScale * s_scratch[k + kFftLengthBy2]; + } + + // Apply the shadow filter. + shadow_filter.Filter(*render_delay_buffer->GetRenderBuffer(), &S); + fft.Ifft(S, &s_scratch); + std::transform(y.begin(), y.end(), s_scratch.begin() + kFftLengthBy2, + e_shadow.begin(), + [&](float a, float b) { return a - b * kScale; }); + std::for_each(e_shadow.begin(), e_shadow.end(), + [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); }); + fft.ZeroPaddedFft(e_shadow, Aec3Fft::Window::kRectangular, &E_shadow); + + // Compute spectra for future use. + E_main.Spectrum(Aec3Optimization::kNone, output[0].E2_main); + E_shadow.Spectrum(Aec3Optimization::kNone, output[0].E2_shadow); + + // Adapt the shadow filter. + std::array render_power; + render_delay_buffer->GetRenderBuffer()->SpectralSum( + shadow_filter.SizePartitions(), &render_power); + shadow_gain.Compute(render_power, render_signal_analyzer, E_shadow, + shadow_filter.SizePartitions(), saturation, &G); + shadow_filter.Adapt(*render_delay_buffer->GetRenderBuffer(), G); + + // Adapt the main filter + render_delay_buffer->GetRenderBuffer()->SpectralSum( + main_filter.SizePartitions(), &render_power); + + std::array erl; + ComputeErl(optimization, H2[0], erl); + main_gain.Compute(render_power, render_signal_analyzer, output[0], erl, + main_filter.SizePartitions(), saturation, &G); + main_filter.Adapt(*render_delay_buffer->GetRenderBuffer(), G, &h[0]); + + // Update the delay. 
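+    // The AEC state is refreshed with the latest filter frequency response
+    // and error spectra so that the next iteration sees up-to-date estimates.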
+ aec_state.HandleEchoPathChange(EchoPathVariability( + false, EchoPathVariability::DelayAdjustment::kNone, false)); + main_filter.ComputeFrequencyResponse(&H2[0]); + std::copy(output[0].E2_main.begin(), output[0].E2_main.end(), + E2_main[0].begin()); + aec_state.Update(delay_estimate, H2, h, + *render_delay_buffer->GetRenderBuffer(), E2_main, Y2, + output); + } + + std::copy(e_main.begin(), e_main.end(), e_last_block->begin()); + std::copy(y.begin(), y.end(), y_last_block->begin()); + std::copy(G.re.begin(), G.re.end(), G_last_block->re.begin()); + std::copy(G.im.begin(), G.im.end(), G_last_block->im.begin()); +} + +std::string ProduceDebugText(int filter_length_blocks) { + rtc::StringBuilder ss; + ss << "Length: " << filter_length_blocks; + return ss.Release(); +} + +std::string ProduceDebugText(size_t delay, int filter_length_blocks) { + rtc::StringBuilder ss; + ss << "Delay: " << delay << ", "; + ss << ProduceDebugText(filter_length_blocks); + return ss.Release(); +} + +} // namespace + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) + +// Verifies that the check for non-null output gain parameter works. +TEST(MainFilterUpdateGain, NullDataOutputGain) { + ApmDataDumper data_dumper(42); + EchoCanceller3Config config; + RenderSignalAnalyzer analyzer(config); + SubtractorOutput output; + MainFilterUpdateGain gain(config.filter.main, + config.filter.config_change_duration_blocks); + std::array render_power; + render_power.fill(0.f); + std::array erl; + erl.fill(0.f); + EXPECT_DEATH(gain.Compute(render_power, analyzer, output, erl, + config.filter.main.length_blocks, false, nullptr), + ""); +} + +#endif + +// Verifies that the gain formed causes the filter using it to converge. +TEST(MainFilterUpdateGain, GainCausesFilterToConverge) { + std::vector blocks_with_echo_path_changes; + std::vector blocks_with_saturation; + for (size_t filter_length_blocks : {12, 20, 30}) { + for (size_t delay_samples : {0, 64, 150, 200, 301}) { + SCOPED_TRACE(ProduceDebugText(delay_samples, filter_length_blocks)); + + std::array e; + std::array y; + FftData G; + + RunFilterUpdateTest(600, delay_samples, filter_length_blocks, + blocks_with_echo_path_changes, blocks_with_saturation, + false, &e, &y, &G); + + // Verify that the main filter is able to perform well. + // Use different criteria to take overmodelling into account. + if (filter_length_blocks == 12) { + EXPECT_LT(1000 * std::inner_product(e.begin(), e.end(), e.begin(), 0.f), + std::inner_product(y.begin(), y.end(), y.begin(), 0.f)); + } else { + EXPECT_LT(std::inner_product(e.begin(), e.end(), e.begin(), 0.f), + std::inner_product(y.begin(), y.end(), y.begin(), 0.f)); + } + } + } +} + +// Verifies that the magnitude of the gain on average decreases for a +// persistently exciting signal. 
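+// As the filter converges on the stationary white-noise excitation, H_error
+// shrinks and with it the NLMS step size, so the gain power measured after
+// 250, 500 and 750 blocks should form a decreasing sequence.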
+TEST(MainFilterUpdateGain, DecreasingGain) { + std::vector blocks_with_echo_path_changes; + std::vector blocks_with_saturation; + + std::array e; + std::array y; + FftData G_a; + FftData G_b; + FftData G_c; + std::array G_a_power; + std::array G_b_power; + std::array G_c_power; + + RunFilterUpdateTest(250, 65, 12, blocks_with_echo_path_changes, + blocks_with_saturation, false, &e, &y, &G_a); + RunFilterUpdateTest(500, 65, 12, blocks_with_echo_path_changes, + blocks_with_saturation, false, &e, &y, &G_b); + RunFilterUpdateTest(750, 65, 12, blocks_with_echo_path_changes, + blocks_with_saturation, false, &e, &y, &G_c); + + G_a.Spectrum(Aec3Optimization::kNone, G_a_power); + G_b.Spectrum(Aec3Optimization::kNone, G_b_power); + G_c.Spectrum(Aec3Optimization::kNone, G_c_power); + + EXPECT_GT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.), + std::accumulate(G_b_power.begin(), G_b_power.end(), 0.)); + + EXPECT_GT(std::accumulate(G_b_power.begin(), G_b_power.end(), 0.), + std::accumulate(G_c_power.begin(), G_c_power.end(), 0.)); +} + +// Verifies that the gain is zero when there is saturation and that the internal +// error estimates cause the gain to increase after a period of saturation. +TEST(MainFilterUpdateGain, SaturationBehavior) { + std::vector blocks_with_echo_path_changes; + std::vector blocks_with_saturation; + for (int k = 99; k < 200; ++k) { + blocks_with_saturation.push_back(k); + } + + for (size_t filter_length_blocks : {12, 20, 30}) { + SCOPED_TRACE(ProduceDebugText(filter_length_blocks)); + std::array e; + std::array y; + FftData G_a; + FftData G_b; + FftData G_a_ref; + G_a_ref.re.fill(0.f); + G_a_ref.im.fill(0.f); + + std::array G_a_power; + std::array G_b_power; + + RunFilterUpdateTest(100, 65, filter_length_blocks, + blocks_with_echo_path_changes, blocks_with_saturation, + false, &e, &y, &G_a); + + EXPECT_EQ(G_a_ref.re, G_a.re); + EXPECT_EQ(G_a_ref.im, G_a.im); + + RunFilterUpdateTest(99, 65, filter_length_blocks, + blocks_with_echo_path_changes, blocks_with_saturation, + false, &e, &y, &G_a); + RunFilterUpdateTest(201, 65, filter_length_blocks, + blocks_with_echo_path_changes, blocks_with_saturation, + false, &e, &y, &G_b); + + G_a.Spectrum(Aec3Optimization::kNone, G_a_power); + G_b.Spectrum(Aec3Optimization::kNone, G_b_power); + + EXPECT_LT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.), + std::accumulate(G_b_power.begin(), G_b_power.end(), 0.)); + } +} + +// Verifies that the gain increases after an echo path change. +// TODO(peah): Correct and reactivate this test. 
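+// (An echo path change with a delay adjustment resets H_error to its initial
+// value, which should show up as a larger update gain in the following
+// blocks.)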
+TEST(MainFilterUpdateGain, DISABLED_EchoPathChangeBehavior) { + for (size_t filter_length_blocks : {12, 20, 30}) { + SCOPED_TRACE(ProduceDebugText(filter_length_blocks)); + std::vector blocks_with_echo_path_changes; + std::vector blocks_with_saturation; + blocks_with_echo_path_changes.push_back(99); + + std::array e; + std::array y; + FftData G_a; + FftData G_b; + std::array G_a_power; + std::array G_b_power; + + RunFilterUpdateTest(100, 65, filter_length_blocks, + blocks_with_echo_path_changes, blocks_with_saturation, + false, &e, &y, &G_a); + RunFilterUpdateTest(101, 65, filter_length_blocks, + blocks_with_echo_path_changes, blocks_with_saturation, + false, &e, &y, &G_b); + + G_a.Spectrum(Aec3Optimization::kNone, G_a_power); + G_b.Spectrum(Aec3Optimization::kNone, G_b_power); + + EXPECT_LT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.), + std::accumulate(G_b_power.begin(), G_b_power.end(), 0.)); + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/matched_filter.cc b/audio_processing/aec3/matched_filter.cc new file mode 100644 index 0000000..d165820 --- /dev/null +++ b/audio_processing/aec3/matched_filter.cc @@ -0,0 +1,458 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "audio_processing/aec3/matched_filter.h" + +// Defines WEBRTC_ARCH_X86_FAMILY, used below. +#include "rtc_base/system/arch.h" + +#if defined(WEBRTC_HAS_NEON) +#include +#endif +#if defined(WEBRTC_ARCH_X86_FAMILY) +#include +#endif +#include +#include +#include +#include +#include + +#include "audio_processing/aec3/downsampled_render_buffer.h" +#include "audio_processing/logging/apm_data_dumper.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { +namespace aec3 { + +#if defined(WEBRTC_HAS_NEON) + +void MatchedFilterCore_NEON(size_t x_start_index, + float x2_sum_threshold, + float smoothing, + rtc::ArrayView x, + rtc::ArrayView y, + rtc::ArrayView h, + bool* filters_updated, + float* error_sum) { + const int h_size = static_cast(h.size()); + const int x_size = static_cast(x.size()); + RTC_DCHECK_EQ(0, h_size % 4); + + // Process for all samples in the sub-block. + for (size_t i = 0; i < y.size(); ++i) { + // Apply the matched filter as filter * x, and compute x * x. + + RTC_DCHECK_GT(x_size, x_start_index); + const float* x_p = &x[x_start_index]; + const float* h_p = &h[0]; + + // Initialize values for the accumulation. + float32x4_t s_128 = vdupq_n_f32(0); + float32x4_t x2_sum_128 = vdupq_n_f32(0); + float x2_sum = 0.f; + float s = 0; + + // Compute loop chunk sizes until, and after, the wraparound of the circular + // buffer for x. + const int chunk1 = + std::min(h_size, static_cast(x_size - x_start_index)); + + // Perform the loop in two chunks. + const int chunk2 = h_size - chunk1; + for (int limit : {chunk1, chunk2}) { + // Perform 128 bit vector operations. + const int limit_by_4 = limit >> 2; + for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) { + // Load the data into 128 bit vectors. + const float32x4_t x_k = vld1q_f32(x_p); + const float32x4_t h_k = vld1q_f32(h_p); + // Compute and accumulate x * x and h * x. 
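+        // (vmlaq_f32(a, b, c) computes a + b * c lane-wise, so each
+        // accumulator holds four partial sums that are combined below.)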
+ x2_sum_128 = vmlaq_f32(x2_sum_128, x_k, x_k); + s_128 = vmlaq_f32(s_128, h_k, x_k); + } + + // Perform non-vector operations for any remaining items. + for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) { + const float x_k = *x_p; + x2_sum += x_k * x_k; + s += *h_p * x_k; + } + + x_p = &x[0]; + } + + // Combine the accumulated vector and scalar values. + float* v = reinterpret_cast(&x2_sum_128); + x2_sum += v[0] + v[1] + v[2] + v[3]; + v = reinterpret_cast(&s_128); + s += v[0] + v[1] + v[2] + v[3]; + + // Compute the matched filter error. + float e = y[i] - s; + const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f; + (*error_sum) += e * e; + + // Update the matched filter estimate in an NLMS manner. + if (x2_sum > x2_sum_threshold && !saturation) { + RTC_DCHECK_LT(0.f, x2_sum); + const float alpha = smoothing * e / x2_sum; + const float32x4_t alpha_128 = vmovq_n_f32(alpha); + + // filter = filter + smoothing * (y - filter * x) * x / x * x. + float* h_p = &h[0]; + x_p = &x[x_start_index]; + + // Perform the loop in two chunks. + for (int limit : {chunk1, chunk2}) { + // Perform 128 bit vector operations. + const int limit_by_4 = limit >> 2; + for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) { + // Load the data into 128 bit vectors. + float32x4_t h_k = vld1q_f32(h_p); + const float32x4_t x_k = vld1q_f32(x_p); + // Compute h = h + alpha * x. + h_k = vmlaq_f32(h_k, alpha_128, x_k); + + // Store the result. + vst1q_f32(h_p, h_k); + } + + // Perform non-vector operations for any remaining items. + for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) { + *h_p += alpha * *x_p; + } + + x_p = &x[0]; + } + + *filters_updated = true; + } + + x_start_index = x_start_index > 0 ? x_start_index - 1 : x_size - 1; + } +} + +#endif + +#if defined(WEBRTC_ARCH_X86_FAMILY) + +void MatchedFilterCore_SSE2(size_t x_start_index, + float x2_sum_threshold, + float smoothing, + rtc::ArrayView x, + rtc::ArrayView y, + rtc::ArrayView h, + bool* filters_updated, + float* error_sum) { + const int h_size = static_cast(h.size()); + const int x_size = static_cast(x.size()); + RTC_DCHECK_EQ(0, h_size % 4); + + // Process for all samples in the sub-block. + for (size_t i = 0; i < y.size(); ++i) { + // Apply the matched filter as filter * x, and compute x * x. + + RTC_DCHECK_GT(x_size, x_start_index); + const float* x_p = &x[x_start_index]; + const float* h_p = &h[0]; + + // Initialize values for the accumulation. + __m128 s_128 = _mm_set1_ps(0); + __m128 x2_sum_128 = _mm_set1_ps(0); + float x2_sum = 0.f; + float s = 0; + + // Compute loop chunk sizes until, and after, the wraparound of the circular + // buffer for x. + const int chunk1 = + std::min(h_size, static_cast(x_size - x_start_index)); + + // Perform the loop in two chunks. + const int chunk2 = h_size - chunk1; + for (int limit : {chunk1, chunk2}) { + // Perform 128 bit vector operations. + const int limit_by_4 = limit >> 2; + for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) { + // Load the data into 128 bit vectors. + const __m128 x_k = _mm_loadu_ps(x_p); + const __m128 h_k = _mm_loadu_ps(h_p); + const __m128 xx = _mm_mul_ps(x_k, x_k); + // Compute and accumulate x * x and h * x. + x2_sum_128 = _mm_add_ps(x2_sum_128, xx); + const __m128 hx = _mm_mul_ps(h_k, x_k); + s_128 = _mm_add_ps(s_128, hx); + } + + // Perform non-vector operations for any remaining items. 
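+      // (The scalar tail covers the limit % 4 samples that do not fill a
+      // complete 128-bit vector.)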
+ for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) { + const float x_k = *x_p; + x2_sum += x_k * x_k; + s += *h_p * x_k; + } + + x_p = &x[0]; + } + + // Combine the accumulated vector and scalar values. + float* v = reinterpret_cast(&x2_sum_128); + x2_sum += v[0] + v[1] + v[2] + v[3]; + v = reinterpret_cast(&s_128); + s += v[0] + v[1] + v[2] + v[3]; + + // Compute the matched filter error. + float e = y[i] - s; + const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f; + (*error_sum) += e * e; + + // Update the matched filter estimate in an NLMS manner. + if (x2_sum > x2_sum_threshold && !saturation) { + RTC_DCHECK_LT(0.f, x2_sum); + const float alpha = smoothing * e / x2_sum; + const __m128 alpha_128 = _mm_set1_ps(alpha); + + // filter = filter + smoothing * (y - filter * x) * x / x * x. + float* h_p = &h[0]; + x_p = &x[x_start_index]; + + // Perform the loop in two chunks. + for (int limit : {chunk1, chunk2}) { + // Perform 128 bit vector operations. + const int limit_by_4 = limit >> 2; + for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) { + // Load the data into 128 bit vectors. + __m128 h_k = _mm_loadu_ps(h_p); + const __m128 x_k = _mm_loadu_ps(x_p); + + // Compute h = h + alpha * x. + const __m128 alpha_x = _mm_mul_ps(alpha_128, x_k); + h_k = _mm_add_ps(h_k, alpha_x); + + // Store the result. + _mm_storeu_ps(h_p, h_k); + } + + // Perform non-vector operations for any remaining items. + for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) { + *h_p += alpha * *x_p; + } + + x_p = &x[0]; + } + + *filters_updated = true; + } + + x_start_index = x_start_index > 0 ? x_start_index - 1 : x_size - 1; + } +} +#endif + +void MatchedFilterCore(size_t x_start_index, + float x2_sum_threshold, + float smoothing, + rtc::ArrayView x, + rtc::ArrayView y, + rtc::ArrayView h, + bool* filters_updated, + float* error_sum) { + // Process for all samples in the sub-block. + for (size_t i = 0; i < y.size(); ++i) { + // Apply the matched filter as filter * x, and compute x * x. + float x2_sum = 0.f; + float s = 0; + size_t x_index = x_start_index; + for (size_t k = 0; k < h.size(); ++k) { + x2_sum += x[x_index] * x[x_index]; + s += h[k] * x[x_index]; + x_index = x_index < (x.size() - 1) ? x_index + 1 : 0; + } + + // Compute the matched filter error. + float e = y[i] - s; + const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f; + (*error_sum) += e * e; + + // Update the matched filter estimate in an NLMS manner. + if (x2_sum > x2_sum_threshold && !saturation) { + RTC_DCHECK_LT(0.f, x2_sum); + const float alpha = smoothing * e / x2_sum; + + // filter = filter + smoothing * (y - filter * x) * x / x * x. + size_t x_index = x_start_index; + for (size_t k = 0; k < h.size(); ++k) { + h[k] += alpha * x[x_index]; + x_index = x_index < (x.size() - 1) ? x_index + 1 : 0; + } + *filters_updated = true; + } + + x_start_index = x_start_index > 0 ? 
x_start_index - 1 : x.size() - 1; + } +} + +} // namespace aec3 + +MatchedFilter::MatchedFilter(ApmDataDumper* data_dumper, + Aec3Optimization optimization, + size_t sub_block_size, + size_t window_size_sub_blocks, + int num_matched_filters, + size_t alignment_shift_sub_blocks, + float excitation_limit, + float smoothing, + float matching_filter_threshold) + : data_dumper_(data_dumper), + optimization_(optimization), + sub_block_size_(sub_block_size), + filter_intra_lag_shift_(alignment_shift_sub_blocks * sub_block_size_), + filters_( + num_matched_filters, + std::vector(window_size_sub_blocks * sub_block_size_, 0.f)), + lag_estimates_(num_matched_filters), + filters_offsets_(num_matched_filters, 0), + excitation_limit_(excitation_limit), + smoothing_(smoothing), + matching_filter_threshold_(matching_filter_threshold) { + RTC_DCHECK(data_dumper); + RTC_DCHECK_LT(0, window_size_sub_blocks); + RTC_DCHECK((kBlockSize % sub_block_size) == 0); + RTC_DCHECK((sub_block_size % 4) == 0); +} + +MatchedFilter::~MatchedFilter() = default; + +void MatchedFilter::Reset() { + for (auto& f : filters_) { + std::fill(f.begin(), f.end(), 0.f); + } + + for (auto& l : lag_estimates_) { + l = MatchedFilter::LagEstimate(); + } +} + +void MatchedFilter::Update(const DownsampledRenderBuffer& render_buffer, + rtc::ArrayView capture) { + RTC_DCHECK_EQ(sub_block_size_, capture.size()); + auto& y = capture; + const float x2_sum_threshold = + filters_[0].size() * excitation_limit_ * excitation_limit_; + + // Apply all matched filters. + size_t alignment_shift = 0; + for (size_t n = 0; n < filters_.size(); ++n) { + float error_sum = 0.f; + bool filters_updated = false; + + size_t x_start_index = + (render_buffer.read + alignment_shift + sub_block_size_ - 1) % + render_buffer.buffer.size(); + + switch (optimization_) { +#if defined(WEBRTC_ARCH_X86_FAMILY) + case Aec3Optimization::kSse2: + aec3::MatchedFilterCore_SSE2(x_start_index, x2_sum_threshold, + smoothing_, render_buffer.buffer, y, + filters_[n], &filters_updated, &error_sum); + break; +#endif +#if defined(WEBRTC_HAS_NEON) + case Aec3Optimization::kNeon: + aec3::MatchedFilterCore_NEON(x_start_index, x2_sum_threshold, + smoothing_, render_buffer.buffer, y, + filters_[n], &filters_updated, &error_sum); + break; +#endif + default: + aec3::MatchedFilterCore(x_start_index, x2_sum_threshold, smoothing_, + render_buffer.buffer, y, filters_[n], + &filters_updated, &error_sum); + } + + // Compute anchor for the matched filter error. + const float error_sum_anchor = + std::inner_product(y.begin(), y.end(), y.begin(), 0.f); + + // Estimate the lag in the matched filter as the distance to the portion in + // the filter that contributes the most to the matched filter output. This + // is detected as the peak of the matched filter. + const size_t lag_estimate = std::distance( + filters_[n].begin(), + std::max_element( + filters_[n].begin(), filters_[n].end(), + [](float a, float b) -> bool { return a * a < b * b; })); + + // Update the lag estimates for the matched filter. 
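+    // An estimate is deemed reliable only if the filter peak lies away from
+    // the buffer edges and the residual matched-filter error stays clearly
+    // below the no-filter anchor error_sum_anchor.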
+ lag_estimates_[n] = LagEstimate( + error_sum_anchor - error_sum, //accuracy + (lag_estimate > 2 && lag_estimate < (filters_[n].size() - 10) && + error_sum < matching_filter_threshold_ * error_sum_anchor), //reliable + lag_estimate + alignment_shift, filters_updated); + + RTC_DCHECK_GE(10, filters_.size()); + switch (n) { + case 0: + data_dumper_->DumpRaw("aec3_correlator_0_h", filters_[0]); + break; + case 1: + data_dumper_->DumpRaw("aec3_correlator_1_h", filters_[1]); + break; + case 2: + data_dumper_->DumpRaw("aec3_correlator_2_h", filters_[2]); + break; + case 3: + data_dumper_->DumpRaw("aec3_correlator_3_h", filters_[3]); + break; + case 4: + data_dumper_->DumpRaw("aec3_correlator_4_h", filters_[4]); + break; + case 5: + data_dumper_->DumpRaw("aec3_correlator_5_h", filters_[5]); + break; + case 6: + data_dumper_->DumpRaw("aec3_correlator_6_h", filters_[6]); + break; + case 7: + data_dumper_->DumpRaw("aec3_correlator_7_h", filters_[7]); + break; + case 8: + data_dumper_->DumpRaw("aec3_correlator_8_h", filters_[8]); + break; + case 9: + data_dumper_->DumpRaw("aec3_correlator_9_h", filters_[9]); + break; + default: + RTC_NOTREACHED(); + } + + alignment_shift += filter_intra_lag_shift_; + } +} + +void MatchedFilter::LogFilterProperties(int sample_rate_hz, + size_t shift, + size_t downsampling_factor) const { + size_t alignment_shift = 0; + constexpr int kFsBy1000 = 16; + for (size_t k = 0; k < filters_.size(); ++k) { + int start = static_cast(alignment_shift * downsampling_factor); + int end = static_cast((alignment_shift + filters_[k].size()) * + downsampling_factor); + RTC_LOG(LS_VERBOSE) << "Filter " << k << ": start: " + << (start - static_cast(shift)) / kFsBy1000 + << " ms, end: " + << (end - static_cast(shift)) / kFsBy1000 + << " ms."; + alignment_shift += filter_intra_lag_shift_; + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/matched_filter.h b/audio_processing/aec3/matched_filter.h new file mode 100644 index 0000000..9396d57 --- /dev/null +++ b/audio_processing/aec3/matched_filter.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_H_ + +#include + +#include + +#include "rtc_base/array_view.h" +#include "audio_processing/aec3/aec3_common.h" +#include "rtc_base/constructor_magic.h" +#include "rtc_base/system/arch.h" + +namespace webrtc { + +class ApmDataDumper; +struct DownsampledRenderBuffer; + +namespace aec3 { + +#if defined(WEBRTC_HAS_NEON) + +// Filter core for the matched filter that is optimized for NEON. +void MatchedFilterCore_NEON(size_t x_start_index, + float x2_sum_threshold, + float smoothing, + rtc::ArrayView x, + rtc::ArrayView y, + rtc::ArrayView h, + bool* filters_updated, + float* error_sum); + +#endif + +#if defined(WEBRTC_ARCH_X86_FAMILY) + +// Filter core for the matched filter that is optimized for SSE2. +void MatchedFilterCore_SSE2(size_t x_start_index, + float x2_sum_threshold, + float smoothing, + rtc::ArrayView x, + rtc::ArrayView y, + rtc::ArrayView h, + bool* filters_updated, + float* error_sum); + +#endif + +// Filter core for the matched filter. 
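+// Portable reference implementation used when no SIMD optimization is
+// selected; the NEON and SSE2 variants above are intended to produce the same
+// result.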
+void MatchedFilterCore(size_t x_start_index, + float x2_sum_threshold, + float smoothing, + rtc::ArrayView x, + rtc::ArrayView y, + rtc::ArrayView h, + bool* filters_updated, + float* error_sum); + +} // namespace aec3 + +// Produces recursively updated cross-correlation estimates for several signal +// shifts where the intra-shift spacing is uniform. +class MatchedFilter { + public: + // Stores properties for the lag estimate corresponding to a particular signal + // shift. + struct LagEstimate { + LagEstimate() = default; + LagEstimate(float accuracy, bool reliable, size_t lag, bool updated) + : accuracy(accuracy), reliable(reliable), lag(lag), updated(updated) {} + + float accuracy = 0.f; + bool reliable = false; + size_t lag = 0; + bool updated = false; + }; + + MatchedFilter(ApmDataDumper* data_dumper, + Aec3Optimization optimization, + size_t sub_block_size, + size_t window_size_sub_blocks, + int num_matched_filters, + size_t alignment_shift_sub_blocks, + float excitation_limit, + float smoothing, + float matching_filter_threshold); + + ~MatchedFilter(); + + // Updates the correlation with the values in the capture buffer. + void Update(const DownsampledRenderBuffer& render_buffer, + rtc::ArrayView capture); + + // Resets the matched filter. + void Reset(); + + // Returns the current lag estimates. + rtc::ArrayView GetLagEstimates() const { + return lag_estimates_; + } + + // Returns the maximum filter lag. + size_t GetMaxFilterLag() const { + return filters_.size() * filter_intra_lag_shift_ + filters_[0].size(); + } + + // Log matched filter properties. + void LogFilterProperties(int sample_rate_hz, + size_t shift, + size_t downsampling_factor) const; + + private: + ApmDataDumper* const data_dumper_; + const Aec3Optimization optimization_; + const size_t sub_block_size_; + const size_t filter_intra_lag_shift_; + std::vector> filters_; + std::vector lag_estimates_; + std::vector filters_offsets_; + const float excitation_limit_; + const float smoothing_; + const float matching_filter_threshold_; + + RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(MatchedFilter); +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_H_ diff --git a/audio_processing/aec3/matched_filter_lag_aggregator.cc b/audio_processing/aec3/matched_filter_lag_aggregator.cc new file mode 100644 index 0000000..d8f1e98 --- /dev/null +++ b/audio_processing/aec3/matched_filter_lag_aggregator.cc @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "audio_processing/aec3/matched_filter_lag_aggregator.h" + +#include +#include + +#include "audio_processing/logging/apm_data_dumper.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +MatchedFilterLagAggregator::MatchedFilterLagAggregator( + ApmDataDumper* data_dumper, + size_t max_filter_lag, + const EchoCanceller3Config::Delay::DelaySelectionThresholds& thresholds) + : data_dumper_(data_dumper), + histogram_(max_filter_lag + 1, 0), + thresholds_(thresholds) { + RTC_DCHECK(data_dumper); + RTC_DCHECK_LE(thresholds_.initial, thresholds_.converged); + histogram_data_.fill(0); +} + +MatchedFilterLagAggregator::~MatchedFilterLagAggregator() = default; + +void MatchedFilterLagAggregator::Reset(bool hard_reset) { + std::fill(histogram_.begin(), histogram_.end(), 0); + histogram_data_.fill(0); + histogram_data_index_ = 0; + if (hard_reset) { + significant_candidate_found_ = false; + } +} + +absl::optional MatchedFilterLagAggregator::Aggregate( + rtc::ArrayView lag_estimates) { + // Choose the strongest lag estimate as the best one. + float best_accuracy = 0.f; + int best_lag_estimate_index = -1; + for (size_t k = 0; k < lag_estimates.size(); ++k) { + if (lag_estimates[k].updated && lag_estimates[k].reliable) { + if (lag_estimates[k].accuracy > best_accuracy) { + best_accuracy = lag_estimates[k].accuracy; + best_lag_estimate_index = static_cast(k); + } + } + } + + // TODO(peah): Remove this logging once all development is done. + data_dumper_->DumpRaw("aec3_echo_path_delay_estimator_best_index", + best_lag_estimate_index); + data_dumper_->DumpRaw("aec3_echo_path_delay_estimator_histogram", histogram_); + + if (best_lag_estimate_index != -1) { + RTC_DCHECK_GT(histogram_.size(), histogram_data_[histogram_data_index_]); + RTC_DCHECK_LE(0, histogram_data_[histogram_data_index_]); + + --histogram_[histogram_data_[histogram_data_index_]]; + + histogram_data_[histogram_data_index_] = + lag_estimates[best_lag_estimate_index].lag; + + RTC_DCHECK_GT(histogram_.size(), histogram_data_[histogram_data_index_]); + RTC_DCHECK_LE(0, histogram_data_[histogram_data_index_]); + + ++histogram_[histogram_data_[histogram_data_index_]]; + + histogram_data_index_ = + (histogram_data_index_ + 1) % histogram_data_.size(); + + const int candidate = + std::distance(histogram_.begin(), + std::max_element(histogram_.begin(), histogram_.end())); + + significant_candidate_found_ = + significant_candidate_found_ || + histogram_[candidate] > thresholds_.converged; + if (histogram_[candidate] > thresholds_.converged || + (histogram_[candidate] > thresholds_.initial && + !significant_candidate_found_)) { + DelayEstimate::Quality quality = significant_candidate_found_ + ? DelayEstimate::Quality::kRefined + : DelayEstimate::Quality::kCoarse; + return DelayEstimate(quality, candidate); + } + } + + return absl::nullopt; +} + +} // namespace webrtc diff --git a/audio_processing/aec3/matched_filter_lag_aggregator.h b/audio_processing/aec3/matched_filter_lag_aggregator.h new file mode 100644 index 0000000..2fc99da --- /dev/null +++ b/audio_processing/aec3/matched_filter_lag_aggregator.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_LAG_AGGREGATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_LAG_AGGREGATOR_H_
+
+#include <array>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/delay_estimate.h"
+#include "audio_processing/aec3/matched_filter.h"
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// Aggregates lag estimates produced by the MatchedFilter class into a single
+// reliable combined lag estimate.
+class MatchedFilterLagAggregator {
+ public:
+  MatchedFilterLagAggregator(
+      ApmDataDumper* data_dumper,
+      size_t max_filter_lag,
+      const EchoCanceller3Config::Delay::DelaySelectionThresholds& thresholds);
+  ~MatchedFilterLagAggregator();
+
+  // Resets the aggregator.
+  void Reset(bool hard_reset);
+
+  // Aggregates the provided lag estimates.
+  absl::optional<DelayEstimate> Aggregate(
+      rtc::ArrayView<const MatchedFilter::LagEstimate> lag_estimates);
+
+ private:
+  ApmDataDumper* const data_dumper_;
+  std::vector<int> histogram_;
+  std::array<int, 250> histogram_data_;
+  int histogram_data_index_ = 0;
+  bool significant_candidate_found_ = false;
+  const EchoCanceller3Config::Delay::DelaySelectionThresholds thresholds_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(MatchedFilterLagAggregator);
+};
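+
+// A minimal usage sketch (illustrative only; the max filter lag below is an
+// arbitrary example value): once per capture block, the lag estimates of a
+// MatchedFilter are aggregated into a histogram until a candidate passes the
+// configured thresholds and a combined delay estimate is produced.
+//
+//   ApmDataDumper data_dumper(0);
+//   EchoCanceller3Config config;
+//   MatchedFilterLagAggregator aggregator(
+//       &data_dumper, /*max_filter_lag=*/100,
+//       config.delay.delay_selection_thresholds);
+//   absl::optional<DelayEstimate> delay =
+//       aggregator.Aggregate(filter.GetLagEstimates());
+//   if (delay) {
+//     // delay->delay holds the aggregated lag; delay->quality indicates a
+//     // coarse or refined estimate.
+//   }
+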
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_LAG_AGGREGATOR_H_
diff --git a/audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc b/audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc
new file mode 100644
index 0000000..e136c89
--- /dev/null
+++ b/audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc
@@ -0,0 +1,156 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/matched_filter_lag_aggregator.h"
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kNumLagsBeforeDetection = 26;
+
+}  // namespace
+
+// Verifies that the most accurate lag estimate is chosen.
+TEST(MatchedFilterLagAggregator, MostAccurateLagChosen) {
+  constexpr size_t kLag1 = 5;
+  constexpr size_t kLag2 = 10;
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  std::vector<MatchedFilter::LagEstimate> lag_estimates(2);
+  MatchedFilterLagAggregator aggregator(
+      &data_dumper, std::max(kLag1, kLag2),
+      config.delay.delay_selection_thresholds);
+  lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, kLag1, true);
+  lag_estimates[1] = MatchedFilter::LagEstimate(0.5f, true, kLag2, true);
+
+  for (size_t k = 0; k < kNumLagsBeforeDetection; ++k) {
+    aggregator.Aggregate(lag_estimates);
+  }
+
+  absl::optional<DelayEstimate> aggregated_lag =
+      aggregator.Aggregate(lag_estimates);
+  EXPECT_TRUE(aggregated_lag);
+  EXPECT_EQ(kLag1, aggregated_lag->delay);
+
+  lag_estimates[0] = MatchedFilter::LagEstimate(0.5f, true, kLag1, true);
+  lag_estimates[1] = MatchedFilter::LagEstimate(1.f, true, kLag2, true);
+
+  for (size_t k = 0; k < kNumLagsBeforeDetection; ++k) {
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+    EXPECT_TRUE(aggregated_lag);
+    EXPECT_EQ(kLag1, aggregated_lag->delay);
+  }
+
+  aggregated_lag = aggregator.Aggregate(lag_estimates);
+  aggregated_lag = aggregator.Aggregate(lag_estimates);
+  EXPECT_TRUE(aggregated_lag);
+  EXPECT_EQ(kLag2, aggregated_lag->delay);
+}
+
+// Verifies that varying lag estimates causes lag estimates to not be deemed
+// reliable.
+TEST(MatchedFilterLagAggregator,
+     LagEstimateInvarianceRequiredForAggregatedLag) {
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  std::vector<MatchedFilter::LagEstimate> lag_estimates(1);
+  MatchedFilterLagAggregator aggregator(
+      &data_dumper, 100, config.delay.delay_selection_thresholds);
+
+  absl::optional<DelayEstimate> aggregated_lag;
+  for (size_t k = 0; k < kNumLagsBeforeDetection; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, 10, true);
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+  }
+  EXPECT_TRUE(aggregated_lag);
+
+  for (size_t k = 0; k < kNumLagsBeforeDetection * 100; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, k % 100, true);
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+  }
+  EXPECT_FALSE(aggregated_lag);
+
+  for (size_t k = 0; k < kNumLagsBeforeDetection * 100; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, k % 100, true);
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+    EXPECT_FALSE(aggregated_lag);
+  }
+}
+
+// Verifies that lag estimate updates are required to produce an updated lag
+// aggregate.
+TEST(MatchedFilterLagAggregator,
+     DISABLED_LagEstimateUpdatesRequiredForAggregatedLag) {
+  constexpr size_t kLag = 5;
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  std::vector<MatchedFilter::LagEstimate> lag_estimates(1);
+  MatchedFilterLagAggregator aggregator(
+      &data_dumper, kLag, config.delay.delay_selection_thresholds);
+  for (size_t k = 0; k < kNumLagsBeforeDetection * 10; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, kLag, false);
+    absl::optional<DelayEstimate> aggregated_lag =
+        aggregator.Aggregate(lag_estimates);
+    EXPECT_FALSE(aggregated_lag);
+    EXPECT_EQ(kLag, aggregated_lag->delay);
+  }
+}
+
+// Verifies that an aggregated lag is persistent if the lag estimates do not
+// change and that an aggregated lag is not produced without gaining lag
+// estimate confidence.
+TEST(MatchedFilterLagAggregator, DISABLED_PersistentAggregatedLag) {
+  constexpr size_t kLag1 = 5;
+  constexpr size_t kLag2 = 10;
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  std::vector<MatchedFilter::LagEstimate> lag_estimates(1);
+  MatchedFilterLagAggregator aggregator(
+      &data_dumper, std::max(kLag1, kLag2),
+      config.delay.delay_selection_thresholds);
+  absl::optional<DelayEstimate> aggregated_lag;
+  for (size_t k = 0; k < kNumLagsBeforeDetection; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, kLag1, true);
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+  }
+  EXPECT_TRUE(aggregated_lag);
+  EXPECT_EQ(kLag1, aggregated_lag->delay);
+
+  for (size_t k = 0; k < kNumLagsBeforeDetection * 40; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, false, kLag2, true);
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+    EXPECT_TRUE(aggregated_lag);
+    EXPECT_EQ(kLag1, aggregated_lag->delay);
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for non-null data dumper.
+TEST(MatchedFilterLagAggregator, NullDataDumper) {
+  EchoCanceller3Config config;
+  EXPECT_DEATH(MatchedFilterLagAggregator(
+                   nullptr, 10, config.delay.delay_selection_thresholds),
+               "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/matched_filter_unittest.cc b/audio_processing/aec3/matched_filter_unittest.cc
new file mode 100644
index 0000000..8a6e22e
--- /dev/null
+++ b/audio_processing/aec3/matched_filter_unittest.cc
@@ -0,0 +1,422 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/matched_filter.h"
+
+// Defines WEBRTC_ARCH_X86_FAMILY, used below.
+#include "rtc_base/system/arch.h"
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <string>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/decimator.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+namespace {
+
+std::string ProduceDebugText(size_t delay, size_t down_sampling_factor) {
+  rtc::StringBuilder ss;
+  ss << "Delay: " << delay;
+  ss << ", Down sampling factor: " << down_sampling_factor;
+  return ss.Release();
+}
+
+constexpr size_t kNumMatchedFilters = 10;
+constexpr size_t kDownSamplingFactors[] = {2, 4, 8};
+constexpr size_t kWindowSizeSubBlocks = 32;
+constexpr size_t kAlignmentShiftSubBlocks = kWindowSizeSubBlocks * 3 / 4;
+
+}  // namespace
+
+#if defined(WEBRTC_HAS_NEON)
+// Verifies that the optimized methods for NEON are similar to their reference
+// counterparts.
+TEST(MatchedFilter, TestNeonOptimizations) {
+  Random random_generator(42U);
+  constexpr float kSmoothing = 0.7f;
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+    std::vector<float> x(2000);
+    RandomizeSampleVector(&random_generator, x);
+    std::vector<float> y(sub_block_size);
+    std::vector<float> h_NEON(512);
+    std::vector<float> h(512);
+    int x_index = 0;
+    for (int k = 0; k < 1000; ++k) {
+      RandomizeSampleVector(&random_generator, y);
+
+      bool filters_updated = false;
+      float error_sum = 0.f;
+      bool filters_updated_NEON = false;
+      float error_sum_NEON = 0.f;
+
+      MatchedFilterCore_NEON(x_index, h.size() * 150.f * 150.f, kSmoothing, x,
+                             y, h_NEON, &filters_updated_NEON, &error_sum_NEON);
+
+      MatchedFilterCore(x_index, h.size() * 150.f * 150.f, kSmoothing, x, y, h,
+                        &filters_updated, &error_sum);
+
+      EXPECT_EQ(filters_updated, filters_updated_NEON);
+      EXPECT_NEAR(error_sum, error_sum_NEON, error_sum / 100000.f);
+
+      for (size_t j = 0; j < h.size(); ++j) {
+        EXPECT_NEAR(h[j], h_NEON[j], 0.00001f);
+      }
+
+      x_index = (x_index + sub_block_size) % x.size();
+    }
+  }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Verifies that the optimized methods for SSE2 are similar to their reference
+// counterparts.
+TEST(MatchedFilter, TestSse2Optimizations) {
+  bool use_sse2 = (WebRtc_GetCPUInfo(kSSE2) != 0);
+  if (use_sse2) {
+    Random random_generator(42U);
+    constexpr float kSmoothing = 0.7f;
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      const size_t sub_block_size = kBlockSize / down_sampling_factor;
+      std::vector<float> x(2000);
+      RandomizeSampleVector(&random_generator, x);
+      std::vector<float> y(sub_block_size);
+      std::vector<float> h_SSE2(512);
+      std::vector<float> h(512);
+      int x_index = 0;
+      for (int k = 0; k < 1000; ++k) {
+        RandomizeSampleVector(&random_generator, y);
+
+        bool filters_updated = false;
+        float error_sum = 0.f;
+        bool filters_updated_SSE2 = false;
+        float error_sum_SSE2 = 0.f;
+
+        MatchedFilterCore_SSE2(x_index, h.size() * 150.f * 150.f, kSmoothing, x,
+                               y, h_SSE2, &filters_updated_SSE2,
+                               &error_sum_SSE2);
+
+        MatchedFilterCore(x_index, h.size() * 150.f * 150.f, kSmoothing, x, y,
+                          h, &filters_updated, &error_sum);
+
+        EXPECT_EQ(filters_updated, filters_updated_SSE2);
+        EXPECT_NEAR(error_sum, error_sum_SSE2, error_sum / 100000.f);
+
+        for (size_t j = 0; j < h.size(); ++j) {
+          EXPECT_NEAR(h[j], h_SSE2[j], 0.00001f);
+        }
+
+        x_index = (x_index + sub_block_size) % x.size();
+      }
+    }
+  }
+}
+
+#endif
+
+// Verifies that the matched filter produces proper lag estimates for
+// artificially delayed signals.
+TEST(MatchedFilter, LagEstimation) {
+  Random random_generator(42U);
+  constexpr size_t kNumChannels = 1;
+  constexpr int kSampleRateHz = 48000;
+  constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+    std::vector<std::vector<std::vector<float>>> render(
+        kNumBands, std::vector<std::vector<float>>(
+                       kNumChannels, std::vector<float>(kBlockSize, 0.f)));
+    std::vector<std::vector<float>> capture(
+        1, std::vector<float>(kBlockSize, 0.f));
+    ApmDataDumper data_dumper(0);
+    for (size_t delay_samples : {5, 64, 150, 200, 800, 1000}) {
+      SCOPED_TRACE(ProduceDebugText(delay_samples, down_sampling_factor));
+      EchoCanceller3Config config;
+      config.delay.down_sampling_factor = down_sampling_factor;
+      config.delay.num_filters = kNumMatchedFilters;
+      Decimator capture_decimator(down_sampling_factor);
+      DelayBuffer<float> signal_delay_buffer(down_sampling_factor *
+                                             delay_samples);
+      MatchedFilter filter(&data_dumper, DetectOptimization(), sub_block_size,
+                           kWindowSizeSubBlocks, kNumMatchedFilters,
+                           kAlignmentShiftSubBlocks, 150,
+                           config.delay.delay_estimate_smoothing,
+                           config.delay.delay_candidate_detection_threshold);
+
+      std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+          RenderDelayBuffer::Create(config, kSampleRateHz, kNumChannels));
+
+      // Analyze the correlation between render and capture.
+      for (size_t k = 0; k < (600 + delay_samples / sub_block_size); ++k) {
+        for (size_t band = 0; band < kNumBands; ++band) {
+          for (size_t channel = 0; channel < kNumChannels; ++channel) {
+            RandomizeSampleVector(&random_generator, render[band][channel]);
+          }
+        }
+        signal_delay_buffer.Delay(render[0][0], capture[0]);
+        render_delay_buffer->Insert(render);
+
+        if (k == 0) {
+          render_delay_buffer->Reset();
+        }
+
+        render_delay_buffer->PrepareCaptureProcessing();
+        std::array<float, kBlockSize> downsampled_capture_data;
+        rtc::ArrayView<float> downsampled_capture(
+            downsampled_capture_data.data(), sub_block_size);
+        capture_decimator.Decimate(capture[0], downsampled_capture);
+        filter.Update(render_delay_buffer->GetDownsampledRenderBuffer(),
+                      downsampled_capture);
+      }
+
+      // Obtain the lag estimates.
+      auto lag_estimates = filter.GetLagEstimates();
+
+      // Find which lag estimate should be the most accurate.
+      absl::optional<size_t> expected_most_accurate_lag_estimate;
+      size_t alignment_shift_sub_blocks = 0;
+      for (size_t k = 0; k < config.delay.num_filters; ++k) {
+        if ((alignment_shift_sub_blocks + 3 * kWindowSizeSubBlocks / 4) *
+                sub_block_size >
+            delay_samples) {
+          expected_most_accurate_lag_estimate = k > 0 ? k - 1 : 0;
+          break;
+        }
+        alignment_shift_sub_blocks += kAlignmentShiftSubBlocks;
+      }
+      ASSERT_TRUE(expected_most_accurate_lag_estimate);
+
+      // Verify that the expected most accurate lag estimate is the most
+      // accurate estimate.
+      for (size_t k = 0; k < kNumMatchedFilters; ++k) {
+        if (k != *expected_most_accurate_lag_estimate &&
+            k != (*expected_most_accurate_lag_estimate + 1)) {
+          EXPECT_TRUE(
+              lag_estimates[*expected_most_accurate_lag_estimate].accuracy >
+                  lag_estimates[k].accuracy ||
+              !lag_estimates[k].reliable ||
+              !lag_estimates[*expected_most_accurate_lag_estimate].reliable);
+        }
+      }
+
+      // Verify that all lag estimates are updated as expected for signals
+      // containing strong noise.
+      for (auto& le : lag_estimates) {
+        EXPECT_TRUE(le.updated);
+      }
+
+      // Verify that the expected most accurate lag estimate is reliable.
+      EXPECT_TRUE(
+          lag_estimates[*expected_most_accurate_lag_estimate].reliable ||
+          lag_estimates[std::min(*expected_most_accurate_lag_estimate + 1,
+                                 lag_estimates.size() - 1)]
+              .reliable);
+
+      // Verify that the expected most accurate lag estimate is correct.
+      if (lag_estimates[*expected_most_accurate_lag_estimate].reliable) {
+        EXPECT_TRUE(delay_samples ==
+                    lag_estimates[*expected_most_accurate_lag_estimate].lag);
+      } else {
+        EXPECT_TRUE(
+            delay_samples ==
+            lag_estimates[std::min(*expected_most_accurate_lag_estimate + 1,
+                                   lag_estimates.size() - 1)]
+                .lag);
+      }
+    }
+  }
+}
+
+// Verifies that the matched filter does not produce reliable and accurate
+// estimates for uncorrelated render and capture signals.
+TEST(MatchedFilter, LagNotReliableForUncorrelatedRenderAndCapture) {
+  constexpr size_t kNumChannels = 1;
+  constexpr int kSampleRateHz = 48000;
+  constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+  Random random_generator(42U);
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    EchoCanceller3Config config;
+    config.delay.down_sampling_factor = down_sampling_factor;
+    config.delay.num_filters = kNumMatchedFilters;
+    const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+    std::vector<std::vector<std::vector<float>>> render(
+        kNumBands, std::vector<std::vector<float>>(
+                       kNumChannels, std::vector<float>(kBlockSize, 0.f)));
+    std::array<float, kBlockSize> capture_data;
+    rtc::ArrayView<float> capture(capture_data.data(), sub_block_size);
+    std::fill(capture.begin(), capture.end(), 0.f);
+    ApmDataDumper data_dumper(0);
+    std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+        RenderDelayBuffer::Create(config, kSampleRateHz, kNumChannels));
+    MatchedFilter filter(&data_dumper, DetectOptimization(), sub_block_size,
+                         kWindowSizeSubBlocks, kNumMatchedFilters,
+                         kAlignmentShiftSubBlocks, 150,
+                         config.delay.delay_estimate_smoothing,
+                         config.delay.delay_candidate_detection_threshold);
+
+    // Analyze the correlation between render and capture.
+    for (size_t k = 0; k < 100; ++k) {
+      RandomizeSampleVector(&random_generator, render[0][0]);
+      RandomizeSampleVector(&random_generator, capture);
+      render_delay_buffer->Insert(render);
+      filter.Update(render_delay_buffer->GetDownsampledRenderBuffer(),
+                    capture);
+    }
+
+    // Obtain the lag estimates.
+    auto lag_estimates = filter.GetLagEstimates();
+    EXPECT_EQ(kNumMatchedFilters, lag_estimates.size());
+
+    // Verify that no lag estimates are reliable.
+    for (auto& le : lag_estimates) {
+      EXPECT_FALSE(le.reliable);
+    }
+  }
+}
+
+// Verifies that the matched filter does not produce updated lag estimates for
+// render signals of low level.
+TEST(MatchedFilter, LagNotUpdatedForLowLevelRender) {
+  Random random_generator(42U);
+  constexpr size_t kNumChannels = 1;
+  constexpr int kSampleRateHz = 48000;
+  constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+    std::vector<std::vector<std::vector<float>>> render(
+        kNumBands, std::vector<std::vector<float>>(
+                       kNumChannels, std::vector<float>(kBlockSize, 0.f)));
+    std::vector<std::vector<float>> capture(
+        1, std::vector<float>(kBlockSize, 0.f));
+    ApmDataDumper data_dumper(0);
+    EchoCanceller3Config config;
+    MatchedFilter filter(&data_dumper, DetectOptimization(), sub_block_size,
+                         kWindowSizeSubBlocks, kNumMatchedFilters,
+                         kAlignmentShiftSubBlocks, 150,
+                         config.delay.delay_estimate_smoothing,
+                         config.delay.delay_candidate_detection_threshold);
+    std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+        RenderDelayBuffer::Create(EchoCanceller3Config(), kSampleRateHz,
+                                  kNumChannels));
+    Decimator capture_decimator(down_sampling_factor);
+
+    // Analyze the correlation between render and capture.
+    for (size_t k = 0; k < 100; ++k) {
+      RandomizeSampleVector(&random_generator, render[0][0]);
+      for (auto& render_k : render[0][0]) {
+        render_k *= 149.f / 32767.f;
+      }
+      std::copy(render[0][0].begin(), render[0][0].end(), capture[0].begin());
+      std::array<float, kBlockSize> downsampled_capture_data;
+      rtc::ArrayView<float> downsampled_capture(
+          downsampled_capture_data.data(), sub_block_size);
+      capture_decimator.Decimate(capture[0], downsampled_capture);
+      filter.Update(render_delay_buffer->GetDownsampledRenderBuffer(),
+                    downsampled_capture);
+    }
+
+    // Obtain the lag estimates.
+    auto lag_estimates = filter.GetLagEstimates();
+    EXPECT_EQ(kNumMatchedFilters, lag_estimates.size());
+
+    // Verify that no lag estimates are updated and that no lag estimates are
+    // reliable.
+    for (auto& le : lag_estimates) {
+      EXPECT_FALSE(le.updated);
+      EXPECT_FALSE(le.reliable);
+    }
+  }
+}
+
+// Verifies that the correct number of lag estimates are produced for a certain
+// number of alignment shifts.
+TEST(MatchedFilter, NumberOfLagEstimates) {
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    const size_t sub_block_size = kBlockSize / down_sampling_factor;
+    for (size_t num_matched_filters = 0; num_matched_filters < 10;
+         ++num_matched_filters) {
+      MatchedFilter filter(&data_dumper, DetectOptimization(), sub_block_size,
+                           32, num_matched_filters, 1, 150,
+                           config.delay.delay_estimate_smoothing,
+                           config.delay.delay_candidate_detection_threshold);
+      EXPECT_EQ(num_matched_filters, filter.GetLagEstimates().size());
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for non-zero window size.
+TEST(MatchedFilter, ZeroWindowSize) {
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  EXPECT_DEATH(MatchedFilter(&data_dumper, DetectOptimization(), 16, 0, 1, 1,
+                             150, config.delay.delay_estimate_smoothing,
+                             config.delay.delay_candidate_detection_threshold),
+               "");
+}
+
+// Verifies the check for non-null data dumper.
+TEST(MatchedFilter, NullDataDumper) {
+  EchoCanceller3Config config;
+  EXPECT_DEATH(MatchedFilter(nullptr, DetectOptimization(), 16, 1, 1, 1, 150,
+                             config.delay.delay_estimate_smoothing,
+                             config.delay.delay_candidate_detection_threshold),
+               "");
+}
+
+// Verifies the check that the sub block size is a multiple of 4.
+// TODO(peah): Activate the unittest once the required code has been landed.
+TEST(MatchedFilter, DISABLED_BlockSizeMultipleOf4) {
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  EXPECT_DEATH(MatchedFilter(&data_dumper, DetectOptimization(), 15, 1, 1, 1,
+                             150, config.delay.delay_estimate_smoothing,
+                             config.delay.delay_candidate_detection_threshold),
+               "");
+}
+
+// Verifies the check that an integer number of sub blocks adds up to a block
+// size.
+// TODO(peah): Activate the unittest once the required code has been landed.
+TEST(MatchedFilter, DISABLED_SubBlockSizeAddsUpToBlockSize) {
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  EXPECT_DEATH(MatchedFilter(&data_dumper, DetectOptimization(), 12, 1, 1, 1,
+                             150, config.delay.delay_estimate_smoothing,
+                             config.delay.delay_candidate_detection_threshold),
+               "");
+}
+
+#endif
+
+}  // namespace aec3
+}  // namespace webrtc
diff --git a/audio_processing/aec3/moving_average.cc b/audio_processing/aec3/moving_average.cc
new file mode 100644
index 0000000..ac90a86
--- /dev/null
+++ b/audio_processing/aec3/moving_average.cc
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/moving_average.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace aec3 {
+
+MovingAverage::MovingAverage(size_t num_elem, size_t mem_len)
+    : num_elem_(num_elem),
+      mem_len_(mem_len - 1),
+      scaling_(1.0f / static_cast<float>(mem_len)),
+      memory_(num_elem * mem_len_, 0.f),
+      mem_index_(0) {
+  RTC_DCHECK(num_elem_ > 0);
+  RTC_DCHECK(mem_len > 0);
+}
+
+MovingAverage::~MovingAverage() = default;
+
+void MovingAverage::Average(rtc::ArrayView<const float> input,
+                            rtc::ArrayView<float> output) {
+  RTC_DCHECK(input.size() == num_elem_);
+  RTC_DCHECK(output.size() == num_elem_);
+
+  // Sum all contributions.
+  std::copy(input.begin(), input.end(), output.begin());
+  for (auto i = memory_.begin(); i < memory_.end(); i += num_elem_) {
+    std::transform(i, i + num_elem_, output.begin(), output.begin(),
+                   std::plus<float>());
+  }
+
+  // Divide by mem_len, implemented as a multiplication by the precomputed
+  // reciprocal.
+  for (float& o : output) {
+    o *= scaling_;
+  }
+
+  // Update memory.
+  if (mem_len_ > 0) {
+    std::copy(input.begin(), input.end(),
+              memory_.begin() + mem_index_ * num_elem_);
+    mem_index_ = (mem_index_ + 1) % mem_len_;
+  }
+}
+
+}  // namespace aec3
+}  // namespace webrtc
diff --git a/audio_processing/aec3/moving_average.h b/audio_processing/aec3/moving_average.h
new file mode 100644
index 0000000..741a2c4
--- /dev/null
+++ b/audio_processing/aec3/moving_average.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MOVING_AVERAGE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MOVING_AVERAGE_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "rtc_base/array_view.h"
+
+namespace webrtc {
+namespace aec3 {
+
+class MovingAverage {
+ public:
+  // Creates an instance of MovingAverage that accepts inputs of length
+  // num_elem and averages over mem_len inputs.
+  MovingAverage(size_t num_elem, size_t mem_len);
+  ~MovingAverage();
+
+  // Computes the average of input and mem_len-1 previous inputs and stores the
+  // result in output.
+  void Average(rtc::ArrayView<const float> input,
+               rtc::ArrayView<float> output);
+
+ private:
+  const size_t num_elem_;
+  const size_t mem_len_;
+  const float scaling_;
+  std::vector<float> memory_;
+  size_t mem_index_;
+};
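+
+// A minimal usage sketch (illustrative only): with mem_len == 3, each call to
+// Average() outputs the element-wise mean of the current input and the two
+// previous ones, so the first call below yields {3.f, 6.f} / 3 = {1.f, 2.f}
+// since the internal memory starts out zero-filled.
+//
+//   aec3::MovingAverage ma(/*num_elem=*/2, /*mem_len=*/3);
+//   std::array<float, 2> in = {3.f, 6.f};
+//   std::array<float, 2> out;
+//   ma.Average(in, out);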
+
+}  // namespace aec3
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_MOVING_AVERAGE_H_
diff --git a/audio_processing/aec3/moving_average_unittest.cc b/audio_processing/aec3/moving_average_unittest.cc
new file mode 100644
index 0000000..84ba9cb
--- /dev/null
+++ b/audio_processing/aec3/moving_average_unittest.cc
@@ -0,0 +1,89 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/moving_average.h"
+
+#include <array>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(MovingAverage, Average) {
+  constexpr size_t num_elem = 4;
+  constexpr size_t mem_len = 3;
+  constexpr float e = 1e-6f;
+  aec3::MovingAverage ma(num_elem, mem_len);
+  std::array<float, num_elem> data1 = {1, 2, 3, 4};
+  std::array<float, num_elem> data2 = {5, 1, 9, 7};
+  std::array<float, num_elem> data3 = {3, 3, 5, 6};
+  std::array<float, num_elem> data4 = {8, 4, 2, 1};
+  std::array<float, num_elem> output;
+
+  ma.Average(data1, output);
+  EXPECT_NEAR(output[0], data1[0] / 3.0f, e);
+  EXPECT_NEAR(output[1], data1[1] / 3.0f, e);
+  EXPECT_NEAR(output[2], data1[2] / 3.0f, e);
+  EXPECT_NEAR(output[3], data1[3] / 3.0f, e);
+
+  ma.Average(data2, output);
+  EXPECT_NEAR(output[0], (data1[0] + data2[0]) / 3.0f, e);
+  EXPECT_NEAR(output[1], (data1[1] + data2[1]) / 3.0f, e);
+  EXPECT_NEAR(output[2], (data1[2] + data2[2]) / 3.0f, e);
+  EXPECT_NEAR(output[3], (data1[3] + data2[3]) / 3.0f, e);
+
+  ma.Average(data3, output);
+  EXPECT_NEAR(output[0], (data1[0] + data2[0] + data3[0]) / 3.0f, e);
+  EXPECT_NEAR(output[1], (data1[1] + data2[1] + data3[1]) / 3.0f, e);
+  EXPECT_NEAR(output[2], (data1[2] + data2[2] + data3[2]) / 3.0f, e);
+  EXPECT_NEAR(output[3], (data1[3] + data2[3] + data3[3]) / 3.0f, e);
+
+  ma.Average(data4, output);
+  EXPECT_NEAR(output[0], (data2[0] + data3[0] + data4[0]) / 3.0f, e);
+  EXPECT_NEAR(output[1], (data2[1] + data3[1] + data4[1]) / 3.0f, e);
+  EXPECT_NEAR(output[2], (data2[2] + data3[2] + data4[2]) / 3.0f, e);
+  EXPECT_NEAR(output[3], (data2[3] + data3[3] + data4[3]) / 3.0f, e);
+}
+
+TEST(MovingAverage, PassThrough) {
+  constexpr size_t num_elem = 4;
+  constexpr size_t mem_len = 1;
+  constexpr float e = 1e-6f;
+  aec3::MovingAverage ma(num_elem, mem_len);
+  std::array<float, num_elem> data1 = {1, 2, 3, 4};
+  std::array<float, num_elem> data2 = {5, 1, 9, 7};
+  std::array<float, num_elem> data3 = {3, 3, 5, 6};
+  std::array<float, num_elem> data4 = {8, 4, 2, 1};
+  std::array<float, num_elem> output;
+
+  ma.Average(data1, output);
+  EXPECT_NEAR(output[0], data1[0], e);
+  EXPECT_NEAR(output[1], data1[1], e);
+  EXPECT_NEAR(output[2], data1[2], e);
+  EXPECT_NEAR(output[3], data1[3], e);
+
+  ma.Average(data2, output);
+  EXPECT_NEAR(output[0], data2[0], e);
+  EXPECT_NEAR(output[1], data2[1], e);
+  EXPECT_NEAR(output[2], data2[2], e);
+  EXPECT_NEAR(output[3], data2[3], e);
+
+  ma.Average(data3, output);
+  EXPECT_NEAR(output[0], data3[0], e);
+  EXPECT_NEAR(output[1], data3[1], e);
+  EXPECT_NEAR(output[2], data3[2], e);
+  EXPECT_NEAR(output[3], data3[3], e);
+
+  ma.Average(data4, output);
+  EXPECT_NEAR(output[0], data4[0], e);
+  EXPECT_NEAR(output[1], data4[1], e);
+  EXPECT_NEAR(output[2], data4[2], e);
+  EXPECT_NEAR(output[3], data4[3], e);
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/nearend_detector.h b/audio_processing/aec3/nearend_detector.h
new file mode 100644
index 0000000..221672b
--- /dev/null
+++ b/audio_processing/aec3/nearend_detector.h
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_NEAREND_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_NEAREND_DETECTOR_H_
+
+#include <array>
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+// Class for selecting whether the suppressor is in the nearend or echo state.
+class NearendDetector {
+ public:
+  virtual ~NearendDetector() {}
+
+  // Returns whether the current state is the nearend state.
+  virtual bool IsNearendState() const = 0;
+
+  // Updates the state selection based on latest spectral estimates.
+  virtual void Update(
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          nearend_spectrum,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          residual_echo_spectrum,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          comfort_noise_spectrum,
+      bool initial_state) = 0;
+};
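+
+// A minimal implementor sketch (illustrative only; the trivial decision logic
+// is a placeholder, not a real detector):
+//
+//   class AlwaysNearendDetector : public NearendDetector {
+//    public:
+//     bool IsNearendState() const override { return true; }
+//     void Update(
+//         rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+//             nearend_spectrum,
+//         rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+//             residual_echo_spectrum,
+//         rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+//             comfort_noise_spectrum,
+//         bool initial_state) override {}
+//   };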
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_NEAREND_DETECTOR_H_
diff --git a/audio_processing/aec3/render_buffer.cc b/audio_processing/aec3/render_buffer.cc
new file mode 100644
index 0000000..5519534
--- /dev/null
+++ b/audio_processing/aec3/render_buffer.cc
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/render_buffer.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+RenderBuffer::RenderBuffer(BlockBuffer* block_buffer,
+                           SpectrumBuffer* spectrum_buffer,
+                           FftBuffer* fft_buffer)
+    : block_buffer_(block_buffer),
+      spectrum_buffer_(spectrum_buffer),
+      fft_buffer_(fft_buffer) {
+  RTC_DCHECK(block_buffer_);
+  RTC_DCHECK(spectrum_buffer_);
+  RTC_DCHECK(fft_buffer_);
+  RTC_DCHECK_EQ(block_buffer_->buffer.size(), fft_buffer_->buffer.size());
+  RTC_DCHECK_EQ(spectrum_buffer_->buffer.size(), fft_buffer_->buffer.size());
+  RTC_DCHECK_EQ(spectrum_buffer_->read, fft_buffer_->read);
+  RTC_DCHECK_EQ(spectrum_buffer_->write, fft_buffer_->write);
+}
+
+RenderBuffer::~RenderBuffer() = default;
+
+void RenderBuffer::SpectralSum(
+    size_t num_spectra,
+    std::array<float, kFftLengthBy2Plus1>* X2) const {
+  X2->fill(0.f);
+  int position = spectrum_buffer_->read;
+  for (size_t j = 0; j < num_spectra; ++j) {
+    for (const auto& channel_spectrum : spectrum_buffer_->buffer[position]) {
+      std::transform(X2->begin(), X2->end(), channel_spectrum.begin(),
+                     X2->begin(), std::plus<float>());
+    }
+    position = spectrum_buffer_->IncIndex(position);
+  }
+}
+
+void RenderBuffer::SpectralSums(
+    size_t num_spectra_shorter,
+    size_t num_spectra_longer,
+    std::array<float, kFftLengthBy2Plus1>* X2_shorter,
+    std::array<float, kFftLengthBy2Plus1>* X2_longer) const {
+  RTC_DCHECK_LE(num_spectra_shorter, num_spectra_longer);
+  X2_shorter->fill(0.f);
+  int position = spectrum_buffer_->read;
+  size_t j = 0;
+  for (; j < num_spectra_shorter; ++j) {
+    for (const auto& channel_spectrum : spectrum_buffer_->buffer[position]) {
+      std::transform(X2_shorter->begin(), X2_shorter->end(),
+                     channel_spectrum.begin(), X2_shorter->begin(),
+                     std::plus<float>());
+    }
+    position = spectrum_buffer_->IncIndex(position);
+  }
+  std::copy(X2_shorter->begin(), X2_shorter->end(), X2_longer->begin());
+  for (; j < num_spectra_longer; ++j) {
+    for (const auto& channel_spectrum : spectrum_buffer_->buffer[position]) {
+      std::transform(X2_longer->begin(), X2_longer->end(),
+                     channel_spectrum.begin(), X2_longer->begin(),
+                     std::plus<float>());
+    }
+    position = spectrum_buffer_->IncIndex(position);
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/render_buffer.h b/audio_processing/aec3/render_buffer.h
new file mode 100644
index 0000000..91e21ad
--- /dev/null
+++ b/audio_processing/aec3/render_buffer.h
@@ -0,0 +1,113 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_BUFFER_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/block_buffer.h"
+#include "audio_processing/aec3/fft_buffer.h"
+#include "audio_processing/aec3/fft_data.h"
+#include "audio_processing/aec3/spectrum_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+// Provides a buffer of the render data for the echo remover.
+class RenderBuffer {
+ public:
+  RenderBuffer(BlockBuffer* block_buffer,
+               SpectrumBuffer* spectrum_buffer,
+               FftBuffer* fft_buffer);
+  ~RenderBuffer();
+
+  // Get a block.
+  const std::vector<std::vector<std::vector<float>>>& Block(
+      int buffer_offset_blocks) const {
+    int position =
+        block_buffer_->OffsetIndex(block_buffer_->read, buffer_offset_blocks);
+    return block_buffer_->buffer[position];
+  }
+
+  // Get the spectrum from one of the FFTs in the buffer.
+  rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Spectrum(
+      int buffer_offset_ffts) const {
+    int position = spectrum_buffer_->OffsetIndex(spectrum_buffer_->read,
+                                                 buffer_offset_ffts);
+    return spectrum_buffer_->buffer[position];
+  }
+
+  // Returns the circular fft buffer.
+  rtc::ArrayView<const std::vector<FftData>> GetFftBuffer() const {
+    return fft_buffer_->buffer;
+  }
+
+  // Returns the current position in the circular buffer.
+  size_t Position() const {
+    RTC_DCHECK_EQ(spectrum_buffer_->read, fft_buffer_->read);
+    RTC_DCHECK_EQ(spectrum_buffer_->write, fft_buffer_->write);
+    return fft_buffer_->read;
+  }
+
+  // Returns the sum of the spectra for a certain number of FFTs.
+  void SpectralSum(size_t num_spectra,
+                   std::array<float, kFftLengthBy2Plus1>* X2) const;
+
+  // Returns the sums of the spectra for two numbers of FFTs.
+  void SpectralSums(size_t num_spectra_shorter,
+                    size_t num_spectra_longer,
+                    std::array<float, kFftLengthBy2Plus1>* X2_shorter,
+                    std::array<float, kFftLengthBy2Plus1>* X2_longer) const;
+
+  // Gets the recent activity seen in the render signal.
+  bool GetRenderActivity() const { return render_activity_; }
+
+  // Specifies the recent activity seen in the render signal.
+  void SetRenderActivity(bool activity) { render_activity_ = activity; }
+
+  // Returns the headroom between the write and the read positions in the
+  // buffer.
+  int Headroom() const {
+    // The write and read indices are decreased over time.
+    int headroom =
+        fft_buffer_->write < fft_buffer_->read
+            ? fft_buffer_->read - fft_buffer_->write
+            : fft_buffer_->size - fft_buffer_->write + fft_buffer_->read;
+
+    RTC_DCHECK_LE(0, headroom);
+    RTC_DCHECK_GE(fft_buffer_->size, headroom);
+
+    return headroom;
+  }
+
+  // Returns a reference to the spectrum buffer.
+  const SpectrumBuffer& GetSpectrumBuffer() const { return *spectrum_buffer_; }
+
+  // Returns a reference to the block buffer.
+  const BlockBuffer& GetBlockBuffer() const { return *block_buffer_; }
+
+ private:
+  const BlockBuffer* const block_buffer_;
+  const SpectrumBuffer* const spectrum_buffer_;
+  const FftBuffer* const fft_buffer_;
+  bool render_activity_ = false;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RenderBuffer);
+};
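+
+// A minimal usage sketch (illustrative only; the buffer dimensions are taken
+// from the unit tests, and the three buffers must be kept in sync by their
+// owner, as the render delay buffer does internally):
+//
+//   BlockBuffer block_buffer(10, 3, 1, kBlockSize);
+//   SpectrumBuffer spectrum_buffer(10, 1);
+//   FftBuffer fft_buffer(10, 1);
+//   RenderBuffer render_buffer(&block_buffer, &spectrum_buffer, &fft_buffer);
+//   std::array<float, kFftLengthBy2Plus1> X2;
+//   render_buffer.SpectralSum(/*num_spectra=*/5, &X2);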
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_RENDER_BUFFER_H_
diff --git a/audio_processing/aec3/render_buffer_unittest.cc b/audio_processing/aec3/render_buffer_unittest.cc
new file mode 100644
index 0000000..6981f6d
--- /dev/null
+++ b/audio_processing/aec3/render_buffer_unittest.cc
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_buffer.h"
+
+#include <algorithm>
+#include <functional>
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for non-null fft buffer.
+TEST(RenderBuffer, NullExternalFftBuffer) {
+  BlockBuffer block_buffer(10, 3, 1, kBlockSize);
+  SpectrumBuffer spectrum_buffer(10, 1);
+  EXPECT_DEATH(RenderBuffer(&block_buffer, &spectrum_buffer, nullptr), "");
+}
+
+// Verifies the check for non-null spectrum buffer.
+TEST(RenderBuffer, NullExternalSpectrumBuffer) {
+  FftBuffer fft_buffer(10, 1);
+  BlockBuffer block_buffer(10, 3, 1, kBlockSize);
+  EXPECT_DEATH(RenderBuffer(&block_buffer, nullptr, &fft_buffer), "");
+}
+
+// Verifies the check for non-null block buffer.
+TEST(RenderBuffer, NullExternalBlockBuffer) {
+  FftBuffer fft_buffer(10, 1);
+  SpectrumBuffer spectrum_buffer(10, 1);
+  EXPECT_DEATH(RenderBuffer(nullptr, &spectrum_buffer, &fft_buffer), "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/render_delay_buffer.cc b/audio_processing/aec3/render_delay_buffer.cc
new file mode 100644
index 0000000..653b226
--- /dev/null
+++ b/audio_processing/aec3/render_delay_buffer.cc
@@ -0,0 +1,505 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/render_delay_buffer.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/aec3_fft.h"
+#include "audio_processing/aec3/alignment_mixer.h"
+#include "audio_processing/aec3/block_buffer.h"
+#include "audio_processing/aec3/decimator.h"
+#include "audio_processing/aec3/downsampled_render_buffer.h"
+#include "audio_processing/aec3/fft_buffer.h"
+#include "audio_processing/aec3/fft_data.h"
+#include "audio_processing/aec3/render_buffer.h"
+#include "audio_processing/aec3/spectrum_buffer.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomic_ops.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+class RenderDelayBufferImpl final : public RenderDelayBuffer {
+ public:
+  RenderDelayBufferImpl(const EchoCanceller3Config& config,
+                        int sample_rate_hz,
+                        size_t num_render_channels);
+  RenderDelayBufferImpl() = delete;
+  ~RenderDelayBufferImpl() override;
+
+  void Reset() override;
+  BufferingEvent Insert(
+      const std::vector<std::vector<std::vector<float>>>& block) override;
+  BufferingEvent PrepareCaptureProcessing() override;
+  bool AlignFromDelay(size_t delay) override;
+  void AlignFromExternalDelay() override;
+  size_t Delay() const override { return ComputeDelay(); }
+  size_t MaxDelay() const override {
+    return blocks_.buffer.size() - 1 - buffer_headroom_;
+  }
+  RenderBuffer* GetRenderBuffer() override { return &echo_remover_buffer_; }
+
+  const DownsampledRenderBuffer& GetDownsampledRenderBuffer() const override {
+    return low_rate_;
+  }
+
+  int BufferLatency() const;
+  void SetAudioBufferDelay(int delay_ms) override;
+  bool HasReceivedBufferDelay() override;
+
+ private:
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const Aec3Optimization optimization_;
+  const EchoCanceller3Config config_;
+  const float render_linear_amplitude_gain_;
+  const rtc::LoggingSeverity delay_log_level_;
+  size_t down_sampling_factor_;
+  const int sub_block_size_;
+  BlockBuffer blocks_;
+  SpectrumBuffer spectra_;
+  FftBuffer ffts_;
+  absl::optional<size_t> delay_;
+  RenderBuffer echo_remover_buffer_;
+  DownsampledRenderBuffer low_rate_;
+  AlignmentMixer render_mixer_;
+  Decimator render_decimator_;
+  const Aec3Fft fft_;
+  std::vector<float> render_ds_;
+  const int buffer_headroom_;
+  bool last_call_was_render_ = false;
+  int num_api_calls_in_a_row_ = 0;
+  int max_observed_jitter_ = 1;
+  int64_t capture_call_counter_ = 0;
+  int64_t render_call_counter_ = 0;
+  bool render_activity_ = false;
+  size_t render_activity_counter_ = 0;
+  absl::optional<int> external_audio_buffer_delay_;
+  bool external_audio_buffer_delay_verified_after_reset_ = false;
+  size_t min_latency_blocks_ = 0;
+  size_t excess_render_detection_counter_ = 0;
+
+  int MapDelayToTotalDelay(size_t delay) const;
+  int ComputeDelay() const;
+  void ApplyTotalDelay(int delay);
+  void InsertBlock(const std::vector<std::vector<std::vector<float>>>& block,
+                   int previous_write);
+  bool DetectActiveRender(rtc::ArrayView<const float> x) const;
+  bool DetectExcessRenderBlocks();
+  void IncrementWriteIndices();
+  void IncrementLowRateReadIndices();
+  void IncrementReadIndices();
+  bool RenderOverrun();
+  bool RenderUnderrun();
+};
+
+int RenderDelayBufferImpl::instance_count_ = 0;
+
+RenderDelayBufferImpl::RenderDelayBufferImpl(const EchoCanceller3Config& config,
+                                             int sample_rate_hz,
+                                             size_t num_render_channels)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      optimization_(DetectOptimization()),
+      config_(config),
+      render_linear_amplitude_gain_(
+          std::pow(10.0f, config_.render_levels.render_power_gain_db / 20.f)),
+      delay_log_level_(config_.delay.log_warning_on_delay_changes
+                           ? rtc::LS_WARNING
+                           : rtc::LS_INFO),
+      down_sampling_factor_(config.delay.down_sampling_factor),
+      sub_block_size_(static_cast<int>(down_sampling_factor_ > 0
+                                           ? kBlockSize / down_sampling_factor_
+                                           : kBlockSize)),
+      blocks_(GetRenderDelayBufferSize(down_sampling_factor_,
+                                       config.delay.num_filters,
+                                       config.filter.main.length_blocks),
+              NumBandsForRate(sample_rate_hz),
+              num_render_channels,
+              kBlockSize),
+      spectra_(blocks_.buffer.size(), num_render_channels),
+      ffts_(blocks_.buffer.size(), num_render_channels),
+      delay_(config_.delay.default_delay),
+      echo_remover_buffer_(&blocks_, &spectra_, &ffts_),
+      low_rate_(GetDownSampledBufferSize(down_sampling_factor_,
+                                         config.delay.num_filters)),
+      render_mixer_(num_render_channels, config.delay.render_alignment_mixing),
+      render_decimator_(down_sampling_factor_),
+      fft_(),
+      render_ds_(sub_block_size_, 0.f),
+      buffer_headroom_(config.filter.main.length_blocks) {
+  RTC_DCHECK_EQ(blocks_.buffer.size(), ffts_.buffer.size());
+  RTC_DCHECK_EQ(spectra_.buffer.size(), ffts_.buffer.size());
+  for (size_t i = 0; i < blocks_.buffer.size(); ++i) {
+    RTC_DCHECK_EQ(blocks_.buffer[i][0].size(), ffts_.buffer[i].size());
+    RTC_DCHECK_EQ(spectra_.buffer[i].size(), ffts_.buffer[i].size());
+  }
+
+  Reset();
+}
+
+RenderDelayBufferImpl::~RenderDelayBufferImpl() = default;
+
+// Resets the buffer delays and clears the reported delays.
+void RenderDelayBufferImpl::Reset() {
+  last_call_was_render_ = false;
+  num_api_calls_in_a_row_ = 1;
+  min_latency_blocks_ = 0;
+  excess_render_detection_counter_ = 0;
+
+  // Initialize the read index to one sub-block before the write index.
+  low_rate_.read = low_rate_.OffsetIndex(low_rate_.write, sub_block_size_);
+
+  // Check for any external audio buffer delay and whether it is feasible.
+  if (external_audio_buffer_delay_) {
+    const int headroom = 2;
+    size_t audio_buffer_delay_to_set;
+    // Minimum delay is 1 (like the low-rate render buffer).
+    if (*external_audio_buffer_delay_ <= headroom) {
+      audio_buffer_delay_to_set = 1;
+    } else {
+      audio_buffer_delay_to_set = *external_audio_buffer_delay_ - headroom;
+    }
+
+    audio_buffer_delay_to_set = std::min(audio_buffer_delay_to_set, MaxDelay());
+
+    // When an external delay estimate is available, use that delay as the
+    // initial render buffer delay.
+    ApplyTotalDelay(audio_buffer_delay_to_set);
+    delay_ = ComputeDelay();
+
+    external_audio_buffer_delay_verified_after_reset_ = false;
+  } else {
+    // If no external delay estimate is available, set the render buffer
+    // delays to the default delay.
+    ApplyTotalDelay(config_.delay.default_delay);
+
+    // Unset the delays which are set by AlignFromDelay.
+    delay_ = absl::nullopt;
+  }
+}
+
+// Inserts a new block into the render buffers.
+RenderDelayBuffer::BufferingEvent RenderDelayBufferImpl::Insert(
+    const std::vector<std::vector<std::vector<float>>>& block) {
+  ++render_call_counter_;
+  if (delay_) {
+    if (!last_call_was_render_) {
+      last_call_was_render_ = true;
+      num_api_calls_in_a_row_ = 1;
+    } else {
+      if (++num_api_calls_in_a_row_ > max_observed_jitter_) {
+        max_observed_jitter_ = num_api_calls_in_a_row_;
+        RTC_LOG_V(delay_log_level_)
+            << "New max number api jitter observed at render block "
+            << render_call_counter_ << ": " << num_api_calls_in_a_row_
+            << " blocks";
+      }
+    }
+  }
+
+  // Increase the write indices to where the new blocks should be written.
+  const int previous_write = blocks_.write;
+  IncrementWriteIndices();
+
+  // Allow overrun and do a reset when render overrun occurs due to more
+  // render data being inserted than capture data is received.
+  BufferingEvent event =
+      RenderOverrun() ? BufferingEvent::kRenderOverrun : BufferingEvent::kNone;
+
+  // Detect and update render activity.
+  if (!render_activity_) {
+    render_activity_counter_ += DetectActiveRender(block[0][0]) ? 1 : 0;
+    render_activity_ = render_activity_counter_ >= 20;
+  }
+
+  // Insert the new render block into the specified position.
+  InsertBlock(block, previous_write);
+
+  if (event != BufferingEvent::kNone) {
+    Reset();
+  }
+
+  return event;
+}
+
+// Prepares the render buffers for processing another capture block.
+RenderDelayBuffer::BufferingEvent
+RenderDelayBufferImpl::PrepareCaptureProcessing() {
+  RenderDelayBuffer::BufferingEvent event = BufferingEvent::kNone;
+  ++capture_call_counter_;
+
+  if (delay_) {
+    if (last_call_was_render_) {
+      last_call_was_render_ = false;
+      num_api_calls_in_a_row_ = 1;
+    } else {
+      if (++num_api_calls_in_a_row_ > max_observed_jitter_) {
+        max_observed_jitter_ = num_api_calls_in_a_row_;
+        RTC_LOG_V(delay_log_level_)
+            << "New max number api jitter observed at capture block "
+            << capture_call_counter_ << ": " << num_api_calls_in_a_row_
+            << " blocks";
+      }
+    }
+  }
+
+  if (DetectExcessRenderBlocks()) {
+    // Too many render blocks compared to capture blocks. Risk of delay ending
+    // up before the filter used by the delay estimator.
+    RTC_LOG_V(delay_log_level_)
+        << "Excess render blocks detected at block " << capture_call_counter_;
+    Reset();
+    event = BufferingEvent::kRenderOverrun;
+  } else if (RenderUnderrun()) {
+    // Don't increment the read indices of the low rate buffer if there is a
+    // render underrun.
+    RTC_LOG_V(delay_log_level_)
+        << "Render buffer underrun detected at block " << capture_call_counter_;
+    IncrementReadIndices();
+    // Incrementing the buffer index without increasing the low rate buffer
+    // index means that the delay is reduced by one.
+    if (delay_ && *delay_ > 0)
+      delay_ = *delay_ - 1;
+    event = BufferingEvent::kRenderUnderrun;
+  } else {
+    // Increment the read indices in the render buffers to point to the most
+    // recent block to use in the capture processing.
+    IncrementLowRateReadIndices();
+    IncrementReadIndices();
+  }
+
+  echo_remover_buffer_.SetRenderActivity(render_activity_);
+  if (render_activity_) {
+    render_activity_counter_ = 0;
+    render_activity_ = false;
+  }
+
+  return event;
+}
+
+// Sets the delay and returns a bool indicating whether the delay was changed.
+bool RenderDelayBufferImpl::AlignFromDelay(size_t delay) {
+  RTC_DCHECK(!config_.delay.use_external_delay_estimator);
+  if (!external_audio_buffer_delay_verified_after_reset_ &&
+      external_audio_buffer_delay_ && delay_) {
+    int difference = static_cast<int>(delay) - static_cast<int>(*delay_);
+    RTC_LOG_V(delay_log_level_)
+        << "Mismatch between first estimated delay after reset "
+           "and externally reported audio buffer delay: "
+        << difference << " blocks";
+    external_audio_buffer_delay_verified_after_reset_ = true;
+  }
+  if (delay_ && *delay_ == delay) {
+    return false;
+  }
+  delay_ = delay;
+
+  // Compute the total delay and limit the delay to the allowed range.
+  int total_delay = MapDelayToTotalDelay(*delay_);
+  total_delay =
+      std::min(MaxDelay(), static_cast<size_t>(std::max(total_delay, 0)));
+
+  // Apply the delay to the buffers.
+  ApplyTotalDelay(total_delay);
+  return true;
+}
+
+void RenderDelayBufferImpl::SetAudioBufferDelay(int delay_ms) {
+  if (!external_audio_buffer_delay_) {
+    RTC_LOG_V(delay_log_level_)
+        << "Receiving a first externally reported audio buffer delay of "
+        << delay_ms << " ms.";
+  }
+
+  // Convert delay from milliseconds to blocks (rounded down), with each block
+  // corresponding to 4 ms.
+  external_audio_buffer_delay_ = delay_ms / 4;
+}
+
+bool RenderDelayBufferImpl::HasReceivedBufferDelay() {
+  return external_audio_buffer_delay_.has_value();
+}
+
+// Maps the externally computed delay to the delay used internally.
+int RenderDelayBufferImpl::MapDelayToTotalDelay(
+    size_t external_delay_blocks) const {
+  const int latency_blocks = BufferLatency();
+  return latency_blocks + static_cast<int>(external_delay_blocks);
+}
+
+// Returns the delay (not including call jitter).
+int RenderDelayBufferImpl::ComputeDelay() const {
+  const int latency_blocks = BufferLatency();
+  int internal_delay = spectra_.read >= spectra_.write
+                           ? spectra_.read - spectra_.write
+                           : spectra_.size + spectra_.read - spectra_.write;
+
+  return internal_delay - latency_blocks;
+}
+
+// Set the read indices according to the delay.
+void RenderDelayBufferImpl::ApplyTotalDelay(int delay) {
+  RTC_LOG_V(delay_log_level_)
+      << "Applying total delay of " << delay << " blocks.";
+  blocks_.read = blocks_.OffsetIndex(blocks_.write, -delay);
+  spectra_.read = spectra_.OffsetIndex(spectra_.write, delay);
+  ffts_.read = ffts_.OffsetIndex(ffts_.write, delay);
+}
+
+void RenderDelayBufferImpl::AlignFromExternalDelay() {
+  RTC_DCHECK(config_.delay.use_external_delay_estimator);
+  if (external_audio_buffer_delay_) {
+    int64_t delay = render_call_counter_ - capture_call_counter_ +
+                    *external_audio_buffer_delay_;
+    ApplyTotalDelay(delay);
+  }
+}
+
+// Inserts a block into the render buffers.
+void RenderDelayBufferImpl::InsertBlock(
+    const std::vector<std::vector<std::vector<float>>>& block,
+    int previous_write) {
+  auto& b = blocks_;
+  auto& lr = low_rate_;
+  auto& ds = render_ds_;
+  auto& f = ffts_;
+  auto& s = spectra_;
+  const size_t num_bands = b.buffer[b.write].size();
+  const size_t num_render_channels = b.buffer[b.write][0].size();
+  RTC_DCHECK_EQ(block.size(), b.buffer[b.write].size());
+  for (size_t band = 0; band < num_bands; ++band) {
+    RTC_DCHECK_EQ(block[band].size(), num_render_channels);
+    RTC_DCHECK_EQ(b.buffer[b.write][band].size(), num_render_channels);
+    for (size_t ch = 0; ch < num_render_channels; ++ch) {
+      RTC_DCHECK_EQ(block[band][ch].size(), b.buffer[b.write][band][ch].size());
+      std::copy(block[band][ch].begin(), block[band][ch].end(),
+                b.buffer[b.write][band][ch].begin());
+    }
+  }
+
+  if (render_linear_amplitude_gain_ != 1.f) {
+    for (size_t band = 0; band < num_bands; ++band) {
+      for (size_t ch = 0; ch < num_render_channels; ++ch) {
+        for (size_t k = 0; k < kBlockSize; ++k) {
+          b.buffer[b.write][band][ch][k] *= render_linear_amplitude_gain_;
+        }
+      }
+    }
+  }
+
+  std::array<float, kBlockSize> downmixed_render;
+  render_mixer_.ProduceOutput(b.buffer[b.write][0], downmixed_render);
+  render_decimator_.Decimate(downmixed_render, ds);
+  data_dumper_->DumpWav("aec3_render_decimator_output", ds.size(), ds.data(),
+                        16000 / down_sampling_factor_, 1);
+  std::copy(ds.rbegin(), ds.rend(), lr.buffer.begin() + lr.write);
+  for (size_t channel = 0; channel < b.buffer[b.write][0].size(); ++channel) {
+    fft_.PaddedFft(b.buffer[b.write][0][channel],
+                   b.buffer[previous_write][0][channel],
+                   &f.buffer[f.write][channel]);
+    f.buffer[f.write][channel].Spectrum(optimization_,
+                                        s.buffer[s.write][channel]);
+  }
+}
+
+bool RenderDelayBufferImpl::DetectActiveRender(
+    rtc::ArrayView<const float> x) const {
+  const float x_energy = std::inner_product(x.begin(), x.end(), x.begin(), 0.f);
+  return x_energy > (config_.render_levels.active_render_limit *
+                     config_.render_levels.active_render_limit) *
+                        kFftLengthBy2;
+}
+
+bool RenderDelayBufferImpl::DetectExcessRenderBlocks() {
+  bool excess_render_detected = false;
+  const size_t latency_blocks = static_cast<size_t>(BufferLatency());
+  // The recently seen minimum latency in blocks. Should be close to 0.
+  min_latency_blocks_ = std::min(min_latency_blocks_, latency_blocks);
+  // After processing a configurable number of blocks the minimum latency is
+  // checked.
+  if (++excess_render_detection_counter_ >=
+      config_.buffering.excess_render_detection_interval_blocks) {
+    // If the minimum latency is not lower than the threshold there have been
+    // more render than capture frames.
+    excess_render_detected = min_latency_blocks_ >
+                             config_.buffering.max_allowed_excess_render_blocks;
+    // Reset the counter and let the minimum latency be the current latency.
+    min_latency_blocks_ = latency_blocks;
+    excess_render_detection_counter_ = 0;
+  }
+
+  data_dumper_->DumpRaw("aec3_latency_blocks", latency_blocks);
+  data_dumper_->DumpRaw("aec3_min_latency_blocks", min_latency_blocks_);
+  data_dumper_->DumpRaw("aec3_excess_render_detected", excess_render_detected);
+  return excess_render_detected;
+}
+
+// Computes the latency in the buffer (the number of unread sub-blocks).
+int RenderDelayBufferImpl::BufferLatency() const {
+  const DownsampledRenderBuffer& l = low_rate_;
+  int latency_samples = (l.buffer.size() + l.read - l.write) % l.buffer.size();
+  int latency_blocks = latency_samples / sub_block_size_;
+  return latency_blocks;
+}
+
+// Increments the write indices for the render buffers.
+void RenderDelayBufferImpl::IncrementWriteIndices() {
+  low_rate_.UpdateWriteIndex(-sub_block_size_);
+  blocks_.IncWriteIndex();
+  spectra_.DecWriteIndex();
+  ffts_.DecWriteIndex();
+}
+
+// Increments the read indices of the low rate render buffers.
+void RenderDelayBufferImpl::IncrementLowRateReadIndices() {
+  low_rate_.UpdateReadIndex(-sub_block_size_);
+}
+
+// Increments the read indices for the render buffers.
+void RenderDelayBufferImpl::IncrementReadIndices() {
+  if (blocks_.read != blocks_.write) {
+    blocks_.IncReadIndex();
+    spectra_.DecReadIndex();
+    ffts_.DecReadIndex();
+  }
+}
+
+// Checks for a render buffer overrun.
+bool RenderDelayBufferImpl::RenderOverrun() {
+  return low_rate_.read == low_rate_.write || blocks_.read == blocks_.write;
+}
+
+// Checks for a render buffer underrun.
+bool RenderDelayBufferImpl::RenderUnderrun() {
+  return low_rate_.read == low_rate_.write;
+}
+
+}  // namespace
+
+RenderDelayBuffer* RenderDelayBuffer::Create(const EchoCanceller3Config& config,
+                                             int sample_rate_hz,
+                                             size_t num_render_channels) {
+  return new RenderDelayBufferImpl(config, sample_rate_hz,
+                                   num_render_channels);
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/render_delay_buffer.h b/audio_processing/aec3/render_delay_buffer.h
new file mode 100644
index 0000000..4ea95aa
--- /dev/null
+++ b/audio_processing/aec3/render_delay_buffer.h
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_BUFFER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/downsampled_render_buffer.h"
+#include "audio_processing/aec3/render_buffer.h"
+
+namespace webrtc {
+
+// Class for buffering the incoming render blocks such that these may be
+// extracted with a specified delay.
+class RenderDelayBuffer {
+ public:
+  enum class BufferingEvent {
+    kNone,
+    kRenderUnderrun,
+    kRenderOverrun,
+    kApiCallSkew
+  };
+
+  static RenderDelayBuffer* Create(const EchoCanceller3Config& config,
+                                   int sample_rate_hz,
+                                   size_t num_render_channels);
+  virtual ~RenderDelayBuffer() = default;
+
+  // Resets the buffer alignment.
+  virtual void Reset() = 0;
+
+  // Inserts a block into the buffer.
+  virtual BufferingEvent Insert(
+      const std::vector<std::vector<std::vector<float>>>& block) = 0;
+
+  // Updates the buffers one step based on the specified buffer delay. Returns
+  // an enum indicating whether there was a special event that occurred.
+  virtual BufferingEvent PrepareCaptureProcessing() = 0;
+
+  // Sets the buffer delay and returns a bool indicating whether the delay
+  // changed.
+  virtual bool AlignFromDelay(size_t delay) = 0;
+
+  // Sets the buffer delay from the most recently reported external delay.
+  virtual void AlignFromExternalDelay() = 0;
+
+  // Gets the buffer delay.
+  virtual size_t Delay() const = 0;
+
+  // Gets the maximum possible buffer delay.
+  virtual size_t MaxDelay() const = 0;
+
+  // Returns the render buffer for the echo remover.
+  virtual RenderBuffer* GetRenderBuffer() = 0;
+
+  // Returns the downsampled render buffer.
+  virtual const DownsampledRenderBuffer& GetDownsampledRenderBuffer()
+      const = 0;
+
+  // Returns the maximum non-causal offset that can occur in the delay buffer.
+  static int DelayEstimatorOffset(const EchoCanceller3Config& config);
+
+  // Provides an optional external estimate of the audio buffer delay.
+  virtual void SetAudioBufferDelay(int delay_ms) = 0;
+
+  // Returns whether an external delay estimate has been reported via
+  // SetAudioBufferDelay.
+  virtual bool HasReceivedBufferDelay() = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_BUFFER_H_
diff --git a/audio_processing/aec3/render_delay_buffer_unittest.cc b/audio_processing/aec3/render_delay_buffer_unittest.cc
new file mode 100644
index 0000000..35e8131
--- /dev/null
+++ b/audio_processing/aec3/render_delay_buffer_unittest.cc
@@ -0,0 +1,156 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  rtc::StringBuilder ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.Release();
+}
+
+}  // namespace
+
+// Verifies that a buffer overflow is correctly reported.
+TEST(RenderDelayBuffer, BufferOverflow) {
+  const EchoCanceller3Config config;
+  for (auto num_channels : {1, 2, 8}) {
+    for (auto rate : {16000, 32000, 48000}) {
+      SCOPED_TRACE(ProduceDebugText(rate));
+      std::unique_ptr<RenderDelayBuffer> delay_buffer(
+          RenderDelayBuffer::Create(config, rate, num_channels));
+      std::vector<std::vector<std::vector<float>>> block_to_insert(
+          NumBandsForRate(rate),
+          std::vector<std::vector<float>>(
+              num_channels, std::vector<float>(kBlockSize, 0.f)));
+      for (size_t k = 0; k < 10; ++k) {
+        EXPECT_EQ(RenderDelayBuffer::BufferingEvent::kNone,
+                  delay_buffer->Insert(block_to_insert));
+      }
+      bool overrun_occurred = false;
+      for (size_t k = 0; k < 1000; ++k) {
+        RenderDelayBuffer::BufferingEvent event =
+            delay_buffer->Insert(block_to_insert);
+        overrun_occurred =
+            overrun_occurred ||
+            RenderDelayBuffer::BufferingEvent::kRenderOverrun == event;
+      }
+
+      EXPECT_TRUE(overrun_occurred);
+    }
+  }
+}
+
+// Verifies that the check for available block works.
+TEST(RenderDelayBuffer, AvailableBlock) {
+  constexpr size_t kNumChannels = 1;
+  constexpr int kSampleRateHz = 48000;
+  constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+  std::unique_ptr<RenderDelayBuffer> delay_buffer(RenderDelayBuffer::Create(
+      EchoCanceller3Config(), kSampleRateHz, kNumChannels));
+  std::vector<std::vector<std::vector<float>>> input_block(
+      kNumBands, std::vector<std::vector<float>>(
+                     kNumChannels, std::vector<float>(kBlockSize, 1.f)));
+  EXPECT_EQ(RenderDelayBuffer::BufferingEvent::kNone,
+            delay_buffer->Insert(input_block));
+  delay_buffer->PrepareCaptureProcessing();
+}
+
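The tests above also document the intended call pattern. As a condensed sketch (hypothetical sample rate and channel count, not part of the patch), the render-side flow looks like this:

#include <memory>
#include <vector>

#include "api/echo_canceller3_config.h"
#include "audio_processing/aec3/aec3_common.h"
#include "audio_processing/aec3/render_delay_buffer.h"

// Sketch: push one render block and advance the capture side once.
void RenderDelayBufferUsageSketch() {
  constexpr int kSampleRateHz = 48000;
  constexpr size_t kNumChannels = 2;
  std::unique_ptr<webrtc::RenderDelayBuffer> buffer(
      webrtc::RenderDelayBuffer::Create(webrtc::EchoCanceller3Config(),
                                        kSampleRateHz, kNumChannels));
  // One block: bands x channels x samples, zero-filled for illustration.
  std::vector<std::vector<std::vector<float>>> block(
      webrtc::NumBandsForRate(kSampleRateHz),
      std::vector<std::vector<float>>(
          kNumChannels, std::vector<float>(webrtc::kBlockSize, 0.f)));
  // Each render block is inserted once...
  buffer->Insert(block);
  // ...and the buffers are stepped once per processed capture block.
  buffer->PrepareCaptureProcessing();
}

+// Verifies the AlignFromDelay method.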
+TEST(RenderDelayBuffer, AlignFromDelay) { + EchoCanceller3Config config; + std::unique_ptr delay_buffer( + RenderDelayBuffer::Create(config, 16000, 1)); + ASSERT_TRUE(delay_buffer->Delay()); + delay_buffer->Reset(); + size_t initial_internal_delay = 0; + for (size_t delay = initial_internal_delay; + delay < initial_internal_delay + 20; ++delay) { + ASSERT_TRUE(delay_buffer->AlignFromDelay(delay)); + EXPECT_EQ(delay, delay_buffer->Delay()); + } +} + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) + +// Verifies the check for feasible delay. +// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH +// tests on test bots has been fixed. +TEST(RenderDelayBuffer, DISABLED_WrongDelay) { + std::unique_ptr delay_buffer( + RenderDelayBuffer::Create(EchoCanceller3Config(), 48000, 1)); + EXPECT_DEATH(delay_buffer->AlignFromDelay(21), ""); +} + +// Verifies the check for the number of bands in the inserted blocks. +TEST(RenderDelayBuffer, WrongNumberOfBands) { + for (auto rate : {16000, 32000, 48000}) { + for (size_t num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(rate)); + std::unique_ptr delay_buffer(RenderDelayBuffer::Create( + EchoCanceller3Config(), rate, num_channels)); + std::vector>> block_to_insert( + NumBandsForRate(rate < 48000 ? rate + 16000 : 16000), + std::vector>(num_channels, + std::vector(kBlockSize, 0.f))); + EXPECT_DEATH(delay_buffer->Insert(block_to_insert), ""); + } + } +} + +// Verifies the check for the number of channels in the inserted blocks. +TEST(RenderDelayBuffer, WrongNumberOfChannels) { + for (auto rate : {16000, 32000, 48000}) { + for (size_t num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(rate)); + std::unique_ptr delay_buffer(RenderDelayBuffer::Create( + EchoCanceller3Config(), rate, num_channels)); + std::vector>> block_to_insert( + NumBandsForRate(rate), + std::vector>(num_channels + 1, + std::vector(kBlockSize, 0.f))); + EXPECT_DEATH(delay_buffer->Insert(block_to_insert), ""); + } + } +} + +// Verifies the check of the length of the inserted blocks. +TEST(RenderDelayBuffer, WrongBlockLength) { + for (auto rate : {16000, 32000, 48000}) { + for (size_t num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(rate)); + std::unique_ptr delay_buffer(RenderDelayBuffer::Create( + EchoCanceller3Config(), rate, num_channels)); + std::vector>> block_to_insert( + NumBandsForRate(rate), + std::vector>( + num_channels, std::vector(kBlockSize - 1, 0.f))); + EXPECT_DEATH(delay_buffer->Insert(block_to_insert), ""); + } + } +} + +#endif + +} // namespace webrtc diff --git a/audio_processing/aec3/render_delay_controller.cc b/audio_processing/aec3/render_delay_controller.cc new file mode 100644 index 0000000..2cd89e1 --- /dev/null +++ b/audio_processing/aec3/render_delay_controller.cc @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+#include "audio_processing/aec3/render_delay_controller.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/delay_estimate.h"
+#include "audio_processing/aec3/downsampled_render_buffer.h"
+#include "audio_processing/aec3/echo_path_delay_estimator.h"
+#include "audio_processing/aec3/render_delay_controller_metrics.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomic_ops.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+namespace {
+
+class RenderDelayControllerImpl final : public RenderDelayController {
+ public:
+  RenderDelayControllerImpl(const EchoCanceller3Config& config,
+                            int sample_rate_hz,
+                            size_t num_capture_channels);
+  ~RenderDelayControllerImpl() override;
+  void Reset(bool reset_delay_confidence) override;
+  void LogRenderCall() override;
+  absl::optional<DelayEstimate> GetDelay(
+      const DownsampledRenderBuffer& render_buffer,
+      size_t render_delay_buffer_delay,
+      const std::vector<std::vector<float>>& capture) override;
+  bool HasClockdrift() const override;
+
+ private:
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const int hysteresis_limit_blocks_;
+  const int delay_headroom_samples_;
+  absl::optional<DelayEstimate> delay_;
+  EchoPathDelayEstimator delay_estimator_;
+  RenderDelayControllerMetrics metrics_;
+  absl::optional<DelayEstimate> delay_samples_;
+  size_t capture_call_counter_ = 0;
+  int delay_change_counter_ = 0;
+  DelayEstimate::Quality last_delay_estimate_quality_;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RenderDelayControllerImpl);
+};
+
+DelayEstimate ComputeBufferDelay(
+    const absl::optional<DelayEstimate>& current_delay,
+    int hysteresis_limit_blocks,
+    int delay_headroom_samples,
+    DelayEstimate estimated_delay) {
+  // Subtract delay headroom.
+  const int delay_with_headroom_samples = std::max(
+      static_cast<int>(estimated_delay.delay) - delay_headroom_samples, 0);
+
+  // Compute the buffer delay increase required to achieve the desired latency.
+  size_t new_delay_blocks = delay_with_headroom_samples >> kBlockSizeLog2;
+
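Before the hysteresis step below, it may help to see the rule in isolation. This standalone restatement is a minimal sketch (hypothetical helper, not part of the patch) showing which new delay estimates are absorbed and which pass through:

#include <cstddef>

// Sketch of the hysteresis rule used below: a new, larger delay estimate is
// ignored unless it exceeds the current delay by more than
// hysteresis_limit_blocks, which avoids realigning the buffer on small
// estimate jitter. Decreases are always accepted.
size_t ApplyHysteresisSketch(size_t current_delay_blocks,
                             size_t new_delay_blocks,
                             size_t hysteresis_limit_blocks) {
  if (new_delay_blocks > current_delay_blocks &&
      new_delay_blocks <= current_delay_blocks + hysteresis_limit_blocks) {
    return current_delay_blocks;  // Inside the dead zone: keep the old delay.
  }
  return new_delay_blocks;
}

// ApplyHysteresisSketch(10, 11, 1) == 10  (absorbed by hysteresis)
// ApplyHysteresisSketch(10, 12, 1) == 12  (change is accepted)
// ApplyHysteresisSketch(10, 9, 1)  == 9   (decreases always pass through)

+  // Add hysteresis.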
+ if (current_delay) { + size_t current_delay_blocks = current_delay->delay; + if (new_delay_blocks > current_delay_blocks && + new_delay_blocks <= current_delay_blocks + hysteresis_limit_blocks) { + new_delay_blocks = current_delay_blocks; + } + } + + DelayEstimate new_delay = estimated_delay; + new_delay.delay = new_delay_blocks; + return new_delay; +} + +int RenderDelayControllerImpl::instance_count_ = 0; + +RenderDelayControllerImpl::RenderDelayControllerImpl( + const EchoCanceller3Config& config, + int sample_rate_hz, + size_t num_capture_channels) + : data_dumper_( + new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))), + hysteresis_limit_blocks_( + static_cast(config.delay.hysteresis_limit_blocks)), + delay_headroom_samples_(config.delay.delay_headroom_samples), + delay_estimator_(data_dumper_.get(), config, num_capture_channels), + last_delay_estimate_quality_(DelayEstimate::Quality::kCoarse) { + RTC_DCHECK(ValidFullBandRate(sample_rate_hz)); + delay_estimator_.LogDelayEstimationProperties(sample_rate_hz, 0); +} + +RenderDelayControllerImpl::~RenderDelayControllerImpl() = default; + +void RenderDelayControllerImpl::Reset(bool reset_delay_confidence) { + delay_ = absl::nullopt; + delay_samples_ = absl::nullopt; + delay_estimator_.Reset(reset_delay_confidence); + delay_change_counter_ = 0; + if (reset_delay_confidence) { + last_delay_estimate_quality_ = DelayEstimate::Quality::kCoarse; + } +} + +void RenderDelayControllerImpl::LogRenderCall() {} + +absl::optional RenderDelayControllerImpl::GetDelay( + const DownsampledRenderBuffer& render_buffer, + size_t render_delay_buffer_delay, + const std::vector>& capture) { + RTC_DCHECK_EQ(kBlockSize, capture[0].size()); + ++capture_call_counter_; + + auto delay_samples = delay_estimator_.EstimateDelay(render_buffer, capture); + + if (delay_samples) { + if (!delay_samples_ || delay_samples->delay != delay_samples_->delay) { + delay_change_counter_ = 0; + } + if (delay_samples_) { + delay_samples_->blocks_since_last_change = + delay_samples_->delay == delay_samples->delay + ? delay_samples_->blocks_since_last_change + 1 + : 0; + delay_samples_->blocks_since_last_update = 0; + delay_samples_->delay = delay_samples->delay; + delay_samples_->quality = delay_samples->quality; + } else { + delay_samples_ = delay_samples; + } + } else { + if (delay_samples_) { + ++delay_samples_->blocks_since_last_change; + ++delay_samples_->blocks_since_last_update; + } + } + + if (delay_change_counter_ < 2 * kNumBlocksPerSecond) { + ++delay_change_counter_; + } + + if (delay_samples_) { + const bool use_hysteresis = + last_delay_estimate_quality_ == DelayEstimate::Quality::kRefined && + delay_samples_->quality == DelayEstimate::Quality::kRefined; + delay_ = ComputeBufferDelay(delay_, + use_hysteresis ? hysteresis_limit_blocks_ : 0, + delay_headroom_samples_, *delay_samples_); + last_delay_estimate_quality_ = delay_samples_->quality; + } + + metrics_.Update(delay_samples_ ? absl::optional(delay_samples_->delay) + : absl::nullopt, + delay_ ? delay_->delay : 0, 0, delay_estimator_.Clockdrift()); + + data_dumper_->DumpRaw("aec3_render_delay_controller_delay", + delay_samples ? delay_samples->delay : 0); + data_dumper_->DumpRaw("aec3_render_delay_controller_buffer_delay", + delay_ ? 
delay_->delay : 0); + + return delay_; +} + +bool RenderDelayControllerImpl::HasClockdrift() const { + return delay_estimator_.Clockdrift() != ClockdriftDetector::Level::kNone; +} + +} // namespace + +RenderDelayController* RenderDelayController::Create( + const EchoCanceller3Config& config, + int sample_rate_hz, + size_t num_capture_channels) { + return new RenderDelayControllerImpl(config, sample_rate_hz, + num_capture_channels); +} + +} // namespace webrtc diff --git a/audio_processing/aec3/render_delay_controller.h b/audio_processing/aec3/render_delay_controller.h new file mode 100644 index 0000000..36852c7 --- /dev/null +++ b/audio_processing/aec3/render_delay_controller.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_H_ + +#include "absl/types/optional.h" +#include "rtc_base/array_view.h" +#include "api/echo_canceller3_config.h" +#include "audio_processing/aec3/delay_estimate.h" +#include "audio_processing/aec3/downsampled_render_buffer.h" +#include "audio_processing/aec3/render_delay_buffer.h" +#include "audio_processing/logging/apm_data_dumper.h" + +namespace webrtc { + +// Class for aligning the render and capture signal using a RenderDelayBuffer. +class RenderDelayController { + public: + static RenderDelayController* Create(const EchoCanceller3Config& config, + int sample_rate_hz, + size_t num_capture_channels); + virtual ~RenderDelayController() = default; + + // Resets the delay controller. If the delay confidence is reset, the reset + // behavior is as if the call is restarted. + virtual void Reset(bool reset_delay_confidence) = 0; + + // Logs a render call. + virtual void LogRenderCall() = 0; + + // Aligns the render buffer content with the capture signal. + virtual absl::optional GetDelay( + const DownsampledRenderBuffer& render_buffer, + size_t render_delay_buffer_delay, + const std::vector>& capture) = 0; + + // Returns true if clockdrift has been detected. + virtual bool HasClockdrift() const = 0; +}; +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_H_ diff --git a/audio_processing/aec3/render_delay_controller_metrics.cc b/audio_processing/aec3/render_delay_controller_metrics.cc new file mode 100644 index 0000000..7bc53d9 --- /dev/null +++ b/audio_processing/aec3/render_delay_controller_metrics.cc @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "audio_processing/aec3/render_delay_controller_metrics.h"
+
+#include <algorithm>
+
+#include "audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+enum class DelayReliabilityCategory {
+  kNone,
+  kPoor,
+  kMedium,
+  kGood,
+  kExcellent,
+  kNumCategories
+};
+enum class DelayChangesCategory {
+  kNone,
+  kFew,
+  kSeveral,
+  kMany,
+  kConstant,
+  kNumCategories
+};
+
+constexpr int kMaxSkewShiftCount = 20;
+
+}  // namespace
+
+RenderDelayControllerMetrics::RenderDelayControllerMetrics() = default;
+
+void RenderDelayControllerMetrics::Update(
+    absl::optional<size_t> delay_samples,
+    size_t buffer_delay_blocks,
+    absl::optional<int> skew_shift_blocks,
+    ClockdriftDetector::Level clockdrift) {
+  ++call_counter_;
+
+  if (!initial_update) {
+    size_t delay_blocks;
+    if (delay_samples) {
+      ++reliable_delay_estimate_counter_;
+      delay_blocks = (*delay_samples) / kBlockSize + 2;
+    } else {
+      delay_blocks = 0;
+    }
+
+    if (delay_blocks != delay_blocks_) {
+      ++delay_change_counter_;
+      delay_blocks_ = delay_blocks;
+    }
+
+    if (skew_shift_blocks) {
+      // Count the shift, saturating at kMaxSkewShiftCount.
+      skew_shift_count_ = std::min(kMaxSkewShiftCount, skew_shift_count_ + 1);
+    }
+  } else if (++initial_call_counter_ == 5 * kNumBlocksPerSecond) {
+    initial_update = false;
+  }
+
+  if (call_counter_ == kMetricsReportingIntervalBlocks) {
+    int value_to_report = static_cast<int>(delay_blocks_);
+    value_to_report = std::min(124, value_to_report >> 1);
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.EchoPathDelay",
+                                value_to_report, 0, 124, 125);
+
+    value_to_report = static_cast<int>(buffer_delay_blocks + 2);
+    value_to_report = std::min(124, value_to_report >> 1);
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.BufferDelay",
+                                value_to_report, 0, 124, 125);
+
+    DelayReliabilityCategory delay_reliability;
+    if (reliable_delay_estimate_counter_ == 0) {
+      delay_reliability = DelayReliabilityCategory::kNone;
+    } else if (reliable_delay_estimate_counter_ > (call_counter_ >> 1)) {
+      delay_reliability = DelayReliabilityCategory::kExcellent;
+    } else if (reliable_delay_estimate_counter_ > 100) {
+      delay_reliability = DelayReliabilityCategory::kGood;
+    } else if (reliable_delay_estimate_counter_ > 10) {
+      delay_reliability = DelayReliabilityCategory::kMedium;
+    } else {
+      delay_reliability = DelayReliabilityCategory::kPoor;
+    }
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.ReliableDelayEstimates",
+        static_cast<int>(delay_reliability),
+        static_cast<int>(DelayReliabilityCategory::kNumCategories));
+
+    DelayChangesCategory delay_changes;
+    if (delay_change_counter_ == 0) {
+      delay_changes = DelayChangesCategory::kNone;
+    } else if (delay_change_counter_ > 10) {
+      delay_changes = DelayChangesCategory::kConstant;
+    } else if (delay_change_counter_ > 5) {
+      delay_changes = DelayChangesCategory::kMany;
+    } else if (delay_change_counter_ > 2) {
+      delay_changes = DelayChangesCategory::kSeveral;
+    } else {
+      delay_changes = DelayChangesCategory::kFew;
+    }
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.DelayChanges",
+        static_cast<int>(delay_changes),
+        static_cast<int>(DelayChangesCategory::kNumCategories));
+
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.Clockdrift", static_cast<int>(clockdrift),
+        static_cast<int>(ClockdriftDetector::Level::kNumCategories));
+
+    metrics_reported_ = true;
+    call_counter_ = 0;
+    ResetMetrics();
+  } else {
+    metrics_reported_ = false;
+  }
+
+  if (!initial_update && ++skew_report_timer_ == 60 * kNumBlocksPerSecond) {
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.MaxSkewShiftCount",
+                                skew_shift_count_, 0, kMaxSkewShiftCount,
+                                kMaxSkewShiftCount + 1);
+
+    skew_shift_count_ = 0;
+    skew_report_timer_ = 0;
+  }
+}
+
+void RenderDelayControllerMetrics::ResetMetrics() {
+  delay_change_counter_ = 0;
+  reliable_delay_estimate_counter_ = 0;
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/render_delay_controller_metrics.h b/audio_processing/aec3/render_delay_controller_metrics.h
new file mode 100644
index 0000000..7dc3b35
--- /dev/null
+++ b/audio_processing/aec3/render_delay_controller_metrics.h
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_METRICS_H_
+
+#include <stddef.h>
+
+#include "absl/types/optional.h"
+#include "audio_processing/aec3/clockdrift_detector.h"
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+// Handles the reporting of metrics for the render delay controller.
+class RenderDelayControllerMetrics {
+ public:
+  RenderDelayControllerMetrics();
+
+  // Updates the metric with new data.
+  void Update(absl::optional<size_t> delay_samples,
+              size_t buffer_delay_blocks,
+              absl::optional<int> skew_shift_blocks,
+              ClockdriftDetector::Level clockdrift);
+
+  // Returns true if the metrics have just been reported, otherwise false.
+  bool MetricsReported() { return metrics_reported_; }
+
+ private:
+  // Resets the metrics.
+  void ResetMetrics();
+
+  size_t delay_blocks_ = 0;
+  int reliable_delay_estimate_counter_ = 0;
+  int delay_change_counter_ = 0;
+  int call_counter_ = 0;
+  int skew_report_timer_ = 0;
+  int initial_call_counter_ = 0;
+  bool metrics_reported_ = false;
+  bool initial_update = true;
+  int skew_shift_count_ = 0;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RenderDelayControllerMetrics);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_METRICS_H_
diff --git a/audio_processing/aec3/render_delay_controller_metrics_unittest.cc b/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
new file mode 100644
index 0000000..e7d7703
--- /dev/null
+++ b/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_controller_metrics.h"
+
+#include "absl/types/optional.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verify the general functionality of RenderDelayControllerMetrics.
+TEST(RenderDelayControllerMetrics, NormalUsage) { + RenderDelayControllerMetrics metrics; + + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) { + metrics.Update(absl::nullopt, 0, absl::nullopt, + ClockdriftDetector::Level::kNone); + EXPECT_FALSE(metrics.MetricsReported()); + } + metrics.Update(absl::nullopt, 0, absl::nullopt, + ClockdriftDetector::Level::kNone); + EXPECT_TRUE(metrics.MetricsReported()); + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/render_delay_controller_unittest.cc b/audio_processing/aec3/render_delay_controller_unittest.cc new file mode 100644 index 0000000..fb7b86a --- /dev/null +++ b/audio_processing/aec3/render_delay_controller_unittest.cc @@ -0,0 +1,363 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/render_delay_controller.h" + +#include +#include +#include +#include + +#include "modules/audio_processing/aec3/aec3_common.h" +#include "modules/audio_processing/aec3/block_processor.h" +#include "modules/audio_processing/aec3/decimator.h" +#include "modules/audio_processing/aec3/render_delay_buffer.h" +#include "modules/audio_processing/logging/apm_data_dumper.h" +#include "modules/audio_processing/test/echo_canceller_test_tools.h" +#include "rtc_base/random.h" +#include "rtc_base/strings/string_builder.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +std::string ProduceDebugText(int sample_rate_hz) { + rtc::StringBuilder ss; + ss << "Sample rate: " << sample_rate_hz; + return ss.Release(); +} + +std::string ProduceDebugText(int sample_rate_hz, + size_t delay, + size_t num_render_channels, + size_t num_capture_channels) { + rtc::StringBuilder ss; + ss << ProduceDebugText(sample_rate_hz) << ", Delay: " << delay + << ", Num render channels: " << num_render_channels + << ", Num capture channels: " << num_capture_channels; + return ss.Release(); +} + +constexpr size_t kDownSamplingFactors[] = {2, 4, 8}; + +} // namespace + +// Verifies the output of GetDelay when there are no AnalyzeRender calls. +// TODO(bugs.webrtc.org/11161): Re-enable tests. +TEST(RenderDelayController, DISABLED_NoRenderSignal) { + for (size_t num_render_channels : {1, 2, 8}) { + std::vector> block(1, + std::vector(kBlockSize, 0.f)); + EchoCanceller3Config config; + for (size_t num_matched_filters = 4; num_matched_filters <= 10; + num_matched_filters++) { + for (auto down_sampling_factor : kDownSamplingFactors) { + config.delay.down_sampling_factor = down_sampling_factor; + config.delay.num_filters = num_matched_filters; + for (auto rate : {16000, 32000, 48000}) { + SCOPED_TRACE(ProduceDebugText(rate)); + std::unique_ptr delay_buffer( + RenderDelayBuffer::Create(config, rate, num_render_channels)); + std::unique_ptr delay_controller( + RenderDelayController::Create(config, rate, + /*num_capture_channels*/ 1)); + for (size_t k = 0; k < 100; ++k) { + auto delay = delay_controller->GetDelay( + delay_buffer->GetDownsampledRenderBuffer(), + delay_buffer->Delay(), block); + EXPECT_FALSE(delay->delay); + } + } + } + } + } +} + +// Verifies the basic API call sequence. +// TODO(bugs.webrtc.org/11161): Re-enable tests. 
+TEST(RenderDelayController, DISABLED_BasicApiCalls) { + for (size_t num_capture_channels : {1, 2, 4}) { + for (size_t num_render_channels : {1, 2, 8}) { + std::vector> capture_block( + num_capture_channels, std::vector(kBlockSize, 0.f)); + absl::optional delay_blocks; + for (size_t num_matched_filters = 4; num_matched_filters <= 10; + num_matched_filters++) { + for (auto down_sampling_factor : kDownSamplingFactors) { + EchoCanceller3Config config; + config.delay.down_sampling_factor = down_sampling_factor; + config.delay.num_filters = num_matched_filters; + config.delay.capture_alignment_mixing.downmix = false; + config.delay.capture_alignment_mixing.adaptive_selection = false; + + for (auto rate : {16000, 32000, 48000}) { + std::vector>> render_block( + NumBandsForRate(rate), + std::vector>( + num_render_channels, std::vector(kBlockSize, 0.f))); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, rate, num_render_channels)); + std::unique_ptr delay_controller( + RenderDelayController::Create(EchoCanceller3Config(), rate, + num_capture_channels)); + for (size_t k = 0; k < 10; ++k) { + render_delay_buffer->Insert(render_block); + render_delay_buffer->PrepareCaptureProcessing(); + + delay_blocks = delay_controller->GetDelay( + render_delay_buffer->GetDownsampledRenderBuffer(), + render_delay_buffer->Delay(), capture_block); + } + EXPECT_TRUE(delay_blocks); + EXPECT_FALSE(delay_blocks->delay); + } + } + } + } + } +} + +// Verifies that the RenderDelayController is able to align the signals for +// simple timeshifts between the signals. +// TODO(bugs.webrtc.org/11161): Re-enable tests. +TEST(RenderDelayController, DISABLED_Alignment) { + Random random_generator(42U); + for (size_t num_capture_channels : {1, 2, 4}) { + std::vector> capture_block( + num_capture_channels, std::vector(kBlockSize, 0.f)); + for (size_t num_matched_filters = 4; num_matched_filters <= 10; + num_matched_filters++) { + for (auto down_sampling_factor : kDownSamplingFactors) { + EchoCanceller3Config config; + config.delay.down_sampling_factor = down_sampling_factor; + config.delay.num_filters = num_matched_filters; + config.delay.capture_alignment_mixing.downmix = false; + config.delay.capture_alignment_mixing.adaptive_selection = false; + + for (size_t num_render_channels : {1, 2, 8}) { + for (auto rate : {16000, 32000, 48000}) { + std::vector>> render_block( + NumBandsForRate(rate), + std::vector>( + num_render_channels, std::vector(kBlockSize, 0.f))); + + for (size_t delay_samples : {15, 50, 150, 200, 800, 4000}) { + absl::optional delay_blocks; + SCOPED_TRACE(ProduceDebugText(rate, delay_samples, + num_render_channels, + num_capture_channels)); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, rate, num_render_channels)); + std::unique_ptr delay_controller( + RenderDelayController::Create(config, rate, + num_capture_channels)); + DelayBuffer signal_delay_buffer(delay_samples); + for (size_t k = 0; k < (400 + delay_samples / kBlockSize); ++k) { + for (size_t band = 0; band < render_block.size(); ++band) { + for (size_t channel = 0; channel < render_block[band].size(); + ++channel) { + RandomizeSampleVector(&random_generator, + render_block[band][channel]); + } + } + signal_delay_buffer.Delay(render_block[0][0], capture_block[0]); + render_delay_buffer->Insert(render_block); + render_delay_buffer->PrepareCaptureProcessing(); + delay_blocks = delay_controller->GetDelay( + render_delay_buffer->GetDownsampledRenderBuffer(), + render_delay_buffer->Delay(), capture_block); + } 
+ ASSERT_TRUE(!!delay_blocks); + + constexpr int kDelayHeadroomBlocks = 1; + size_t expected_delay_blocks = + std::max(0, static_cast(delay_samples / kBlockSize) - + kDelayHeadroomBlocks); + + EXPECT_EQ(expected_delay_blocks, delay_blocks->delay); + } + } + } + } + } + } +} + +// Verifies that the RenderDelayController is able to properly handle noncausal +// delays. +// TODO(bugs.webrtc.org/11161): Re-enable tests. +TEST(RenderDelayController, DISABLED_NonCausalAlignment) { + Random random_generator(42U); + for (size_t num_capture_channels : {1, 2, 4}) { + for (size_t num_render_channels : {1, 2, 8}) { + for (size_t num_matched_filters = 4; num_matched_filters <= 10; + num_matched_filters++) { + for (auto down_sampling_factor : kDownSamplingFactors) { + EchoCanceller3Config config; + config.delay.down_sampling_factor = down_sampling_factor; + config.delay.num_filters = num_matched_filters; + config.delay.capture_alignment_mixing.downmix = false; + config.delay.capture_alignment_mixing.adaptive_selection = false; + for (auto rate : {16000, 32000, 48000}) { + std::vector>> render_block( + NumBandsForRate(rate), + std::vector>( + num_render_channels, std::vector(kBlockSize, 0.f))); + std::vector>> capture_block( + NumBandsForRate(rate), + std::vector>( + num_capture_channels, std::vector(kBlockSize, 0.f))); + + for (int delay_samples : {-15, -50, -150, -200}) { + absl::optional delay_blocks; + SCOPED_TRACE(ProduceDebugText(rate, -delay_samples, + num_render_channels, + num_capture_channels)); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, rate, num_render_channels)); + std::unique_ptr delay_controller( + RenderDelayController::Create(EchoCanceller3Config(), rate, + num_capture_channels)); + DelayBuffer signal_delay_buffer(-delay_samples); + for (int k = 0; + k < (400 - delay_samples / static_cast(kBlockSize)); + ++k) { + RandomizeSampleVector(&random_generator, capture_block[0][0]); + signal_delay_buffer.Delay(capture_block[0][0], + render_block[0][0]); + render_delay_buffer->Insert(render_block); + render_delay_buffer->PrepareCaptureProcessing(); + delay_blocks = delay_controller->GetDelay( + render_delay_buffer->GetDownsampledRenderBuffer(), + render_delay_buffer->Delay(), capture_block[0]); + } + + ASSERT_FALSE(delay_blocks); + } + } + } + } + } + } +} + +// Verifies that the RenderDelayController is able to align the signals for +// simple timeshifts between the signals when there is jitter in the API calls. +// TODO(bugs.webrtc.org/11161): Re-enable tests. 
+TEST(RenderDelayController, DISABLED_AlignmentWithJitter) { + Random random_generator(42U); + for (size_t num_capture_channels : {1, 2, 4}) { + for (size_t num_render_channels : {1, 2, 8}) { + std::vector> capture_block( + num_capture_channels, std::vector(kBlockSize, 0.f)); + for (size_t num_matched_filters = 4; num_matched_filters <= 10; + num_matched_filters++) { + for (auto down_sampling_factor : kDownSamplingFactors) { + EchoCanceller3Config config; + config.delay.down_sampling_factor = down_sampling_factor; + config.delay.num_filters = num_matched_filters; + config.delay.capture_alignment_mixing.downmix = false; + config.delay.capture_alignment_mixing.adaptive_selection = false; + + for (auto rate : {16000, 32000, 48000}) { + std::vector>> render_block( + NumBandsForRate(rate), + std::vector>( + num_render_channels, std::vector(kBlockSize, 0.f))); + for (size_t delay_samples : {15, 50, 300, 800}) { + absl::optional delay_blocks; + SCOPED_TRACE(ProduceDebugText(rate, delay_samples, + num_render_channels, + num_capture_channels)); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, rate, num_render_channels)); + std::unique_ptr delay_controller( + RenderDelayController::Create(config, rate, + num_capture_channels)); + DelayBuffer signal_delay_buffer(delay_samples); + constexpr size_t kMaxTestJitterBlocks = 26; + for (size_t j = 0; j < (1000 + delay_samples / kBlockSize) / + kMaxTestJitterBlocks + + 1; + ++j) { + std::vector>> + capture_block_buffer; + for (size_t k = 0; k < (kMaxTestJitterBlocks - 1); ++k) { + RandomizeSampleVector(&random_generator, render_block[0][0]); + signal_delay_buffer.Delay(render_block[0][0], + capture_block[0]); + capture_block_buffer.push_back(capture_block); + render_delay_buffer->Insert(render_block); + } + for (size_t k = 0; k < (kMaxTestJitterBlocks - 1); ++k) { + render_delay_buffer->PrepareCaptureProcessing(); + delay_blocks = delay_controller->GetDelay( + render_delay_buffer->GetDownsampledRenderBuffer(), + render_delay_buffer->Delay(), capture_block_buffer[k]); + } + } + + constexpr int kDelayHeadroomBlocks = 1; + size_t expected_delay_blocks = + std::max(0, static_cast(delay_samples / kBlockSize) - + kDelayHeadroomBlocks); + if (expected_delay_blocks < 2) { + expected_delay_blocks = 0; + } + + ASSERT_TRUE(delay_blocks); + EXPECT_EQ(expected_delay_blocks, delay_blocks->delay); + } + } + } + } + } + } +} + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) + +// Verifies the check for the capture signal block size. +TEST(RenderDelayController, WrongCaptureSize) { + std::vector> block( + 1, std::vector(kBlockSize - 1, 0.f)); + EchoCanceller3Config config; + for (auto rate : {16000, 32000, 48000}) { + SCOPED_TRACE(ProduceDebugText(rate)); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, rate, 1)); + EXPECT_DEATH( + std::unique_ptr( + RenderDelayController::Create(EchoCanceller3Config(), rate, 1)) + ->GetDelay(render_delay_buffer->GetDownsampledRenderBuffer(), + render_delay_buffer->Delay(), block), + ""); + } +} + +// Verifies the check for correct sample rate. +// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH +// tests on test bots has been fixed. 
+TEST(RenderDelayController, DISABLED_WrongSampleRate) { + for (auto rate : {-1, 0, 8001, 16001}) { + SCOPED_TRACE(ProduceDebugText(rate)); + EchoCanceller3Config config; + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, rate, 1)); + EXPECT_DEATH( + std::unique_ptr( + RenderDelayController::Create(EchoCanceller3Config(), rate, 1)), + ""); + } +} + +#endif + +} // namespace webrtc diff --git a/audio_processing/aec3/render_signal_analyzer.cc b/audio_processing/aec3/render_signal_analyzer.cc new file mode 100644 index 0000000..85d79e8 --- /dev/null +++ b/audio_processing/aec3/render_signal_analyzer.cc @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/aec3/render_signal_analyzer.h" + +#include + +#include +#include +#include + +#include "rtc_base/array_view.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +namespace { +constexpr size_t kCounterThreshold = 5; + +// Identifies local bands with narrow characteristics. +void IdentifySmallNarrowBandRegions( + const RenderBuffer& render_buffer, + const absl::optional& delay_partitions, + std::array* narrow_band_counters) { + RTC_DCHECK(narrow_band_counters); + + if (!delay_partitions) { + narrow_band_counters->fill(0); + return; + } + + std::array channel_counters; + channel_counters.fill(0); + rtc::ArrayView> X2 = + render_buffer.Spectrum(*delay_partitions); + for (size_t ch = 0; ch < X2.size(); ++ch) { + for (size_t k = 1; k < kFftLengthBy2; ++k) { + if (X2[ch][k] > 3 * std::max(X2[ch][k - 1], X2[ch][k + 1])) { + ++channel_counters[k - 1]; + } + } + } + for (size_t k = 1; k < kFftLengthBy2; ++k) { + (*narrow_band_counters)[k - 1] = + channel_counters[k - 1] > 0 ? (*narrow_band_counters)[k - 1] + 1 : 0; + } +} + +// Identifies whether the signal has a single strong narrow-band component. +void IdentifyStrongNarrowBandComponent(const RenderBuffer& render_buffer, + int strong_peak_freeze_duration, + absl::optional* narrow_peak_band, + size_t* narrow_peak_counter) { + RTC_DCHECK(narrow_peak_band); + RTC_DCHECK(narrow_peak_counter); + if (*narrow_peak_band && + ++(*narrow_peak_counter) > + static_cast(strong_peak_freeze_duration)) { + *narrow_peak_band = absl::nullopt; + } + + const std::vector>>& x_latest = + render_buffer.Block(0); + float max_peak_level = 0.f; + for (size_t channel = 0; channel < x_latest[0].size(); ++channel) { + rtc::ArrayView X2_latest = + render_buffer.Spectrum(0)[channel]; + + // Identify the spectral peak. + const int peak_bin = + static_cast(std::max_element(X2_latest.begin(), X2_latest.end()) - + X2_latest.begin()); + + // Compute the level around the peak. + float non_peak_power = 0.f; + for (int k = std::max(0, peak_bin - 14); k < peak_bin - 4; ++k) { + non_peak_power = std::max(X2_latest[k], non_peak_power); + } + for (int k = peak_bin + 5; + k < std::min(peak_bin + 15, static_cast(kFftLengthBy2Plus1)); + ++k) { + non_peak_power = std::max(X2_latest[k], non_peak_power); + } + + // Assess the render signal strength. 
+    auto result0 = std::minmax_element(x_latest[0][channel].begin(),
+                                       x_latest[0][channel].end());
+    float max_abs = std::max(fabs(*result0.first), fabs(*result0.second));
+
+    if (x_latest.size() > 1) {
+      const auto result1 = std::minmax_element(x_latest[1][channel].begin(),
+                                               x_latest[1][channel].end());
+      max_abs =
+          std::max(max_abs, static_cast<float>(std::max(
+                                fabs(*result1.first), fabs(*result1.second))));
+    }
+
+    // Detect whether the spectral peak has a strong narrowband nature.
+    const float peak_level = X2_latest[peak_bin];
+    if (peak_bin > 0 && max_abs > 100 && peak_level > 100 * non_peak_power) {
+      // Store the strongest peak across channels.
+      if (peak_level > max_peak_level) {
+        max_peak_level = peak_level;
+        *narrow_peak_band = peak_bin;
+        *narrow_peak_counter = 0;
+      }
+    }
+  }
+}
+
+}  // namespace
+
+RenderSignalAnalyzer::RenderSignalAnalyzer(const EchoCanceller3Config& config)
+    : strong_peak_freeze_duration_(config.filter.main.length_blocks) {
+  narrow_band_counters_.fill(0);
+}
+RenderSignalAnalyzer::~RenderSignalAnalyzer() = default;
+
+void RenderSignalAnalyzer::Update(
+    const RenderBuffer& render_buffer,
+    const absl::optional<size_t>& delay_partitions) {
+  // Identify bands of narrow nature.
+  IdentifySmallNarrowBandRegions(render_buffer, delay_partitions,
+                                 &narrow_band_counters_);
+
+  // Identify the presence of a strong narrow band.
+  IdentifyStrongNarrowBandComponent(render_buffer,
+                                    strong_peak_freeze_duration_,
+                                    &narrow_peak_band_, &narrow_peak_counter_);
+}
+
+void RenderSignalAnalyzer::MaskRegionsAroundNarrowBands(
+    std::array<float, kFftLengthBy2Plus1>* v) const {
+  RTC_DCHECK(v);
+
+  // Set v to zero around narrow band signal regions.
+  if (narrow_band_counters_[0] > kCounterThreshold) {
+    (*v)[1] = (*v)[0] = 0.f;
+  }
+  for (size_t k = 2; k < kFftLengthBy2 - 1; ++k) {
+    if (narrow_band_counters_[k - 1] > kCounterThreshold) {
+      (*v)[k - 2] = (*v)[k - 1] = (*v)[k] = (*v)[k + 1] = (*v)[k + 2] = 0.f;
+    }
+  }
+  if (narrow_band_counters_[kFftLengthBy2 - 2] > kCounterThreshold) {
+    (*v)[kFftLengthBy2] = (*v)[kFftLengthBy2 - 1] = 0.f;
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/render_signal_analyzer.h b/audio_processing/aec3/render_signal_analyzer.h
new file mode 100644
index 0000000..7ef26fc
--- /dev/null
+++ b/audio_processing/aec3/render_signal_analyzer.h
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_SIGNAL_ANALYZER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_SIGNAL_ANALYZER_H_
+
+#include <algorithm>
+#include <array>
+#include <cstddef>
+
+#include "absl/types/optional.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/render_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+// Provides functionality for analyzing the properties of the render signal.
+class RenderSignalAnalyzer {
+ public:
+  explicit RenderSignalAnalyzer(const EchoCanceller3Config& config);
+  ~RenderSignalAnalyzer();
+
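A minimal sketch of how this class is typically consumed (the helper name and policy are hypothetical, not from the patch): a per-bin gain or step-size vector is run through the analyzer's mask so that adaptation ignores poorly exciting, tonal render content.

#include <array>

#include "audio_processing/aec3/aec3_common.h"
#include "audio_processing/aec3/render_signal_analyzer.h"

// Zeroes the per-bin step sizes around detected narrow-band regions.
void MaskAdaptationGainsSketch(
    const webrtc::RenderSignalAnalyzer& analyzer,
    std::array<float, webrtc::kFftLengthBy2Plus1>* adaptation_step_sizes) {
  analyzer.MaskRegionsAroundNarrowBands(adaptation_step_sizes);
  if (analyzer.PoorSignalExcitation()) {
    // With a tonal render signal, a caller might additionally slow down or
    // freeze adaptation altogether; the reaction policy is up to the caller.
  }
}

+  // Updates the render signal analysis with the most recent render signal.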
+ void Update(const RenderBuffer& render_buffer, + const absl::optional& delay_partitions); + + // Returns true if the render signal is poorly exciting. + bool PoorSignalExcitation() const { + RTC_DCHECK_LT(2, narrow_band_counters_.size()); + return std::any_of(narrow_band_counters_.begin(), + narrow_band_counters_.end(), + [](size_t a) { return a > 10; }); + } + + // Zeros the array around regions with narrow bands signal characteristics. + void MaskRegionsAroundNarrowBands( + std::array* v) const; + + absl::optional NarrowPeakBand() const { return narrow_peak_band_; } + + private: + const int strong_peak_freeze_duration_; + std::array narrow_band_counters_; + absl::optional narrow_peak_band_; + size_t narrow_peak_counter_; + + RTC_DISALLOW_COPY_AND_ASSIGN(RenderSignalAnalyzer); +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_RENDER_SIGNAL_ANALYZER_H_ diff --git a/audio_processing/aec3/render_signal_analyzer_unittest.cc b/audio_processing/aec3/render_signal_analyzer_unittest.cc new file mode 100644 index 0000000..f40fade --- /dev/null +++ b/audio_processing/aec3/render_signal_analyzer_unittest.cc @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/aec3/render_signal_analyzer.h" + +#include + +#include +#include +#include + +#include "api/array_view.h" +#include "modules/audio_processing/aec3/aec3_common.h" +#include "modules/audio_processing/aec3/aec3_fft.h" +#include "modules/audio_processing/aec3/fft_data.h" +#include "modules/audio_processing/aec3/render_delay_buffer.h" +#include "modules/audio_processing/test/echo_canceller_test_tools.h" +#include "rtc_base/random.h" +#include "rtc_base/strings/string_builder.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +constexpr float kPi = 3.141592f; + +void ProduceSinusoidInNoise(int sample_rate_hz, + size_t sinusoid_channel, + float sinusoidal_frequency_hz, + Random* random_generator, + size_t* sample_counter, + std::vector>>* x) { + // Fill x with low-amplitude noise. + for (auto& band : *x) { + for (auto& channel : band) { + RandomizeSampleVector(random_generator, channel, + /*amplitude=*/500.f); + } + } + // Produce a sinusoid of the specified frequency in the specified channel. 
+ for (size_t k = *sample_counter, j = 0; k < (*sample_counter + kBlockSize); + ++k, ++j) { + (*x)[0][sinusoid_channel][j] += + 32000.f * + std::sin(2.f * kPi * sinusoidal_frequency_hz * k / sample_rate_hz); + } + *sample_counter = *sample_counter + kBlockSize; +} + +void RunNarrowBandDetectionTest(size_t num_channels) { + RenderSignalAnalyzer analyzer(EchoCanceller3Config{}); + Random random_generator(42U); + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + std::vector>> x( + kNumBands, std::vector>( + num_channels, std::vector(kBlockSize, 0.f))); + std::array x_old; + Aec3Fft fft; + EchoCanceller3Config config; + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, kSampleRateHz, num_channels)); + + std::array mask; + x_old.fill(0.f); + constexpr int kSinusFrequencyBin = 32; + + auto generate_sinusoid_test = [&](bool known_delay) { + size_t sample_counter = 0; + for (size_t k = 0; k < 100; ++k) { + ProduceSinusoidInNoise(16000, num_channels - 1, + 16000 / 2 * kSinusFrequencyBin / kFftLengthBy2, + &random_generator, &sample_counter, &x); + + render_delay_buffer->Insert(x); + if (k == 0) { + render_delay_buffer->Reset(); + } + render_delay_buffer->PrepareCaptureProcessing(); + + analyzer.Update(*render_delay_buffer->GetRenderBuffer(), + known_delay ? absl::optional(0) : absl::nullopt); + } + }; + + generate_sinusoid_test(true); + mask.fill(1.f); + analyzer.MaskRegionsAroundNarrowBands(&mask); + for (int k = 0; k < static_cast(mask.size()); ++k) { + EXPECT_EQ(abs(k - kSinusFrequencyBin) <= 2 ? 0.f : 1.f, mask[k]); + } + EXPECT_TRUE(analyzer.PoorSignalExcitation()); + EXPECT_TRUE(static_cast(analyzer.NarrowPeakBand())); + EXPECT_EQ(*analyzer.NarrowPeakBand(), 32); + + // Verify that no bands are detected as narrow when the delay is unknown. + generate_sinusoid_test(false); + mask.fill(1.f); + analyzer.MaskRegionsAroundNarrowBands(&mask); + std::for_each(mask.begin(), mask.end(), [](float a) { EXPECT_EQ(1.f, a); }); + EXPECT_FALSE(analyzer.PoorSignalExcitation()); +} + +std::string ProduceDebugText(size_t num_channels) { + rtc::StringBuilder ss; + ss << "number of channels: " << num_channels; + return ss.Release(); +} +} // namespace + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) +// Verifies that the check for non-null output parameter works. +TEST(RenderSignalAnalyzer, NullMaskOutput) { + RenderSignalAnalyzer analyzer(EchoCanceller3Config{}); + EXPECT_DEATH(analyzer.MaskRegionsAroundNarrowBands(nullptr), ""); +} + +#endif + +// Verify that no narrow bands are detected in a Gaussian noise signal. 
+TEST(RenderSignalAnalyzer, NoFalseDetectionOfNarrowBands) { + for (auto num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(num_channels)); + RenderSignalAnalyzer analyzer(EchoCanceller3Config{}); + Random random_generator(42U); + std::vector>> x( + 3, std::vector>( + num_channels, std::vector(kBlockSize, 0.f))); + std::array x_old; + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(EchoCanceller3Config(), 48000, num_channels)); + std::array mask; + x_old.fill(0.f); + + for (size_t k = 0; k < 100; ++k) { + for (auto& band : x) { + for (auto& channel : band) { + RandomizeSampleVector(&random_generator, channel); + } + } + + render_delay_buffer->Insert(x); + if (k == 0) { + render_delay_buffer->Reset(); + } + render_delay_buffer->PrepareCaptureProcessing(); + + analyzer.Update(*render_delay_buffer->GetRenderBuffer(), + absl::optional(0)); + } + + mask.fill(1.f); + analyzer.MaskRegionsAroundNarrowBands(&mask); + EXPECT_TRUE(std::all_of(mask.begin(), mask.end(), + [](float a) { return a == 1.f; })); + EXPECT_FALSE(analyzer.PoorSignalExcitation()); + EXPECT_FALSE(static_cast(analyzer.NarrowPeakBand())); + } +} + +// Verify that a sinusoid signal is detected as narrow bands. +TEST(RenderSignalAnalyzer, NarrowBandDetection) { + for (auto num_channels : {1, 2, 8}) { + SCOPED_TRACE(ProduceDebugText(num_channels)); + RunNarrowBandDetectionTest(num_channels); + } +} +} // namespace webrtc diff --git a/audio_processing/aec3/residual_echo_estimator.cc b/audio_processing/aec3/residual_echo_estimator.cc new file mode 100644 index 0000000..b98756c --- /dev/null +++ b/audio_processing/aec3/residual_echo_estimator.cc @@ -0,0 +1,328 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/aec3/residual_echo_estimator.h" + +#include + +#include +#include + +#include "rtc_base/array_view.h" +#include "audio_processing/aec3/reverb_model.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace { + +// Computes the indexes that will be used for computing spectral power over +// the blocks surrounding the delay. +void GetRenderIndexesToAnalyze( + const SpectrumBuffer& spectrum_buffer, + const EchoCanceller3Config::EchoModel& echo_model, + int filter_delay_blocks, + int* idx_start, + int* idx_stop) { + RTC_DCHECK(idx_start); + RTC_DCHECK(idx_stop); + size_t window_start; + size_t window_end; + window_start = + std::max(0, filter_delay_blocks - + static_cast(echo_model.render_pre_window_size)); + window_end = filter_delay_blocks + + static_cast(echo_model.render_post_window_size); + *idx_start = spectrum_buffer.OffsetIndex(spectrum_buffer.read, window_start); + *idx_stop = spectrum_buffer.OffsetIndex(spectrum_buffer.read, window_end + 1); +} + +// Estimates the residual echo power based on the echo return loss enhancement +// (ERLE) and the linear power estimate. 
+void LinearEstimate( + rtc::ArrayView> S2_linear, + rtc::ArrayView> erle, + rtc::ArrayView> R2) { + RTC_DCHECK_EQ(S2_linear.size(), erle.size()); + RTC_DCHECK_EQ(S2_linear.size(), R2.size()); + + const size_t num_capture_channels = R2.size(); + for (size_t ch = 0; ch < num_capture_channels; ++ch) { + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + RTC_DCHECK_LT(0.f, erle[ch][k]); + R2[ch][k] = S2_linear[ch][k] / erle[ch][k]; + } + } +} + +// Estimates the residual echo power based on an uncertainty estimate of the +// echo return loss enhancement (ERLE) and the linear power estimate. +void LinearEstimate( + rtc::ArrayView> S2_linear, + float erle_uncertainty, + rtc::ArrayView> R2) { + RTC_DCHECK_EQ(S2_linear.size(), R2.size()); + + const size_t num_capture_channels = R2.size(); + for (size_t ch = 0; ch < num_capture_channels; ++ch) { + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + R2[ch][k] = S2_linear[ch][k] * erle_uncertainty; + } + } +} + +// Estimates the residual echo power based on the estimate of the echo path +// gain. +void NonLinearEstimate( + float echo_path_gain, + const std::array& X2, + rtc::ArrayView> R2) { + const size_t num_capture_channels = R2.size(); + for (size_t ch = 0; ch < num_capture_channels; ++ch) { + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + R2[ch][k] = X2[k] * echo_path_gain; + } + } +} + +// Applies a soft noise gate to the echo generating power. +void ApplyNoiseGate(const EchoCanceller3Config::EchoModel& config, + rtc::ArrayView X2) { + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + if (config.noise_gate_power > X2[k]) { + X2[k] = std::max(0.f, X2[k] - config.noise_gate_slope * + (config.noise_gate_power - X2[k])); + } + } +} + +// Estimates the echo generating signal power as gated maximal power over a +// time window. +void EchoGeneratingPower(size_t num_render_channels, + const SpectrumBuffer& spectrum_buffer, + const EchoCanceller3Config::EchoModel& echo_model, + int filter_delay_blocks, + rtc::ArrayView X2) { + int idx_stop; + int idx_start; + GetRenderIndexesToAnalyze(spectrum_buffer, echo_model, filter_delay_blocks, + &idx_start, &idx_stop); + + std::fill(X2.begin(), X2.end(), 0.f); + if (num_render_channels == 1) { + for (int k = idx_start; k != idx_stop; k = spectrum_buffer.IncIndex(k)) { + for (size_t j = 0; j < kFftLengthBy2Plus1; ++j) { + X2[j] = std::max(X2[j], spectrum_buffer.buffer[k][/*channel=*/0][j]); + } + } + } else { + for (int k = idx_start; k != idx_stop; k = spectrum_buffer.IncIndex(k)) { + std::array render_power; + render_power.fill(0.f); + for (size_t ch = 0; ch < num_render_channels; ++ch) { + const auto& channel_power = spectrum_buffer.buffer[k][ch]; + for (size_t j = 0; j < kFftLengthBy2Plus1; ++j) { + render_power[j] += channel_power[j]; + } + } + for (size_t j = 0; j < kFftLengthBy2Plus1; ++j) { + X2[j] = std::max(X2[j], render_power[j]); + } + } + } +} + +// Chooses the echo path gain to use. +float GetEchoPathGain(const AecState& aec_state, + const EchoCanceller3Config::EpStrength& config) { + float gain_amplitude = + aec_state.TransparentMode() ? 
0.01f : config.default_gain; + return gain_amplitude * gain_amplitude; +} + +} // namespace + +ResidualEchoEstimator::ResidualEchoEstimator(const EchoCanceller3Config& config, + size_t num_render_channels) + : config_(config), num_render_channels_(num_render_channels) { + Reset(); +} + +ResidualEchoEstimator::~ResidualEchoEstimator() = default; + +void ResidualEchoEstimator::Estimate( + const AecState& aec_state, + const RenderBuffer& render_buffer, + rtc::ArrayView> S2_linear, + rtc::ArrayView> Y2, + rtc::ArrayView> R2) { + RTC_DCHECK_EQ(R2.size(), Y2.size()); + RTC_DCHECK_EQ(R2.size(), S2_linear.size()); + + const size_t num_capture_channels = R2.size(); + + UpdateRenderNoisePower(render_buffer); + + if (aec_state.UsableLinearEstimate()) { + if (aec_state.SaturatedEcho()) { + for (size_t ch = 0; ch < num_capture_channels; ++ch) { + std::copy(Y2[ch].begin(), Y2[ch].end(), R2[ch].begin()); + } + } else { + absl::optional erle_uncertainty = aec_state.ErleUncertainty(); + + if (erle_uncertainty) { + LinearEstimate(S2_linear, *erle_uncertainty, R2); + } else { + LinearEstimate(S2_linear, aec_state.Erle(), R2); + } + } + + AddReverb(ReverbType::kLinear, aec_state, render_buffer, R2); + } else { + const float echo_path_gain = + GetEchoPathGain(aec_state, config_.ep_strength); + + if (aec_state.SaturatedEcho()) { + for (size_t ch = 0; ch < num_capture_channels; ++ch) { + std::copy(Y2[ch].begin(), Y2[ch].end(), R2[ch].begin()); + } + } else { + // Estimate the echo generating signal power. + std::array X2; + EchoGeneratingPower(num_render_channels_, + render_buffer.GetSpectrumBuffer(), config_.echo_model, + aec_state.MinDirectPathFilterDelay(), X2); + if (!aec_state.UseStationarityProperties()) { + ApplyNoiseGate(config_.echo_model, X2); + } + + // Subtract the stationary noise power to avoid stationary noise causing + // excessive echo suppression. + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + X2[k] -= config_.echo_model.stationary_gate_slope * X2_noise_floor_[k]; + X2[k] = std::max(0.f, X2[k]); + } + NonLinearEstimate(echo_path_gain, X2, R2); + } + + if (!aec_state.TransparentMode()) { + AddReverb(ReverbType::kNonLinear, aec_state, render_buffer, R2); + } + } + + if (aec_state.UseStationarityProperties()) { + // Scale the echo according to echo audibility. + std::array residual_scaling; + aec_state.GetResidualEchoScaling(residual_scaling); + for (size_t ch = 0; ch < num_capture_channels; ++ch) { + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + R2[ch][k] *= residual_scaling[k]; + } + } + } +} + +void ResidualEchoEstimator::Reset() { + echo_reverb_.Reset(); + X2_noise_floor_counter_.fill(config_.echo_model.noise_floor_hold); + X2_noise_floor_.fill(config_.echo_model.min_noise_floor_power); +} + +void ResidualEchoEstimator::UpdateRenderNoisePower( + const RenderBuffer& render_buffer) { + std::array render_power_data; + rtc::ArrayView> X2 = + render_buffer.Spectrum(0); + rtc::ArrayView render_power = + X2[/*channel=*/0]; + if (num_render_channels_ > 1) { + render_power_data.fill(0.f); + for (size_t ch = 0; ch < num_render_channels_; ++ch) { + const auto& channel_power = X2[ch]; + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + render_power_data[k] += channel_power[k]; + } + } + render_power = render_power_data; + } + + // Estimate the stationary noise power in a minimum statistics manner. + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + // Decrease rapidly. 
+
+void ResidualEchoEstimator::UpdateRenderNoisePower(
+    const RenderBuffer& render_buffer) {
+  std::array<float, kFftLengthBy2Plus1> render_power_data;
+  rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> X2 =
+      render_buffer.Spectrum(0);
+  rtc::ArrayView<const float, kFftLengthBy2Plus1> render_power =
+      X2[/*channel=*/0];
+  if (num_render_channels_ > 1) {
+    render_power_data.fill(0.f);
+    for (size_t ch = 0; ch < num_render_channels_; ++ch) {
+      const auto& channel_power = X2[ch];
+      for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+        render_power_data[k] += channel_power[k];
+      }
+    }
+    render_power = render_power_data;
+  }
+
+  // Estimate the stationary noise power in a minimum statistics manner.
+  for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+    // Decrease rapidly.
+    if (render_power[k] < X2_noise_floor_[k]) {
+      X2_noise_floor_[k] = render_power[k];
+      X2_noise_floor_counter_[k] = 0;
+    } else {
+      // Increase in a delayed, leaky manner.
+      if (X2_noise_floor_counter_[k] >=
+          static_cast<int>(config_.echo_model.noise_floor_hold)) {
+        X2_noise_floor_[k] = std::max(X2_noise_floor_[k] * 1.1f,
+                                      config_.echo_model.min_noise_floor_power);
+      } else {
+        ++X2_noise_floor_counter_[k];
+      }
+    }
+  }
+}
+
+// Adds the estimated power of the reverb to the residual echo power.
+void ResidualEchoEstimator::AddReverb(
+    ReverbType reverb_type,
+    const AecState& aec_state,
+    const RenderBuffer& render_buffer,
+    rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2) {
+  const size_t num_capture_channels = R2.size();
+
+  // Choose reverb partition based on what type of echo power model is used.
+  const size_t first_reverb_partition =
+      reverb_type == ReverbType::kLinear
+          ? aec_state.FilterLengthBlocks() + 1
+          : aec_state.MinDirectPathFilterDelay() + 1;
+
+  // Compute render power for the reverb.
+  std::array<float, kFftLengthBy2Plus1> render_power_data;
+  rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> X2 =
+      render_buffer.Spectrum(first_reverb_partition);
+  rtc::ArrayView<const float, kFftLengthBy2Plus1> render_power =
+      X2[/*channel=*/0];
+  if (num_render_channels_ > 1) {
+    render_power_data.fill(0.f);
+    for (size_t ch = 0; ch < num_render_channels_; ++ch) {
+      const auto& channel_power = X2[ch];
+      for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+        render_power_data[k] += channel_power[k];
+      }
+    }
+    render_power = render_power_data;
+  }
+
+  // Update the reverb estimate.
+  if (reverb_type == ReverbType::kLinear) {
+    echo_reverb_.UpdateReverb(render_power,
+                              aec_state.GetReverbFrequencyResponse(),
+                              aec_state.ReverbDecay());
+  } else {
+    const float echo_path_gain =
+        GetEchoPathGain(aec_state, config_.ep_strength);
+    echo_reverb_.UpdateReverbNoFreqShaping(render_power, echo_path_gain,
+                                           aec_state.ReverbDecay());
+  }
+
+  // Add the reverb power.
+  rtc::ArrayView<const float> reverb_power = echo_reverb_.reverb();
+  for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+    for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+      R2[ch][k] += reverb_power[k];
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/residual_echo_estimator.h b/audio_processing/aec3/residual_echo_estimator.h
new file mode 100644
index 0000000..f46d88b
--- /dev/null
+++ b/audio_processing/aec3/residual_echo_estimator.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RESIDUAL_ECHO_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RESIDUAL_ECHO_ESTIMATOR_H_
+
+#include <array>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/aec_state.h"
+#include "audio_processing/aec3/render_buffer.h"
+#include "audio_processing/aec3/reverb_model.h"
+#include "audio_processing/aec3/spectrum_buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class ResidualEchoEstimator {
+ public:
+  ResidualEchoEstimator(const EchoCanceller3Config& config,
+                        size_t num_render_channels);
+  ~ResidualEchoEstimator();
+
+  ResidualEchoEstimator(const ResidualEchoEstimator&) = delete;
+  ResidualEchoEstimator& operator=(const ResidualEchoEstimator&) = delete;
+
+  void Estimate(
+      const AecState& aec_state,
+      const RenderBuffer& render_buffer,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> S2_linear,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+      rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2);
+
+ private:
+  enum class ReverbType { kLinear, kNonLinear };
+
+  // Resets the state.
+  void Reset();
+
+  // Updates estimate for the power of the stationary noise component in the
+  // render signal.
+  void UpdateRenderNoisePower(const RenderBuffer& render_buffer);
+
+  // Adds the estimated unmodelled echo power to the residual echo power
+  // estimate.
+  void AddReverb(ReverbType reverb_type,
+                 const AecState& aec_state,
+                 const RenderBuffer& render_buffer,
+                 rtc::ArrayView<std::array<float, kFftLengthBy2Plus1>> R2);
+
+  const EchoCanceller3Config config_;
+  const size_t num_render_channels_;
+  std::array<float, kFftLengthBy2Plus1> X2_noise_floor_;
+  std::array<int, kFftLengthBy2Plus1> X2_noise_floor_counter_;
+  ReverbModel echo_reverb_;
+};
+
+}  // namespace webrtc
+
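+// Usage sketch (editor's addition; it mirrors the unit test below rather than
+// a documented API contract): the caller allocates one
+// std::array<float, kFftLengthBy2Plus1> per capture channel for S2_linear, Y2
+// and R2, then per processed block calls
+//   ResidualEchoEstimator estimator(config, num_render_channels);
+//   estimator.Estimate(aec_state, render_buffer, S2_linear, Y2, R2);
+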
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_RESIDUAL_ECHO_ESTIMATOR_H_
diff --git a/audio_processing/aec3/residual_echo_estimator_unittest.cc b/audio_processing/aec3/residual_echo_estimator_unittest.cc
new file mode 100644
index 0000000..c8a45a4
--- /dev/null
+++ b/audio_processing/aec3/residual_echo_estimator_unittest.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/residual_echo_estimator.h"
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(ResidualEchoEstimator, BasicTest) {
+  for (size_t num_render_channels : {1, 2, 4}) {
+    for (size_t num_capture_channels : {1, 2, 4}) {
+      constexpr int kSampleRateHz = 48000;
+      constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+
+      EchoCanceller3Config config;
+      ResidualEchoEstimator estimator(config, num_render_channels);
+      AecState aec_state(config, num_capture_channels);
+      std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+          RenderDelayBuffer::Create(config, kSampleRateHz,
+                                    num_render_channels));
+
+      std::vector<std::array<float, kFftLengthBy2Plus1>> E2_main(
+          num_capture_channels);
+      std::vector<std::array<float, kFftLengthBy2Plus1>> S2_linear(
+          num_capture_channels);
+      std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(
+          num_capture_channels);
+      std::vector<std::array<float, kFftLengthBy2Plus1>> R2(
+          num_capture_channels);
+      std::vector<std::vector<std::vector<float>>> x(
+          kNumBands,
+          std::vector<std::vector<float>>(num_render_channels,
+                                          std::vector<float>(kBlockSize, 0.f)));
+      std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>> H2(
+          num_capture_channels,
+          std::vector<std::array<float, kFftLengthBy2Plus1>>(10));
+      Random random_generator(42U);
+      std::vector<SubtractorOutput> output(num_capture_channels);
+      std::array<float, kBlockSize> y;
+      absl::optional<DelayEstimate> delay_estimate;
+
+      for (auto& H2_ch : H2) {
+        for (auto& H2_k : H2_ch) {
+          H2_k.fill(0.01f);
+        }
+        H2_ch[2].fill(10.f);
+        H2_ch[2][0] = 0.1f;
+      }
+
+      std::vector<std::vector<float>> h(
+          num_capture_channels,
+          std::vector<float>(
+              GetTimeDomainLength(config.filter.main.length_blocks), 0.f));
+
+      for (auto& subtractor_output : output) {
+        subtractor_output.Reset();
+        subtractor_output.s_main.fill(100.f);
+      }
+      y.fill(0.f);
+
+      constexpr float kLevel = 10.f;
+      for (auto& E2_main_ch : E2_main) {
+        E2_main_ch.fill(kLevel);
+      }
+      S2_linear[0].fill(kLevel);
+      for (auto& Y2_ch : Y2) {
+        Y2_ch.fill(kLevel);
+      }
+
+      for (int k = 0; k < 1993; ++k) {
+        RandomizeSampleVector(&random_generator, x[0][0]);
+        render_delay_buffer->Insert(x);
+        if (k == 0) {
+          render_delay_buffer->Reset();
+        }
+        render_delay_buffer->PrepareCaptureProcessing();
+
+        aec_state.Update(delay_estimate, H2, h,
+                         *render_delay_buffer->GetRenderBuffer(), E2_main, Y2,
+                         output);
+
+        estimator.Estimate(aec_state, *render_delay_buffer->GetRenderBuffer(),
+                           S2_linear, Y2, R2);
+      }
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/reverb_decay_estimator.cc b/audio_processing/aec3/reverb_decay_estimator.cc
new file mode 100644
index 0000000..d9cee1d
--- /dev/null
+++ b/audio_processing/aec3/reverb_decay_estimator.cc
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/reverb_decay_estimator.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kEarlyReverbMinSizeBlocks = 3;
+constexpr int kBlocksPerSection = 6;
+// Linear regression approach assumes symmetric index around 0.
+constexpr float kEarlyReverbFirstPointAtLinearRegressors =
+    -0.5f * kBlocksPerSection * kFftLengthBy2 + 0.5f;
+
+// Averages the values in a block of size kFftLengthBy2.
+float BlockAverage(rtc::ArrayView<const float> v, size_t block_index) {
+  constexpr float kOneByFftLengthBy2 = 1.f / kFftLengthBy2;
+  const int i = block_index * kFftLengthBy2;
+  RTC_DCHECK_GE(v.size(), i + kFftLengthBy2);
+  const float sum =
+      std::accumulate(v.begin() + i, v.begin() + i + kFftLengthBy2, 0.f);
+  return sum * kOneByFftLengthBy2;
+}
+
+// Analyzes the gain in a block.
+void AnalyzeBlockGain(const std::array<float, kFftLengthBy2>& h2,
+                      float floor_gain,
+                      float* previous_gain,
+                      bool* block_adapting,
+                      bool* decaying_gain) {
+  float gain = std::max(BlockAverage(h2, 0), 1e-32f);
+  *block_adapting =
+      *previous_gain > 1.1f * gain || *previous_gain < 0.9f * gain;
+  *decaying_gain = gain > floor_gain;
+  *previous_gain = gain;
+}
+
+// Arithmetic sum of $2 \sum_{i=0.5}^{(N-1)/2}i^2$ calculated directly.
+constexpr float SymmetricArithmetricSum(int N) {
+  return N * (N * N - 1.0f) * (1.f / 12.f);
+}
+
+// Returns the peak energy of an impulse response.
+float BlockEnergyPeak(rtc::ArrayView<const float> h, int peak_block) {
+  RTC_DCHECK_LE((peak_block + 1) * kFftLengthBy2, h.size());
+  RTC_DCHECK_GE(peak_block, 0);
+  float peak_value =
+      *std::max_element(h.begin() + peak_block * kFftLengthBy2,
+                        h.begin() + (peak_block + 1) * kFftLengthBy2,
+                        [](float a, float b) { return a * a < b * b; });
+  return peak_value * peak_value;
+}
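+
+// Editor's check for SymmetricArithmetricSum above (not upstream code): for
+// regressor indices i = -(N-1)/2, ..., (N-1)/2 in unit steps (half-integers
+// for even N), the sum of squares is
+//   2 * sum_{i=0.5}^{(N-1)/2} i^2 = N * (N^2 - 1) / 12.
+// E.g. N = 4 gives indices {-1.5, -0.5, 0.5, 1.5} and
+//   2 * (0.25 + 2.25) = 5 = 4 * (16 - 1) / 12.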
+
+// Returns the average energy of an impulse response block.
+float BlockEnergyAverage(rtc::ArrayView<const float> h, int block_index) {
+  RTC_DCHECK_LE((block_index + 1) * kFftLengthBy2, h.size());
+  RTC_DCHECK_GE(block_index, 0);
+  constexpr float kOneByFftLengthBy2 = 1.f / kFftLengthBy2;
+  const auto sum_of_squares = [](float a, float b) { return a + b * b; };
+  return std::accumulate(h.begin() + block_index * kFftLengthBy2,
+                         h.begin() + (block_index + 1) * kFftLengthBy2, 0.f,
+                         sum_of_squares) *
+         kOneByFftLengthBy2;
+}
+
+}  // namespace
+
+ReverbDecayEstimator::ReverbDecayEstimator(const EchoCanceller3Config& config)
+    : filter_length_blocks_(config.filter.main.length_blocks),
+      filter_length_coefficients_(GetTimeDomainLength(filter_length_blocks_)),
+      use_adaptive_echo_decay_(config.ep_strength.default_len < 0.f),
+      early_reverb_estimator_(config.filter.main.length_blocks -
+                              kEarlyReverbMinSizeBlocks),
+      late_reverb_start_(kEarlyReverbMinSizeBlocks),
+      late_reverb_end_(kEarlyReverbMinSizeBlocks),
+      previous_gains_(config.filter.main.length_blocks, 0.f),
+      decay_(std::fabs(config.ep_strength.default_len)) {
+  RTC_DCHECK_GT(config.filter.main.length_blocks,
+                static_cast<size_t>(kEarlyReverbMinSizeBlocks));
+}
+
+ReverbDecayEstimator::~ReverbDecayEstimator() = default;
+
+void ReverbDecayEstimator::Update(rtc::ArrayView<const float> filter,
+                                  const absl::optional<float>& filter_quality,
+                                  int filter_delay_blocks,
+                                  bool usable_linear_filter,
+                                  bool stationary_signal) {
+  const int filter_size = static_cast<int>(filter.size());
+
+  if (stationary_signal) {
+    return;
+  }
+
+  bool estimation_feasible =
+      filter_delay_blocks <=
+      filter_length_blocks_ - kEarlyReverbMinSizeBlocks - 1;
+  estimation_feasible =
+      estimation_feasible && filter_size == filter_length_coefficients_;
+  estimation_feasible = estimation_feasible && filter_delay_blocks > 0;
+  estimation_feasible = estimation_feasible && usable_linear_filter;
+
+  if (!estimation_feasible) {
+    ResetDecayEstimation();
+    return;
+  }
+
+  if (!use_adaptive_echo_decay_) {
+    return;
+  }
+
+  const float new_smoothing = filter_quality ? *filter_quality * 0.2f : 0.f;
+  smoothing_constant_ = std::max(new_smoothing, smoothing_constant_);
+  if (smoothing_constant_ == 0.f) {
+    return;
+  }
+
+  if (block_to_analyze_ < filter_length_blocks_) {
+    // Analyze the filter and accumulate data for reverb estimation.
+    AnalyzeFilter(filter);
+    ++block_to_analyze_;
+  } else {
+    // When the filter is fully analyzed, estimate the reverb decay and reset
+    // the block_to_analyze_ counter.
+    EstimateDecay(filter, filter_delay_blocks);
+  }
+}
+
+void ReverbDecayEstimator::ResetDecayEstimation() {
+  early_reverb_estimator_.Reset();
+  late_reverb_decay_estimator_.Reset(0);
+  block_to_analyze_ = 0;
+  estimation_region_candidate_size_ = 0;
+  estimation_region_identified_ = false;
+  smoothing_constant_ = 0.f;
+  late_reverb_start_ = 0;
+  late_reverb_end_ = 0;
+}
+
+void ReverbDecayEstimator::EstimateDecay(rtc::ArrayView<const float> filter,
+                                         int peak_block) {
+  auto& h = filter;
+  RTC_DCHECK_EQ(0, h.size() % kFftLengthBy2);
+
+  // Reset the block analysis counter.
+  block_to_analyze_ =
+      std::min(peak_block + kEarlyReverbMinSizeBlocks, filter_length_blocks_);
+
+  // To estimate the reverb decay, the energy of the first filter section must
+  // be substantially larger than the last. Also, the first filter section
+  // energy must not deviate too much from the max peak.
+  const float first_reverb_gain = BlockEnergyAverage(h, block_to_analyze_);
+  const size_t h_size_blocks = h.size() >> kFftLengthBy2Log2;
+  tail_gain_ = BlockEnergyAverage(h, h_size_blocks - 1);
+  float peak_energy = BlockEnergyPeak(h, peak_block);
+  const bool sufficient_reverb_decay = first_reverb_gain > 4.f * tail_gain_;
+  const bool valid_filter =
+      first_reverb_gain > 2.f * tail_gain_ && peak_energy < 100.f;
+
+  // Estimate the size of the regions with early and late reflections.
+  const int size_early_reverb = early_reverb_estimator_.Estimate();
+  const int size_late_reverb =
+      std::max(estimation_region_candidate_size_ - size_early_reverb, 0);
+
+  // Only update the reverb decay estimate if the size of the identified late
+  // reverb is sufficiently large.
+  if (size_late_reverb >= 5) {
+    if (valid_filter && late_reverb_decay_estimator_.EstimateAvailable()) {
+      float decay = std::pow(
+          2.0f, late_reverb_decay_estimator_.Estimate() * kFftLengthBy2);
+      constexpr float kMaxDecay = 0.95f;  // ~1 sec min RT60.
+      constexpr float kMinDecay = 0.02f;  // ~15 ms max RT60.
+      decay = std::max(.97f * decay_, decay);
+      decay = std::min(decay, kMaxDecay);
+      decay = std::max(decay, kMinDecay);
+      decay_ += smoothing_constant_ * (decay - decay_);
+    }
+
+    // Update length of decay. Must have enough data (number of sections) in
+    // order to estimate decay rate.
+    late_reverb_decay_estimator_.Reset(size_late_reverb * kFftLengthBy2);
+    late_reverb_start_ =
+        peak_block + kEarlyReverbMinSizeBlocks + size_early_reverb;
+    late_reverb_end_ =
+        block_to_analyze_ + estimation_region_candidate_size_ - 1;
+  } else {
+    late_reverb_decay_estimator_.Reset(0);
+    late_reverb_start_ = 0;
+    late_reverb_end_ = 0;
+  }
+
+  // Reset variables for the identification of the region for reverb decay
+  // estimation.
+  estimation_region_identified_ = !(valid_filter && sufficient_reverb_decay);
+  estimation_region_candidate_size_ = 0;
+
+  // Stop estimation of the decay until another good filter is received.
+  smoothing_constant_ = 0.f;
+
+  // Reset early reflections detector.
+  early_reverb_estimator_.Reset();
+}
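+
+// Editor's note (not upstream code): with 64-sample blocks at 16 kHz (4 ms),
+// a per-block power decay d reaches -60 dB (RT60) after
+//   n = -60 / (10 * log10(d)) blocks.
+// kMaxDecay = 0.95 gives n ~= 269 blocks ~= 1.1 s, and kMinDecay = 0.02 gives
+// n ~= 3.5 blocks ~= 14 ms, matching the "~1 sec min RT60" and
+// "~15 ms max RT60" comments above.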
+
+void ReverbDecayEstimator::AnalyzeFilter(rtc::ArrayView<const float> filter) {
+  auto h = rtc::ArrayView<const float>(
+      filter.begin() + block_to_analyze_ * kFftLengthBy2, kFftLengthBy2);
+
+  // Compute squared filter coefficients for the block to analyze_.
+  std::array<float, kFftLengthBy2> h2;
+  std::transform(h.begin(), h.end(), h2.begin(),
+                 [](float a) { return a * a; });
+
+  // Map out the region for estimating the reverb decay.
+  bool adapting;
+  bool above_noise_floor;
+  AnalyzeBlockGain(h2, tail_gain_, &previous_gains_[block_to_analyze_],
+                   &adapting, &above_noise_floor);
+
+  // Count consecutive number of "good" filter sections, where "good" means:
+  // 1) energy is above noise floor.
+  // 2) energy of current section has not changed too much from last check.
+  estimation_region_identified_ =
+      estimation_region_identified_ || adapting || !above_noise_floor;
+  if (!estimation_region_identified_) {
+    ++estimation_region_candidate_size_;
+  }
+
+  // Accumulate data for reverb decay estimation and for the estimation of
+  // early reflections.
+  if (block_to_analyze_ <= late_reverb_end_) {
+    if (block_to_analyze_ >= late_reverb_start_) {
+      for (float h2_k : h2) {
+        float h2_log2 = FastApproxLog2f(h2_k + 1e-10);
+        late_reverb_decay_estimator_.Accumulate(h2_log2);
+        early_reverb_estimator_.Accumulate(h2_log2, smoothing_constant_);
+      }
+    } else {
+      for (float h2_k : h2) {
+        float h2_log2 = FastApproxLog2f(h2_k + 1e-10);
+        early_reverb_estimator_.Accumulate(h2_log2, smoothing_constant_);
+      }
+    }
+  }
+}
+
+void ReverbDecayEstimator::Dump(ApmDataDumper* data_dumper) const {
+  data_dumper->DumpRaw("aec3_reverb_decay", decay_);
+  data_dumper->DumpRaw("aec3_reverb_tail_energy", tail_gain_);
+  data_dumper->DumpRaw("aec3_reverb_alpha", smoothing_constant_);
+  data_dumper->DumpRaw("aec3_num_reverb_decay_blocks",
+                       late_reverb_end_ - late_reverb_start_);
+  data_dumper->DumpRaw("aec3_late_reverb_start", late_reverb_start_);
+  data_dumper->DumpRaw("aec3_late_reverb_end", late_reverb_end_);
+  early_reverb_estimator_.Dump(data_dumper);
+}
+
+void ReverbDecayEstimator::LateReverbLinearRegressor::Reset(
+    int num_data_points) {
+  RTC_DCHECK_LE(0, num_data_points);
+  RTC_DCHECK_EQ(0, num_data_points % 2);
+  const int N = num_data_points;
+  nz_ = 0.f;
+  // Arithmetic sum of $2 \sum_{i=0.5}^{(N-1)/2}i^2$ calculated directly.
+  nn_ = SymmetricArithmetricSum(N);
+  // The linear regression approach assumes symmetric index around 0.
+  count_ = N > 0 ? -N * 0.5f + 0.5f : 0.f;
+  N_ = N;
+  n_ = 0;
+}
+
+void ReverbDecayEstimator::LateReverbLinearRegressor::Accumulate(float z) {
+  nz_ += count_ * z;
+  ++count_;
+  ++n_;
+}
+
+float ReverbDecayEstimator::LateReverbLinearRegressor::Estimate() {
+  RTC_DCHECK(EstimateAvailable());
+  if (nn_ == 0.f) {
+    RTC_NOTREACHED();
+    return 0.f;
+  }
+  return nz_ / nn_;
+}
+
+ReverbDecayEstimator::EarlyReverbLengthEstimator::EarlyReverbLengthEstimator(
+    int max_blocks)
+    : numerators_smooth_(max_blocks - kBlocksPerSection, 0.f),
+      numerators_(numerators_smooth_.size(), 0.f),
+      coefficients_counter_(0) {
+  RTC_DCHECK_LE(0, max_blocks);
+}
+
+ReverbDecayEstimator::EarlyReverbLengthEstimator::
+    ~EarlyReverbLengthEstimator() = default;
+
+void ReverbDecayEstimator::EarlyReverbLengthEstimator::Reset() {
+  coefficients_counter_ = 0;
+  std::fill(numerators_.begin(), numerators_.end(), 0.f);
+  block_counter_ = 0;
+}
+
+void ReverbDecayEstimator::EarlyReverbLengthEstimator::Accumulate(
+    float value,
+    float smoothing) {
+  // Each section is composed by kBlocksPerSection blocks and each section
+  // overlaps with the next one in (kBlocksPerSection - 1) blocks. For example,
+  // the first section covers the blocks [0:5], the second covers the blocks
+  // [1:6] and so on. As a result, for each value, kBlocksPerSection sections
+  // need to be updated.
+  int first_section_index = std::max(block_counter_ - kBlocksPerSection + 1, 0);
+  int last_section_index =
+      std::min(block_counter_, static_cast<int>(numerators_.size() - 1));
+  float x_value = static_cast<float>(coefficients_counter_) +
+                  kEarlyReverbFirstPointAtLinearRegressors;
+  const float value_to_inc = kFftLengthBy2 * value;
+  float value_to_add =
+      x_value * value + (block_counter_ - last_section_index) * value_to_inc;
+  for (int section = last_section_index; section >= first_section_index;
+       --section, value_to_add += value_to_inc) {
+    numerators_[section] += value_to_add;
+  }
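+
+  // Editor's example (not upstream code): with kBlocksPerSection = 6, a value
+  // arriving in block 7 belongs to the overlapping sections [2:7] ... [7:12],
+  // so first_section_index = 2. value_to_add grows by value_to_inc per step
+  // towards earlier sections because the same coefficient sits
+  // kFftLengthBy2 samples further along the x-axis in each earlier section.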
+
+  // Check if this update was the last coefficient of the current block. In
+  // that case, check if we are at the end of one of the sections and update
+  // the numerator of the linear regressor that is computed in such section.
+  if (++coefficients_counter_ == kFftLengthBy2) {
+    if (block_counter_ >= (kBlocksPerSection - 1)) {
+      size_t section = block_counter_ - (kBlocksPerSection - 1);
+      RTC_DCHECK_GT(numerators_.size(), section);
+      RTC_DCHECK_GT(numerators_smooth_.size(), section);
+      numerators_smooth_[section] +=
+          smoothing * (numerators_[section] - numerators_smooth_[section]);
+      n_sections_ = section + 1;
+    }
+    ++block_counter_;
+    coefficients_counter_ = 0;
+  }
+}
+
+// Estimates the size in blocks of the early reverb. The estimation is done by
+// comparing the tilt that is estimated in each section. As an optimization
+// detail and due to the fact that all the linear regressors that are computed
+// share the same denominator, the comparison of the tilts is done by a
+// comparison of the numerator of the linear regressors.
+int ReverbDecayEstimator::EarlyReverbLengthEstimator::Estimate() {
+  constexpr float N = kBlocksPerSection * kFftLengthBy2;
+  constexpr float nn = SymmetricArithmetricSum(N);
+  // numerator_11 refers to the quantity that the linear regressor needs in the
+  // numerator for getting a decay equal to 1.1 (which is not a decay).
+  // log2(1.1) * nn / kFftLengthBy2.
+  constexpr float numerator_11 = 0.13750352374993502f * nn / kFftLengthBy2;
+  // log2(0.8) * nn / kFftLengthBy2.
+  constexpr float numerator_08 = -0.32192809488736229f * nn / kFftLengthBy2;
+  constexpr int kNumSectionsToAnalyze = 9;
+
+  if (n_sections_ < kNumSectionsToAnalyze) {
+    return 0;
+  }
+
+  // Estimation of the blocks that correspond to early reverberations. The
+  // estimation is done by analyzing the impulse response. The portions of the
+  // impulse response whose energy is not decreasing over its coefficients are
+  // considered to be part of the early reverberations. Furthermore, the blocks
+  // where the energy is decreasing faster than what it does at the end of the
+  // impulse response are also considered to be part of the early
+  // reverberations. The estimation is limited to the first
+  // kNumSectionsToAnalyze sections.
+
+  RTC_DCHECK_LE(n_sections_, numerators_smooth_.size());
+  const float min_numerator_tail =
+      *std::min_element(numerators_smooth_.begin() + kNumSectionsToAnalyze,
+                        numerators_smooth_.begin() + n_sections_);
+  int early_reverb_size_minus_1 = 0;
+  for (int k = 0; k < kNumSectionsToAnalyze; ++k) {
+    if ((numerators_smooth_[k] > numerator_11) ||
+        (numerators_smooth_[k] < numerator_08 &&
+         numerators_smooth_[k] < 0.9f * min_numerator_tail)) {
+      early_reverb_size_minus_1 = k;
+    }
+  }
+
+  return early_reverb_size_minus_1 == 0 ? 0 : early_reverb_size_minus_1 + 1;
+}
+
+void ReverbDecayEstimator::EarlyReverbLengthEstimator::Dump(
+    ApmDataDumper* data_dumper) const {
+  data_dumper->DumpRaw("aec3_er_acum_numerator", numerators_smooth_);
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/reverb_decay_estimator.h b/audio_processing/aec3/reverb_decay_estimator.h
new file mode 100644
index 0000000..810a120
--- /dev/null
+++ b/audio_processing/aec3/reverb_decay_estimator.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_REVERB_DECAY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_REVERB_DECAY_ESTIMATOR_H_
+
+#include <stddef.h>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"  // kMaxAdaptiveFilter...
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct EchoCanceller3Config;
+
+// Class for estimating the decay of the late reverb.
+class ReverbDecayEstimator {
+ public:
+  explicit ReverbDecayEstimator(const EchoCanceller3Config& config);
+  ~ReverbDecayEstimator();
+  // Updates the decay estimate.
+  void Update(rtc::ArrayView<const float> filter,
+              const absl::optional<float>& filter_quality,
+              int filter_delay_blocks,
+              bool usable_linear_filter,
+              bool stationary_signal);
+  // Returns the decay for the exponential model.
+  float Decay() const { return decay_; }
+  // Dumps debug data.
+  void Dump(ApmDataDumper* data_dumper) const;
+
+ private:
+  void EstimateDecay(rtc::ArrayView<const float> filter, int peak_block);
+  void AnalyzeFilter(rtc::ArrayView<const float> filter);
+
+  void ResetDecayEstimation();
+
+  // Class for estimating the decay of the late reverb from the linear filter.
+  class LateReverbLinearRegressor {
+   public:
+    // Resets the estimator to receive a specified number of data points.
+    void Reset(int num_data_points);
+    // Accumulates estimation data.
+    void Accumulate(float z);
+    // Estimates the decay.
+    float Estimate();
+    // Returns whether an estimate is available.
+    bool EstimateAvailable() const { return n_ == N_ && N_ != 0; }
+
+   private:
+    float nz_ = 0.f;
+    float nn_ = 0.f;
+    float count_ = 0.f;
+    int N_ = 0;
+    int n_ = 0;
+  };
+
+  // Class for identifying the length of the early reverb from the linear
+  // filter. For identifying the early reverberations, the impulse response is
+  // divided in sections and the tilt of each section is computed by a linear
+  // regressor.
+  class EarlyReverbLengthEstimator {
+   public:
+    explicit EarlyReverbLengthEstimator(int max_blocks);
+    ~EarlyReverbLengthEstimator();
+
+    // Resets the estimator.
+    void Reset();
+    // Accumulates estimation data.
+    void Accumulate(float value, float smoothing);
+    // Estimates the size in blocks of the early reverb.
+    int Estimate();
+    // Dumps debug data.
+    void Dump(ApmDataDumper* data_dumper) const;
+
+   private:
+    std::vector<float> numerators_smooth_;
+    std::vector<float> numerators_;
+    int coefficients_counter_;
+    int block_counter_ = 0;
+    int n_sections_ = 0;
+  };
+
+  const int filter_length_blocks_;
+  const int filter_length_coefficients_;
+  const bool use_adaptive_echo_decay_;
+  LateReverbLinearRegressor late_reverb_decay_estimator_;
+  EarlyReverbLengthEstimator early_reverb_estimator_;
+  int late_reverb_start_;
+  int late_reverb_end_;
+  int block_to_analyze_ = 0;
+  int estimation_region_candidate_size_ = 0;
+  bool estimation_region_identified_ = false;
+  std::vector<float> previous_gains_;
+  float decay_;
+  float tail_gain_ = 0.f;
+  float smoothing_constant_ = 0.f;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_REVERB_DECAY_ESTIMATOR_H_
diff --git a/audio_processing/aec3/reverb_frequency_response.cc b/audio_processing/aec3/reverb_frequency_response.cc
new file mode 100644
index 0000000..1f88c14
--- /dev/null
+++ b/audio_processing/aec3/reverb_frequency_response.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/reverb_frequency_response.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <array>
+#include <numeric>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// Computes the ratio of the energies between the direct path and the tail. The
+// energy is computed in the power spectrum domain discarding the DC
+// contributions.
+float AverageDecayWithinFilter(
+    rtc::ArrayView<const float> freq_resp_direct_path,
+    rtc::ArrayView<const float> freq_resp_tail) {
+  // Skipping the DC for the ratio computation.
+  constexpr size_t kSkipBins = 1;
+  RTC_CHECK_EQ(freq_resp_direct_path.size(), freq_resp_tail.size());
+
+  float direct_path_energy =
+      std::accumulate(freq_resp_direct_path.begin() + kSkipBins,
+                      freq_resp_direct_path.end(), 0.f);
+
+  if (direct_path_energy == 0.f) {
+    return 0.f;
+  }
+
+  float tail_energy = std::accumulate(freq_resp_tail.begin() + kSkipBins,
+                                      freq_resp_tail.end(), 0.f);
+  return tail_energy / direct_path_energy;
+}
+
+}  // namespace
+
+ReverbFrequencyResponse::ReverbFrequencyResponse() {
+  tail_response_.fill(0.f);
+}
+ReverbFrequencyResponse::~ReverbFrequencyResponse() = default;
+
+void ReverbFrequencyResponse::Update(
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        frequency_response,
+    int filter_delay_blocks,
+    const absl::optional<float>& linear_filter_quality,
+    bool stationary_block) {
+  if (stationary_block || !linear_filter_quality) {
+    return;
+  }
+
+  Update(frequency_response, filter_delay_blocks, *linear_filter_quality);
+}
+
+void ReverbFrequencyResponse::Update(
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        frequency_response,
+    int filter_delay_blocks,
+    float linear_filter_quality) {
+  rtc::ArrayView<const float> freq_resp_tail(
+      frequency_response[frequency_response.size() - 1]);
+
+  rtc::ArrayView<const float> freq_resp_direct_path(
+      frequency_response[filter_delay_blocks]);
+
+  float average_decay =
+      AverageDecayWithinFilter(freq_resp_direct_path, freq_resp_tail);
+
+  const float smoothing = 0.2f * linear_filter_quality;
+  average_decay_ += smoothing * (average_decay - average_decay_);
+
+  for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+    tail_response_[k] = freq_resp_direct_path[k] * average_decay_;
+  }
+
+  for (size_t k = 1; k < kFftLengthBy2; ++k) {
+    const float avg_neighbour =
+        0.5f * (tail_response_[k - 1] + tail_response_[k + 1]);
+    tail_response_[k] = std::max(tail_response_[k], avg_neighbour);
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/reverb_frequency_response.h b/audio_processing/aec3/reverb_frequency_response.h
new file mode 100644
index 0000000..d509f93
--- /dev/null
+++ b/audio_processing/aec3/reverb_frequency_response.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_REVERB_FREQUENCY_RESPONSE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_REVERB_FREQUENCY_RESPONSE_H_
+
+#include <array>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// Class for updating the frequency response for the reverb.
+class ReverbFrequencyResponse {
+ public:
+  ReverbFrequencyResponse();
+  ~ReverbFrequencyResponse();
+
+  // Updates the frequency response estimate of the reverb.
+  void Update(const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+                  frequency_response,
+              int filter_delay_blocks,
+              const absl::optional<float>& linear_filter_quality,
+              bool stationary_block);
+
+  // Returns the estimated frequency response for the reverb.
+  rtc::ArrayView<const float> FrequencyResponse() const {
+    return tail_response_;
+  }
+
+ private:
+  void Update(const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+                  frequency_response,
+              int filter_delay_blocks,
+              float linear_filter_quality);
+
+  float average_decay_ = 0.f;
+  std::array<float, kFftLengthBy2Plus1> tail_response_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_REVERB_FREQUENCY_RESPONSE_H_
diff --git a/audio_processing/aec3/reverb_model.cc b/audio_processing/aec3/reverb_model.cc
new file mode 100644
index 0000000..49ae03a
--- /dev/null
+++ b/audio_processing/aec3/reverb_model.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/reverb_model.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <functional>
+
+#include "rtc_base/array_view.h"
+
+namespace webrtc {
+
+ReverbModel::ReverbModel() {
+  Reset();
+}
+
+ReverbModel::~ReverbModel() = default;
+
+void ReverbModel::Reset() {
+  reverb_.fill(0.);
+}
+
+void ReverbModel::UpdateReverbNoFreqShaping(
+    rtc::ArrayView<const float> power_spectrum,
+    float power_spectrum_scaling,
+    float reverb_decay) {
+  if (reverb_decay > 0) {
+    // Update the estimate of the reverberant power.
+    for (size_t k = 0; k < power_spectrum.size(); ++k) {
+      reverb_[k] = (reverb_[k] + power_spectrum[k] * power_spectrum_scaling) *
+                   reverb_decay;
+    }
+  }
+}
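+
+// Editor's note (not upstream code): both update methods apply the same
+// first-order recursion per bin,
+//   reverb[k] <- (reverb[k] + scaling * power[k]) * decay,
+// so a constant input power P with constant scaling s converges to the
+// geometric-series fixed point reverb[k] = P * s * decay / (1 - decay).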
+
+void ReverbModel::UpdateReverb(
+    rtc::ArrayView<const float> power_spectrum,
+    rtc::ArrayView<const float> power_spectrum_scaling,
+    float reverb_decay) {
+  if (reverb_decay > 0) {
+    // Update the estimate of the reverberant power.
+    for (size_t k = 0; k < power_spectrum.size(); ++k) {
+      reverb_[k] =
+          (reverb_[k] + power_spectrum[k] * power_spectrum_scaling[k]) *
+          reverb_decay;
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/reverb_model.h b/audio_processing/aec3/reverb_model.h
new file mode 100644
index 0000000..1738f7f
--- /dev/null
+++ b/audio_processing/aec3/reverb_model.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_H_
+
+#include <array>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// The ReverbModel class describes an exponential reverberant model
+// that can be applied over power spectrums.
+class ReverbModel {
+ public:
+  ReverbModel();
+  ~ReverbModel();
+
+  // Resets the state.
+  void Reset();
+
+  // Returns the reverb.
+  rtc::ArrayView<const float> reverb() const { return reverb_; }
+
+  // The methods UpdateReverbNoFreqShaping and UpdateReverb update the
+  // estimate of the reverberation contribution to an input/output power
+  // spectrum. Before applying the exponential reverberant model, the input
+  // power spectrum is pre-scaled. Use the method UpdateReverb when a different
+  // scaling should be applied per frequency and UpdateReverbNoFreqShaping if
+  // the same scaling should be used for all the frequencies.
+  void UpdateReverbNoFreqShaping(rtc::ArrayView<const float> power_spectrum,
+                                 float power_spectrum_scaling,
+                                 float reverb_decay);
+
+  // Update the reverb based on new data.
+  void UpdateReverb(rtc::ArrayView<const float> power_spectrum,
+                    rtc::ArrayView<const float> power_spectrum_scaling,
+                    float reverb_decay);
+
+ private:
+  std::array<float, kFftLengthBy2Plus1> reverb_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_H_
diff --git a/audio_processing/aec3/reverb_model_estimator.cc b/audio_processing/aec3/reverb_model_estimator.cc
new file mode 100644
index 0000000..6e2dfbb
--- /dev/null
+++ b/audio_processing/aec3/reverb_model_estimator.cc
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/reverb_model_estimator.h"
+
+namespace webrtc {
+
+ReverbModelEstimator::ReverbModelEstimator(const EchoCanceller3Config& config,
+                                           size_t num_capture_channels)
+    : reverb_decay_estimators_(num_capture_channels),
+      reverb_frequency_responses_(num_capture_channels) {
+  for (size_t ch = 0; ch < reverb_decay_estimators_.size(); ++ch) {
+    reverb_decay_estimators_[ch] =
+        std::make_unique<ReverbDecayEstimator>(config);
+  }
+}
+
+ReverbModelEstimator::~ReverbModelEstimator() = default;
+
+void ReverbModelEstimator::Update(
+    rtc::ArrayView<const std::vector<float>> impulse_responses,
+    rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+        frequency_responses,
+    rtc::ArrayView<const absl::optional<float>> linear_filter_qualities,
+    rtc::ArrayView<const int> filter_delays_blocks,
+    const std::vector<bool>& usable_linear_estimates,
+    bool stationary_block) {
+  const size_t num_capture_channels = reverb_decay_estimators_.size();
+  RTC_DCHECK_EQ(num_capture_channels, impulse_responses.size());
+  RTC_DCHECK_EQ(num_capture_channels, frequency_responses.size());
+  RTC_DCHECK_EQ(num_capture_channels, usable_linear_estimates.size());
+
+  for (size_t ch = 0; ch < num_capture_channels; ++ch) {
+    // Estimate the frequency response for the reverb.
+    reverb_frequency_responses_[ch].Update(
+        frequency_responses[ch], filter_delays_blocks[ch],
+        linear_filter_qualities[ch], stationary_block);
+
+    // Estimate the reverb decay.
+    reverb_decay_estimators_[ch]->Update(
+        impulse_responses[ch], linear_filter_qualities[ch],
+        filter_delays_blocks[ch], usable_linear_estimates[ch],
+        stationary_block);
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/reverb_model_estimator.h b/audio_processing/aec3/reverb_model_estimator.h
new file mode 100644
index 0000000..b6c02bc
--- /dev/null
+++ b/audio_processing/aec3/reverb_model_estimator.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_ESTIMATOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"  // kFftLengthBy2Plus1
+#include "audio_processing/aec3/reverb_decay_estimator.h"
+#include "audio_processing/aec3/reverb_frequency_response.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// Class for estimating the model parameters for the reverberant echo.
+class ReverbModelEstimator {
+ public:
+  ReverbModelEstimator(const EchoCanceller3Config& config,
+                       size_t num_capture_channels);
+  ~ReverbModelEstimator();
+
+  // Updates the estimates based on new data.
+  void Update(
+      rtc::ArrayView<const std::vector<float>> impulse_responses,
+      rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+          frequency_responses,
+      rtc::ArrayView<const absl::optional<float>> linear_filter_qualities,
+      rtc::ArrayView<const int> filter_delays_blocks,
+      const std::vector<bool>& usable_linear_estimates,
+      bool stationary_block);
+
+  // Returns the exponential decay of the reverberant echo.
+  // TODO(peah): Correct to properly support multiple channels.
+  float ReverbDecay() const { return reverb_decay_estimators_[0]->Decay(); }
+
+  // Return the frequency response of the reverberant echo.
+  // TODO(peah): Correct to properly support multiple channels.
+  rtc::ArrayView<const float> GetReverbFrequencyResponse() const {
+    return reverb_frequency_responses_[0].FrequencyResponse();
+  }
+
+  // Dumps debug data.
+  void Dump(ApmDataDumper* data_dumper) const {
+    reverb_decay_estimators_[0]->Dump(data_dumper);
+  }
+
+ private:
+  std::vector<std::unique_ptr<ReverbDecayEstimator>> reverb_decay_estimators_;
+  std::vector<ReverbFrequencyResponse> reverb_frequency_responses_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_ESTIMATOR_H_
diff --git a/audio_processing/aec3/reverb_model_estimator_unittest.cc b/audio_processing/aec3/reverb_model_estimator_unittest.cc
new file mode 100644
index 0000000..50a4dc0
--- /dev/null
+++ b/audio_processing/aec3/reverb_model_estimator_unittest.cc
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/reverb_model_estimator.h"
+
+#include <cmath>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+EchoCanceller3Config CreateConfigForTest(float default_decay) {
+  EchoCanceller3Config cfg;
+  cfg.ep_strength.default_len = default_decay;
+  cfg.filter.main.length_blocks = 40;
+  return cfg;
+}
+
+constexpr int kFilterDelayBlocks = 2;
+
+}  // namespace
+
+class ReverbModelEstimatorTest {
+ public:
+  ReverbModelEstimatorTest(float default_decay, size_t num_capture_channels)
+      : aec3_config_(CreateConfigForTest(default_decay)),
+        estimated_decay_(default_decay),
+        h_(num_capture_channels,
+           std::vector<float>(
+               aec3_config_.filter.main.length_blocks * kBlockSize, 0.f)),
+        H2_(num_capture_channels,
+            std::vector<std::array<float, kFftLengthBy2Plus1>>(
+                aec3_config_.filter.main.length_blocks)),
+        quality_linear_(num_capture_channels, 1.0f) {
+    CreateImpulseResponseWithDecay();
+  }
+  void RunEstimator();
+  float GetDecay() { return estimated_decay_; }
+  float GetTrueDecay() { return kTruePowerDecay; }
+  float GetPowerTailDb() { return 10.f * std::log10(estimated_power_tail_); }
+  float GetTruePowerTailDb() { return 10.f * std::log10(true_power_tail_); }
+
+ private:
+  void CreateImpulseResponseWithDecay();
+  static constexpr bool kStationaryBlock = false;
+  static constexpr float kTruePowerDecay = 0.5f;
+  const EchoCanceller3Config aec3_config_;
+  float estimated_decay_;
+  float estimated_power_tail_ = 0.f;
+  float true_power_tail_ = 0.f;
+  std::vector<std::vector<float>> h_;
+  std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>> H2_;
+  std::vector<absl::optional<float>> quality_linear_;
+};
+
+void ReverbModelEstimatorTest::CreateImpulseResponseWithDecay() {
+  const Aec3Fft fft;
+  for (const auto& h_k : h_) {
+    RTC_DCHECK_EQ(h_k.size(),
+                  aec3_config_.filter.main.length_blocks * kBlockSize);
+  }
+  for (const auto& H2_k : H2_) {
+    RTC_DCHECK_EQ(H2_k.size(), aec3_config_.filter.main.length_blocks);
+  }
+  RTC_DCHECK_EQ(kFilterDelayBlocks, 2);
+
+  float decay_sample = std::sqrt(powf(kTruePowerDecay, 1.f / kBlockSize));
+  const size_t filter_delay_coefficients = kFilterDelayBlocks * kBlockSize;
+  for (auto& h_i : h_) {
+    std::fill(h_i.begin(), h_i.end(), 0.f);
+    h_i[filter_delay_coefficients] = 1.f;
+    for (size_t k = filter_delay_coefficients + 1; k < h_i.size(); ++k) {
+      h_i[k] = h_i[k - 1] * decay_sample;
+    }
+  }
+
+  for (size_t ch = 0; ch < H2_.size(); ++ch) {
+    for (size_t j = 0, k = 0; j < H2_[ch].size(); ++j, k += kBlockSize) {
+      std::array<float, kFftLength> fft_data;
+      fft_data.fill(0.f);
+      std::copy(h_[ch].begin() + k, h_[ch].begin() + k + kBlockSize,
+                fft_data.begin());
+      FftData H_j;
+      fft.Fft(&fft_data, &H_j);
+      H_j.Spectrum(Aec3Optimization::kNone, H2_[ch][j]);
+    }
+  }
+  rtc::ArrayView<float> H2_tail(H2_[0][H2_[0].size() - 1]);
+  true_power_tail_ = std::accumulate(H2_tail.begin(), H2_tail.end(), 0.f);
+}
+void ReverbModelEstimatorTest::RunEstimator() {
+  const size_t num_capture_channels = H2_.size();
+  constexpr bool kUsableLinearEstimate = true;
+  ReverbModelEstimator estimator(aec3_config_, num_capture_channels);
+  std::vector<bool> usable_linear_estimates(num_capture_channels,
+                                            kUsableLinearEstimate);
+  std::vector<int> filter_delay_blocks(num_capture_channels,
+                                       kFilterDelayBlocks);
+  for (size_t k = 0; k < 3000; ++k) {
+    estimator.Update(h_, H2_,
+                     quality_linear_, filter_delay_blocks,
+                     usable_linear_estimates, kStationaryBlock);
+  }
+  estimated_decay_ = estimator.ReverbDecay();
+  auto freq_resp_tail = estimator.GetReverbFrequencyResponse();
+  estimated_power_tail_ =
+      std::accumulate(freq_resp_tail.begin(), freq_resp_tail.end(), 0.f);
+}
+
+TEST(ReverbModelEstimatorTests, NotChangingDecay) {
+  constexpr float kDefaultDecay = 0.9f;
+  for (size_t num_capture_channels : {1, 2, 4, 8}) {
+    ReverbModelEstimatorTest test(kDefaultDecay, num_capture_channels);
+    test.RunEstimator();
+    EXPECT_EQ(test.GetDecay(), kDefaultDecay);
+    EXPECT_NEAR(test.GetPowerTailDb(), test.GetTruePowerTailDb(), 5.f);
+  }
+}
+
+TEST(ReverbModelEstimatorTests, ChangingDecay) {
+  constexpr float kDefaultDecay = -0.9f;
+  for (size_t num_capture_channels : {1, 2, 4, 8}) {
+    ReverbModelEstimatorTest test(kDefaultDecay, num_capture_channels);
+    test.RunEstimator();
+    EXPECT_NEAR(test.GetDecay(), test.GetTrueDecay(), 0.1);
+    EXPECT_NEAR(test.GetPowerTailDb(), test.GetTruePowerTailDb(), 5.f);
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/shadow_filter_update_gain.cc b/audio_processing/aec3/shadow_filter_update_gain.cc
new file mode 100644
index 0000000..08678fc
--- /dev/null
+++ b/audio_processing/aec3/shadow_filter_update_gain.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/shadow_filter_update_gain.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+ShadowFilterUpdateGain::ShadowFilterUpdateGain(
+    const EchoCanceller3Config::Filter::ShadowConfiguration& config,
+    size_t config_change_duration_blocks)
+    : config_change_duration_blocks_(
+          static_cast<int>(config_change_duration_blocks)) {
+  SetConfig(config, true);
+  RTC_DCHECK_LT(0, config_change_duration_blocks_);
+  one_by_config_change_duration_blocks_ = 1.f / config_change_duration_blocks_;
+}
+
+void ShadowFilterUpdateGain::HandleEchoPathChange() {
+  poor_signal_excitation_counter_ = 0;
+  call_counter_ = 0;
+}
+
+void ShadowFilterUpdateGain::Compute(
+    const std::array<float, kFftLengthBy2Plus1>& render_power,
+    const RenderSignalAnalyzer& render_signal_analyzer,
+    const FftData& E_shadow,
+    size_t size_partitions,
+    bool saturated_capture_signal,
+    FftData* G) {
+  RTC_DCHECK(G);
+  ++call_counter_;
+
+  UpdateCurrentConfig();
+
+  if (render_signal_analyzer.PoorSignalExcitation()) {
+    poor_signal_excitation_counter_ = 0;
+  }
+
+  // Do not update the filter if the render is not sufficiently excited.
+  if (++poor_signal_excitation_counter_ < size_partitions ||
+      saturated_capture_signal || call_counter_ <= size_partitions) {
+    G->re.fill(0.f);
+    G->im.fill(0.f);
+    return;
+  }
+
+  // Compute mu.
+  std::array<float, kFftLengthBy2Plus1> mu;
+  const auto& X2 = render_power;
+  for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+    if (X2[k] > current_config_.noise_gate) {
+      mu[k] = current_config_.rate / X2[k];
+    } else {
+      mu[k] = 0.f;
+    }
+  }
+
+  // Avoid updating the filter close to narrow bands in the render signals.
+  render_signal_analyzer.MaskRegionsAroundNarrowBands(&mu);
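+
+  // Editor's note (not upstream code): with mu[k] = rate / X2[k] above the
+  // noise gate, the update below is an NLMS-style step,
+  //   G[k] = (rate / X2[k]) * E_shadow[k],
+  // i.e. the error is normalized by the render power so the effective step
+  // size is independent of the render level.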
+
+  // G = mu * E.
+  for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+    G->re[k] = mu[k] * E_shadow.re[k];
+    G->im[k] = mu[k] * E_shadow.im[k];
+  }
+}
+
+void ShadowFilterUpdateGain::UpdateCurrentConfig() {
+  RTC_DCHECK_GE(config_change_duration_blocks_, config_change_counter_);
+  if (config_change_counter_ > 0) {
+    if (--config_change_counter_ > 0) {
+      auto average = [](float from, float to, float from_weight) {
+        return from * from_weight + to * (1.f - from_weight);
+      };
+
+      float change_factor =
+          config_change_counter_ * one_by_config_change_duration_blocks_;
+
+      current_config_.rate =
+          average(old_target_config_.rate, target_config_.rate, change_factor);
+      current_config_.noise_gate =
+          average(old_target_config_.noise_gate, target_config_.noise_gate,
+                  change_factor);
+    } else {
+      current_config_ = old_target_config_ = target_config_;
+    }
+  }
+  RTC_DCHECK_LE(0, config_change_counter_);
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/shadow_filter_update_gain.h b/audio_processing/aec3/shadow_filter_update_gain.h
new file mode 100644
index 0000000..f6a9235
--- /dev/null
+++ b/audio_processing/aec3/shadow_filter_update_gain.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SHADOW_FILTER_UPDATE_GAIN_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SHADOW_FILTER_UPDATE_GAIN_H_
+
+#include <stddef.h>
+
+#include <array>
+
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/fft_data.h"
+#include "audio_processing/aec3/render_signal_analyzer.h"
+
+namespace webrtc {
+
+// Provides functionality for computing the fixed gain for the shadow filter.
+class ShadowFilterUpdateGain {
+ public:
+  explicit ShadowFilterUpdateGain(
+      const EchoCanceller3Config::Filter::ShadowConfiguration& config,
+      size_t config_change_duration_blocks);
+
+  // Takes action in the case of a known echo path change.
+  void HandleEchoPathChange();
+
+  // Computes the gain.
+  void Compute(const std::array<float, kFftLengthBy2Plus1>& render_power,
+               const RenderSignalAnalyzer& render_signal_analyzer,
+               const FftData& E_shadow,
+               size_t size_partitions,
+               bool saturated_capture_signal,
+               FftData* G);
+
+  // Sets a new config.
+  void SetConfig(
+      const EchoCanceller3Config::Filter::ShadowConfiguration& config,
+      bool immediate_effect) {
+    if (immediate_effect) {
+      old_target_config_ = current_config_ = target_config_ = config;
+      config_change_counter_ = 0;
+    } else {
+      old_target_config_ = current_config_;
+      target_config_ = config;
+      config_change_counter_ = config_change_duration_blocks_;
+    }
+  }
+
+ private:
+  EchoCanceller3Config::Filter::ShadowConfiguration current_config_;
+  EchoCanceller3Config::Filter::ShadowConfiguration target_config_;
+  EchoCanceller3Config::Filter::ShadowConfiguration old_target_config_;
+  const int config_change_duration_blocks_;
+  float one_by_config_change_duration_blocks_;
+  // TODO(peah): Check whether this counter should instead be initialized to a
+  // large value.
+  size_t poor_signal_excitation_counter_ = 0;
+  size_t call_counter_ = 0;
+  int config_change_counter_ = 0;
+
+  void UpdateCurrentConfig();
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SHADOW_FILTER_UPDATE_GAIN_H_
diff --git a/audio_processing/aec3/shadow_filter_update_gain_unittest.cc b/audio_processing/aec3/shadow_filter_update_gain_unittest.cc
new file mode 100644
index 0000000..d2d1005
--- /dev/null
+++ b/audio_processing/aec3/shadow_filter_update_gain_unittest.cc
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/shadow_filter_update_gain.h"
+
+#include <algorithm>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/random.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// Method for performing the simulations needed to test the shadow filter
+// update gain functionality.
+void RunFilterUpdateTest(int num_blocks_to_process,
+                         size_t delay_samples,
+                         size_t num_render_channels,
+                         int filter_length_blocks,
+                         const std::vector<int>& blocks_with_saturation,
+                         std::array<float, kBlockSize>* e_last_block,
+                         std::array<float, kBlockSize>* y_last_block,
+                         FftData* G_last_block) {
+  ApmDataDumper data_dumper(42);
+  EchoCanceller3Config config;
+  config.filter.main.length_blocks = filter_length_blocks;
+  AdaptiveFirFilter main_filter(
+      config.filter.main.length_blocks, config.filter.main.length_blocks,
+      config.filter.config_change_duration_blocks, num_render_channels,
+      DetectOptimization(), &data_dumper);
+  AdaptiveFirFilter shadow_filter(
+      config.filter.shadow.length_blocks, config.filter.shadow.length_blocks,
+      config.filter.config_change_duration_blocks, num_render_channels,
+      DetectOptimization(), &data_dumper);
+  Aec3Fft fft;
+
+  constexpr int kSampleRateHz = 48000;
+  config.delay.default_delay = 1;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels));
+
+  ShadowFilterUpdateGain shadow_gain(
+      config.filter.shadow, config.filter.config_change_duration_blocks);
+  Random random_generator(42U);
+  std::vector<std::vector<std::vector<float>>> x(
+      NumBandsForRate(kSampleRateHz),
+      std::vector<std::vector<float>>(num_render_channels,
+                                      std::vector<float>(kBlockSize, 0.f)));
+  std::array<float, kBlockSize> y;
+  RenderSignalAnalyzer render_signal_analyzer(config);
+  std::array<float, kFftLength> s;
+  FftData S;
+  FftData G;
+  FftData E_shadow;
+  std::array<float, kBlockSize> e_shadow;
+
+  constexpr float kScale = 1.0f / kFftLengthBy2;
+
+  DelayBuffer<float> delay_buffer(delay_samples);
+  for (int k = 0; k < num_blocks_to_process; ++k) {
+    // Handle saturation.
+    bool saturation =
+        std::find(blocks_with_saturation.begin(), blocks_with_saturation.end(),
+                  k) != blocks_with_saturation.end();
+
+    // Create the render signal.
+    for (size_t band = 0; band < x.size(); ++band) {
+      for (size_t channel = 0; channel < x[band].size(); ++channel) {
+        RandomizeSampleVector(&random_generator, x[band][channel]);
+      }
+    }
+    delay_buffer.Delay(x[0][0], y);
+
+    render_delay_buffer->Insert(x);
+    if (k == 0) {
+      render_delay_buffer->Reset();
+    }
+    render_delay_buffer->PrepareCaptureProcessing();
+
+    render_signal_analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+                                  delay_samples / kBlockSize);
+
+    shadow_filter.Filter(*render_delay_buffer->GetRenderBuffer(), &S);
+    fft.Ifft(S, &s);
+    std::transform(y.begin(), y.end(), s.begin() + kFftLengthBy2,
+                   e_shadow.begin(),
+                   [&](float a, float b) { return a - b * kScale; });
+    std::for_each(e_shadow.begin(), e_shadow.end(),
+                  [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+    fft.ZeroPaddedFft(e_shadow, Aec3Fft::Window::kRectangular, &E_shadow);
+
+    std::array<float, kFftLengthBy2Plus1> render_power;
+    render_delay_buffer->GetRenderBuffer()->SpectralSum(
+        shadow_filter.SizePartitions(), &render_power);
+    shadow_gain.Compute(render_power, render_signal_analyzer, E_shadow,
+                        shadow_filter.SizePartitions(), saturation, &G);
+    shadow_filter.Adapt(*render_delay_buffer->GetRenderBuffer(), G);
+  }
+
+  std::copy(e_shadow.begin(), e_shadow.end(), e_last_block->begin());
+  std::copy(y.begin(), y.end(), y_last_block->begin());
+  std::copy(G.re.begin(), G.re.end(), G_last_block->re.begin());
+  std::copy(G.im.begin(), G.im.end(), G_last_block->im.begin());
+}
+
+std::string ProduceDebugText(int filter_length_blocks) {
+  rtc::StringBuilder ss;
+  ss << "Length: " << filter_length_blocks;
+  return ss.Release();
+}
+
+std::string ProduceDebugText(size_t delay, int filter_length_blocks) {
+  rtc::StringBuilder ss;
+  ss << "Delay: " << delay << ", ";
+  ss << ProduceDebugText(filter_length_blocks);
+  return ss.Release();
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null output gain parameter works.
+TEST(ShadowFilterUpdateGain, NullDataOutputGain) {
+  ApmDataDumper data_dumper(42);
+  FftBuffer fft_buffer(1, 1);
+  RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+  FftData E;
+  const EchoCanceller3Config::Filter::ShadowConfiguration& config = {
+      12, 0.5f, 220075344.f};
+  ShadowFilterUpdateGain gain(config, 250);
+  std::array<float, kFftLengthBy2Plus1> render_power;
+  render_power.fill(0.f);
+  EXPECT_DEATH(gain.Compute(render_power, analyzer, E, 1, false, nullptr), "");
+}
+
+#endif
+
+// Verifies that the gain formed causes the filter using it to converge.
+TEST(ShadowFilterUpdateGain, GainCausesFilterToConverge) {
+  std::vector<int> blocks_with_echo_path_changes;
+  std::vector<int> blocks_with_saturation;
+
+  for (size_t num_render_channels : {1, 2, 8}) {
+    for (size_t filter_length_blocks : {12, 20, 30}) {
+      for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+        SCOPED_TRACE(ProduceDebugText(delay_samples, filter_length_blocks));
+
+        std::array<float, kBlockSize> e;
+        std::array<float, kBlockSize> y;
+        FftData G;
+
+        RunFilterUpdateTest(5000, delay_samples, num_render_channels,
+                            filter_length_blocks, blocks_with_saturation, &e,
+                            &y, &G);
+
+        // Verify that the shadow filter is able to perform well.
+        // Use different criteria to take overmodelling into account.
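+        // Editor's note (not upstream code): the 12-block case matches the
+        // true echo path length, so near-perfect convergence is expected and
+        // the echo-to-error power ratio must exceed 30 dB (factor 1000); the
+        // overmodelled 20- and 30-block cases only need the error power to
+        // fall below the echo power.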
+ if (filter_length_blocks == 12) { + EXPECT_LT( + 1000 * std::inner_product(e.begin(), e.end(), e.begin(), 0.f), + std::inner_product(y.begin(), y.end(), y.begin(), 0.f)); + } else { + EXPECT_LT(std::inner_product(e.begin(), e.end(), e.begin(), 0.f), + std::inner_product(y.begin(), y.end(), y.begin(), 0.f)); + } + } + } + } +} + +// Verifies that the magnitude of the gain on average decreases for a +// persistently exciting signal. +TEST(ShadowFilterUpdateGain, DecreasingGain) { + for (size_t num_render_channels : {1, 2, 4}) { + for (size_t filter_length_blocks : {12, 20, 30}) { + SCOPED_TRACE(ProduceDebugText(filter_length_blocks)); + std::vector blocks_with_echo_path_changes; + std::vector blocks_with_saturation; + + std::array e; + std::array y; + FftData G_a; + FftData G_b; + FftData G_c; + std::array G_a_power; + std::array G_b_power; + std::array G_c_power; + + RunFilterUpdateTest(100, 65, num_render_channels, filter_length_blocks, + blocks_with_saturation, &e, &y, &G_a); + RunFilterUpdateTest(200, 65, num_render_channels, filter_length_blocks, + blocks_with_saturation, &e, &y, &G_b); + RunFilterUpdateTest(300, 65, num_render_channels, filter_length_blocks, + blocks_with_saturation, &e, &y, &G_c); + + G_a.Spectrum(Aec3Optimization::kNone, G_a_power); + G_b.Spectrum(Aec3Optimization::kNone, G_b_power); + G_c.Spectrum(Aec3Optimization::kNone, G_c_power); + + EXPECT_GT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.), + std::accumulate(G_b_power.begin(), G_b_power.end(), 0.)); + + EXPECT_GT(std::accumulate(G_b_power.begin(), G_b_power.end(), 0.), + std::accumulate(G_c_power.begin(), G_c_power.end(), 0.)); + } + } +} + +// Verifies that the gain is zero when there is saturation. +TEST(ShadowFilterUpdateGain, SaturationBehavior) { + std::vector blocks_with_echo_path_changes; + std::vector blocks_with_saturation; + for (int k = 99; k < 200; ++k) { + blocks_with_saturation.push_back(k); + } + for (size_t num_render_channels : {1, 2, 8}) { + for (size_t filter_length_blocks : {12, 20, 30}) { + SCOPED_TRACE(ProduceDebugText(filter_length_blocks)); + + std::array e; + std::array y; + FftData G_a; + FftData G_a_ref; + G_a_ref.re.fill(0.f); + G_a_ref.im.fill(0.f); + + RunFilterUpdateTest(100, 65, num_render_channels, filter_length_blocks, + blocks_with_saturation, &e, &y, &G_a); + + EXPECT_EQ(G_a_ref.re, G_a.re); + EXPECT_EQ(G_a_ref.im, G_a.im); + } + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/signal_dependent_erle_estimator.cc b/audio_processing/aec3/signal_dependent_erle_estimator.cc new file mode 100644 index 0000000..2505d25 --- /dev/null +++ b/audio_processing/aec3/signal_dependent_erle_estimator.cc @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "audio_processing/aec3/signal_dependent_erle_estimator.h"
+
+#include <algorithm>
+#include <functional>
+#include <numeric>
+
+#include "audio_processing/aec3/spectrum_buffer.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr std::array<size_t, SignalDependentErleEstimator::kSubbands + 1>
+    kBandBoundaries = {1, 8, 16, 24, 32, 48, kFftLengthBy2Plus1};
+
+std::array<size_t, kFftLengthBy2Plus1> FormSubbandMap() {
+  std::array<size_t, kFftLengthBy2Plus1> map_band_to_subband;
+  size_t subband = 1;
+  for (size_t k = 0; k < map_band_to_subband.size(); ++k) {
+    RTC_DCHECK_LT(subband, kBandBoundaries.size());
+    if (k >= kBandBoundaries[subband]) {
+      subband++;
+      RTC_DCHECK_LT(k, kBandBoundaries[subband]);
+    }
+    map_band_to_subband[k] = subband - 1;
+  }
+  return map_band_to_subband;
+}
+
+// Defines the size in blocks of the sections that are used for dividing the
+// linear filter. The sections are split in a non-linear manner so that lower
+// sections, which typically represent the direct path, have a larger
+// resolution than the higher sections, which typically represent more
+// reverberant acoustic paths.
+std::vector<size_t> DefineFilterSectionSizes(size_t delay_headroom_blocks,
+                                             size_t num_blocks,
+                                             size_t num_sections) {
+  size_t filter_length_blocks = num_blocks - delay_headroom_blocks;
+  std::vector<size_t> section_sizes(num_sections);
+  size_t remaining_blocks = filter_length_blocks;
+  size_t remaining_sections = num_sections;
+  size_t estimator_size = 2;
+  size_t idx = 0;
+  while (remaining_sections > 1 &&
+         remaining_blocks > estimator_size * remaining_sections) {
+    RTC_DCHECK_LT(idx, section_sizes.size());
+    section_sizes[idx] = estimator_size;
+    remaining_blocks -= estimator_size;
+    remaining_sections--;
+    estimator_size *= 2;
+    idx++;
+  }
+
+  size_t last_groups_size = remaining_blocks / remaining_sections;
+  for (; idx < num_sections; idx++) {
+    section_sizes[idx] = last_groups_size;
+  }
+  section_sizes[num_sections - 1] +=
+      remaining_blocks - last_groups_size * remaining_sections;
+  return section_sizes;
+}
+
+// Forms the limits in blocks for each filter section. Those sections
+// are used for analyzing the echo estimates and investigating which
+// linear filter sections contribute most to the echo estimate energy.
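+// Illustrative example (values assumed for exposition): with
+// delay_headroom_blocks = 2, num_blocks = 20 and num_sections = 4,
+// DefineFilterSectionSizes() yields the sizes {2, 4, 6, 6}: sizes double
+// while enough of the 18 non-headroom blocks remain, and the leftover blocks
+// are split evenly over the remaining sections. SetSectionsBoundaries() below
+// then turns those sizes into the block boundaries {2, 4, 8, 14, 20}.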
+std::vector SetSectionsBoundaries(size_t delay_headroom_blocks, + size_t num_blocks, + size_t num_sections) { + std::vector estimator_boundaries_blocks(num_sections + 1); + if (estimator_boundaries_blocks.size() == 2) { + estimator_boundaries_blocks[0] = 0; + estimator_boundaries_blocks[1] = num_blocks; + return estimator_boundaries_blocks; + } + RTC_DCHECK_GT(estimator_boundaries_blocks.size(), 2); + const std::vector section_sizes = + DefineFilterSectionSizes(delay_headroom_blocks, num_blocks, + estimator_boundaries_blocks.size() - 1); + + size_t idx = 0; + size_t current_size_block = 0; + RTC_DCHECK_EQ(section_sizes.size() + 1, estimator_boundaries_blocks.size()); + estimator_boundaries_blocks[0] = delay_headroom_blocks; + for (size_t k = delay_headroom_blocks; k < num_blocks; ++k) { + current_size_block++; + if (current_size_block >= section_sizes[idx]) { + idx = idx + 1; + if (idx == section_sizes.size()) { + break; + } + estimator_boundaries_blocks[idx] = k + 1; + current_size_block = 0; + } + } + estimator_boundaries_blocks[section_sizes.size()] = num_blocks; + return estimator_boundaries_blocks; +} + +std::array +SetMaxErleSubbands(float max_erle_l, float max_erle_h, size_t limit_subband_l) { + std::array max_erle; + std::fill(max_erle.begin(), max_erle.begin() + limit_subband_l, max_erle_l); + std::fill(max_erle.begin() + limit_subband_l, max_erle.end(), max_erle_h); + return max_erle; +} + +} // namespace + +SignalDependentErleEstimator::SignalDependentErleEstimator( + const EchoCanceller3Config& config, + size_t num_capture_channels) + : min_erle_(config.erle.min), + num_sections_(config.erle.num_sections), + num_blocks_(config.filter.main.length_blocks), + delay_headroom_blocks_(config.delay.delay_headroom_samples / kBlockSize), + band_to_subband_(FormSubbandMap()), + max_erle_(SetMaxErleSubbands(config.erle.max_l, + config.erle.max_h, + band_to_subband_[kFftLengthBy2 / 2])), + section_boundaries_blocks_(SetSectionsBoundaries(delay_headroom_blocks_, + num_blocks_, + num_sections_)), + erle_(num_capture_channels), + S2_section_accum_( + num_capture_channels, + std::vector>(num_sections_)), + erle_estimators_( + num_capture_channels, + std::vector>(num_sections_)), + erle_ref_(num_capture_channels), + correction_factors_( + num_capture_channels, + std::vector>(num_sections_)), + num_updates_(num_capture_channels), + n_active_sections_(num_capture_channels) { + RTC_DCHECK_LE(num_sections_, num_blocks_); + RTC_DCHECK_GE(num_sections_, 1); + Reset(); +} + +SignalDependentErleEstimator::~SignalDependentErleEstimator() = default; + +void SignalDependentErleEstimator::Reset() { + for (size_t ch = 0; ch < erle_.size(); ++ch) { + erle_[ch].fill(min_erle_); + for (auto& erle_estimator : erle_estimators_[ch]) { + erle_estimator.fill(min_erle_); + } + erle_ref_[ch].fill(min_erle_); + for (auto& factor : correction_factors_[ch]) { + factor.fill(1.0f); + } + num_updates_[ch].fill(0); + n_active_sections_[ch].fill(0); + } +} + +// Updates the Erle estimate by analyzing the current input signals. It takes +// the render buffer and the filter frequency response in order to do an +// estimation of the number of sections of the linear filter that are needed +// for getting the majority of the energy in the echo estimate. Based on that +// number of sections, it updates the erle estimation by introducing a +// correction factor to the erle that is given as an input to this method. 
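+// As a sketch with assumed numbers: if average_erle for a band is 8 and the
+// correction factor selected via the active-section count for its subband is
+// 1.5, the refined estimate becomes SafeClamp(8 * 1.5, min_erle_,
+// max_erle_[subband]) = 12, provided 12 lies within the clamping limits.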
+void SignalDependentErleEstimator::Update(
+    const RenderBuffer& render_buffer,
+    rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+        filter_frequency_responses,
+    rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> average_erle,
+    const std::vector<bool>& converged_filters) {
+  RTC_DCHECK_GT(num_sections_, 1);
+
+  // Gets the number of filter sections that are needed for achieving 90 %
+  // of the power spectrum energy of the echo estimate.
+  ComputeNumberOfActiveFilterSections(render_buffer,
+                                      filter_frequency_responses);
+
+  // Updates the correction factors that are used for correcting the erle and
+  // adapting it to the particular characteristics of the input signal.
+  UpdateCorrectionFactors(X2, Y2, E2, converged_filters);
+
+  // Applies the correction factor to the input erle to get a more refined
+  // erle estimation for the current input signal.
+  for (size_t ch = 0; ch < erle_.size(); ++ch) {
+    for (size_t k = 0; k < kFftLengthBy2; ++k) {
+      RTC_DCHECK_GT(correction_factors_[ch].size(), n_active_sections_[ch][k]);
+      float correction_factor =
+          correction_factors_[ch][n_active_sections_[ch][k]]
+                              [band_to_subband_[k]];
+      erle_[ch][k] = rtc::SafeClamp(average_erle[ch][k] * correction_factor,
+                                    min_erle_, max_erle_[band_to_subband_[k]]);
+    }
+  }
+}
+
+void SignalDependentErleEstimator::Dump(
+    const std::unique_ptr<ApmDataDumper>& data_dumper) const {
+  for (auto& erle : erle_estimators_[0]) {
+    data_dumper->DumpRaw("aec3_all_erle", erle);
+  }
+  data_dumper->DumpRaw("aec3_ref_erle", erle_ref_[0]);
+  for (auto& factor : correction_factors_[0]) {
+    data_dumper->DumpRaw("aec3_erle_correction_factor", factor);
+  }
+}
+
+// Estimates for each band the smallest number of sections in the filter that
+// together constitute 90% of the estimated echo energy.
+void SignalDependentErleEstimator::ComputeNumberOfActiveFilterSections(
+    const RenderBuffer& render_buffer,
+    rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
+        filter_frequency_responses) {
+  RTC_DCHECK_GT(num_sections_, 1);
+  // Computes an approximation of the power spectrum if the filter would have
+  // been limited to a certain number of filter sections.
+  ComputeEchoEstimatePerFilterSection(render_buffer,
+                                      filter_frequency_responses);
+  // For each band, computes the number of filter sections that are needed for
+  // achieving the 90 % energy in the echo estimate.
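+  // For instance (hypothetical numbers): if the cumulative per-section echo
+  // power for a band is {0.5, 0.8, 0.93, 1.0}, the first cumulative value
+  // reaching 90 % of the total is the third one, so the band is marked as
+  // needing three active sections (index 2).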
+  ComputeActiveFilterSections();
+}
+
+void SignalDependentErleEstimator::UpdateCorrectionFactors(
+    rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
+    const std::vector<bool>& converged_filters) {
+  for (size_t ch = 0; ch < converged_filters.size(); ++ch) {
+    if (converged_filters[ch]) {
+      constexpr float kX2BandEnergyThreshold = 44015068.0f;
+      constexpr float kSmthConstantDecreases = 0.1f;
+      constexpr float kSmthConstantIncreases = kSmthConstantDecreases / 2.f;
+      auto subband_powers = [](rtc::ArrayView<const float> power_spectrum,
+                               rtc::ArrayView<float> power_spectrum_subbands) {
+        for (size_t subband = 0; subband < kSubbands; ++subband) {
+          RTC_DCHECK_LE(kBandBoundaries[subband + 1], power_spectrum.size());
+          power_spectrum_subbands[subband] = std::accumulate(
+              power_spectrum.begin() + kBandBoundaries[subband],
+              power_spectrum.begin() + kBandBoundaries[subband + 1], 0.f);
+        }
+      };
+
+      std::array<float, kSubbands> X2_subbands, E2_subbands, Y2_subbands;
+      subband_powers(X2, X2_subbands);
+      subband_powers(E2[ch], E2_subbands);
+      subband_powers(Y2[ch], Y2_subbands);
+      std::array<size_t, kSubbands> idx_subbands;
+      for (size_t subband = 0; subband < kSubbands; ++subband) {
+        // When aggregating the number of active sections in the filter for
+        // the different bands of a subband, we choose to take the minimum of
+        // all of them. As an example, if for one of the bands the direct path
+        // is the main contributor to the final echo estimate, we consider the
+        // direct path to be the main contributor for the whole subband
+        // containing that band as well. That aggregated number of sections is
+        // later used as the identifier of the erle estimator that needs to be
+        // updated.
+        RTC_DCHECK_LE(kBandBoundaries[subband + 1],
+                      n_active_sections_[ch].size());
+        idx_subbands[subband] = *std::min_element(
+            n_active_sections_[ch].begin() + kBandBoundaries[subband],
+            n_active_sections_[ch].begin() + kBandBoundaries[subband + 1]);
+      }
+
+      std::array<float, kSubbands> new_erle;
+      std::array<bool, kSubbands> is_erle_updated;
+      is_erle_updated.fill(false);
+      new_erle.fill(0.f);
+      for (size_t subband = 0; subband < kSubbands; ++subband) {
+        if (X2_subbands[subband] > kX2BandEnergyThreshold &&
+            E2_subbands[subband] > 0) {
+          new_erle[subband] = Y2_subbands[subband] / E2_subbands[subband];
+          RTC_DCHECK_GT(new_erle[subband], 0);
+          is_erle_updated[subband] = true;
+          ++num_updates_[ch][subband];
+        }
+      }
+
+      for (size_t subband = 0; subband < kSubbands; ++subband) {
+        const size_t idx = idx_subbands[subband];
+        RTC_DCHECK_LT(idx, erle_estimators_[ch].size());
+        float alpha = new_erle[subband] > erle_estimators_[ch][idx][subband]
+                          ? kSmthConstantIncreases
+                          : kSmthConstantDecreases;
+        alpha = static_cast<float>(is_erle_updated[subband]) * alpha;
+        erle_estimators_[ch][idx][subband] +=
+            alpha * (new_erle[subband] - erle_estimators_[ch][idx][subband]);
+        erle_estimators_[ch][idx][subband] = rtc::SafeClamp(
+            erle_estimators_[ch][idx][subband], min_erle_, max_erle_[subband]);
+      }
+
+      for (size_t subband = 0; subband < kSubbands; ++subband) {
+        float alpha = new_erle[subband] > erle_ref_[ch][subband]
+                          ?
kSmthConstantIncreases + : kSmthConstantDecreases; + alpha = static_cast(is_erle_updated[subband]) * alpha; + erle_ref_[ch][subband] += + alpha * (new_erle[subband] - erle_ref_[ch][subband]); + erle_ref_[ch][subband] = rtc::SafeClamp(erle_ref_[ch][subband], + min_erle_, max_erle_[subband]); + } + + for (size_t subband = 0; subband < kSubbands; ++subband) { + constexpr int kNumUpdateThr = 50; + if (is_erle_updated[subband] && + num_updates_[ch][subband] > kNumUpdateThr) { + const size_t idx = idx_subbands[subband]; + RTC_DCHECK_GT(erle_ref_[ch][subband], 0.f); + // Computes the ratio between the erle that is updated using all the + // points and the erle that is updated only on signals that share the + // same number of active filter sections. + float new_correction_factor = + erle_estimators_[ch][idx][subband] / erle_ref_[ch][subband]; + + correction_factors_[ch][idx][subband] += + 0.1f * + (new_correction_factor - correction_factors_[ch][idx][subband]); + } + } + } + } +} + +void SignalDependentErleEstimator::ComputeEchoEstimatePerFilterSection( + const RenderBuffer& render_buffer, + rtc::ArrayView>> + filter_frequency_responses) { + const SpectrumBuffer& spectrum_render_buffer = + render_buffer.GetSpectrumBuffer(); + const size_t num_render_channels = spectrum_render_buffer.buffer[0].size(); + const size_t num_capture_channels = S2_section_accum_.size(); + const float one_by_num_render_channels = 1.f / num_render_channels; + + RTC_DCHECK_EQ(S2_section_accum_.size(), filter_frequency_responses.size()); + + for (size_t capture_ch = 0; capture_ch < num_capture_channels; ++capture_ch) { + RTC_DCHECK_EQ(S2_section_accum_[capture_ch].size() + 1, + section_boundaries_blocks_.size()); + size_t idx_render = render_buffer.Position(); + idx_render = spectrum_render_buffer.OffsetIndex( + idx_render, section_boundaries_blocks_[0]); + + for (size_t section = 0; section < num_sections_; ++section) { + std::array X2_section; + std::array H2_section; + X2_section.fill(0.f); + H2_section.fill(0.f); + const size_t block_limit = + std::min(section_boundaries_blocks_[section + 1], + filter_frequency_responses[capture_ch].size()); + for (size_t block = section_boundaries_blocks_[section]; + block < block_limit; ++block) { + for (size_t render_ch = 0; + render_ch < spectrum_render_buffer.buffer[idx_render].size(); + ++render_ch) { + for (size_t k = 0; k < X2_section.size(); ++k) { + X2_section[k] += + spectrum_render_buffer.buffer[idx_render][render_ch][k] * + one_by_num_render_channels; + } + } + std::transform(H2_section.begin(), H2_section.end(), + filter_frequency_responses[capture_ch][block].begin(), + H2_section.begin(), std::plus()); + idx_render = spectrum_render_buffer.IncIndex(idx_render); + } + + std::transform(X2_section.begin(), X2_section.end(), H2_section.begin(), + S2_section_accum_[capture_ch][section].begin(), + std::multiplies()); + } + + for (size_t section = 1; section < num_sections_; ++section) { + std::transform(S2_section_accum_[capture_ch][section - 1].begin(), + S2_section_accum_[capture_ch][section - 1].end(), + S2_section_accum_[capture_ch][section].begin(), + S2_section_accum_[capture_ch][section].begin(), + std::plus()); + } + } +} + +void SignalDependentErleEstimator::ComputeActiveFilterSections() { + for (size_t ch = 0; ch < n_active_sections_.size(); ++ch) { + std::fill(n_active_sections_[ch].begin(), n_active_sections_[ch].end(), 0); + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + size_t section = num_sections_; + float target = 0.9f * 
S2_section_accum_[ch][num_sections_ - 1][k]; + while (section > 0 && S2_section_accum_[ch][section - 1][k] >= target) { + n_active_sections_[ch][k] = --section; + } + } + } +} +} // namespace webrtc diff --git a/audio_processing/aec3/signal_dependent_erle_estimator.h b/audio_processing/aec3/signal_dependent_erle_estimator.h new file mode 100644 index 0000000..b3d3094 --- /dev/null +++ b/audio_processing/aec3/signal_dependent_erle_estimator.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_ + +#include +#include + +#include "rtc_base/array_view.h" +#include "api/echo_canceller3_config.h" +#include "audio_processing/aec3/aec3_common.h" +#include "audio_processing/aec3/render_buffer.h" +#include "audio_processing/logging/apm_data_dumper.h" + +namespace webrtc { + +// This class estimates the dependency of the Erle to the input signal. By +// looking at the input signal, an estimation on whether the current echo +// estimate is due to the direct path or to a more reverberant one is performed. +// Once that estimation is done, it is possible to refine the average Erle that +// this class receive as an input. +class SignalDependentErleEstimator { + public: + SignalDependentErleEstimator(const EchoCanceller3Config& config, + size_t num_capture_channels); + + ~SignalDependentErleEstimator(); + + void Reset(); + + // Returns the Erle per frequency subband. + rtc::ArrayView> Erle() const { + return erle_; + } + + // Updates the Erle estimate. The Erle that is passed as an input is required + // to be an estimation of the average Erle achieved by the linear filter. 
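+  // A typical call sequence looks roughly like this (a sketch, not taken
+  // from the source):
+  //   estimator.Update(render_buffer, H2, X2, Y2, E2, average_erle,
+  //                    converged_filters);
+  //   auto refined_erle = estimator.Erle();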
+ void Update( + const RenderBuffer& render_buffer, + rtc::ArrayView>> + filter_frequency_response, + rtc::ArrayView X2, + rtc::ArrayView> Y2, + rtc::ArrayView> E2, + rtc::ArrayView> average_erle, + const std::vector& converged_filters); + + void Dump(const std::unique_ptr& data_dumper) const; + + static constexpr size_t kSubbands = 6; + + private: + void ComputeNumberOfActiveFilterSections( + const RenderBuffer& render_buffer, + rtc::ArrayView>> + filter_frequency_responses); + + void UpdateCorrectionFactors( + rtc::ArrayView X2, + rtc::ArrayView> Y2, + rtc::ArrayView> E2, + const std::vector& converged_filters); + + void ComputeEchoEstimatePerFilterSection( + const RenderBuffer& render_buffer, + rtc::ArrayView>> + filter_frequency_responses); + + void ComputeActiveFilterSections(); + + const float min_erle_; + const size_t num_sections_; + const size_t num_blocks_; + const size_t delay_headroom_blocks_; + const std::array band_to_subband_; + const std::array max_erle_; + const std::vector section_boundaries_blocks_; + std::vector> erle_; + std::vector>> + S2_section_accum_; + std::vector>> erle_estimators_; + std::vector> erle_ref_; + std::vector>> correction_factors_; + std::vector> num_updates_; + std::vector> n_active_sections_; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_ diff --git a/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc b/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc new file mode 100644 index 0000000..641c915 --- /dev/null +++ b/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h" + +#include +#include +#include + +#include "api/audio/echo_canceller3_config.h" +#include "modules/audio_processing/aec3/render_buffer.h" +#include "modules/audio_processing/aec3/render_delay_buffer.h" +#include "rtc_base/strings/string_builder.h" +#include "test/gtest.h" + +namespace webrtc { + +namespace { + +void GetActiveFrame(std::vector>>* x) { + const std::array frame = { + 7459.88, 17209.6, 17383, 20768.9, 16816.7, 18386.3, 4492.83, 9675.85, + 6665.52, 14808.6, 9342.3, 7483.28, 19261.7, 4145.98, 1622.18, 13475.2, + 7166.32, 6856.61, 21937, 7263.14, 9569.07, 14919, 8413.32, 7551.89, + 7848.65, 6011.27, 13080.6, 15865.2, 12656, 17459.6, 4263.93, 4503.03, + 9311.79, 21095.8, 12657.9, 13906.6, 19267.2, 11338.1, 16828.9, 11501.6, + 11405, 15031.4, 14541.6, 19765.5, 18346.3, 19350.2, 3157.47, 18095.8, + 1743.68, 21328.2, 19727.5, 7295.16, 10332.4, 11055.5, 20107.4, 14708.4, + 12416.2, 16434, 2454.69, 9840.8, 6867.23, 1615.75, 6059.9, 8394.19}; + for (size_t band = 0; band < x->size(); ++band) { + for (size_t channel = 0; channel < (*x)[band].size(); ++channel) { + RTC_DCHECK_GE((*x)[band][channel].size(), frame.size()); + std::copy(frame.begin(), frame.end(), (*x)[band][channel].begin()); + } + } +} + +class TestInputs { + public: + TestInputs(const EchoCanceller3Config& cfg, + size_t num_render_channels, + size_t num_capture_channels); + ~TestInputs(); + const RenderBuffer& GetRenderBuffer() { return *render_buffer_; } + rtc::ArrayView GetX2() { return X2_; } + rtc::ArrayView> GetY2() const { + return Y2_; + } + rtc::ArrayView> GetE2() const { + return E2_; + } + rtc::ArrayView>> + GetH2() const { + return H2_; + } + const std::vector& GetConvergedFilters() const { + return converged_filters_; + } + void Update(); + + private: + void UpdateCurrentPowerSpectra(); + int n_ = 0; + std::unique_ptr render_delay_buffer_; + RenderBuffer* render_buffer_; + std::array X2_; + std::vector> Y2_; + std::vector> E2_; + std::vector>> H2_; + std::vector>> x_; + std::vector converged_filters_; +}; + +TestInputs::TestInputs(const EchoCanceller3Config& cfg, + size_t num_render_channels, + size_t num_capture_channels) + : render_delay_buffer_( + RenderDelayBuffer::Create(cfg, 16000, num_render_channels)), + Y2_(num_capture_channels), + E2_(num_capture_channels), + H2_(num_capture_channels, + std::vector>( + cfg.filter.main.length_blocks)), + x_(1, + std::vector>(num_render_channels, + std::vector(kBlockSize, 0.f))), + converged_filters_(num_capture_channels, true) { + render_delay_buffer_->AlignFromDelay(4); + render_buffer_ = render_delay_buffer_->GetRenderBuffer(); + for (auto& H2_ch : H2_) { + for (auto& H2_p : H2_ch) { + H2_p.fill(0.f); + } + } + for (auto& H2_p : H2_[0]) { + H2_p.fill(1.f); + } +} + +TestInputs::~TestInputs() = default; + +void TestInputs::Update() { + if (n_ % 2 == 0) { + std::fill(x_[0][0].begin(), x_[0][0].end(), 0.f); + } else { + GetActiveFrame(&x_); + } + + render_delay_buffer_->Insert(x_); + render_delay_buffer_->PrepareCaptureProcessing(); + UpdateCurrentPowerSpectra(); + ++n_; +} + +void TestInputs::UpdateCurrentPowerSpectra() { + const SpectrumBuffer& spectrum_render_buffer = + render_buffer_->GetSpectrumBuffer(); + size_t idx = render_buffer_->Position(); + size_t prev_idx = spectrum_render_buffer.OffsetIndex(idx, 1); + auto& X2 = spectrum_render_buffer.buffer[idx][/*channel=*/0]; + auto& X2_prev = spectrum_render_buffer.buffer[prev_idx][/*channel=*/0]; + std::copy(X2.begin(), X2.end(), 
X2_.begin()); + for (size_t ch = 0; ch < Y2_.size(); ++ch) { + RTC_DCHECK_EQ(X2.size(), Y2_[ch].size()); + for (size_t k = 0; k < X2.size(); ++k) { + E2_[ch][k] = 0.01f * X2_prev[k]; + Y2_[ch][k] = X2[k] + E2_[ch][k]; + } + } +} + +} // namespace + +TEST(SignalDependentErleEstimator, SweepSettings) { + for (size_t num_render_channels : {1, 2, 4}) { + for (size_t num_capture_channels : {1, 2, 4}) { + EchoCanceller3Config cfg; + size_t max_length_blocks = 50; + for (size_t blocks = 1; blocks < max_length_blocks; + blocks = blocks + 10) { + for (size_t delay_headroom = 0; delay_headroom < 5; ++delay_headroom) { + for (size_t num_sections = 2; num_sections < max_length_blocks; + ++num_sections) { + cfg.filter.main.length_blocks = blocks; + cfg.filter.main_initial.length_blocks = + std::min(cfg.filter.main_initial.length_blocks, blocks); + cfg.delay.delay_headroom_samples = delay_headroom * kBlockSize; + cfg.erle.num_sections = num_sections; + if (EchoCanceller3Config::Validate(&cfg)) { + SignalDependentErleEstimator s(cfg, num_capture_channels); + std::vector> average_erle( + num_capture_channels); + for (auto& e : average_erle) { + e.fill(cfg.erle.max_l); + } + TestInputs inputs(cfg, num_render_channels, num_capture_channels); + for (size_t n = 0; n < 10; ++n) { + inputs.Update(); + s.Update(inputs.GetRenderBuffer(), inputs.GetH2(), + inputs.GetX2(), inputs.GetY2(), inputs.GetE2(), + average_erle, inputs.GetConvergedFilters()); + } + } + } + } + } + } + } +} + +TEST(SignalDependentErleEstimator, LongerRun) { + for (size_t num_render_channels : {1, 2, 4}) { + for (size_t num_capture_channels : {1, 2, 4}) { + EchoCanceller3Config cfg; + cfg.filter.main.length_blocks = 2; + cfg.filter.main_initial.length_blocks = 1; + cfg.delay.delay_headroom_samples = 0; + cfg.delay.hysteresis_limit_blocks = 0; + cfg.erle.num_sections = 2; + EXPECT_EQ(EchoCanceller3Config::Validate(&cfg), true); + std::vector> average_erle( + num_capture_channels); + for (auto& e : average_erle) { + e.fill(cfg.erle.max_l); + } + SignalDependentErleEstimator s(cfg, num_capture_channels); + TestInputs inputs(cfg, num_render_channels, num_capture_channels); + for (size_t n = 0; n < 200; ++n) { + inputs.Update(); + s.Update(inputs.GetRenderBuffer(), inputs.GetH2(), inputs.GetX2(), + inputs.GetY2(), inputs.GetE2(), average_erle, + inputs.GetConvergedFilters()); + } + } + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/spectrum_buffer.cc b/audio_processing/aec3/spectrum_buffer.cc new file mode 100644 index 0000000..4be13f7 --- /dev/null +++ b/audio_processing/aec3/spectrum_buffer.cc @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "audio_processing/aec3/spectrum_buffer.h" + +#include + +namespace webrtc { + +SpectrumBuffer::SpectrumBuffer(size_t size, size_t num_channels) + : size(static_cast(size)), + buffer(size, + std::vector>(num_channels)) { + for (auto& channel : buffer) { + for (auto& c : channel) { + std::fill(c.begin(), c.end(), 0.f); + } + } +} + +SpectrumBuffer::~SpectrumBuffer() = default; + +} // namespace webrtc diff --git a/audio_processing/aec3/spectrum_buffer.h b/audio_processing/aec3/spectrum_buffer.h new file mode 100644 index 0000000..fc8362a --- /dev/null +++ b/audio_processing/aec3/spectrum_buffer.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_SPECTRUM_BUFFER_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_SPECTRUM_BUFFER_H_ + +#include + +#include +#include + +#include "audio_processing/aec3/aec3_common.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +// Struct for bundling a circular buffer of one dimensional vector objects +// together with the read and write indices. +struct SpectrumBuffer { + SpectrumBuffer(size_t size, size_t num_channels); + ~SpectrumBuffer(); + + int IncIndex(int index) const { + RTC_DCHECK_EQ(buffer.size(), static_cast(size)); + return index < size - 1 ? index + 1 : 0; + } + + int DecIndex(int index) const { + RTC_DCHECK_EQ(buffer.size(), static_cast(size)); + return index > 0 ? index - 1 : size - 1; + } + + int OffsetIndex(int index, int offset) const { + RTC_DCHECK_GE(size, offset); + RTC_DCHECK_EQ(buffer.size(), static_cast(size)); + RTC_DCHECK_GE(size + index + offset, 0); + return (size + index + offset) % size; + } + + void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); } + void IncWriteIndex() { write = IncIndex(write); } + void DecWriteIndex() { write = DecIndex(write); } + void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); } + void IncReadIndex() { read = IncIndex(read); } + void DecReadIndex() { read = DecIndex(read); } + + const int size; + std::vector>> buffer; + int write = 0; + int read = 0; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_SPECTRUM_BUFFER_H_ diff --git a/audio_processing/aec3/stationarity_estimator.cc b/audio_processing/aec3/stationarity_estimator.cc new file mode 100644 index 0000000..7d90a5a --- /dev/null +++ b/audio_processing/aec3/stationarity_estimator.cc @@ -0,0 +1,243 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "audio_processing/aec3/stationarity_estimator.h"
+
+#include <algorithm>
+#include <array>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/spectrum_buffer.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomic_ops.h"
+
+namespace webrtc {
+
+namespace {
+constexpr float kMinNoisePower = 10.f;
+constexpr int kHangoverBlocks = kNumBlocksPerSecond / 20;
+constexpr int kNBlocksAverageInitPhase = 20;
+constexpr int kNBlocksInitialPhase = kNumBlocksPerSecond * 2.;
+}  // namespace
+
+StationarityEstimator::StationarityEstimator()
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))) {
+  Reset();
+}
+
+StationarityEstimator::~StationarityEstimator() = default;
+
+void StationarityEstimator::Reset() {
+  noise_.Reset();
+  hangovers_.fill(0);
+  stationarity_flags_.fill(false);
+}
+
+// Update just the noise estimator. Useful until the delay is known.
+void StationarityEstimator::UpdateNoiseEstimator(
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> spectrum) {
+  noise_.Update(spectrum);
+  data_dumper_->DumpRaw("aec3_stationarity_noise_spectrum", noise_.Spectrum());
+  data_dumper_->DumpRaw("aec3_stationarity_is_block_stationary",
+                        IsBlockStationary());
+}
+
+void StationarityEstimator::UpdateStationarityFlags(
+    const SpectrumBuffer& spectrum_buffer,
+    rtc::ArrayView<const float> render_reverb_contribution_spectrum,
+    int idx_current,
+    int num_lookahead) {
+  std::array<int, kWindowLength> indexes;
+  int num_lookahead_bounded = std::min(num_lookahead, kWindowLength - 1);
+  int idx = idx_current;
+
+  if (num_lookahead_bounded < kWindowLength - 1) {
+    int num_lookback = (kWindowLength - 1) - num_lookahead_bounded;
+    idx = spectrum_buffer.OffsetIndex(idx_current, num_lookback);
+  }
+  // For estimating the stationarity properties of the current frame, the
+  // power for each band is accumulated for several consecutive spectra in the
+  // method EstimateBandStationarity.
+  // In order to avoid getting the indexes of the spectra for every band, with
+  // its associated overhead, those indexes are stored in an array and then
+  // used when the estimation is done.
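+  // Example with assumed values: for kWindowLength = 13, num_lookahead = 4
+  // and idx_current = 10, idx is offset by the 8 lookback steps, and the
+  // loop below fills indexes with 13 consecutive circular-buffer positions,
+  // each entry one DecIndex() step from the previous one.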
+ indexes[0] = idx; + for (size_t k = 1; k < indexes.size(); ++k) { + indexes[k] = spectrum_buffer.DecIndex(indexes[k - 1]); + } + RTC_DCHECK_EQ( + spectrum_buffer.DecIndex(indexes[kWindowLength - 1]), + spectrum_buffer.OffsetIndex(idx_current, -(num_lookahead_bounded + 1))); + + for (size_t k = 0; k < stationarity_flags_.size(); ++k) { + stationarity_flags_[k] = EstimateBandStationarity( + spectrum_buffer, render_reverb_contribution_spectrum, indexes, k); + } + UpdateHangover(); + SmoothStationaryPerFreq(); +} + +bool StationarityEstimator::IsBlockStationary() const { + float acum_stationarity = 0.f; + RTC_DCHECK_EQ(stationarity_flags_.size(), kFftLengthBy2Plus1); + for (size_t band = 0; band < stationarity_flags_.size(); ++band) { + bool st = IsBandStationary(band); + acum_stationarity += static_cast(st); + } + return ((acum_stationarity * (1.f / kFftLengthBy2Plus1)) > 0.75f); +} + +bool StationarityEstimator::EstimateBandStationarity( + const SpectrumBuffer& spectrum_buffer, + rtc::ArrayView average_reverb, + const std::array& indexes, + size_t band) const { + constexpr float kThrStationarity = 10.f; + float acum_power = 0.f; + const int num_render_channels = + static_cast(spectrum_buffer.buffer[0].size()); + const float one_by_num_channels = 1.f / num_render_channels; + for (auto idx : indexes) { + for (int ch = 0; ch < num_render_channels; ++ch) { + acum_power += spectrum_buffer.buffer[idx][ch][band] * one_by_num_channels; + } + } + acum_power += average_reverb[band]; + float noise = kWindowLength * GetStationarityPowerBand(band); + RTC_CHECK_LT(0.f, noise); + bool stationary = acum_power < kThrStationarity * noise; + data_dumper_->DumpRaw("aec3_stationarity_long_ratio", acum_power / noise); + return stationary; +} + +bool StationarityEstimator::AreAllBandsStationary() { + for (auto b : stationarity_flags_) { + if (!b) + return false; + } + return true; +} + +void StationarityEstimator::UpdateHangover() { + bool reduce_hangover = AreAllBandsStationary(); + for (size_t k = 0; k < stationarity_flags_.size(); ++k) { + if (!stationarity_flags_[k]) { + hangovers_[k] = kHangoverBlocks; + } else if (reduce_hangover) { + hangovers_[k] = std::max(hangovers_[k] - 1, 0); + } + } +} + +void StationarityEstimator::SmoothStationaryPerFreq() { + std::array all_ahead_stationary_smooth; + for (size_t k = 1; k < kFftLengthBy2Plus1 - 1; ++k) { + all_ahead_stationary_smooth[k] = stationarity_flags_[k - 1] && + stationarity_flags_[k] && + stationarity_flags_[k + 1]; + } + + all_ahead_stationary_smooth[0] = all_ahead_stationary_smooth[1]; + all_ahead_stationary_smooth[kFftLengthBy2Plus1 - 1] = + all_ahead_stationary_smooth[kFftLengthBy2Plus1 - 2]; + + stationarity_flags_ = all_ahead_stationary_smooth; +} + +int StationarityEstimator::instance_count_ = 0; + +StationarityEstimator::NoiseSpectrum::NoiseSpectrum() { + Reset(); +} + +StationarityEstimator::NoiseSpectrum::~NoiseSpectrum() = default; + +void StationarityEstimator::NoiseSpectrum::Reset() { + block_counter_ = 0; + noise_spectrum_.fill(kMinNoisePower); +} + +void StationarityEstimator::NoiseSpectrum::Update( + rtc::ArrayView> spectrum) { + RTC_DCHECK_LE(1, spectrum[0].size()); + const int num_render_channels = static_cast(spectrum.size()); + + std::array avg_spectrum_data; + rtc::ArrayView avg_spectrum; + if (num_render_channels == 1) { + avg_spectrum = spectrum[0]; + } else { + // For multiple channels, average the channel spectra before passing to the + // noise spectrum estimator. 
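+    // That is, avg_spectrum[k] = (1 / num_render_channels) * sum over ch of
+    // spectrum[ch][k]. Note that the accumulation below starts at k = 1, so
+    // the DC bin keeps the value copied from channel 0.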
+    avg_spectrum = avg_spectrum_data;
+    std::copy(spectrum[0].begin(), spectrum[0].end(),
+              avg_spectrum_data.begin());
+    for (int ch = 1; ch < num_render_channels; ++ch) {
+      for (size_t k = 1; k < kFftLengthBy2Plus1; ++k) {
+        avg_spectrum_data[k] += spectrum[ch][k];
+      }
+    }
+
+    const float one_by_num_channels = 1.f / num_render_channels;
+    for (size_t k = 1; k < kFftLengthBy2Plus1; ++k) {
+      avg_spectrum_data[k] *= one_by_num_channels;
+    }
+  }
+
+  ++block_counter_;
+  float alpha = GetAlpha();
+  for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+    if (block_counter_ <= kNBlocksAverageInitPhase) {
+      noise_spectrum_[k] += (1.f / kNBlocksAverageInitPhase) * avg_spectrum[k];
+    } else {
+      noise_spectrum_[k] =
+          UpdateBandBySmoothing(avg_spectrum[k], noise_spectrum_[k], alpha);
+    }
+  }
+}
+
+float StationarityEstimator::NoiseSpectrum::GetAlpha() const {
+  constexpr float kAlpha = 0.004f;
+  constexpr float kAlphaInit = 0.04f;
+  constexpr float kTiltAlpha = (kAlphaInit - kAlpha) / kNBlocksInitialPhase;
+
+  if (block_counter_ > (kNBlocksInitialPhase + kNBlocksAverageInitPhase)) {
+    return kAlpha;
+  } else {
+    return kAlphaInit -
+           kTiltAlpha * (block_counter_ - kNBlocksAverageInitPhase);
+  }
+}
+
+float StationarityEstimator::NoiseSpectrum::UpdateBandBySmoothing(
+    float power_band,
+    float power_band_noise,
+    float alpha) const {
+  float power_band_noise_updated = power_band_noise;
+  if (power_band_noise < power_band) {
+    RTC_DCHECK_GT(power_band, 0.f);
+    float alpha_inc = alpha * (power_band_noise / power_band);
+    if (block_counter_ > kNBlocksInitialPhase) {
+      if (10.f * power_band_noise < power_band) {
+        alpha_inc *= 0.1f;
+      }
+    }
+    power_band_noise_updated += alpha_inc * (power_band - power_band_noise);
+  } else {
+    power_band_noise_updated += alpha * (power_band - power_band_noise);
+    power_band_noise_updated =
+        std::max(power_band_noise_updated, kMinNoisePower);
+  }
+  return power_band_noise_updated;
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/stationarity_estimator.h b/audio_processing/aec3/stationarity_estimator.h
new file mode 100644
index 0000000..8bd08be
--- /dev/null
+++ b/audio_processing/aec3/stationarity_estimator.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_STATIONARITY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_STATIONARITY_ESTIMATOR_H_
+
+#include <stddef.h>
+
+#include <array>
+#include <memory>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/aec3/aec3_common.h"  // kFftLengthBy2Plus1...
+#include "audio_processing/aec3/reverb_model.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct SpectrumBuffer;
+
+class StationarityEstimator {
+ public:
+  StationarityEstimator();
+  ~StationarityEstimator();
+
+  // Reset the stationarity estimator.
+  void Reset();
+
+  // Update just the noise estimator. Useful until the delay is known.
+  void UpdateNoiseEstimator(
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> spectrum);
+
+  // Update the flag indicating whether the current frame is stationary. For
+  // getting a more robust estimation, it looks at future and/or past frames.
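+  // Expected usage (a sketch; the actual caller, e.g. the echo audibility
+  // logic, lives outside this file):
+  //   estimator.UpdateNoiseEstimator(spectrum);  // while the delay is unknown
+  //   estimator.UpdateStationarityFlags(buffer, reverb, idx, lookahead);
+  //   if (estimator.IsBlockStationary()) { /* treat block as stationary */ }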
+ void UpdateStationarityFlags( + const SpectrumBuffer& spectrum_buffer, + rtc::ArrayView render_reverb_contribution_spectrum, + int idx_current, + int num_lookahead); + + // Returns true if the current band is stationary. + bool IsBandStationary(size_t band) const { + return stationarity_flags_[band] && (hangovers_[band] == 0); + } + + // Returns true if the current block is estimated as stationary. + bool IsBlockStationary() const; + + private: + static constexpr int kWindowLength = 13; + // Returns the power of the stationary noise spectrum at a band. + float GetStationarityPowerBand(size_t k) const { return noise_.Power(k); } + + // Get an estimation of the stationarity for the current band by looking + // at the past/present/future available data. + bool EstimateBandStationarity(const SpectrumBuffer& spectrum_buffer, + rtc::ArrayView average_reverb, + const std::array& indexes, + size_t band) const; + + // True if all bands at the current point are stationary. + bool AreAllBandsStationary(); + + // Update the hangover depending on the stationary status of the current + // frame. + void UpdateHangover(); + + // Smooth the stationarity detection by looking at neighbouring frequency + // bands. + void SmoothStationaryPerFreq(); + + class NoiseSpectrum { + public: + NoiseSpectrum(); + ~NoiseSpectrum(); + + // Reset the noise power spectrum estimate state. + void Reset(); + + // Update the noise power spectrum with a new frame. + void Update( + rtc::ArrayView> spectrum); + + // Get the noise estimation power spectrum. + rtc::ArrayView Spectrum() const { return noise_spectrum_; } + + // Get the noise power spectrum at a certain band. + float Power(size_t band) const { + RTC_DCHECK_LT(band, noise_spectrum_.size()); + return noise_spectrum_[band]; + } + + private: + // Get the update coefficient to be used for the current frame. + float GetAlpha() const; + + // Update the noise power spectrum at a certain band with a new frame. + float UpdateBandBySmoothing(float power_band, + float power_band_noise, + float alpha) const; + std::array noise_spectrum_; + size_t block_counter_; + }; + + static int instance_count_; + std::unique_ptr data_dumper_; + NoiseSpectrum noise_; + std::array hangovers_; + std::array stationarity_flags_; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_STATIONARITY_ESTIMATOR_H_ diff --git a/audio_processing/aec3/subband_erle_estimator.cc b/audio_processing/aec3/subband_erle_estimator.cc new file mode 100644 index 0000000..8bc6cc6 --- /dev/null +++ b/audio_processing/aec3/subband_erle_estimator.cc @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "audio_processing/aec3/subband_erle_estimator.h" + +#include +#include + +#include "rtc_base/checks.h" +#include "rtc_base/numerics/safe_minmax.h" +#include "system_wrappers/include/field_trial.h" + +namespace webrtc { + +namespace { + +constexpr float kX2BandEnergyThreshold = 44015068.0f; +constexpr int kBlocksToHoldErle = 100; +constexpr int kBlocksForOnsetDetection = kBlocksToHoldErle + 150; +constexpr int kPointsToAccumulate = 6; + +std::array SetMaxErleBands(float max_erle_l, + float max_erle_h) { + std::array max_erle; + std::fill(max_erle.begin(), max_erle.begin() + kFftLengthBy2 / 2, max_erle_l); + std::fill(max_erle.begin() + kFftLengthBy2 / 2, max_erle.end(), max_erle_h); + return max_erle; +} + +bool EnableMinErleDuringOnsets() { + return !field_trial::IsEnabled("WebRTC-Aec3MinErleDuringOnsetsKillSwitch"); +} + +} // namespace + +SubbandErleEstimator::SubbandErleEstimator(const EchoCanceller3Config& config, + size_t num_capture_channels) + : use_onset_detection_(config.erle.onset_detection), + min_erle_(config.erle.min), + max_erle_(SetMaxErleBands(config.erle.max_l, config.erle.max_h)), + use_min_erle_during_onsets_(EnableMinErleDuringOnsets()), + accum_spectra_(num_capture_channels), + erle_(num_capture_channels), + erle_onsets_(num_capture_channels), + coming_onset_(num_capture_channels), + hold_counters_(num_capture_channels) { + Reset(); +} + +SubbandErleEstimator::~SubbandErleEstimator() = default; + +void SubbandErleEstimator::Reset() { + for (auto& erle : erle_) { + erle.fill(min_erle_); + } + for (size_t ch = 0; ch < erle_onsets_.size(); ++ch) { + erle_onsets_[ch].fill(min_erle_); + coming_onset_[ch].fill(true); + hold_counters_[ch].fill(0); + } + ResetAccumulatedSpectra(); +} + +void SubbandErleEstimator::Update( + rtc::ArrayView X2, + rtc::ArrayView> Y2, + rtc::ArrayView> E2, + const std::vector& converged_filters) { + + UpdateAccumulatedSpectra(X2, Y2, E2, converged_filters); + UpdateBands(converged_filters); + + if (use_onset_detection_) { + DecreaseErlePerBandForLowRenderSignals(); + } + + for (auto& erle : erle_) { + erle[0] = erle[1]; + erle[kFftLengthBy2] = erle[kFftLengthBy2 - 1]; + } +} + +void SubbandErleEstimator::Dump( + const std::unique_ptr& data_dumper) const { + data_dumper->DumpRaw("aec3_erle_onset", ErleOnsets()[0]); +} + +void SubbandErleEstimator::UpdateBands( + const std::vector& converged_filters) { + const int num_capture_channels = static_cast(accum_spectra_.Y2.size()); + for (int ch = 0; ch < num_capture_channels; ++ch) { + // Note that the use of the converged_filter flag already imposed + // a minimum of the erle that can be estimated as that flag would + // be false if the filter is performing poorly. + if (!converged_filters[ch]) { + continue; + } + + std::array new_erle; + std::array is_erle_updated; + is_erle_updated.fill(false); + + for (size_t k = 1; k < kFftLengthBy2; ++k) { + if (accum_spectra_.num_points[ch] == kPointsToAccumulate && + accum_spectra_.E2[ch][k] > 0.f) { + new_erle[k] = accum_spectra_.Y2[ch][k] / accum_spectra_.E2[ch][k]; + is_erle_updated[k] = true; + } + } + + if (use_onset_detection_) { + for (size_t k = 1; k < kFftLengthBy2; ++k) { + if (is_erle_updated[k] && !accum_spectra_.low_render_energy[ch][k]) { + if (coming_onset_[ch][k]) { + coming_onset_[ch][k] = false; + if (!use_min_erle_during_onsets_) { + float alpha = new_erle[k] < erle_onsets_[ch][k] ? 
0.3f : 0.15f; + erle_onsets_[ch][k] = rtc::SafeClamp( + erle_onsets_[ch][k] + + alpha * (new_erle[k] - erle_onsets_[ch][k]), + min_erle_, max_erle_[k]); + } + } + hold_counters_[ch][k] = kBlocksForOnsetDetection; + } + } + } + + for (size_t k = 1; k < kFftLengthBy2; ++k) { + if (is_erle_updated[k]) { + float alpha = 0.05f; + if (new_erle[k] < erle_[ch][k]) { + alpha = accum_spectra_.low_render_energy[ch][k] ? 0.f : 0.1f; + } + erle_[ch][k] = + rtc::SafeClamp(erle_[ch][k] + alpha * (new_erle[k] - erle_[ch][k]), + min_erle_, max_erle_[k]); + } + } + } +} + +void SubbandErleEstimator::DecreaseErlePerBandForLowRenderSignals() { + const int num_capture_channels = static_cast(accum_spectra_.Y2.size()); + for (int ch = 0; ch < num_capture_channels; ++ch) { + for (size_t k = 1; k < kFftLengthBy2; ++k) { + --hold_counters_[ch][k]; + if (hold_counters_[ch][k] <= + (kBlocksForOnsetDetection - kBlocksToHoldErle)) { + if (erle_[ch][k] > erle_onsets_[ch][k]) { + erle_[ch][k] = std::max(erle_onsets_[ch][k], 0.97f * erle_[ch][k]); + RTC_DCHECK_LE(min_erle_, erle_[ch][k]); + } + if (hold_counters_[ch][k] <= 0) { + coming_onset_[ch][k] = true; + hold_counters_[ch][k] = 0; + } + } + } + } +} + +void SubbandErleEstimator::ResetAccumulatedSpectra() { + for (size_t ch = 0; ch < erle_onsets_.size(); ++ch) { + accum_spectra_.Y2[ch].fill(0.f); + accum_spectra_.E2[ch].fill(0.f); + accum_spectra_.num_points[ch] = 0; + accum_spectra_.low_render_energy[ch].fill(false); + } +} + +void SubbandErleEstimator::UpdateAccumulatedSpectra( + rtc::ArrayView X2, + rtc::ArrayView> Y2, + rtc::ArrayView> E2, + const std::vector& converged_filters) { + auto& st = accum_spectra_; + RTC_DCHECK_EQ(st.E2.size(), E2.size()); + RTC_DCHECK_EQ(st.E2.size(), E2.size()); + const int num_capture_channels = static_cast(Y2.size()); + for (int ch = 0; ch < num_capture_channels; ++ch) { + // Note that the use of the converged_filter flag already imposed + // a minimum of the erle that can be estimated as that flag would + // be false if the filter is performing poorly. + if (!converged_filters[ch]) { + continue; + } + if (st.num_points[ch] == kPointsToAccumulate) { + st.num_points[ch] = 0; + st.Y2[ch].fill(0.f); + st.E2[ch].fill(0.f); + st.low_render_energy[ch].fill(false); + } + std::transform(Y2[ch].begin(), Y2[ch].end(), st.Y2[ch].begin(), + st.Y2[ch].begin(), std::plus()); + std::transform(E2[ch].begin(), E2[ch].end(), st.E2[ch].begin(), + st.E2[ch].begin(), std::plus()); + + for (size_t k = 0; k < X2.size(); ++k) { + st.low_render_energy[ch][k] = + st.low_render_energy[ch][k] || X2[k] < kX2BandEnergyThreshold; + } + + ++st.num_points[ch]; + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/subband_erle_estimator.h b/audio_processing/aec3/subband_erle_estimator.h new file mode 100644 index 0000000..50fae55 --- /dev/null +++ b/audio_processing/aec3/subband_erle_estimator.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_ERLE_ESTIMATOR_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_ERLE_ESTIMATOR_H_ + +#include + +#include +#include +#include + +#include "rtc_base/array_view.h" +#include "api/echo_canceller3_config.h" +#include "audio_processing/aec3/aec3_common.h" +#include "audio_processing/logging/apm_data_dumper.h" + +namespace webrtc { + +// Estimates the echo return loss enhancement for each frequency subband. +class SubbandErleEstimator { + public: + SubbandErleEstimator(const EchoCanceller3Config& config, + size_t num_capture_channels); + ~SubbandErleEstimator(); + + // Resets the ERLE estimator. + void Reset(); + + // Updates the ERLE estimate. + void Update(rtc::ArrayView X2, + rtc::ArrayView> Y2, + rtc::ArrayView> E2, + const std::vector& converged_filters); + + // Returns the ERLE estimate. + rtc::ArrayView> Erle() const { + return erle_; + } + + // Returns the ERLE estimate at onsets (only used for testing). + rtc::ArrayView> ErleOnsets() + const { + return erle_onsets_; + } + + void Dump(const std::unique_ptr& data_dumper) const; + + private: + struct AccumulatedSpectra { + explicit AccumulatedSpectra(size_t num_capture_channels) + : Y2(num_capture_channels), + E2(num_capture_channels), + low_render_energy(num_capture_channels), + num_points(num_capture_channels) {} + std::vector> Y2; + std::vector> E2; + std::vector> low_render_energy; + std::vector num_points; + }; + + void UpdateAccumulatedSpectra( + rtc::ArrayView X2, + rtc::ArrayView> Y2, + rtc::ArrayView> E2, + const std::vector& converged_filters); + + void ResetAccumulatedSpectra(); + + void UpdateBands(const std::vector& converged_filters); + void DecreaseErlePerBandForLowRenderSignals(); + + const bool use_onset_detection_; + const float min_erle_; + const std::array max_erle_; + const bool use_min_erle_during_onsets_; + AccumulatedSpectra accum_spectra_; + std::vector> erle_; + std::vector> erle_onsets_; + std::vector> coming_onset_; + std::vector> hold_counters_; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_ERLE_ESTIMATOR_H_ diff --git a/audio_processing/aec3/subband_nearend_detector.cc b/audio_processing/aec3/subband_nearend_detector.cc new file mode 100644 index 0000000..855c90c --- /dev/null +++ b/audio_processing/aec3/subband_nearend_detector.cc @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "audio_processing/aec3/subband_nearend_detector.h" + +#include + +namespace webrtc { +SubbandNearendDetector::SubbandNearendDetector( + const EchoCanceller3Config::Suppressor::SubbandNearendDetection& config, + size_t num_capture_channels) + : config_(config), + num_capture_channels_(num_capture_channels), + nearend_smoothers_(num_capture_channels_, + aec3::MovingAverage(kFftLengthBy2Plus1, + config_.nearend_average_blocks)), + one_over_subband_length1_( + 1.f / (config_.subband1.high - config_.subband1.low + 1)), + one_over_subband_length2_( + 1.f / (config_.subband2.high - config_.subband2.low + 1)) {} + +void SubbandNearendDetector::Update( + rtc::ArrayView> + nearend_spectrum, + rtc::ArrayView> + residual_echo_spectrum, + rtc::ArrayView> + comfort_noise_spectrum, + bool initial_state) { + nearend_state_ = false; + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + const std::array& noise = + comfort_noise_spectrum[ch]; + std::array nearend; + nearend_smoothers_[ch].Average(nearend_spectrum[ch], nearend); + + // Noise power of the first region. + float noise_power = + std::accumulate(noise.begin() + config_.subband1.low, + noise.begin() + config_.subband1.high + 1, 0.f) * + one_over_subband_length1_; + + // Nearend power of the first region. + float nearend_power_subband1 = + std::accumulate(nearend.begin() + config_.subband1.low, + nearend.begin() + config_.subband1.high + 1, 0.f) * + one_over_subband_length1_; + + // Nearend power of the second region. + float nearend_power_subband2 = + std::accumulate(nearend.begin() + config_.subband2.low, + nearend.begin() + config_.subband2.high + 1, 0.f) * + one_over_subband_length2_; + + // One channel is sufficient to trigger nearend state. + nearend_state_ = + nearend_state_ || + (nearend_power_subband1 < + config_.nearend_threshold * nearend_power_subband2 && + (nearend_power_subband1 > config_.snr_threshold * noise_power)); + } +} +} // namespace webrtc diff --git a/audio_processing/aec3/subband_nearend_detector.h b/audio_processing/aec3/subband_nearend_detector.h new file mode 100644 index 0000000..1bca49a --- /dev/null +++ b/audio_processing/aec3/subband_nearend_detector.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_NEAREND_DETECTOR_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_NEAREND_DETECTOR_H_ + +#include + +#include "rtc_base/array_view.h" +#include "api/echo_canceller3_config.h" +#include "audio_processing/aec3/moving_average.h" +#include "audio_processing/aec3/nearend_detector.h" + +namespace webrtc { +// Class for selecting whether the suppressor is in the nearend or echo state. +class SubbandNearendDetector : public NearendDetector { + public: + SubbandNearendDetector( + const EchoCanceller3Config::Suppressor::SubbandNearendDetection& config, + size_t num_capture_channels); + + // Returns whether the current state is the nearend state. + bool IsNearendState() const override { return nearend_state_; } + + // Updates the state selection based on latest spectral estimates. 
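+  // Sketch of the intended pattern (assumed caller, e.g. the suppressor
+  // logic):
+  //   detector.Update(nearend_spectrum, residual_echo_spectrum,
+  //                   comfort_noise_spectrum, initial_state);
+  //   const bool nearend = detector.IsNearendState();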
+ void Update(rtc::ArrayView> + nearend_spectrum, + rtc::ArrayView> + residual_echo_spectrum, + rtc::ArrayView> + comfort_noise_spectrum, + bool initial_state) override; + + private: + const EchoCanceller3Config::Suppressor::SubbandNearendDetection config_; + const size_t num_capture_channels_; + std::vector nearend_smoothers_; + const float one_over_subband_length1_; + const float one_over_subband_length2_; + bool nearend_state_ = false; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_SUBBAND_NEAREND_DETECTOR_H_ diff --git a/audio_processing/aec3/subtractor.cc b/audio_processing/aec3/subtractor.cc new file mode 100644 index 0000000..b519b96 --- /dev/null +++ b/audio_processing/aec3/subtractor.cc @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/aec3/subtractor.h" + +#include +#include + +#include "rtc_base/array_view.h" +#include "audio_processing/aec3/adaptive_fir_filter_erl.h" +#include "audio_processing/aec3/fft_data.h" +#include "audio_processing/logging/apm_data_dumper.h" +#include "rtc_base/checks.h" +#include "rtc_base/numerics/safe_minmax.h" + +namespace webrtc { + +namespace { + +void PredictionError(const Aec3Fft& fft, + const FftData& S, + rtc::ArrayView y, + std::array* e, + std::array* s) { + std::array tmp; + fft.Ifft(S, &tmp); + constexpr float kScale = 1.0f / kFftLengthBy2; + std::transform(y.begin(), y.end(), tmp.begin() + kFftLengthBy2, e->begin(), + [&](float a, float b) { return a - b * kScale; }); + + if (s) { + for (size_t k = 0; k < s->size(); ++k) { + (*s)[k] = kScale * tmp[k + kFftLengthBy2]; + } + } +} + +void ScaleFilterOutput(rtc::ArrayView y, + float factor, + rtc::ArrayView e, + rtc::ArrayView s) { + RTC_DCHECK_EQ(y.size(), e.size()); + RTC_DCHECK_EQ(y.size(), s.size()); + for (size_t k = 0; k < y.size(); ++k) { + s[k] *= factor; + e[k] = y[k] - s[k]; + } +} + +} // namespace + +Subtractor::Subtractor(const EchoCanceller3Config& config, + size_t num_render_channels, + size_t num_capture_channels, + ApmDataDumper* data_dumper, + Aec3Optimization optimization) + : fft_(), + data_dumper_(data_dumper), + optimization_(optimization), + config_(config), + num_capture_channels_(num_capture_channels), + main_filters_(num_capture_channels_), + shadow_filter_(num_capture_channels_), + main_gains_(num_capture_channels_), + shadow_gains_(num_capture_channels_), + filter_misadjustment_estimators_(num_capture_channels_), + poor_shadow_filter_counters_(num_capture_channels_, 0), + main_frequency_responses_( + num_capture_channels_, + std::vector>( + std::max(config_.filter.main_initial.length_blocks, + config_.filter.main.length_blocks), + std::array())), + main_impulse_responses_( + num_capture_channels_, + std::vector(GetTimeDomainLength(std::max( + config_.filter.main_initial.length_blocks, + config_.filter.main.length_blocks)), + 0.f)) { + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + main_filters_[ch] = std::make_unique( + config_.filter.main.length_blocks, + config_.filter.main_initial.length_blocks, + config.filter.config_change_duration_blocks, num_render_channels, + optimization, data_dumper_); + + shadow_filter_[ch] = 
std::make_unique( + config_.filter.shadow.length_blocks, + config_.filter.shadow_initial.length_blocks, + config.filter.config_change_duration_blocks, num_render_channels, + optimization, data_dumper_); + main_gains_[ch] = std::make_unique( + config_.filter.main_initial, + config_.filter.config_change_duration_blocks); + shadow_gains_[ch] = std::make_unique( + config_.filter.shadow_initial, + config.filter.config_change_duration_blocks); + } + + RTC_DCHECK(data_dumper_); + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + for (auto& H2_k : main_frequency_responses_[ch]) { + H2_k.fill(0.f); + } + } +} + +Subtractor::~Subtractor() = default; + +void Subtractor::HandleEchoPathChange( + const EchoPathVariability& echo_path_variability) { + const auto full_reset = [&]() { + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + main_filters_[ch]->HandleEchoPathChange(); + shadow_filter_[ch]->HandleEchoPathChange(); + main_gains_[ch]->HandleEchoPathChange(echo_path_variability); + shadow_gains_[ch]->HandleEchoPathChange(); + main_gains_[ch]->SetConfig(config_.filter.main_initial, true); + shadow_gains_[ch]->SetConfig(config_.filter.shadow_initial, true); + main_filters_[ch]->SetSizePartitions( + config_.filter.main_initial.length_blocks, true); + shadow_filter_[ch]->SetSizePartitions( + config_.filter.shadow_initial.length_blocks, true); + } + }; + + if (echo_path_variability.delay_change != + EchoPathVariability::DelayAdjustment::kNone) { + full_reset(); + } + + if (echo_path_variability.gain_change) { + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + main_gains_[ch]->HandleEchoPathChange(echo_path_variability); + } + } +} + +void Subtractor::ExitInitialState() { + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + main_gains_[ch]->SetConfig(config_.filter.main, false); + shadow_gains_[ch]->SetConfig(config_.filter.shadow, false); + main_filters_[ch]->SetSizePartitions(config_.filter.main.length_blocks, + false); + shadow_filter_[ch]->SetSizePartitions(config_.filter.shadow.length_blocks, + false); + } +} + +void Subtractor::Process(const RenderBuffer& render_buffer, + const std::vector>& capture, + const RenderSignalAnalyzer& render_signal_analyzer, + const AecState& aec_state, + rtc::ArrayView outputs) { + RTC_DCHECK_EQ(num_capture_channels_, capture.size()); + + // Compute the render powers. + const bool same_filter_sizes = + main_filters_[0]->SizePartitions() == shadow_filter_[0]->SizePartitions(); + std::array X2_main; + std::array X2_shadow_data; + auto& X2_shadow = same_filter_sizes ? 
X2_main : X2_shadow_data; + if (same_filter_sizes) { + render_buffer.SpectralSum(main_filters_[0]->SizePartitions(), &X2_main); + } else if (main_filters_[0]->SizePartitions() > + shadow_filter_[0]->SizePartitions()) { + render_buffer.SpectralSums(shadow_filter_[0]->SizePartitions(), + main_filters_[0]->SizePartitions(), &X2_shadow, + &X2_main); + } else { + render_buffer.SpectralSums(main_filters_[0]->SizePartitions(), + shadow_filter_[0]->SizePartitions(), &X2_main, + &X2_shadow); + } + + // Process all capture channels + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + RTC_DCHECK_EQ(kBlockSize, capture[ch].size()); + SubtractorOutput& output = outputs[ch]; + rtc::ArrayView y = capture[ch]; + FftData& E_main = output.E_main; + FftData E_shadow; + std::array& e_main = output.e_main; + std::array& e_shadow = output.e_shadow; + + FftData S; + FftData& G = S; + + main_filters_[ch]->Filter(render_buffer, &S); + PredictionError(fft_, S, y, &e_main, &output.s_main); + + shadow_filter_[ch]->Filter(render_buffer, &S); + PredictionError(fft_, S, y, &e_shadow, &output.s_shadow); + + output.ComputeMetrics(y); + + // Adjust the filter if needed. + bool main_filters_adjusted = false; + filter_misadjustment_estimators_[ch].Update(output); + if (filter_misadjustment_estimators_[ch].IsAdjustmentNeeded()) { + float scale = filter_misadjustment_estimators_[ch].GetMisadjustment(); + main_filters_[ch]->ScaleFilter(scale); + for (auto& h_k : main_impulse_responses_[ch]) { + h_k *= scale; + } + ScaleFilterOutput(y, scale, e_main, output.s_main); + filter_misadjustment_estimators_[ch].Reset(); + main_filters_adjusted = true; + } + + // Compute the FFts of the main and shadow filter outputs. + fft_.ZeroPaddedFft(e_main, Aec3Fft::Window::kHanning, &E_main); + fft_.ZeroPaddedFft(e_shadow, Aec3Fft::Window::kHanning, &E_shadow); + + // Compute spectra for future use. + E_shadow.Spectrum(optimization_, output.E2_shadow); + E_main.Spectrum(optimization_, output.E2_main); + + if (!main_filters_adjusted) { + std::array erl; + ComputeErl(optimization_, main_frequency_responses_[ch], erl); + main_gains_[ch]->Compute(X2_main, render_signal_analyzer, output, erl, + main_filters_[ch]->SizePartitions(), + aec_state.SaturatedCapture(), &G); + } else { + G.re.fill(0.f); + G.im.fill(0.f); + } + + main_filters_[ch]->Adapt(render_buffer, G, &main_impulse_responses_[ch]); + main_filters_[ch]->ComputeFrequencyResponse(&main_frequency_responses_[ch]); + + if (ch == 0) { + data_dumper_->DumpRaw("aec3_subtractor_G_main", G.re); + data_dumper_->DumpRaw("aec3_subtractor_G_main", G.im); + } + + // Update the shadow filter. + poor_shadow_filter_counters_[ch] = + output.e2_main < output.e2_shadow ? 
poor_shadow_filter_counters_[ch] + 1 + : 0; + if (poor_shadow_filter_counters_[ch] < 5) { + shadow_gains_[ch]->Compute(X2_shadow, render_signal_analyzer, E_shadow, + shadow_filter_[ch]->SizePartitions(), + aec_state.SaturatedCapture(), &G); + } else { + poor_shadow_filter_counters_[ch] = 0; + shadow_filter_[ch]->SetFilter(main_filters_[ch]->SizePartitions(), + main_filters_[ch]->GetFilter()); + shadow_gains_[ch]->Compute(X2_shadow, render_signal_analyzer, E_main, + shadow_filter_[ch]->SizePartitions(), + aec_state.SaturatedCapture(), &G); + } + + shadow_filter_[ch]->Adapt(render_buffer, G); + if (ch == 0) { + data_dumper_->DumpRaw("aec3_subtractor_G_shadow", G.re); + data_dumper_->DumpRaw("aec3_subtractor_G_shadow", G.im); + filter_misadjustment_estimators_[ch].Dump(data_dumper_); + DumpFilters(); + } + + std::for_each(e_main.begin(), e_main.end(), + [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); }); + + if (ch == 0) { + data_dumper_->DumpWav("aec3_main_filters_output", kBlockSize, &e_main[0], + 16000, 1); + data_dumper_->DumpWav("aec3_shadow_filter_output", kBlockSize, + &e_shadow[0], 16000, 1); + } + } +} + +void Subtractor::FilterMisadjustmentEstimator::Update( + const SubtractorOutput& output) { + e2_acum_ += output.e2_main; + y2_acum_ += output.y2; + + if (++n_blocks_acum_ == n_blocks_) { + if (y2_acum_ > n_blocks_ * 200.f * 200.f * kBlockSize) { + float update = (e2_acum_ / y2_acum_); + if (e2_acum_ > n_blocks_ * 7500.f * 7500.f * kBlockSize) { + // Duration equal to blockSizeMs * n_blocks_ * 4. + overhang_ = 4; + } else { + overhang_ = std::max(overhang_ - 1, 0); + } + + if ((update < inv_misadjustment_) || (overhang_ > 0)) { + inv_misadjustment_ += 0.1f * (update - inv_misadjustment_); + } + } + e2_acum_ = 0.f; + y2_acum_ = 0.f; + n_blocks_acum_ = 0; + } +} + +void Subtractor::FilterMisadjustmentEstimator::Reset() { + e2_acum_ = 0.f; + y2_acum_ = 0.f; + n_blocks_acum_ = 0; + inv_misadjustment_ = 0.f; + overhang_ = 0.f; +} + +void Subtractor::FilterMisadjustmentEstimator::Dump( + ApmDataDumper* data_dumper) const { + data_dumper->DumpRaw("aec3_inv_misadjustment_factor", inv_misadjustment_); +} + +} // namespace webrtc diff --git a/audio_processing/aec3/subtractor.h b/audio_processing/aec3/subtractor.h new file mode 100644 index 0000000..c8b004f --- /dev/null +++ b/audio_processing/aec3/subtractor.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_H_
+
+#include <math.h>
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/adaptive_fir_filter.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/aec3_fft.h"
+#include "audio_processing/aec3/aec_state.h"
+#include "audio_processing/aec3/echo_path_variability.h"
+#include "audio_processing/aec3/main_filter_update_gain.h"
+#include "audio_processing/aec3/render_buffer.h"
+#include "audio_processing/aec3/render_signal_analyzer.h"
+#include "audio_processing/aec3/shadow_filter_update_gain.h"
+#include "audio_processing/aec3/subtractor_output.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Provides linear echo cancellation functionality.
+class Subtractor {
+ public:
+  Subtractor(const EchoCanceller3Config& config,
+             size_t num_render_channels,
+             size_t num_capture_channels,
+             ApmDataDumper* data_dumper,
+             Aec3Optimization optimization);
+  ~Subtractor();
+  Subtractor(const Subtractor&) = delete;
+  Subtractor& operator=(const Subtractor&) = delete;
+
+  // Performs the echo subtraction.
+  void Process(const RenderBuffer& render_buffer,
+               const std::vector<std::vector<float>>& capture,
+               const RenderSignalAnalyzer& render_signal_analyzer,
+               const AecState& aec_state,
+               rtc::ArrayView<SubtractorOutput> outputs);
+
+  void HandleEchoPathChange(const EchoPathVariability& echo_path_variability);
+
+  // Exits the initial state.
+  void ExitInitialState();
+
+  // Returns the block-wise frequency responses for the main adaptive filters.
+  const std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>&
+  FilterFrequencyResponses() const {
+    return main_frequency_responses_;
+  }
+
+  // Returns the estimates of the impulse responses for the main adaptive
+  // filters.
+  const std::vector<std::vector<float>>& FilterImpulseResponses() const {
+    return main_impulse_responses_;
+  }
+
+  void DumpFilters() {
+    data_dumper_->DumpRaw(
+        "aec3_subtractor_h_main",
+        rtc::ArrayView<const float>(
+            main_impulse_responses_[0].data(),
+            GetTimeDomainLength(
+                main_filters_[0]->max_filter_size_partitions())));
+
+    main_filters_[0]->DumpFilter("aec3_subtractor_H_main");
+    shadow_filter_[0]->DumpFilter("aec3_subtractor_H_shadow");
+  }
+
+ private:
+  class FilterMisadjustmentEstimator {
+   public:
+    FilterMisadjustmentEstimator() = default;
+    ~FilterMisadjustmentEstimator() = default;
+    // Updates the misadjustment estimator.
+    void Update(const SubtractorOutput& output);
+    // Returns a recommended scale for the filter so that the prediction
+    // error energy gets closer to the energy that is seen at the microphone
+    // input.
+    float GetMisadjustment() const {
+      RTC_DCHECK_GT(inv_misadjustment_, 0.0f);
+      // It is not aiming to adjust all the estimated mismatch. Instead,
+      // it adjusts half of that estimated mismatch.
+      return 2.f / sqrtf(inv_misadjustment_);
+    }
+    // Returns true if the prediction error energy is significantly larger
+    // than the microphone signal energy and, therefore, an adjustment is
+    // recommended.
+    bool IsAdjustmentNeeded() const { return inv_misadjustment_ > 10.f; }
+    void Reset();
+    void Dump(ApmDataDumper* data_dumper) const;
+
+   private:
+    const int n_blocks_ = 4;
+    int n_blocks_acum_ = 0;
+    float e2_acum_ = 0.f;
+    float y2_acum_ = 0.f;
+    float inv_misadjustment_ = 0.f;
+    int overhang_ = 0;
+  };
+
+  const Aec3Fft fft_;
+  ApmDataDumper* data_dumper_;
+  const Aec3Optimization optimization_;
+  const EchoCanceller3Config config_;
+  const size_t num_capture_channels_;
+
+  std::vector<std::unique_ptr<AdaptiveFirFilter>> main_filters_;
+  std::vector<std::unique_ptr<AdaptiveFirFilter>> shadow_filter_;
+  std::vector<std::unique_ptr<MainFilterUpdateGain>> main_gains_;
+  std::vector<std::unique_ptr<ShadowFilterUpdateGain>> shadow_gains_;
+  std::vector<FilterMisadjustmentEstimator> filter_misadjustment_estimators_;
+  std::vector<size_t> poor_shadow_filter_counters_;
+  std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>>
+      main_frequency_responses_;
+  std::vector<std::vector<float>> main_impulse_responses_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_H_
diff --git a/audio_processing/aec3/subtractor_output.cc b/audio_processing/aec3/subtractor_output.cc
new file mode 100644
index 0000000..0759f16
--- /dev/null
+++ b/audio_processing/aec3/subtractor_output.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/subtractor_output.h"
+
+#include <algorithm>
+#include <numeric>
+
+namespace webrtc {
+
+SubtractorOutput::SubtractorOutput() = default;
+SubtractorOutput::~SubtractorOutput() = default;
+
+void SubtractorOutput::Reset() {
+  s_main.fill(0.f);
+  s_shadow.fill(0.f);
+  e_main.fill(0.f);
+  e_shadow.fill(0.f);
+  E_main.re.fill(0.f);
+  E_main.im.fill(0.f);
+  E2_main.fill(0.f);
+  E2_shadow.fill(0.f);
+  e2_main = 0.f;
+  e2_shadow = 0.f;
+  s2_main = 0.f;
+  s2_shadow = 0.f;
+  y2 = 0.f;
+}
+
+void SubtractorOutput::ComputeMetrics(rtc::ArrayView<const float> y) {
+  const auto sum_of_squares = [](float a, float b) { return a + b * b; };
+  y2 = std::accumulate(y.begin(), y.end(), 0.f, sum_of_squares);
+  e2_main = std::accumulate(e_main.begin(), e_main.end(), 0.f, sum_of_squares);
+  e2_shadow =
+      std::accumulate(e_shadow.begin(), e_shadow.end(), 0.f, sum_of_squares);
+  s2_main = std::accumulate(s_main.begin(), s_main.end(), 0.f, sum_of_squares);
+  s2_shadow =
+      std::accumulate(s_shadow.begin(), s_shadow.end(), 0.f, sum_of_squares);
+
+  s_main_max_abs = *std::max_element(s_main.begin(), s_main.end());
+  s_main_max_abs = std::max(
+      s_main_max_abs, -(*std::min_element(s_main.begin(), s_main.end())));
+
+  s_shadow_max_abs = *std::max_element(s_shadow.begin(), s_shadow.end());
+  s_shadow_max_abs = std::max(
+      s_shadow_max_abs,
+      -(*std::min_element(s_shadow.begin(), s_shadow.end())));
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/subtractor_output.h b/audio_processing/aec3/subtractor_output.h
new file mode 100644
index 0000000..fc815c8
--- /dev/null
+++ b/audio_processing/aec3/subtractor_output.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_H_ + +#include + +#include "rtc_base/array_view.h" +#include "audio_processing/aec3/aec3_common.h" +#include "audio_processing/aec3/fft_data.h" + +namespace webrtc { + +// Stores the values being returned from the echo subtractor for a single +// capture channel. +struct SubtractorOutput { + SubtractorOutput(); + ~SubtractorOutput(); + + std::array s_main; + std::array s_shadow; + std::array e_main; + std::array e_shadow; + FftData E_main; + std::array E2_main; + std::array E2_shadow; + float s2_main = 0.f; + float s2_shadow = 0.f; + float e2_main = 0.f; + float e2_shadow = 0.f; + float y2 = 0.f; + float s_main_max_abs = 0.f; + float s_shadow_max_abs = 0.f; + + // Reset the struct content. + void Reset(); + + // Updates the powers of the signals. + void ComputeMetrics(rtc::ArrayView y); +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_H_ diff --git a/audio_processing/aec3/subtractor_output_analyzer.cc b/audio_processing/aec3/subtractor_output_analyzer.cc new file mode 100644 index 0000000..046f8bf --- /dev/null +++ b/audio_processing/aec3/subtractor_output_analyzer.cc @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/aec3/subtractor_output_analyzer.h" + +#include + +#include "audio_processing/aec3/aec3_common.h" + +namespace webrtc { + +SubtractorOutputAnalyzer::SubtractorOutputAnalyzer(size_t num_capture_channels) + : filters_converged_(num_capture_channels, false) {} + +void SubtractorOutputAnalyzer::Update( + rtc::ArrayView subtractor_output, + bool* any_filter_converged, + bool* all_filters_diverged) { + RTC_DCHECK(any_filter_converged); + RTC_DCHECK(all_filters_diverged); + RTC_DCHECK_EQ(subtractor_output.size(), filters_converged_.size()); + + *any_filter_converged = false; + *all_filters_diverged = true; + + for (size_t ch = 0; ch < subtractor_output.size(); ++ch) { + const float y2 = subtractor_output[ch].y2; + const float e2_main = subtractor_output[ch].e2_main; + const float e2_shadow = subtractor_output[ch].e2_shadow; + + constexpr float kConvergenceThreshold = 50 * 50 * kBlockSize; + bool main_filter_converged = + e2_main < 0.5f * y2 && y2 > kConvergenceThreshold; + bool shadow_filter_converged = + e2_shadow < 0.05f * y2 && y2 > kConvergenceThreshold; + float min_e2 = std::min(e2_main, e2_shadow); + bool filter_diverged = min_e2 > 1.5f * y2 && y2 > 30.f * 30.f * kBlockSize; + filters_converged_[ch] = main_filter_converged || shadow_filter_converged; + + *any_filter_converged = *any_filter_converged || filters_converged_[ch]; + *all_filters_diverged = *all_filters_diverged && filter_diverged; + } +} + +void SubtractorOutputAnalyzer::HandleEchoPathChange() { + std::fill(filters_converged_.begin(), filters_converged_.end(), false); +} + +} // namespace webrtc diff --git a/audio_processing/aec3/subtractor_output_analyzer.h b/audio_processing/aec3/subtractor_output_analyzer.h new file mode 100644 index 0000000..5557697 --- /dev/null +++ b/audio_processing/aec3/subtractor_output_analyzer.h @@ -0,0 +1,44 @@ +/* + * 
Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_ANALYZER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_ANALYZER_H_
+
+#include <vector>
+
+#include "audio_processing/aec3/subtractor_output.h"
+
+namespace webrtc {
+
+// Class for analyzing the properties of the subtractor output.
+class SubtractorOutputAnalyzer {
+ public:
+  explicit SubtractorOutputAnalyzer(size_t num_capture_channels);
+  ~SubtractorOutputAnalyzer() = default;
+
+  // Analyses the subtractor output.
+  void Update(rtc::ArrayView<const SubtractorOutput> subtractor_output,
+              bool* any_filter_converged,
+              bool* all_filters_diverged);
+
+  const std::vector<bool>& ConvergedFilters() const {
+    return filters_converged_;
+  }
+
+  // Handles echo path changes.
+  void HandleEchoPathChange();
+
+ private:
+  std::vector<bool> filters_converged_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_ANALYZER_H_
diff --git a/audio_processing/aec3/subtractor_unittest.cc b/audio_processing/aec3/subtractor_unittest.cc
new file mode 100644
index 0000000..507d70c
--- /dev/null
+++ b/audio_processing/aec3/subtractor_unittest.cc
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */ + +#include "modules/audio_processing/aec3/subtractor.h" + +#include +#include +#include +#include + +#include "modules/audio_processing/aec3/aec_state.h" +#include "modules/audio_processing/aec3/render_delay_buffer.h" +#include "modules/audio_processing/test/echo_canceller_test_tools.h" +#include "modules/audio_processing/utility/cascaded_biquad_filter.h" +#include "rtc_base/random.h" +#include "rtc_base/strings/string_builder.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +std::vector RunSubtractorTest( + size_t num_render_channels, + size_t num_capture_channels, + int num_blocks_to_process, + int delay_samples, + int main_filter_length_blocks, + int shadow_filter_length_blocks, + bool uncorrelated_inputs, + const std::vector& blocks_with_echo_path_changes) { + ApmDataDumper data_dumper(42); + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + EchoCanceller3Config config; + config.filter.main.length_blocks = main_filter_length_blocks; + config.filter.shadow.length_blocks = shadow_filter_length_blocks; + + Subtractor subtractor(config, num_render_channels, num_capture_channels, + &data_dumper, DetectOptimization()); + absl::optional delay_estimate; + std::vector>> x( + kNumBands, std::vector>( + num_render_channels, std::vector(kBlockSize, 0.f))); + std::vector> y(num_capture_channels, + std::vector(kBlockSize, 0.f)); + std::array x_old; + std::vector output(num_capture_channels); + config.delay.default_delay = 1; + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels)); + RenderSignalAnalyzer render_signal_analyzer(config); + Random random_generator(42U); + Aec3Fft fft; + std::vector> Y2(num_capture_channels); + std::vector> E2_main( + num_capture_channels); + std::array E2_shadow; + AecState aec_state(config, num_capture_channels); + x_old.fill(0.f); + for (auto& Y2_ch : Y2) { + Y2_ch.fill(0.f); + } + for (auto& E2_main_ch : E2_main) { + E2_main_ch.fill(0.f); + } + E2_shadow.fill(0.f); + + std::vector>>> delay_buffer( + num_capture_channels); + for (size_t capture_ch = 0; capture_ch < num_capture_channels; ++capture_ch) { + delay_buffer[capture_ch].resize(num_render_channels); + for (size_t render_ch = 0; render_ch < num_render_channels; ++render_ch) { + delay_buffer[capture_ch][render_ch] = + std::make_unique>(delay_samples); + } + } + + // [B,A] = butter(2,100/8000,'high') + constexpr CascadedBiQuadFilter::BiQuadCoefficients + kHighPassFilterCoefficients = {{0.97261f, -1.94523f, 0.97261f}, + {-1.94448f, 0.94598f}}; + std::vector> x_hp_filter( + num_render_channels); + for (size_t ch = 0; ch < num_render_channels; ++ch) { + x_hp_filter[ch] = + std::make_unique(kHighPassFilterCoefficients, 1); + } + std::vector> y_hp_filter( + num_capture_channels); + for (size_t ch = 0; ch < num_capture_channels; ++ch) { + y_hp_filter[ch] = + std::make_unique(kHighPassFilterCoefficients, 1); + } + + for (int k = 0; k < num_blocks_to_process; ++k) { + for (size_t render_ch = 0; render_ch < num_render_channels; ++render_ch) { + RandomizeSampleVector(&random_generator, x[0][render_ch]); + } + if (uncorrelated_inputs) { + for (size_t capture_ch = 0; capture_ch < num_capture_channels; + ++capture_ch) { + RandomizeSampleVector(&random_generator, y[capture_ch]); + } + } else { + for (size_t capture_ch = 0; capture_ch < num_capture_channels; + ++capture_ch) { + for (size_t render_ch = 0; render_ch < num_render_channels; + ++render_ch) { + std::array y_channel; + 
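+          // Synthesize the echo: delay each render channel and mix the
+          // delayed channels into the capture channel with gain
+          // 1 / num_render_channels. Note that the accumulation loop below
+          // is bounded by y.size(), which counts capture channels;
+          // y[capture_ch].size() looks like the intended bound.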
delay_buffer[capture_ch][render_ch]->Delay(x[0][render_ch], + y_channel); + for (size_t k = 0; k < y.size(); ++k) { + y[capture_ch][k] += y_channel[k] / num_render_channels; + } + } + } + } + for (size_t ch = 0; ch < num_render_channels; ++ch) { + x_hp_filter[ch]->Process(x[0][ch]); + } + for (size_t ch = 0; ch < num_capture_channels; ++ch) { + y_hp_filter[ch]->Process(y[ch]); + } + + render_delay_buffer->Insert(x); + if (k == 0) { + render_delay_buffer->Reset(); + } + render_delay_buffer->PrepareCaptureProcessing(); + render_signal_analyzer.Update(*render_delay_buffer->GetRenderBuffer(), + aec_state.MinDirectPathFilterDelay()); + + // Handle echo path changes. + if (std::find(blocks_with_echo_path_changes.begin(), + blocks_with_echo_path_changes.end(), + k) != blocks_with_echo_path_changes.end()) { + subtractor.HandleEchoPathChange(EchoPathVariability( + true, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, + false)); + } + subtractor.Process(*render_delay_buffer->GetRenderBuffer(), y, + render_signal_analyzer, aec_state, output); + + aec_state.HandleEchoPathChange(EchoPathVariability( + false, EchoPathVariability::DelayAdjustment::kNone, false)); + aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponses(), + subtractor.FilterImpulseResponses(), + *render_delay_buffer->GetRenderBuffer(), E2_main, Y2, + output); + } + + std::vector results(num_capture_channels); + for (size_t ch = 0; ch < num_capture_channels; ++ch) { + const float output_power = + std::inner_product(output[ch].e_main.begin(), output[ch].e_main.end(), + output[ch].e_main.begin(), 0.f); + const float y_power = + std::inner_product(y[ch].begin(), y[ch].end(), y[ch].begin(), 0.f); + if (y_power == 0.f) { + ADD_FAILURE(); + results[ch] = -1.f; + } + results[ch] = output_power / y_power; + } + return results; +} + +std::string ProduceDebugText(size_t num_render_channels, + size_t num_capture_channels, + size_t delay, + int filter_length_blocks) { + rtc::StringBuilder ss; + ss << "delay: " << delay << ", "; + ss << "filter_length_blocks:" << filter_length_blocks << ", "; + ss << "num_render_channels:" << num_render_channels << ", "; + ss << "num_capture_channels:" << num_capture_channels; + return ss.Release(); +} + +} // namespace + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) + +// Verifies that the check for non data dumper works. +TEST(Subtractor, NullDataDumper) { + EXPECT_DEATH( + Subtractor(EchoCanceller3Config(), 1, 1, nullptr, DetectOptimization()), + ""); +} + +// Verifies the check for the capture signal size. +TEST(Subtractor, WrongCaptureSize) { + ApmDataDumper data_dumper(42); + EchoCanceller3Config config; + Subtractor subtractor(config, 1, 1, &data_dumper, DetectOptimization()); + std::unique_ptr render_delay_buffer( + RenderDelayBuffer::Create(config, 48000, 1)); + RenderSignalAnalyzer render_signal_analyzer(config); + std::vector> y(1, std::vector(kBlockSize - 1, 0.f)); + std::array output; + + EXPECT_DEATH( + subtractor.Process(*render_delay_buffer->GetRenderBuffer(), y, + render_signal_analyzer, AecState(config, 1), output), + ""); +} + +#endif + +// Verifies that the subtractor is able to converge on correlated data. 
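+// Convergence is measured as the ratio between the main-filter prediction
+// error power and the capture power after 2500 blocks; a ratio below 0.1
+// corresponds to at least 10 dB of echo attenuation.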
+TEST(Subtractor, Convergence) { + std::vector blocks_with_echo_path_changes; + for (size_t filter_length_blocks : {12, 20, 30}) { + for (size_t delay_samples : {0, 64, 150, 200, 301}) { + SCOPED_TRACE(ProduceDebugText(1, 1, delay_samples, filter_length_blocks)); + std::vector echo_to_nearend_powers = RunSubtractorTest( + 1, 1, 2500, delay_samples, filter_length_blocks, filter_length_blocks, + false, blocks_with_echo_path_changes); + + for (float echo_to_nearend_power : echo_to_nearend_powers) { + EXPECT_GT(0.1f, echo_to_nearend_power); + } + } + } +} + +// Verifies that the subtractor is able to converge on correlated data. +TEST(Subtractor, ConvergenceMultiChannel) { + std::vector blocks_with_echo_path_changes; + for (size_t num_render_channels : {1, 2, 4, 8}) { + for (size_t num_capture_channels : {1, 2, 4}) { + SCOPED_TRACE( + ProduceDebugText(num_render_channels, num_render_channels, 64, 20)); + size_t num_blocks_to_process = 2500 * num_render_channels; + std::vector echo_to_nearend_powers = RunSubtractorTest( + num_render_channels, num_capture_channels, num_blocks_to_process, 64, + 20, 20, false, blocks_with_echo_path_changes); + + for (float echo_to_nearend_power : echo_to_nearend_powers) { + EXPECT_GT(0.1f, echo_to_nearend_power); + } + } + } +} + +// Verifies that the subtractor is able to handle the case when the main filter +// is longer than the shadow filter. +TEST(Subtractor, MainFilterLongerThanShadowFilter) { + std::vector blocks_with_echo_path_changes; + std::vector echo_to_nearend_powers = RunSubtractorTest( + 1, 1, 400, 64, 20, 15, false, blocks_with_echo_path_changes); + for (float echo_to_nearend_power : echo_to_nearend_powers) { + EXPECT_GT(0.5f, echo_to_nearend_power); + } +} + +// Verifies that the subtractor is able to handle the case when the shadow +// filter is longer than the main filter. +TEST(Subtractor, ShadowFilterLongerThanMainFilter) { + std::vector blocks_with_echo_path_changes; + std::vector echo_to_nearend_powers = RunSubtractorTest( + 1, 1, 400, 64, 15, 20, false, blocks_with_echo_path_changes); + for (float echo_to_nearend_power : echo_to_nearend_powers) { + EXPECT_GT(0.5f, echo_to_nearend_power); + } +} + +// Verifies that the subtractor does not converge on uncorrelated signals. +TEST(Subtractor, NonConvergenceOnUncorrelatedSignals) { + std::vector blocks_with_echo_path_changes; + for (size_t filter_length_blocks : {12, 20, 30}) { + for (size_t delay_samples : {0, 64, 150, 200, 301}) { + SCOPED_TRACE(ProduceDebugText(1, 1, delay_samples, filter_length_blocks)); + + std::vector echo_to_nearend_powers = RunSubtractorTest( + 1, 1, 3000, delay_samples, filter_length_blocks, filter_length_blocks, + true, blocks_with_echo_path_changes); + for (float echo_to_nearend_power : echo_to_nearend_powers) { + EXPECT_NEAR(1.f, echo_to_nearend_power, 0.1); + } + } + } +} + +// Verifies that the subtractor does not converge on uncorrelated signals. 
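+// With the capture signals uncorrelated to the render signals there is
+// nothing for the filters to cancel, so the echo-to-nearend power ratio
+// should stay near unity (above 0.8 and within 1.0 +/- 0.25 for every
+// capture channel in the multi-channel variant below).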
+TEST(Subtractor, NonConvergenceOnUncorrelatedSignalsMultiChannel) { + std::vector blocks_with_echo_path_changes; + for (size_t num_render_channels : {1, 2, 4}) { + for (size_t num_capture_channels : {1, 2, 4}) { + SCOPED_TRACE( + ProduceDebugText(num_render_channels, num_render_channels, 64, 20)); + size_t num_blocks_to_process = 5000 * num_render_channels; + std::vector echo_to_nearend_powers = RunSubtractorTest( + num_render_channels, num_capture_channels, num_blocks_to_process, 64, + 20, 20, true, blocks_with_echo_path_changes); + for (float echo_to_nearend_power : echo_to_nearend_powers) { + EXPECT_LT(.8f, echo_to_nearend_power); + EXPECT_NEAR(1.f, echo_to_nearend_power, 0.25f); + } + } + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/suppression_filter.cc b/audio_processing/aec3/suppression_filter.cc new file mode 100644 index 0000000..56f11db --- /dev/null +++ b/audio_processing/aec3/suppression_filter.cc @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/aec3/suppression_filter.h" + +#include +#include +#include +#include +#include + +#include "audio_processing/aec3/vector_math.h" +#include "rtc_base/checks.h" +#include "rtc_base/numerics/safe_minmax.h" + +namespace webrtc { +namespace { + +// Hanning window from Matlab command win = sqrt(hanning(128)). +const float kSqrtHanning[kFftLength] = { + 0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f, + 0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f, + 0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f, + 0.29028467725446f, 0.31368174039889f, 0.33688985339222f, 0.35989503653499f, + 0.38268343236509f, 0.40524131400499f, 0.42755509343028f, 0.44961132965461f, + 0.47139673682600f, 0.49289819222978f, 0.51410274419322f, 0.53499761988710f, + 0.55557023301960f, 0.57580819141785f, 0.59569930449243f, 0.61523159058063f, + 0.63439328416365f, 0.65317284295378f, 0.67155895484702f, 0.68954054473707f, + 0.70710678118655f, 0.72424708295147f, 0.74095112535496f, 0.75720884650648f, + 0.77301045336274f, 0.78834642762661f, 0.80320753148064f, 0.81758481315158f, + 0.83146961230255f, 0.84485356524971f, 0.85772861000027f, 0.87008699110871f, + 0.88192126434835f, 0.89322430119552f, 0.90398929312344f, 0.91420975570353f, + 0.92387953251129f, 0.93299279883474f, 0.94154406518302f, 0.94952818059304f, + 0.95694033573221f, 0.96377606579544f, 0.97003125319454f, 0.97570213003853f, + 0.98078528040323f, 0.98527764238894f, 0.98917650996478f, 0.99247953459871f, + 0.99518472667220f, 0.99729045667869f, 0.99879545620517f, 0.99969881869620f, + 1.00000000000000f, 0.99969881869620f, 0.99879545620517f, 0.99729045667869f, + 0.99518472667220f, 0.99247953459871f, 0.98917650996478f, 0.98527764238894f, + 0.98078528040323f, 0.97570213003853f, 0.97003125319454f, 0.96377606579544f, + 0.95694033573221f, 0.94952818059304f, 0.94154406518302f, 0.93299279883474f, + 0.92387953251129f, 0.91420975570353f, 0.90398929312344f, 0.89322430119552f, + 0.88192126434835f, 0.87008699110871f, 0.85772861000027f, 0.84485356524971f, + 0.83146961230255f, 0.81758481315158f, 0.80320753148064f, 0.78834642762661f, + 
0.77301045336274f, 0.75720884650648f, 0.74095112535496f, 0.72424708295147f, + 0.70710678118655f, 0.68954054473707f, 0.67155895484702f, 0.65317284295378f, + 0.63439328416365f, 0.61523159058063f, 0.59569930449243f, 0.57580819141785f, + 0.55557023301960f, 0.53499761988710f, 0.51410274419322f, 0.49289819222978f, + 0.47139673682600f, 0.44961132965461f, 0.42755509343028f, 0.40524131400499f, + 0.38268343236509f, 0.35989503653499f, 0.33688985339222f, 0.31368174039889f, + 0.29028467725446f, 0.26671275747490f, 0.24298017990326f, 0.21910124015687f, + 0.19509032201613f, 0.17096188876030f, 0.14673047445536f, 0.12241067519922f, + 0.09801714032956f, 0.07356456359967f, 0.04906767432742f, 0.02454122852291f}; + +} // namespace + +SuppressionFilter::SuppressionFilter(Aec3Optimization optimization, + int sample_rate_hz, + size_t num_capture_channels) + : optimization_(optimization), + sample_rate_hz_(sample_rate_hz), + num_capture_channels_(num_capture_channels), + fft_(), + e_output_old_(NumBandsForRate(sample_rate_hz_), + std::vector>( + num_capture_channels_)) { + RTC_DCHECK(ValidFullBandRate(sample_rate_hz_)); + for (size_t b = 0; b < e_output_old_.size(); ++b) { + for (size_t ch = 0; ch < e_output_old_[b].size(); ++ch) { + e_output_old_[b][ch].fill(0.f); + } + } +} + +SuppressionFilter::~SuppressionFilter() = default; + +void SuppressionFilter::ApplyGain( + rtc::ArrayView comfort_noise, + rtc::ArrayView comfort_noise_high_band, + const std::array& suppression_gain, + float high_bands_gain, + rtc::ArrayView E_lowest_band, + std::vector>>* e) { + RTC_DCHECK(e); + RTC_DCHECK_EQ(e->size(), NumBandsForRate(sample_rate_hz_)); + + // Comfort noise gain is sqrt(1-g^2), where g is the suppression gain. + std::array noise_gain; + for (size_t i = 0; i < kFftLengthBy2Plus1; ++i) { + noise_gain[i] = 1.f - suppression_gain[i] * suppression_gain[i]; + } + aec3::VectorMath(optimization_).Sqrt(noise_gain); + + const float high_bands_noise_scaling = + 0.4f * std::sqrt(1.f - high_bands_gain * high_bands_gain); + + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + FftData E; + + // Analysis filterbank. + E.Assign(E_lowest_band[ch]); + + for (size_t i = 0; i < kFftLengthBy2Plus1; ++i) { + // Apply suppression gains. + E.re[i] *= suppression_gain[i]; + E.im[i] *= suppression_gain[i]; + + // Scale and add the comfort noise. + E.re[i] += noise_gain[i] * comfort_noise[ch].re[i]; + E.im[i] += noise_gain[i] * comfort_noise[ch].im[i]; + } + + // Synthesis filterbank. + std::array e_extended; + constexpr float kIfftNormalization = 2.f / kFftLength; + fft_.Ifft(E, &e_extended); + + auto& e0 = (*e)[0][ch]; + auto& e0_old = e_output_old_[0][ch]; + + // Window and add the first half of e_extended with the second half of + // e_extended from the previous block. + for (size_t i = 0; i < kFftLengthBy2; ++i) { + e0[i] = e0_old[i] * kSqrtHanning[kFftLengthBy2 + i]; + e0[i] += e_extended[i] * kSqrtHanning[i]; + e0[i] *= kIfftNormalization; + } + + // The second half of e_extended is stored for the succeeding frame. + std::copy(e_extended.begin() + kFftLengthBy2, + e_extended.begin() + kFftLength, std::begin(e0_old)); + + // Apply suppression gain to upper bands. + for (size_t b = 1; b < e->size(); ++b) { + auto& e_band = (*e)[b][ch]; + for (size_t i = 0; i < kFftLengthBy2; ++i) { + e_band[i] *= high_bands_gain; + } + } + + // Add comfort noise to band 1. 
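+    // The high-band comfort noise is shaped in the time domain: an IFFT of
+    // its spectrum, scaled by 0.4 * sqrt(1 - g^2) (g being the upper-bands
+    // gain) and by the IFFT normalization 2 / kFftLength. As in the lowest
+    // band, the sqrt(1 - g^2) factor keeps the total power of suppressed
+    // signal plus injected noise roughly constant; e.g. g = 0.6 gives a
+    // noise gain of 0.8, since 0.36 + 0.64 = 1.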
+ if (e->size() > 1) { + E.Assign(comfort_noise_high_band[ch]); + std::array time_domain_high_band_noise; + fft_.Ifft(E, &time_domain_high_band_noise); + + auto& e1 = (*e)[1][ch]; + const float gain = high_bands_noise_scaling * kIfftNormalization; + for (size_t i = 0; i < kFftLengthBy2; ++i) { + e1[i] += time_domain_high_band_noise[i] * gain; + } + } + + // Delay upper bands to match the delay of the filter bank. + for (size_t b = 1; b < e->size(); ++b) { + auto& e_band = (*e)[b][ch]; + auto& e_band_old = e_output_old_[b][ch]; + for (size_t i = 0; i < kFftLengthBy2; ++i) { + std::swap(e_band[i], e_band_old[i]); + } + } + + // Clamp output of all bands. + for (size_t b = 0; b < e->size(); ++b) { + auto& e_band = (*e)[b][ch]; + for (size_t i = 0; i < kFftLengthBy2; ++i) { + e_band[i] = rtc::SafeClamp(e_band[i], -32768.f, 32767.f); + } + } + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/suppression_filter.h b/audio_processing/aec3/suppression_filter.h new file mode 100644 index 0000000..0cd82db --- /dev/null +++ b/audio_processing/aec3/suppression_filter.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_FILTER_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_FILTER_H_ + +#include +#include + +#include "audio_processing/aec3/aec3_common.h" +#include "audio_processing/aec3/aec3_fft.h" +#include "audio_processing/aec3/fft_data.h" +#include "audio_processing/utility/ooura_fft.h" +#include "rtc_base/constructor_magic.h" + +namespace webrtc { + +class SuppressionFilter { + public: + SuppressionFilter(Aec3Optimization optimization, + int sample_rate_hz, + size_t num_capture_channels_); + ~SuppressionFilter(); + void ApplyGain(rtc::ArrayView comfort_noise, + rtc::ArrayView comfort_noise_high_bands, + const std::array& suppression_gain, + float high_bands_gain, + rtc::ArrayView E_lowest_band, + std::vector>>* e); + + private: + const Aec3Optimization optimization_; + const int sample_rate_hz_; + const size_t num_capture_channels_; + const OouraFft ooura_fft_; + const Aec3Fft fft_; + std::vector>> e_output_old_; + RTC_DISALLOW_COPY_AND_ASSIGN(SuppressionFilter); +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_FILTER_H_ diff --git a/audio_processing/aec3/suppression_filter_unittest.cc b/audio_processing/aec3/suppression_filter_unittest.cc new file mode 100644 index 0000000..b55c719 --- /dev/null +++ b/audio_processing/aec3/suppression_filter_unittest.cc @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/audio_processing/aec3/suppression_filter.h" + +#include + +#include +#include +#include + +#include "test/gtest.h" + +namespace webrtc { +namespace { + +constexpr float kPi = 3.141592f; + +void ProduceSinusoid(int sample_rate_hz, + float sinusoidal_frequency_hz, + size_t* sample_counter, + std::vector>>* x) { + // Produce a sinusoid of the specified frequency. + for (size_t k = *sample_counter, j = 0; k < (*sample_counter + kBlockSize); + ++k, ++j) { + for (size_t channel = 0; channel < (*x)[0].size(); ++channel) { + (*x)[0][channel][j] = + 32767.f * + std::sin(2.f * kPi * sinusoidal_frequency_hz * k / sample_rate_hz); + } + } + *sample_counter = *sample_counter + kBlockSize; + + for (size_t band = 1; band < x->size(); ++band) { + for (size_t channel = 0; channel < (*x)[band].size(); ++channel) { + std::fill((*x)[band][channel].begin(), (*x)[band][channel].end(), 0.f); + } + } +} + +} // namespace + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) + +// Verifies the check for null suppressor output. +TEST(SuppressionFilter, NullOutput) { + std::vector cn(1); + std::vector cn_high_bands(1); + std::vector E(1); + std::array gain; + + EXPECT_DEATH(SuppressionFilter(Aec3Optimization::kNone, 16000, 1) + .ApplyGain(cn, cn_high_bands, gain, 1.0f, E, nullptr), + ""); +} + +// Verifies the check for allowed sample rate. +TEST(SuppressionFilter, ProperSampleRate) { + EXPECT_DEATH(SuppressionFilter(Aec3Optimization::kNone, 16001, 1), ""); +} + +#endif + +// Verifies that no comfort noise is added when the gain is 1. +TEST(SuppressionFilter, ComfortNoiseInUnityGain) { + SuppressionFilter filter(Aec3Optimization::kNone, 48000, 1); + std::vector cn(1); + std::vector cn_high_bands(1); + std::array gain; + std::array e_old_; + Aec3Fft fft; + + e_old_.fill(0.f); + gain.fill(1.f); + cn[0].re.fill(1.f); + cn[0].im.fill(1.f); + cn_high_bands[0].re.fill(1.f); + cn_high_bands[0].im.fill(1.f); + + std::vector>> e( + 3, + std::vector>(1, std::vector(kBlockSize, 0.f))); + std::vector>> e_ref = e; + + std::vector E(1); + fft.PaddedFft(e[0][0], e_old_, Aec3Fft::Window::kSqrtHanning, &E[0]); + std::copy(e[0][0].begin(), e[0][0].end(), e_old_.begin()); + + filter.ApplyGain(cn, cn_high_bands, gain, 1.f, E, &e); + + for (size_t band = 0; band < e.size(); ++band) { + for (size_t channel = 0; channel < e[band].size(); ++channel) { + for (size_t sample = 0; sample < e[band][channel].size(); ++sample) { + EXPECT_EQ(e_ref[band][channel][sample], e[band][channel][sample]); + } + } + } +} + +// Verifies that the suppressor is able to suppress a signal. 
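+// The suppression gain is zeroed from bin 10 upwards while the probe
+// sinusoid is placed at bin 40 (5 kHz at the 16 kHz lower band), so nearly
+// all of its energy falls in the suppressed region; the test requires at
+// least a factor 1000 (30 dB) reduction in power.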
+TEST(SuppressionFilter, SignalSuppression) { + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + constexpr size_t kNumChannels = 1; + + SuppressionFilter filter(Aec3Optimization::kNone, kSampleRateHz, 1); + std::vector cn(1); + std::vector cn_high_bands(1); + std::array e_old_; + Aec3Fft fft; + std::array gain; + std::vector>> e( + kNumBands, std::vector>( + kNumChannels, std::vector(kBlockSize, 0.f))); + e_old_.fill(0.f); + + gain.fill(1.f); + std::for_each(gain.begin() + 10, gain.end(), [](float& a) { a = 0.f; }); + + cn[0].re.fill(0.f); + cn[0].im.fill(0.f); + cn_high_bands[0].re.fill(0.f); + cn_high_bands[0].im.fill(0.f); + + size_t sample_counter = 0; + + float e0_input = 0.f; + float e0_output = 0.f; + for (size_t k = 0; k < 100; ++k) { + ProduceSinusoid(16000, 16000 * 40 / kFftLengthBy2 / 2, &sample_counter, &e); + e0_input = std::inner_product(e[0][0].begin(), e[0][0].end(), + e[0][0].begin(), e0_input); + + std::vector E(1); + fft.PaddedFft(e[0][0], e_old_, Aec3Fft::Window::kSqrtHanning, &E[0]); + std::copy(e[0][0].begin(), e[0][0].end(), e_old_.begin()); + + filter.ApplyGain(cn, cn_high_bands, gain, 1.f, E, &e); + e0_output = std::inner_product(e[0][0].begin(), e[0][0].end(), + e[0][0].begin(), e0_output); + } + + EXPECT_LT(e0_output, e0_input / 1000.f); +} + +// Verifies that the suppressor is able to pass through a desired signal while +// applying suppressing for some frequencies. +TEST(SuppressionFilter, SignalTransparency) { + constexpr size_t kNumChannels = 1; + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + + SuppressionFilter filter(Aec3Optimization::kNone, kSampleRateHz, 1); + std::vector cn(1); + std::array e_old_; + Aec3Fft fft; + std::vector cn_high_bands(1); + std::array gain; + std::vector>> e( + kNumBands, std::vector>( + kNumChannels, std::vector(kBlockSize, 0.f))); + e_old_.fill(0.f); + gain.fill(1.f); + std::for_each(gain.begin() + 30, gain.end(), [](float& a) { a = 0.f; }); + + cn[0].re.fill(0.f); + cn[0].im.fill(0.f); + cn_high_bands[0].re.fill(0.f); + cn_high_bands[0].im.fill(0.f); + + size_t sample_counter = 0; + + float e0_input = 0.f; + float e0_output = 0.f; + for (size_t k = 0; k < 100; ++k) { + ProduceSinusoid(16000, 16000 * 10 / kFftLengthBy2 / 2, &sample_counter, &e); + e0_input = std::inner_product(e[0][0].begin(), e[0][0].end(), + e[0][0].begin(), e0_input); + + std::vector E(1); + fft.PaddedFft(e[0][0], e_old_, Aec3Fft::Window::kSqrtHanning, &E[0]); + std::copy(e[0][0].begin(), e[0][0].end(), e_old_.begin()); + + filter.ApplyGain(cn, cn_high_bands, gain, 1.f, E, &e); + e0_output = std::inner_product(e[0][0].begin(), e[0][0].end(), + e[0][0].begin(), e0_output); + } + + EXPECT_LT(0.9f * e0_input, e0_output); +} + +// Verifies that the suppressor delay. 
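+// The analysis/synthesis filter bank delays each band by one block: the
+// lowest band through the overlap of the windowed IFFT segments, the upper
+// bands through the explicit one-block swap buffers. The ramp written below
+// is therefore expected to come out offset by kBlockSize samples once the
+// filter bank has warmed up (k > 2).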
+TEST(SuppressionFilter, Delay) { + constexpr size_t kNumChannels = 1; + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz); + + SuppressionFilter filter(Aec3Optimization::kNone, kSampleRateHz, 1); + std::vector cn(1); + std::vector cn_high_bands(1); + std::array e_old_; + Aec3Fft fft; + std::array gain; + std::vector>> e( + kNumBands, std::vector>( + kNumChannels, std::vector(kBlockSize, 0.f))); + + gain.fill(1.f); + + cn[0].re.fill(0.f); + cn[0].im.fill(0.f); + cn_high_bands[0].re.fill(0.f); + cn_high_bands[0].im.fill(0.f); + + for (size_t k = 0; k < 100; ++k) { + for (size_t band = 0; band < kNumBands; ++band) { + for (size_t channel = 0; channel < kNumChannels; ++channel) { + for (size_t sample = 0; sample < kBlockSize; ++sample) { + e[band][channel][sample] = k * kBlockSize + sample + channel; + } + } + } + + std::vector E(1); + fft.PaddedFft(e[0][0], e_old_, Aec3Fft::Window::kSqrtHanning, &E[0]); + std::copy(e[0][0].begin(), e[0][0].end(), e_old_.begin()); + + filter.ApplyGain(cn, cn_high_bands, gain, 1.f, E, &e); + if (k > 2) { + for (size_t band = 0; band < kNumBands; ++band) { + for (size_t channel = 0; channel < kNumChannels; ++channel) { + for (size_t sample = 0; sample < kBlockSize; ++sample) { + EXPECT_NEAR(k * kBlockSize + sample - kBlockSize + channel, + e[band][channel][sample], 0.01); + } + } + } + } + } +} + +} // namespace webrtc diff --git a/audio_processing/aec3/suppression_gain.cc b/audio_processing/aec3/suppression_gain.cc new file mode 100644 index 0000000..5b6b82e --- /dev/null +++ b/audio_processing/aec3/suppression_gain.cc @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/aec3/suppression_gain.h" + +#include +#include + +#include +#include + +#include "audio_processing/aec3/dominant_nearend_detector.h" +#include "audio_processing/aec3/moving_average.h" +#include "audio_processing/aec3/subband_nearend_detector.h" +#include "audio_processing/aec3/vector_math.h" +#include "audio_processing/logging/apm_data_dumper.h" +#include "rtc_base/atomic_ops.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace { + +void PostprocessGains(std::array* gain) { + // TODO(gustaf): Investigate if this can be relaxed to achieve higher + // transparency above 2 kHz. + + // Limit the low frequency gains to avoid the impact of the high-pass filter + // on the lower-frequency gain influencing the overall achieved gain. + (*gain)[0] = (*gain)[1] = std::min((*gain)[1], (*gain)[2]); + + // Limit the high frequency gains to avoid the impact of the anti-aliasing + // filter on the upper-frequency gains influencing the overall achieved + // gain. TODO(peah): Update this when new anti-aliasing filters are + // implemented. 
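+  // With kFftLengthBy2 = 64 bins covering the 0-8 kHz lower band,
+  // (64 * 2000) / 8000 = 16 is the bin at roughly 2 kHz, from which the
+  // anti-aliasing filter is assumed to affect the spectrum.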
+ constexpr size_t kAntiAliasingImpactLimit = (64 * 2000) / 8000; + const float min_upper_gain = (*gain)[kAntiAliasingImpactLimit]; + std::for_each( + gain->begin() + kAntiAliasingImpactLimit, gain->end() - 1, + [min_upper_gain](float& a) { a = std::min(a, min_upper_gain); }); + (*gain)[kFftLengthBy2] = (*gain)[kFftLengthBy2Minus1]; + + // Limits the gain in the frequencies for which the adaptive filter has not + // converged. + // TODO(peah): Make adaptive to take the actual filter error into account. + constexpr size_t kUpperAccurateBandPlus1 = 29; + + constexpr float oneByBandsInSum = + 1 / static_cast(kUpperAccurateBandPlus1 - 20); + const float hf_gain_bound = + std::accumulate(gain->begin() + 20, + gain->begin() + kUpperAccurateBandPlus1, 0.f) * + oneByBandsInSum; + + std::for_each(gain->begin() + kUpperAccurateBandPlus1, gain->end(), + [hf_gain_bound](float& a) { a = std::min(a, hf_gain_bound); }); +} + +// Scales the echo according to assessed audibility at the other end. +void WeightEchoForAudibility(const EchoCanceller3Config& config, + rtc::ArrayView echo, + rtc::ArrayView weighted_echo) { + RTC_DCHECK_EQ(kFftLengthBy2Plus1, echo.size()); + RTC_DCHECK_EQ(kFftLengthBy2Plus1, weighted_echo.size()); + + auto weigh = [](float threshold, float normalizer, size_t begin, size_t end, + rtc::ArrayView echo, + rtc::ArrayView weighted_echo) { + for (size_t k = begin; k < end; ++k) { + if (echo[k] < threshold) { + float tmp = (threshold - echo[k]) * normalizer; + weighted_echo[k] = echo[k] * std::max(0.f, 1.f - tmp * tmp); + } else { + weighted_echo[k] = echo[k]; + } + } + }; + + float threshold = config.echo_audibility.floor_power * + config.echo_audibility.audibility_threshold_lf; + float normalizer = 1.f / (threshold - config.echo_audibility.floor_power); + weigh(threshold, normalizer, 0, 3, echo, weighted_echo); + + threshold = config.echo_audibility.floor_power * + config.echo_audibility.audibility_threshold_mf; + normalizer = 1.f / (threshold - config.echo_audibility.floor_power); + weigh(threshold, normalizer, 3, 7, echo, weighted_echo); + + threshold = config.echo_audibility.floor_power * + config.echo_audibility.audibility_threshold_hf; + normalizer = 1.f / (threshold - config.echo_audibility.floor_power); + weigh(threshold, normalizer, 7, kFftLengthBy2Plus1, echo, weighted_echo); +} + +} // namespace + +int SuppressionGain::instance_count_ = 0; + +float SuppressionGain::UpperBandsGain( + rtc::ArrayView> echo_spectrum, + rtc::ArrayView> + comfort_noise_spectrum, + const absl::optional& narrow_peak_band, + bool saturated_echo, + const std::vector>>& render, + const std::array& low_band_gain) const { + RTC_DCHECK_LT(0, render.size()); + if (render.size() == 1) { + return 1.f; + } + const size_t num_render_channels = render[0].size(); + + if (narrow_peak_band && + (*narrow_peak_band > static_cast(kFftLengthBy2Plus1 - 10))) { + return 0.001f; + } + + constexpr size_t kLowBandGainLimit = kFftLengthBy2 / 2; + const float gain_below_8_khz = *std::min_element( + low_band_gain.begin() + kLowBandGainLimit, low_band_gain.end()); + + // Always attenuate the upper bands when there is saturated echo. + if (saturated_echo) { + return std::min(0.001f, gain_below_8_khz); + } + + // Compute the upper and lower band energies. 
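+  // The per-band energy is the largest per-channel block sum of squares.
+  // Note that the low-band loop below reads render[0][0] for every ch;
+  // render[0][ch] looks like the intended source.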
+ const auto sum_of_squares = [](float a, float b) { return a + b * b; }; + float low_band_energy = 0.f; + for (size_t ch = 0; ch < num_render_channels; ++ch) { + const float channel_energy = std::accumulate( + render[0][0].begin(), render[0][0].end(), 0.f, sum_of_squares); + low_band_energy = std::max(low_band_energy, channel_energy); + } + float high_band_energy = 0.f; + for (size_t k = 1; k < render.size(); ++k) { + for (size_t ch = 0; ch < num_render_channels; ++ch) { + const float energy = std::accumulate( + render[k][ch].begin(), render[k][ch].end(), 0.f, sum_of_squares); + high_band_energy = std::max(high_band_energy, energy); + } + } + + // If there is more power in the lower frequencies than the upper frequencies, + // or if the power in upper frequencies is low, do not bound the gain in the + // upper bands. + float anti_howling_gain; + const float activation_threshold = + kBlockSize * config_.suppressor.high_bands_suppression + .anti_howling_activation_threshold; + if (high_band_energy < std::max(low_band_energy, activation_threshold)) { + anti_howling_gain = 1.f; + } else { + // In all other cases, bound the gain for upper frequencies. + RTC_DCHECK_LE(low_band_energy, high_band_energy); + RTC_DCHECK_NE(0.f, high_band_energy); + anti_howling_gain = + config_.suppressor.high_bands_suppression.anti_howling_gain * + sqrtf(low_band_energy / high_band_energy); + } + + float gain_bound = 1.f; + if (!dominant_nearend_detector_->IsNearendState()) { + // Bound the upper gain during significant echo activity. + const auto& cfg = config_.suppressor.high_bands_suppression; + auto low_frequency_energy = [](rtc::ArrayView spectrum) { + RTC_DCHECK_LE(16, spectrum.size()); + return std::accumulate(spectrum.begin() + 1, spectrum.begin() + 16, 0.f); + }; + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + const float echo_sum = low_frequency_energy(echo_spectrum[ch]); + const float noise_sum = low_frequency_energy(comfort_noise_spectrum[ch]); + if (echo_sum > cfg.enr_threshold * noise_sum) { + gain_bound = cfg.max_gain_during_echo; + break; + } + } + } + + // Choose the gain as the minimum of the lower and upper gains. + return std::min(std::min(gain_below_8_khz, anti_howling_gain), gain_bound); +} + +// Computes the gain to reduce the echo to a non audible level. +void SuppressionGain::GainToNoAudibleEcho( + const std::array& nearend, + const std::array& echo, + const std::array& masker, + std::array* gain) const { + const auto& p = dominant_nearend_detector_->IsNearendState() ? nearend_params_ + : normal_params_; + for (size_t k = 0; k < gain->size(); ++k) { + float enr = echo[k] / (nearend[k] + 1.f); // Echo-to-nearend ratio. + float emr = echo[k] / (masker[k] + 1.f); // Echo-to-masker (noise) ratio. + float g = 1.0f; + if (enr > p.enr_transparent_[k] && emr > p.emr_transparent_[k]) { + g = (p.enr_suppress_[k] - enr) / + (p.enr_suppress_[k] - p.enr_transparent_[k]); + g = std::max(g, p.emr_transparent_[k] / emr); + } + (*gain)[k] = g; + } +} + +// Compute the minimum gain as the attenuating gain to put the signal just +// above the zero sample values. +void SuppressionGain::GetMinGain( + rtc::ArrayView weighted_residual_echo, + rtc::ArrayView last_nearend, + rtc::ArrayView last_echo, + bool low_noise_render, + bool saturated_echo, + rtc::ArrayView min_gain) const { + if (!saturated_echo) { + const float min_echo_power = + low_noise_render ? 
config_.echo_audibility.low_render_limit + : config_.echo_audibility.normal_render_limit; + + for (size_t k = 0; k < min_gain.size(); ++k) { + min_gain[k] = weighted_residual_echo[k] > 0.f + ? min_echo_power / weighted_residual_echo[k] + : 1.f; + min_gain[k] = std::min(min_gain[k], 1.f); + } + + const bool is_nearend_state = dominant_nearend_detector_->IsNearendState(); + for (size_t k = 0; k < 6; ++k) { + const auto& dec = is_nearend_state ? nearend_params_.max_dec_factor_lf + : normal_params_.max_dec_factor_lf; + + // Make sure the gains of the low frequencies do not decrease too + // quickly after strong nearend. + if (last_nearend[k] > last_echo[k]) { + min_gain[k] = std::max(min_gain[k], last_gain_[k] * dec); + min_gain[k] = std::min(min_gain[k], 1.f); + } + } + } else { + std::fill(min_gain.begin(), min_gain.end(), 0.f); + } +} + +// Compute the maximum gain by limiting the gain increase from the previous +// gain. +void SuppressionGain::GetMaxGain(rtc::ArrayView max_gain) const { + const auto& inc = dominant_nearend_detector_->IsNearendState() + ? nearend_params_.max_inc_factor + : normal_params_.max_inc_factor; + const auto& floor = config_.suppressor.floor_first_increase; + for (size_t k = 0; k < max_gain.size(); ++k) { + max_gain[k] = std::min(std::max(last_gain_[k] * inc, floor), 1.f); + } +} + +void SuppressionGain::LowerBandGain( + bool low_noise_render, + const AecState& aec_state, + rtc::ArrayView> + suppressor_input, + rtc::ArrayView> residual_echo, + rtc::ArrayView> comfort_noise, + std::array* gain) { + gain->fill(1.f); + const bool saturated_echo = aec_state.SaturatedEcho(); + std::array max_gain; + GetMaxGain(max_gain); + + for (size_t ch = 0; ch < num_capture_channels_; ++ch) { + std::array G; + std::array nearend; + nearend_smoothers_[ch].Average(suppressor_input[ch], nearend); + + // Weight echo power in terms of audibility. + std::array weighted_residual_echo; + WeightEchoForAudibility(config_, residual_echo[ch], weighted_residual_echo); + + std::array min_gain; + GetMinGain(weighted_residual_echo, last_nearend_[ch], last_echo_[ch], + low_noise_render, saturated_echo, min_gain); + + GainToNoAudibleEcho(nearend, weighted_residual_echo, comfort_noise[0], &G); + + // Clamp gains. + for (size_t k = 0; k < gain->size(); ++k) { + G[k] = std::max(std::min(G[k], max_gain[k]), min_gain[k]); + (*gain)[k] = std::min((*gain)[k], G[k]); + } + + // Store data required for the gain computation of the next block. + std::copy(nearend.begin(), nearend.end(), last_nearend_[ch].begin()); + std::copy(weighted_residual_echo.begin(), weighted_residual_echo.end(), + last_echo_[ch].begin()); + } + + // Limit high-frequency gains. + PostprocessGains(gain); + + // Store computed gains. + std::copy(gain->begin(), gain->end(), last_gain_.begin()); + + // Transform gains to amplitude domain. 
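+  // The gains above were derived on power spectra; the square root converts
+  // them to amplitude gains for the FFT coefficients (a power gain of 0.25,
+  // for instance, becomes an amplitude gain of 0.5, i.e. -6 dB).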
+  aec3::VectorMath(optimization_).Sqrt(*gain);
+}
+
+SuppressionGain::SuppressionGain(const EchoCanceller3Config& config,
+                                 Aec3Optimization optimization,
+                                 int sample_rate_hz,
+                                 size_t num_capture_channels)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      optimization_(optimization),
+      config_(config),
+      num_capture_channels_(num_capture_channels),
+      state_change_duration_blocks_(
+          static_cast<int>(config_.filter.config_change_duration_blocks)),
+      last_nearend_(num_capture_channels_, {0}),
+      last_echo_(num_capture_channels_, {0}),
+      nearend_smoothers_(
+          num_capture_channels_,
+          aec3::MovingAverage(kFftLengthBy2Plus1,
+                              config.suppressor.nearend_average_blocks)),
+      nearend_params_(config_.suppressor.nearend_tuning),
+      normal_params_(config_.suppressor.normal_tuning) {
+  RTC_DCHECK_LT(0, state_change_duration_blocks_);
+  last_gain_.fill(1.f);
+  if (config_.suppressor.use_subband_nearend_detection) {
+    dominant_nearend_detector_ = std::make_unique<SubbandNearendDetector>(
+        config_.suppressor.subband_nearend_detection, num_capture_channels_);
+  } else {
+    dominant_nearend_detector_ = std::make_unique<DominantNearendDetector>(
+        config_.suppressor.dominant_nearend_detection, num_capture_channels_);
+  }
+  RTC_DCHECK(dominant_nearend_detector_);
+}
+
+SuppressionGain::~SuppressionGain() = default;
+
+void SuppressionGain::GetGain(
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+        nearend_spectrum,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> echo_spectrum,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+        residual_echo_spectrum,
+    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+        comfort_noise_spectrum,
+    const RenderSignalAnalyzer& render_signal_analyzer,
+    const AecState& aec_state,
+    const std::vector<std::vector<std::vector<float>>>& render,
+    float* high_bands_gain,
+    std::array<float, kFftLengthBy2Plus1>* low_band_gain) {
+  RTC_DCHECK(high_bands_gain);
+  RTC_DCHECK(low_band_gain);
+
+  // Update the nearend state selection.
+  dominant_nearend_detector_->Update(nearend_spectrum, residual_echo_spectrum,
+                                     comfort_noise_spectrum, initial_state_);
+
+  // Compute gain for the lower band.
+  bool low_noise_render = low_render_detector_.Detect(render);
+  LowerBandGain(low_noise_render, aec_state, nearend_spectrum,
+                residual_echo_spectrum, comfort_noise_spectrum, low_band_gain);
+
+  // Compute the gain for the upper bands.
+  const absl::optional<int> narrow_peak_band =
+      render_signal_analyzer.NarrowPeakBand();
+
+  *high_bands_gain =
+      UpperBandsGain(echo_spectrum, comfort_noise_spectrum, narrow_peak_band,
+                     aec_state.SaturatedEcho(), render, *low_band_gain);
+}
+
+void SuppressionGain::SetInitialState(bool state) {
+  initial_state_ = state;
+  if (state) {
+    initial_state_change_counter_ = state_change_duration_blocks_;
+  } else {
+    initial_state_change_counter_ = 0;
+  }
+}
+
+// Detects when the render signal can be considered to have low power and
+// consist of stationary noise.
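+// (Added sketch of the heuristic, not upstream documentation: a leaky average
+// of the per-block render power is kept in average_power_, and a block is
+// labeled as low-noise render when that average stays below a fixed threshold
+// and no single sample dominates the block. The 50.f * 50.f * 64.f threshold
+// corresponds to an RMS of roughly 50, in the floating-point S16 range, over
+// one 64-sample block.)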
+bool SuppressionGain::LowNoiseRenderDetector::Detect(
+    const std::vector<std::vector<std::vector<float>>>& render) {
+  float x2_sum = 0.f;
+  float x2_max = 0.f;
+  for (const auto& x_ch : render[0]) {
+    for (auto x_k : x_ch) {
+      const float x2 = x_k * x_k;
+      x2_sum += x2;
+      x2_max = std::max(x2_max, x2);
+    }
+  }
+  const size_t num_render_channels = render[0].size();
+  x2_sum = x2_sum / num_render_channels;
+
+  constexpr float kThreshold = 50.f * 50.f * 64.f;
+  const bool low_noise_render =
+      average_power_ < kThreshold && x2_max < 3 * average_power_;
+  average_power_ = average_power_ * 0.9f + x2_sum * 0.1f;
+  return low_noise_render;
+}
+
+SuppressionGain::GainParameters::GainParameters(
+    const EchoCanceller3Config::Suppressor::Tuning& tuning)
+    : max_inc_factor(tuning.max_inc_factor),
+      max_dec_factor_lf(tuning.max_dec_factor_lf) {
+  // Compute per-band masking thresholds.
+  constexpr size_t kLastLfBand = 5;
+  constexpr size_t kFirstHfBand = 8;
+  RTC_DCHECK_LT(kLastLfBand, kFirstHfBand);
+  auto& lf = tuning.mask_lf;
+  auto& hf = tuning.mask_hf;
+  RTC_DCHECK_LT(lf.enr_transparent, lf.enr_suppress);
+  RTC_DCHECK_LT(hf.enr_transparent, hf.enr_suppress);
+  for (size_t k = 0; k < kFftLengthBy2Plus1; k++) {
+    float a;
+    if (k <= kLastLfBand) {
+      a = 0.f;
+    } else if (k < kFirstHfBand) {
+      a = (k - kLastLfBand) / static_cast<float>(kFirstHfBand - kLastLfBand);
+    } else {
+      a = 1.f;
+    }
+    enr_transparent_[k] = (1 - a) * lf.enr_transparent + a * hf.enr_transparent;
+    enr_suppress_[k] = (1 - a) * lf.enr_suppress + a * hf.enr_suppress;
+    emr_transparent_[k] = (1 - a) * lf.emr_transparent + a * hf.emr_transparent;
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/aec3/suppression_gain.h b/audio_processing/aec3/suppression_gain.h
new file mode 100644
index 0000000..a13927d
--- /dev/null
+++ b/audio_processing/aec3/suppression_gain.h
@@ -0,0 +1,130 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "rtc_base/array_view.h"
+#include "api/echo_canceller3_config.h"
+#include "audio_processing/aec3/aec3_common.h"
+#include "audio_processing/aec3/aec_state.h"
+#include "audio_processing/aec3/fft_data.h"
+#include "audio_processing/aec3/moving_average.h"
+#include "audio_processing/aec3/nearend_detector.h"
+#include "audio_processing/aec3/render_signal_analyzer.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/constructor_magic.h"
+
+namespace webrtc {
+
+class SuppressionGain {
+ public:
+  SuppressionGain(const EchoCanceller3Config& config,
+                  Aec3Optimization optimization,
+                  int sample_rate_hz,
+                  size_t num_capture_channels);
+  ~SuppressionGain();
+  void GetGain(
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          nearend_spectrum,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          echo_spectrum,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          residual_echo_spectrum,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          comfort_noise_spectrum,
+      const RenderSignalAnalyzer& render_signal_analyzer,
+      const AecState& aec_state,
+      const std::vector<std::vector<std::vector<float>>>& render,
+      float* high_bands_gain,
+      std::array<float, kFftLengthBy2Plus1>* low_band_gain);
+
+  // Toggles the usage of the initial state.
+  void SetInitialState(bool state);
+
+ private:
+  // Computes the gain to apply for the bands beyond the first band.
+  float UpperBandsGain(
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          echo_spectrum,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          comfort_noise_spectrum,
+      const absl::optional<int>& narrow_peak_band,
+      bool saturated_echo,
+      const std::vector<std::vector<std::vector<float>>>& render,
+      const std::array<float, kFftLengthBy2Plus1>& low_band_gain) const;
+
+  void GainToNoAudibleEcho(
+      const std::array<float, kFftLengthBy2Plus1>& nearend,
+      const std::array<float, kFftLengthBy2Plus1>& echo,
+      const std::array<float, kFftLengthBy2Plus1>& masker,
+      std::array<float, kFftLengthBy2Plus1>* gain) const;
+
+  void LowerBandGain(
+      bool stationary_with_low_power,
+      const AecState& aec_state,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          suppressor_input,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          residual_echo,
+      rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>>
+          comfort_noise,
+      std::array<float, kFftLengthBy2Plus1>* gain);
+
+  void GetMinGain(rtc::ArrayView<const float> weighted_residual_echo,
+                  rtc::ArrayView<const float> last_nearend,
+                  rtc::ArrayView<const float> last_echo,
+                  bool low_noise_render,
+                  bool saturated_echo,
+                  rtc::ArrayView<float> min_gain) const;
+
+  void GetMaxGain(rtc::ArrayView<float> max_gain) const;
+
+  class LowNoiseRenderDetector {
+   public:
+    bool Detect(const std::vector<std::vector<std::vector<float>>>& render);
+
+   private:
+    float average_power_ = 32768.f * 32768.f;
+  };
+
+  struct GainParameters {
+    explicit GainParameters(
+        const EchoCanceller3Config::Suppressor::Tuning& tuning);
+    const float max_inc_factor;
+    const float max_dec_factor_lf;
+    std::array<float, kFftLengthBy2Plus1> enr_transparent_;
+    std::array<float, kFftLengthBy2Plus1> enr_suppress_;
+    std::array<float, kFftLengthBy2Plus1> emr_transparent_;
+  };
+
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const Aec3Optimization optimization_;
+  const EchoCanceller3Config config_;
+  const size_t num_capture_channels_;
+  const int state_change_duration_blocks_;
+  std::array<float, kFftLengthBy2Plus1> last_gain_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> last_nearend_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> last_echo_;
+  LowNoiseRenderDetector low_render_detector_;
+  bool initial_state_ = true;
+  int initial_state_change_counter_ = 0;
+  std::vector<aec3::MovingAverage> nearend_smoothers_;
+  const GainParameters nearend_params_;
+  const GainParameters normal_params_;
+  std::unique_ptr<NearendDetector> dominant_nearend_detector_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SuppressionGain);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_H_
diff --git a/audio_processing/aec3/suppression_gain_unittest.cc b/audio_processing/aec3/suppression_gain_unittest.cc
new file mode 100644
index 0000000..0452f2e
--- /dev/null
+++ b/audio_processing/aec3/suppression_gain_unittest.cc
@@ -0,0 +1,148 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/aec3/suppression_gain.h"
+
+#include "audio_processing/aec3/aec_state.h"
+#include "audio_processing/aec3/render_delay_buffer.h"
+#include "audio_processing/aec3/subtractor.h"
+#include "audio_processing/aec3/subtractor_output.h"
+#include "audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null output gains works.
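+// (Added note: the EXPECT_DEATH check below only runs in builds where
+// RTC_DCHECKs and gtest death tests are available, per the guard above.)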
+TEST(SuppressionGain, NullOutputGains) {
+  std::vector<std::array<float, kFftLengthBy2Plus1>> E2(1, {0.f});
+  std::vector<std::array<float, kFftLengthBy2Plus1>> R2(1, {0.f});
+  std::vector<std::array<float, kFftLengthBy2Plus1>> S2(1);
+  std::vector<std::array<float, kFftLengthBy2Plus1>> N2(1, {0.f});
+  for (auto& S2_k : S2) {
+    S2_k.fill(.1f);
+  }
+  FftData E;
+  FftData Y;
+  E.re.fill(0.f);
+  E.im.fill(0.f);
+  Y.re.fill(0.f);
+  Y.im.fill(0.f);
+
+  float high_bands_gain;
+  AecState aec_state(EchoCanceller3Config{}, 1);
+  EXPECT_DEATH(
+      SuppressionGain(EchoCanceller3Config{}, DetectOptimization(), 16000, 1)
+          .GetGain(E2, S2, R2, N2,
+                   RenderSignalAnalyzer((EchoCanceller3Config{})), aec_state,
+                   std::vector<std::vector<std::vector<float>>>(
+                       3, std::vector<std::vector<float>>(
+                              1, std::vector<float>(kBlockSize, 0.f))),
+                   &high_bands_gain, nullptr),
+      "");
+}
+
+#endif
+
+// Does a sanity check that the gains are correctly computed.
+TEST(SuppressionGain, BasicGainComputation) {
+  constexpr size_t kNumRenderChannels = 1;
+  constexpr size_t kNumCaptureChannels = 2;
+  constexpr int kSampleRateHz = 16000;
+  constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+  SuppressionGain suppression_gain(EchoCanceller3Config(),
+                                   DetectOptimization(), kSampleRateHz,
+                                   kNumCaptureChannels);
+  RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+  float high_bands_gain;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> E2(kNumCaptureChannels);
+  std::vector<std::array<float, kFftLengthBy2Plus1>> S2(kNumCaptureChannels,
+                                                        {0.f});
+  std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(kNumCaptureChannels);
+  std::vector<std::array<float, kFftLengthBy2Plus1>> R2(kNumCaptureChannels);
+  std::vector<std::array<float, kFftLengthBy2Plus1>> N2(kNumCaptureChannels);
+  std::array<float, kFftLengthBy2Plus1> g;
+  std::vector<SubtractorOutput> output(kNumCaptureChannels);
+  std::vector<std::vector<std::vector<float>>> x(
+      kNumBands, std::vector<std::vector<float>>(
+                     kNumRenderChannels, std::vector<float>(kBlockSize, 0.f)));
+  EchoCanceller3Config config;
+  AecState aec_state(config, kNumCaptureChannels);
+  ApmDataDumper data_dumper(42);
+  Subtractor subtractor(config, kNumRenderChannels, kNumCaptureChannels,
+                        &data_dumper, DetectOptimization());
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, kSampleRateHz, kNumRenderChannels));
+  absl::optional<DelayEstimate> delay_estimate;
+
+  // Ensure that a strong noise is detected to mask any echoes.
+  for (size_t ch = 0; ch < kNumCaptureChannels; ++ch) {
+    E2[ch].fill(10.f);
+    Y2[ch].fill(10.f);
+    R2[ch].fill(.1f);
+    N2[ch].fill(100.f);
+  }
+  for (auto& subtractor_output : output) {
+    subtractor_output.Reset();
+  }
+
+  // Ensure that the gain is no longer forced to zero.
+  for (int k = 0; k <= kNumBlocksPerSecond / 5 + 1; ++k) {
+    aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponses(),
+                     subtractor.FilterImpulseResponses(),
+                     *render_delay_buffer->GetRenderBuffer(), E2, Y2, output);
+  }
+
+  for (int k = 0; k < 100; ++k) {
+    aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponses(),
+                     subtractor.FilterImpulseResponses(),
+                     *render_delay_buffer->GetRenderBuffer(), E2, Y2, output);
+    suppression_gain.GetGain(E2, S2, R2, N2, analyzer, aec_state, x,
+                             &high_bands_gain, &g);
+  }
+  std::for_each(g.begin(), g.end(),
+                [](float a) { EXPECT_NEAR(1.f, a, 0.001); });
+
+  // Ensure that a strong nearend is detected to mask any echoes.
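+  // (Added note: with E2 equal to Y2 and a small R2, the echo-to-nearend
+  // ratio stays below the transparency thresholds, so the suppressor is
+  // expected to remain transparent, i.e. g stays close to 1.)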
+ for (size_t ch = 0; ch < kNumCaptureChannels; ++ch) { + E2[ch].fill(100.f); + Y2[ch].fill(100.f); + R2[ch].fill(0.1f); + S2[ch].fill(0.1f); + N2[ch].fill(0.f); + } + + for (int k = 0; k < 100; ++k) { + aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponses(), + subtractor.FilterImpulseResponses(), + *render_delay_buffer->GetRenderBuffer(), E2, Y2, output); + suppression_gain.GetGain(E2, S2, R2, N2, analyzer, aec_state, x, + &high_bands_gain, &g); + } + std::for_each(g.begin(), g.end(), + [](float a) { EXPECT_NEAR(1.f, a, 0.001); }); + + // Add a strong echo to one of the channels and ensure that it is suppressed. + E2[1].fill(1000000000.f); + R2[1].fill(10000000000000.f); + + for (int k = 0; k < 10; ++k) { + suppression_gain.GetGain(E2, S2, R2, N2, analyzer, aec_state, x, + &high_bands_gain, &g); + } + std::for_each(g.begin(), g.end(), + [](float a) { EXPECT_NEAR(0.f, a, 0.001); }); +} + +} // namespace aec3 +} // namespace webrtc diff --git a/audio_processing/aec3/vector_math.h b/audio_processing/aec3/vector_math.h new file mode 100644 index 0000000..1273914 --- /dev/null +++ b/audio_processing/aec3/vector_math.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AEC3_VECTOR_MATH_H_ +#define MODULES_AUDIO_PROCESSING_AEC3_VECTOR_MATH_H_ + +// Defines WEBRTC_ARCH_X86_FAMILY, used below. +#include "rtc_base/system/arch.h" + +#if defined(WEBRTC_HAS_NEON) +#include +#endif +#if defined(WEBRTC_ARCH_X86_FAMILY) +#include +#endif +#include + +#include +#include +#include + +#include "rtc_base/array_view.h" +#include "audio_processing/aec3/aec3_common.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace aec3 { + +// Provides optimizations for mathematical operations based on vectors. +class VectorMath { + public: + explicit VectorMath(Aec3Optimization optimization) + : optimization_(optimization) {} + + // Elementwise square root. + void Sqrt(rtc::ArrayView x) { + switch (optimization_) { +#if defined(WEBRTC_ARCH_X86_FAMILY) + case Aec3Optimization::kSse2: { + const int x_size = static_cast(x.size()); + const int vector_limit = x_size >> 2; + + int j = 0; + for (; j < vector_limit * 4; j += 4) { + __m128 g = _mm_loadu_ps(&x[j]); + g = _mm_sqrt_ps(g); + _mm_storeu_ps(&x[j], g); + } + + for (; j < x_size; ++j) { + x[j] = sqrtf(x[j]); + } + } break; +#endif +#if defined(WEBRTC_HAS_NEON) + case Aec3Optimization::kNeon: { + const int x_size = static_cast(x.size()); + const int vector_limit = x_size >> 2; + + int j = 0; + for (; j < vector_limit * 4; j += 4) { + float32x4_t g = vld1q_f32(&x[j]); +#if !defined(WEBRTC_ARCH_ARM64) + float32x4_t y = vrsqrteq_f32(g); + + // Code to handle sqrt(0). + // If the input to sqrtf() is zero, a zero will be returned. + // If the input to vrsqrteq_f32() is zero, positive infinity is + // returned. 
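+          // (Added note: the masking below zeroes the lanes where
+          // vrsqrteq_f32() returned +Inf, so the final multiply
+          // g * (1/sqrt(g)) yields 0 rather than NaN for zero inputs.)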
+ const uint32x4_t vec_p_inf = vdupq_n_u32(0x7F800000); + // check for divide by zero + const uint32x4_t div_by_zero = + vceqq_u32(vec_p_inf, vreinterpretq_u32_f32(y)); + // zero out the positive infinity results + y = vreinterpretq_f32_u32( + vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(y))); + // from arm documentation + // The Newton-Raphson iteration: + // y[n+1] = y[n] * (3 - d * (y[n] * y[n])) / 2) + // converges to (1/√d) if y0 is the result of VRSQRTE applied to d. + // + // Note: The precision did not improve after 2 iterations. + for (int i = 0; i < 2; i++) { + y = vmulq_f32(vrsqrtsq_f32(vmulq_f32(y, y), g), y); + } + // sqrt(g) = g * 1/sqrt(g) + g = vmulq_f32(g, y); +#else + g = vsqrtq_f32(g); +#endif + vst1q_f32(&x[j], g); + } + + for (; j < x_size; ++j) { + x[j] = sqrtf(x[j]); + } + } +#endif + break; + default: + std::for_each(x.begin(), x.end(), [](float& a) { a = sqrtf(a); }); + } + } + + // Elementwise vector multiplication z = x * y. + void Multiply(rtc::ArrayView x, + rtc::ArrayView y, + rtc::ArrayView z) { + RTC_DCHECK_EQ(z.size(), x.size()); + RTC_DCHECK_EQ(z.size(), y.size()); + switch (optimization_) { +#if defined(WEBRTC_ARCH_X86_FAMILY) + case Aec3Optimization::kSse2: { + const int x_size = static_cast(x.size()); + const int vector_limit = x_size >> 2; + + int j = 0; + for (; j < vector_limit * 4; j += 4) { + const __m128 x_j = _mm_loadu_ps(&x[j]); + const __m128 y_j = _mm_loadu_ps(&y[j]); + const __m128 z_j = _mm_mul_ps(x_j, y_j); + _mm_storeu_ps(&z[j], z_j); + } + + for (; j < x_size; ++j) { + z[j] = x[j] * y[j]; + } + } break; +#endif +#if defined(WEBRTC_HAS_NEON) + case Aec3Optimization::kNeon: { + const int x_size = static_cast(x.size()); + const int vector_limit = x_size >> 2; + + int j = 0; + for (; j < vector_limit * 4; j += 4) { + const float32x4_t x_j = vld1q_f32(&x[j]); + const float32x4_t y_j = vld1q_f32(&y[j]); + const float32x4_t z_j = vmulq_f32(x_j, y_j); + vst1q_f32(&z[j], z_j); + } + + for (; j < x_size; ++j) { + z[j] = x[j] * y[j]; + } + } break; +#endif + default: + std::transform(x.begin(), x.end(), y.begin(), z.begin(), + std::multiplies()); + } + } + + // Elementwise vector accumulation z += x. 
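+  // Usage sketch (added comment, illustrative only; the buffers are made up):
+  //   std::array<float, kFftLengthBy2Plus1> x{}, z{};
+  //   aec3::VectorMath math(DetectOptimization());
+  //   math.Accumulate(x, z);  // z += x, using SSE2/NEON when available.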
+ void Accumulate(rtc::ArrayView x, rtc::ArrayView z) { + RTC_DCHECK_EQ(z.size(), x.size()); + switch (optimization_) { +#if defined(WEBRTC_ARCH_X86_FAMILY) + case Aec3Optimization::kSse2: { + const int x_size = static_cast(x.size()); + const int vector_limit = x_size >> 2; + + int j = 0; + for (; j < vector_limit * 4; j += 4) { + const __m128 x_j = _mm_loadu_ps(&x[j]); + __m128 z_j = _mm_loadu_ps(&z[j]); + z_j = _mm_add_ps(x_j, z_j); + _mm_storeu_ps(&z[j], z_j); + } + + for (; j < x_size; ++j) { + z[j] += x[j]; + } + } break; +#endif +#if defined(WEBRTC_HAS_NEON) + case Aec3Optimization::kNeon: { + const int x_size = static_cast(x.size()); + const int vector_limit = x_size >> 2; + + int j = 0; + for (; j < vector_limit * 4; j += 4) { + const float32x4_t x_j = vld1q_f32(&x[j]); + float32x4_t z_j = vld1q_f32(&z[j]); + z_j = vaddq_f32(z_j, x_j); + vst1q_f32(&z[j], z_j); + } + + for (; j < x_size; ++j) { + z[j] += x[j]; + } + } break; +#endif + default: + std::transform(x.begin(), x.end(), z.begin(), z.begin(), + std::plus()); + } + } + + private: + Aec3Optimization optimization_; +}; + +} // namespace aec3 + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AEC3_VECTOR_MATH_H_ diff --git a/audio_processing/aec3/vector_math_unittest.cc b/audio_processing/aec3/vector_math_unittest.cc new file mode 100644 index 0000000..fdab2e5 --- /dev/null +++ b/audio_processing/aec3/vector_math_unittest.cc @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/audio_processing/aec3/vector_math.h" + +#include + +#include "rtc_base/system/arch.h" +#include "system_wrappers/include/cpu_features_wrapper.h" +#include "test/gtest.h" + +namespace webrtc { + +#if defined(WEBRTC_HAS_NEON) + +TEST(VectorMath, Sqrt) { + std::array x; + std::array z; + std::array z_neon; + + for (size_t k = 0; k < x.size(); ++k) { + x[k] = (2.f / 3.f) * k; + } + + std::copy(x.begin(), x.end(), z.begin()); + aec3::VectorMath(Aec3Optimization::kNone).Sqrt(z); + std::copy(x.begin(), x.end(), z_neon.begin()); + aec3::VectorMath(Aec3Optimization::kNeon).Sqrt(z_neon); + for (size_t k = 0; k < z.size(); ++k) { + EXPECT_NEAR(z[k], z_neon[k], 0.0001f); + EXPECT_NEAR(sqrtf(x[k]), z_neon[k], 0.0001f); + } +} + +TEST(VectorMath, Multiply) { + std::array x; + std::array y; + std::array z; + std::array z_neon; + + for (size_t k = 0; k < x.size(); ++k) { + x[k] = k; + y[k] = (2.f / 3.f) * k; + } + + aec3::VectorMath(Aec3Optimization::kNone).Multiply(x, y, z); + aec3::VectorMath(Aec3Optimization::kNeon).Multiply(x, y, z_neon); + for (size_t k = 0; k < z.size(); ++k) { + EXPECT_FLOAT_EQ(z[k], z_neon[k]); + EXPECT_FLOAT_EQ(x[k] * y[k], z_neon[k]); + } +} + +TEST(VectorMath, Accumulate) { + std::array x; + std::array z; + std::array z_neon; + + for (size_t k = 0; k < x.size(); ++k) { + x[k] = k; + z[k] = z_neon[k] = 2.f * k; + } + + aec3::VectorMath(Aec3Optimization::kNone).Accumulate(x, z); + aec3::VectorMath(Aec3Optimization::kNeon).Accumulate(x, z_neon); + for (size_t k = 0; k < z.size(); ++k) { + EXPECT_FLOAT_EQ(z[k], z_neon[k]); + EXPECT_FLOAT_EQ(x[k] + 2.f * x[k], z_neon[k]); + } +} +#endif + +#if defined(WEBRTC_ARCH_X86_FAMILY) + +TEST(VectorMath, Sqrt) { + if (WebRtc_GetCPUInfo(kSSE2) != 0) { + std::array x; + std::array z; + std::array z_sse2; + + for (size_t k = 0; k < x.size(); ++k) { + x[k] = (2.f / 3.f) * k; + } + + std::copy(x.begin(), x.end(), z.begin()); + aec3::VectorMath(Aec3Optimization::kNone).Sqrt(z); + std::copy(x.begin(), x.end(), z_sse2.begin()); + aec3::VectorMath(Aec3Optimization::kSse2).Sqrt(z_sse2); + EXPECT_EQ(z, z_sse2); + for (size_t k = 0; k < z.size(); ++k) { + EXPECT_FLOAT_EQ(z[k], z_sse2[k]); + EXPECT_FLOAT_EQ(sqrtf(x[k]), z_sse2[k]); + } + } +} + +TEST(VectorMath, Multiply) { + if (WebRtc_GetCPUInfo(kSSE2) != 0) { + std::array x; + std::array y; + std::array z; + std::array z_sse2; + + for (size_t k = 0; k < x.size(); ++k) { + x[k] = k; + y[k] = (2.f / 3.f) * k; + } + + aec3::VectorMath(Aec3Optimization::kNone).Multiply(x, y, z); + aec3::VectorMath(Aec3Optimization::kSse2).Multiply(x, y, z_sse2); + for (size_t k = 0; k < z.size(); ++k) { + EXPECT_FLOAT_EQ(z[k], z_sse2[k]); + EXPECT_FLOAT_EQ(x[k] * y[k], z_sse2[k]); + } + } +} + +TEST(VectorMath, Accumulate) { + if (WebRtc_GetCPUInfo(kSSE2) != 0) { + std::array x; + std::array z; + std::array z_sse2; + + for (size_t k = 0; k < x.size(); ++k) { + x[k] = k; + z[k] = z_sse2[k] = 2.f * k; + } + + aec3::VectorMath(Aec3Optimization::kNone).Accumulate(x, z); + aec3::VectorMath(Aec3Optimization::kSse2).Accumulate(x, z_sse2); + for (size_t k = 0; k < z.size(); ++k) { + EXPECT_FLOAT_EQ(z[k], z_sse2[k]); + EXPECT_FLOAT_EQ(x[k] + 2.f * x[k], z_sse2[k]); + } + } +} +#endif + +} // namespace webrtc diff --git a/audio_processing/audio_buffer.cc b/audio_processing/audio_buffer.cc new file mode 100644 index 0000000..52d5fe7 --- /dev/null +++ b/audio_processing/audio_buffer.cc @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/audio_buffer.h" + +#include + +#include + +#include "audio_processing/channel_buffer.h" +#include "audio_processing/include/audio_util.h" +#include "audio_processing/resampler/push_sinc_resampler.h" +#include "audio_processing/splitting_filter.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace { + +constexpr size_t kSamplesPer32kHzChannel = 320; +constexpr size_t kSamplesPer48kHzChannel = 480; +constexpr size_t kMaxSamplesPerChannel = AudioBuffer::kMaxSampleRate / 100; + +size_t NumBandsFromFramesPerChannel(size_t num_frames) { + if (num_frames == kSamplesPer32kHzChannel) { + return 2; + } + if (num_frames == kSamplesPer48kHzChannel) { + return 3; + } + return 1; +} + +} // namespace + +AudioBuffer::AudioBuffer(size_t input_rate, + size_t input_num_channels, + size_t buffer_rate, + size_t buffer_num_channels, + size_t output_rate, + size_t output_num_channels) + : AudioBuffer(static_cast(input_rate) / 100, + input_num_channels, + static_cast(buffer_rate) / 100, + buffer_num_channels, + static_cast(output_rate) / 100) {} + +AudioBuffer::AudioBuffer(size_t input_num_frames, + size_t input_num_channels, + size_t buffer_num_frames, + size_t buffer_num_channels, + size_t output_num_frames) + : input_num_frames_(input_num_frames), + input_num_channels_(input_num_channels), + buffer_num_frames_(buffer_num_frames), + buffer_num_channels_(buffer_num_channels), + output_num_frames_(output_num_frames), + output_num_channels_(0), + num_channels_(buffer_num_channels), + num_bands_(NumBandsFromFramesPerChannel(buffer_num_frames_)), + num_split_frames_(rtc::CheckedDivExact(buffer_num_frames_, num_bands_)), + data_( + new ChannelBuffer(buffer_num_frames_, buffer_num_channels_)) { + RTC_DCHECK_GT(input_num_frames_, 0); + RTC_DCHECK_GT(buffer_num_frames_, 0); + RTC_DCHECK_GT(output_num_frames_, 0); + RTC_DCHECK_GT(input_num_channels_, 0); + RTC_DCHECK_GT(buffer_num_channels_, 0); + RTC_DCHECK_LE(buffer_num_channels_, input_num_channels_); + + const bool input_resampling_needed = input_num_frames_ != buffer_num_frames_; + const bool output_resampling_needed = + output_num_frames_ != buffer_num_frames_; + if (input_resampling_needed) { + for (size_t i = 0; i < buffer_num_channels_; ++i) { + input_resamplers_.push_back(std::unique_ptr( + new PushSincResampler(input_num_frames_, buffer_num_frames_))); + } + } + + if (output_resampling_needed) { + for (size_t i = 0; i < buffer_num_channels_; ++i) { + output_resamplers_.push_back(std::unique_ptr( + new PushSincResampler(buffer_num_frames_, output_num_frames_))); + } + } + + if (num_bands_ > 1) { + split_data_.reset(new ChannelBuffer( + buffer_num_frames_, buffer_num_channels_, num_bands_)); + splitting_filter_.reset(new SplittingFilter( + buffer_num_channels_, num_bands_, buffer_num_frames_)); + } +} + +AudioBuffer::~AudioBuffer() {} + +void AudioBuffer::set_downmixing_to_specific_channel(size_t channel) { + downmix_by_averaging_ = false; + RTC_DCHECK_GT(input_num_channels_, channel); + channel_for_downmixing_ = std::min(channel, input_num_channels_ - 1); +} + +void AudioBuffer::set_downmixing_by_averaging() { + downmix_by_averaging_ = true; +} + +void AudioBuffer::CopyFrom(const float* 
const* data, + const StreamConfig& stream_config) { + RTC_DCHECK_EQ(stream_config.num_frames(), input_num_frames_); + RTC_DCHECK_EQ(stream_config.num_channels(), input_num_channels_); + RestoreNumChannels(); + const bool downmix_needed = input_num_channels_ > 1 && num_channels_ == 1; + + const bool resampling_needed = input_num_frames_ != buffer_num_frames_; + + if (downmix_needed) { + RTC_DCHECK_GE(kMaxSamplesPerChannel, input_num_frames_); + + std::array downmix; + if (downmix_by_averaging_) { + const float kOneByNumChannels = 1.f / input_num_channels_; + for (size_t i = 0; i < input_num_frames_; ++i) { + float value = data[0][i]; + for (size_t j = 1; j < input_num_channels_; ++j) { + value += data[j][i]; + } + downmix[i] = value * kOneByNumChannels; + } + } + const float* downmixed_data = + downmix_by_averaging_ ? downmix.data() : data[channel_for_downmixing_]; + + if (resampling_needed) { + input_resamplers_[0]->Resample(downmixed_data, input_num_frames_, + data_->channels()[0], buffer_num_frames_); + } + const float* data_to_convert = + resampling_needed ? data_->channels()[0] : downmixed_data; + FloatToFloatS16(data_to_convert, buffer_num_frames_, data_->channels()[0]); + } else { + if (resampling_needed) { + for (size_t i = 0; i < num_channels_; ++i) { + input_resamplers_[i]->Resample(data[i], input_num_frames_, + data_->channels()[i], + buffer_num_frames_); + FloatToFloatS16(data_->channels()[i], buffer_num_frames_, + data_->channels()[i]); + } + } else { + for (size_t i = 0; i < num_channels_; ++i) { + FloatToFloatS16(data[i], buffer_num_frames_, data_->channels()[i]); + } + } + } +} + +void AudioBuffer::CopyTo(const StreamConfig& stream_config, + float* const* data) { + RTC_DCHECK_EQ(stream_config.num_frames(), output_num_frames_); + + const bool resampling_needed = output_num_frames_ != buffer_num_frames_; + if (resampling_needed) { + for (size_t i = 0; i < num_channels_; ++i) { + FloatS16ToFloat(data_->channels()[i], buffer_num_frames_, + data_->channels()[i]); + output_resamplers_[i]->Resample(data_->channels()[i], buffer_num_frames_, + data[i], output_num_frames_); + } + } else { + for (size_t i = 0; i < num_channels_; ++i) { + FloatS16ToFloat(data_->channels()[i], buffer_num_frames_, data[i]); + } + } + + for (size_t i = num_channels_; i < stream_config.num_channels(); ++i) { + memcpy(data[i], data[0], output_num_frames_ * sizeof(**data)); + } +} + +void AudioBuffer::CopyTo(AudioBuffer* buffer) const { + RTC_DCHECK_EQ(buffer->num_frames(), output_num_frames_); + + const bool resampling_needed = output_num_frames_ != buffer_num_frames_; + if (resampling_needed) { + for (size_t i = 0; i < num_channels_; ++i) { + output_resamplers_[i]->Resample(data_->channels()[i], buffer_num_frames_, + buffer->channels()[i], + buffer->num_frames()); + } + } else { + for (size_t i = 0; i < num_channels_; ++i) { + memcpy(buffer->channels()[i], data_->channels()[i], + buffer_num_frames_ * sizeof(**buffer->channels())); + } + } + + for (size_t i = num_channels_; i < buffer->num_channels(); ++i) { + memcpy(buffer->channels()[i], buffer->channels()[0], + output_num_frames_ * sizeof(**buffer->channels())); + } +} + +void AudioBuffer::RestoreNumChannels() { + num_channels_ = buffer_num_channels_; + data_->set_num_channels(buffer_num_channels_); + if (split_data_.get()) { + split_data_->set_num_channels(buffer_num_channels_); + } +} + +void AudioBuffer::set_num_channels(size_t num_channels) { + RTC_DCHECK_GE(buffer_num_channels_, num_channels); + num_channels_ = num_channels; + 
data_->set_num_channels(num_channels); + if (split_data_.get()) { + split_data_->set_num_channels(num_channels); + } +} + +// The resampler is only for supporting 48kHz to 16kHz in the reverse stream. +void AudioBuffer::CopyFrom(const AudioFrame* frame) { + RTC_DCHECK_EQ(frame->num_channels_, input_num_channels_); + RTC_DCHECK_EQ(frame->samples_per_channel_, input_num_frames_); + RestoreNumChannels(); + + const bool resampling_required = input_num_frames_ != buffer_num_frames_; + + const int16_t* interleaved = frame->data(); + if (num_channels_ == 1) { + if (input_num_channels_ == 1) { + if (resampling_required) { + std::array float_buffer; + S16ToFloatS16(interleaved, input_num_frames_, float_buffer.data()); + input_resamplers_[0]->Resample(float_buffer.data(), input_num_frames_, + data_->channels()[0], + buffer_num_frames_); + } else { + S16ToFloatS16(interleaved, input_num_frames_, data_->channels()[0]); + } + } else { + std::array float_buffer; + float* downmixed_data = + resampling_required ? float_buffer.data() : data_->channels()[0]; + if (downmix_by_averaging_) { + for (size_t j = 0, k = 0; j < input_num_frames_; ++j) { + int32_t sum = 0; + for (size_t i = 0; i < input_num_channels_; ++i, ++k) { + sum += interleaved[k]; + } + downmixed_data[j] = sum / static_cast(input_num_channels_); + } + } else { + for (size_t j = 0, k = channel_for_downmixing_; j < input_num_frames_; + ++j, k += input_num_channels_) { + downmixed_data[j] = interleaved[k]; + } + } + + if (resampling_required) { + input_resamplers_[0]->Resample(downmixed_data, input_num_frames_, + data_->channels()[0], + buffer_num_frames_); + } + } + } else { + auto deinterleave_channel = [](size_t channel, size_t num_channels, + size_t samples_per_channel, const int16_t* x, + float* y) { + for (size_t j = 0, k = channel; j < samples_per_channel; + ++j, k += num_channels) { + y[j] = x[k]; + } + }; + + if (resampling_required) { + std::array float_buffer; + for (size_t i = 0; i < num_channels_; ++i) { + deinterleave_channel(i, num_channels_, input_num_frames_, interleaved, + float_buffer.data()); + input_resamplers_[i]->Resample(float_buffer.data(), input_num_frames_, + data_->channels()[i], + buffer_num_frames_); + } + } else { + for (size_t i = 0; i < num_channels_; ++i) { + deinterleave_channel(i, num_channels_, input_num_frames_, interleaved, + data_->channels()[i]); + } + } + } +} + +void AudioBuffer::CopyTo(AudioFrame* frame) const { + RTC_DCHECK(frame->num_channels_ == num_channels_ || num_channels_ == 1); + RTC_DCHECK_EQ(frame->samples_per_channel_, output_num_frames_); + + const bool resampling_required = buffer_num_frames_ != output_num_frames_; + + int16_t* interleaved = frame->mutable_data(); + if (num_channels_ == 1) { + std::array float_buffer; + + if (resampling_required) { + output_resamplers_[0]->Resample(data_->channels()[0], buffer_num_frames_, + float_buffer.data(), output_num_frames_); + } + const float* deinterleaved = + resampling_required ? 
float_buffer.data() : data_->channels()[0]; + + if (frame->num_channels_ == 1) { + for (size_t j = 0; j < output_num_frames_; ++j) { + interleaved[j] = FloatS16ToS16(deinterleaved[j]); + } + } else { + for (size_t i = 0, k = 0; i < output_num_frames_; ++i) { + float tmp = FloatS16ToS16(deinterleaved[i]); + for (size_t j = 0; j < frame->num_channels_; ++j, ++k) { + interleaved[k] = tmp; + } + } + } + } else { + auto interleave_channel = [](size_t channel, size_t num_channels, + size_t samples_per_channel, const float* x, + int16_t* y) { + for (size_t k = 0, j = channel; k < samples_per_channel; + ++k, j += num_channels) { + y[j] = FloatS16ToS16(x[k]); + } + }; + + if (resampling_required) { + for (size_t i = 0; i < num_channels_; ++i) { + std::array float_buffer; + output_resamplers_[i]->Resample(data_->channels()[i], + buffer_num_frames_, float_buffer.data(), + output_num_frames_); + interleave_channel(i, frame->num_channels_, output_num_frames_, + float_buffer.data(), interleaved); + } + } else { + for (size_t i = 0; i < num_channels_; ++i) { + interleave_channel(i, frame->num_channels_, output_num_frames_, + data_->channels()[i], interleaved); + } + } + + for (size_t i = num_channels_; i < frame->num_channels_; ++i) { + for (size_t j = 0, k = i, n = num_channels_; j < output_num_frames_; + ++j, k += frame->num_channels_, n += frame->num_channels_) { + interleaved[k] = interleaved[n]; + } + } + } +} + +void AudioBuffer::SplitIntoFrequencyBands() { + splitting_filter_->Analysis(data_.get(), split_data_.get()); +} + +void AudioBuffer::MergeFrequencyBands() { + splitting_filter_->Synthesis(split_data_.get(), data_.get()); +} + +void AudioBuffer::ExportSplitChannelData( + size_t channel, + int16_t* const* split_band_data) const { + for (size_t k = 0; k < num_bands(); ++k) { + const float* band_data = split_bands_const(channel)[k]; + + RTC_DCHECK(split_band_data[k]); + RTC_DCHECK(band_data); + for (size_t i = 0; i < num_frames_per_band(); ++i) { + split_band_data[k][i] = FloatS16ToS16(band_data[i]); + } + } +} + +void AudioBuffer::ImportSplitChannelData( + size_t channel, + const int16_t* const* split_band_data) { + for (size_t k = 0; k < num_bands(); ++k) { + float* band_data = split_bands(channel)[k]; + RTC_DCHECK(split_band_data[k]); + RTC_DCHECK(band_data); + for (size_t i = 0; i < num_frames_per_band(); ++i) { + band_data[i] = split_band_data[k][i]; + } + } +} + +} // namespace webrtc diff --git a/audio_processing/audio_buffer.h b/audio_processing/audio_buffer.h new file mode 100644 index 0000000..83b38e7 --- /dev/null +++ b/audio_processing/audio_buffer.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_ +#define MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_ + +#include +#include + +#include +#include + +#include "audio_frame.h" +#include "rtc_base/array_view.h" +#include "channel_buffer.h" +#include "audio_processing/include/audio_processing.h" + +namespace webrtc { + +class PushSincResampler; +class SplittingFilter; + +enum Band { kBand0To8kHz = 0, kBand8To16kHz = 1, kBand16To24kHz = 2 }; + +// Stores any audio data in a way that allows the audio processing module to +// operate on it in a controlled manner. +class AudioBuffer { + public: + static const int kSplitBandSize = 160; + static const size_t kMaxSampleRate = 384000; + AudioBuffer(size_t input_rate, + size_t input_num_channels, + size_t buffer_rate, + size_t buffer_num_channels, + size_t output_rate, + size_t output_num_channels); + + // The constructor below will be deprecated. + AudioBuffer(size_t input_num_frames, + size_t input_num_channels, + size_t buffer_num_frames, + size_t buffer_num_channels, + size_t output_num_frames); + virtual ~AudioBuffer(); + + AudioBuffer(const AudioBuffer&) = delete; + AudioBuffer& operator=(const AudioBuffer&) = delete; + + // Specify that downmixing should be done by selecting a single channel. + void set_downmixing_to_specific_channel(size_t channel); + + // Specify that downmixing should be done by averaging all channels,. + void set_downmixing_by_averaging(); + + // Set the number of channels in the buffer. The specified number of channels + // cannot be larger than the specified buffer_num_channels. The number is also + // reset at each call to CopyFrom or InterleaveFrom. + void set_num_channels(size_t num_channels); + + size_t num_channels() const { return num_channels_; } + size_t num_frames() const { return buffer_num_frames_; } + size_t num_frames_per_band() const { return num_split_frames_; } + size_t num_bands() const { return num_bands_; } + + // Returns pointer arrays to the full-band channels. + // Usage: + // channels()[channel][sample]. + // Where: + // 0 <= channel < |buffer_num_channels_| + // 0 <= sample < |buffer_num_frames_| + float* const* channels() { return data_->channels(); } + const float* const* channels_const() const { return data_->channels(); } + + // Returns pointer arrays to the bands for a specific channel. + // Usage: + // split_bands(channel)[band][sample]. + // Where: + // 0 <= channel < |buffer_num_channels_| + // 0 <= band < |num_bands_| + // 0 <= sample < |num_split_frames_| + const float* const* split_bands_const(size_t channel) const { + return split_data_.get() ? split_data_->bands(channel) + : data_->bands(channel); + } + float* const* split_bands(size_t channel) { + return split_data_.get() ? split_data_->bands(channel) + : data_->bands(channel); + } + + // Returns a pointer array to the channels for a specific band. + // Usage: + // split_channels(band)[channel][sample]. + // Where: + // 0 <= band < |num_bands_| + // 0 <= channel < |buffer_num_channels_| + // 0 <= sample < |num_split_frames_| + const float* const* split_channels_const(Band band) const { + if (split_data_.get()) { + return split_data_->channels(band); + } else { + return band == kBand0To8kHz ? data_->channels() : nullptr; + } + } + + // Copies data into the buffer. + void CopyFrom(const AudioFrame* frame); + void CopyFrom(const float* const* data, const StreamConfig& stream_config); + + // Copies data from the buffer. 
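+  // (Added note: the CopyTo overloads below mirror the CopyFrom overloads;
+  // they resample towards the output rate and convert from the internal
+  // floating-point S16-range representation where needed.)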
+ void CopyTo(AudioFrame* frame) const; + void CopyTo(const StreamConfig& stream_config, float* const* data); + void CopyTo(AudioBuffer* buffer) const; + + // Splits the buffer data into frequency bands. + void SplitIntoFrequencyBands(); + + // Recombines the frequency bands into a full-band signal. + void MergeFrequencyBands(); + + // Copies the split bands data into the integer two-dimensional array. + void ExportSplitChannelData(size_t channel, + int16_t* const* split_band_data) const; + + // Copies the data in the integer two-dimensional array into the split_bands + // data. + void ImportSplitChannelData(size_t channel, + const int16_t* const* split_band_data); + + static const size_t kMaxSplitFrameLength = 160; + static const size_t kMaxNumBands = 3; + + // Deprecated methods, will be removed soon. + float* const* channels_f() { return channels(); } + const float* const* channels_const_f() const { return channels_const(); } + const float* const* split_bands_const_f(size_t channel) const { + return split_bands_const(channel); + } + float* const* split_bands_f(size_t channel) { return split_bands(channel); } + const float* const* split_channels_const_f(Band band) const { + return split_channels_const(band); + } + void DeinterleaveFrom(const AudioFrame* frame) { CopyFrom(frame); } + void InterleaveTo(AudioFrame* frame) const { CopyTo(frame); } + + private: + //FRIEND_TEST_ALL_PREFIXES(AudioBufferTest, + // SetNumChannelsSetsChannelBuffersNumChannels); + void RestoreNumChannels(); + + const size_t input_num_frames_; + const size_t input_num_channels_; + const size_t buffer_num_frames_; + const size_t buffer_num_channels_; + const size_t output_num_frames_; + const size_t output_num_channels_; + + size_t num_channels_; + size_t num_bands_; + size_t num_split_frames_; + + std::unique_ptr> data_; + std::unique_ptr> split_data_; + std::unique_ptr splitting_filter_; + std::vector> input_resamplers_; + std::vector> output_resamplers_; + bool downmix_by_averaging_ = true; + size_t channel_for_downmixing_ = 0; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_ diff --git a/audio_processing/audio_frame.cc b/audio_processing/audio_frame.cc new file mode 100644 index 0000000..9190626 --- /dev/null +++ b/audio_processing/audio_frame.cc @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_frame.h" + +#include + +#include "rtc_base/checks.h" +#include "rtc_base/time_utils.h" + +namespace webrtc { + +AudioFrame::AudioFrame() { + // Visual Studio doesn't like this in the class definition. + static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes"); +} + +void AudioFrame::Reset() { + ResetWithoutMuting(); + muted_ = true; +} + +void AudioFrame::ResetWithoutMuting() { + // TODO(wu): Zero is a valid value for |timestamp_|. We should initialize + // to an invalid value, or add a new member to indicate invalidity. 
+ timestamp_ = 0; + elapsed_time_ms_ = -1; + ntp_time_ms_ = -1; + samples_per_channel_ = 0; + sample_rate_hz_ = 0; + num_channels_ = 0; + channel_layout_ = CHANNEL_LAYOUT_NONE; + speech_type_ = kUndefined; + vad_activity_ = kVadUnknown; + profile_timestamp_ms_ = 0; + //packet_infos_ = RtpPacketInfos(); +} + +void AudioFrame::UpdateFrame(uint32_t timestamp, + const int16_t* data, + size_t samples_per_channel, + int sample_rate_hz, + SpeechType speech_type, + VADActivity vad_activity, + size_t num_channels) { + timestamp_ = timestamp; + samples_per_channel_ = samples_per_channel; + sample_rate_hz_ = sample_rate_hz; + speech_type_ = speech_type; + vad_activity_ = vad_activity; + num_channels_ = num_channels; + channel_layout_ = GuessChannelLayout(num_channels); + if (channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED) { + RTC_DCHECK_EQ(num_channels, ChannelLayoutToChannelCount(channel_layout_)); + } + + const size_t length = samples_per_channel * num_channels; + RTC_CHECK_LE(length, kMaxDataSizeSamples); + if (data != nullptr) { + memcpy(data_, data, sizeof(int16_t) * length); + muted_ = false; + } else { + muted_ = true; + } +} + +void AudioFrame::CopyFrom(const AudioFrame& src) { + if (this == &src) + return; + + timestamp_ = src.timestamp_; + elapsed_time_ms_ = src.elapsed_time_ms_; + ntp_time_ms_ = src.ntp_time_ms_; + //packet_infos_ = src.packet_infos_; + muted_ = src.muted(); + samples_per_channel_ = src.samples_per_channel_; + sample_rate_hz_ = src.sample_rate_hz_; + speech_type_ = src.speech_type_; + vad_activity_ = src.vad_activity_; + num_channels_ = src.num_channels_; + channel_layout_ = src.channel_layout_; + + const size_t length = samples_per_channel_ * num_channels_; + RTC_CHECK_LE(length, kMaxDataSizeSamples); + if (!src.muted()) { + memcpy(data_, src.data(), sizeof(int16_t) * length); + muted_ = false; + } +} + +void AudioFrame::UpdateProfileTimeStamp() { + profile_timestamp_ms_ = rtc::TimeMillis(); +} + +int64_t AudioFrame::ElapsedProfileTimeMs() const { + if (profile_timestamp_ms_ == 0) { + // Profiling has not been activated. + return -1; + } + return rtc::TimeSince(profile_timestamp_ms_); +} + +const int16_t* AudioFrame::data() const { + return muted_ ? empty_data() : data_; +} + +// TODO(henrik.lundin) Can we skip zeroing the buffer? +// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5647. +int16_t* AudioFrame::mutable_data() { + if (muted_) { + memset(data_, 0, kMaxDataSizeBytes); + muted_ = false; + } + return data_; +} + +void AudioFrame::Mute() { + muted_ = true; +} + +bool AudioFrame::muted() const { + return muted_; +} + +// static +const int16_t* AudioFrame::empty_data() { + static int16_t* null_data = new int16_t[kMaxDataSizeSamples](); + return &null_data[0]; +} + +} // namespace webrtc diff --git a/audio_processing/audio_frame.h b/audio_processing/audio_frame.h new file mode 100644 index 0000000..1c906de --- /dev/null +++ b/audio_processing/audio_frame.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_AUDIO_AUDIO_FRAME_H_ +#define API_AUDIO_AUDIO_FRAME_H_ + +#include +#include + +#include "channel_layout.h" +//#include "api/rtp_packet_infos.h" +#include "rtc_base/constructor_magic.h" + +namespace webrtc { + +/* This class holds up to 120 ms of super-wideband (32 kHz) stereo audio. It + * allows for adding and subtracting frames while keeping track of the resulting + * states. + * + * Notes + * - This is a de-facto api, not designed for external use. The AudioFrame class + * is in need of overhaul or even replacement, and anyone depending on it + * should be prepared for that. + * - The total number of samples is samples_per_channel_ * num_channels_. + * - Stereo data is interleaved starting with the left channel. + */ +class AudioFrame { + public: + // Using constexpr here causes linker errors unless the variable also has an + // out-of-class definition, which is impractical in this header-only class. + // (This makes no sense because it compiles as an enum value, which we most + // certainly cannot take the address of, just fine.) C++17 introduces inline + // variables which should allow us to switch to constexpr and keep this a + // header-only class. + enum : size_t { + // Stereo, 32 kHz, 120 ms (2 * 32 * 120) + // Stereo, 192 kHz, 20 ms (2 * 192 * 20) + kMaxDataSizeSamples = 7680, + kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t), + }; + + enum VADActivity { kVadActive = 0, kVadPassive = 1, kVadUnknown = 2 }; + enum SpeechType { + kNormalSpeech = 0, + kPLC = 1, + kCNG = 2, + kPLCCNG = 3, + kCodecPLC = 5, + kUndefined = 4 + }; + + AudioFrame(); + + // Resets all members to their default state. + void Reset(); + // Same as Reset(), but leaves mute state unchanged. Muting a frame requires + // the buffer to be zeroed on the next call to mutable_data(). Callers + // intending to write to the buffer immediately after Reset() can instead use + // ResetWithoutMuting() to skip this wasteful zeroing. + void ResetWithoutMuting(); + + void UpdateFrame(uint32_t timestamp, + const int16_t* data, + size_t samples_per_channel, + int sample_rate_hz, + SpeechType speech_type, + VADActivity vad_activity, + size_t num_channels = 1); + + void CopyFrom(const AudioFrame& src); + + // Sets a wall-time clock timestamp in milliseconds to be used for profiling + // of time between two points in the audio chain. + // Example: + // t0: UpdateProfileTimeStamp() + // t1: ElapsedProfileTimeMs() => t1 - t0 [msec] + void UpdateProfileTimeStamp(); + // Returns the time difference between now and when UpdateProfileTimeStamp() + // was last called. Returns -1 if UpdateProfileTimeStamp() has not yet been + // called. + int64_t ElapsedProfileTimeMs() const; + + // data() returns a zeroed static buffer if the frame is muted. + // mutable_frame() always returns a non-static buffer; the first call to + // mutable_frame() zeros the non-static buffer and marks the frame unmuted. + const int16_t* data() const; + int16_t* mutable_data(); + + // Prefer to mute frames using AudioFrameOperations::Mute. + void Mute(); + // Frame is muted by default. + bool muted() const; + + size_t max_16bit_samples() const { return kMaxDataSizeSamples; } + size_t samples_per_channel() const { return samples_per_channel_; } + size_t num_channels() const { return num_channels_; } + ChannelLayout channel_layout() const { return channel_layout_; } + int sample_rate_hz() const { return sample_rate_hz_; } + + // RTP timestamp of the first sample in the AudioFrame. 
+ uint32_t timestamp_ = 0; + // Time since the first frame in milliseconds. + // -1 represents an uninitialized value. + int64_t elapsed_time_ms_ = -1; + // NTP time of the estimated capture time in local timebase in milliseconds. + // -1 represents an uninitialized value. + int64_t ntp_time_ms_ = -1; + size_t samples_per_channel_ = 0; + int sample_rate_hz_ = 0; + size_t num_channels_ = 0; + ChannelLayout channel_layout_ = CHANNEL_LAYOUT_NONE; + SpeechType speech_type_ = kUndefined; + VADActivity vad_activity_ = kVadUnknown; + // Monotonically increasing timestamp intended for profiling of audio frames. + // Typically used for measuring elapsed time between two different points in + // the audio path. No lock is used to save resources and we are thread safe + // by design. Also, absl::optional is not used since it will cause a "complex + // class/struct needs an explicit out-of-line destructor" build error. + int64_t profile_timestamp_ms_ = 0; + + // Information about packets used to assemble this audio frame. This is needed + // by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's + // MediaStreamTrack, in order to implement getContributingSources(). See: + // https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources + // + // TODO(bugs.webrtc.org/10757): + // Note that this information might not be fully accurate since we currently + // don't have a proper way to track it across the audio sync buffer. The + // sync buffer is the small sample-holding buffer located after the audio + // decoder and before where samples are assembled into output frames. + // + // |RtpPacketInfos| may also be empty if the audio samples did not come from + // RTP packets. E.g. if the audio were locally generated by packet loss + // concealment, comfort noise generation, etc. + //RtpPacketInfos packet_infos_; + + private: + // A permanently zeroed out buffer to represent muted frames. This is a + // header-only class, so the only way to avoid creating a separate empty + // buffer per translation unit is to wrap a static in an inline function. + static const int16_t* empty_data(); + + int16_t data_[kMaxDataSizeSamples]; + bool muted_ = true; + + RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame); +}; + +} // namespace webrtc + +#endif // API_AUDIO_AUDIO_FRAME_H_ diff --git a/audio_processing/channel_buffer.cc b/audio_processing/channel_buffer.cc new file mode 100644 index 0000000..d31d86b --- /dev/null +++ b/audio_processing/channel_buffer.cc @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "channel_buffer.h" + +#include + +#include "include/audio_util.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +IFChannelBuffer::IFChannelBuffer(size_t num_frames, + size_t num_channels, + size_t num_bands) + : ivalid_(true), + ibuf_(num_frames, num_channels, num_bands), + fvalid_(true), + fbuf_(num_frames, num_channels, num_bands) {} + +IFChannelBuffer::~IFChannelBuffer() = default; + +ChannelBuffer* IFChannelBuffer::ibuf() { + RefreshI(); + fvalid_ = false; + return &ibuf_; +} + +ChannelBuffer* IFChannelBuffer::fbuf() { + RefreshF(); + ivalid_ = false; + return &fbuf_; +} + +const ChannelBuffer* IFChannelBuffer::ibuf_const() const { + RefreshI(); + return &ibuf_; +} + +const ChannelBuffer* IFChannelBuffer::fbuf_const() const { + RefreshF(); + return &fbuf_; +} + +void IFChannelBuffer::RefreshF() const { + if (!fvalid_) { + RTC_DCHECK(ivalid_); + fbuf_.set_num_channels(ibuf_.num_channels()); + const int16_t* const* int_channels = ibuf_.channels(); + float* const* float_channels = fbuf_.channels(); + for (size_t i = 0; i < ibuf_.num_channels(); ++i) { + for (size_t j = 0; j < ibuf_.num_frames(); ++j) { + float_channels[i][j] = int_channels[i][j]; + } + } + fvalid_ = true; + } +} + +void IFChannelBuffer::RefreshI() const { + if (!ivalid_) { + RTC_DCHECK(fvalid_); + int16_t* const* int_channels = ibuf_.channels(); + ibuf_.set_num_channels(fbuf_.num_channels()); + const float* const* float_channels = fbuf_.channels(); + for (size_t i = 0; i < fbuf_.num_channels(); ++i) { + FloatS16ToS16(float_channels[i], ibuf_.num_frames(), int_channels[i]); + } + ivalid_ = true; + } +} + +} // namespace webrtc diff --git a/audio_processing/channel_buffer.h b/audio_processing/channel_buffer.h new file mode 100644 index 0000000..c536e54 --- /dev/null +++ b/audio_processing/channel_buffer.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef COMMON_AUDIO_CHANNEL_BUFFER_H_ +#define COMMON_AUDIO_CHANNEL_BUFFER_H_ + +#include + +#include + +#include "include/audio_util.h" +#include "rtc_base/checks.h" +//#include "rtc_base/gtest_prod_util.h" + +namespace webrtc { + +// Helper to encapsulate a contiguous data buffer, full or split into frequency +// bands, with access to a pointer arrays of the deinterleaved channels and +// bands. The buffer is zero initialized at creation. 
+// +// The buffer structure is showed below for a 2 channel and 2 bands case: +// +// |data_|: +// { [ --- b1ch1 --- ] [ --- b2ch1 --- ] [ --- b1ch2 --- ] [ --- b2ch2 --- ] } +// +// The pointer arrays for the same example are as follows: +// +// |channels_|: +// { [ b1ch1* ] [ b1ch2* ] [ b2ch1* ] [ b2ch2* ] } +// +// |bands_|: +// { [ b1ch1* ] [ b2ch1* ] [ b1ch2* ] [ b2ch2* ] } +template +class ChannelBuffer { + public: + ChannelBuffer(size_t num_frames, size_t num_channels, size_t num_bands = 1) + : data_(new T[num_frames * num_channels]()), + channels_(new T*[num_channels * num_bands]), + bands_(new T*[num_channels * num_bands]), + num_frames_(num_frames), + num_frames_per_band_(num_frames / num_bands), + num_allocated_channels_(num_channels), + num_channels_(num_channels), + num_bands_(num_bands) { + for (size_t i = 0; i < num_allocated_channels_; ++i) { + for (size_t j = 0; j < num_bands_; ++j) { + channels_[j * num_allocated_channels_ + i] = + &data_[i * num_frames_ + j * num_frames_per_band_]; + bands_[i * num_bands_ + j] = channels_[j * num_allocated_channels_ + i]; + } + } + } + + // Returns a pointer array to the full-band channels (or lower band channels). + // Usage: + // channels()[channel][sample]. + // Where: + // 0 <= channel < |num_allocated_channels_| + // 0 <= sample < |num_frames_| + T* const* channels() { return channels(0); } + const T* const* channels() const { return channels(0); } + + // Returns a pointer array to the channels for a specific band. + // Usage: + // channels(band)[channel][sample]. + // Where: + // 0 <= band < |num_bands_| + // 0 <= channel < |num_allocated_channels_| + // 0 <= sample < |num_frames_per_band_| + const T* const* channels(size_t band) const { + RTC_DCHECK_LT(band, num_bands_); + return &channels_[band * num_allocated_channels_]; + } + T* const* channels(size_t band) { + const ChannelBuffer* t = this; + return const_cast(t->channels(band)); + } + + // Returns a pointer array to the bands for a specific channel. + // Usage: + // bands(channel)[band][sample]. + // Where: + // 0 <= channel < |num_channels_| + // 0 <= band < |num_bands_| + // 0 <= sample < |num_frames_per_band_| + const T* const* bands(size_t channel) const { + RTC_DCHECK_LT(channel, num_channels_); + RTC_DCHECK_GE(channel, 0); + return &bands_[channel * num_bands_]; + } + T* const* bands(size_t channel) { + const ChannelBuffer* t = this; + return const_cast(t->bands(channel)); + } + + // Sets the |slice| pointers to the |start_frame| position for each channel. + // Returns |slice| for convenience. 
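+  // Usage sketch (added comment, illustrative; assumes a mono buffer |buf|
+  // with 160 frames):
+  //   float* view[1];
+  //   buf.Slice(view, 80);  // view[0] now points at the last 80 samples.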
+
+  // Sets the |slice| pointers to the |start_frame| position for each channel.
+  // Returns |slice| for convenience.
+  const T* const* Slice(T** slice, size_t start_frame) const {
+    RTC_DCHECK_LT(start_frame, num_frames_);
+    for (size_t i = 0; i < num_channels_; ++i)
+      slice[i] = &channels_[i][start_frame];
+    return slice;
+  }
+  T** Slice(T** slice, size_t start_frame) {
+    const ChannelBuffer<T>* t = this;
+    return const_cast<T**>(t->Slice(slice, start_frame));
+  }
+
+  size_t num_frames() const { return num_frames_; }
+  size_t num_frames_per_band() const { return num_frames_per_band_; }
+  size_t num_channels() const { return num_channels_; }
+  size_t num_bands() const { return num_bands_; }
+  size_t size() const { return num_frames_ * num_allocated_channels_; }
+
+  void set_num_channels(size_t num_channels) {
+    RTC_DCHECK_LE(num_channels, num_allocated_channels_);
+    num_channels_ = num_channels;
+  }
+
+  void SetDataForTesting(const T* data, size_t size) {
+    RTC_CHECK_EQ(size, this->size());
+    memcpy(data_.get(), data, size * sizeof(*data));
+  }
+
+ private:
+  std::unique_ptr<T[]> data_;
+  std::unique_ptr<T*[]> channels_;
+  std::unique_ptr<T*[]> bands_;
+  const size_t num_frames_;
+  const size_t num_frames_per_band_;
+  // Number of channels the internal buffer holds.
+  const size_t num_allocated_channels_;
+  // Number of channels the user sees.
+  size_t num_channels_;
+  const size_t num_bands_;
+};
+
+// One int16_t and one float ChannelBuffer that are kept in sync. The sync is
+// broken when someone requests write access to either ChannelBuffer, and
+// reestablished when someone requests the outdated ChannelBuffer. It is
+// therefore safe to use the return value of ibuf_const() and fbuf_const()
+// until the next call to ibuf() or fbuf(), and the return value of ibuf() and
+// fbuf() until the next call to any of the other functions.
+class IFChannelBuffer {
+ public:
+  IFChannelBuffer(size_t num_frames,
+                  size_t num_channels,
+                  size_t num_bands = 1);
+  ~IFChannelBuffer();
+
+  ChannelBuffer<int16_t>* ibuf();
+  ChannelBuffer<float>* fbuf();
+  const ChannelBuffer<int16_t>* ibuf_const() const;
+  const ChannelBuffer<float>* fbuf_const() const;
+
+  size_t num_frames() const { return ibuf_.num_frames(); }
+  size_t num_frames_per_band() const { return ibuf_.num_frames_per_band(); }
+  size_t num_channels() const {
+    return ivalid_ ? ibuf_.num_channels() : fbuf_.num_channels();
+  }
+  void set_num_channels(size_t num_channels) {
+    ibuf_.set_num_channels(num_channels);
+    fbuf_.set_num_channels(num_channels);
+  }
+  size_t num_bands() const { return ibuf_.num_bands(); }
+
+ private:
+  void RefreshF() const;  // Brings fbuf_ up to date with ibuf_.
+  void RefreshI() const;  // Brings ibuf_ up to date with fbuf_.
+
+  mutable bool ivalid_;
+  mutable ChannelBuffer<int16_t> ibuf_;
+  mutable bool fvalid_;
+  mutable ChannelBuffer<float> fbuf_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_CHANNEL_BUFFER_H_
diff --git a/audio_processing/channel_layout.cc b/audio_processing/channel_layout.cc
new file mode 100644
index 0000000..da7abae
--- /dev/null
+++ b/audio_processing/channel_layout.cc
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */ + +#include "channel_layout.h" + +#include + +#include "rtc_base/arraysize.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +static const int kLayoutToChannels[] = { + 0, // CHANNEL_LAYOUT_NONE + 0, // CHANNEL_LAYOUT_UNSUPPORTED + 1, // CHANNEL_LAYOUT_MONO + 2, // CHANNEL_LAYOUT_STEREO + 3, // CHANNEL_LAYOUT_2_1 + 3, // CHANNEL_LAYOUT_SURROUND + 4, // CHANNEL_LAYOUT_4_0 + 4, // CHANNEL_LAYOUT_2_2 + 4, // CHANNEL_LAYOUT_QUAD + 5, // CHANNEL_LAYOUT_5_0 + 6, // CHANNEL_LAYOUT_5_1 + 5, // CHANNEL_LAYOUT_5_0_BACK + 6, // CHANNEL_LAYOUT_5_1_BACK + 7, // CHANNEL_LAYOUT_7_0 + 8, // CHANNEL_LAYOUT_7_1 + 8, // CHANNEL_LAYOUT_7_1_WIDE + 2, // CHANNEL_LAYOUT_STEREO_DOWNMIX + 3, // CHANNEL_LAYOUT_2POINT1 + 4, // CHANNEL_LAYOUT_3_1 + 5, // CHANNEL_LAYOUT_4_1 + 6, // CHANNEL_LAYOUT_6_0 + 6, // CHANNEL_LAYOUT_6_0_FRONT + 6, // CHANNEL_LAYOUT_HEXAGONAL + 7, // CHANNEL_LAYOUT_6_1 + 7, // CHANNEL_LAYOUT_6_1_BACK + 7, // CHANNEL_LAYOUT_6_1_FRONT + 7, // CHANNEL_LAYOUT_7_0_FRONT + 8, // CHANNEL_LAYOUT_7_1_WIDE_BACK + 8, // CHANNEL_LAYOUT_OCTAGONAL + 0, // CHANNEL_LAYOUT_DISCRETE + 3, // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC + 5, // CHANNEL_LAYOUT_4_1_QUAD_SIDE + 0, // CHANNEL_LAYOUT_BITSTREAM +}; + +// The channel orderings for each layout as specified by FFmpeg. Each value +// represents the index of each channel in each layout. Values of -1 mean the +// channel at that index is not used for that layout. For example, the left side +// surround sound channel in FFmpeg's 5.1 layout is in the 5th position (because +// the order is L, R, C, LFE, LS, RS), so +// kChannelOrderings[CHANNEL_LAYOUT_5_1][SIDE_LEFT] = 4; +static const int kChannelOrderings[CHANNEL_LAYOUT_MAX + 1][CHANNELS_MAX + 1] = { + // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR + + // CHANNEL_LAYOUT_NONE + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_UNSUPPORTED + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_MONO + {-1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_STEREO + {0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_2_1 + {0, 1, -1, -1, -1, -1, -1, -1, 2, -1, -1}, + + // CHANNEL_LAYOUT_SURROUND + {0, 1, 2, -1, -1, -1, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_4_0 + {0, 1, 2, -1, -1, -1, -1, -1, 3, -1, -1}, + + // CHANNEL_LAYOUT_2_2 + {0, 1, -1, -1, -1, -1, -1, -1, -1, 2, 3}, + + // CHANNEL_LAYOUT_QUAD + {0, 1, -1, -1, 2, 3, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_5_0 + {0, 1, 2, -1, -1, -1, -1, -1, -1, 3, 4}, + + // CHANNEL_LAYOUT_5_1 + {0, 1, 2, 3, -1, -1, -1, -1, -1, 4, 5}, + + // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR + + // CHANNEL_LAYOUT_5_0_BACK + {0, 1, 2, -1, 3, 4, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_5_1_BACK + {0, 1, 2, 3, 4, 5, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_7_0 + {0, 1, 2, -1, 5, 6, -1, -1, -1, 3, 4}, + + // CHANNEL_LAYOUT_7_1 + {0, 1, 2, 3, 6, 7, -1, -1, -1, 4, 5}, + + // CHANNEL_LAYOUT_7_1_WIDE + {0, 1, 2, 3, -1, -1, 6, 7, -1, 4, 5}, + + // CHANNEL_LAYOUT_STEREO_DOWNMIX + {0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_2POINT1 + {0, 1, -1, 2, -1, -1, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_3_1 + {0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_4_1 + {0, 1, 2, 4, -1, -1, -1, -1, 3, -1, -1}, + + // CHANNEL_LAYOUT_6_0 + {0, 1, 2, -1, -1, -1, -1, -1, 5, 3, 4}, + + // CHANNEL_LAYOUT_6_0_FRONT + {0, 1, -1, -1, -1, -1, 4, 5, -1, 2, 3}, + + // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR + + // 
CHANNEL_LAYOUT_HEXAGONAL + {0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1}, + + // CHANNEL_LAYOUT_6_1 + {0, 1, 2, 3, -1, -1, -1, -1, 6, 4, 5}, + + // CHANNEL_LAYOUT_6_1_BACK + {0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1}, + + // CHANNEL_LAYOUT_6_1_FRONT + {0, 1, -1, 6, -1, -1, 4, 5, -1, 2, 3}, + + // CHANNEL_LAYOUT_7_0_FRONT + {0, 1, 2, -1, -1, -1, 5, 6, -1, 3, 4}, + + // CHANNEL_LAYOUT_7_1_WIDE_BACK + {0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1}, + + // CHANNEL_LAYOUT_OCTAGONAL + {0, 1, 2, -1, 5, 6, -1, -1, 7, 3, 4}, + + // CHANNEL_LAYOUT_DISCRETE + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC + {0, 1, 2, -1, -1, -1, -1, -1, -1, -1, -1}, + + // CHANNEL_LAYOUT_4_1_QUAD_SIDE + {0, 1, -1, 4, -1, -1, -1, -1, -1, 2, 3}, + + // CHANNEL_LAYOUT_BITSTREAM + {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + + // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR +}; + +int ChannelLayoutToChannelCount(ChannelLayout layout) { + RTC_DCHECK_LT(static_cast(layout), arraysize(kLayoutToChannels)); + RTC_DCHECK_LE(kLayoutToChannels[layout], kMaxConcurrentChannels); + return kLayoutToChannels[layout]; +} + +// Converts a channel count into a channel layout. +ChannelLayout GuessChannelLayout(int channels) { + switch (channels) { + case 1: + return CHANNEL_LAYOUT_MONO; + case 2: + return CHANNEL_LAYOUT_STEREO; + case 3: + return CHANNEL_LAYOUT_SURROUND; + case 4: + return CHANNEL_LAYOUT_QUAD; + case 5: + return CHANNEL_LAYOUT_5_0; + case 6: + return CHANNEL_LAYOUT_5_1; + case 7: + return CHANNEL_LAYOUT_6_1; + case 8: + return CHANNEL_LAYOUT_7_1; + default: + RTC_DLOG(LS_WARNING) << "Unsupported channel count: " << channels; + } + return CHANNEL_LAYOUT_UNSUPPORTED; +} + +int ChannelOrder(ChannelLayout layout, Channels channel) { + RTC_DCHECK_LT(static_cast(layout), arraysize(kChannelOrderings)); + RTC_DCHECK_LT(static_cast(channel), arraysize(kChannelOrderings[0])); + return kChannelOrderings[layout][channel]; +} + +const char* ChannelLayoutToString(ChannelLayout layout) { + switch (layout) { + case CHANNEL_LAYOUT_NONE: + return "NONE"; + case CHANNEL_LAYOUT_UNSUPPORTED: + return "UNSUPPORTED"; + case CHANNEL_LAYOUT_MONO: + return "MONO"; + case CHANNEL_LAYOUT_STEREO: + return "STEREO"; + case CHANNEL_LAYOUT_2_1: + return "2.1"; + case CHANNEL_LAYOUT_SURROUND: + return "SURROUND"; + case CHANNEL_LAYOUT_4_0: + return "4.0"; + case CHANNEL_LAYOUT_2_2: + return "QUAD_SIDE"; + case CHANNEL_LAYOUT_QUAD: + return "QUAD"; + case CHANNEL_LAYOUT_5_0: + return "5.0"; + case CHANNEL_LAYOUT_5_1: + return "5.1"; + case CHANNEL_LAYOUT_5_0_BACK: + return "5.0_BACK"; + case CHANNEL_LAYOUT_5_1_BACK: + return "5.1_BACK"; + case CHANNEL_LAYOUT_7_0: + return "7.0"; + case CHANNEL_LAYOUT_7_1: + return "7.1"; + case CHANNEL_LAYOUT_7_1_WIDE: + return "7.1_WIDE"; + case CHANNEL_LAYOUT_STEREO_DOWNMIX: + return "STEREO_DOWNMIX"; + case CHANNEL_LAYOUT_2POINT1: + return "2POINT1"; + case CHANNEL_LAYOUT_3_1: + return "3.1"; + case CHANNEL_LAYOUT_4_1: + return "4.1"; + case CHANNEL_LAYOUT_6_0: + return "6.0"; + case CHANNEL_LAYOUT_6_0_FRONT: + return "6.0_FRONT"; + case CHANNEL_LAYOUT_HEXAGONAL: + return "HEXAGONAL"; + case CHANNEL_LAYOUT_6_1: + return "6.1"; + case CHANNEL_LAYOUT_6_1_BACK: + return "6.1_BACK"; + case CHANNEL_LAYOUT_6_1_FRONT: + return "6.1_FRONT"; + case CHANNEL_LAYOUT_7_0_FRONT: + return "7.0_FRONT"; + case CHANNEL_LAYOUT_7_1_WIDE_BACK: + return "7.1_WIDE_BACK"; + case CHANNEL_LAYOUT_OCTAGONAL: + return "OCTAGONAL"; + case CHANNEL_LAYOUT_DISCRETE: + return "DISCRETE"; + case 
CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC: + return "STEREO_AND_KEYBOARD_MIC"; + case CHANNEL_LAYOUT_4_1_QUAD_SIDE: + return "4.1_QUAD_SIDE"; + case CHANNEL_LAYOUT_BITSTREAM: + return "BITSTREAM"; + } + RTC_NOTREACHED() << "Invalid channel layout provided: " << layout; + return ""; +} + +} // namespace webrtc diff --git a/audio_processing/channel_layout.h b/audio_processing/channel_layout.h new file mode 100644 index 0000000..175aee7 --- /dev/null +++ b/audio_processing/channel_layout.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_AUDIO_CHANNEL_LAYOUT_H_ +#define API_AUDIO_CHANNEL_LAYOUT_H_ + +namespace webrtc { + +// This file is derived from Chromium's base/channel_layout.h. + +// Enumerates the various representations of the ordering of audio channels. +// Logged to UMA, so never reuse a value, always add new/greater ones! +enum ChannelLayout { + CHANNEL_LAYOUT_NONE = 0, + CHANNEL_LAYOUT_UNSUPPORTED = 1, + + // Front C + CHANNEL_LAYOUT_MONO = 2, + + // Front L, Front R + CHANNEL_LAYOUT_STEREO = 3, + + // Front L, Front R, Back C + CHANNEL_LAYOUT_2_1 = 4, + + // Front L, Front R, Front C + CHANNEL_LAYOUT_SURROUND = 5, + + // Front L, Front R, Front C, Back C + CHANNEL_LAYOUT_4_0 = 6, + + // Front L, Front R, Side L, Side R + CHANNEL_LAYOUT_2_2 = 7, + + // Front L, Front R, Back L, Back R + CHANNEL_LAYOUT_QUAD = 8, + + // Front L, Front R, Front C, Side L, Side R + CHANNEL_LAYOUT_5_0 = 9, + + // Front L, Front R, Front C, LFE, Side L, Side R + CHANNEL_LAYOUT_5_1 = 10, + + // Front L, Front R, Front C, Back L, Back R + CHANNEL_LAYOUT_5_0_BACK = 11, + + // Front L, Front R, Front C, LFE, Back L, Back R + CHANNEL_LAYOUT_5_1_BACK = 12, + + // Front L, Front R, Front C, Side L, Side R, Back L, Back R + CHANNEL_LAYOUT_7_0 = 13, + + // Front L, Front R, Front C, LFE, Side L, Side R, Back L, Back R + CHANNEL_LAYOUT_7_1 = 14, + + // Front L, Front R, Front C, LFE, Side L, Side R, Front LofC, Front RofC + CHANNEL_LAYOUT_7_1_WIDE = 15, + + // Stereo L, Stereo R + CHANNEL_LAYOUT_STEREO_DOWNMIX = 16, + + // Stereo L, Stereo R, LFE + CHANNEL_LAYOUT_2POINT1 = 17, + + // Stereo L, Stereo R, Front C, LFE + CHANNEL_LAYOUT_3_1 = 18, + + // Stereo L, Stereo R, Front C, Rear C, LFE + CHANNEL_LAYOUT_4_1 = 19, + + // Stereo L, Stereo R, Front C, Side L, Side R, Back C + CHANNEL_LAYOUT_6_0 = 20, + + // Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC + CHANNEL_LAYOUT_6_0_FRONT = 21, + + // Stereo L, Stereo R, Front C, Rear L, Rear R, Rear C + CHANNEL_LAYOUT_HEXAGONAL = 22, + + // Stereo L, Stereo R, Front C, LFE, Side L, Side R, Rear Center + CHANNEL_LAYOUT_6_1 = 23, + + // Stereo L, Stereo R, Front C, LFE, Back L, Back R, Rear Center + CHANNEL_LAYOUT_6_1_BACK = 24, + + // Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC, LFE + CHANNEL_LAYOUT_6_1_FRONT = 25, + + // Front L, Front R, Front C, Side L, Side R, Front LofC, Front RofC + CHANNEL_LAYOUT_7_0_FRONT = 26, + + // Front L, Front R, Front C, LFE, Back L, Back R, Front LofC, Front RofC + CHANNEL_LAYOUT_7_1_WIDE_BACK = 27, + + // Front L, Front R, Front C, Side L, Side R, Rear L, Back R, Back C. 
+ CHANNEL_LAYOUT_OCTAGONAL = 28, + + // Channels are not explicitly mapped to speakers. + CHANNEL_LAYOUT_DISCRETE = 29, + + // Front L, Front R, Front C. Front C contains the keyboard mic audio. This + // layout is only intended for input for WebRTC. The Front C channel + // is stripped away in the WebRTC audio input pipeline and never seen outside + // of that. + CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC = 30, + + // Front L, Front R, Side L, Side R, LFE + CHANNEL_LAYOUT_4_1_QUAD_SIDE = 31, + + // Actual channel layout is specified in the bitstream and the actual channel + // count is unknown at Chromium media pipeline level (useful for audio + // pass-through mode). + CHANNEL_LAYOUT_BITSTREAM = 32, + + // Max value, must always equal the largest entry ever logged. + CHANNEL_LAYOUT_MAX = CHANNEL_LAYOUT_BITSTREAM +}; + +// Note: Do not reorder or reassign these values; other code depends on their +// ordering to operate correctly. E.g., CoreAudio channel layout computations. +enum Channels { + LEFT = 0, + RIGHT, + CENTER, + LFE, + BACK_LEFT, + BACK_RIGHT, + LEFT_OF_CENTER, + RIGHT_OF_CENTER, + BACK_CENTER, + SIDE_LEFT, + SIDE_RIGHT, + CHANNELS_MAX = + SIDE_RIGHT, // Must always equal the largest value ever logged. +}; + +// The maximum number of concurrently active channels for all possible layouts. +// ChannelLayoutToChannelCount() will never return a value higher than this. +constexpr int kMaxConcurrentChannels = 8; + +// Returns the expected channel position in an interleaved stream. Values of -1 +// mean the channel at that index is not used for that layout. Values range +// from 0 to ChannelLayoutToChannelCount(layout) - 1. +int ChannelOrder(ChannelLayout layout, Channels channel); + +// Returns the number of channels in a given ChannelLayout. +int ChannelLayoutToChannelCount(ChannelLayout layout); + +// Given the number of channels, return the best layout, +// or return CHANNEL_LAYOUT_UNSUPPORTED if there is no good match. +ChannelLayout GuessChannelLayout(int channels); + +// Returns a string representation of the channel layout. +const char* ChannelLayoutToString(ChannelLayout layout); + +} // namespace webrtc + +#endif // API_AUDIO_CHANNEL_LAYOUT_H_ diff --git a/audio_processing/high_pass_filter.cc b/audio_processing/high_pass_filter.cc new file mode 100644 index 0000000..cc1ba8c --- /dev/null +++ b/audio_processing/high_pass_filter.cc @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "audio_processing/high_pass_filter.h" + +#include "rtc_base/array_view.h" +#include "audio_processing/audio_buffer.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +namespace { +// [B,A] = butter(2,100/8000,'high') +constexpr CascadedBiQuadFilter::BiQuadCoefficients + kHighPassFilterCoefficients16kHz = {{0.97261f, -1.94523f, 0.97261f}, + {-1.94448f, 0.94598f}}; + +// [B,A] = butter(2,100/16000,'high') +constexpr CascadedBiQuadFilter::BiQuadCoefficients + kHighPassFilterCoefficients32kHz = {{0.98621f, -1.97242f, 0.98621f}, + {-1.97223f, 0.97261f}}; + +// [B,A] = butter(2,100/24000,'high') +constexpr CascadedBiQuadFilter::BiQuadCoefficients + kHighPassFilterCoefficients48kHz = {{0.99079f, -1.98157f, 0.99079f}, + {-1.98149f, 0.98166f}}; + +constexpr size_t kNumberOfHighPassBiQuads = 1; + +const CascadedBiQuadFilter::BiQuadCoefficients& ChooseCoefficients( + int sample_rate_hz) { + switch (sample_rate_hz) { + case 16000: + return kHighPassFilterCoefficients16kHz; + case 32000: + return kHighPassFilterCoefficients32kHz; + case 48000: + return kHighPassFilterCoefficients48kHz; + default: + RTC_NOTREACHED(); + } + RTC_NOTREACHED(); + return kHighPassFilterCoefficients16kHz; +} + +} // namespace + +HighPassFilter::HighPassFilter(int sample_rate_hz, size_t num_channels) + : sample_rate_hz_(sample_rate_hz) { + filters_.resize(num_channels); + const auto& coefficients = ChooseCoefficients(sample_rate_hz_); + for (size_t k = 0; k < filters_.size(); ++k) { + filters_[k].reset( + new CascadedBiQuadFilter(coefficients, kNumberOfHighPassBiQuads)); + } +} + +HighPassFilter::~HighPassFilter() = default; + +void HighPassFilter::Process(AudioBuffer* audio, bool use_split_band_data) { + RTC_DCHECK(audio); + RTC_DCHECK_EQ(filters_.size(), audio->num_channels()); + if (use_split_band_data) { + for (size_t k = 0; k < audio->num_channels(); ++k) { + rtc::ArrayView channel_data = rtc::ArrayView( + audio->split_bands(k)[0], audio->num_frames_per_band()); + filters_[k]->Process(channel_data); + } + } else { + for (size_t k = 0; k < audio->num_channels(); ++k) { + rtc::ArrayView channel_data = + rtc::ArrayView(&audio->channels()[k][0], audio->num_frames()); + filters_[k]->Process(channel_data); + } + } +} + +void HighPassFilter::Process(std::vector>* audio) { + RTC_DCHECK_EQ(filters_.size(), audio->size()); + for (size_t k = 0; k < audio->size(); ++k) { + filters_[k]->Process((*audio)[k]); + } +} + +void HighPassFilter::Reset() { + for (size_t k = 0; k < filters_.size(); ++k) { + filters_[k]->Reset(); + } +} + +void HighPassFilter::Reset(size_t num_channels) { + const size_t old_num_channels = filters_.size(); + filters_.resize(num_channels); + if (filters_.size() < old_num_channels) { + Reset(); + } else { + for (size_t k = 0; k < old_num_channels; ++k) { + filters_[k]->Reset(); + } + const auto& coefficients = ChooseCoefficients(sample_rate_hz_); + for (size_t k = old_num_channels; k < filters_.size(); ++k) { + filters_[k].reset( + new CascadedBiQuadFilter(coefficients, kNumberOfHighPassBiQuads)); + } + } +} + +} // namespace webrtc diff --git a/audio_processing/high_pass_filter.h b/audio_processing/high_pass_filter.h new file mode 100644 index 0000000..e830713 --- /dev/null +++ b/audio_processing/high_pass_filter.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_H_
+
+#include <memory>
+#include <vector>
+
+#include "rtc_base/array_view.h"
+#include "audio_processing/utility/cascaded_biquad_filter.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
+class HighPassFilter {
+ public:
+  HighPassFilter(int sample_rate_hz, size_t num_channels);
+  ~HighPassFilter();
+  HighPassFilter(const HighPassFilter&) = delete;
+  HighPassFilter& operator=(const HighPassFilter&) = delete;
+
+  void Process(AudioBuffer* audio, bool use_split_band_data);
+  void Process(std::vector<std::vector<float>>* audio);
+  void Reset();
+  void Reset(size_t num_channels);
+
+ private:
+  const int sample_rate_hz_;
+  std::vector<std::unique_ptr<CascadedBiQuadFilter>> filters_;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_H_
diff --git a/audio_processing/include/audio_processing.h b/audio_processing/include/audio_processing.h
new file mode 100644
index 0000000..c10ac61
--- /dev/null
+++ b/audio_processing/include/audio_processing.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#ifndef _USE_MATH_DEFINES
+#define _USE_MATH_DEFINES
+#endif
+
+namespace webrtc {
+
+static const int kChunkSizeMs = 10;
+
+class StreamConfig {
+ public:
+  // sample_rate_hz: The sampling rate of the stream.
+  //
+  // num_channels: The number of audio channels in the stream, excluding the
+  //               keyboard channel if it is present. When passing a
+  //               StreamConfig with an array of arrays T*[N],
+  //
+  //                N == {num_channels + 1  if  has_keyboard
+  //                     {num_channels      if  !has_keyboard
+  //
+  // has_keyboard: True if the stream has a keyboard channel. When
+  //               has_keyboard is true, the last channel in any corresponding
+  //               list of channels is the keyboard channel.
+  StreamConfig(int sample_rate_hz = 0,
+               size_t num_channels = 0,
+               bool has_keyboard = false)
+      : sample_rate_hz_(sample_rate_hz),
+        num_channels_(num_channels),
+        has_keyboard_(has_keyboard),
+        num_frames_(calculate_frames(sample_rate_hz)) {}
+
+  void set_sample_rate_hz(int value) {
+    sample_rate_hz_ = value;
+    num_frames_ = calculate_frames(value);
+  }
+  void set_num_channels(size_t value) { num_channels_ = value; }
+  void set_has_keyboard(bool value) { has_keyboard_ = value; }
+
+  int sample_rate_hz() const { return sample_rate_hz_; }
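+
+  // Example: set_sample_rate_hz(48000) yields num_frames() == 480, i.e. one
+  // 10 ms chunk per channel (kChunkSizeMs * 48000 / 1000).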
+
+  // The number of channels in the stream, not including the keyboard channel
+  // if present.
+  size_t num_channels() const { return num_channels_; }
+
+  bool has_keyboard() const { return has_keyboard_; }
+  size_t num_frames() const { return num_frames_; }
+  size_t num_samples() const { return num_channels_ * num_frames_; }
+
+  bool operator==(const StreamConfig& other) const {
+    return sample_rate_hz_ == other.sample_rate_hz_ &&
+           num_channels_ == other.num_channels_ &&
+           has_keyboard_ == other.has_keyboard_;
+  }
+
+  bool operator!=(const StreamConfig& other) const { return !(*this == other); }
+
+ private:
+  static size_t calculate_frames(int sample_rate_hz) {
+    return static_cast<size_t>(kChunkSizeMs * sample_rate_hz / 1000);
+  }
+
+  int sample_rate_hz_;
+  size_t num_channels_;
+  bool has_keyboard_;
+  size_t num_frames_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
diff --git a/audio_processing/include/audio_util.h b/audio_processing/include/audio_util.h
new file mode 100644
index 0000000..df94932
--- /dev/null
+++ b/audio_processing/include/audio_util.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
+#define COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <limits>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+typedef std::numeric_limits<int16_t> limits_int16;
+
+// The conversion functions use the following naming convention:
+// S16:      int16_t [-32768, 32767]
+// Float:    float   [-1.0, 1.0]
+// FloatS16: float   [-32768.0, 32768.0]
+// Dbfs:     float   [-20.0*log(10, 32768), 0] = [-90.3, 0]
+// The ratio conversion functions use this naming convention:
+// Ratio: float (0, +inf)
+// Db:    float (-inf, +inf)
+static inline float S16ToFloat(int16_t v) {
+  constexpr float kScaling = 1.f / 32768.f;
+  return v * kScaling;
+}
+
+static inline int16_t FloatS16ToS16(float v) {
+  v = std::min(v, 32767.f);
+  v = std::max(v, -32768.f);
+  return static_cast<int16_t>(v + std::copysign(0.5f, v));
+}
+
+static inline int16_t FloatToS16(float v) {
+  v *= 32768.f;
+  v = std::min(v, 32767.f);
+  v = std::max(v, -32768.f);
+  return static_cast<int16_t>(v + std::copysign(0.5f, v));
+}
+
+static inline float FloatToFloatS16(float v) {
+  v = std::min(v, 1.f);
+  v = std::max(v, -1.f);
+  return v * 32768.f;
+}
+
+static inline float FloatS16ToFloat(float v) {
+  v = std::min(v, 32768.f);
+  v = std::max(v, -32768.f);
+  constexpr float kScaling = 1.f / 32768.f;
+  return v * kScaling;
+}
+
+static inline void S16ToFloatS16(const int16_t* src, size_t size, float* dest) {
+  for (size_t i = 0; i < size; ++i)
+    dest[i] = src[i];
+}
+
+static inline void FloatS16ToS16(const float* src, size_t size, int16_t* dest) {
+  for (size_t i = 0; i < size; ++i)
+    dest[i] = FloatS16ToS16(src[i]);
+}
+
+static inline void FloatToFloatS16(const float* src, size_t size, float* dest) {
+  for (size_t i = 0; i < size; ++i)
+    dest[i] = FloatToFloatS16(src[i]);
+}
+
+static inline void FloatS16ToFloat(const float* src, size_t size, float* dest) {
+  for (size_t i = 0; i < size; ++i)
+    dest[i] = FloatS16ToFloat(src[i]);
+}
+
+void FloatToS16(const float* src, size_t size, int16_t* dest);
+void S16ToFloat(const int16_t* src, size_t size, float* dest);
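+
+// Round-trip sketch of the conventions above (illustrative):
+//
+//   S16ToFloat(32767)       == 32767.f / 32768.f  // Just below 1.f.
+//   FloatToS16(-1.f)        == -32768
+//   FloatToFloatS16(0.5f)   == 16384.f
+//   FloatS16ToS16(16384.3f) == 16384              // Rounds to nearest.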
+inline float DbToRatio(float v) {
+  return std::pow(10.0f, v / 20.0f);
+}
+
+inline float DbfsToFloatS16(float v) {
+  static constexpr float kMaximumAbsFloatS16 = -limits_int16::min();
+  return DbToRatio(v) * kMaximumAbsFloatS16;
+}
+
+inline float FloatS16ToDbfs(float v) {
+  RTC_DCHECK_GE(v, 0);
+
+  // kMinDbfs is equal to -20.0 * log10(-limits_int16::min())
+  static constexpr float kMinDbfs = -90.30899869919436f;
+  if (v <= 1.0f) {
+    return kMinDbfs;
+  }
+  // Equal to 20 * log10(v / (-limits_int16::min()))
+  return 20.0f * std::log10(v) + kMinDbfs;
+}
+
+// Copy audio from |src| channels to |dest| channels unless |src| and |dest|
+// point to the same address. |src| and |dest| must have the same number of
+// channels, and there must be sufficient space allocated in |dest|.
+template <typename T>
+void CopyAudioIfNeeded(const T* const* src,
+                       int num_frames,
+                       int num_channels,
+                       T* const* dest) {
+  for (int i = 0; i < num_channels; ++i) {
+    if (src[i] != dest[i]) {
+      std::copy(src[i], src[i] + num_frames, dest[i]);
+    }
+  }
+}
+
+// Deinterleave audio from |interleaved| to the channel buffers pointed to
+// by |deinterleaved|. There must be sufficient space allocated in the
+// |deinterleaved| buffers (|num_channel| buffers with |samples_per_channel|
+// per buffer).
+template <typename T>
+void Deinterleave(const T* interleaved,
+                  size_t samples_per_channel,
+                  size_t num_channels,
+                  T* const* deinterleaved) {
+  for (size_t i = 0; i < num_channels; ++i) {
+    T* channel = deinterleaved[i];
+    size_t interleaved_idx = i;
+    for (size_t j = 0; j < samples_per_channel; ++j) {
+      channel[j] = interleaved[interleaved_idx];
+      interleaved_idx += num_channels;
+    }
+  }
+}
+
+// Interleave audio from the channel buffers pointed to by |deinterleaved| to
+// |interleaved|. There must be sufficient space allocated in |interleaved|
+// (|samples_per_channel| * |num_channels|).
+template <typename T>
+void Interleave(const T* const* deinterleaved,
+                size_t samples_per_channel,
+                size_t num_channels,
+                T* interleaved) {
+  for (size_t i = 0; i < num_channels; ++i) {
+    const T* channel = deinterleaved[i];
+    size_t interleaved_idx = i;
+    for (size_t j = 0; j < samples_per_channel; ++j) {
+      interleaved[interleaved_idx] = channel[j];
+      interleaved_idx += num_channels;
+    }
+  }
+}
+
+// Copies audio from a single channel buffer pointed to by |mono| to each
+// channel of |interleaved|. There must be sufficient space allocated in
+// |interleaved| (|samples_per_channel| * |num_channels|).
+template <typename T>
+void UpmixMonoToInterleaved(const T* mono,
+                            int num_frames,
+                            int num_channels,
+                            T* interleaved) {
+  int interleaved_idx = 0;
+  for (int i = 0; i < num_frames; ++i) {
+    for (int j = 0; j < num_channels; ++j) {
+      interleaved[interleaved_idx++] = mono[i];
+    }
+  }
+}
+
+// Downmixes the given deinterleaved channels to a single channel by averaging,
+// accumulating in the |Intermediate| type to avoid overflow.
+template <typename T, typename Intermediate>
+void DownmixToMono(const T* const* input_channels,
+                   size_t num_frames,
+                   int num_channels,
+                   T* out) {
+  for (size_t i = 0; i < num_frames; ++i) {
+    Intermediate value = input_channels[0][i];
+    for (int j = 1; j < num_channels; ++j) {
+      value += input_channels[j][i];
+    }
+    out[i] = value / num_channels;
+  }
+}
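+
+// Worked example of the (de)interleaving convention used above
+// (illustrative; L0/R0 denote the first left/right samples):
+//
+//   int16_t interleaved[] = {L0, R0, L1, R1};
+//   int16_t left[2], right[2];
+//   int16_t* channels[2] = {left, right};
+//   Deinterleave(interleaved, /*samples_per_channel=*/2, /*num_channels=*/2,
+//                channels);
+//   // Now left == {L0, L1} and right == {R0, R1}; Interleave() is the
+//   // inverse operation.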
+
+// Downmixes an interleaved multichannel signal to a single channel by
+// averaging all channels.
+template <typename T, typename Intermediate>
+void DownmixInterleavedToMonoImpl(const T* interleaved,
+                                  size_t num_frames,
+                                  int num_channels,
+                                  T* deinterleaved) {
+  RTC_DCHECK_GT(num_channels, 0);
+  RTC_DCHECK_GT(num_frames, 0);
+
+  const T* const end = interleaved + num_frames * num_channels;
+
+  while (interleaved < end) {
+    const T* const frame_end = interleaved + num_channels;
+
+    Intermediate value = *interleaved++;
+    while (interleaved < frame_end) {
+      value += *interleaved++;
+    }
+
+    *deinterleaved++ = value / num_channels;
+  }
+}
+
+template <typename T>
+void DownmixInterleavedToMono(const T* interleaved,
+                              size_t num_frames,
+                              int num_channels,
+                              T* deinterleaved);
+
+template <>
+void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved,
+                                       size_t num_frames,
+                                       int num_channels,
+                                       int16_t* deinterleaved);
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
diff --git a/audio_processing/logging/apm_data_dumper.cc b/audio_processing/logging/apm_data_dumper.cc
new file mode 100644
index 0000000..24c5c16
--- /dev/null
+++ b/audio_processing/logging/apm_data_dumper.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/logging/apm_data_dumper.h"
+
+#include "rtc_base/strings/string_builder.h"
+
+// Check to verify that the define is properly set.
+#if !defined(WEBRTC_APM_DEBUG_DUMP) || \
+    (WEBRTC_APM_DEBUG_DUMP != 0 && WEBRTC_APM_DEBUG_DUMP != 1)
+#error "Set WEBRTC_APM_DEBUG_DUMP to either 0 or 1"
+#endif
+
+namespace webrtc {
+namespace {
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+
+#if defined(WEBRTC_WIN)
+constexpr char kPathDelimiter = '\\';
+#else
+constexpr char kPathDelimiter = '/';
+#endif
+
+std::string FormFileName(const char* output_dir,
+                         const char* name,
+                         int instance_index,
+                         int reinit_index,
+                         const std::string& suffix) {
+  char buf[1024];
+  rtc::SimpleStringBuilder ss(buf);
+  const size_t output_dir_size = strlen(output_dir);
+  if (output_dir_size > 0) {
+    ss << output_dir;
+    if (output_dir[output_dir_size - 1] != kPathDelimiter) {
+      ss << kPathDelimiter;
+    }
+  }
+  ss << name << "_" << instance_index << "-" << reinit_index << suffix;
+  return ss.str();
+}
+#endif
+
+}  // namespace
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ApmDataDumper::ApmDataDumper(int instance_index)
+    : instance_index_(instance_index) {}
+#else
+ApmDataDumper::ApmDataDumper(int instance_index) {}
+#endif
+
+ApmDataDumper::~ApmDataDumper() = default;
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+bool ApmDataDumper::recording_activated_ = false;
+char ApmDataDumper::output_dir_[] = "";
+
+FILE* ApmDataDumper::GetRawFile(const char* name) {
+  std::string filename = FormFileName(output_dir_, name, instance_index_,
+                                      recording_set_index_, ".dat");
+  auto& f = raw_files_[filename];
+  if (!f) {
+    f.reset(fopen(filename.c_str(), "wb"));
+    RTC_CHECK(f.get()) << "Cannot write to " << filename << ".";
+  }
+  return f.get();
+}
+
+WavWriter* ApmDataDumper::GetWavFile(const char* name,
+                                     int sample_rate_hz,
+                                     int num_channels) {
+  std::string filename = FormFileName(output_dir_, name, instance_index_,
+                                      recording_set_index_, ".wav");
+  auto& f = wav_files_[filename];
+  if (!f) {
+    f.reset(new WavWriter(filename.c_str(), sample_rate_hz, num_channels));
+  }
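+  // The map owns the writer; later calls with the same |name| reuse it until
+  // InitiateNewSetOfRecordings() changes the index embedded in the file name.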
+ return f.get(); +} +#endif + +} // namespace webrtc diff --git a/audio_processing/logging/apm_data_dumper.h b/audio_processing/logging/apm_data_dumper.h new file mode 100644 index 0000000..0b579e2 --- /dev/null +++ b/audio_processing/logging/apm_data_dumper.h @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_ +#define MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_ + +// define WEBRTC_APM_DEBUG_DUMP +#include "build_config.h" + +#include +#include +#include + +#include +#if WEBRTC_APM_DEBUG_DUMP == 1 +#include +#include +#endif + +#include "rtc_base/array_view.h" +#if WEBRTC_APM_DEBUG_DUMP == 1 +#include "wav_file.h" +#include "rtc_base/checks.h" +#endif +#include "rtc_base/constructor_magic.h" + +// Check to verify that the define is properly set. +#if !defined(WEBRTC_APM_DEBUG_DUMP) || \ + (WEBRTC_APM_DEBUG_DUMP != 0 && WEBRTC_APM_DEBUG_DUMP != 1) +#error "Set WEBRTC_APM_DEBUG_DUMP to either 0 or 1" +#endif + +namespace webrtc { + +#if WEBRTC_APM_DEBUG_DUMP == 1 +// Functor used to use as a custom deleter in the map of file pointers to raw +// files. +struct RawFileCloseFunctor { + void operator()(FILE* f) const { fclose(f); } +}; +#endif + +// Class that handles dumping of variables into files. +class ApmDataDumper { + public: + // Constructor that takes an instance index that may + // be used to distinguish data dumped from different + // instances of the code. + explicit ApmDataDumper(int instance_index); + + ~ApmDataDumper(); + + // Activates or deactivate the dumping functionality. + static void SetActivated(bool activated) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + recording_activated_ = activated; +#endif + } + + // Set an optional output directory. + static void SetOutputDirectory(const std::string& output_dir) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + RTC_CHECK_LT(output_dir.size(), kOutputDirMaxLength); + strncpy(output_dir_, output_dir.c_str(), output_dir.size()); +#endif + } + + // Reinitializes the data dumping such that new versions + // of all files being dumped to are created. + void InitiateNewSetOfRecordings() { +#if WEBRTC_APM_DEBUG_DUMP == 1 + ++recording_set_index_; +#endif + } + + // Methods for performing dumping of data of various types into + // various formats. 
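+  //
+  // Usage sketch (illustrative; all calls are no-ops unless
+  // WEBRTC_APM_DEBUG_DUMP == 1 and SetActivated(true) has been called):
+  //
+  //   ApmDataDumper dumper(/*instance_index=*/0);
+  //   dumper.DumpRaw("my_metric", 0.25f);  // Appends one float to
+  //                                        // my_metric_0-0.dat.
+  //   dumper.DumpWav("my_capture", frame_view, /*sample_rate_hz=*/16000,
+  //                  /*num_channels=*/1);  // frame_view is an
+  //                                        // rtc::ArrayView<const float>.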
+ void DumpRaw(const char* name, double v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + fwrite(&v, sizeof(v), 1, file); + } +#endif + } + + void DumpRaw(const char* name, size_t v_length, const double* v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + fwrite(v, sizeof(v[0]), v_length, file); + } +#endif + } + + void DumpRaw(const char* name, rtc::ArrayView v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + DumpRaw(name, v.size(), v.data()); + } +#endif + } + + void DumpRaw(const char* name, float v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + fwrite(&v, sizeof(v), 1, file); + } +#endif + } + + void DumpRaw(const char* name, size_t v_length, const float* v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + fwrite(v, sizeof(v[0]), v_length, file); + } +#endif + } + + void DumpRaw(const char* name, rtc::ArrayView v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + DumpRaw(name, v.size(), v.data()); + } +#endif + } + + void DumpRaw(const char* name, bool v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + DumpRaw(name, static_cast(v)); + } +#endif + } + + void DumpRaw(const char* name, size_t v_length, const bool* v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + for (size_t k = 0; k < v_length; ++k) { + int16_t value = static_cast(v[k]); + fwrite(&value, sizeof(value), 1, file); + } + } +#endif + } + + void DumpRaw(const char* name, rtc::ArrayView v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + DumpRaw(name, v.size(), v.data()); + } +#endif + } + + void DumpRaw(const char* name, int16_t v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + fwrite(&v, sizeof(v), 1, file); + } +#endif + } + + void DumpRaw(const char* name, size_t v_length, const int16_t* v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + fwrite(v, sizeof(v[0]), v_length, file); + } +#endif + } + + void DumpRaw(const char* name, rtc::ArrayView v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + DumpRaw(name, v.size(), v.data()); + } +#endif + } + + void DumpRaw(const char* name, int32_t v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + fwrite(&v, sizeof(v), 1, file); + } +#endif + } + + void DumpRaw(const char* name, size_t v_length, const int32_t* v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + fwrite(v, sizeof(v[0]), v_length, file); + } +#endif + } + + void DumpRaw(const char* name, size_t v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + fwrite(&v, sizeof(v), 1, file); + } +#endif + } + + void DumpRaw(const char* name, size_t v_length, const size_t* v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + FILE* file = GetRawFile(name); + fwrite(v, sizeof(v[0]), v_length, file); + } +#endif + } + + void DumpRaw(const char* name, rtc::ArrayView v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + DumpRaw(name, v.size(), v.data()); + } +#endif + } + + void DumpRaw(const char* name, rtc::ArrayView v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + DumpRaw(name, v.size(), v.data()); +#endif + } + + void DumpWav(const 
char* name, + size_t v_length, + const float* v, + int sample_rate_hz, + int num_channels) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + WavWriter* file = GetWavFile(name, sample_rate_hz, num_channels); + file->WriteSamples(v, v_length); + } +#endif + } + + void DumpWav(const char* name, + rtc::ArrayView v, + int sample_rate_hz, + int num_channels) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + if (recording_activated_) { + DumpWav(name, v.size(), v.data(), sample_rate_hz, num_channels); + } +#endif + } + + private: +#if WEBRTC_APM_DEBUG_DUMP == 1 + static bool recording_activated_; + static constexpr size_t kOutputDirMaxLength = 1024; + static char output_dir_[kOutputDirMaxLength]; + const int instance_index_; + int recording_set_index_ = 0; + std::unordered_map> + raw_files_; + std::unordered_map> wav_files_; + + FILE* GetRawFile(const char* name); + WavWriter* GetWavFile(const char* name, int sample_rate_hz, int num_channels); +#endif + RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ApmDataDumper); +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_ diff --git a/audio_processing/logging/wav_file.cc b/audio_processing/logging/wav_file.cc new file mode 100644 index 0000000..5581acf --- /dev/null +++ b/audio_processing/logging/wav_file.cc @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "wav_file.h" + +#include + +#include +#include +#include +#include + +#include "include/audio_util.h" +#include "wav_header.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/system/arch.h" + +namespace webrtc { +namespace { + +// We write 16-bit PCM WAV files. +constexpr WavFormat kWavFormat = kWavFormatPcm; +static_assert(std::is_trivially_destructible::value, ""); +constexpr size_t kBytesPerSample = 2; + +// Doesn't take ownership of the file handle and won't close it. +class ReadableWavFile : public ReadableWav { + public: + explicit ReadableWavFile(FileWrapper* file) : file_(file) {} + ReadableWavFile(const ReadableWavFile&) = delete; + ReadableWavFile& operator=(const ReadableWavFile&) = delete; + size_t Read(void* buf, size_t num_bytes) override { + size_t count = file_->Read(buf, num_bytes); + pos_ += count; + return count; + } + bool SeekForward(uint32_t num_bytes) override { + bool success = file_->SeekRelative(num_bytes); + if (success) { + pos_ += num_bytes; + } + return success; + } + int64_t GetPosition() { return pos_; } + + private: + FileWrapper* file_; + int64_t pos_ = 0; +}; + +} // namespace + +WavReader::WavReader(const std::string& filename) + : WavReader(FileWrapper::OpenReadOnly(filename)) {} + +WavReader::WavReader(FileWrapper file) : file_(std::move(file)) { + RTC_CHECK(file_.is_open()) + << "Invalid file. 
Could not create file handle for wav file."; + + ReadableWavFile readable(&file_); + WavFormat format; + size_t bytes_per_sample; + RTC_CHECK(ReadWavHeader(&readable, &num_channels_, &sample_rate_, &format, + &bytes_per_sample, &num_samples_)); + num_samples_remaining_ = num_samples_; + RTC_CHECK_EQ(kWavFormat, format); + RTC_CHECK_EQ(kBytesPerSample, bytes_per_sample); + data_start_pos_ = readable.GetPosition(); +} + +WavReader::~WavReader() { + Close(); +} + +void WavReader::Reset() { + RTC_CHECK(file_.SeekTo(data_start_pos_)) + << "Failed to set position in the file to WAV data start position"; + num_samples_remaining_ = num_samples_; +} + +int WavReader::sample_rate() const { + return sample_rate_; +} + +size_t WavReader::num_channels() const { + return num_channels_; +} + +size_t WavReader::num_samples() const { + return num_samples_; +} + +size_t WavReader::ReadSamples(size_t num_samples, int16_t* samples) { +#ifndef WEBRTC_ARCH_LITTLE_ENDIAN +#error "Need to convert samples to big-endian when reading from WAV file" +#endif + // There could be metadata after the audio; ensure we don't read it. + num_samples = std::min(num_samples, num_samples_remaining_); + const size_t num_bytes = num_samples * sizeof(*samples); + const size_t read_bytes = file_.Read(samples, num_bytes); + // If we didn't read what was requested, ensure we've reached the EOF. + RTC_CHECK(read_bytes == num_bytes || file_.ReadEof()); + RTC_CHECK_EQ(read_bytes % 2, 0) + << "End of file in the middle of a 16-bit sample"; + const size_t read_samples = read_bytes / 2; + RTC_CHECK_LE(read_samples, num_samples_remaining_); + num_samples_remaining_ -= read_samples; + return read_samples; +} + +size_t WavReader::ReadSamples(size_t num_samples, float* samples) { + static const size_t kChunksize = 4096 / sizeof(uint16_t); + size_t read = 0; + for (size_t i = 0; i < num_samples; i += kChunksize) { + int16_t isamples[kChunksize]; + size_t chunk = std::min(kChunksize, num_samples - i); + chunk = ReadSamples(chunk, isamples); + for (size_t j = 0; j < chunk; ++j) + samples[i + j] = isamples[j]; + read += chunk; + } + return read; +} + +void WavReader::Close() { + file_.Close(); +} + +WavWriter::WavWriter(const std::string& filename, + int sample_rate, + size_t num_channels) + // Unlike plain fopen, OpenWriteOnly takes care of filename utf8 -> + // wchar conversion on windows. + : WavWriter(FileWrapper::OpenWriteOnly(filename), + sample_rate, + num_channels) {} + +WavWriter::WavWriter(FileWrapper file, int sample_rate, size_t num_channels) + : sample_rate_(sample_rate), + num_channels_(num_channels), + num_samples_(0), + file_(std::move(file)) { + // Handle errors from the OpenWriteOnly call in above constructor. + RTC_CHECK(file_.is_open()) << "Invalid file. Could not create wav file."; + + RTC_CHECK(CheckWavParameters(num_channels_, sample_rate_, kWavFormat, + kBytesPerSample, num_samples_)); + + // Write a blank placeholder header, since we need to know the total number + // of samples before we can fill in the real data. 
+ static const uint8_t blank_header[kWavHeaderSize] = {0}; + RTC_CHECK(file_.Write(blank_header, kWavHeaderSize)); +} + +WavWriter::~WavWriter() { + Close(); +} + +int WavWriter::sample_rate() const { + return sample_rate_; +} + +size_t WavWriter::num_channels() const { + return num_channels_; +} + +size_t WavWriter::num_samples() const { + return num_samples_; +} + +void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) { +#ifndef WEBRTC_ARCH_LITTLE_ENDIAN +#error "Need to convert samples to little-endian when writing to WAV file" +#endif + RTC_CHECK(file_.Write(samples, sizeof(*samples) * num_samples)); + num_samples_ += num_samples; + RTC_CHECK(num_samples_ >= num_samples); // detect size_t overflow +} + +void WavWriter::WriteSamples(const float* samples, size_t num_samples) { + static const size_t kChunksize = 4096 / sizeof(uint16_t); + for (size_t i = 0; i < num_samples; i += kChunksize) { + int16_t isamples[kChunksize]; + const size_t chunk = std::min(kChunksize, num_samples - i); + FloatS16ToS16(samples + i, chunk, isamples); + WriteSamples(isamples, chunk); + } +} + +void WavWriter::Close() { + RTC_CHECK(file_.Rewind()); + uint8_t header[kWavHeaderSize]; + WriteWavHeader(header, num_channels_, sample_rate_, kWavFormat, + kBytesPerSample, num_samples_); + RTC_CHECK(file_.Write(header, kWavHeaderSize)); + RTC_CHECK(file_.Close()); +} + +} // namespace webrtc diff --git a/audio_processing/logging/wav_file.h b/audio_processing/logging/wav_file.h new file mode 100644 index 0000000..65f2453 --- /dev/null +++ b/audio_processing/logging/wav_file.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef COMMON_AUDIO_WAV_FILE_H_ +#define COMMON_AUDIO_WAV_FILE_H_ + +#include + +#include +#include + +#include "rtc_base/constructor_magic.h" +#include "rtc_base/system/file_wrapper.h" + +namespace webrtc { + +// Interface to provide access to WAV file parameters. +class WavFile { + public: + virtual ~WavFile() {} + + virtual int sample_rate() const = 0; + virtual size_t num_channels() const = 0; + virtual size_t num_samples() const = 0; +}; + +// Simple C++ class for writing 16-bit PCM WAV files. All error handling is +// by calls to RTC_CHECK(), making it unsuitable for anything but debug code. +class WavWriter final : public WavFile { + public: + // Open a new WAV file for writing. + WavWriter(const std::string& filename, int sample_rate, size_t num_channels); + + // Open a new WAV file for writing. + WavWriter(FileWrapper file, int sample_rate, size_t num_channels); + + // Close the WAV file, after writing its header. + ~WavWriter() override; + + // Write additional samples to the file. Each sample is in the range + // [-32768,32767], and there must be the previously specified number of + // interleaved channels. 
+ void WriteSamples(const float* samples, size_t num_samples); + void WriteSamples(const int16_t* samples, size_t num_samples); + + int sample_rate() const override; + size_t num_channels() const override; + size_t num_samples() const override; + + private: + void Close(); + const int sample_rate_; + const size_t num_channels_; + size_t num_samples_; // Total number of samples written to file. + FileWrapper file_; // Output file, owned by this class + + RTC_DISALLOW_COPY_AND_ASSIGN(WavWriter); +}; + +// Follows the conventions of WavWriter. +class WavReader final : public WavFile { + public: + // Opens an existing WAV file for reading. + explicit WavReader(const std::string& filename); + + // Use an existing WAV file for reading. + explicit WavReader(FileWrapper file); + + // Close the WAV file. + ~WavReader() override; + + // Resets position to the beginning of the file. + void Reset(); + + // Returns the number of samples read. If this is less than requested, + // verifies that the end of the file was reached. + size_t ReadSamples(size_t num_samples, float* samples); + size_t ReadSamples(size_t num_samples, int16_t* samples); + + int sample_rate() const override; + size_t num_channels() const override; + size_t num_samples() const override; + + private: + void Close(); + int sample_rate_; + size_t num_channels_; + size_t num_samples_; // Total number of samples in the file. + size_t num_samples_remaining_; + FileWrapper file_; // Input file, owned by this class. + int64_t + data_start_pos_; // Position in the file immediately after WAV header. + + RTC_DISALLOW_COPY_AND_ASSIGN(WavReader); +}; + +} // namespace webrtc + +#endif // COMMON_AUDIO_WAV_FILE_H_ diff --git a/audio_processing/logging/wav_header.cc b/audio_processing/logging/wav_header.cc new file mode 100644 index 0000000..773224a --- /dev/null +++ b/audio_processing/logging/wav_header.cc @@ -0,0 +1,292 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Based on the WAV file format documentation at +// https://ccrma.stanford.edu/courses/422/projects/WaveFormat/ and +// http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html + +#include "wav_header.h" + +#include +#include +#include + +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/sanitizer.h" +#include "rtc_base/system/arch.h" + +namespace webrtc { +namespace { + +struct ChunkHeader { + uint32_t ID; + uint32_t Size; +}; +static_assert(sizeof(ChunkHeader) == 8, "ChunkHeader size"); + +struct RiffHeader { + ChunkHeader header; + uint32_t Format; +}; + +// We can't nest this definition in WavHeader, because VS2013 gives an error +// on sizeof(WavHeader::fmt): "error C2070: 'unknown': illegal sizeof operand". +struct FmtSubchunk { + ChunkHeader header; + uint16_t AudioFormat; + uint16_t NumChannels; + uint32_t SampleRate; + uint32_t ByteRate; + uint16_t BlockAlign; + uint16_t BitsPerSample; +}; +static_assert(sizeof(FmtSubchunk) == 24, "FmtSubchunk size"); +const uint32_t kFmtSubchunkSize = sizeof(FmtSubchunk) - sizeof(ChunkHeader); + +// Simple wav header. It does not include chunks that are not essential to read +// audio samples. 
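+// The 44 bytes written by WriteWavHeader() below are laid out as
+//   "RIFF" <riff size> "WAVE" | "fmt " <16> <six format fields> | "data" <size>
+// i.e. exactly one RiffHeader, one FmtSubchunk, and one ChunkHeader.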
+struct WavHeader { + WavHeader(const WavHeader&) = default; + WavHeader& operator=(const WavHeader&) = default; + RiffHeader riff; + FmtSubchunk fmt; + struct { + ChunkHeader header; + } data; +}; +static_assert(sizeof(WavHeader) == kWavHeaderSize, "no padding in header"); + +#ifdef WEBRTC_ARCH_LITTLE_ENDIAN +static inline void WriteLE16(uint16_t* f, uint16_t x) { + *f = x; +} +static inline void WriteLE32(uint32_t* f, uint32_t x) { + *f = x; +} +static inline void WriteFourCC(uint32_t* f, char a, char b, char c, char d) { + *f = static_cast(a) | static_cast(b) << 8 | + static_cast(c) << 16 | static_cast(d) << 24; +} + +static inline uint16_t ReadLE16(uint16_t x) { + return x; +} +static inline uint32_t ReadLE32(uint32_t x) { + return x; +} +static inline std::string ReadFourCC(uint32_t x) { + return std::string(reinterpret_cast(&x), 4); +} +#else +#error "Write be-to-le conversion functions" +#endif + +static inline uint32_t RiffChunkSize(size_t bytes_in_payload) { + return static_cast(bytes_in_payload + kWavHeaderSize - + sizeof(ChunkHeader)); +} + +static inline uint32_t ByteRate(size_t num_channels, + int sample_rate, + size_t bytes_per_sample) { + return static_cast(num_channels * sample_rate * bytes_per_sample); +} + +static inline uint16_t BlockAlign(size_t num_channels, + size_t bytes_per_sample) { + return static_cast(num_channels * bytes_per_sample); +} + +// Finds a chunk having the sought ID. If found, then |readable| points to the +// first byte of the sought chunk data. If not found, the end of the file is +// reached. +bool FindWaveChunk(ChunkHeader* chunk_header, + ReadableWav* readable, + const std::string sought_chunk_id) { + RTC_DCHECK_EQ(sought_chunk_id.size(), 4); + while (true) { + if (readable->Read(chunk_header, sizeof(*chunk_header)) != + sizeof(*chunk_header)) + return false; // EOF. + if (ReadFourCC(chunk_header->ID) == sought_chunk_id) + return true; // Sought chunk found. + // Ignore current chunk by skipping its payload. + if (!readable->SeekForward(chunk_header->Size)) + return false; // EOF or error. + } +} + +bool ReadFmtChunkData(FmtSubchunk* fmt_subchunk, ReadableWav* readable) { + // Reads "fmt " chunk payload. + if (readable->Read(&(fmt_subchunk->AudioFormat), kFmtSubchunkSize) != + kFmtSubchunkSize) + return false; + const uint32_t fmt_size = ReadLE32(fmt_subchunk->header.Size); + if (fmt_size != kFmtSubchunkSize) { + // There is an optional two-byte extension field permitted to be present + // with PCM, but which must be zero. + int16_t ext_size; + if (kFmtSubchunkSize + sizeof(ext_size) != fmt_size) + return false; + if (readable->Read(&ext_size, sizeof(ext_size)) != sizeof(ext_size)) + return false; + if (ext_size != 0) + return false; + } + return true; +} + +} // namespace + +bool CheckWavParameters(size_t num_channels, + int sample_rate, + WavFormat format, + size_t bytes_per_sample, + size_t num_samples) { + // num_channels, sample_rate, and bytes_per_sample must be positive, must fit + // in their respective fields, and their product must fit in the 32-bit + // ByteRate field. + if (num_channels == 0 || sample_rate <= 0 || bytes_per_sample == 0) + return false; + if (static_cast(sample_rate) > std::numeric_limits::max()) + return false; + if (num_channels > std::numeric_limits::max()) + return false; + if (static_cast(bytes_per_sample) * 8 > + std::numeric_limits::max()) + return false; + if (static_cast(sample_rate) * num_channels * bytes_per_sample > + std::numeric_limits::max()) + return false; + + // format and bytes_per_sample must agree. 
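+  // For example, the files produced by WavWriter use format == kWavFormatPcm
+  // with bytes_per_sample == 2 (16-bit PCM).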
+ switch (format) { + case kWavFormatPcm: + // Other values may be OK, but for now we're conservative: + if (bytes_per_sample != 1 && bytes_per_sample != 2) + return false; + break; + case kWavFormatALaw: + case kWavFormatMuLaw: + if (bytes_per_sample != 1) + return false; + break; + default: + return false; + } + + // The number of bytes in the file, not counting the first ChunkHeader, must + // be less than 2^32; otherwise, the ChunkSize field overflows. + const size_t header_size = kWavHeaderSize - sizeof(ChunkHeader); + const size_t max_samples = + (std::numeric_limits::max() - header_size) / bytes_per_sample; + if (num_samples > max_samples) + return false; + + // Each channel must have the same number of samples. + if (num_samples % num_channels != 0) + return false; + + return true; +} + +void WriteWavHeader(uint8_t* buf, + size_t num_channels, + int sample_rate, + WavFormat format, + size_t bytes_per_sample, + size_t num_samples) { + RTC_CHECK(CheckWavParameters(num_channels, sample_rate, format, + bytes_per_sample, num_samples)); + + auto header = rtc::MsanUninitialized({}); + const size_t bytes_in_payload = bytes_per_sample * num_samples; + + WriteFourCC(&header.riff.header.ID, 'R', 'I', 'F', 'F'); + WriteLE32(&header.riff.header.Size, RiffChunkSize(bytes_in_payload)); + WriteFourCC(&header.riff.Format, 'W', 'A', 'V', 'E'); + + WriteFourCC(&header.fmt.header.ID, 'f', 'm', 't', ' '); + WriteLE32(&header.fmt.header.Size, kFmtSubchunkSize); + WriteLE16(&header.fmt.AudioFormat, format); + WriteLE16(&header.fmt.NumChannels, static_cast(num_channels)); + WriteLE32(&header.fmt.SampleRate, sample_rate); + WriteLE32(&header.fmt.ByteRate, + ByteRate(num_channels, sample_rate, bytes_per_sample)); + WriteLE16(&header.fmt.BlockAlign, BlockAlign(num_channels, bytes_per_sample)); + WriteLE16(&header.fmt.BitsPerSample, + static_cast(8 * bytes_per_sample)); + + WriteFourCC(&header.data.header.ID, 'd', 'a', 't', 'a'); + WriteLE32(&header.data.header.Size, static_cast(bytes_in_payload)); + + // Do an extra copy rather than writing everything to buf directly, since buf + // might not be correctly aligned. + memcpy(buf, &header, kWavHeaderSize); +} + +bool ReadWavHeader(ReadableWav* readable, + size_t* num_channels, + int* sample_rate, + WavFormat* format, + size_t* bytes_per_sample, + size_t* num_samples) { + auto header = rtc::MsanUninitialized({}); + + // Read RIFF chunk. + if (readable->Read(&header.riff, sizeof(header.riff)) != sizeof(header.riff)) + return false; + if (ReadFourCC(header.riff.header.ID) != "RIFF") + return false; + if (ReadFourCC(header.riff.Format) != "WAVE") + return false; + + // Find "fmt " and "data" chunks. While the official Wave file specification + // does not put requirements on the chunks order, it is uncommon to find the + // "data" chunk before the "fmt " one. The code below fails if this is not the + // case. + if (!FindWaveChunk(&header.fmt.header, readable, "fmt ")) { + RTC_LOG(LS_ERROR) << "Cannot find 'fmt ' chunk."; + return false; + } + if (!ReadFmtChunkData(&header.fmt, readable)) { + RTC_LOG(LS_ERROR) << "Cannot read 'fmt ' chunk."; + return false; + } + if (!FindWaveChunk(&header.data.header, readable, "data")) { + RTC_LOG(LS_ERROR) << "Cannot find 'data' chunk."; + return false; + } + + // Parse needed fields. 
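+  // All on-disk fields are little-endian; on little-endian hosts the
+  // ReadLE16()/ReadLE32() helpers above are no-ops.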
+  *format = static_cast<WavFormat>(ReadLE16(header.fmt.AudioFormat));
+  *num_channels = ReadLE16(header.fmt.NumChannels);
+  *sample_rate = ReadLE32(header.fmt.SampleRate);
+  *bytes_per_sample = ReadLE16(header.fmt.BitsPerSample) / 8;
+  const size_t bytes_in_payload = ReadLE32(header.data.header.Size);
+  if (*bytes_per_sample == 0)
+    return false;
+  *num_samples = bytes_in_payload / *bytes_per_sample;
+
+  if (ReadLE32(header.riff.header.Size) < RiffChunkSize(bytes_in_payload))
+    return false;
+  if (ReadLE32(header.fmt.ByteRate) !=
+      ByteRate(*num_channels, *sample_rate, *bytes_per_sample))
+    return false;
+  if (ReadLE16(header.fmt.BlockAlign) !=
+      BlockAlign(*num_channels, *bytes_per_sample))
+    return false;
+
+  return CheckWavParameters(*num_channels, *sample_rate, *format,
+                            *bytes_per_sample, *num_samples);
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/logging/wav_header.h b/audio_processing/logging/wav_header.h
new file mode 100644
index 0000000..0c83d8d
--- /dev/null
+++ b/audio_processing/logging/wav_header.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_WAV_HEADER_H_
+#define COMMON_AUDIO_WAV_HEADER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+
+static const size_t kWavHeaderSize = 44;
+
+class ReadableWav {
+ public:
+  // Returns the number of bytes read.
+  virtual size_t Read(void* buf, size_t num_bytes) = 0;
+  virtual bool SeekForward(uint32_t num_bytes) = 0;
+  virtual ~ReadableWav() = default;
+};
+
+enum WavFormat {
+  kWavFormatPcm = 1,    // PCM, each sample of size bytes_per_sample
+  kWavFormatALaw = 6,   // 8-bit ITU-T G.711 A-law
+  kWavFormatMuLaw = 7,  // 8-bit ITU-T G.711 mu-law
+};
+
+// Return true if the given parameters will make a well-formed WAV header.
+bool CheckWavParameters(size_t num_channels,
+                        int sample_rate,
+                        WavFormat format,
+                        size_t bytes_per_sample,
+                        size_t num_samples);
+
+// Write a kWavHeaderSize bytes long WAV header to buf. The payload that
+// follows the header is supposed to have the specified number of interleaved
+// channels and contain the specified total number of samples of the specified
+// type. CHECKs the input parameters for validity.
+void WriteWavHeader(uint8_t* buf,
+                    size_t num_channels,
+                    int sample_rate,
+                    WavFormat format,
+                    size_t bytes_per_sample,
+                    size_t num_samples);
+
+// Read a WAV header from an implemented ReadableWav and parse the values into
+// the provided output parameters. ReadableWav is used because the header can
+// be variably sized. Returns false if the header is invalid.
+bool ReadWavHeader(ReadableWav* readable,
+                   size_t* num_channels,
+                   int* sample_rate,
+                   WavFormat* format,
+                   size_t* bytes_per_sample,
+                   size_t* num_samples);
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_WAV_HEADER_H_
diff --git a/audio_processing/resampler/push_sinc_resampler.cc b/audio_processing/resampler/push_sinc_resampler.cc
new file mode 100644
index 0000000..2627478
--- /dev/null
+++ b/audio_processing/resampler/push_sinc_resampler.cc
@@ -0,0 +1,102 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "resampler/push_sinc_resampler.h" + +#include + +#include "include/audio_util.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +PushSincResampler::PushSincResampler(size_t source_frames, + size_t destination_frames) + : resampler_(new SincResampler(source_frames * 1.0 / destination_frames, + source_frames, + this)), + source_ptr_(nullptr), + source_ptr_int_(nullptr), + destination_frames_(destination_frames), + first_pass_(true), + source_available_(0) {} + +PushSincResampler::~PushSincResampler() {} + +size_t PushSincResampler::Resample(const int16_t* source, + size_t source_length, + int16_t* destination, + size_t destination_capacity) { + if (!float_buffer_.get()) + float_buffer_.reset(new float[destination_frames_]); + + source_ptr_int_ = source; + // Pass nullptr as the float source to have Run() read from the int16 source. + Resample(nullptr, source_length, float_buffer_.get(), destination_frames_); + FloatS16ToS16(float_buffer_.get(), destination_frames_, destination); + source_ptr_int_ = nullptr; + return destination_frames_; +} + +size_t PushSincResampler::Resample(const float* source, + size_t source_length, + float* destination, + size_t destination_capacity) { + RTC_CHECK_EQ(source_length, resampler_->request_frames()); + RTC_CHECK_GE(destination_capacity, destination_frames_); + // Cache the source pointer. Calling Resample() will immediately trigger + // the Run() callback whereupon we provide the cached value. + source_ptr_ = source; + source_available_ = source_length; + + // On the first pass, we call Resample() twice. During the first call, we + // provide dummy input and discard the output. This is done to prime the + // SincResampler buffer with the correct delay (half the kernel size), thereby + // ensuring that all later Resample() calls will only result in one input + // request through Run(). + // + // If this wasn't done, SincResampler would call Run() twice on the first + // pass, and we'd have to introduce an entire |source_frames| of delay, rather + // than the minimum half kernel. + // + // It works out that ChunkSize() is exactly the amount of output we need to + // request in order to prime the buffer with a single Run() request for + // |source_frames|. + if (first_pass_) + resampler_->Resample(resampler_->ChunkSize(), destination); + + resampler_->Resample(destination_frames_, destination); + source_ptr_ = nullptr; + return destination_frames_; +} + +void PushSincResampler::Run(size_t frames, float* destination) { + // Ensure we are only asked for the available samples. This would fail if + // Run() was triggered more than once per Resample() call. + RTC_CHECK_EQ(source_available_, frames); + + if (first_pass_) { + // Provide dummy input on the first pass, the output of which will be + // discarded, as described in Resample(). 
+ std::memset(destination, 0, frames * sizeof(*destination)); + first_pass_ = false; + return; + } + + if (source_ptr_) { + std::memcpy(destination, source_ptr_, frames * sizeof(*destination)); + } else { + for (size_t i = 0; i < frames; ++i) + destination[i] = static_cast(source_ptr_int_[i]); + } + source_available_ -= frames; +} + +} // namespace webrtc diff --git a/audio_processing/resampler/push_sinc_resampler.h b/audio_processing/resampler/push_sinc_resampler.h new file mode 100644 index 0000000..ba00f77 --- /dev/null +++ b/audio_processing/resampler/push_sinc_resampler.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef COMMON_AUDIO_RESAMPLER_PUSH_SINC_RESAMPLER_H_ +#define COMMON_AUDIO_RESAMPLER_PUSH_SINC_RESAMPLER_H_ + +#include +#include + +#include + +#include "resampler/sinc_resampler.h" +#include "rtc_base/constructor_magic.h" + +namespace webrtc { + +// A thin wrapper over SincResampler to provide a push-based interface as +// required by WebRTC. SincResampler uses a pull-based interface, and will +// use SincResamplerCallback::Run() to request data upon a call to Resample(). +// These Run() calls will happen on the same thread Resample() is called on. +class PushSincResampler : public SincResamplerCallback { + public: + // Provide the size of the source and destination blocks in samples. These + // must correspond to the same time duration (typically 10 ms) as the sample + // ratio is inferred from them. + PushSincResampler(size_t source_frames, size_t destination_frames); + ~PushSincResampler() override; + + // Perform the resampling. |source_frames| must always equal the + // |source_frames| provided at construction. |destination_capacity| must be + // at least as large as |destination_frames|. Returns the number of samples + // provided in destination (for convenience, since this will always be equal + // to |destination_frames|). + size_t Resample(const int16_t* source, + size_t source_frames, + int16_t* destination, + size_t destination_capacity); + size_t Resample(const float* source, + size_t source_frames, + float* destination, + size_t destination_capacity); + + // Delay due to the filter kernel. Essentially, the time after which an input + // sample will appear in the resampled output. + static float AlgorithmicDelaySeconds(int source_rate_hz) { + return 1.f / source_rate_hz * SincResampler::kKernelSize / 2; + } + + protected: + // Implements SincResamplerCallback. + void Run(size_t frames, float* destination) override; + + private: + friend class PushSincResamplerTest; + SincResampler* get_resampler_for_testing() { return resampler_.get(); } + + std::unique_ptr resampler_; + std::unique_ptr float_buffer_; + const float* source_ptr_; + const int16_t* source_ptr_int_; + const size_t destination_frames_; + + // True on the first call to Resample(), to prime the SincResampler buffer. + bool first_pass_; + + // Used to assert we are only requested for as much data as is available. 
+ size_t source_available_; + + RTC_DISALLOW_COPY_AND_ASSIGN(PushSincResampler); +}; + +} // namespace webrtc + +#endif // COMMON_AUDIO_RESAMPLER_PUSH_SINC_RESAMPLER_H_ diff --git a/audio_processing/resampler/sinc_resampler.cc b/audio_processing/resampler/sinc_resampler.cc new file mode 100644 index 0000000..890a065 --- /dev/null +++ b/audio_processing/resampler/sinc_resampler.cc @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Modified from the Chromium original: +// src/media/base/sinc_resampler.cc + +// Initial input buffer layout, dividing into regions r0_ to r4_ (note: r0_, r3_ +// and r4_ will move after the first load): +// +// |----------------|-----------------------------------------|----------------| +// +// request_frames_ +// <---------------------------------------------------------> +// r0_ (during first load) +// +// kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 +// <---------------> <---------------> <---------------> <---------------> +// r1_ r2_ r3_ r4_ +// +// block_size_ == r4_ - r2_ +// <---------------------------------------> +// +// request_frames_ +// <------------------ ... -----------------> +// r0_ (during second load) +// +// On the second request r0_ slides to the right by kKernelSize / 2 and r3_, r4_ +// and block_size_ are reinitialized via step (3) in the algorithm below. +// +// These new regions remain constant until a Flush() occurs. While complicated, +// this allows us to reduce jitter by always requesting the same amount from the +// provided callback. +// +// The algorithm: +// +// 1) Allocate input_buffer of size: request_frames_ + kKernelSize; this ensures +// there's enough room to read request_frames_ from the callback into region +// r0_ (which will move between the first and subsequent passes). +// +// 2) Let r1_, r2_ each represent half the kernel centered around r0_: +// +// r0_ = input_buffer_ + kKernelSize / 2 +// r1_ = input_buffer_ +// r2_ = r0_ +// +// r0_ is always request_frames_ in size. r1_, r2_ are kKernelSize / 2 in +// size. r1_ must be zero initialized to avoid convolution with garbage (see +// step (5) for why). +// +// 3) Let r3_, r4_ each represent half the kernel right aligned with the end of +// r0_ and choose block_size_ as the distance in frames between r4_ and r2_: +// +// r3_ = r0_ + request_frames_ - kKernelSize +// r4_ = r0_ + request_frames_ - kKernelSize / 2 +// block_size_ = r4_ - r2_ = request_frames_ - kKernelSize / 2 +// +// 4) Consume request_frames_ frames into r0_. +// +// 5) Position kernel centered at start of r2_ and generate output frames until +// the kernel is centered at the start of r4_ or we've finished generating +// all the output frames. +// +// 6) Wrap left over data from the r3_ to r1_ and r4_ to r2_. +// +// 7) If we're on the second load, in order to avoid overwriting the frames we +// just wrapped from r4_ we need to slide r0_ to the right by the size of +// r4_, which is kKernelSize / 2: +// +// r0_ = r0_ + kKernelSize / 2 = input_buffer_ + kKernelSize +// +// r3_, r4_, and block_size_ then need to be reinitialized, so goto (3). +// +// 8) Else, if we're not on the second load, goto (4). 
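+//
+// Worked example (illustrative): with the default request_frames_ =
+// kDefaultRequestSize = 512 and kKernelSize = 32, step (3) gives
+//   r1_ = input_buffer_, r2_ = input_buffer_ + 16,
+//   block_size_ = request_frames_ - kKernelSize / 2 = 496,
+// so for a 3:1 downsampling ratio ChunkSize() returns 496 / 3 = 165 output
+// frames per single Run() request.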
+// +// Note: we're glossing over how the sub-sample handling works with +// |virtual_source_idx_|, etc. + +// MSVC++ requires this to be set before any other includes to get M_PI. +#define _USE_MATH_DEFINES + +#include "resampler/sinc_resampler.h" + +#include +#include +#include + +#include + +#include "rtc_base/checks.h" +#include "rtc_base/system/arch.h" +#include "system_wrappers/include/cpu_features_wrapper.h" // kSSE2, WebRtc_G... + +namespace webrtc { + +namespace { + +double SincScaleFactor(double io_ratio) { + // |sinc_scale_factor| is basically the normalized cutoff frequency of the + // low-pass filter. + double sinc_scale_factor = io_ratio > 1.0 ? 1.0 / io_ratio : 1.0; + + // The sinc function is an idealized brick-wall filter, but since we're + // windowing it the transition from pass to stop does not happen right away. + // So we should adjust the low pass filter cutoff slightly downward to avoid + // some aliasing at the very high-end. + // TODO(crogers): this value is empirical and to be more exact should vary + // depending on kKernelSize. + sinc_scale_factor *= 0.9; + + return sinc_scale_factor; +} + +} // namespace + +const size_t SincResampler::kKernelSize; + +// If we know the minimum architecture at compile time, avoid CPU detection. +#if defined(WEBRTC_ARCH_X86_FAMILY) +#if defined(__SSE2__) +#define CONVOLVE_FUNC Convolve_SSE +void SincResampler::InitializeCPUSpecificFeatures() {} +#else +// x86 CPU detection required. Function will be set by +// InitializeCPUSpecificFeatures(). +// TODO(dalecurtis): Once Chrome moves to an SSE baseline this can be removed. +#define CONVOLVE_FUNC convolve_proc_ + +void SincResampler::InitializeCPUSpecificFeatures() { + convolve_proc_ = WebRtc_GetCPUInfo(kSSE2) ? Convolve_SSE : Convolve_C; +} +#endif +#elif defined(WEBRTC_HAS_NEON) +#define CONVOLVE_FUNC Convolve_NEON +void SincResampler::InitializeCPUSpecificFeatures() {} +#else +// Unknown architecture. +#define CONVOLVE_FUNC Convolve_C +void SincResampler::InitializeCPUSpecificFeatures() {} +#endif + +SincResampler::SincResampler(double io_sample_rate_ratio, + size_t request_frames, + SincResamplerCallback* read_cb) + : io_sample_rate_ratio_(io_sample_rate_ratio), + read_cb_(read_cb), + request_frames_(request_frames), + input_buffer_size_(request_frames_ + kKernelSize), + // Create input buffers with a 16-byte alignment for SSE optimizations. 
+ kernel_storage_(static_cast( + AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))), + kernel_pre_sinc_storage_(static_cast( + AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))), + kernel_window_storage_(static_cast( + AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))), + input_buffer_(static_cast( + AlignedMalloc(sizeof(float) * input_buffer_size_, 16))), +#if defined(WEBRTC_ARCH_X86_FAMILY) && !defined(__SSE2__) + convolve_proc_(nullptr), +#endif + r1_(input_buffer_.get()), + r2_(input_buffer_.get() + kKernelSize / 2) { +#if defined(WEBRTC_ARCH_X86_FAMILY) && !defined(__SSE2__) + InitializeCPUSpecificFeatures(); + RTC_DCHECK(convolve_proc_); +#endif + RTC_DCHECK_GT(request_frames_, 0); + Flush(); + RTC_DCHECK_GT(block_size_, kKernelSize); + + memset(kernel_storage_.get(), 0, + sizeof(*kernel_storage_.get()) * kKernelStorageSize); + memset(kernel_pre_sinc_storage_.get(), 0, + sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize); + memset(kernel_window_storage_.get(), 0, + sizeof(*kernel_window_storage_.get()) * kKernelStorageSize); + + InitializeKernel(); +} + +SincResampler::~SincResampler() {} + +void SincResampler::UpdateRegions(bool second_load) { + // Setup various region pointers in the buffer (see diagram above). If we're + // on the second load we need to slide r0_ to the right by kKernelSize / 2. + r0_ = input_buffer_.get() + (second_load ? kKernelSize : kKernelSize / 2); + r3_ = r0_ + request_frames_ - kKernelSize; + r4_ = r0_ + request_frames_ - kKernelSize / 2; + block_size_ = r4_ - r2_; + + // r1_ at the beginning of the buffer. + RTC_DCHECK_EQ(r1_, input_buffer_.get()); + // r1_ left of r2_, r4_ left of r3_ and size correct. + RTC_DCHECK_EQ(r2_ - r1_, r4_ - r3_); + // r2_ left of r3. + RTC_DCHECK_LT(r2_, r3_); +} + +void SincResampler::InitializeKernel() { + // Blackman window parameters. + static const double kAlpha = 0.16; + static const double kA0 = 0.5 * (1.0 - kAlpha); + static const double kA1 = 0.5; + static const double kA2 = 0.5 * kAlpha; + + // Generates a set of windowed sinc() kernels. + // We generate a range of sub-sample offsets from 0.0 to 1.0. + const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_); + for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) { + const float subsample_offset = + static_cast(offset_idx) / kKernelOffsetCount; + + for (size_t i = 0; i < kKernelSize; ++i) { + const size_t idx = i + offset_idx * kKernelSize; + const float pre_sinc = static_cast( + M_PI * (static_cast(i) - static_cast(kKernelSize / 2) - + subsample_offset)); + kernel_pre_sinc_storage_[idx] = pre_sinc; + + // Compute Blackman window, matching the offset of the sinc(). + const float x = (i - subsample_offset) / kKernelSize; + const float window = static_cast(kA0 - kA1 * cos(2.0 * M_PI * x) + + kA2 * cos(4.0 * M_PI * x)); + kernel_window_storage_[idx] = window; + + // Compute the sinc with offset, then window the sinc() function and store + // at the correct offset. + kernel_storage_[idx] = static_cast( + window * ((pre_sinc == 0) + ? sinc_scale_factor + : (sin(sinc_scale_factor * pre_sinc) / pre_sinc))); + } + } +} + +void SincResampler::SetRatio(double io_sample_rate_ratio) { + if (fabs(io_sample_rate_ratio_ - io_sample_rate_ratio) < + std::numeric_limits::epsilon()) { + return; + } + + io_sample_rate_ratio_ = io_sample_rate_ratio; + + // Optimize reinitialization by reusing values which are independent of + // |sinc_scale_factor|. Provides a 3x speedup. 
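+  // Each kernel tap was computed in InitializeKernel() as
+  //   kernel[idx] = window[idx] * sin(scale * pre_sinc[idx]) / pre_sinc[idx]
+  // (with the limit value |scale| when pre_sinc == 0). |window| and
+  // |pre_sinc| do not depend on the ratio, so only the sin() factor is
+  // recomputed below.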
+ const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_); + for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) { + for (size_t i = 0; i < kKernelSize; ++i) { + const size_t idx = i + offset_idx * kKernelSize; + const float window = kernel_window_storage_[idx]; + const float pre_sinc = kernel_pre_sinc_storage_[idx]; + + kernel_storage_[idx] = static_cast( + window * ((pre_sinc == 0) + ? sinc_scale_factor + : (sin(sinc_scale_factor * pre_sinc) / pre_sinc))); + } + } +} + +void SincResampler::Resample(size_t frames, float* destination) { + size_t remaining_frames = frames; + + // Step (1) -- Prime the input buffer at the start of the input stream. + if (!buffer_primed_ && remaining_frames) { + read_cb_->Run(request_frames_, r0_); + buffer_primed_ = true; + } + + // Step (2) -- Resample! const what we can outside of the loop for speed. It + // actually has an impact on ARM performance. See inner loop comment below. + const double current_io_ratio = io_sample_rate_ratio_; + const float* const kernel_ptr = kernel_storage_.get(); + while (remaining_frames) { + // |i| may be negative if the last Resample() call ended on an iteration + // that put |virtual_source_idx_| over the limit. + // + // Note: The loop construct here can severely impact performance on ARM + // or when built with clang. See https://codereview.chromium.org/18566009/ + for (int i = static_cast( + ceil((block_size_ - virtual_source_idx_) / current_io_ratio)); + i > 0; --i) { + RTC_DCHECK_LT(virtual_source_idx_, block_size_); + + // |virtual_source_idx_| lies in between two kernel offsets so figure out + // what they are. + const int source_idx = static_cast(virtual_source_idx_); + const double subsample_remainder = virtual_source_idx_ - source_idx; + + const double virtual_offset_idx = + subsample_remainder * kKernelOffsetCount; + const int offset_idx = static_cast(virtual_offset_idx); + + // We'll compute "convolutions" for the two kernels which straddle + // |virtual_source_idx_|. + const float* const k1 = kernel_ptr + offset_idx * kKernelSize; + const float* const k2 = k1 + kKernelSize; + + // Ensure |k1|, |k2| are 16-byte aligned for SIMD usage. Should always be + // true so long as kKernelSize is a multiple of 16. + RTC_DCHECK_EQ(0, reinterpret_cast(k1) % 16); + RTC_DCHECK_EQ(0, reinterpret_cast(k2) % 16); + + // Initialize input pointer based on quantized |virtual_source_idx_|. + const float* const input_ptr = r1_ + source_idx; + + // Figure out how much to weight each kernel's "convolution". + const double kernel_interpolation_factor = + virtual_offset_idx - offset_idx; + *destination++ = + CONVOLVE_FUNC(input_ptr, k1, k2, kernel_interpolation_factor); + + // Advance the virtual index. + virtual_source_idx_ += current_io_ratio; + + if (!--remaining_frames) + return; + } + + // Wrap back around to the start. + virtual_source_idx_ -= block_size_; + + // Step (3) -- Copy r3_, r4_ to r1_, r2_. + // This wraps the last input frames back to the start of the buffer. + memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize); + + // Step (4) -- Reinitialize regions if necessary. + if (r0_ == r2_) + UpdateRegions(true); + + // Step (5) -- Refresh the buffer with more input. 
+ read_cb_->Run(request_frames_, r0_); + } +} + +#undef CONVOLVE_FUNC + +size_t SincResampler::ChunkSize() const { + return static_cast(block_size_ / io_sample_rate_ratio_); +} + +void SincResampler::Flush() { + virtual_source_idx_ = 0; + buffer_primed_ = false; + memset(input_buffer_.get(), 0, + sizeof(*input_buffer_.get()) * input_buffer_size_); + UpdateRegions(false); +} + +float SincResampler::Convolve_C(const float* input_ptr, + const float* k1, + const float* k2, + double kernel_interpolation_factor) { + float sum1 = 0; + float sum2 = 0; + + // Generate a single output sample. Unrolling this loop hurt performance in + // local testing. + size_t n = kKernelSize; + while (n--) { + sum1 += *input_ptr * *k1++; + sum2 += *input_ptr++ * *k2++; + } + + // Linearly interpolate the two "convolutions". + return static_cast((1.0 - kernel_interpolation_factor) * sum1 + + kernel_interpolation_factor * sum2); +} + +} // namespace webrtc diff --git a/audio_processing/resampler/sinc_resampler.h b/audio_processing/resampler/sinc_resampler.h new file mode 100644 index 0000000..5181c18 --- /dev/null +++ b/audio_processing/resampler/sinc_resampler.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Modified from the Chromium original here: +// src/media/base/sinc_resampler.h + +#ifndef COMMON_AUDIO_RESAMPLER_SINC_RESAMPLER_H_ +#define COMMON_AUDIO_RESAMPLER_SINC_RESAMPLER_H_ + +#include + +#include + +#include "rtc_base/constructor_magic.h" +#include "rtc_base/gtest_prod_util.h" +#include "rtc_base/memory/aligned_malloc.h" +#include "rtc_base/system/arch.h" + +namespace webrtc { + +// Callback class for providing more data into the resampler. Expects |frames| +// of data to be rendered into |destination|; zero padded if not enough frames +// are available to satisfy the request. +class SincResamplerCallback { + public: + virtual ~SincResamplerCallback() {} + virtual void Run(size_t frames, float* destination) = 0; +}; + +// SincResampler is a high-quality single-channel sample-rate converter. +class SincResampler { + public: + // The kernel size can be adjusted for quality (higher is better) at the + // expense of performance. Must be a multiple of 32. + // TODO(dalecurtis): Test performance to see if we can jack this up to 64+. + static const size_t kKernelSize = 32; + + // Default request size. Affects how often and for how much SincResampler + // calls back for input. Must be greater than kKernelSize. + static const size_t kDefaultRequestSize = 512; + + // The kernel offset count is used for interpolation and is the number of + // sub-sample kernel shifts. Can be adjusted for quality (higher is better) + // at the expense of allocating more memory. + static const size_t kKernelOffsetCount = 32; + static const size_t kKernelStorageSize = + kKernelSize * (kKernelOffsetCount + 1); + + // Constructs a SincResampler with the specified |read_cb|, which is used to + // acquire audio data for resampling. |io_sample_rate_ratio| is the ratio + // of input / output sample rates. |request_frames| controls the size in + // frames of the buffer requested by each |read_cb| call. The value must be + // greater than kKernelSize. 
Specify kDefaultRequestSize if there are no
+  // request size constraints.
+  SincResampler(double io_sample_rate_ratio,
+                size_t request_frames,
+                SincResamplerCallback* read_cb);
+  virtual ~SincResampler();
+
+  // Resample |frames| of data from |read_cb_| into |destination|.
+  void Resample(size_t frames, float* destination);
+
+  // The maximum size in frames that guarantees Resample() will only make a
+  // single call to |read_cb_| for more data.
+  size_t ChunkSize() const;
+
+  size_t request_frames() const { return request_frames_; }
+
+  // Flush all buffered data and reset internal indices. Not thread safe, do
+  // not call while Resample() is in progress.
+  void Flush();
+
+  // Update |io_sample_rate_ratio_|. SetRatio() will cause a reconstruction of
+  // the kernels used for resampling. Not thread safe, do not call while
+  // Resample() is in progress.
+  //
+  // TODO(ajm): Use this in PushSincResampler rather than reconstructing
+  // SincResampler. We would also need a way to update |request_frames_|.
+  void SetRatio(double io_sample_rate_ratio);
+
+  float* get_kernel_for_testing() { return kernel_storage_.get(); }
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, Convolve);
+  FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, ConvolveBenchmark);
+
+  void InitializeKernel();
+  void UpdateRegions(bool second_load);
+
+  // Selects runtime specific CPU features like SSE. Must be called before
+  // using SincResampler.
+  // TODO(ajm): Currently managed by the class internally. See the note with
+  // |convolve_proc_| below.
+  void InitializeCPUSpecificFeatures();
+
+  // Compute convolution of |k1| and |k2| over |input_ptr|, resultant sums are
+  // linearly interpolated using |kernel_interpolation_factor|. On x86 and ARM
+  // the underlying implementation is chosen at run time.
+  static float Convolve_C(const float* input_ptr,
+                          const float* k1,
+                          const float* k2,
+                          double kernel_interpolation_factor);
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+  static float Convolve_SSE(const float* input_ptr,
+                            const float* k1,
+                            const float* k2,
+                            double kernel_interpolation_factor);
+#elif defined(WEBRTC_HAS_NEON)
+  static float Convolve_NEON(const float* input_ptr,
+                             const float* k1,
+                             const float* k2,
+                             double kernel_interpolation_factor);
+#endif
+
+  // The ratio of input / output sample rates.
+  double io_sample_rate_ratio_;
+
+  // An index on the source input buffer with sub-sample precision. It must be
+  // double precision to avoid drift.
+  double virtual_source_idx_;
+
+  // The buffer is primed once at the very beginning of processing.
+  bool buffer_primed_;
+
+  // Source of data for resampling.
+  SincResamplerCallback* read_cb_;
+
+  // The size (in samples) to request from each |read_cb_| execution.
+  const size_t request_frames_;
+
+  // The number of source frames processed per pass.
+  size_t block_size_;
+
+  // The size (in samples) of the internal buffer used by the resampler.
+  const size_t input_buffer_size_;
+
+  // Contains kKernelOffsetCount kernels back-to-back, each of size
+  // kKernelSize. The kernel offsets are sub-sample shifts of a windowed sinc
+  // shifted from 0.0 to 1.0 sample.
+  std::unique_ptr<float[], AlignedFreeDeleter> kernel_storage_;
+  std::unique_ptr<float[], AlignedFreeDeleter> kernel_pre_sinc_storage_;
+  std::unique_ptr<float[], AlignedFreeDeleter> kernel_window_storage_;
+
+  // Data from the source is copied into this buffer for each processing pass.
+  std::unique_ptr<float[], AlignedFreeDeleter> input_buffer_;
+
+// Stores the runtime selection of which Convolve function to use.
+// TODO(ajm): Move to using a global static which must only be initialized
+// once by the user.
We're not doing this initially, because we don't have +// e.g. a LazyInstance helper in webrtc. +#if defined(WEBRTC_ARCH_X86_FAMILY) && !defined(__SSE2__) + typedef float (*ConvolveProc)(const float*, + const float*, + const float*, + double); + ConvolveProc convolve_proc_; +#endif + + // Pointers to the various regions inside |input_buffer_|. See the diagram at + // the top of the .cc file for more information. + float* r0_; + float* const r1_; + float* const r2_; + float* r3_; + float* r4_; + + RTC_DISALLOW_COPY_AND_ASSIGN(SincResampler); +}; + +} // namespace webrtc + +#endif // COMMON_AUDIO_RESAMPLER_SINC_RESAMPLER_H_ diff --git a/audio_processing/resampler/sinc_resampler_neon.cc b/audio_processing/resampler/sinc_resampler_neon.cc new file mode 100644 index 0000000..99c8a72 --- /dev/null +++ b/audio_processing/resampler/sinc_resampler_neon.cc @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Modified from the Chromium original: +// src/media/base/sinc_resampler.cc + +#if defined(WEBRTC_HAS_NEON) +#include +#endif + +#include "resampler/sinc_resampler.h" + +namespace webrtc { + +#if defined(WEBRTC_HAS_NEON) +float SincResampler::Convolve_NEON(const float* input_ptr, + const float* k1, + const float* k2, + double kernel_interpolation_factor) { + float32x4_t m_input; + float32x4_t m_sums1 = vmovq_n_f32(0); + float32x4_t m_sums2 = vmovq_n_f32(0); + + const float* upper = input_ptr + kKernelSize; + for (; input_ptr < upper;) { + m_input = vld1q_f32(input_ptr); + input_ptr += 4; + m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1)); + k1 += 4; + m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2)); + k2 += 4; + } + + // Linearly interpolate the two "convolutions". + m_sums1 = vmlaq_f32( + vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)), + m_sums2, vmovq_n_f32(kernel_interpolation_factor)); + + // Sum components together. + float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1)); + return vget_lane_f32(vpadd_f32(m_half, m_half), 0); +} +#endif + +} // namespace webrtc diff --git a/audio_processing/resampler/sinc_resampler_sse.cc b/audio_processing/resampler/sinc_resampler_sse.cc new file mode 100644 index 0000000..f74ac75 --- /dev/null +++ b/audio_processing/resampler/sinc_resampler_sse.cc @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +// Modified from the Chromium original: +// src/media/base/simd/sinc_resampler_sse.cc + +#include +#include +#include + +#include "resampler/sinc_resampler.h" + +namespace webrtc { + +float SincResampler::Convolve_SSE(const float* input_ptr, + const float* k1, + const float* k2, + double kernel_interpolation_factor) { + __m128 m_input; + __m128 m_sums1 = _mm_setzero_ps(); + __m128 m_sums2 = _mm_setzero_ps(); + + // Based on |input_ptr| alignment, we need to use loadu or load. Unrolling + // these loops hurt performance in local testing. + if (reinterpret_cast(input_ptr) & 0x0F) { + for (size_t i = 0; i < kKernelSize; i += 4) { + m_input = _mm_loadu_ps(input_ptr + i); + m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i))); + m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i))); + } + } else { + for (size_t i = 0; i < kKernelSize; i += 4) { + m_input = _mm_load_ps(input_ptr + i); + m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i))); + m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i))); + } + } + + // Linearly interpolate the two "convolutions". + m_sums1 = _mm_mul_ps( + m_sums1, + _mm_set_ps1(static_cast(1.0 - kernel_interpolation_factor))); + m_sums2 = _mm_mul_ps( + m_sums2, _mm_set_ps1(static_cast(kernel_interpolation_factor))); + m_sums1 = _mm_add_ps(m_sums1, m_sums2); + + // Sum components together. + float result; + m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1); + _mm_store_ss(&result, + _mm_add_ss(m_sums2, _mm_shuffle_ps(m_sums2, m_sums2, 1))); + + return result; +} + +} // namespace webrtc diff --git a/audio_processing/sparse_fir_filter.cc b/audio_processing/sparse_fir_filter.cc new file mode 100644 index 0000000..f7e6cff --- /dev/null +++ b/audio_processing/sparse_fir_filter.cc @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "sparse_fir_filter.h" + +#include "rtc_base/checks.h" + +namespace webrtc { + +SparseFIRFilter::SparseFIRFilter(const float* nonzero_coeffs, + size_t num_nonzero_coeffs, + size_t sparsity, + size_t offset) + : sparsity_(sparsity), + offset_(offset), + nonzero_coeffs_(nonzero_coeffs, nonzero_coeffs + num_nonzero_coeffs), + state_(sparsity_ * (num_nonzero_coeffs - 1) + offset_, 0.f) { + RTC_CHECK_GE(num_nonzero_coeffs, 1); + RTC_CHECK_GE(sparsity, 1); +} + +SparseFIRFilter::~SparseFIRFilter() = default; + +void SparseFIRFilter::Filter(const float* in, size_t length, float* out) { + // Convolves the input signal |in| with the filter kernel |nonzero_coeffs_| + // taking into account the previous state. + for (size_t i = 0; i < length; ++i) { + out[i] = 0.f; + size_t j; + for (j = 0; i >= j * sparsity_ + offset_ && j < nonzero_coeffs_.size(); + ++j) { + out[i] += in[i - j * sparsity_ - offset_] * nonzero_coeffs_[j]; + } + for (; j < nonzero_coeffs_.size(); ++j) { + out[i] += state_[i + (nonzero_coeffs_.size() - j - 1) * sparsity_] * + nonzero_coeffs_[j]; + } + } + + // Update current state. 
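+  // |state_| holds the last sparsity_ * (num_nonzero_coeffs - 1) + offset_
+  // input samples (see the constructor), which is exactly the history the
+  // next Filter() call needs to continue the convolution across block
+  // boundaries.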
+ if (!state_.empty()) { + if (length >= state_.size()) { + std::memcpy(&state_[0], &in[length - state_.size()], + state_.size() * sizeof(*in)); + } else { + std::memmove(&state_[0], &state_[length], + (state_.size() - length) * sizeof(state_[0])); + std::memcpy(&state_[state_.size() - length], in, length * sizeof(*in)); + } + } +} + +} // namespace webrtc diff --git a/audio_processing/sparse_fir_filter.h b/audio_processing/sparse_fir_filter.h new file mode 100644 index 0000000..5197a8e --- /dev/null +++ b/audio_processing/sparse_fir_filter.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef COMMON_AUDIO_SPARSE_FIR_FILTER_H_ +#define COMMON_AUDIO_SPARSE_FIR_FILTER_H_ + +#include +#include + +#include "rtc_base/constructor_magic.h" + +namespace webrtc { + +// A Finite Impulse Response filter implementation which takes advantage of a +// sparse structure with uniformly distributed non-zero coefficients. +class SparseFIRFilter final { + public: + // |num_nonzero_coeffs| is the number of non-zero coefficients, + // |nonzero_coeffs|. They are assumed to be uniformly distributed every + // |sparsity| samples and with an initial |offset|. The rest of the filter + // coefficients will be assumed zeros. For example, with sparsity = 3, and + // offset = 1 the filter coefficients will be: + // B = [0 coeffs[0] 0 0 coeffs[1] 0 0 coeffs[2] ... ] + // All initial state values will be zeros. + SparseFIRFilter(const float* nonzero_coeffs, + size_t num_nonzero_coeffs, + size_t sparsity, + size_t offset); + ~SparseFIRFilter(); + + // Filters the |in| data supplied. + // |out| must be previously allocated and it must be at least of |length|. + void Filter(const float* in, size_t length, float* out); + + private: + const size_t sparsity_; + const size_t offset_; + const std::vector nonzero_coeffs_; + std::vector state_; + + RTC_DISALLOW_COPY_AND_ASSIGN(SparseFIRFilter); +}; + +} // namespace webrtc + +#endif // COMMON_AUDIO_SPARSE_FIR_FILTER_H_ diff --git a/audio_processing/splitting_filter.cc b/audio_processing/splitting_filter.cc new file mode 100644 index 0000000..7a64361 --- /dev/null +++ b/audio_processing/splitting_filter.cc @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "audio_processing/splitting_filter.h"
+
+#include <array>
+
+#include "channel_buffer.h"
+//#include "signal_processing/include/signal_processing_library.h"
+#include "rtc_base/checks.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+extern void WebRtcSpl_AnalysisQMF(const int16_t* in_data,
+                                  size_t in_data_length,
+                                  int16_t* low_band,
+                                  int16_t* high_band,
+                                  int32_t* filter_state1,
+                                  int32_t* filter_state2);
+extern void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
+                                   const int16_t* high_band,
+                                   size_t band_length,
+                                   int16_t* out_data,
+                                   int32_t* filter_state1,
+                                   int32_t* filter_state2);
+
+#if defined(__cplusplus)
+}
+#endif
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kSamplesPerBand = 160;
+constexpr size_t kTwoBandFilterSamplesPerFrame = 320;
+
+}  // namespace
+
+SplittingFilter::SplittingFilter(size_t num_channels,
+                                 size_t num_bands,
+                                 size_t num_frames)
+    : num_bands_(num_bands) {
+  RTC_CHECK(num_bands_ == 2 || num_bands_ == 3);
+  if (num_bands_ == 2) {
+    two_bands_states_.resize(num_channels);
+  } else if (num_bands_ == 3) {
+    for (size_t i = 0; i < num_channels; ++i) {
+      three_band_filter_banks_.push_back(std::unique_ptr<ThreeBandFilterBank>(
+          new ThreeBandFilterBank(num_frames)));
+    }
+  }
+}
+
+SplittingFilter::~SplittingFilter() = default;
+
+void SplittingFilter::Analysis(const ChannelBuffer<float>* data,
+                               ChannelBuffer<float>* bands) {
+  RTC_DCHECK_EQ(num_bands_, bands->num_bands());
+  RTC_DCHECK_EQ(data->num_channels(), bands->num_channels());
+  RTC_DCHECK_EQ(data->num_frames(),
+                bands->num_frames_per_band() * bands->num_bands());
+  if (bands->num_bands() == 2) {
+    TwoBandsAnalysis(data, bands);
+  } else if (bands->num_bands() == 3) {
+    ThreeBandsAnalysis(data, bands);
+  }
+}
+
+void SplittingFilter::Synthesis(const ChannelBuffer<float>* bands,
+                                ChannelBuffer<float>* data) {
+  RTC_DCHECK_EQ(num_bands_, bands->num_bands());
+  RTC_DCHECK_EQ(data->num_channels(), bands->num_channels());
+  RTC_DCHECK_EQ(data->num_frames(),
+                bands->num_frames_per_band() * bands->num_bands());
+  if (bands->num_bands() == 2) {
+    TwoBandsSynthesis(bands, data);
+  } else if (bands->num_bands() == 3) {
+    ThreeBandsSynthesis(bands, data);
+  }
+}
+
+void SplittingFilter::TwoBandsAnalysis(const ChannelBuffer<float>* data,
+                                       ChannelBuffer<float>* bands) {
+  RTC_DCHECK_EQ(two_bands_states_.size(), data->num_channels());
+  RTC_DCHECK_EQ(data->num_frames(), kTwoBandFilterSamplesPerFrame);
+
+  for (size_t i = 0; i < two_bands_states_.size(); ++i) {
+    std::array<std::array<int16_t, kSamplesPerBand>, 2> bands16;
+    std::array<int16_t, kTwoBandFilterSamplesPerFrame> full_band16;
+    FloatS16ToS16(data->channels(0)[i], full_band16.size(),
+                  full_band16.data());
+    WebRtcSpl_AnalysisQMF(full_band16.data(), data->num_frames(),
+                          bands16[0].data(), bands16[1].data(),
+                          two_bands_states_[i].analysis_state1,
+                          two_bands_states_[i].analysis_state2);
+    S16ToFloatS16(bands16[0].data(), bands16[0].size(), bands->channels(0)[i]);
+    S16ToFloatS16(bands16[1].data(), bands16[1].size(), bands->channels(1)[i]);
+  }
+}
+
+void SplittingFilter::TwoBandsSynthesis(const ChannelBuffer<float>* bands,
+                                        ChannelBuffer<float>* data) {
+  RTC_DCHECK_LE(data->num_channels(), two_bands_states_.size());
+  RTC_DCHECK_EQ(data->num_frames(), kTwoBandFilterSamplesPerFrame);
+  for (size_t i = 0; i < data->num_channels(); ++i) {
+    std::array<std::array<int16_t, kSamplesPerBand>, 2> bands16;
+    std::array<int16_t, kTwoBandFilterSamplesPerFrame> full_band16;
+    FloatS16ToS16(bands->channels(0)[i], bands16[0].size(), bands16[0].data());
+    FloatS16ToS16(bands->channels(1)[i], bands16[1].size(), bands16[1].data());
+    WebRtcSpl_SynthesisQMF(bands16[0].data(), bands16[1].data(),
+                           bands->num_frames_per_band(), full_band16.data(),
                           two_bands_states_[i].synthesis_state1,
+                           two_bands_states_[i].synthesis_state2);
+    S16ToFloatS16(full_band16.data(), full_band16.size(),
+                  data->channels(0)[i]);
+  }
+}
+
+void SplittingFilter::ThreeBandsAnalysis(const ChannelBuffer<float>* data,
+                                         ChannelBuffer<float>* bands) {
+  RTC_DCHECK_EQ(three_band_filter_banks_.size(), data->num_channels());
+  for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
+    three_band_filter_banks_[i]->Analysis(data->channels()[i],
+                                          data->num_frames(), bands->bands(i));
+  }
+}
+
+void SplittingFilter::ThreeBandsSynthesis(const ChannelBuffer<float>* bands,
+                                          ChannelBuffer<float>* data) {
+  RTC_DCHECK_LE(data->num_channels(), three_band_filter_banks_.size());
+  for (size_t i = 0; i < data->num_channels(); ++i) {
+    three_band_filter_banks_[i]->Synthesis(
+        bands->bands(i), bands->num_frames_per_band(), data->channels()[i]);
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/splitting_filter.h b/audio_processing/splitting_filter.h
new file mode 100644
index 0000000..eee8018
--- /dev/null
+++ b/audio_processing/splitting_filter.h
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_
+
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "channel_buffer.h"
+#include "audio_processing/three_band_filter_bank.h"
+
+namespace webrtc {
+
+struct TwoBandsStates {
+  TwoBandsStates() {
+    memset(analysis_state1, 0, sizeof(analysis_state1));
+    memset(analysis_state2, 0, sizeof(analysis_state2));
+    memset(synthesis_state1, 0, sizeof(synthesis_state1));
+    memset(synthesis_state2, 0, sizeof(synthesis_state2));
+  }
+
+  static const int kStateSize = 6;
+  int analysis_state1[kStateSize];
+  int analysis_state2[kStateSize];
+  int synthesis_state1[kStateSize];
+  int synthesis_state2[kStateSize];
+};
+
+// Splitting filter which is able to split into and merge from 2 or 3 frequency
+// bands. The number of channels needs to be provided at construction time.
+//
+// For each block, Analysis() is called to split into bands and then
+// Synthesis() to merge these bands again. The input and output signals are
+// contained in ChannelBuffers and for the different bands an array of
+// ChannelBuffers is used.
+class SplittingFilter {
+ public:
+  SplittingFilter(size_t num_channels, size_t num_bands, size_t num_frames);
+  ~SplittingFilter();
+
+  void Analysis(const ChannelBuffer<float>* data, ChannelBuffer<float>* bands);
+  void Synthesis(const ChannelBuffer<float>* bands, ChannelBuffer<float>* data);
+
+ private:
+  // Two-band analysis and synthesis work for 640 samples or less.
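+  // (Hypothetical usage sketch, for illustration only: a mono 10 ms frame at
+  // 32 kHz is 320 samples, so a caller would construct
+  //   SplittingFilter filter(1, 2, 320);
+  // and pass ChannelBuffer<float> instances to the public
+  // Analysis()/Synthesis() above.)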
+  void TwoBandsAnalysis(const ChannelBuffer<float>* data,
+                        ChannelBuffer<float>* bands);
+  void TwoBandsSynthesis(const ChannelBuffer<float>* bands,
+                         ChannelBuffer<float>* data);
+  void ThreeBandsAnalysis(const ChannelBuffer<float>* data,
+                          ChannelBuffer<float>* bands);
+  void ThreeBandsSynthesis(const ChannelBuffer<float>* bands,
+                           ChannelBuffer<float>* data);
+  void InitBuffers();
+
+  const size_t num_bands_;
+  std::vector<TwoBandsStates> two_bands_states_;
+  std::vector<std::unique_ptr<ThreeBandFilterBank>> three_band_filter_banks_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_
diff --git a/audio_processing/splitting_filter_c.c b/audio_processing/splitting_filter_c.c
new file mode 100644
index 0000000..1c48aa4
--- /dev/null
+++ b/audio_processing/splitting_filter_c.c
@@ -0,0 +1,237 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains the splitting filter functions.
+ *
+ */
+
+#include "rtc_base/checks.h"
+#include <stdint.h>
+
+// Maximum number of samples in a low/high-band frame.
+enum
+{
+    kMaxBandFrameLength = 320  // 10 ms at 64 kHz.
+};
+
+// QMF filter coefficients in Q16.
+static const uint16_t WebRtcSpl_kAllPassFilter1[3] = {6418, 36982, 57261};
+static const uint16_t WebRtcSpl_kAllPassFilter2[3] = {21333, 49062, 63010};
+
+
+static __inline int32_t WebRtcSpl_SubSatW32(int32_t a, int32_t b) {
+  // Do the subtraction in unsigned numbers, since signed overflow is undefined
+  // behavior.
+  const int32_t diff = (int32_t)((uint32_t)a - (uint32_t)b);
+
+  // a - b can't overflow if a and b have the same sign. If they have different
+  // signs, a - b has the same sign as a iff it didn't overflow.
+  if ((a < 0) != (b < 0) && (a < 0) != (diff < 0)) {
+    // The direction of the overflow is obvious from the sign of a - b.
+    return diff < 0 ? INT32_MAX : INT32_MIN;
+  }
+  return diff;
+}
+
+// C + the 32 most significant bits of A * B
+#define WEBRTC_SPL_SCALEDIFF32(A, B, C) \
+  (C + (B >> 16) * A + (((uint32_t)(0x0000FFFF & B) * A) >> 16))
+
+static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) {
+  int16_t out16 = (int16_t)value32;
+
+  if (value32 > 32767)
+    out16 = 32767;
+  else if (value32 < -32768)
+    out16 = -32768;
+
+  return out16;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// WebRtcSpl_AllPassQMF(...)
+//
+// Allpass filter used by the analysis and synthesis parts of the QMF filter.
+//
+// Input:
+//    - in_data             : Input data sequence (Q10)
+//    - data_length         : Length of data sequence (>2)
+//    - filter_coefficients : Filter coefficients (length 3, Q16)
+//
+// Input & Output:
+//    - filter_state        : Filter state (length 6, Q10).
+//
+// Output:
+//    - out_data            : Output data sequence (Q10), length equal to
+//                            |data_length|
+//
+
+void WebRtcSpl_AllPassQMF(int32_t* in_data, size_t data_length,
+                          int32_t* out_data,
+                          const uint16_t* filter_coefficients,
+                          int32_t* filter_state)
+{
+    // The procedure is to filter the input with three first order all pass
+    // filters (cascade operations).
+    //
+    //             a_3 + q^-1    a_2 + q^-1    a_1 + q^-1
+    //    y[n] =  -----------   -----------   -----------   x[n]
+    //            1 + a_3q^-1   1 + a_2q^-1   1 + a_1q^-1
+    //
+    // The input vector |filter_coefficients| includes these three filter
+    // coefficients.
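+    // In the time domain each first-order section computes
+    //     y_i[n] = x[n-1] + a_i * (x[n] - y_i[n-1])
+    // which is what the loops below implement with a saturating subtraction
+    // and a Q16 * Q10 multiply-accumulate (WEBRTC_SPL_SCALEDIFF32).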
+    // The filter state contains the in_data state, in_data[-1], followed by
+    // the out_data state, out_data[-1]. This is repeated for each cascade.
+    // The first cascade filter will filter the |in_data| and store the output
+    // in |out_data|. The second will then take |out_data| as input and make an
+    // intermediate storage in |in_data|, to save memory. The third, and final,
+    // cascade filter operation takes the |in_data| (which is the output from
+    // the previous cascade filter) and stores the output in |out_data|.
+    // Note that the input vector values are changed during the process.
+    size_t k;
+    int32_t diff;
+    // First all-pass cascade; filter from in_data to out_data.
+
+    // Let y_i[n] indicate the output of cascade filter i (with filter
+    // coefficient a_i) at vector position n. Then the final output will be
+    // y[n] = y_3[n].
+
+    // First loop, use the states stored in memory.
+    // "diff" should be safe from wrap around since max values are 2^25
+    // diff = (x[0] - y_1[-1])
+    diff = WebRtcSpl_SubSatW32(in_data[0], filter_state[1]);
+    // y_1[0] = x[-1] + a_1 * (x[0] - y_1[-1])
+    out_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[0], diff,
+                                         filter_state[0]);
+
+    // For the remaining loops, use previous values.
+    for (k = 1; k < data_length; k++)
+    {
+        // diff = (x[n] - y_1[n-1])
+        diff = WebRtcSpl_SubSatW32(in_data[k], out_data[k - 1]);
+        // y_1[n] = x[n-1] + a_1 * (x[n] - y_1[n-1])
+        out_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[0], diff,
                                             in_data[k - 1]);
+    }
+
+    // Update states.
+    filter_state[0] = in_data[data_length - 1];   // x[N-1], becomes x[-1] next time
+    filter_state[1] = out_data[data_length - 1];  // y_1[N-1], becomes y_1[-1] next time
+
+    // Second all-pass cascade; filter from out_data to in_data.
+    // diff = (y_1[0] - y_2[-1])
+    diff = WebRtcSpl_SubSatW32(out_data[0], filter_state[3]);
+    // y_2[0] = y_1[-1] + a_2 * (y_1[0] - y_2[-1])
+    in_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[1], diff,
+                                        filter_state[2]);
+    for (k = 1; k < data_length; k++)
+    {
+        // diff = (y_1[n] - y_2[n-1])
+        diff = WebRtcSpl_SubSatW32(out_data[k], in_data[k - 1]);
+        // y_2[n] = y_1[n-1] + a_2 * (y_1[n] - y_2[n-1])
+        in_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[1], diff,
+                                            out_data[k - 1]);
+    }
+
+    filter_state[2] = out_data[data_length - 1];  // y_1[N-1], becomes y_1[-1] next time
+    filter_state[3] = in_data[data_length - 1];   // y_2[N-1], becomes y_2[-1] next time
+
+    // Third all-pass cascade; filter from in_data to out_data.
+ // diff = (y_2[0] - y[-1]) + diff = WebRtcSpl_SubSatW32(in_data[0], filter_state[5]); + // y[0] = y_2[-1] + a_3 * (y_2[0] - y[-1]) + out_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[2], diff, filter_state[4]); + for (k = 1; k < data_length; k++) + { + // diff = (y_2[n] - y[n-1]) + diff = WebRtcSpl_SubSatW32(in_data[k], out_data[k - 1]); + // y[n] = y_2[n-1] + a_3 * (y_2[n] - y[n-1]) + out_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[2], diff, in_data[k-1]); + } + filter_state[4] = in_data[data_length - 1]; // y_2[N-1], becomes y_2[-1] next time + filter_state[5] = out_data[data_length - 1]; // y[N-1], becomes y[-1] next time +} + +void WebRtcSpl_AnalysisQMF(const int16_t* in_data, size_t in_data_length, + int16_t* low_band, int16_t* high_band, + int32_t* filter_state1, int32_t* filter_state2) +{ + size_t i; + int16_t k; + int32_t tmp; + int32_t half_in1[kMaxBandFrameLength]; + int32_t half_in2[kMaxBandFrameLength]; + int32_t filter1[kMaxBandFrameLength]; + int32_t filter2[kMaxBandFrameLength]; + const size_t band_length = in_data_length / 2; + RTC_DCHECK_EQ(0, in_data_length % 2); + RTC_DCHECK_LE(band_length, kMaxBandFrameLength); + + // Split even and odd samples. Also shift them to Q10. + for (i = 0, k = 0; i < band_length; i++, k += 2) + { + half_in2[i] = ((int32_t)in_data[k]) * (1 << 10); + half_in1[i] = ((int32_t)in_data[k + 1]) * (1 << 10); + } + + // All pass filter even and odd samples, independently. + WebRtcSpl_AllPassQMF(half_in1, band_length, filter1, + WebRtcSpl_kAllPassFilter1, filter_state1); + WebRtcSpl_AllPassQMF(half_in2, band_length, filter2, + WebRtcSpl_kAllPassFilter2, filter_state2); + + // Take the sum and difference of filtered version of odd and even + // branches to get upper & lower band. + for (i = 0; i < band_length; i++) + { + tmp = (filter1[i] + filter2[i] + 1024) >> 11; + low_band[i] = WebRtcSpl_SatW32ToW16(tmp); + + tmp = (filter1[i] - filter2[i] + 1024) >> 11; + high_band[i] = WebRtcSpl_SatW32ToW16(tmp); + } +} + +void WebRtcSpl_SynthesisQMF(const int16_t* low_band, const int16_t* high_band, + size_t band_length, int16_t* out_data, + int32_t* filter_state1, int32_t* filter_state2) +{ + int32_t tmp; + int32_t half_in1[kMaxBandFrameLength]; + int32_t half_in2[kMaxBandFrameLength]; + int32_t filter1[kMaxBandFrameLength]; + int32_t filter2[kMaxBandFrameLength]; + size_t i; + int16_t k; + RTC_DCHECK_LE(band_length, kMaxBandFrameLength); + + // Obtain the sum and difference channels out of upper and lower-band channels. + // Also shift to Q10 domain. + for (i = 0; i < band_length; i++) + { + tmp = (int32_t)low_band[i] + (int32_t)high_band[i]; + half_in1[i] = tmp * (1 << 10); + tmp = (int32_t)low_band[i] - (int32_t)high_band[i]; + half_in2[i] = tmp * (1 << 10); + } + + // all-pass filter the sum and difference channels + WebRtcSpl_AllPassQMF(half_in1, band_length, filter1, + WebRtcSpl_kAllPassFilter2, filter_state1); + WebRtcSpl_AllPassQMF(half_in2, band_length, filter2, + WebRtcSpl_kAllPassFilter1, filter_state2); + + // The filtered signals are even and odd samples of the output. Combine + // them. The signals are Q10 should shift them back to Q0 and take care of + // saturation. 
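+    // Adding 512 (half of 2^10) before each shift below rounds the Q10
+    // values to the nearest integer instead of truncating them.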
+ for (i = 0, k = 0; i < band_length; i++) + { + tmp = (filter2[i] + 512) >> 10; + out_data[k++] = WebRtcSpl_SatW32ToW16(tmp); + + tmp = (filter1[i] + 512) >> 10; + out_data[k++] = WebRtcSpl_SatW32ToW16(tmp); + } + +} diff --git a/audio_processing/three_band_filter_bank.cc b/audio_processing/three_band_filter_bank.cc new file mode 100644 index 0000000..f9034b8 --- /dev/null +++ b/audio_processing/three_band_filter_bank.cc @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// An implementation of a 3-band FIR filter-bank with DCT modulation, similar to +// the proposed in "Multirate Signal Processing for Communication Systems" by +// Fredric J Harris. +// +// The idea is to take a heterodyne system and change the order of the +// components to get something which is efficient to implement digitally. +// +// It is possible to separate the filter using the noble identity as follows: +// +// H(z) = H0(z^3) + z^-1 * H1(z^3) + z^-2 * H2(z^3) +// +// This is used in the analysis stage to first downsample serial to parallel +// and then filter each branch with one of these polyphase decompositions of the +// lowpass prototype. Because each filter is only a modulation of the prototype, +// it is enough to multiply each coefficient by the respective cosine value to +// shift it to the desired band. But because the cosine period is 12 samples, +// it requires separating the prototype even further using the noble identity. +// After filtering and modulating for each band, the output of all filters is +// accumulated to get the downsampled bands. +// +// A similar logic can be applied to the synthesis stage. + +// MSVC++ requires this to be set before any other includes to get M_PI. +#define _USE_MATH_DEFINES + +#include "audio_processing/three_band_filter_bank.h" + +#include + +#include "rtc_base/checks.h" + +namespace webrtc { +namespace { + +const size_t kNumBands = 3; +const size_t kSparsity = 4; + +// Factors to take into account when choosing |kNumCoeffs|: +// 1. Higher |kNumCoeffs|, means faster transition, which ensures less +// aliasing. This is especially important when there is non-linear +// processing between the splitting and merging. +// 2. The delay that this filter bank introduces is +// |kNumBands| * |kSparsity| * |kNumCoeffs| / 2, so it increases linearly +// with |kNumCoeffs|. +// 3. The computation complexity also increases linearly with |kNumCoeffs|. +const size_t kNumCoeffs = 4; + +// The Matlab code to generate these |kLowpassCoeffs| is: +// +// N = kNumBands * kSparsity * kNumCoeffs - 1; +// h = fir1(N, 1 / (2 * kNumBands), kaiser(N + 1, 3.5)); +// reshape(h, kNumBands * kSparsity, kNumCoeffs); +// +// Because the total bandwidth of the lower and higher band is double the middle +// one (because of the spectrum parity), the low-pass prototype is half the +// bandwidth of 1 / (2 * |kNumBands|) and is then shifted with cosine modulation +// to the right places. +// A Kaiser window is used because of its flexibility and the alpha is set to +// 3.5, since that sets a stop band attenuation of 40dB ensuring a fast +// transition. 
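+//
+// With the values above, the delay formula quoted earlier gives
+// kNumBands * kSparsity * kNumCoeffs / 2 = 3 * 4 * 4 / 2 = 24 full-band
+// samples, i.e. 0.5 ms at a 48 kHz sample rate.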
+const float kLowpassCoeffs[kNumBands * kSparsity][kNumCoeffs] = {
+    {-0.00047749f, -0.00496888f, +0.16547118f, +0.00425496f},
+    {-0.00173287f, -0.01585778f, +0.14989004f, +0.00994113f},
+    {-0.00304815f, -0.02536082f, +0.12154542f, +0.01157993f},
+    {-0.00383509f, -0.02982767f, +0.08543175f, +0.00983212f},
+    {-0.00346946f, -0.02587886f, +0.04760441f, +0.00607594f},
+    {-0.00154717f, -0.01136076f, +0.01387458f, +0.00186353f},
+    {+0.00186353f, +0.01387458f, -0.01136076f, -0.00154717f},
+    {+0.00607594f, +0.04760441f, -0.02587886f, -0.00346946f},
+    {+0.00983212f, +0.08543175f, -0.02982767f, -0.00383509f},
+    {+0.01157993f, +0.12154542f, -0.02536082f, -0.00304815f},
+    {+0.00994113f, +0.14989004f, -0.01585778f, -0.00173287f},
+    {+0.00425496f, +0.16547118f, -0.00496888f, -0.00047749f}};
+
+// Downsamples |in| into |out|, taking one every |kNumBands| starting from
+// |offset|. |split_length| is the |out| length. |in| has to be at least
+// |kNumBands| * |split_length| long.
+void Downsample(const float* in,
+                size_t split_length,
+                size_t offset,
+                float* out) {
+  for (size_t i = 0; i < split_length; ++i) {
+    out[i] = in[kNumBands * i + offset];
+  }
+}
+
+// Upsamples |in| into |out|, scaling by |kNumBands| and accumulating it every
+// |kNumBands| starting from |offset|. |split_length| is the |in| length.
+// |out| has to be at least |kNumBands| * |split_length| long.
+void Upsample(const float* in, size_t split_length, size_t offset, float* out) {
+  for (size_t i = 0; i < split_length; ++i) {
+    out[kNumBands * i + offset] += kNumBands * in[i];
+  }
+}
+
+}  // namespace
+
+// Because the low-pass filter prototype has half bandwidth it is possible to
+// use a DCT to shift it in both directions at the same time, to the center
+// frequencies [1 / 12, 3 / 12, 5 / 12].
+ThreeBandFilterBank::ThreeBandFilterBank(size_t length)
+    : in_buffer_(rtc::CheckedDivExact(length, kNumBands)),
+      out_buffer_(in_buffer_.size()) {
+  for (size_t i = 0; i < kSparsity; ++i) {
+    for (size_t j = 0; j < kNumBands; ++j) {
+      analysis_filters_.push_back(
+          std::unique_ptr<SparseFIRFilter>(new SparseFIRFilter(
+              kLowpassCoeffs[i * kNumBands + j], kNumCoeffs, kSparsity, i)));
+      synthesis_filters_.push_back(
+          std::unique_ptr<SparseFIRFilter>(new SparseFIRFilter(
+              kLowpassCoeffs[i * kNumBands + j], kNumCoeffs, kSparsity, i)));
+    }
+  }
+  dct_modulation_.resize(kNumBands * kSparsity);
+  for (size_t i = 0; i < dct_modulation_.size(); ++i) {
+    dct_modulation_[i].resize(kNumBands);
+    for (size_t j = 0; j < kNumBands; ++j) {
+      dct_modulation_[i][j] =
+          2.f * cos(2.f * M_PI * i * (2.f * j + 1.f) / dct_modulation_.size());
+    }
+  }
+}
+
+ThreeBandFilterBank::~ThreeBandFilterBank() = default;
+
+// The analysis can be separated into these steps:
+//   1. Serial to parallel downsampling by a factor of |kNumBands|.
+//   2. Filtering of |kSparsity| different delayed signals with polyphase
+//      decomposition of the low-pass prototype filter and upsampled by a
+//      factor of |kSparsity|.
+//   3. Modulating with cosines and accumulating to get the desired band.
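+// For example, with a 480-sample frame (10 ms at 48 kHz) each of the three
+// bands is 160 samples long, and all 12 polyphase branch filters operate on
+// 160-sample branch signals.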
+void ThreeBandFilterBank::Analysis(const float* in,
+                                   size_t length,
+                                   float* const* out) {
+  RTC_CHECK_EQ(in_buffer_.size(), rtc::CheckedDivExact(length, kNumBands));
+  for (size_t i = 0; i < kNumBands; ++i) {
+    memset(out[i], 0, in_buffer_.size() * sizeof(*out[i]));
+  }
+  for (size_t i = 0; i < kNumBands; ++i) {
+    Downsample(in, in_buffer_.size(), kNumBands - i - 1, &in_buffer_[0]);
+    for (size_t j = 0; j < kSparsity; ++j) {
+      const size_t offset = i + j * kNumBands;
+      analysis_filters_[offset]->Filter(&in_buffer_[0], in_buffer_.size(),
+                                        &out_buffer_[0]);
+      DownModulate(&out_buffer_[0], out_buffer_.size(), offset, out);
+    }
+  }
+}
+
+// The synthesis can be separated into these steps:
+//   1. Modulating with cosines.
+//   2. Filtering each one with a polyphase decomposition of the low-pass
+//      prototype filter upsampled by a factor of |kSparsity| and accumulating
+//      |kSparsity| signals with different delays.
+//   3. Parallel to serial upsampling by a factor of |kNumBands|.
+void ThreeBandFilterBank::Synthesis(const float* const* in,
+                                    size_t split_length,
+                                    float* out) {
+  RTC_CHECK_EQ(in_buffer_.size(), split_length);
+  memset(out, 0, kNumBands * in_buffer_.size() * sizeof(*out));
+  for (size_t i = 0; i < kNumBands; ++i) {
+    for (size_t j = 0; j < kSparsity; ++j) {
+      const size_t offset = i + j * kNumBands;
+      UpModulate(in, in_buffer_.size(), offset, &in_buffer_[0]);
+      synthesis_filters_[offset]->Filter(&in_buffer_[0], in_buffer_.size(),
+                                         &out_buffer_[0]);
+      Upsample(&out_buffer_[0], out_buffer_.size(), i, out);
+    }
+  }
+}
+
+// Modulates |in| by |dct_modulation_| and accumulates it in each of the
+// |kNumBands| bands of |out|. |offset| is the index in the period of the
+// cosines used for modulation. |split_length| is the length of |in| and each
+// band of |out|.
+void ThreeBandFilterBank::DownModulate(const float* in,
+                                       size_t split_length,
+                                       size_t offset,
+                                       float* const* out) {
+  for (size_t i = 0; i < kNumBands; ++i) {
+    for (size_t j = 0; j < split_length; ++j) {
+      out[i][j] += dct_modulation_[offset][i] * in[j];
+    }
+  }
+}
+
+// Modulates each of the |kNumBands| bands of |in| by |dct_modulation_| and
+// accumulates them in |out|. |out| is cleared before starting to accumulate.
+// |offset| is the index in the period of the cosines used for modulation.
+// |split_length| is the length of each band of |in| and |out|.
+void ThreeBandFilterBank::UpModulate(const float* const* in,
+                                     size_t split_length,
+                                     size_t offset,
+                                     float* out) {
+  memset(out, 0, split_length * sizeof(*out));
+  for (size_t i = 0; i < kNumBands; ++i) {
+    for (size_t j = 0; j < split_length; ++j) {
+      out[j] += dct_modulation_[offset][i] * in[i][j];
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/three_band_filter_bank.h b/audio_processing/three_band_filter_bank.h
new file mode 100644
index 0000000..845c19c
--- /dev/null
+++ b/audio_processing/three_band_filter_bank.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_
+#define MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_
+
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "sparse_fir_filter.h"
+
+namespace webrtc {
+
+// An implementation of a 3-band FIR filter-bank with DCT modulation, similar
+// to the one proposed in "Multirate Signal Processing for Communication
+// Systems" by Fredric J Harris.
+// The low-pass filter prototype has these characteristics:
+// * Pass-band ripple = 0.3dB
+// * Pass-band frequency = 0.147 (7kHz at 48kHz)
+// * Stop-band attenuation = 40dB
+// * Stop-band frequency = 0.192 (9.2kHz at 48kHz)
+// * Delay = 24 samples (500us at 48kHz)
+// * Linear phase
+// This filter bank does not satisfy perfect reconstruction. The SNR after
+// analysis and synthesis (with no processing in between) is approximately
+// 9.5dB, depending on the input signal, after compensating for the delay.
+class ThreeBandFilterBank final {
+ public:
+  explicit ThreeBandFilterBank(size_t length);
+  ~ThreeBandFilterBank();
+
+  // Splits |in| into 3 downsampled frequency bands in |out|.
+  // |length| is the |in| length. Each of the 3 bands of |out| has to have a
+  // length of |length| / 3.
+  void Analysis(const float* in, size_t length, float* const* out);
+
+  // Merges the 3 downsampled frequency bands in |in| into |out|.
+  // |split_length| is the length of each band of |in|. |out| has to have at
+  // least a length of 3 * |split_length|.
+  void Synthesis(const float* const* in, size_t split_length, float* out);
+
+ private:
+  void DownModulate(const float* in,
+                    size_t split_length,
+                    size_t offset,
+                    float* const* out);
+  void UpModulate(const float* const* in,
+                  size_t split_length,
+                  size_t offset,
+                  float* out);
+
+  std::vector<float> in_buffer_;
+  std::vector<float> out_buffer_;
+  std::vector<std::unique_ptr<SparseFIRFilter>> analysis_filters_;
+  std::vector<std::unique_ptr<SparseFIRFilter>> synthesis_filters_;
+  std::vector<std::vector<float>> dct_modulation_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_
diff --git a/audio_processing/utility/BUILD.gn b/audio_processing/utility/BUILD.gn
new file mode 100644
index 0000000..a808625
--- /dev/null
+++ b/audio_processing/utility/BUILD.gn
@@ -0,0 +1,131 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+ +import("../../../webrtc.gni") + +rtc_library("cascaded_biquad_filter") { + sources = [ + "cascaded_biquad_filter.cc", + "cascaded_biquad_filter.h", + ] + deps = [ + "../../../api:array_view", + "../../../rtc_base:checks", + ] +} + +rtc_library("legacy_delay_estimator") { + sources = [ + "delay_estimator.cc", + "delay_estimator.h", + "delay_estimator_internal.h", + "delay_estimator_wrapper.cc", + "delay_estimator_wrapper.h", + ] + deps = [ + "../../../rtc_base:checks", + ] +} + +rtc_library("ooura_fft") { + sources = [ + "ooura_fft.cc", + "ooura_fft.h", + "ooura_fft_tables_common.h", + ] + deps = [ + "../../../rtc_base/system:arch", + "../../../system_wrappers:cpu_features_api", + ] + cflags = [] + + if (current_cpu == "x86" || current_cpu == "x64") { + sources += [ + "ooura_fft_sse2.cc", + "ooura_fft_tables_neon_sse2.h", + ] + if (is_posix || is_fuchsia) { + cflags += [ "-msse2" ] + } + } + + if (rtc_build_with_neon) { + sources += [ + "ooura_fft_neon.cc", + "ooura_fft_tables_neon_sse2.h", + ] + + deps += [ "../../../common_audio" ] + + if (current_cpu != "arm64") { + # Enable compilation for the NEON instruction set. + suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ] + cflags += [ "-mfpu=neon" ] + } + } + + if (current_cpu == "mipsel" && mips_float_abi == "hard") { + sources += [ "ooura_fft_mips.cc" ] + } +} + +rtc_library("pffft_wrapper") { + visibility = [ "../*" ] + sources = [ + "pffft_wrapper.cc", + "pffft_wrapper.h", + ] + deps = [ + "../../../api:array_view", + "../../../rtc_base:checks", + "//third_party/pffft", + ] +} + +if (rtc_include_tests) { + rtc_library("cascaded_biquad_filter_unittest") { + testonly = true + + sources = [ + "cascaded_biquad_filter_unittest.cc", + ] + deps = [ + ":cascaded_biquad_filter", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + "//testing/gtest", + ] + } + + rtc_library("legacy_delay_estimator_unittest") { + testonly = true + + sources = [ + "delay_estimator_unittest.cc", + ] + deps = [ + ":legacy_delay_estimator", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + "//testing/gtest", + ] + } + + rtc_library("pffft_wrapper_unittest") { + testonly = true + sources = [ + "pffft_wrapper_unittest.cc", + ] + deps = [ + ":pffft_wrapper", + "../../../test:test_support", + "//testing/gtest", + "//third_party/pffft", + ] + } +} diff --git a/audio_processing/utility/DEPS b/audio_processing/utility/DEPS new file mode 100644 index 0000000..c72d810 --- /dev/null +++ b/audio_processing/utility/DEPS @@ -0,0 +1,3 @@ +include_rules = [ + "+third_party/pffft", +] diff --git a/audio_processing/utility/cascaded_biquad_filter.cc b/audio_processing/utility/cascaded_biquad_filter.cc new file mode 100644 index 0000000..8f42a75 --- /dev/null +++ b/audio_processing/utility/cascaded_biquad_filter.cc @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+#include "audio_processing/utility/cascaded_biquad_filter.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+CascadedBiQuadFilter::BiQuadParam::BiQuadParam(std::complex<float> zero,
+                                               std::complex<float> pole,
+                                               float gain,
+                                               bool mirror_zero_along_i_axis)
+    : zero(zero),
+      pole(pole),
+      gain(gain),
+      mirror_zero_along_i_axis(mirror_zero_along_i_axis) {}
+
+CascadedBiQuadFilter::BiQuadParam::BiQuadParam(const BiQuadParam&) = default;
+
+CascadedBiQuadFilter::BiQuad::BiQuad(
+    const CascadedBiQuadFilter::BiQuadParam& param)
+    : x(), y() {
+  float z_r = std::real(param.zero);
+  float z_i = std::imag(param.zero);
+  float p_r = std::real(param.pole);
+  float p_i = std::imag(param.pole);
+  float gain = param.gain;
+
+  if (param.mirror_zero_along_i_axis) {
+    // Assuming zeroes at z_r and -z_r.
+    RTC_DCHECK(z_i == 0.f);
+    coefficients.b[0] = gain * 1.f;
+    coefficients.b[1] = 0.f;
+    coefficients.b[2] = gain * -(z_r * z_r);
+  } else {
+    // Assuming zeros at (z_r + z_i*i) and (z_r - z_i*i).
+    coefficients.b[0] = gain * 1.f;
+    coefficients.b[1] = gain * -2.f * z_r;
+    coefficients.b[2] = gain * (z_r * z_r + z_i * z_i);
+  }
+
+  // Assuming poles at (p_r + p_i*i) and (p_r - p_i*i).
+  coefficients.a[0] = -2.f * p_r;
+  coefficients.a[1] = p_r * p_r + p_i * p_i;
+}
+
+void CascadedBiQuadFilter::BiQuad::Reset() {
+  x[0] = x[1] = y[0] = y[1] = 0.f;
+}
+
+CascadedBiQuadFilter::CascadedBiQuadFilter(
+    const CascadedBiQuadFilter::BiQuadCoefficients& coefficients,
+    size_t num_biquads)
+    : biquads_(num_biquads, BiQuad(coefficients)) {}
+
+CascadedBiQuadFilter::CascadedBiQuadFilter(
+    const std::vector<CascadedBiQuadFilter::BiQuadParam>& biquad_params) {
+  for (const auto& param : biquad_params) {
+    biquads_.push_back(BiQuad(param));
+  }
+}
+
+CascadedBiQuadFilter::~CascadedBiQuadFilter() = default;
+
+void CascadedBiQuadFilter::Process(rtc::ArrayView<const float> x,
+                                   rtc::ArrayView<float> y) {
+  if (biquads_.size() > 0) {
+    ApplyBiQuad(x, y, &biquads_[0]);
+    for (size_t k = 1; k < biquads_.size(); ++k) {
+      ApplyBiQuad(y, y, &biquads_[k]);
+    }
+  } else {
+    std::copy(x.begin(), x.end(), y.begin());
+  }
+}
+
+void CascadedBiQuadFilter::Process(rtc::ArrayView<float> y) {
+  for (auto& biquad : biquads_) {
+    ApplyBiQuad(y, y, &biquad);
+  }
+}
+
+void CascadedBiQuadFilter::Reset() {
+  for (auto& biquad : biquads_) {
+    biquad.Reset();
+  }
+}
+
+void CascadedBiQuadFilter::ApplyBiQuad(rtc::ArrayView<const float> x,
+                                       rtc::ArrayView<float> y,
+                                       CascadedBiQuadFilter::BiQuad* biquad) {
+  RTC_DCHECK_EQ(x.size(), y.size());
+  const auto* c_b = biquad->coefficients.b;
+  const auto* c_a = biquad->coefficients.a;
+  auto* m_x = biquad->x;
+  auto* m_y = biquad->y;
+  for (size_t k = 0; k < x.size(); ++k) {
+    const float tmp = x[k];
+    // Direct form 1:
+    //   y[k] = b0*x[k] + b1*x[k-1] + b2*x[k-2] - a1*y[k-1] - a2*y[k-2].
+    y[k] = c_b[0] * tmp + c_b[1] * m_x[0] + c_b[2] * m_x[1] - c_a[0] * m_y[0] -
+           c_a[1] * m_y[1];
+    m_x[1] = m_x[0];
+    m_x[0] = tmp;
+    m_y[1] = m_y[0];
+    m_y[0] = y[k];
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/utility/cascaded_biquad_filter.h b/audio_processing/utility/cascaded_biquad_filter.h
new file mode 100644
index 0000000..9fda200
--- /dev/null
+++ b/audio_processing/utility/cascaded_biquad_filter.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_CASCADED_BIQUAD_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_CASCADED_BIQUAD_FILTER_H_
+
+#include <stddef.h>
+
+#include <complex>
+#include <vector>
+
+#include "rtc_base/array_view.h"
+
+namespace webrtc {
+
+// Applies a number of biquads in a cascaded manner. The filter implementation
+// is direct form 1.
+class CascadedBiQuadFilter {
+ public:
+  struct BiQuadParam {
+    BiQuadParam(std::complex<float> zero,
+                std::complex<float> pole,
+                float gain,
+                bool mirror_zero_along_i_axis = false);
+    explicit BiQuadParam(const BiQuadParam&);
+    std::complex<float> zero;
+    std::complex<float> pole;
+    float gain;
+    bool mirror_zero_along_i_axis;
+  };
+
+  struct BiQuadCoefficients {
+    float b[3];
+    float a[2];
+  };
+
+  struct BiQuad {
+    explicit BiQuad(const BiQuadCoefficients& coefficients)
+        : coefficients(coefficients), x(), y() {}
+    explicit BiQuad(const CascadedBiQuadFilter::BiQuadParam& param);
+    void Reset();
+    BiQuadCoefficients coefficients;
+    float x[2];
+    float y[2];
+  };
+
+  CascadedBiQuadFilter(
+      const CascadedBiQuadFilter::BiQuadCoefficients& coefficients,
+      size_t num_biquads);
+  explicit CascadedBiQuadFilter(
+      const std::vector<CascadedBiQuadFilter::BiQuadParam>& biquad_params);
+  ~CascadedBiQuadFilter();
+  CascadedBiQuadFilter(const CascadedBiQuadFilter&) = delete;
+  CascadedBiQuadFilter& operator=(const CascadedBiQuadFilter&) = delete;
+
+  // Applies the biquads on the values in x in order to form the output in y.
+  void Process(rtc::ArrayView<const float> x, rtc::ArrayView<float> y);
+  // Applies the biquads on the values in y in an in-place manner.
+  void Process(rtc::ArrayView<float> y);
+  // Resets the filter to its initial state.
+  void Reset();
+
+ private:
+  void ApplyBiQuad(rtc::ArrayView<const float> x,
+                   rtc::ArrayView<float> y,
+                   CascadedBiQuadFilter::BiQuad* biquad);
+
+  std::vector<BiQuad> biquads_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_UTILITY_CASCADED_BIQUAD_FILTER_H_
diff --git a/audio_processing/utility/cascaded_biquad_filter_unittest.cc b/audio_processing/utility/cascaded_biquad_filter_unittest.cc
new file mode 100644
index 0000000..bb1d208
--- /dev/null
+++ b/audio_processing/utility/cascaded_biquad_filter_unittest.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/utility/cascaded_biquad_filter.h"
+
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// Coefficients for a second order Butterworth high-pass filter with cutoff
+// frequency 100 Hz.
+const CascadedBiQuadFilter::BiQuadCoefficients kHighPassFilterCoefficients = {
+    {0.97261f, -1.94523f, 0.97261f},
+    {-1.94448f, 0.94598f}};
+
+const CascadedBiQuadFilter::BiQuadCoefficients kTransparentCoefficients = {
+    {1.f, 0.f, 0.f},
+    {0.f, 0.f}};
+
+const CascadedBiQuadFilter::BiQuadCoefficients kBlockingCoefficients = {
+    {0.f, 0.f, 0.f},
+    {0.f, 0.f}};
+
+std::vector<float> CreateInputWithIncreasingValues(size_t vector_length) {
+  std::vector<float> v(vector_length);
+  for (size_t k = 0; k < v.size(); ++k) {
+    v[k] = k;
+  }
+  return v;
+}
+
+}  // namespace
+
+// Verifies that the filter applies an effect which removes the input signal.
+// The test also verifies that the in-place Process API call works as intended.
+TEST(CascadedBiquadFilter, BlockingConfiguration) {
+  std::vector<float> values = CreateInputWithIncreasingValues(1000);
+
+  CascadedBiQuadFilter filter(kBlockingCoefficients, 1);
+  filter.Process(values);
+
+  EXPECT_EQ(std::vector<float>(1000, 0.f), values);
+}
+
+// Verifies that the filter is able to form a zero-mean output from a
+// non-zero-mean input signal when coefficients for a high-pass filter are
+// applied. The test also verifies that the filter works with multiple biquads.
+TEST(CascadedBiquadFilter, HighPassConfiguration) {
+  std::vector<float> values(1000);
+  for (size_t k = 0; k < values.size(); ++k) {
+    values[k] = 1.f;
+  }
+
+  CascadedBiQuadFilter filter(kHighPassFilterCoefficients, 2);
+  filter.Process(values);
+
+  for (size_t k = values.size() / 2; k < values.size(); ++k) {
+    EXPECT_NEAR(0.f, values[k], 1e-4);
+  }
+}
+
+// Verifies that the reset functionality works as intended.
+TEST(CascadedBiquadFilter, HighPassConfigurationResetComparison) {
+  CascadedBiQuadFilter filter(kHighPassFilterCoefficients, 2);
+
+  std::vector<float> values1(100, 1.f);
+  filter.Process(values1);
+
+  filter.Reset();
+
+  std::vector<float> values2(100, 1.f);
+  filter.Process(values2);
+
+  for (size_t k = 0; k < values1.size(); ++k) {
+    EXPECT_EQ(values1[k], values2[k]);
+  }
+}
+
+// Verifies that the filter is able to produce a transparent effect with no
+// impact on the data when the proper coefficients are applied. The test also
+// verifies that the non-in-place Process API call works as intended.
+TEST(CascadedBiquadFilter, TransparentConfiguration) {
+  const std::vector<float> input = CreateInputWithIncreasingValues(1000);
+  std::vector<float> output(input.size());
+
+  CascadedBiQuadFilter filter(kTransparentCoefficients, 1);
+  filter.Process(input, output);
+
+  EXPECT_EQ(input, output);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the check of the lengths of the input and output works for
+// the non-in-place call.
+TEST(CascadedBiquadFilter, InputSizeCheckVerification) {
+  const std::vector<float> input = CreateInputWithIncreasingValues(10);
+  std::vector<float> output(input.size() - 1);
+
+  CascadedBiQuadFilter filter(kTransparentCoefficients, 1);
+  EXPECT_DEATH(filter.Process(input, output), "");
+}
+#endif
+
+// Verifies the conversion from zero, pole, gain to filter coefficients for
+// lowpass filter.
+TEST(CascadedBiquadFilter, BiQuadParamLowPass) {
+  CascadedBiQuadFilter::BiQuadParam param(
+      {-1.0f, 0.0f}, {0.23146901f, 0.39514232f}, 0.1866943331163784f);
+  CascadedBiQuadFilter::BiQuad filter(param);
+  const float epsilon = 1e-6f;
+  EXPECT_NEAR(filter.coefficients.b[0], 0.18669433f, epsilon);
+  EXPECT_NEAR(filter.coefficients.b[1], 0.37338867f, epsilon);
+  EXPECT_NEAR(filter.coefficients.b[2], 0.18669433f, epsilon);
+  EXPECT_NEAR(filter.coefficients.a[0], -0.46293803f, epsilon);
+  EXPECT_NEAR(filter.coefficients.a[1], 0.20971536f, epsilon);
+}
+
+// Verifies the conversion from zero, pole, gain to filter coefficients for
+// highpass filter.
+TEST(CascadedBiquadFilter, BiQuadParamHighPass) {
+  CascadedBiQuadFilter::BiQuadParam param(
+      {1.0f, 0.0f}, {0.72712179f, 0.21296904f}, 0.75707637533388494f);
+  CascadedBiQuadFilter::BiQuad filter(param);
+  const float epsilon = 1e-6f;
+  EXPECT_NEAR(filter.coefficients.b[0], 0.75707638f, epsilon);
+  EXPECT_NEAR(filter.coefficients.b[1], -1.51415275f, epsilon);
+  EXPECT_NEAR(filter.coefficients.b[2], 0.75707638f, epsilon);
+  EXPECT_NEAR(filter.coefficients.a[0], -1.45424359f, epsilon);
+  EXPECT_NEAR(filter.coefficients.a[1], 0.57406192f, epsilon);
+}
+
+// Verifies the conversion from zero, pole, gain to filter coefficients for
+// bandpass filter.
+TEST(CascadedBiquadFilter, BiQuadParamBandPass) {
+  CascadedBiQuadFilter::BiQuadParam param(
+      {1.0f, 0.0f}, {1.11022302e-16f, 0.71381051f}, 0.2452372752527856f, true);
+  CascadedBiQuadFilter::BiQuad filter(param);
+  const float epsilon = 1e-6f;
+  EXPECT_NEAR(filter.coefficients.b[0], 0.24523728f, epsilon);
+  EXPECT_NEAR(filter.coefficients.b[1], 0.f, epsilon);
+  EXPECT_NEAR(filter.coefficients.b[2], -0.24523728f, epsilon);
+  EXPECT_NEAR(filter.coefficients.a[0], -2.22044605e-16f, epsilon);
+  EXPECT_NEAR(filter.coefficients.a[1], 5.09525449e-01f, epsilon);
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/utility/delay_estimator.cc b/audio_processing/utility/delay_estimator.cc
new file mode 100644
index 0000000..034fada
--- /dev/null
+++ b/audio_processing/utility/delay_estimator.cc
@@ -0,0 +1,700 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/utility/delay_estimator.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+// The number of right shifts used for scaling depends linearly on the number
+// of bits in the far-end binary spectrum.
+static const int kShiftsAtZero = 13;  // Right shifts at zero binary spectrum.
+static const int kShiftsLinearSlope = 3;
+
+static const int32_t kProbabilityOffset = 1024;      // 2 in Q9.
+static const int32_t kProbabilityLowerLimit = 8704;  // 17 in Q9.
+static const int32_t kProbabilityMinSpread = 2816;   // 5.5 in Q9.
+
+// Robust validation settings
+static const float kHistogramMax = 3000.f;
+static const float kLastHistogramMax = 250.f;
+static const float kMinHistogramThreshold = 1.5f;
+static const int kMinRequiredHits = 10;
+static const int kMaxHitsWhenPossiblyNonCausal = 10;
+static const int kMaxHitsWhenPossiblyCausal = 1000;
+static const float kQ14Scaling = 1.f / (1 << 14);  // Scaling by 2^14 to get Q0.
+static const float kFractionSlope = 0.05f;
+static const float kMinFractionWhenPossiblyCausal = 0.5f;
+static const float kMinFractionWhenPossiblyNonCausal = 0.25f;
+
+// Counts and returns the number of bits set in a 32-bit word.
+static int BitCount(uint32_t u32) {
+  uint32_t tmp =
+      u32 - ((u32 >> 1) & 033333333333) - ((u32 >> 2) & 011111111111);
+  tmp = ((tmp + (tmp >> 3)) & 030707070707);
+  tmp = (tmp + (tmp >> 6));
+  tmp = (tmp + (tmp >> 12) + (tmp >> 24)) & 077;
+
+  return ((int)tmp);
+}
+
+// Compares the |binary_vector| with all rows of the |binary_matrix| and counts
+// per row the number of times they have the same value.
+// +// Inputs: +// - binary_vector : binary "vector" stored in a long +// - binary_matrix : binary "matrix" stored as a vector of long +// - matrix_size : size of binary "matrix" +// +// Output: +// - bit_counts : "Vector" stored as a long, containing for each +// row the number of times the matrix row and the +// input vector have the same value +// +static void BitCountComparison(uint32_t binary_vector, + const uint32_t* binary_matrix, + int matrix_size, + int32_t* bit_counts) { + int n = 0; + + // Compare |binary_vector| with all rows of the |binary_matrix| + for (; n < matrix_size; n++) { + bit_counts[n] = (int32_t)BitCount(binary_vector ^ binary_matrix[n]); + } +} + +// Collects necessary statistics for the HistogramBasedValidation(). This +// function has to be called prior to calling HistogramBasedValidation(). The +// statistics updated and used by the HistogramBasedValidation() are: +// 1. the number of |candidate_hits|, which states for how long we have had the +// same |candidate_delay| +// 2. the |histogram| of candidate delays over time. This histogram is +// weighted with respect to a reliability measure and time-varying to cope +// with possible delay shifts. +// For further description see commented code. +// +// Inputs: +// - candidate_delay : The delay to validate. +// - valley_depth_q14 : The cost function has a valley/minimum at the +// |candidate_delay| location. |valley_depth_q14| is the +// cost function difference between the minimum and +// maximum locations. The value is in the Q14 domain. +// - valley_level_q14 : Is the cost function value at the minimum, in Q14. +static void UpdateRobustValidationStatistics(BinaryDelayEstimator* self, + int candidate_delay, + int32_t valley_depth_q14, + int32_t valley_level_q14) { + const float valley_depth = valley_depth_q14 * kQ14Scaling; + float decrease_in_last_set = valley_depth; + const int max_hits_for_slow_change = (candidate_delay < self->last_delay) + ? kMaxHitsWhenPossiblyNonCausal + : kMaxHitsWhenPossiblyCausal; + int i = 0; + + RTC_DCHECK_EQ(self->history_size, self->farend->history_size); + // Reset |candidate_hits| if we have a new candidate. + if (candidate_delay != self->last_candidate_delay) { + self->candidate_hits = 0; + self->last_candidate_delay = candidate_delay; + } + self->candidate_hits++; + + // The |histogram| is updated differently across the bins. + // 1. The |candidate_delay| histogram bin is increased with the + // |valley_depth|, which is a simple measure of how reliable the + // |candidate_delay| is. The histogram is not increased above + // |kHistogramMax|. + self->histogram[candidate_delay] += valley_depth; + if (self->histogram[candidate_delay] > kHistogramMax) { + self->histogram[candidate_delay] = kHistogramMax; + } + // 2. The histogram bins in the neighborhood of |candidate_delay| are + // unaffected. The neighborhood is defined as x + {-2, -1, 0, 1}. + // 3. The histogram bins in the neighborhood of |last_delay| are decreased + // with |decrease_in_last_set|. This value equals the difference between + // the cost function values at the locations |candidate_delay| and + // |last_delay| until we reach |max_hits_for_slow_change| consecutive hits + // at the |candidate_delay|. If we exceed this amount of hits the + // |candidate_delay| is a "potential" candidate and we start decreasing + // these histogram bins more rapidly with |valley_depth|. 
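+  // For example (illustrative): a |valley_depth_q14| of 8192 corresponds to
+  // 0.5 after the 2^-14 scaling, so a reliable candidate adds 0.5 to its
+  // histogram bin per block, up to the |kHistogramMax| cap.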
+  if (self->candidate_hits < max_hits_for_slow_change) {
+    decrease_in_last_set =
+        (self->mean_bit_counts[self->compare_delay] - valley_level_q14) *
+        kQ14Scaling;
+  }
+  // 4. All other bins are decreased with |valley_depth|.
+  // TODO(bjornv): Investigate how to make this loop more efficient. Split up
+  // the loop? Remove parts that don't add too much.
+  for (i = 0; i < self->history_size; ++i) {
+    int is_in_last_set = (i >= self->last_delay - 2) &&
+                         (i <= self->last_delay + 1) && (i != candidate_delay);
+    int is_in_candidate_set =
+        (i >= candidate_delay - 2) && (i <= candidate_delay + 1);
+    self->histogram[i] -=
+        decrease_in_last_set * is_in_last_set +
+        valley_depth * (!is_in_last_set && !is_in_candidate_set);
+    // 5. No histogram bin can go below 0.
+    if (self->histogram[i] < 0) {
+      self->histogram[i] = 0;
+    }
+  }
+}
+
+// Validates the |candidate_delay|, estimated in
+// WebRtc_ProcessBinarySpectrum(), based on a mix of counting concurring hits
+// with a modified histogram of recent delay estimates. In brief a candidate
+// is valid (returns 1) if it is the most likely according to the histogram.
+// There are a couple of exceptions that are worth mentioning:
+// 1. If the |candidate_delay| < |last_delay| it can be that we are in a
+//    non-causal state, breaking a possible echo control algorithm. Hence, we
+//    open up for a quicker change by allowing the change even if the
+//    |candidate_delay| is not the most likely one according to the histogram.
+// 2. There's a minimum number of hits (kMinRequiredHits) and the histogram
+//    value has to reach a minimum (kMinHistogramThreshold) to be valid.
+// 3. The action also depends on the filter length used for echo control. If
+//    the delay difference is larger than what the filter can capture, we also
+//    move quicker towards a change.
+// For further description see commented code.
+//
+// Input:
+//        - candidate_delay     : The delay to validate.
+//
+// Return value:
+//        - is_histogram_valid  : 1 - The |candidate_delay| is valid.
+//                                0 - Otherwise.
+static int HistogramBasedValidation(const BinaryDelayEstimator* self,
+                                    int candidate_delay) {
+  float fraction = 1.f;
+  float histogram_threshold = self->histogram[self->compare_delay];
+  const int delay_difference = candidate_delay - self->last_delay;
+  int is_histogram_valid = 0;
+
+  // The histogram based validation of |candidate_delay| is done by comparing
+  // the |histogram| at bin |candidate_delay| with a |histogram_threshold|.
+  // This |histogram_threshold| equals a |fraction| of the |histogram| at bin
+  // |last_delay|. The |fraction| is a piecewise linear function of the
+  // |delay_difference| between the |candidate_delay| and the |last_delay|,
+  // allowing for a quicker move if
+  //   i) a potential echo control filter can not handle these large
+  //      differences.
+  //  ii) keeping |last_delay| instead of updating to |candidate_delay| could
+  //      force an echo control into a non-causal state.
+  // We further require the histogram to have reached a minimum value of
+  // |kMinHistogramThreshold|. In addition, we also require the number of
+  // |candidate_hits| to be more than |kMinRequiredHits| to remove spurious
+  // values.
+
+  // Calculate a comparison histogram value (|histogram_threshold|) that
+  // depends on the distance between the |candidate_delay| and |last_delay|.
+  // TODO(bjornv): How much can we gain by turning the fraction calculation
+  // into tables?
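+  // As a hedged illustration (values follow from the constants above): with
+  // kFractionSlope = 0.05, a |candidate_delay| exceeding |last_delay| by
+  // |allowed_offset| + 10 gives fraction = 1 - 0.05 * 10 = 0.5, i.e. the
+  // candidate only needs half the histogram weight of |last_delay| to win.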
+ if (delay_difference > self->allowed_offset) { + fraction = 1.f - kFractionSlope * (delay_difference - self->allowed_offset); + fraction = (fraction > kMinFractionWhenPossiblyCausal + ? fraction + : kMinFractionWhenPossiblyCausal); + } else if (delay_difference < 0) { + fraction = + kMinFractionWhenPossiblyNonCausal - kFractionSlope * delay_difference; + fraction = (fraction > 1.f ? 1.f : fraction); + } + histogram_threshold *= fraction; + histogram_threshold = + (histogram_threshold > kMinHistogramThreshold ? histogram_threshold + : kMinHistogramThreshold); + + is_histogram_valid = + (self->histogram[candidate_delay] >= histogram_threshold) && + (self->candidate_hits > kMinRequiredHits); + + return is_histogram_valid; +} + +// Performs a robust validation of the |candidate_delay| estimated in +// WebRtc_ProcessBinarySpectrum(). The algorithm takes the +// |is_instantaneous_valid| and the |is_histogram_valid| and combines them +// into a robust validation. The HistogramBasedValidation() has to be called +// prior to this call. +// For further description on how the combination is done, see commented code. +// +// Inputs: +// - candidate_delay : The delay to validate. +// - is_instantaneous_valid : The instantaneous validation performed in +// WebRtc_ProcessBinarySpectrum(). +// - is_histogram_valid : The histogram based validation. +// +// Return value: +// - is_robust : 1 - The candidate_delay is valid according to a +// combination of the two inputs. +// : 0 - Otherwise. +static int RobustValidation(const BinaryDelayEstimator* self, + int candidate_delay, + int is_instantaneous_valid, + int is_histogram_valid) { + int is_robust = 0; + + // The final robust validation is based on the two algorithms; 1) the + // |is_instantaneous_valid| and 2) the histogram based with result stored in + // |is_histogram_valid|. + // i) Before we actually have a valid estimate (|last_delay| == -2), we say + // a candidate is valid if either algorithm states so + // (|is_instantaneous_valid| OR |is_histogram_valid|). + is_robust = + (self->last_delay < 0) && (is_instantaneous_valid || is_histogram_valid); + // ii) Otherwise, we need both algorithms to be certain + // (|is_instantaneous_valid| AND |is_histogram_valid|) + is_robust |= is_instantaneous_valid && is_histogram_valid; + // iii) With one exception, i.e., the histogram based algorithm can overrule + // the instantaneous one if |is_histogram_valid| = 1 and the histogram + // is significantly strong. + is_robust |= is_histogram_valid && + (self->histogram[candidate_delay] > self->last_delay_histogram); + + return is_robust; +} + +void WebRtc_FreeBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self) { + if (self == NULL) { + return; + } + + free(self->binary_far_history); + self->binary_far_history = NULL; + + free(self->far_bit_counts); + self->far_bit_counts = NULL; + + free(self); +} + +BinaryDelayEstimatorFarend* WebRtc_CreateBinaryDelayEstimatorFarend( + int history_size) { + BinaryDelayEstimatorFarend* self = NULL; + + if (history_size > 1) { + // Sanity conditions fulfilled. 
+    self = static_cast<BinaryDelayEstimatorFarend*>(
+        malloc(sizeof(BinaryDelayEstimatorFarend)));
+  }
+  if (self == NULL) {
+    return NULL;
+  }
+
+  self->history_size = 0;
+  self->binary_far_history = NULL;
+  self->far_bit_counts = NULL;
+  if (WebRtc_AllocateFarendBufferMemory(self, history_size) == 0) {
+    WebRtc_FreeBinaryDelayEstimatorFarend(self);
+    self = NULL;
+  }
+  return self;
+}
+
+int WebRtc_AllocateFarendBufferMemory(BinaryDelayEstimatorFarend* self,
+                                      int history_size) {
+  RTC_DCHECK(self);
+  // (Re-)Allocate memory for history buffers.
+  self->binary_far_history = static_cast<uint32_t*>(
+      realloc(self->binary_far_history,
+              history_size * sizeof(*self->binary_far_history)));
+  self->far_bit_counts = static_cast<int*>(realloc(
+      self->far_bit_counts, history_size * sizeof(*self->far_bit_counts)));
+  if ((self->binary_far_history == NULL) || (self->far_bit_counts == NULL)) {
+    history_size = 0;
+  }
+  // Fill with zeros if we have expanded the buffers.
+  if (history_size > self->history_size) {
+    int size_diff = history_size - self->history_size;
+    memset(&self->binary_far_history[self->history_size], 0,
+           sizeof(*self->binary_far_history) * size_diff);
+    memset(&self->far_bit_counts[self->history_size], 0,
+           sizeof(*self->far_bit_counts) * size_diff);
+  }
+  self->history_size = history_size;
+
+  return self->history_size;
+}
+
+void WebRtc_InitBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self) {
+  RTC_DCHECK(self);
+  memset(self->binary_far_history, 0, sizeof(uint32_t) * self->history_size);
+  memset(self->far_bit_counts, 0, sizeof(int) * self->history_size);
+}
+
+void WebRtc_SoftResetBinaryDelayEstimatorFarend(
+    BinaryDelayEstimatorFarend* self,
+    int delay_shift) {
+  int abs_shift = abs(delay_shift);
+  int shift_size = 0;
+  int dest_index = 0;
+  int src_index = 0;
+  int padding_index = 0;
+
+  RTC_DCHECK(self);
+  shift_size = self->history_size - abs_shift;
+  RTC_DCHECK_GT(shift_size, 0);
+  if (delay_shift == 0) {
+    return;
+  } else if (delay_shift > 0) {
+    dest_index = abs_shift;
+  } else if (delay_shift < 0) {
+    src_index = abs_shift;
+    padding_index = shift_size;
+  }
+
+  // Shift and zero pad buffers.
+  memmove(&self->binary_far_history[dest_index],
+          &self->binary_far_history[src_index],
+          sizeof(*self->binary_far_history) * shift_size);
+  memset(&self->binary_far_history[padding_index], 0,
+         sizeof(*self->binary_far_history) * abs_shift);
+  memmove(&self->far_bit_counts[dest_index], &self->far_bit_counts[src_index],
+          sizeof(*self->far_bit_counts) * shift_size);
+  memset(&self->far_bit_counts[padding_index], 0,
+         sizeof(*self->far_bit_counts) * abs_shift);
+}
+
+void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* handle,
+                                 uint32_t binary_far_spectrum) {
+  RTC_DCHECK(handle);
+  // Shift binary spectrum history and insert current |binary_far_spectrum|.
+  memmove(&(handle->binary_far_history[1]), &(handle->binary_far_history[0]),
+          (handle->history_size - 1) * sizeof(uint32_t));
+  handle->binary_far_history[0] = binary_far_spectrum;
+
+  // Shift history of far-end binary spectrum bit counts and insert bit count
+  // of current |binary_far_spectrum|.
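+  // In both history buffers, index 0 holds the most recent block and higher
+  // indices hold progressively older blocks.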
+  memmove(&(handle->far_bit_counts[1]), &(handle->far_bit_counts[0]),
+          (handle->history_size - 1) * sizeof(int));
+  handle->far_bit_counts[0] = BitCount(binary_far_spectrum);
+}
+
+void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self) {
+  if (self == NULL) {
+    return;
+  }
+
+  free(self->mean_bit_counts);
+  self->mean_bit_counts = NULL;
+
+  free(self->bit_counts);
+  self->bit_counts = NULL;
+
+  free(self->binary_near_history);
+  self->binary_near_history = NULL;
+
+  free(self->histogram);
+  self->histogram = NULL;
+
+  // BinaryDelayEstimator does not have ownership of |farend|, hence we do not
+  // free the memory here. That should be handled separately by the user.
+  self->farend = NULL;
+
+  free(self);
+}
+
+BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator(
+    BinaryDelayEstimatorFarend* farend,
+    int max_lookahead) {
+  BinaryDelayEstimator* self = NULL;
+
+  if ((farend != NULL) && (max_lookahead >= 0)) {
+    // Sanity conditions fulfilled.
+    self = static_cast<BinaryDelayEstimator*>(
+        malloc(sizeof(BinaryDelayEstimator)));
+  }
+  if (self == NULL) {
+    return NULL;
+  }
+
+  self->farend = farend;
+  self->near_history_size = max_lookahead + 1;
+  self->history_size = 0;
+  self->robust_validation_enabled = 0;  // Disabled by default.
+  self->allowed_offset = 0;
+
+  self->lookahead = max_lookahead;
+
+  // Allocate memory for spectrum and history buffers.
+  self->mean_bit_counts = NULL;
+  self->bit_counts = NULL;
+  self->histogram = NULL;
+  self->binary_near_history = static_cast<uint32_t*>(
+      malloc((max_lookahead + 1) * sizeof(*self->binary_near_history)));
+  if (self->binary_near_history == NULL ||
+      WebRtc_AllocateHistoryBufferMemory(self, farend->history_size) == 0) {
+    WebRtc_FreeBinaryDelayEstimator(self);
+    self = NULL;
+  }
+
+  return self;
+}
+
+int WebRtc_AllocateHistoryBufferMemory(BinaryDelayEstimator* self,
+                                       int history_size) {
+  BinaryDelayEstimatorFarend* far = self->farend;
+  // (Re-)Allocate memory for spectrum and history buffers.
+  if (history_size != far->history_size) {
+    // Only update far-end buffers if we need.
+    history_size = WebRtc_AllocateFarendBufferMemory(far, history_size);
+  }
+  // The extra array element in |mean_bit_counts| and |histogram| is a dummy
+  // element only used while |last_delay| == -2, i.e., before we have a valid
+  // estimate.
+  self->mean_bit_counts = static_cast<int32_t*>(
+      realloc(self->mean_bit_counts,
+              (history_size + 1) * sizeof(*self->mean_bit_counts)));
+  self->bit_counts = static_cast<int32_t*>(
+      realloc(self->bit_counts, history_size * sizeof(*self->bit_counts)));
+  self->histogram = static_cast<float*>(
+      realloc(self->histogram, (history_size + 1) * sizeof(*self->histogram)));
+
+  if ((self->mean_bit_counts == NULL) || (self->bit_counts == NULL) ||
+      (self->histogram == NULL)) {
+    history_size = 0;
+  }
+  // Fill with zeros if we have expanded the buffers.
+ if (history_size > self->history_size) { + int size_diff = history_size - self->history_size; + memset(&self->mean_bit_counts[self->history_size], 0, + sizeof(*self->mean_bit_counts) * size_diff); + memset(&self->bit_counts[self->history_size], 0, + sizeof(*self->bit_counts) * size_diff); + memset(&self->histogram[self->history_size], 0, + sizeof(*self->histogram) * size_diff); + } + self->history_size = history_size; + + return self->history_size; +} + +void WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* self) { + int i = 0; + RTC_DCHECK(self); + + memset(self->bit_counts, 0, sizeof(int32_t) * self->history_size); + memset(self->binary_near_history, 0, + sizeof(uint32_t) * self->near_history_size); + for (i = 0; i <= self->history_size; ++i) { + self->mean_bit_counts[i] = (20 << 9); // 20 in Q9. + self->histogram[i] = 0.f; + } + self->minimum_probability = kMaxBitCountsQ9; // 32 in Q9. + self->last_delay_probability = (int)kMaxBitCountsQ9; // 32 in Q9. + + // Default return value if we're unable to estimate. -1 is used for errors. + self->last_delay = -2; + + self->last_candidate_delay = -2; + self->compare_delay = self->history_size; + self->candidate_hits = 0; + self->last_delay_histogram = 0.f; +} + +int WebRtc_SoftResetBinaryDelayEstimator(BinaryDelayEstimator* self, + int delay_shift) { + int lookahead = 0; + RTC_DCHECK(self); + lookahead = self->lookahead; + self->lookahead -= delay_shift; + if (self->lookahead < 0) { + self->lookahead = 0; + } + if (self->lookahead > self->near_history_size - 1) { + self->lookahead = self->near_history_size - 1; + } + return lookahead - self->lookahead; +} + +int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self, + uint32_t binary_near_spectrum) { + int i = 0; + int candidate_delay = -1; + int valid_candidate = 0; + + int32_t value_best_candidate = kMaxBitCountsQ9; + int32_t value_worst_candidate = 0; + int32_t valley_depth = 0; + + RTC_DCHECK(self); + if (self->farend->history_size != self->history_size) { + // Non matching history sizes. + return -1; + } + if (self->near_history_size > 1) { + // If we apply lookahead, shift near-end binary spectrum history. Insert + // current |binary_near_spectrum| and pull out the delayed one. + memmove(&(self->binary_near_history[1]), &(self->binary_near_history[0]), + (self->near_history_size - 1) * sizeof(uint32_t)); + self->binary_near_history[0] = binary_near_spectrum; + binary_near_spectrum = self->binary_near_history[self->lookahead]; + } + + // Compare with delayed spectra and store the |bit_counts| for each delay. + BitCountComparison(binary_near_spectrum, self->farend->binary_far_history, + self->history_size, self->bit_counts); + + // Update |mean_bit_counts|, which is the smoothed version of |bit_counts|. + for (i = 0; i < self->history_size; i++) { + // |bit_counts| is constrained to [0, 32], meaning we can smooth with a + // factor up to 2^26. We use Q9. + int32_t bit_count = (self->bit_counts[i] << 9); // Q9. + + // Update |mean_bit_counts| only when far-end signal has something to + // contribute. If |far_bit_counts| is zero the far-end signal is weak and + // we likely have a poor echo condition, hence don't update. + if (self->farend->far_bit_counts[i] > 0) { + // Make number of right shifts piecewise linear w.r.t. |far_bit_counts|. 
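+      // For example (derived from the constants above): with
+      // |far_bit_counts| = 16, shifts = 13 - ((3 * 16) >> 4) = 10, i.e. a
+      // smoothing factor of 2^-10.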
+ int shifts = kShiftsAtZero; + shifts -= (kShiftsLinearSlope * self->farend->far_bit_counts[i]) >> 4; + WebRtc_MeanEstimatorFix(bit_count, shifts, &(self->mean_bit_counts[i])); + } + } + + // Find |candidate_delay|, |value_best_candidate| and |value_worst_candidate| + // of |mean_bit_counts|. + for (i = 0; i < self->history_size; i++) { + if (self->mean_bit_counts[i] < value_best_candidate) { + value_best_candidate = self->mean_bit_counts[i]; + candidate_delay = i; + } + if (self->mean_bit_counts[i] > value_worst_candidate) { + value_worst_candidate = self->mean_bit_counts[i]; + } + } + valley_depth = value_worst_candidate - value_best_candidate; + + // The |value_best_candidate| is a good indicator on the probability of + // |candidate_delay| being an accurate delay (a small |value_best_candidate| + // means a good binary match). In the following sections we make a decision + // whether to update |last_delay| or not. + // 1) If the difference bit counts between the best and the worst delay + // candidates is too small we consider the situation to be unreliable and + // don't update |last_delay|. + // 2) If the situation is reliable we update |last_delay| if the value of the + // best candidate delay has a value less than + // i) an adaptive threshold |minimum_probability|, or + // ii) this corresponding value |last_delay_probability|, but updated at + // this time instant. + + // Update |minimum_probability|. + if ((self->minimum_probability > kProbabilityLowerLimit) && + (valley_depth > kProbabilityMinSpread)) { + // The "hard" threshold can't be lower than 17 (in Q9). + // The valley in the curve also has to be distinct, i.e., the + // difference between |value_worst_candidate| and |value_best_candidate| has + // to be large enough. + int32_t threshold = value_best_candidate + kProbabilityOffset; + if (threshold < kProbabilityLowerLimit) { + threshold = kProbabilityLowerLimit; + } + if (self->minimum_probability > threshold) { + self->minimum_probability = threshold; + } + } + // Update |last_delay_probability|. + // We use a Markov type model, i.e., a slowly increasing level over time. + self->last_delay_probability++; + // Validate |candidate_delay|. We have a reliable instantaneous delay + // estimate if + // 1) The valley is distinct enough (|valley_depth| > |kProbabilityOffset|) + // and + // 2) The depth of the valley is deep enough + // (|value_best_candidate| < |minimum_probability|) + // and deeper than the best estimate so far + // (|value_best_candidate| < |last_delay_probability|) + valid_candidate = ((valley_depth > kProbabilityOffset) && + ((value_best_candidate < self->minimum_probability) || + (value_best_candidate < self->last_delay_probability))); + + // Check for nonstationary farend signal. + const bool non_stationary_farend = + std::any_of(self->farend->far_bit_counts, + self->farend->far_bit_counts + self->history_size, + [](int a) { return a > 0; }); + + if (non_stationary_farend) { + // Only update the validation statistics when the farend is nonstationary + // as the underlying estimates are otherwise frozen. + UpdateRobustValidationStatistics(self, candidate_delay, valley_depth, + value_best_candidate); + } + + if (self->robust_validation_enabled) { + int is_histogram_valid = HistogramBasedValidation(self, candidate_delay); + valid_candidate = RobustValidation(self, candidate_delay, valid_candidate, + is_histogram_valid); + } + + // Only update the delay estimate when the farend is nonstationary and when + // a valid delay candidate is available. 
+  if (non_stationary_farend && valid_candidate) {
+    if (candidate_delay != self->last_delay) {
+      self->last_delay_histogram =
+          (self->histogram[candidate_delay] > kLastHistogramMax
+               ? kLastHistogramMax
+               : self->histogram[candidate_delay]);
+      // Adjust the histogram if we made a change to |last_delay|, though it
+      // was not the most likely one according to the histogram.
+      if (self->histogram[candidate_delay] <
+          self->histogram[self->compare_delay]) {
+        self->histogram[self->compare_delay] =
+            self->histogram[candidate_delay];
+      }
+    }
+    self->last_delay = candidate_delay;
+    if (value_best_candidate < self->last_delay_probability) {
+      self->last_delay_probability = value_best_candidate;
+    }
+    self->compare_delay = self->last_delay;
+  }
+
+  return self->last_delay;
+}
+
+int WebRtc_binary_last_delay(BinaryDelayEstimator* self) {
+  RTC_DCHECK(self);
+  return self->last_delay;
+}
+
+float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self) {
+  float quality = 0;
+  RTC_DCHECK(self);
+
+  if (self->robust_validation_enabled) {
+    // Simply a linear function of the histogram height at the delay estimate.
+    quality = self->histogram[self->compare_delay] / kHistogramMax;
+  } else {
+    // Note that |last_delay_probability| states how deep the minimum of the
+    // cost function is, so it is rather an error probability.
+    quality = (float)(kMaxBitCountsQ9 - self->last_delay_probability) /
+              kMaxBitCountsQ9;
+    if (quality < 0) {
+      quality = 0;
+    }
+  }
+  return quality;
+}
+
+void WebRtc_MeanEstimatorFix(int32_t new_value,
+                             int factor,
+                             int32_t* mean_value) {
+  int32_t diff = new_value - *mean_value;
+
+  // mean_new = mean_value + ((new_value - mean_value) >> factor);
+  if (diff < 0) {
+    diff = -((-diff) >> factor);
+  } else {
+    diff = (diff >> factor);
+  }
+  *mean_value += diff;
+}
diff --git a/audio_processing/utility/delay_estimator.h b/audio_processing/utility/delay_estimator.h
new file mode 100644
index 0000000..2f47e26
--- /dev/null
+++ b/audio_processing/utility/delay_estimator.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Performs delay estimation on binary converted spectra.
+// The return value is 0 - OK and -1 - Error, unless otherwise stated.
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
+
+#include <stdint.h>
+
+static const int32_t kMaxBitCountsQ9 = (32 << 9);  // 32 matching bits in Q9.
+
+typedef struct {
+  // Pointer to bit counts.
+  int* far_bit_counts;
+  // Binary history variables.
+  uint32_t* binary_far_history;
+  int history_size;
+} BinaryDelayEstimatorFarend;
+
+typedef struct {
+  // Pointer to bit counts.
+  int32_t* mean_bit_counts;
+  // Array only used locally in ProcessBinarySpectrum() but whose size is
+  // determined at run-time.
+  int32_t* bit_counts;
+
+  // Binary history variables.
+  uint32_t* binary_near_history;
+  int near_history_size;
+  int history_size;
+
+  // Delay estimation variables.
+  int32_t minimum_probability;
+  int last_delay_probability;
+
+  // Delay memory.
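+  // (-2 until the first valid estimate has been made; -1 is used for errors.)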
+ int last_delay; + + // Robust validation + int robust_validation_enabled; + int allowed_offset; + int last_candidate_delay; + int compare_delay; + int candidate_hits; + float* histogram; + float last_delay_histogram; + + // For dynamically changing the lookahead when using SoftReset...(). + int lookahead; + + // Far-end binary spectrum history buffer etc. + BinaryDelayEstimatorFarend* farend; +} BinaryDelayEstimator; + +// Releases the memory allocated by +// WebRtc_CreateBinaryDelayEstimatorFarend(...). +// Input: +// - self : Pointer to the binary delay estimation far-end +// instance which is the return value of +// WebRtc_CreateBinaryDelayEstimatorFarend(). +// +void WebRtc_FreeBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self); + +// Allocates the memory needed by the far-end part of the binary delay +// estimation. The memory needs to be initialized separately through +// WebRtc_InitBinaryDelayEstimatorFarend(...). +// +// Inputs: +// - history_size : Size of the far-end binary spectrum history. +// +// Return value: +// - BinaryDelayEstimatorFarend* +// : Created |handle|. If the memory can't be allocated +// or if any of the input parameters are invalid NULL +// is returned. +// +BinaryDelayEstimatorFarend* WebRtc_CreateBinaryDelayEstimatorFarend( + int history_size); + +// Re-allocates the buffers. +// +// Inputs: +// - self : Pointer to the binary estimation far-end instance +// which is the return value of +// WebRtc_CreateBinaryDelayEstimatorFarend(). +// - history_size : Size of the far-end binary spectrum history. +// +// Return value: +// - history_size : The history size allocated. +int WebRtc_AllocateFarendBufferMemory(BinaryDelayEstimatorFarend* self, + int history_size); + +// Initializes the delay estimation far-end instance created with +// WebRtc_CreateBinaryDelayEstimatorFarend(...). +// +// Input: +// - self : Pointer to the delay estimation far-end instance. +// +// Output: +// - self : Initialized far-end instance. +// +void WebRtc_InitBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self); + +// Soft resets the delay estimation far-end instance created with +// WebRtc_CreateBinaryDelayEstimatorFarend(...). +// +// Input: +// - delay_shift : The amount of blocks to shift history buffers. +// +void WebRtc_SoftResetBinaryDelayEstimatorFarend( + BinaryDelayEstimatorFarend* self, + int delay_shift); + +// Adds the binary far-end spectrum to the internal far-end history buffer. This +// spectrum is used as reference when calculating the delay using +// WebRtc_ProcessBinarySpectrum(). +// +// Inputs: +// - self : Pointer to the delay estimation far-end +// instance. +// - binary_far_spectrum : Far-end binary spectrum. +// +// Output: +// - self : Updated far-end instance. +// +void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* self, + uint32_t binary_far_spectrum); + +// Releases the memory allocated by WebRtc_CreateBinaryDelayEstimator(...). +// +// Note that BinaryDelayEstimator utilizes BinaryDelayEstimatorFarend, but does +// not take ownership of it, hence the BinaryDelayEstimator has to be torn down +// before the far-end. +// +// Input: +// - self : Pointer to the binary delay estimation instance +// which is the return value of +// WebRtc_CreateBinaryDelayEstimator(). +// +void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self); + +// Allocates the memory needed by the binary delay estimation. The memory needs +// to be initialized separately through WebRtc_InitBinaryDelayEstimator(...). 
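+//
+// A minimal usage sketch (error handling omitted; |binary_far_spectrum| and
+// |binary_near_spectrum| are placeholder inputs, all functions are declared
+// in this header):
+//
+//   BinaryDelayEstimatorFarend* farend =
+//       WebRtc_CreateBinaryDelayEstimatorFarend(100);
+//   BinaryDelayEstimator* estimator =
+//       WebRtc_CreateBinaryDelayEstimator(farend, 10);
+//   WebRtc_InitBinaryDelayEstimatorFarend(farend);
+//   WebRtc_InitBinaryDelayEstimator(estimator);
+//   // Per processed block:
+//   WebRtc_AddBinaryFarSpectrum(farend, binary_far_spectrum);
+//   int delay = WebRtc_ProcessBinarySpectrum(estimator, binary_near_spectrum);
+//   // Tear down the estimator before the far-end (no ownership transfer).
+//   WebRtc_FreeBinaryDelayEstimator(estimator);
+//   WebRtc_FreeBinaryDelayEstimatorFarend(farend);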
+// +// See WebRtc_CreateDelayEstimator(..) in delay_estimator_wrapper.c for detailed +// description. +BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator( + BinaryDelayEstimatorFarend* farend, + int max_lookahead); + +// Re-allocates |history_size| dependent buffers. The far-end buffers will be +// updated at the same time if needed. +// +// Input: +// - self : Pointer to the binary estimation instance which is +// the return value of +// WebRtc_CreateBinaryDelayEstimator(). +// - history_size : Size of the history buffers. +// +// Return value: +// - history_size : The history size allocated. +int WebRtc_AllocateHistoryBufferMemory(BinaryDelayEstimator* self, + int history_size); + +// Initializes the delay estimation instance created with +// WebRtc_CreateBinaryDelayEstimator(...). +// +// Input: +// - self : Pointer to the delay estimation instance. +// +// Output: +// - self : Initialized instance. +// +void WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* self); + +// Soft resets the delay estimation instance created with +// WebRtc_CreateBinaryDelayEstimator(...). +// +// Input: +// - delay_shift : The amount of blocks to shift history buffers. +// +// Return value: +// - actual_shifts : The actual number of shifts performed. +// +int WebRtc_SoftResetBinaryDelayEstimator(BinaryDelayEstimator* self, + int delay_shift); + +// Estimates and returns the delay between the binary far-end and binary near- +// end spectra. It is assumed the binary far-end spectrum has been added using +// WebRtc_AddBinaryFarSpectrum() prior to this call. The value will be offset by +// the lookahead (i.e. the lookahead should be subtracted from the returned +// value). +// +// Inputs: +// - self : Pointer to the delay estimation instance. +// - binary_near_spectrum : Near-end binary spectrum of the current block. +// +// Output: +// - self : Updated instance. +// +// Return value: +// - delay : >= 0 - Calculated delay value. +// -2 - Insufficient data for estimation. +// +int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self, + uint32_t binary_near_spectrum); + +// Returns the last calculated delay updated by the function +// WebRtc_ProcessBinarySpectrum(...). +// +// Input: +// - self : Pointer to the delay estimation instance. +// +// Return value: +// - delay : >= 0 - Last calculated delay value +// -2 - Insufficient data for estimation. +// +int WebRtc_binary_last_delay(BinaryDelayEstimator* self); + +// Returns the estimation quality of the last calculated delay updated by the +// function WebRtc_ProcessBinarySpectrum(...). The estimation quality is a value +// in the interval [0, 1]. The higher the value, the better the quality. +// +// Return value: +// - delay_quality : >= 0 - Estimation quality of last calculated +// delay value. +float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self); + +// Updates the |mean_value| recursively with a step size of 2^-|factor|. This +// function is used internally in the Binary Delay Estimator as well as the +// Fixed point wrapper. +// +// Inputs: +// - new_value : The new value the mean should be updated with. +// - factor : The step size, in number of right shifts. +// +// Input/Output: +// - mean_value : Pointer to the mean value. 
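+//
+// Example (illustrative): with |factor| = 4 the update is
+// *mean_value += (new_value - *mean_value) >> 4, i.e. an exponential
+// smoother with coefficient 1/16.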
+// +void WebRtc_MeanEstimatorFix(int32_t new_value, + int factor, + int32_t* mean_value); + +#endif // MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_ diff --git a/audio_processing/utility/delay_estimator_internal.h b/audio_processing/utility/delay_estimator_internal.h new file mode 100644 index 0000000..6e51dd8 --- /dev/null +++ b/audio_processing/utility/delay_estimator_internal.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Header file including the delay estimator handle used for testing. + +#ifndef MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_ +#define MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_ + +#include "audio_processing/utility/delay_estimator.h" + +typedef union { + float float_; + int32_t int32_; +} SpectrumType; + +typedef struct { + // Pointers to mean values of spectrum. + SpectrumType* mean_far_spectrum; + // |mean_far_spectrum| initialization indicator. + int far_spectrum_initialized; + + int spectrum_size; + + // Far-end part of binary spectrum based delay estimation. + BinaryDelayEstimatorFarend* binary_farend; +} DelayEstimatorFarend; + +typedef struct { + // Pointers to mean values of spectrum. + SpectrumType* mean_near_spectrum; + // |mean_near_spectrum| initialization indicator. + int near_spectrum_initialized; + + int spectrum_size; + + // Binary spectrum based delay estimator + BinaryDelayEstimator* binary_handle; +} DelayEstimator; + +#endif // MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_ diff --git a/audio_processing/utility/delay_estimator_unittest.cc b/audio_processing/utility/delay_estimator_unittest.cc new file mode 100644 index 0000000..a3cb824 --- /dev/null +++ b/audio_processing/utility/delay_estimator_unittest.cc @@ -0,0 +1,617 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/utility/delay_estimator.h" + +#include "audio_processing/utility/delay_estimator_internal.h" +#include "audio_processing/utility/delay_estimator_wrapper.h" +#include "test/gtest.h" + +namespace { + +enum { kSpectrumSize = 65 }; +// Delay history sizes. +enum { kMaxDelay = 100 }; +enum { kLookahead = 10 }; +enum { kHistorySize = kMaxDelay + kLookahead }; +// Length of binary spectrum sequence. 
+enum { kSequenceLength = 400 };
+
+const int kDifferentHistorySize = 3;
+const int kDifferentLookahead = 1;
+
+const int kEnable[] = {0, 1};
+const size_t kSizeEnable = sizeof(kEnable) / sizeof(*kEnable);
+
+class DelayEstimatorTest : public ::testing::Test {
+ protected:
+ DelayEstimatorTest();
+ void SetUp() override;
+ void TearDown() override;
+
+ void Init();
+ void InitBinary();
+ void VerifyDelay(BinaryDelayEstimator* binary_handle, int offset, int delay);
+ void RunBinarySpectra(BinaryDelayEstimator* binary1,
+ BinaryDelayEstimator* binary2,
+ int near_offset,
+ int lookahead_offset,
+ int far_offset);
+ void RunBinarySpectraTest(int near_offset,
+ int lookahead_offset,
+ int ref_robust_validation,
+ int robust_validation);
+
+ void* handle_;
+ DelayEstimator* self_;
+ void* farend_handle_;
+ DelayEstimatorFarend* farend_self_;
+ BinaryDelayEstimator* binary_;
+ BinaryDelayEstimatorFarend* binary_farend_;
+ int spectrum_size_;
+ // Dummy input spectra.
+ float far_f_[kSpectrumSize];
+ float near_f_[kSpectrumSize];
+ uint16_t far_u16_[kSpectrumSize];
+ uint16_t near_u16_[kSpectrumSize];
+ uint32_t binary_spectrum_[kSequenceLength + kHistorySize];
+};
+
+DelayEstimatorTest::DelayEstimatorTest()
+ : handle_(NULL),
+ self_(NULL),
+ farend_handle_(NULL),
+ farend_self_(NULL),
+ binary_(NULL),
+ binary_farend_(NULL),
+ spectrum_size_(kSpectrumSize) {
+ // Dummy input data are set with more or less arbitrary non-zero values.
+ memset(far_f_, 1, sizeof(far_f_));
+ memset(near_f_, 2, sizeof(near_f_));
+ memset(far_u16_, 1, sizeof(far_u16_));
+ memset(near_u16_, 2, sizeof(near_u16_));
+ // Construct a sequence of binary spectra used to verify delay estimate. The
+ // |kSequenceLength| has to be long enough for the delay estimation to leave
+ // the initialized state.
+ binary_spectrum_[0] = 1;
+ for (int i = 1; i < (kSequenceLength + kHistorySize); i++) {
+ binary_spectrum_[i] = 3 * binary_spectrum_[i - 1];
+ }
+}
+
+void DelayEstimatorTest::SetUp() {
+ farend_handle_ =
+ WebRtc_CreateDelayEstimatorFarend(kSpectrumSize, kHistorySize);
+ ASSERT_TRUE(farend_handle_ != NULL);
+ farend_self_ = reinterpret_cast<DelayEstimatorFarend*>(farend_handle_);
+ handle_ = WebRtc_CreateDelayEstimator(farend_handle_, kLookahead);
+ ASSERT_TRUE(handle_ != NULL);
+ self_ = reinterpret_cast<DelayEstimator*>(handle_);
+ binary_farend_ = WebRtc_CreateBinaryDelayEstimatorFarend(kHistorySize);
+ ASSERT_TRUE(binary_farend_ != NULL);
+ binary_ = WebRtc_CreateBinaryDelayEstimator(binary_farend_, kLookahead);
+ ASSERT_TRUE(binary_ != NULL);
+}
+
+void DelayEstimatorTest::TearDown() {
+ WebRtc_FreeDelayEstimator(handle_);
+ handle_ = NULL;
+ self_ = NULL;
+ WebRtc_FreeDelayEstimatorFarend(farend_handle_);
+ farend_handle_ = NULL;
+ farend_self_ = NULL;
+ WebRtc_FreeBinaryDelayEstimator(binary_);
+ binary_ = NULL;
+ WebRtc_FreeBinaryDelayEstimatorFarend(binary_farend_);
+ binary_farend_ = NULL;
+}
+
+void DelayEstimatorTest::Init() {
+ // Initialize Delay Estimator
+ EXPECT_EQ(0, WebRtc_InitDelayEstimatorFarend(farend_handle_));
+ EXPECT_EQ(0, WebRtc_InitDelayEstimator(handle_));
+ // Verify initialization.
+ EXPECT_EQ(0, farend_self_->far_spectrum_initialized);
+ EXPECT_EQ(0, self_->near_spectrum_initialized);
+ EXPECT_EQ(-2, WebRtc_last_delay(handle_)); // Delay in initial state.
+ EXPECT_FLOAT_EQ(0, WebRtc_last_delay_quality(handle_)); // Zero quality.
+}
+
+void DelayEstimatorTest::InitBinary() {
+ // Initialize Binary Delay Estimator (far-end part).
+ WebRtc_InitBinaryDelayEstimatorFarend(binary_farend_);
+ // Initialize Binary Delay Estimator
+ WebRtc_InitBinaryDelayEstimator(binary_);
+ // Verify initialization. This does not guarantee a complete check, since
+ // |last_delay| may be equal to -2 before initialization if done on the fly.
+ EXPECT_EQ(-2, binary_->last_delay);
+}
+
+void DelayEstimatorTest::VerifyDelay(BinaryDelayEstimator* binary_handle,
+ int offset,
+ int delay) {
+ // Verify that WebRtc_binary_last_delay() returns the correct delay.
+ EXPECT_EQ(delay, WebRtc_binary_last_delay(binary_handle));
+
+ if (delay != -2) {
+ // Verify correct delay estimate. In the non-causal case the true delay
+ // is equivalent to the |offset|.
+ EXPECT_EQ(offset, delay);
+ }
+}
+
+void DelayEstimatorTest::RunBinarySpectra(BinaryDelayEstimator* binary1,
+ BinaryDelayEstimator* binary2,
+ int near_offset,
+ int lookahead_offset,
+ int far_offset) {
+ int different_validations =
+ binary1->robust_validation_enabled ^ binary2->robust_validation_enabled;
+ WebRtc_InitBinaryDelayEstimatorFarend(binary_farend_);
+ WebRtc_InitBinaryDelayEstimator(binary1);
+ WebRtc_InitBinaryDelayEstimator(binary2);
+ // Verify initialization. This does not guarantee a complete check, since
+ // |last_delay| may be equal to -2 before initialization if done on the fly.
+ EXPECT_EQ(-2, binary1->last_delay);
+ EXPECT_EQ(-2, binary2->last_delay);
+ for (int i = kLookahead; i < (kSequenceLength + kLookahead); i++) {
+ WebRtc_AddBinaryFarSpectrum(binary_farend_,
+ binary_spectrum_[i + far_offset]);
+ int delay_1 = WebRtc_ProcessBinarySpectrum(binary1, binary_spectrum_[i]);
+ int delay_2 = WebRtc_ProcessBinarySpectrum(
+ binary2, binary_spectrum_[i - near_offset]);
+
+ VerifyDelay(binary1, far_offset + kLookahead, delay_1);
+ VerifyDelay(binary2,
+ far_offset + kLookahead + lookahead_offset + near_offset,
+ delay_2);
+ // Expect the two delay estimates to be offset by |lookahead_offset| +
+ // |near_offset| when we have left the initial state.
+ if ((delay_1 != -2) && (delay_2 != -2)) {
+ EXPECT_EQ(delay_1, delay_2 - lookahead_offset - near_offset);
+ }
+ // For the case of identical signals |delay_1| and |delay_2| should match
+ // all the time, unless one of them has robust validation turned on. In
+ // that case the robust validation leaves the initial state faster.
+ if ((near_offset == 0) && (lookahead_offset == 0)) {
+ if (!different_validations) {
+ EXPECT_EQ(delay_1, delay_2);
+ } else {
+ if (binary1->robust_validation_enabled) {
+ EXPECT_GE(delay_1, delay_2);
+ } else {
+ EXPECT_GE(delay_2, delay_1);
+ }
+ }
+ }
+ }
+ // Verify that we have left the initialized state.
+ EXPECT_NE(-2, WebRtc_binary_last_delay(binary1));
+ EXPECT_LT(0, WebRtc_binary_last_delay_quality(binary1));
+ EXPECT_NE(-2, WebRtc_binary_last_delay(binary2));
+ EXPECT_LT(0, WebRtc_binary_last_delay_quality(binary2));
+}
+
+void DelayEstimatorTest::RunBinarySpectraTest(int near_offset,
+ int lookahead_offset,
+ int ref_robust_validation,
+ int robust_validation) {
+ BinaryDelayEstimator* binary2 = WebRtc_CreateBinaryDelayEstimator(
+ binary_farend_, kLookahead + lookahead_offset);
+ // Verify the delay for both causal and non-causal systems. For causal systems
+ // the delay is equivalent to a positive |offset| of the far-end sequence.
+ // For non-causal systems the delay is equivalent to a negative |offset| of
+ // the far-end sequence.
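+ // Editor's example: with kLookahead = 10, an |offset| of -10 feeds the
+ // far-end sequence 10 blocks after the near-end (the near-end leads, i.e.
+ // non-causal), and the expected estimate |far_offset| + kLookahead is 0.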
+ binary_->robust_validation_enabled = ref_robust_validation;
+ binary2->robust_validation_enabled = robust_validation;
+ for (int offset = -kLookahead;
+ offset < kMaxDelay - lookahead_offset - near_offset; offset++) {
+ RunBinarySpectra(binary_, binary2, near_offset, lookahead_offset, offset);
+ }
+ WebRtc_FreeBinaryDelayEstimator(binary2);
+ binary2 = NULL;
+ binary_->robust_validation_enabled = 0; // Reset reference.
+}
+
+TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) {
+ // In this test we verify correct error returns on invalid API calls.
+
+ // WebRtc_CreateDelayEstimatorFarend() and WebRtc_CreateDelayEstimator()
+ // should return a NULL pointer on invalid input values.
+ // Make sure we have a non-NULL value at start, so we can detect NULL after
+ // create failure.
+ void* handle = farend_handle_;
+ handle = WebRtc_CreateDelayEstimatorFarend(33, kHistorySize);
+ EXPECT_TRUE(handle == NULL);
+ handle = WebRtc_CreateDelayEstimatorFarend(kSpectrumSize, 1);
+ EXPECT_TRUE(handle == NULL);
+
+ handle = handle_;
+ handle = WebRtc_CreateDelayEstimator(NULL, kLookahead);
+ EXPECT_TRUE(handle == NULL);
+ handle = WebRtc_CreateDelayEstimator(farend_handle_, -1);
+ EXPECT_TRUE(handle == NULL);
+
+ // WebRtc_InitDelayEstimatorFarend() and WebRtc_InitDelayEstimator() should
+ // return -1 if we have a NULL pointer as |handle|.
+ EXPECT_EQ(-1, WebRtc_InitDelayEstimatorFarend(NULL));
+ EXPECT_EQ(-1, WebRtc_InitDelayEstimator(NULL));
+
+ // WebRtc_AddFarSpectrumFloat() should return -1 if we have:
+ // 1) NULL pointer as |handle|.
+ // 2) NULL pointer as far-end spectrum.
+ // 3) Incorrect spectrum size.
+ EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(NULL, far_f_, spectrum_size_));
+ // Use |farend_handle_| which is properly created at SetUp().
+ EXPECT_EQ(-1,
+ WebRtc_AddFarSpectrumFloat(farend_handle_, NULL, spectrum_size_));
+ EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_,
+ spectrum_size_ + 1));
+
+ // WebRtc_AddFarSpectrumFix() should return -1 if we have:
+ // 1) NULL pointer as |handle|.
+ // 2) NULL pointer as far-end spectrum.
+ // 3) Incorrect spectrum size.
+ // 4) Too high precision in far-end spectrum (Q-domain > 15).
+ EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(NULL, far_u16_, spectrum_size_, 0));
+ EXPECT_EQ(-1,
+ WebRtc_AddFarSpectrumFix(farend_handle_, NULL, spectrum_size_, 0));
+ EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_,
+ spectrum_size_ + 1, 0));
+ EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_,
+ spectrum_size_, 16));
+
+ // WebRtc_set_history_size() should return -1 if:
+ // 1) |handle| is NULL.
+ // 2) |history_size| <= 1.
+ EXPECT_EQ(-1, WebRtc_set_history_size(NULL, 1));
+ EXPECT_EQ(-1, WebRtc_set_history_size(handle_, 1));
+ // WebRtc_history_size() should return -1 if:
+ // 1) NULL pointer input.
+ EXPECT_EQ(-1, WebRtc_history_size(NULL));
+ // 2) there is a mismatch between history sizes.
+ void* tmp_handle = WebRtc_CreateDelayEstimator(farend_handle_, kHistorySize);
+ EXPECT_EQ(0, WebRtc_InitDelayEstimator(tmp_handle));
+ EXPECT_EQ(kDifferentHistorySize,
+ WebRtc_set_history_size(tmp_handle, kDifferentHistorySize));
+ EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(tmp_handle));
+ EXPECT_EQ(kHistorySize, WebRtc_set_history_size(handle_, kHistorySize));
+ EXPECT_EQ(-1, WebRtc_history_size(tmp_handle));
+
+ // WebRtc_set_lookahead() should return -1 if we try a value outside the
+ // buffer.
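+ // (Editor's note: per delay_estimator_wrapper.h, valid lookahead values
+ // are [0, max_lookahead] as set at create time.)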
+ EXPECT_EQ(-1, WebRtc_set_lookahead(handle_, kLookahead + 1)); + EXPECT_EQ(-1, WebRtc_set_lookahead(handle_, -1)); + + // WebRtc_set_allowed_offset() should return -1 if we have: + // 1) NULL pointer as |handle|. + // 2) |allowed_offset| < 0. + EXPECT_EQ(-1, WebRtc_set_allowed_offset(NULL, 0)); + EXPECT_EQ(-1, WebRtc_set_allowed_offset(handle_, -1)); + + EXPECT_EQ(-1, WebRtc_get_allowed_offset(NULL)); + + // WebRtc_enable_robust_validation() should return -1 if we have: + // 1) NULL pointer as |handle|. + // 2) Incorrect |enable| value (not 0 or 1). + EXPECT_EQ(-1, WebRtc_enable_robust_validation(NULL, kEnable[0])); + EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, -1)); + EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, 2)); + + // WebRtc_is_robust_validation_enabled() should return -1 if we have NULL + // pointer as |handle|. + EXPECT_EQ(-1, WebRtc_is_robust_validation_enabled(NULL)); + + // WebRtc_DelayEstimatorProcessFloat() should return -1 if we have: + // 1) NULL pointer as |handle|. + // 2) NULL pointer as near-end spectrum. + // 3) Incorrect spectrum size. + // 4) Non matching history sizes if multiple delay estimators using the same + // far-end reference. + EXPECT_EQ(-1, + WebRtc_DelayEstimatorProcessFloat(NULL, near_f_, spectrum_size_)); + // Use |handle_| which is properly created at SetUp(). + EXPECT_EQ(-1, + WebRtc_DelayEstimatorProcessFloat(handle_, NULL, spectrum_size_)); + EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(handle_, near_f_, + spectrum_size_ + 1)); + // |tmp_handle| is already in a non-matching state. + EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(tmp_handle, near_f_, + spectrum_size_)); + + // WebRtc_DelayEstimatorProcessFix() should return -1 if we have: + // 1) NULL pointer as |handle|. + // 2) NULL pointer as near-end spectrum. + // 3) Incorrect spectrum size. + // 4) Too high precision in near-end spectrum (Q-domain > 15). + // 5) Non matching history sizes if multiple delay estimators using the same + // far-end reference. + EXPECT_EQ( + -1, WebRtc_DelayEstimatorProcessFix(NULL, near_u16_, spectrum_size_, 0)); + EXPECT_EQ(-1, + WebRtc_DelayEstimatorProcessFix(handle_, NULL, spectrum_size_, 0)); + EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_, + spectrum_size_ + 1, 0)); + EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_, + spectrum_size_, 16)); + // |tmp_handle| is already in a non-matching state. + EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(tmp_handle, near_u16_, + spectrum_size_, 0)); + WebRtc_FreeDelayEstimator(tmp_handle); + + // WebRtc_last_delay() should return -1 if we have a NULL pointer as |handle|. + EXPECT_EQ(-1, WebRtc_last_delay(NULL)); + + // Free any local memory if needed. + WebRtc_FreeDelayEstimator(handle); +} + +TEST_F(DelayEstimatorTest, VerifyAllowedOffset) { + // Is set to zero by default. + EXPECT_EQ(0, WebRtc_get_allowed_offset(handle_)); + for (int i = 1; i >= 0; i--) { + EXPECT_EQ(0, WebRtc_set_allowed_offset(handle_, i)); + EXPECT_EQ(i, WebRtc_get_allowed_offset(handle_)); + Init(); + // Unaffected over a reset. + EXPECT_EQ(i, WebRtc_get_allowed_offset(handle_)); + } +} + +TEST_F(DelayEstimatorTest, VerifyEnableRobustValidation) { + // Disabled by default. + EXPECT_EQ(0, WebRtc_is_robust_validation_enabled(handle_)); + for (size_t i = 0; i < kSizeEnable; ++i) { + EXPECT_EQ(0, WebRtc_enable_robust_validation(handle_, kEnable[i])); + EXPECT_EQ(kEnable[i], WebRtc_is_robust_validation_enabled(handle_)); + Init(); + // Unaffected over a reset. 
+ EXPECT_EQ(kEnable[i], WebRtc_is_robust_validation_enabled(handle_)); + } +} + +TEST_F(DelayEstimatorTest, InitializedSpectrumAfterProcess) { + // In this test we verify that the mean spectra are initialized after first + // time we call WebRtc_AddFarSpectrum() and Process() respectively. The test + // also verifies the state is not left for zero spectra. + const float kZerosFloat[kSpectrumSize] = {0.0}; + const uint16_t kZerosU16[kSpectrumSize] = {0}; + + // For floating point operations, process one frame and verify initialization + // flag. + Init(); + EXPECT_EQ(0, WebRtc_AddFarSpectrumFloat(farend_handle_, kZerosFloat, + spectrum_size_)); + EXPECT_EQ(0, farend_self_->far_spectrum_initialized); + EXPECT_EQ(0, + WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_, spectrum_size_)); + EXPECT_EQ(1, farend_self_->far_spectrum_initialized); + EXPECT_EQ(-2, WebRtc_DelayEstimatorProcessFloat(handle_, kZerosFloat, + spectrum_size_)); + EXPECT_EQ(0, self_->near_spectrum_initialized); + EXPECT_EQ( + -2, WebRtc_DelayEstimatorProcessFloat(handle_, near_f_, spectrum_size_)); + EXPECT_EQ(1, self_->near_spectrum_initialized); + + // For fixed point operations, process one frame and verify initialization + // flag. + Init(); + EXPECT_EQ(0, WebRtc_AddFarSpectrumFix(farend_handle_, kZerosU16, + spectrum_size_, 0)); + EXPECT_EQ(0, farend_self_->far_spectrum_initialized); + EXPECT_EQ( + 0, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_, spectrum_size_, 0)); + EXPECT_EQ(1, farend_self_->far_spectrum_initialized); + EXPECT_EQ(-2, WebRtc_DelayEstimatorProcessFix(handle_, kZerosU16, + spectrum_size_, 0)); + EXPECT_EQ(0, self_->near_spectrum_initialized); + EXPECT_EQ(-2, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_, + spectrum_size_, 0)); + EXPECT_EQ(1, self_->near_spectrum_initialized); +} + +TEST_F(DelayEstimatorTest, CorrectLastDelay) { + // In this test we verify that we get the correct last delay upon valid call. + // We simply process the same data until we leave the initialized state + // (|last_delay| = -2). Then we compare the Process() output with the + // last_delay() call. + + // TODO(bjornv): Update quality values for robust validation. + int last_delay = 0; + // Floating point operations. + Init(); + for (int i = 0; i < 200; i++) { + EXPECT_EQ( + 0, WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_, spectrum_size_)); + last_delay = + WebRtc_DelayEstimatorProcessFloat(handle_, near_f_, spectrum_size_); + if (last_delay != -2) { + EXPECT_EQ(last_delay, WebRtc_last_delay(handle_)); + if (!WebRtc_is_robust_validation_enabled(handle_)) { + EXPECT_FLOAT_EQ(7203.f / kMaxBitCountsQ9, + WebRtc_last_delay_quality(handle_)); + } + break; + } + } + // Verify that we have left the initialized state. + EXPECT_NE(-2, WebRtc_last_delay(handle_)); + EXPECT_LT(0, WebRtc_last_delay_quality(handle_)); + + // Fixed point operations. + Init(); + for (int i = 0; i < 200; i++) { + EXPECT_EQ(0, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_, + spectrum_size_, 0)); + last_delay = + WebRtc_DelayEstimatorProcessFix(handle_, near_u16_, spectrum_size_, 0); + if (last_delay != -2) { + EXPECT_EQ(last_delay, WebRtc_last_delay(handle_)); + if (!WebRtc_is_robust_validation_enabled(handle_)) { + EXPECT_FLOAT_EQ(7203.f / kMaxBitCountsQ9, + WebRtc_last_delay_quality(handle_)); + } + break; + } + } + // Verify that we have left the initialized state. 
+ EXPECT_NE(-2, WebRtc_last_delay(handle_));
+ EXPECT_LT(0, WebRtc_last_delay_quality(handle_));
+}
+
+TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfBinaryEstimatorFarend) {
+ // In this test we verify correct output on invalid API calls to the Binary
+ // Delay Estimator (far-end part).
+
+ BinaryDelayEstimatorFarend* binary = binary_farend_;
+ // WebRtc_CreateBinaryDelayEstimatorFarend() should return NULL if the input
+ // history size is less than 2. This is to make sure the buffer shifting
+ // applies properly.
+ // Make sure we have a non-NULL value at start, so we can detect NULL after
+ // create failure.
+ binary = WebRtc_CreateBinaryDelayEstimatorFarend(1);
+ EXPECT_TRUE(binary == NULL);
+}
+
+TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfBinaryEstimator) {
+ // In this test we verify correct output on invalid API calls to the Binary
+ // Delay Estimator.
+
+ BinaryDelayEstimator* binary_handle = binary_;
+ // WebRtc_CreateBinaryDelayEstimator() should return NULL if we have a NULL
+ // pointer as |binary_farend| or invalid input values. Upon failure, the
+ // |binary_handle| should be NULL.
+ // Make sure we have a non-NULL value at start, so we can detect NULL after
+ // create failure.
+ binary_handle = WebRtc_CreateBinaryDelayEstimator(NULL, kLookahead);
+ EXPECT_TRUE(binary_handle == NULL);
+ binary_handle = WebRtc_CreateBinaryDelayEstimator(binary_farend_, -1);
+ EXPECT_TRUE(binary_handle == NULL);
+}
+
+TEST_F(DelayEstimatorTest, MeanEstimatorFix) {
+ // In this test we verify that we update the mean value in the correct
+ // direction only. With "direction" we mean increase or decrease.
+
+ int32_t mean_value = 4000;
+ int32_t mean_value_before = mean_value;
+ int32_t new_mean_value = mean_value * 2;
+
+ // Increasing |mean_value|.
+ WebRtc_MeanEstimatorFix(new_mean_value, 10, &mean_value);
+ EXPECT_LT(mean_value_before, mean_value);
+ EXPECT_GT(new_mean_value, mean_value);
+
+ // Decreasing |mean_value|.
+ new_mean_value = mean_value / 2;
+ mean_value_before = mean_value;
+ WebRtc_MeanEstimatorFix(new_mean_value, 10, &mean_value);
+ EXPECT_GT(mean_value_before, mean_value);
+ EXPECT_LT(new_mean_value, mean_value);
+}
+
+TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearSameSpectrum) {
+ // In this test we verify that we get the correct delay estimates if we shift
+ // the signal accordingly. We create two Binary Delay Estimators and feed them
+ // with the same signals, so they should output the same results.
+ // We verify both causal and non-causal delays.
+ // For these noise free signals, the robust validation should not have an
+ // impact, hence we turn robust validation on/off for both reference and
+ // delayed near end.
+
+ for (size_t i = 0; i < kSizeEnable; ++i) {
+ for (size_t j = 0; j < kSizeEnable; ++j) {
+ RunBinarySpectraTest(0, 0, kEnable[i], kEnable[j]);
+ }
+ }
+}
+
+TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearDifferentSpectrum) {
+ // In this test we use the same setup as above, but we now feed the two Binary
+ // Delay Estimators with different signals, so they should output different
+ // results.
+ // For these noise free signals, the robust validation should not have an
+ // impact, hence we turn robust validation on/off for both reference and
+ // delayed near end.
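+ // Editor's note: with |kNearOffset| = 1 the second estimator processes a
+ // near-end sequence delayed by one extra block, so RunBinarySpectra()
+ // expects its delay estimate to be one block larger than the reference.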
+ + const int kNearOffset = 1; + for (size_t i = 0; i < kSizeEnable; ++i) { + for (size_t j = 0; j < kSizeEnable; ++j) { + RunBinarySpectraTest(kNearOffset, 0, kEnable[i], kEnable[j]); + } + } +} + +TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearDifferentLookahead) { + // In this test we use the same setup as above, feeding the two Binary + // Delay Estimators with the same signals. The difference is that we create + // them with different lookahead. + // For these noise free signals, the robust validation should not have an + // impact, hence we turn robust validation on/off for both reference and + // delayed near end. + + const int kLookaheadOffset = 1; + for (size_t i = 0; i < kSizeEnable; ++i) { + for (size_t j = 0; j < kSizeEnable; ++j) { + RunBinarySpectraTest(0, kLookaheadOffset, kEnable[i], kEnable[j]); + } + } +} + +TEST_F(DelayEstimatorTest, AllowedOffsetNoImpactWhenRobustValidationDisabled) { + // The same setup as in ExactDelayEstimateMultipleNearSameSpectrum with the + // difference that |allowed_offset| is set for the reference binary delay + // estimator. + + binary_->allowed_offset = 10; + RunBinarySpectraTest(0, 0, 0, 0); + binary_->allowed_offset = 0; // Reset reference. +} + +TEST_F(DelayEstimatorTest, VerifyLookaheadAtCreate) { + void* farend_handle = + WebRtc_CreateDelayEstimatorFarend(kSpectrumSize, kMaxDelay); + ASSERT_TRUE(farend_handle != NULL); + void* handle = WebRtc_CreateDelayEstimator(farend_handle, kLookahead); + ASSERT_TRUE(handle != NULL); + EXPECT_EQ(kLookahead, WebRtc_lookahead(handle)); + WebRtc_FreeDelayEstimator(handle); + WebRtc_FreeDelayEstimatorFarend(farend_handle); +} + +TEST_F(DelayEstimatorTest, VerifyLookaheadIsSetAndKeptAfterInit) { + EXPECT_EQ(kLookahead, WebRtc_lookahead(handle_)); + EXPECT_EQ(kDifferentLookahead, + WebRtc_set_lookahead(handle_, kDifferentLookahead)); + EXPECT_EQ(kDifferentLookahead, WebRtc_lookahead(handle_)); + EXPECT_EQ(0, WebRtc_InitDelayEstimatorFarend(farend_handle_)); + EXPECT_EQ(kDifferentLookahead, WebRtc_lookahead(handle_)); + EXPECT_EQ(0, WebRtc_InitDelayEstimator(handle_)); + EXPECT_EQ(kDifferentLookahead, WebRtc_lookahead(handle_)); +} + +TEST_F(DelayEstimatorTest, VerifyHistorySizeAtCreate) { + EXPECT_EQ(kHistorySize, WebRtc_history_size(handle_)); +} + +TEST_F(DelayEstimatorTest, VerifyHistorySizeIsSetAndKeptAfterInit) { + EXPECT_EQ(kHistorySize, WebRtc_history_size(handle_)); + EXPECT_EQ(kDifferentHistorySize, + WebRtc_set_history_size(handle_, kDifferentHistorySize)); + EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(handle_)); + EXPECT_EQ(0, WebRtc_InitDelayEstimator(handle_)); + EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(handle_)); + EXPECT_EQ(0, WebRtc_InitDelayEstimatorFarend(farend_handle_)); + EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(handle_)); +} + +// TODO(bjornv): Add tests for SoftReset...(...). + +} // namespace diff --git a/audio_processing/utility/delay_estimator_wrapper.cc b/audio_processing/utility/delay_estimator_wrapper.cc new file mode 100644 index 0000000..33e9477 --- /dev/null +++ b/audio_processing/utility/delay_estimator_wrapper.cc @@ -0,0 +1,485 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/utility/delay_estimator_wrapper.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "audio_processing/utility/delay_estimator.h"
+#include "audio_processing/utility/delay_estimator_internal.h"
+#include "rtc_base/checks.h"
+
+// Only bit |kBandFirst| through bit |kBandLast| are processed and
+// |kBandLast| - |kBandFirst| must be < 32.
+enum { kBandFirst = 12 };
+enum { kBandLast = 43 };
+
+static __inline uint32_t SetBit(uint32_t in, int pos) {
+ uint32_t mask = (1 << pos);
+ uint32_t out = (in | mask);
+
+ return out;
+}
+
+// Calculates the mean recursively. Same version as WebRtc_MeanEstimatorFix(),
+// but for float.
+//
+// Inputs:
+// - new_value : New additional value.
+// - scale : Scale for smoothing (should be less than 1.0).
+//
+// Input/Output:
+// - mean_value : Pointer to the mean value for updating.
+//
+static void MeanEstimatorFloat(float new_value,
+ float scale,
+ float* mean_value) {
+ RTC_DCHECK_LT(scale, 1.0f);
+ *mean_value += (new_value - *mean_value) * scale;
+}
+
+// Computes the binary spectrum by comparing the input |spectrum| with a
+// |threshold_spectrum|. Float and fixed point versions.
+//
+// Inputs:
+// - spectrum : Spectrum of which the binary spectrum should be
+// calculated.
+// - threshold_spectrum : Threshold spectrum with which the input
+// spectrum is compared.
+// Return:
+// - out : Binary spectrum.
+//
+static uint32_t BinarySpectrumFix(const uint16_t* spectrum,
+ SpectrumType* threshold_spectrum,
+ int q_domain,
+ int* threshold_initialized) {
+ int i = kBandFirst;
+ uint32_t out = 0;
+
+ RTC_DCHECK_LT(q_domain, 16);
+
+ if (!(*threshold_initialized)) {
+ // Set the |threshold_spectrum| to half the input |spectrum| as starting
+ // value. This speeds up the convergence.
+ for (i = kBandFirst; i <= kBandLast; i++) {
+ if (spectrum[i] > 0) {
+ // Convert input spectrum from Q(|q_domain|) to Q15.
+ int32_t spectrum_q15 = ((int32_t)spectrum[i]) << (15 - q_domain);
+ threshold_spectrum[i].int32_ = (spectrum_q15 >> 1);
+ *threshold_initialized = 1;
+ }
+ }
+ }
+ for (i = kBandFirst; i <= kBandLast; i++) {
+ // Convert input spectrum from Q(|q_domain|) to Q15.
+ int32_t spectrum_q15 = ((int32_t)spectrum[i]) << (15 - q_domain);
+ // Update the |threshold_spectrum|.
+ WebRtc_MeanEstimatorFix(spectrum_q15, 6, &(threshold_spectrum[i].int32_));
+ // Convert |spectrum| at current frequency bin to a binary value.
+ if (spectrum_q15 > threshold_spectrum[i].int32_) {
+ out = SetBit(out, i - kBandFirst);
+ }
+ }
+
+ return out;
+}
+
+static uint32_t BinarySpectrumFloat(const float* spectrum,
+ SpectrumType* threshold_spectrum,
+ int* threshold_initialized) {
+ int i = kBandFirst;
+ uint32_t out = 0;
+ const float kScale = 1 / 64.0;
+
+ if (!(*threshold_initialized)) {
+ // Set the |threshold_spectrum| to half the input |spectrum| as starting
+ // value. This speeds up the convergence.
+ for (i = kBandFirst; i <= kBandLast; i++) {
+ if (spectrum[i] > 0.0f) {
+ threshold_spectrum[i].float_ = (spectrum[i] / 2);
+ *threshold_initialized = 1;
+ }
+ }
+ }
+
+ for (i = kBandFirst; i <= kBandLast; i++) {
+ // Update the |threshold_spectrum|.
+ MeanEstimatorFloat(spectrum[i], kScale, &(threshold_spectrum[i].float_));
+ // Convert |spectrum| at current frequency bin to a binary value.
+ if (spectrum[i] > threshold_spectrum[i].float_) {
+ out = SetBit(out, i - kBandFirst);
+ }
+ }
+
+ return out;
+}
+
+void WebRtc_FreeDelayEstimatorFarend(void* handle) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*)handle;
+
+ if (handle == NULL) {
+ return;
+ }
+
+ free(self->mean_far_spectrum);
+ self->mean_far_spectrum = NULL;
+
+ WebRtc_FreeBinaryDelayEstimatorFarend(self->binary_farend);
+ self->binary_farend = NULL;
+
+ free(self);
+}
+
+void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size) {
+ DelayEstimatorFarend* self = NULL;
+
+ // Check if the sub band used in the delay estimation is small enough to fit
+ // the binary spectra in a uint32_t.
+ static_assert(kBandLast - kBandFirst < 32, "");
+
+ if (spectrum_size >= kBandLast) {
+ self = static_cast<DelayEstimatorFarend*>(
+ malloc(sizeof(DelayEstimatorFarend)));
+ }
+
+ if (self != NULL) {
+ int memory_fail = 0;
+
+ // Allocate memory for the binary far-end spectrum handling.
+ self->binary_farend = WebRtc_CreateBinaryDelayEstimatorFarend(history_size);
+ memory_fail |= (self->binary_farend == NULL);
+
+ // Allocate memory for spectrum buffers.
+ self->mean_far_spectrum = static_cast<SpectrumType*>(
+ malloc(spectrum_size * sizeof(SpectrumType)));
+ memory_fail |= (self->mean_far_spectrum == NULL);
+
+ self->spectrum_size = spectrum_size;
+
+ if (memory_fail) {
+ WebRtc_FreeDelayEstimatorFarend(self);
+ self = NULL;
+ }
+ }
+
+ return self;
+}
+
+int WebRtc_InitDelayEstimatorFarend(void* handle) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*)handle;
+
+ if (self == NULL) {
+ return -1;
+ }
+
+ // Initialize far-end part of binary delay estimator.
+ WebRtc_InitBinaryDelayEstimatorFarend(self->binary_farend);
+
+ // Set averaged far and near end spectra to zero.
+ memset(self->mean_far_spectrum, 0,
+ sizeof(SpectrumType) * self->spectrum_size);
+ // Reset initialization indicators.
+ self->far_spectrum_initialized = 0;
+
+ return 0;
+}
+
+void WebRtc_SoftResetDelayEstimatorFarend(void* handle, int delay_shift) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*)handle;
+ RTC_DCHECK(self);
+ WebRtc_SoftResetBinaryDelayEstimatorFarend(self->binary_farend, delay_shift);
+}
+
+int WebRtc_AddFarSpectrumFix(void* handle,
+ const uint16_t* far_spectrum,
+ int spectrum_size,
+ int far_q) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*)handle;
+ uint32_t binary_spectrum = 0;
+
+ if (self == NULL) {
+ return -1;
+ }
+ if (far_spectrum == NULL) {
+ // Empty far end spectrum.
+ return -1;
+ }
+ if (spectrum_size != self->spectrum_size) {
+ // Data sizes don't match.
+ return -1;
+ }
+ if (far_q > 15) {
+ // If |far_q| is larger than 15 we cannot guarantee no wrap around.
+ return -1;
+ }
+
+ // Get binary spectrum.
+ binary_spectrum = BinarySpectrumFix(far_spectrum, self->mean_far_spectrum,
+ far_q, &(self->far_spectrum_initialized));
+ WebRtc_AddBinaryFarSpectrum(self->binary_farend, binary_spectrum);
+
+ return 0;
+}
+
+int WebRtc_AddFarSpectrumFloat(void* handle,
+ const float* far_spectrum,
+ int spectrum_size) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*)handle;
+ uint32_t binary_spectrum = 0;
+
+ if (self == NULL) {
+ return -1;
+ }
+ if (far_spectrum == NULL) {
+ // Empty far end spectrum.
+ return -1;
+ }
+ if (spectrum_size != self->spectrum_size) {
+ // Data sizes don't match.
+ return -1;
+ }
+
+ // Get binary spectrum.
+ binary_spectrum = BinarySpectrumFloat(far_spectrum, self->mean_far_spectrum,
+ &(self->far_spectrum_initialized));
+ WebRtc_AddBinaryFarSpectrum(self->binary_farend, binary_spectrum);
+
+ return 0;
+}
+
+void WebRtc_FreeDelayEstimator(void* handle) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+
+ if (handle == NULL) {
+ return;
+ }
+
+ free(self->mean_near_spectrum);
+ self->mean_near_spectrum = NULL;
+
+ WebRtc_FreeBinaryDelayEstimator(self->binary_handle);
+ self->binary_handle = NULL;
+
+ free(self);
+}
+
+void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead) {
+ DelayEstimator* self = NULL;
+ DelayEstimatorFarend* farend = (DelayEstimatorFarend*)farend_handle;
+
+ if (farend_handle != NULL) {
+ self = static_cast<DelayEstimator*>(malloc(sizeof(DelayEstimator)));
+ }
+
+ if (self != NULL) {
+ int memory_fail = 0;
+
+ // Allocate memory for the farend spectrum handling.
+ self->binary_handle =
+ WebRtc_CreateBinaryDelayEstimator(farend->binary_farend, max_lookahead);
+ memory_fail |= (self->binary_handle == NULL);
+
+ // Allocate memory for spectrum buffers.
+ self->mean_near_spectrum = static_cast<SpectrumType*>(
+ malloc(farend->spectrum_size * sizeof(SpectrumType)));
+ memory_fail |= (self->mean_near_spectrum == NULL);
+
+ self->spectrum_size = farend->spectrum_size;
+
+ if (memory_fail) {
+ WebRtc_FreeDelayEstimator(self);
+ self = NULL;
+ }
+ }
+
+ return self;
+}
+
+int WebRtc_InitDelayEstimator(void* handle) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+
+ if (self == NULL) {
+ return -1;
+ }
+
+ // Initialize binary delay estimator.
+ WebRtc_InitBinaryDelayEstimator(self->binary_handle);
+
+ // Set averaged far and near end spectra to zero.
+ memset(self->mean_near_spectrum, 0,
+ sizeof(SpectrumType) * self->spectrum_size);
+ // Reset initialization indicators.
+ self->near_spectrum_initialized = 0;
+
+ return 0;
+}
+
+int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+ RTC_DCHECK(self);
+ return WebRtc_SoftResetBinaryDelayEstimator(self->binary_handle, delay_shift);
+}
+
+int WebRtc_set_history_size(void* handle, int history_size) {
+ DelayEstimator* self = static_cast<DelayEstimator*>(handle);
+
+ if ((self == NULL) || (history_size <= 1)) {
+ return -1;
+ }
+ return WebRtc_AllocateHistoryBufferMemory(self->binary_handle, history_size);
+}
+
+int WebRtc_history_size(const void* handle) {
+ const DelayEstimator* self = static_cast<const DelayEstimator*>(handle);
+
+ if (self == NULL) {
+ return -1;
+ }
+ if (self->binary_handle->farend->history_size !=
+ self->binary_handle->history_size) {
+ // Non matching history sizes.
+ return -1; + } + return self->binary_handle->history_size; +} + +int WebRtc_set_lookahead(void* handle, int lookahead) { + DelayEstimator* self = (DelayEstimator*)handle; + RTC_DCHECK(self); + RTC_DCHECK(self->binary_handle); + if ((lookahead > self->binary_handle->near_history_size - 1) || + (lookahead < 0)) { + return -1; + } + self->binary_handle->lookahead = lookahead; + return self->binary_handle->lookahead; +} + +int WebRtc_lookahead(void* handle) { + DelayEstimator* self = (DelayEstimator*)handle; + RTC_DCHECK(self); + RTC_DCHECK(self->binary_handle); + return self->binary_handle->lookahead; +} + +int WebRtc_set_allowed_offset(void* handle, int allowed_offset) { + DelayEstimator* self = (DelayEstimator*)handle; + + if ((self == NULL) || (allowed_offset < 0)) { + return -1; + } + self->binary_handle->allowed_offset = allowed_offset; + return 0; +} + +int WebRtc_get_allowed_offset(const void* handle) { + const DelayEstimator* self = (const DelayEstimator*)handle; + + if (self == NULL) { + return -1; + } + return self->binary_handle->allowed_offset; +} + +int WebRtc_enable_robust_validation(void* handle, int enable) { + DelayEstimator* self = (DelayEstimator*)handle; + + if (self == NULL) { + return -1; + } + if ((enable < 0) || (enable > 1)) { + return -1; + } + RTC_DCHECK(self->binary_handle); + self->binary_handle->robust_validation_enabled = enable; + return 0; +} + +int WebRtc_is_robust_validation_enabled(const void* handle) { + const DelayEstimator* self = (const DelayEstimator*)handle; + + if (self == NULL) { + return -1; + } + return self->binary_handle->robust_validation_enabled; +} + +int WebRtc_DelayEstimatorProcessFix(void* handle, + const uint16_t* near_spectrum, + int spectrum_size, + int near_q) { + DelayEstimator* self = (DelayEstimator*)handle; + uint32_t binary_spectrum = 0; + + if (self == NULL) { + return -1; + } + if (near_spectrum == NULL) { + // Empty near end spectrum. + return -1; + } + if (spectrum_size != self->spectrum_size) { + // Data sizes don't match. + return -1; + } + if (near_q > 15) { + // If |near_q| is larger than 15 we cannot guarantee no wrap around. + return -1; + } + + // Get binary spectra. + binary_spectrum = + BinarySpectrumFix(near_spectrum, self->mean_near_spectrum, near_q, + &(self->near_spectrum_initialized)); + + return WebRtc_ProcessBinarySpectrum(self->binary_handle, binary_spectrum); +} + +int WebRtc_DelayEstimatorProcessFloat(void* handle, + const float* near_spectrum, + int spectrum_size) { + DelayEstimator* self = (DelayEstimator*)handle; + uint32_t binary_spectrum = 0; + + if (self == NULL) { + return -1; + } + if (near_spectrum == NULL) { + // Empty near end spectrum. + return -1; + } + if (spectrum_size != self->spectrum_size) { + // Data sizes don't match. + return -1; + } + + // Get binary spectrum. 
+ binary_spectrum = BinarySpectrumFloat(near_spectrum, self->mean_near_spectrum,
+ &(self->near_spectrum_initialized));
+
+ return WebRtc_ProcessBinarySpectrum(self->binary_handle, binary_spectrum);
+}
+
+int WebRtc_last_delay(void* handle) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+
+ if (self == NULL) {
+ return -1;
+ }
+
+ return WebRtc_binary_last_delay(self->binary_handle);
+}
+
+float WebRtc_last_delay_quality(void* handle) {
+ DelayEstimator* self = (DelayEstimator*)handle;
+ RTC_DCHECK(self);
+ return WebRtc_binary_last_delay_quality(self->binary_handle);
+}
diff --git a/audio_processing/utility/delay_estimator_wrapper.h b/audio_processing/utility/delay_estimator_wrapper.h
new file mode 100644
index 0000000..995470f
--- /dev/null
+++ b/audio_processing/utility/delay_estimator_wrapper.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Performs delay estimation on a block-by-block basis.
+// The return value is 0 - OK and -1 - Error, unless otherwise stated.
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
+
+#include <stdint.h>
+
+// Releases the memory allocated by WebRtc_CreateDelayEstimatorFarend(...)
+void WebRtc_FreeDelayEstimatorFarend(void* handle);
+
+// Allocates the memory needed by the far-end part of the delay estimation. The
+// memory needs to be initialized separately through
+// WebRtc_InitDelayEstimatorFarend(...).
+//
+// Inputs:
+// - spectrum_size : Size of the spectrum used both in far-end and
+// near-end. Used to allocate memory for spectrum
+// specific buffers.
+// - history_size : The far-end history buffer size. A change in buffer
+// size can be forced with WebRtc_set_history_size().
+// Note that the maximum delay which can be estimated is
+// determined together with WebRtc_set_lookahead().
+//
+// Return value:
+// - void* : Created |handle|. If the memory can't be allocated or
+// if any of the input parameters are invalid NULL is
+// returned.
+void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size);
+
+// Initializes the far-end part of the delay estimation instance returned by
+// WebRtc_CreateDelayEstimatorFarend(...)
+int WebRtc_InitDelayEstimatorFarend(void* handle);
+
+// Soft resets the far-end part of the delay estimation instance returned by
+// WebRtc_CreateDelayEstimatorFarend(...).
+// Input:
+// - delay_shift : The amount of blocks to shift history buffers.
+void WebRtc_SoftResetDelayEstimatorFarend(void* handle, int delay_shift);
+
+// Adds the far-end spectrum to the far-end history buffer. This spectrum is
+// used as reference when calculating the delay using
+// WebRtc_ProcessSpectrum().
+//
+// Inputs:
+// - far_spectrum : Far-end spectrum.
+// - spectrum_size : The size of the data arrays (same for both far- and
+// near-end).
+// - far_q : The Q-domain of the far-end data.
+//
+// Output:
+// - handle : Updated far-end instance.
+//
+int WebRtc_AddFarSpectrumFix(void* handle,
+ const uint16_t* far_spectrum,
+ int spectrum_size,
+ int far_q);
+
+// See WebRtc_AddFarSpectrumFix() for description.
+int WebRtc_AddFarSpectrumFloat(void* handle, + const float* far_spectrum, + int spectrum_size); + +// Releases the memory allocated by WebRtc_CreateDelayEstimator(...) +void WebRtc_FreeDelayEstimator(void* handle); + +// Allocates the memory needed by the delay estimation. The memory needs to be +// initialized separately through WebRtc_InitDelayEstimator(...). +// +// Inputs: +// - farend_handle : Pointer to the far-end part of the delay estimation +// instance created prior to this call using +// WebRtc_CreateDelayEstimatorFarend(). +// +// Note that WebRtc_CreateDelayEstimator does not take +// ownership of |farend_handle|, which has to be torn +// down properly after this instance. +// +// - max_lookahead : Maximum amount of non-causal lookahead allowed. The +// actual amount of lookahead used can be controlled by +// WebRtc_set_lookahead(...). The default |lookahead| is +// set to |max_lookahead| at create time. Use +// WebRtc_set_lookahead(...) before start if a different +// value is desired. +// +// Using lookahead can detect cases in which a near-end +// signal occurs before the corresponding far-end signal. +// It will delay the estimate for the current block by an +// equal amount, and the returned values will be offset +// by it. +// +// A value of zero is the typical no-lookahead case. +// This also represents the minimum delay which can be +// estimated. +// +// Note that the effective range of delay estimates is +// [-|lookahead|,... ,|history_size|-|lookahead|) +// where |history_size| is set through +// WebRtc_set_history_size(). +// +// Return value: +// - void* : Created |handle|. If the memory can't be allocated or +// if any of the input parameters are invalid NULL is +// returned. +void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead); + +// Initializes the delay estimation instance returned by +// WebRtc_CreateDelayEstimator(...) +int WebRtc_InitDelayEstimator(void* handle); + +// Soft resets the delay estimation instance returned by +// WebRtc_CreateDelayEstimator(...) +// Input: +// - delay_shift : The amount of blocks to shift history buffers. +// +// Return value: +// - actual_shifts : The actual number of shifts performed. +int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift); + +// Sets the effective |history_size| used. Valid values from 2. We simply need +// at least two delays to compare to perform an estimate. If |history_size| is +// changed, buffers are reallocated filling in with zeros if necessary. +// Note that changing the |history_size| affects both buffers in far-end and +// near-end. Hence it is important to change all DelayEstimators that use the +// same reference far-end, to the same |history_size| value. +// Inputs: +// - handle : Pointer to the delay estimation instance. +// - history_size : Effective history size to be used. +// Return value: +// - new_history_size : The new history size used. If the memory was not able +// to be allocated 0 is returned. +int WebRtc_set_history_size(void* handle, int history_size); + +// Returns the history_size currently used. +// Input: +// - handle : Pointer to the delay estimation instance. +int WebRtc_history_size(const void* handle); + +// Sets the amount of |lookahead| to use. Valid values are [0, max_lookahead] +// where |max_lookahead| was set at create time through +// WebRtc_CreateDelayEstimator(...). +// +// Input: +// - handle : Pointer to the delay estimation instance. +// - lookahead : The amount of lookahead to be used. 
+// +// Return value: +// - new_lookahead : The actual amount of lookahead set, unless |handle| is +// a NULL pointer or |lookahead| is invalid, for which an +// error is returned. +int WebRtc_set_lookahead(void* handle, int lookahead); + +// Returns the amount of lookahead we currently use. +// Input: +// - handle : Pointer to the delay estimation instance. +int WebRtc_lookahead(void* handle); + +// Sets the |allowed_offset| used in the robust validation scheme. If the +// delay estimator is used in an echo control component, this parameter is +// related to the filter length. In principle |allowed_offset| should be set to +// the echo control filter length minus the expected echo duration, i.e., the +// delay offset the echo control can handle without quality regression. The +// default value, used if not set manually, is zero. Note that |allowed_offset| +// has to be non-negative. +// Inputs: +// - handle : Pointer to the delay estimation instance. +// - allowed_offset : The amount of delay offset, measured in partitions, +// the echo control filter can handle. +int WebRtc_set_allowed_offset(void* handle, int allowed_offset); + +// Returns the |allowed_offset| in number of partitions. +int WebRtc_get_allowed_offset(const void* handle); + +// Enables/Disables a robust validation functionality in the delay estimation. +// This is by default set to disabled at create time. The state is preserved +// over a reset. +// Inputs: +// - handle : Pointer to the delay estimation instance. +// - enable : Enable (1) or disable (0) this feature. +int WebRtc_enable_robust_validation(void* handle, int enable); + +// Returns 1 if robust validation is enabled and 0 if disabled. +int WebRtc_is_robust_validation_enabled(const void* handle); + +// Estimates and returns the delay between the far-end and near-end blocks. The +// value will be offset by the lookahead (i.e. the lookahead should be +// subtracted from the returned value). +// Inputs: +// - handle : Pointer to the delay estimation instance. +// - near_spectrum : Pointer to the near-end spectrum data of the current +// block. +// - spectrum_size : The size of the data arrays (same for both far- and +// near-end). +// - near_q : The Q-domain of the near-end data. +// +// Output: +// - handle : Updated instance. +// +// Return value: +// - delay : >= 0 - Calculated delay value. +// -1 - Error. +// -2 - Insufficient data for estimation. +int WebRtc_DelayEstimatorProcessFix(void* handle, + const uint16_t* near_spectrum, + int spectrum_size, + int near_q); + +// See WebRtc_DelayEstimatorProcessFix() for description. +int WebRtc_DelayEstimatorProcessFloat(void* handle, + const float* near_spectrum, + int spectrum_size); + +// Returns the last calculated delay updated by the function +// WebRtc_DelayEstimatorProcess(...). +// +// Input: +// - handle : Pointer to the delay estimation instance. +// +// Return value: +// - delay : >= 0 - Last calculated delay value. +// -1 - Error. +// -2 - Insufficient data for estimation. +int WebRtc_last_delay(void* handle); + +// Returns the estimation quality/probability of the last calculated delay +// updated by the function WebRtc_DelayEstimatorProcess(...). The estimation +// quality is a value in the interval [0, 1]. The higher the value, the better +// the quality. +// +// Return value: +// - delay_quality : >= 0 - Estimation quality of last calculated delay. 
+float WebRtc_last_delay_quality(void* handle); + +#endif // MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_ diff --git a/audio_processing/utility/ooura_fft.cc b/audio_processing/utility/ooura_fft.cc new file mode 100644 index 0000000..a3f97d7 --- /dev/null +++ b/audio_processing/utility/ooura_fft.cc @@ -0,0 +1,540 @@ +/* + * http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html + * Copyright Takuya OOURA, 1996-2001 + * + * You may use, copy, modify and distribute this code for any purpose (include + * commercial use) and without fee. Please refer to this package when you modify + * this code. + * + * Changes by the WebRTC authors: + * - Trivial type modifications. + * - Minimal code subset to do rdft of length 128. + * - Optimizations because of known length. + * - Removed the global variables by moving the code in to a class in order + * to make it thread safe. + * + * All changes are covered by the WebRTC license and IP grant: + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_processing/utility/ooura_fft.h" + +#include "audio_processing/utility/ooura_fft_tables_common.h" +#include "rtc_base/system/arch.h" +#include "system_wrappers/include/cpu_features_wrapper.h" + +namespace webrtc { + +namespace { + +#if !(defined(MIPS_FPU_LE) || defined(WEBRTC_HAS_NEON)) +static void cft1st_128_C(float* a) { + const int n = 128; + int j, k1, k2; + float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + // The processing of the first set of elements was simplified in C to avoid + // some operations (multiplication by zero or one, addition of two elements + // multiplied by the same weight, ...). 
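+ // Editor's note: each group below is one radix-4 butterfly over four
+ // complex values stored as interleaved (re, im) float pairs, so indices
+ // advance in steps of 8 floats; the twiddle factors wk1*, wk2* and wk3*
+ // are read from the rdft_w and rdft_wk3ri tables.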
+ x0r = a[0] + a[2]; + x0i = a[1] + a[3]; + x1r = a[0] - a[2]; + x1i = a[1] - a[3]; + x2r = a[4] + a[6]; + x2i = a[5] + a[7]; + x3r = a[4] - a[6]; + x3i = a[5] - a[7]; + a[0] = x0r + x2r; + a[1] = x0i + x2i; + a[4] = x0r - x2r; + a[5] = x0i - x2i; + a[2] = x1r - x3i; + a[3] = x1i + x3r; + a[6] = x1r + x3i; + a[7] = x1i - x3r; + wk1r = rdft_w[2]; + x0r = a[8] + a[10]; + x0i = a[9] + a[11]; + x1r = a[8] - a[10]; + x1i = a[9] - a[11]; + x2r = a[12] + a[14]; + x2i = a[13] + a[15]; + x3r = a[12] - a[14]; + x3i = a[13] - a[15]; + a[8] = x0r + x2r; + a[9] = x0i + x2i; + a[12] = x2i - x0i; + a[13] = x0r - x2r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[10] = wk1r * (x0r - x0i); + a[11] = wk1r * (x0r + x0i); + x0r = x3i + x1r; + x0i = x3r - x1i; + a[14] = wk1r * (x0i - x0r); + a[15] = wk1r * (x0i + x0r); + k1 = 0; + for (j = 16; j < n; j += 16) { + k1 += 2; + k2 = 2 * k1; + wk2r = rdft_w[k1 + 0]; + wk2i = rdft_w[k1 + 1]; + wk1r = rdft_w[k2 + 0]; + wk1i = rdft_w[k2 + 1]; + wk3r = rdft_wk3ri_first[k1 + 0]; + wk3i = rdft_wk3ri_first[k1 + 1]; + x0r = a[j + 0] + a[j + 2]; + x0i = a[j + 1] + a[j + 3]; + x1r = a[j + 0] - a[j + 2]; + x1i = a[j + 1] - a[j + 3]; + x2r = a[j + 4] + a[j + 6]; + x2i = a[j + 5] + a[j + 7]; + x3r = a[j + 4] - a[j + 6]; + x3i = a[j + 5] - a[j + 7]; + a[j + 0] = x0r + x2r; + a[j + 1] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j + 4] = wk2r * x0r - wk2i * x0i; + a[j + 5] = wk2r * x0i + wk2i * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j + 2] = wk1r * x0r - wk1i * x0i; + a[j + 3] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j + 6] = wk3r * x0r - wk3i * x0i; + a[j + 7] = wk3r * x0i + wk3i * x0r; + wk1r = rdft_w[k2 + 2]; + wk1i = rdft_w[k2 + 3]; + wk3r = rdft_wk3ri_second[k1 + 0]; + wk3i = rdft_wk3ri_second[k1 + 1]; + x0r = a[j + 8] + a[j + 10]; + x0i = a[j + 9] + a[j + 11]; + x1r = a[j + 8] - a[j + 10]; + x1i = a[j + 9] - a[j + 11]; + x2r = a[j + 12] + a[j + 14]; + x2i = a[j + 13] + a[j + 15]; + x3r = a[j + 12] - a[j + 14]; + x3i = a[j + 13] - a[j + 15]; + a[j + 8] = x0r + x2r; + a[j + 9] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j + 12] = -wk2i * x0r - wk2r * x0i; + a[j + 13] = -wk2i * x0i + wk2r * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j + 10] = wk1r * x0r - wk1i * x0i; + a[j + 11] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j + 14] = wk3r * x0r - wk3i * x0i; + a[j + 15] = wk3r * x0i + wk3i * x0r; + } +} + +static void cftmdl_128_C(float* a) { + const int l = 8; + const int n = 128; + const int m = 32; + int j0, j1, j2, j3, k, k1, k2, m2; + float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + for (j0 = 0; j0 < l; j0 += 2) { + j1 = j0 + 8; + j2 = j0 + 16; + j3 = j0 + 24; + x0r = a[j0 + 0] + a[j1 + 0]; + x0i = a[j0 + 1] + a[j1 + 1]; + x1r = a[j0 + 0] - a[j1 + 0]; + x1i = a[j0 + 1] - a[j1 + 1]; + x2r = a[j2 + 0] + a[j3 + 0]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2 + 0] - a[j3 + 0]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j0 + 0] = x0r + x2r; + a[j0 + 1] = x0i + x2i; + a[j2 + 0] = x0r - x2r; + a[j2 + 1] = x0i - x2i; + a[j1 + 0] = x1r - x3i; + a[j1 + 1] = x1i + x3r; + a[j3 + 0] = x1r + x3i; + a[j3 + 1] = x1i - x3r; + } + wk1r = rdft_w[2]; + for (j0 = m; j0 < l + m; j0 += 2) { + j1 = j0 + 8; + j2 = j0 + 16; + j3 = j0 + 24; + x0r = a[j0 + 0] + a[j1 + 0]; + x0i = a[j0 + 1] + a[j1 + 1]; + x1r = a[j0 + 0] - a[j1 + 0]; + x1i = a[j0 + 1] - a[j1 + 1]; + x2r = a[j2 + 0] + a[j3 + 0]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2 + 0] - a[j3 + 0]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j0 + 0] = x0r + 
x2r; + a[j0 + 1] = x0i + x2i; + a[j2 + 0] = x2i - x0i; + a[j2 + 1] = x0r - x2r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j1 + 0] = wk1r * (x0r - x0i); + a[j1 + 1] = wk1r * (x0r + x0i); + x0r = x3i + x1r; + x0i = x3r - x1i; + a[j3 + 0] = wk1r * (x0i - x0r); + a[j3 + 1] = wk1r * (x0i + x0r); + } + k1 = 0; + m2 = 2 * m; + for (k = m2; k < n; k += m2) { + k1 += 2; + k2 = 2 * k1; + wk2r = rdft_w[k1 + 0]; + wk2i = rdft_w[k1 + 1]; + wk1r = rdft_w[k2 + 0]; + wk1i = rdft_w[k2 + 1]; + wk3r = rdft_wk3ri_first[k1 + 0]; + wk3i = rdft_wk3ri_first[k1 + 1]; + for (j0 = k; j0 < l + k; j0 += 2) { + j1 = j0 + 8; + j2 = j0 + 16; + j3 = j0 + 24; + x0r = a[j0 + 0] + a[j1 + 0]; + x0i = a[j0 + 1] + a[j1 + 1]; + x1r = a[j0 + 0] - a[j1 + 0]; + x1i = a[j0 + 1] - a[j1 + 1]; + x2r = a[j2 + 0] + a[j3 + 0]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2 + 0] - a[j3 + 0]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j0 + 0] = x0r + x2r; + a[j0 + 1] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j2 + 0] = wk2r * x0r - wk2i * x0i; + a[j2 + 1] = wk2r * x0i + wk2i * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j1 + 0] = wk1r * x0r - wk1i * x0i; + a[j1 + 1] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j3 + 0] = wk3r * x0r - wk3i * x0i; + a[j3 + 1] = wk3r * x0i + wk3i * x0r; + } + wk1r = rdft_w[k2 + 2]; + wk1i = rdft_w[k2 + 3]; + wk3r = rdft_wk3ri_second[k1 + 0]; + wk3i = rdft_wk3ri_second[k1 + 1]; + for (j0 = k + m; j0 < l + (k + m); j0 += 2) { + j1 = j0 + 8; + j2 = j0 + 16; + j3 = j0 + 24; + x0r = a[j0 + 0] + a[j1 + 0]; + x0i = a[j0 + 1] + a[j1 + 1]; + x1r = a[j0 + 0] - a[j1 + 0]; + x1i = a[j0 + 1] - a[j1 + 1]; + x2r = a[j2 + 0] + a[j3 + 0]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2 + 0] - a[j3 + 0]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j0 + 0] = x0r + x2r; + a[j0 + 1] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j2 + 0] = -wk2i * x0r - wk2r * x0i; + a[j2 + 1] = -wk2i * x0i + wk2r * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j1 + 0] = wk1r * x0r - wk1i * x0i; + a[j1 + 1] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j3 + 0] = wk3r * x0r - wk3i * x0i; + a[j3 + 1] = wk3r * x0i + wk3i * x0r; + } + } +} + +static void rftfsub_128_C(float* a) { + const float* c = rdft_w + 32; + int j1, j2, k1, k2; + float wkr, wki, xr, xi, yr, yi; + + for (j1 = 1, j2 = 2; j2 < 64; j1 += 1, j2 += 2) { + k2 = 128 - j2; + k1 = 32 - j1; + wkr = 0.5f - c[k1]; + wki = c[j1]; + xr = a[j2 + 0] - a[k2 + 0]; + xi = a[j2 + 1] + a[k2 + 1]; + yr = wkr * xr - wki * xi; + yi = wkr * xi + wki * xr; + a[j2 + 0] -= yr; + a[j2 + 1] -= yi; + a[k2 + 0] += yr; + a[k2 + 1] -= yi; + } +} + +static void rftbsub_128_C(float* a) { + const float* c = rdft_w + 32; + int j1, j2, k1, k2; + float wkr, wki, xr, xi, yr, yi; + + a[1] = -a[1]; + for (j1 = 1, j2 = 2; j2 < 64; j1 += 1, j2 += 2) { + k2 = 128 - j2; + k1 = 32 - j1; + wkr = 0.5f - c[k1]; + wki = c[j1]; + xr = a[j2 + 0] - a[k2 + 0]; + xi = a[j2 + 1] + a[k2 + 1]; + yr = wkr * xr + wki * xi; + yi = wkr * xi - wki * xr; + a[j2 + 0] = a[j2 + 0] - yr; + a[j2 + 1] = yi - a[j2 + 1]; + a[k2 + 0] = yr + a[k2 + 0]; + a[k2 + 1] = yi - a[k2 + 1]; + } + a[65] = -a[65]; +} +#endif + +} // namespace + +OouraFft::OouraFft() { +#if defined(WEBRTC_ARCH_X86_FAMILY) + use_sse2_ = (WebRtc_GetCPUInfo(kSSE2) != 0); +#else + use_sse2_ = false; +#endif +} + +OouraFft::~OouraFft() = default; + +void OouraFft::Fft(float* a) const { + float xi; + bitrv2_128(a); + cftfsub_128(a); + rftfsub_128(a); + xi = a[0] - a[1]; + a[0] += a[1]; + a[1] = xi; +} +void OouraFft::InverseFft(float* a) const { + a[1] = 0.5f * 
(a[0] - a[1]);
+  a[0] -= a[1];
+  rftbsub_128(a);
+  bitrv2_128(a);
+  cftbsub_128(a);
+}
+
+void OouraFft::cft1st_128(float* a) const {
+#if defined(MIPS_FPU_LE)
+  cft1st_128_mips(a);
+#elif defined(WEBRTC_HAS_NEON)
+  cft1st_128_neon(a);
+#elif defined(WEBRTC_ARCH_X86_FAMILY)
+  if (use_sse2_) {
+    cft1st_128_SSE2(a);
+  } else {
+    cft1st_128_C(a);
+  }
+#else
+  cft1st_128_C(a);
+#endif
+}
+void OouraFft::cftmdl_128(float* a) const {
+#if defined(MIPS_FPU_LE)
+  cftmdl_128_mips(a);
+#elif defined(WEBRTC_HAS_NEON)
+  cftmdl_128_neon(a);
+#elif defined(WEBRTC_ARCH_X86_FAMILY)
+  if (use_sse2_) {
+    cftmdl_128_SSE2(a);
+  } else {
+    cftmdl_128_C(a);
+  }
+#else
+  cftmdl_128_C(a);
+#endif
+}
+void OouraFft::rftfsub_128(float* a) const {
+#if defined(MIPS_FPU_LE)
+  rftfsub_128_mips(a);
+#elif defined(WEBRTC_HAS_NEON)
+  rftfsub_128_neon(a);
+#elif defined(WEBRTC_ARCH_X86_FAMILY)
+  if (use_sse2_) {
+    rftfsub_128_SSE2(a);
+  } else {
+    rftfsub_128_C(a);
+  }
+#else
+  rftfsub_128_C(a);
+#endif
+}
+
+void OouraFft::rftbsub_128(float* a) const {
+#if defined(MIPS_FPU_LE)
+  rftbsub_128_mips(a);
+#elif defined(WEBRTC_HAS_NEON)
+  rftbsub_128_neon(a);
+#elif defined(WEBRTC_ARCH_X86_FAMILY)
+  if (use_sse2_) {
+    rftbsub_128_SSE2(a);
+  } else {
+    rftbsub_128_C(a);
+  }
+#else
+  rftbsub_128_C(a);
+#endif
+}
+
+void OouraFft::cftbsub_128(float* a) const {
+  int j, j1, j2, j3, l;
+  float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+  cft1st_128(a);
+  cftmdl_128(a);
+  l = 32;
+
+  for (j = 0; j < l; j += 2) {
+    j1 = j + l;
+    j2 = j1 + l;
+    j3 = j2 + l;
+    x0r = a[j] + a[j1];
+    x0i = -a[j + 1] - a[j1 + 1];
+    x1r = a[j] - a[j1];
+    x1i = -a[j + 1] + a[j1 + 1];
+    x2r = a[j2] + a[j3];
+    x2i = a[j2 + 1] + a[j3 + 1];
+    x3r = a[j2] - a[j3];
+    x3i = a[j2 + 1] - a[j3 + 1];
+    a[j] = x0r + x2r;
+    a[j + 1] = x0i - x2i;
+    a[j2] = x0r - x2r;
+    a[j2 + 1] = x0i + x2i;
+    a[j1] = x1r - x3i;
+    a[j1 + 1] = x1i - x3r;
+    a[j3] = x1r + x3i;
+    a[j3 + 1] = x1i + x3r;
+  }
+}
+
+void OouraFft::cftfsub_128(float* a) const {
+  int j, j1, j2, j3, l;
+  float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+  cft1st_128(a);
+  cftmdl_128(a);
+  l = 32;
+  for (j = 0; j < l; j += 2) {
+    j1 = j + l;
+    j2 = j1 + l;
+    j3 = j2 + l;
+    x0r = a[j] + a[j1];
+    x0i = a[j + 1] + a[j1 + 1];
+    x1r = a[j] - a[j1];
+    x1i = a[j + 1] - a[j1 + 1];
+    x2r = a[j2] + a[j3];
+    x2i = a[j2 + 1] + a[j3 + 1];
+    x3r = a[j2] - a[j3];
+    x3i = a[j2 + 1] - a[j3 + 1];
+    a[j] = x0r + x2r;
+    a[j + 1] = x0i + x2i;
+    a[j2] = x0r - x2r;
+    a[j2 + 1] = x0i - x2i;
+    a[j1] = x1r - x3i;
+    a[j1 + 1] = x1i + x3r;
+    a[j3] = x1r + x3i;
+    a[j3 + 1] = x1i - x3r;
+  }
+}
+
+void OouraFft::bitrv2_128(float* a) const {
+  /*
+      The following things have been attempted but are no faster:
+      (a) Storing the swap indexes in a LUT (index calculations are done
+          for 'free' while waiting on memory/L1).
+      (b) Consolidating the load/store of two consecutive floats into a
+          64 bit integer (execution is memory/L1 bound).
+      (c) Doing a mix of floats and 64 bit integers to maximize register
+          utilization (execution is memory/L1 bound).
+      (d) Replacing ip[i] by ((k<<31)>>25) + ((k >> 1)<<5).
+      (e) Hard-coding the offsets to completely eliminate index
+          calculations.
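+
+      As a concrete example of the permutation itself: the array holds 64
+      interleaved re/im pairs, so complex indexes are 6 bits wide, and the
+      pair at complex index 1 (a[2], a[3]) trades places with the pair at
+      complex index 32 (a[64], a[65]), 100000b being the bit reversal of
+      000001b.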
+ */ + + unsigned int j, j1, k, k1; + float xr, xi, yr, yi; + + const int ip[4] = {0, 64, 32, 96}; + for (k = 0; k < 4; k++) { + for (j = 0; j < k; j++) { + j1 = 2 * j + ip[k]; + k1 = 2 * k + ip[j]; + xr = a[j1 + 0]; + xi = a[j1 + 1]; + yr = a[k1 + 0]; + yi = a[k1 + 1]; + a[j1 + 0] = yr; + a[j1 + 1] = yi; + a[k1 + 0] = xr; + a[k1 + 1] = xi; + j1 += 8; + k1 += 16; + xr = a[j1 + 0]; + xi = a[j1 + 1]; + yr = a[k1 + 0]; + yi = a[k1 + 1]; + a[j1 + 0] = yr; + a[j1 + 1] = yi; + a[k1 + 0] = xr; + a[k1 + 1] = xi; + j1 += 8; + k1 -= 8; + xr = a[j1 + 0]; + xi = a[j1 + 1]; + yr = a[k1 + 0]; + yi = a[k1 + 1]; + a[j1 + 0] = yr; + a[j1 + 1] = yi; + a[k1 + 0] = xr; + a[k1 + 1] = xi; + j1 += 8; + k1 += 16; + xr = a[j1 + 0]; + xi = a[j1 + 1]; + yr = a[k1 + 0]; + yi = a[k1 + 1]; + a[j1 + 0] = yr; + a[j1 + 1] = yi; + a[k1 + 0] = xr; + a[k1 + 1] = xi; + } + j1 = 2 * k + 8 + ip[k]; + k1 = j1 + 8; + xr = a[j1 + 0]; + xi = a[j1 + 1]; + yr = a[k1 + 0]; + yi = a[k1 + 1]; + a[j1 + 0] = yr; + a[j1 + 1] = yi; + a[k1 + 0] = xr; + a[k1 + 1] = xi; + } +} + +} // namespace webrtc diff --git a/audio_processing/utility/ooura_fft.h b/audio_processing/utility/ooura_fft.h new file mode 100644 index 0000000..0cdd6aa --- /dev/null +++ b/audio_processing/utility/ooura_fft.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_H_ +#define MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_H_ + +#include "rtc_base/system/arch.h" + +namespace webrtc { + +#if defined(WEBRTC_ARCH_X86_FAMILY) +void cft1st_128_SSE2(float* a); +void cftmdl_128_SSE2(float* a); +void rftfsub_128_SSE2(float* a); +void rftbsub_128_SSE2(float* a); +#endif + +#if defined(MIPS_FPU_LE) +void cft1st_128_mips(float* a); +void cftmdl_128_mips(float* a); +void rftfsub_128_mips(float* a); +void rftbsub_128_mips(float* a); +#endif + +#if defined(WEBRTC_HAS_NEON) +void cft1st_128_neon(float* a); +void cftmdl_128_neon(float* a); +void rftfsub_128_neon(float* a); +void rftbsub_128_neon(float* a); +#endif + +class OouraFft { + public: + OouraFft(); + ~OouraFft(); + void Fft(float* a) const; + void InverseFft(float* a) const; + + private: + void cft1st_128(float* a) const; + void cftmdl_128(float* a) const; + void rftfsub_128(float* a) const; + void rftbsub_128(float* a) const; + + void cftfsub_128(float* a) const; + void cftbsub_128(float* a) const; + void bitrv2_128(float* a) const; + bool use_sse2_; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_H_ diff --git a/audio_processing/utility/ooura_fft_mips.cc b/audio_processing/utility/ooura_fft_mips.cc new file mode 100644 index 0000000..21435ad --- /dev/null +++ b/audio_processing/utility/ooura_fft_mips.cc @@ -0,0 +1,1245 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "audio_processing/utility/ooura_fft.h" +#include "audio_processing/utility/ooura_fft_tables_common.h" + +namespace webrtc { + +#if defined(MIPS_FPU_LE) +void bitrv2_128_mips(float* a) { + // n is 128 + float xr, xi, yr, yi; + + xr = a[8]; + xi = a[9]; + yr = a[16]; + yi = a[17]; + a[8] = yr; + a[9] = yi; + a[16] = xr; + a[17] = xi; + + xr = a[64]; + xi = a[65]; + yr = a[2]; + yi = a[3]; + a[64] = yr; + a[65] = yi; + a[2] = xr; + a[3] = xi; + + xr = a[72]; + xi = a[73]; + yr = a[18]; + yi = a[19]; + a[72] = yr; + a[73] = yi; + a[18] = xr; + a[19] = xi; + + xr = a[80]; + xi = a[81]; + yr = a[10]; + yi = a[11]; + a[80] = yr; + a[81] = yi; + a[10] = xr; + a[11] = xi; + + xr = a[88]; + xi = a[89]; + yr = a[26]; + yi = a[27]; + a[88] = yr; + a[89] = yi; + a[26] = xr; + a[27] = xi; + + xr = a[74]; + xi = a[75]; + yr = a[82]; + yi = a[83]; + a[74] = yr; + a[75] = yi; + a[82] = xr; + a[83] = xi; + + xr = a[32]; + xi = a[33]; + yr = a[4]; + yi = a[5]; + a[32] = yr; + a[33] = yi; + a[4] = xr; + a[5] = xi; + + xr = a[40]; + xi = a[41]; + yr = a[20]; + yi = a[21]; + a[40] = yr; + a[41] = yi; + a[20] = xr; + a[21] = xi; + + xr = a[48]; + xi = a[49]; + yr = a[12]; + yi = a[13]; + a[48] = yr; + a[49] = yi; + a[12] = xr; + a[13] = xi; + + xr = a[56]; + xi = a[57]; + yr = a[28]; + yi = a[29]; + a[56] = yr; + a[57] = yi; + a[28] = xr; + a[29] = xi; + + xr = a[34]; + xi = a[35]; + yr = a[68]; + yi = a[69]; + a[34] = yr; + a[35] = yi; + a[68] = xr; + a[69] = xi; + + xr = a[42]; + xi = a[43]; + yr = a[84]; + yi = a[85]; + a[42] = yr; + a[43] = yi; + a[84] = xr; + a[85] = xi; + + xr = a[50]; + xi = a[51]; + yr = a[76]; + yi = a[77]; + a[50] = yr; + a[51] = yi; + a[76] = xr; + a[77] = xi; + + xr = a[58]; + xi = a[59]; + yr = a[92]; + yi = a[93]; + a[58] = yr; + a[59] = yi; + a[92] = xr; + a[93] = xi; + + xr = a[44]; + xi = a[45]; + yr = a[52]; + yi = a[53]; + a[44] = yr; + a[45] = yi; + a[52] = xr; + a[53] = xi; + + xr = a[96]; + xi = a[97]; + yr = a[6]; + yi = a[7]; + a[96] = yr; + a[97] = yi; + a[6] = xr; + a[7] = xi; + + xr = a[104]; + xi = a[105]; + yr = a[22]; + yi = a[23]; + a[104] = yr; + a[105] = yi; + a[22] = xr; + a[23] = xi; + + xr = a[112]; + xi = a[113]; + yr = a[14]; + yi = a[15]; + a[112] = yr; + a[113] = yi; + a[14] = xr; + a[15] = xi; + + xr = a[120]; + xi = a[121]; + yr = a[30]; + yi = a[31]; + a[120] = yr; + a[121] = yi; + a[30] = xr; + a[31] = xi; + + xr = a[98]; + xi = a[99]; + yr = a[70]; + yi = a[71]; + a[98] = yr; + a[99] = yi; + a[70] = xr; + a[71] = xi; + + xr = a[106]; + xi = a[107]; + yr = a[86]; + yi = a[87]; + a[106] = yr; + a[107] = yi; + a[86] = xr; + a[87] = xi; + + xr = a[114]; + xi = a[115]; + yr = a[78]; + yi = a[79]; + a[114] = yr; + a[115] = yi; + a[78] = xr; + a[79] = xi; + + xr = a[122]; + xi = a[123]; + yr = a[94]; + yi = a[95]; + a[122] = yr; + a[123] = yi; + a[94] = xr; + a[95] = xi; + + xr = a[100]; + xi = a[101]; + yr = a[38]; + yi = a[39]; + a[100] = yr; + a[101] = yi; + a[38] = xr; + a[39] = xi; + + xr = a[108]; + xi = a[109]; + yr = a[54]; + yi = a[55]; + a[108] = yr; + a[109] = yi; + a[54] = xr; + a[55] = xi; + + xr = a[116]; + xi = a[117]; + yr = a[46]; + yi = a[47]; + a[116] = yr; + a[117] = yi; + a[46] = xr; + a[47] = xi; + + xr = a[124]; + xi = a[125]; + yr = a[62]; + yi = a[63]; + a[124] = yr; + a[125] = yi; + a[62] = xr; + a[63] = xi; + + xr = a[110]; + xi = a[111]; + yr = a[118]; + yi = a[119]; + a[110] = yr; + a[111] = yi; + a[118] = xr; + a[119] = xi; +} + +void cft1st_128_mips(float* a) { + float f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, 
f10, f11, f12, f13, f14; + int a_ptr, p1_rdft, p2_rdft, count; + const float* first = rdft_wk3ri_first; + const float* second = rdft_wk3ri_second; + + __asm __volatile( + ".set push \n\t" + ".set noreorder \n\t" + // first 8 + "lwc1 %[f0], 0(%[a]) \n\t" + "lwc1 %[f1], 4(%[a]) \n\t" + "lwc1 %[f2], 8(%[a]) \n\t" + "lwc1 %[f3], 12(%[a]) \n\t" + "lwc1 %[f4], 16(%[a]) \n\t" + "lwc1 %[f5], 20(%[a]) \n\t" + "lwc1 %[f6], 24(%[a]) \n\t" + "lwc1 %[f7], 28(%[a]) \n\t" + "add.s %[f8], %[f0], %[f2] \n\t" + "sub.s %[f0], %[f0], %[f2] \n\t" + "add.s %[f2], %[f4], %[f6] \n\t" + "sub.s %[f4], %[f4], %[f6] \n\t" + "add.s %[f6], %[f1], %[f3] \n\t" + "sub.s %[f1], %[f1], %[f3] \n\t" + "add.s %[f3], %[f5], %[f7] \n\t" + "sub.s %[f5], %[f5], %[f7] \n\t" + "add.s %[f7], %[f8], %[f2] \n\t" + "sub.s %[f8], %[f8], %[f2] \n\t" + "sub.s %[f2], %[f1], %[f4] \n\t" + "add.s %[f1], %[f1], %[f4] \n\t" + "add.s %[f4], %[f6], %[f3] \n\t" + "sub.s %[f6], %[f6], %[f3] \n\t" + "sub.s %[f3], %[f0], %[f5] \n\t" + "add.s %[f0], %[f0], %[f5] \n\t" + "swc1 %[f7], 0(%[a]) \n\t" + "swc1 %[f8], 16(%[a]) \n\t" + "swc1 %[f2], 28(%[a]) \n\t" + "swc1 %[f1], 12(%[a]) \n\t" + "swc1 %[f4], 4(%[a]) \n\t" + "swc1 %[f6], 20(%[a]) \n\t" + "swc1 %[f3], 8(%[a]) \n\t" + "swc1 %[f0], 24(%[a]) \n\t" + // second 8 + "lwc1 %[f0], 32(%[a]) \n\t" + "lwc1 %[f1], 36(%[a]) \n\t" + "lwc1 %[f2], 40(%[a]) \n\t" + "lwc1 %[f3], 44(%[a]) \n\t" + "lwc1 %[f4], 48(%[a]) \n\t" + "lwc1 %[f5], 52(%[a]) \n\t" + "lwc1 %[f6], 56(%[a]) \n\t" + "lwc1 %[f7], 60(%[a]) \n\t" + "add.s %[f8], %[f4], %[f6] \n\t" + "sub.s %[f4], %[f4], %[f6] \n\t" + "add.s %[f6], %[f1], %[f3] \n\t" + "sub.s %[f1], %[f1], %[f3] \n\t" + "add.s %[f3], %[f0], %[f2] \n\t" + "sub.s %[f0], %[f0], %[f2] \n\t" + "add.s %[f2], %[f5], %[f7] \n\t" + "sub.s %[f5], %[f5], %[f7] \n\t" + "add.s %[f7], %[f4], %[f1] \n\t" + "sub.s %[f4], %[f4], %[f1] \n\t" + "add.s %[f1], %[f3], %[f8] \n\t" + "sub.s %[f3], %[f3], %[f8] \n\t" + "sub.s %[f8], %[f0], %[f5] \n\t" + "add.s %[f0], %[f0], %[f5] \n\t" + "add.s %[f5], %[f6], %[f2] \n\t" + "sub.s %[f6], %[f2], %[f6] \n\t" + "lwc1 %[f9], 8(%[rdft_w]) \n\t" + "sub.s %[f2], %[f8], %[f7] \n\t" + "add.s %[f8], %[f8], %[f7] \n\t" + "sub.s %[f7], %[f4], %[f0] \n\t" + "add.s %[f4], %[f4], %[f0] \n\t" + // prepare for loop + "addiu %[a_ptr], %[a], 64 \n\t" + "addiu %[p1_rdft], %[rdft_w], 8 \n\t" + "addiu %[p2_rdft], %[rdft_w], 16 \n\t" + "addiu %[count], $zero, 7 \n\t" + // finish second 8 + "mul.s %[f2], %[f9], %[f2] \n\t" + "mul.s %[f8], %[f9], %[f8] \n\t" + "mul.s %[f7], %[f9], %[f7] \n\t" + "mul.s %[f4], %[f9], %[f4] \n\t" + "swc1 %[f1], 32(%[a]) \n\t" + "swc1 %[f3], 52(%[a]) \n\t" + "swc1 %[f5], 36(%[a]) \n\t" + "swc1 %[f6], 48(%[a]) \n\t" + "swc1 %[f2], 40(%[a]) \n\t" + "swc1 %[f8], 44(%[a]) \n\t" + "swc1 %[f7], 56(%[a]) \n\t" + "swc1 %[f4], 60(%[a]) \n\t" + // loop + "1: \n\t" + "lwc1 %[f0], 0(%[a_ptr]) \n\t" + "lwc1 %[f1], 4(%[a_ptr]) \n\t" + "lwc1 %[f2], 8(%[a_ptr]) \n\t" + "lwc1 %[f3], 12(%[a_ptr]) \n\t" + "lwc1 %[f4], 16(%[a_ptr]) \n\t" + "lwc1 %[f5], 20(%[a_ptr]) \n\t" + "lwc1 %[f6], 24(%[a_ptr]) \n\t" + "lwc1 %[f7], 28(%[a_ptr]) \n\t" + "add.s %[f8], %[f0], %[f2] \n\t" + "sub.s %[f0], %[f0], %[f2] \n\t" + "add.s %[f2], %[f4], %[f6] \n\t" + "sub.s %[f4], %[f4], %[f6] \n\t" + "add.s %[f6], %[f1], %[f3] \n\t" + "sub.s %[f1], %[f1], %[f3] \n\t" + "add.s %[f3], %[f5], %[f7] \n\t" + "sub.s %[f5], %[f5], %[f7] \n\t" + "lwc1 %[f10], 4(%[p1_rdft]) \n\t" + "lwc1 %[f11], 0(%[p2_rdft]) \n\t" + "lwc1 %[f12], 4(%[p2_rdft]) \n\t" + "lwc1 %[f13], 8(%[first]) \n\t" + "lwc1 %[f14], 
12(%[first]) \n\t" + "add.s %[f7], %[f8], %[f2] \n\t" + "sub.s %[f8], %[f8], %[f2] \n\t" + "add.s %[f2], %[f6], %[f3] \n\t" + "sub.s %[f6], %[f6], %[f3] \n\t" + "add.s %[f3], %[f0], %[f5] \n\t" + "sub.s %[f0], %[f0], %[f5] \n\t" + "add.s %[f5], %[f1], %[f4] \n\t" + "sub.s %[f1], %[f1], %[f4] \n\t" + "swc1 %[f7], 0(%[a_ptr]) \n\t" + "swc1 %[f2], 4(%[a_ptr]) \n\t" + "mul.s %[f4], %[f9], %[f8] \n\t" +#if defined(MIPS32_R2_LE) + "mul.s %[f8], %[f10], %[f8] \n\t" + "mul.s %[f7], %[f11], %[f0] \n\t" + "mul.s %[f0], %[f12], %[f0] \n\t" + "mul.s %[f2], %[f13], %[f3] \n\t" + "mul.s %[f3], %[f14], %[f3] \n\t" + "nmsub.s %[f4], %[f4], %[f10], %[f6] \n\t" + "madd.s %[f8], %[f8], %[f9], %[f6] \n\t" + "nmsub.s %[f7], %[f7], %[f12], %[f5] \n\t" + "madd.s %[f0], %[f0], %[f11], %[f5] \n\t" + "nmsub.s %[f2], %[f2], %[f14], %[f1] \n\t" + "madd.s %[f3], %[f3], %[f13], %[f1] \n\t" +#else + "mul.s %[f7], %[f10], %[f6] \n\t" + "mul.s %[f6], %[f9], %[f6] \n\t" + "mul.s %[f8], %[f10], %[f8] \n\t" + "mul.s %[f2], %[f11], %[f0] \n\t" + "mul.s %[f11], %[f11], %[f5] \n\t" + "mul.s %[f5], %[f12], %[f5] \n\t" + "mul.s %[f0], %[f12], %[f0] \n\t" + "mul.s %[f12], %[f13], %[f3] \n\t" + "mul.s %[f13], %[f13], %[f1] \n\t" + "mul.s %[f1], %[f14], %[f1] \n\t" + "mul.s %[f3], %[f14], %[f3] \n\t" + "sub.s %[f4], %[f4], %[f7] \n\t" + "add.s %[f8], %[f6], %[f8] \n\t" + "sub.s %[f7], %[f2], %[f5] \n\t" + "add.s %[f0], %[f11], %[f0] \n\t" + "sub.s %[f2], %[f12], %[f1] \n\t" + "add.s %[f3], %[f13], %[f3] \n\t" +#endif + "swc1 %[f4], 16(%[a_ptr]) \n\t" + "swc1 %[f8], 20(%[a_ptr]) \n\t" + "swc1 %[f7], 8(%[a_ptr]) \n\t" + "swc1 %[f0], 12(%[a_ptr]) \n\t" + "swc1 %[f2], 24(%[a_ptr]) \n\t" + "swc1 %[f3], 28(%[a_ptr]) \n\t" + "lwc1 %[f0], 32(%[a_ptr]) \n\t" + "lwc1 %[f1], 36(%[a_ptr]) \n\t" + "lwc1 %[f2], 40(%[a_ptr]) \n\t" + "lwc1 %[f3], 44(%[a_ptr]) \n\t" + "lwc1 %[f4], 48(%[a_ptr]) \n\t" + "lwc1 %[f5], 52(%[a_ptr]) \n\t" + "lwc1 %[f6], 56(%[a_ptr]) \n\t" + "lwc1 %[f7], 60(%[a_ptr]) \n\t" + "add.s %[f8], %[f0], %[f2] \n\t" + "sub.s %[f0], %[f0], %[f2] \n\t" + "add.s %[f2], %[f4], %[f6] \n\t" + "sub.s %[f4], %[f4], %[f6] \n\t" + "add.s %[f6], %[f1], %[f3] \n\t" + "sub.s %[f1], %[f1], %[f3] \n\t" + "add.s %[f3], %[f5], %[f7] \n\t" + "sub.s %[f5], %[f5], %[f7] \n\t" + "lwc1 %[f11], 8(%[p2_rdft]) \n\t" + "lwc1 %[f12], 12(%[p2_rdft]) \n\t" + "lwc1 %[f13], 8(%[second]) \n\t" + "lwc1 %[f14], 12(%[second]) \n\t" + "add.s %[f7], %[f8], %[f2] \n\t" + "sub.s %[f8], %[f2], %[f8] \n\t" + "add.s %[f2], %[f6], %[f3] \n\t" + "sub.s %[f6], %[f3], %[f6] \n\t" + "add.s %[f3], %[f0], %[f5] \n\t" + "sub.s %[f0], %[f0], %[f5] \n\t" + "add.s %[f5], %[f1], %[f4] \n\t" + "sub.s %[f1], %[f1], %[f4] \n\t" + "swc1 %[f7], 32(%[a_ptr]) \n\t" + "swc1 %[f2], 36(%[a_ptr]) \n\t" + "mul.s %[f4], %[f10], %[f8] \n\t" +#if defined(MIPS32_R2_LE) + "mul.s %[f10], %[f10], %[f6] \n\t" + "mul.s %[f7], %[f11], %[f0] \n\t" + "mul.s %[f11], %[f11], %[f5] \n\t" + "mul.s %[f2], %[f13], %[f3] \n\t" + "mul.s %[f13], %[f13], %[f1] \n\t" + "madd.s %[f4], %[f4], %[f9], %[f6] \n\t" + "nmsub.s %[f10], %[f10], %[f9], %[f8] \n\t" + "nmsub.s %[f7], %[f7], %[f12], %[f5] \n\t" + "madd.s %[f11], %[f11], %[f12], %[f0] \n\t" + "nmsub.s %[f2], %[f2], %[f14], %[f1] \n\t" + "madd.s %[f13], %[f13], %[f14], %[f3] \n\t" +#else + "mul.s %[f2], %[f9], %[f6] \n\t" + "mul.s %[f10], %[f10], %[f6] \n\t" + "mul.s %[f9], %[f9], %[f8] \n\t" + "mul.s %[f7], %[f11], %[f0] \n\t" + "mul.s %[f8], %[f12], %[f5] \n\t" + "mul.s %[f11], %[f11], %[f5] \n\t" + "mul.s %[f12], %[f12], %[f0] \n\t" + "mul.s %[f5], %[f13], %[f3] 
\n\t" + "mul.s %[f0], %[f14], %[f1] \n\t" + "mul.s %[f13], %[f13], %[f1] \n\t" + "mul.s %[f14], %[f14], %[f3] \n\t" + "add.s %[f4], %[f4], %[f2] \n\t" + "sub.s %[f10], %[f10], %[f9] \n\t" + "sub.s %[f7], %[f7], %[f8] \n\t" + "add.s %[f11], %[f11], %[f12] \n\t" + "sub.s %[f2], %[f5], %[f0] \n\t" + "add.s %[f13], %[f13], %[f14] \n\t" +#endif + "swc1 %[f4], 48(%[a_ptr]) \n\t" + "swc1 %[f10], 52(%[a_ptr]) \n\t" + "swc1 %[f7], 40(%[a_ptr]) \n\t" + "swc1 %[f11], 44(%[a_ptr]) \n\t" + "swc1 %[f2], 56(%[a_ptr]) \n\t" + "swc1 %[f13], 60(%[a_ptr]) \n\t" + "addiu %[count], %[count], -1 \n\t" + "lwc1 %[f9], 8(%[p1_rdft]) \n\t" + "addiu %[a_ptr], %[a_ptr], 64 \n\t" + "addiu %[p1_rdft], %[p1_rdft], 8 \n\t" + "addiu %[p2_rdft], %[p2_rdft], 16 \n\t" + "addiu %[first], %[first], 8 \n\t" + "bgtz %[count], 1b \n\t" + " addiu %[second], %[second], 8 \n\t" + ".set pop \n\t" + : [f0] "=&f"(f0), [f1] "=&f"(f1), [f2] "=&f"(f2), [f3] "=&f"(f3), + [f4] "=&f"(f4), [f5] "=&f"(f5), [f6] "=&f"(f6), [f7] "=&f"(f7), + [f8] "=&f"(f8), [f9] "=&f"(f9), [f10] "=&f"(f10), [f11] "=&f"(f11), + [f12] "=&f"(f12), [f13] "=&f"(f13), [f14] "=&f"(f14), + [a_ptr] "=&r"(a_ptr), [p1_rdft] "=&r"(p1_rdft), [first] "+r"(first), + [p2_rdft] "=&r"(p2_rdft), [count] "=&r"(count), [second] "+r"(second) + : [a] "r"(a), [rdft_w] "r"(rdft_w) + : "memory"); +} + +void cftmdl_128_mips(float* a) { + float f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14; + int tmp_a, count; + __asm __volatile( + ".set push \n\t" + ".set noreorder \n\t" + "addiu %[tmp_a], %[a], 0 \n\t" + "addiu %[count], $zero, 4 \n\t" + "1: \n\t" + "addiu %[count], %[count], -1 \n\t" + "lwc1 %[f0], 0(%[tmp_a]) \n\t" + "lwc1 %[f2], 32(%[tmp_a]) \n\t" + "lwc1 %[f4], 64(%[tmp_a]) \n\t" + "lwc1 %[f6], 96(%[tmp_a]) \n\t" + "lwc1 %[f1], 4(%[tmp_a]) \n\t" + "lwc1 %[f3], 36(%[tmp_a]) \n\t" + "lwc1 %[f5], 68(%[tmp_a]) \n\t" + "lwc1 %[f7], 100(%[tmp_a]) \n\t" + "add.s %[f8], %[f0], %[f2] \n\t" + "sub.s %[f0], %[f0], %[f2] \n\t" + "add.s %[f2], %[f4], %[f6] \n\t" + "sub.s %[f4], %[f4], %[f6] \n\t" + "add.s %[f6], %[f1], %[f3] \n\t" + "sub.s %[f1], %[f1], %[f3] \n\t" + "add.s %[f3], %[f5], %[f7] \n\t" + "sub.s %[f5], %[f5], %[f7] \n\t" + "add.s %[f7], %[f8], %[f2] \n\t" + "sub.s %[f8], %[f8], %[f2] \n\t" + "add.s %[f2], %[f1], %[f4] \n\t" + "sub.s %[f1], %[f1], %[f4] \n\t" + "add.s %[f4], %[f6], %[f3] \n\t" + "sub.s %[f6], %[f6], %[f3] \n\t" + "sub.s %[f3], %[f0], %[f5] \n\t" + "add.s %[f0], %[f0], %[f5] \n\t" + "swc1 %[f7], 0(%[tmp_a]) \n\t" + "swc1 %[f8], 64(%[tmp_a]) \n\t" + "swc1 %[f2], 36(%[tmp_a]) \n\t" + "swc1 %[f1], 100(%[tmp_a]) \n\t" + "swc1 %[f4], 4(%[tmp_a]) \n\t" + "swc1 %[f6], 68(%[tmp_a]) \n\t" + "swc1 %[f3], 32(%[tmp_a]) \n\t" + "swc1 %[f0], 96(%[tmp_a]) \n\t" + "bgtz %[count], 1b \n\t" + " addiu %[tmp_a], %[tmp_a], 8 \n\t" + ".set pop \n\t" + : [f0] "=&f"(f0), [f1] "=&f"(f1), [f2] "=&f"(f2), [f3] "=&f"(f3), + [f4] "=&f"(f4), [f5] "=&f"(f5), [f6] "=&f"(f6), [f7] "=&f"(f7), + [f8] "=&f"(f8), [tmp_a] "=&r"(tmp_a), [count] "=&r"(count) + : [a] "r"(a) + : "memory"); + f9 = rdft_w[2]; + __asm __volatile( + ".set push \n\t" + ".set noreorder \n\t" + "addiu %[tmp_a], %[a], 128 \n\t" + "addiu %[count], $zero, 4 \n\t" + "1: \n\t" + "addiu %[count], %[count], -1 \n\t" + "lwc1 %[f0], 0(%[tmp_a]) \n\t" + "lwc1 %[f2], 32(%[tmp_a]) \n\t" + "lwc1 %[f5], 68(%[tmp_a]) \n\t" + "lwc1 %[f7], 100(%[tmp_a]) \n\t" + "lwc1 %[f1], 4(%[tmp_a]) \n\t" + "lwc1 %[f3], 36(%[tmp_a]) \n\t" + "lwc1 %[f4], 64(%[tmp_a]) \n\t" + "lwc1 %[f6], 96(%[tmp_a]) \n\t" + "sub.s %[f8], %[f0], %[f2] \n\t" + 
"add.s %[f0], %[f0], %[f2] \n\t" + "sub.s %[f2], %[f5], %[f7] \n\t" + "add.s %[f5], %[f5], %[f7] \n\t" + "sub.s %[f7], %[f1], %[f3] \n\t" + "add.s %[f1], %[f1], %[f3] \n\t" + "sub.s %[f3], %[f4], %[f6] \n\t" + "add.s %[f4], %[f4], %[f6] \n\t" + "sub.s %[f6], %[f8], %[f2] \n\t" + "add.s %[f8], %[f8], %[f2] \n\t" + "add.s %[f2], %[f5], %[f1] \n\t" + "sub.s %[f5], %[f5], %[f1] \n\t" + "add.s %[f1], %[f3], %[f7] \n\t" + "sub.s %[f3], %[f3], %[f7] \n\t" + "add.s %[f7], %[f0], %[f4] \n\t" + "sub.s %[f0], %[f0], %[f4] \n\t" + "sub.s %[f4], %[f6], %[f1] \n\t" + "add.s %[f6], %[f6], %[f1] \n\t" + "sub.s %[f1], %[f3], %[f8] \n\t" + "add.s %[f3], %[f3], %[f8] \n\t" + "mul.s %[f4], %[f4], %[f9] \n\t" + "mul.s %[f6], %[f6], %[f9] \n\t" + "mul.s %[f1], %[f1], %[f9] \n\t" + "mul.s %[f3], %[f3], %[f9] \n\t" + "swc1 %[f7], 0(%[tmp_a]) \n\t" + "swc1 %[f2], 4(%[tmp_a]) \n\t" + "swc1 %[f5], 64(%[tmp_a]) \n\t" + "swc1 %[f0], 68(%[tmp_a]) \n\t" + "swc1 %[f4], 32(%[tmp_a]) \n\t" + "swc1 %[f6], 36(%[tmp_a]) \n\t" + "swc1 %[f1], 96(%[tmp_a]) \n\t" + "swc1 %[f3], 100(%[tmp_a]) \n\t" + "bgtz %[count], 1b \n\t" + " addiu %[tmp_a], %[tmp_a], 8 \n\t" + ".set pop \n\t" + : [f0] "=&f"(f0), [f1] "=&f"(f1), [f2] "=&f"(f2), [f3] "=&f"(f3), + [f4] "=&f"(f4), [f5] "=&f"(f5), [f6] "=&f"(f6), [f7] "=&f"(f7), + [f8] "=&f"(f8), [tmp_a] "=&r"(tmp_a), [count] "=&r"(count) + : [a] "r"(a), [f9] "f"(f9) + : "memory"); + f10 = rdft_w[3]; + f11 = rdft_w[4]; + f12 = rdft_w[5]; + f13 = rdft_wk3ri_first[2]; + f14 = rdft_wk3ri_first[3]; + + __asm __volatile( + ".set push \n\t" + ".set noreorder \n\t" + "addiu %[tmp_a], %[a], 256 \n\t" + "addiu %[count], $zero, 4 \n\t" + "1: \n\t" + "addiu %[count], %[count], -1 \n\t" + "lwc1 %[f0], 0(%[tmp_a]) \n\t" + "lwc1 %[f2], 32(%[tmp_a]) \n\t" + "lwc1 %[f4], 64(%[tmp_a]) \n\t" + "lwc1 %[f6], 96(%[tmp_a]) \n\t" + "lwc1 %[f1], 4(%[tmp_a]) \n\t" + "lwc1 %[f3], 36(%[tmp_a]) \n\t" + "lwc1 %[f5], 68(%[tmp_a]) \n\t" + "lwc1 %[f7], 100(%[tmp_a]) \n\t" + "add.s %[f8], %[f0], %[f2] \n\t" + "sub.s %[f0], %[f0], %[f2] \n\t" + "add.s %[f2], %[f4], %[f6] \n\t" + "sub.s %[f4], %[f4], %[f6] \n\t" + "add.s %[f6], %[f1], %[f3] \n\t" + "sub.s %[f1], %[f1], %[f3] \n\t" + "add.s %[f3], %[f5], %[f7] \n\t" + "sub.s %[f5], %[f5], %[f7] \n\t" + "sub.s %[f7], %[f8], %[f2] \n\t" + "add.s %[f8], %[f8], %[f2] \n\t" + "add.s %[f2], %[f1], %[f4] \n\t" + "sub.s %[f1], %[f1], %[f4] \n\t" + "sub.s %[f4], %[f6], %[f3] \n\t" + "add.s %[f6], %[f6], %[f3] \n\t" + "sub.s %[f3], %[f0], %[f5] \n\t" + "add.s %[f0], %[f0], %[f5] \n\t" + "swc1 %[f8], 0(%[tmp_a]) \n\t" + "swc1 %[f6], 4(%[tmp_a]) \n\t" + "mul.s %[f5], %[f9], %[f7] \n\t" +#if defined(MIPS32_R2_LE) + "mul.s %[f7], %[f10], %[f7] \n\t" + "mul.s %[f8], %[f11], %[f3] \n\t" + "mul.s %[f3], %[f12], %[f3] \n\t" + "mul.s %[f6], %[f13], %[f0] \n\t" + "mul.s %[f0], %[f14], %[f0] \n\t" + "nmsub.s %[f5], %[f5], %[f10], %[f4] \n\t" + "madd.s %[f7], %[f7], %[f9], %[f4] \n\t" + "nmsub.s %[f8], %[f8], %[f12], %[f2] \n\t" + "madd.s %[f3], %[f3], %[f11], %[f2] \n\t" + "nmsub.s %[f6], %[f6], %[f14], %[f1] \n\t" + "madd.s %[f0], %[f0], %[f13], %[f1] \n\t" + "swc1 %[f5], 64(%[tmp_a]) \n\t" + "swc1 %[f7], 68(%[tmp_a]) \n\t" +#else + "mul.s %[f8], %[f10], %[f4] \n\t" + "mul.s %[f4], %[f9], %[f4] \n\t" + "mul.s %[f7], %[f10], %[f7] \n\t" + "mul.s %[f6], %[f11], %[f3] \n\t" + "mul.s %[f3], %[f12], %[f3] \n\t" + "sub.s %[f5], %[f5], %[f8] \n\t" + "mul.s %[f8], %[f12], %[f2] \n\t" + "mul.s %[f2], %[f11], %[f2] \n\t" + "add.s %[f7], %[f4], %[f7] \n\t" + "mul.s %[f4], %[f13], %[f0] \n\t" + "mul.s %[f0], 
%[f14], %[f0] \n\t" + "sub.s %[f8], %[f6], %[f8] \n\t" + "mul.s %[f6], %[f14], %[f1] \n\t" + "mul.s %[f1], %[f13], %[f1] \n\t" + "add.s %[f3], %[f2], %[f3] \n\t" + "swc1 %[f5], 64(%[tmp_a]) \n\t" + "swc1 %[f7], 68(%[tmp_a]) \n\t" + "sub.s %[f6], %[f4], %[f6] \n\t" + "add.s %[f0], %[f1], %[f0] \n\t" +#endif + "swc1 %[f8], 32(%[tmp_a]) \n\t" + "swc1 %[f3], 36(%[tmp_a]) \n\t" + "swc1 %[f6], 96(%[tmp_a]) \n\t" + "swc1 %[f0], 100(%[tmp_a]) \n\t" + "bgtz %[count], 1b \n\t" + " addiu %[tmp_a], %[tmp_a], 8 \n\t" + ".set pop \n\t" + : [f0] "=&f"(f0), [f1] "=&f"(f1), [f2] "=&f"(f2), [f3] "=&f"(f3), + [f4] "=&f"(f4), [f5] "=&f"(f5), [f6] "=&f"(f6), [f7] "=&f"(f7), + [f8] "=&f"(f8), [tmp_a] "=&r"(tmp_a), [count] "=&r"(count) + : [a] "r"(a), [f9] "f"(f9), [f10] "f"(f10), [f11] "f"(f11), + [f12] "f"(f12), [f13] "f"(f13), [f14] "f"(f14) + : "memory"); + f11 = rdft_w[6]; + f12 = rdft_w[7]; + f13 = rdft_wk3ri_second[2]; + f14 = rdft_wk3ri_second[3]; + __asm __volatile( + ".set push " + "\n\t" + ".set noreorder " + "\n\t" + "addiu %[tmp_a], %[a], 384 " + "\n\t" + "addiu %[count], $zero, 4 " + "\n\t" + "1: " + "\n\t" + "addiu %[count], %[count], -1 " + "\n\t" + "lwc1 %[f0], 0(%[tmp_a]) " + "\n\t" + "lwc1 %[f1], 4(%[tmp_a]) " + "\n\t" + "lwc1 %[f2], 32(%[tmp_a]) " + "\n\t" + "lwc1 %[f3], 36(%[tmp_a]) " + "\n\t" + "lwc1 %[f4], 64(%[tmp_a]) " + "\n\t" + "lwc1 %[f5], 68(%[tmp_a]) " + "\n\t" + "lwc1 %[f6], 96(%[tmp_a]) " + "\n\t" + "lwc1 %[f7], 100(%[tmp_a]) " + "\n\t" + "add.s %[f8], %[f0], %[f2] " + "\n\t" + "sub.s %[f0], %[f0], %[f2] " + "\n\t" + "add.s %[f2], %[f4], %[f6] " + "\n\t" + "sub.s %[f4], %[f4], %[f6] " + "\n\t" + "add.s %[f6], %[f1], %[f3] " + "\n\t" + "sub.s %[f1], %[f1], %[f3] " + "\n\t" + "add.s %[f3], %[f5], %[f7] " + "\n\t" + "sub.s %[f5], %[f5], %[f7] " + "\n\t" + "sub.s %[f7], %[f2], %[f8] " + "\n\t" + "add.s %[f2], %[f2], %[f8] " + "\n\t" + "add.s %[f8], %[f1], %[f4] " + "\n\t" + "sub.s %[f1], %[f1], %[f4] " + "\n\t" + "sub.s %[f4], %[f3], %[f6] " + "\n\t" + "add.s %[f3], %[f3], %[f6] " + "\n\t" + "sub.s %[f6], %[f0], %[f5] " + "\n\t" + "add.s %[f0], %[f0], %[f5] " + "\n\t" + "swc1 %[f2], 0(%[tmp_a]) " + "\n\t" + "swc1 %[f3], 4(%[tmp_a]) " + "\n\t" + "mul.s %[f5], %[f10], %[f7] " + "\n\t" +#if defined(MIPS32_R2_LE) + "mul.s %[f7], %[f9], %[f7] " + "\n\t" + "mul.s %[f2], %[f12], %[f8] " + "\n\t" + "mul.s %[f8], %[f11], %[f8] " + "\n\t" + "mul.s %[f3], %[f14], %[f1] " + "\n\t" + "mul.s %[f1], %[f13], %[f1] " + "\n\t" + "madd.s %[f5], %[f5], %[f9], %[f4] " + "\n\t" + "msub.s %[f7], %[f7], %[f10], %[f4] " + "\n\t" + "msub.s %[f2], %[f2], %[f11], %[f6] " + "\n\t" + "madd.s %[f8], %[f8], %[f12], %[f6] " + "\n\t" + "msub.s %[f3], %[f3], %[f13], %[f0] " + "\n\t" + "madd.s %[f1], %[f1], %[f14], %[f0] " + "\n\t" + "swc1 %[f5], 64(%[tmp_a]) " + "\n\t" + "swc1 %[f7], 68(%[tmp_a]) " + "\n\t" +#else + "mul.s %[f2], %[f9], %[f4] " + "\n\t" + "mul.s %[f4], %[f10], %[f4] " + "\n\t" + "mul.s %[f7], %[f9], %[f7] " + "\n\t" + "mul.s %[f3], %[f11], %[f6] " + "\n\t" + "mul.s %[f6], %[f12], %[f6] " + "\n\t" + "add.s %[f5], %[f5], %[f2] " + "\n\t" + "sub.s %[f7], %[f4], %[f7] " + "\n\t" + "mul.s %[f2], %[f12], %[f8] " + "\n\t" + "mul.s %[f8], %[f11], %[f8] " + "\n\t" + "mul.s %[f4], %[f14], %[f1] " + "\n\t" + "mul.s %[f1], %[f13], %[f1] " + "\n\t" + "sub.s %[f2], %[f3], %[f2] " + "\n\t" + "mul.s %[f3], %[f13], %[f0] " + "\n\t" + "mul.s %[f0], %[f14], %[f0] " + "\n\t" + "add.s %[f8], %[f8], %[f6] " + "\n\t" + "swc1 %[f5], 64(%[tmp_a]) " + "\n\t" + "swc1 %[f7], 68(%[tmp_a]) " + "\n\t" + "sub.s %[f3], %[f3], %[f4] " 
+ "\n\t" + "add.s %[f1], %[f1], %[f0] " + "\n\t" +#endif + "swc1 %[f2], 32(%[tmp_a]) " + "\n\t" + "swc1 %[f8], 36(%[tmp_a]) " + "\n\t" + "swc1 %[f3], 96(%[tmp_a]) " + "\n\t" + "swc1 %[f1], 100(%[tmp_a]) " + "\n\t" + "bgtz %[count], 1b " + "\n\t" + " addiu %[tmp_a], %[tmp_a], 8 " + "\n\t" + ".set pop " + "\n\t" + : [f0] "=&f"(f0), [f1] "=&f"(f1), [f2] "=&f"(f2), [f3] "=&f"(f3), + [f4] "=&f"(f4), [f5] "=&f"(f5), [f6] "=&f"(f6), [f7] "=&f"(f7), + [f8] "=&f"(f8), [tmp_a] "=&r"(tmp_a), [count] "=&r"(count) + : [a] "r"(a), [f9] "f"(f9), [f10] "f"(f10), [f11] "f"(f11), + [f12] "f"(f12), [f13] "f"(f13), [f14] "f"(f14) + : "memory"); +} + +void cftfsub_128_mips(float* a) { + float f0, f1, f2, f3, f4, f5, f6, f7, f8; + int tmp_a, count; + + cft1st_128_mips(a); + cftmdl_128_mips(a); + + __asm __volatile( + ".set push \n\t" + ".set noreorder \n\t" + "addiu %[tmp_a], %[a], 0 \n\t" + "addiu %[count], $zero, 16 \n\t" + "1: \n\t" + "addiu %[count], %[count], -1 \n\t" + "lwc1 %[f0], 0(%[tmp_a]) \n\t" + "lwc1 %[f2], 128(%[tmp_a]) \n\t" + "lwc1 %[f4], 256(%[tmp_a]) \n\t" + "lwc1 %[f6], 384(%[tmp_a]) \n\t" + "lwc1 %[f1], 4(%[tmp_a]) \n\t" + "lwc1 %[f3], 132(%[tmp_a]) \n\t" + "lwc1 %[f5], 260(%[tmp_a]) \n\t" + "lwc1 %[f7], 388(%[tmp_a]) \n\t" + "add.s %[f8], %[f0], %[f2] \n\t" + "sub.s %[f0], %[f0], %[f2] \n\t" + "add.s %[f2], %[f4], %[f6] \n\t" + "sub.s %[f4], %[f4], %[f6] \n\t" + "add.s %[f6], %[f1], %[f3] \n\t" + "sub.s %[f1], %[f1], %[f3] \n\t" + "add.s %[f3], %[f5], %[f7] \n\t" + "sub.s %[f5], %[f5], %[f7] \n\t" + "add.s %[f7], %[f8], %[f2] \n\t" + "sub.s %[f8], %[f8], %[f2] \n\t" + "add.s %[f2], %[f1], %[f4] \n\t" + "sub.s %[f1], %[f1], %[f4] \n\t" + "add.s %[f4], %[f6], %[f3] \n\t" + "sub.s %[f6], %[f6], %[f3] \n\t" + "sub.s %[f3], %[f0], %[f5] \n\t" + "add.s %[f0], %[f0], %[f5] \n\t" + "swc1 %[f7], 0(%[tmp_a]) \n\t" + "swc1 %[f8], 256(%[tmp_a]) \n\t" + "swc1 %[f2], 132(%[tmp_a]) \n\t" + "swc1 %[f1], 388(%[tmp_a]) \n\t" + "swc1 %[f4], 4(%[tmp_a]) \n\t" + "swc1 %[f6], 260(%[tmp_a]) \n\t" + "swc1 %[f3], 128(%[tmp_a]) \n\t" + "swc1 %[f0], 384(%[tmp_a]) \n\t" + "bgtz %[count], 1b \n\t" + " addiu %[tmp_a], %[tmp_a], 8 \n\t" + ".set pop \n\t" + : [f0] "=&f"(f0), [f1] "=&f"(f1), [f2] "=&f"(f2), [f3] "=&f"(f3), + [f4] "=&f"(f4), [f5] "=&f"(f5), [f6] "=&f"(f6), [f7] "=&f"(f7), + [f8] "=&f"(f8), [tmp_a] "=&r"(tmp_a), [count] "=&r"(count) + : [a] "r"(a) + : "memory"); +} + +void cftbsub_128_mips(float* a) { + float f0, f1, f2, f3, f4, f5, f6, f7, f8; + int tmp_a, count; + + cft1st_128_mips(a); + cftmdl_128_mips(a); + + __asm __volatile( + ".set push \n\t" + ".set noreorder \n\t" + "addiu %[tmp_a], %[a], 0 \n\t" + "addiu %[count], $zero, 16 \n\t" + "1: \n\t" + "addiu %[count], %[count], -1 \n\t" + "lwc1 %[f0], 0(%[tmp_a]) \n\t" + "lwc1 %[f2], 128(%[tmp_a]) \n\t" + "lwc1 %[f4], 256(%[tmp_a]) \n\t" + "lwc1 %[f6], 384(%[tmp_a]) \n\t" + "lwc1 %[f1], 4(%[tmp_a]) \n\t" + "lwc1 %[f3], 132(%[tmp_a]) \n\t" + "lwc1 %[f5], 260(%[tmp_a]) \n\t" + "lwc1 %[f7], 388(%[tmp_a]) \n\t" + "add.s %[f8], %[f0], %[f2] \n\t" + "sub.s %[f0], %[f0], %[f2] \n\t" + "add.s %[f2], %[f4], %[f6] \n\t" + "sub.s %[f4], %[f4], %[f6] \n\t" + "add.s %[f6], %[f1], %[f3] \n\t" + "sub.s %[f1], %[f3], %[f1] \n\t" + "add.s %[f3], %[f5], %[f7] \n\t" + "sub.s %[f5], %[f5], %[f7] \n\t" + "add.s %[f7], %[f8], %[f2] \n\t" + "sub.s %[f8], %[f8], %[f2] \n\t" + "sub.s %[f2], %[f1], %[f4] \n\t" + "add.s %[f1], %[f1], %[f4] \n\t" + "add.s %[f4], %[f3], %[f6] \n\t" + "sub.s %[f6], %[f3], %[f6] \n\t" + "sub.s %[f3], %[f0], %[f5] \n\t" + "add.s %[f0], %[f0], %[f5] \n\t" 
+ "neg.s %[f4], %[f4] \n\t" + "swc1 %[f7], 0(%[tmp_a]) \n\t" + "swc1 %[f8], 256(%[tmp_a]) \n\t" + "swc1 %[f2], 132(%[tmp_a]) \n\t" + "swc1 %[f1], 388(%[tmp_a]) \n\t" + "swc1 %[f6], 260(%[tmp_a]) \n\t" + "swc1 %[f3], 128(%[tmp_a]) \n\t" + "swc1 %[f0], 384(%[tmp_a]) \n\t" + "swc1 %[f4], 4(%[tmp_a]) \n\t" + "bgtz %[count], 1b \n\t" + " addiu %[tmp_a], %[tmp_a], 8 \n\t" + ".set pop \n\t" + : [f0] "=&f"(f0), [f1] "=&f"(f1), [f2] "=&f"(f2), [f3] "=&f"(f3), + [f4] "=&f"(f4), [f5] "=&f"(f5), [f6] "=&f"(f6), [f7] "=&f"(f7), + [f8] "=&f"(f8), [tmp_a] "=&r"(tmp_a), [count] "=&r"(count) + : [a] "r"(a) + : "memory"); +} + +void rftfsub_128_mips(float* a) { + const float* c = rdft_w + 32; + const float f0 = 0.5f; + float* a1 = &a[2]; + float* a2 = &a[126]; + const float* c1 = &c[1]; + const float* c2 = &c[31]; + float f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15; + int count; + + __asm __volatile( + ".set push \n\t" + ".set noreorder \n\t" + "lwc1 %[f6], 0(%[c2]) \n\t" + "lwc1 %[f1], 0(%[a1]) \n\t" + "lwc1 %[f2], 0(%[a2]) \n\t" + "lwc1 %[f3], 4(%[a1]) \n\t" + "lwc1 %[f4], 4(%[a2]) \n\t" + "lwc1 %[f5], 0(%[c1]) \n\t" + "sub.s %[f6], %[f0], %[f6] \n\t" + "sub.s %[f7], %[f1], %[f2] \n\t" + "add.s %[f8], %[f3], %[f4] \n\t" + "addiu %[count], $zero, 15 \n\t" + "mul.s %[f9], %[f6], %[f7] \n\t" + "mul.s %[f6], %[f6], %[f8] \n\t" +#if !defined(MIPS32_R2_LE) + "mul.s %[f8], %[f5], %[f8] \n\t" + "mul.s %[f5], %[f5], %[f7] \n\t" + "sub.s %[f9], %[f9], %[f8] \n\t" + "add.s %[f6], %[f6], %[f5] \n\t" +#else + "nmsub.s %[f9], %[f9], %[f5], %[f8] \n\t" + "madd.s %[f6], %[f6], %[f5], %[f7] \n\t" +#endif + "sub.s %[f1], %[f1], %[f9] \n\t" + "add.s %[f2], %[f2], %[f9] \n\t" + "sub.s %[f3], %[f3], %[f6] \n\t" + "sub.s %[f4], %[f4], %[f6] \n\t" + "swc1 %[f1], 0(%[a1]) \n\t" + "swc1 %[f2], 0(%[a2]) \n\t" + "swc1 %[f3], 4(%[a1]) \n\t" + "swc1 %[f4], 4(%[a2]) \n\t" + "addiu %[a1], %[a1], 8 \n\t" + "addiu %[a2], %[a2], -8 \n\t" + "addiu %[c1], %[c1], 4 \n\t" + "addiu %[c2], %[c2], -4 \n\t" + "1: \n\t" + "lwc1 %[f6], 0(%[c2]) \n\t" + "lwc1 %[f1], 0(%[a1]) \n\t" + "lwc1 %[f2], 0(%[a2]) \n\t" + "lwc1 %[f3], 4(%[a1]) \n\t" + "lwc1 %[f4], 4(%[a2]) \n\t" + "lwc1 %[f5], 0(%[c1]) \n\t" + "sub.s %[f6], %[f0], %[f6] \n\t" + "sub.s %[f7], %[f1], %[f2] \n\t" + "add.s %[f8], %[f3], %[f4] \n\t" + "lwc1 %[f10], -4(%[c2]) \n\t" + "lwc1 %[f11], 8(%[a1]) \n\t" + "lwc1 %[f12], -8(%[a2]) \n\t" + "mul.s %[f9], %[f6], %[f7] \n\t" + "mul.s %[f6], %[f6], %[f8] \n\t" +#if !defined(MIPS32_R2_LE) + "mul.s %[f8], %[f5], %[f8] \n\t" + "mul.s %[f5], %[f5], %[f7] \n\t" + "lwc1 %[f13], 12(%[a1]) \n\t" + "lwc1 %[f14], -4(%[a2]) \n\t" + "lwc1 %[f15], 4(%[c1]) \n\t" + "sub.s %[f9], %[f9], %[f8] \n\t" + "add.s %[f6], %[f6], %[f5] \n\t" +#else + "lwc1 %[f13], 12(%[a1]) \n\t" + "lwc1 %[f14], -4(%[a2]) \n\t" + "lwc1 %[f15], 4(%[c1]) \n\t" + "nmsub.s %[f9], %[f9], %[f5], %[f8] \n\t" + "madd.s %[f6], %[f6], %[f5], %[f7] \n\t" +#endif + "sub.s %[f10], %[f0], %[f10] \n\t" + "sub.s %[f5], %[f11], %[f12] \n\t" + "add.s %[f7], %[f13], %[f14] \n\t" + "sub.s %[f1], %[f1], %[f9] \n\t" + "add.s %[f2], %[f2], %[f9] \n\t" + "sub.s %[f3], %[f3], %[f6] \n\t" + "mul.s %[f8], %[f10], %[f5] \n\t" + "mul.s %[f10], %[f10], %[f7] \n\t" +#if !defined(MIPS32_R2_LE) + "mul.s %[f9], %[f15], %[f7] \n\t" + "mul.s %[f15], %[f15], %[f5] \n\t" + "sub.s %[f4], %[f4], %[f6] \n\t" + "swc1 %[f1], 0(%[a1]) \n\t" + "swc1 %[f2], 0(%[a2]) \n\t" + "sub.s %[f8], %[f8], %[f9] \n\t" + "add.s %[f10], %[f10], %[f15] \n\t" +#else + "swc1 %[f1], 0(%[a1]) \n\t" + "swc1 %[f2], 0(%[a2]) \n\t" + 
"sub.s %[f4], %[f4], %[f6] \n\t" + "nmsub.s %[f8], %[f8], %[f15], %[f7] \n\t" + "madd.s %[f10], %[f10], %[f15], %[f5] \n\t" +#endif + "swc1 %[f3], 4(%[a1]) \n\t" + "swc1 %[f4], 4(%[a2]) \n\t" + "sub.s %[f11], %[f11], %[f8] \n\t" + "add.s %[f12], %[f12], %[f8] \n\t" + "sub.s %[f13], %[f13], %[f10] \n\t" + "sub.s %[f14], %[f14], %[f10] \n\t" + "addiu %[c2], %[c2], -8 \n\t" + "addiu %[c1], %[c1], 8 \n\t" + "swc1 %[f11], 8(%[a1]) \n\t" + "swc1 %[f12], -8(%[a2]) \n\t" + "swc1 %[f13], 12(%[a1]) \n\t" + "swc1 %[f14], -4(%[a2]) \n\t" + "addiu %[a1], %[a1], 16 \n\t" + "addiu %[count], %[count], -1 \n\t" + "bgtz %[count], 1b \n\t" + " addiu %[a2], %[a2], -16 \n\t" + ".set pop \n\t" + : [a1] "+r"(a1), [a2] "+r"(a2), [c1] "+r"(c1), [c2] "+r"(c2), + [f1] "=&f"(f1), [f2] "=&f"(f2), [f3] "=&f"(f3), [f4] "=&f"(f4), + [f5] "=&f"(f5), [f6] "=&f"(f6), [f7] "=&f"(f7), [f8] "=&f"(f8), + [f9] "=&f"(f9), [f10] "=&f"(f10), [f11] "=&f"(f11), [f12] "=&f"(f12), + [f13] "=&f"(f13), [f14] "=&f"(f14), [f15] "=&f"(f15), + [count] "=&r"(count) + : [f0] "f"(f0) + : "memory"); +} + +void rftbsub_128_mips(float* a) { + const float* c = rdft_w + 32; + const float f0 = 0.5f; + float* a1 = &a[2]; + float* a2 = &a[126]; + const float* c1 = &c[1]; + const float* c2 = &c[31]; + float f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15; + int count; + + a[1] = -a[1]; + a[65] = -a[65]; + + __asm __volatile( + ".set push \n\t" + ".set noreorder \n\t" + "lwc1 %[f6], 0(%[c2]) \n\t" + "lwc1 %[f1], 0(%[a1]) \n\t" + "lwc1 %[f2], 0(%[a2]) \n\t" + "lwc1 %[f3], 4(%[a1]) \n\t" + "lwc1 %[f4], 4(%[a2]) \n\t" + "lwc1 %[f5], 0(%[c1]) \n\t" + "sub.s %[f6], %[f0], %[f6] \n\t" + "sub.s %[f7], %[f1], %[f2] \n\t" + "add.s %[f8], %[f3], %[f4] \n\t" + "addiu %[count], $zero, 15 \n\t" + "mul.s %[f9], %[f6], %[f7] \n\t" + "mul.s %[f6], %[f6], %[f8] \n\t" +#if !defined(MIPS32_R2_LE) + "mul.s %[f8], %[f5], %[f8] \n\t" + "mul.s %[f5], %[f5], %[f7] \n\t" + "add.s %[f9], %[f9], %[f8] \n\t" + "sub.s %[f6], %[f6], %[f5] \n\t" +#else + "madd.s %[f9], %[f9], %[f5], %[f8] \n\t" + "nmsub.s %[f6], %[f6], %[f5], %[f7] \n\t" +#endif + "sub.s %[f1], %[f1], %[f9] \n\t" + "add.s %[f2], %[f2], %[f9] \n\t" + "sub.s %[f3], %[f6], %[f3] \n\t" + "sub.s %[f4], %[f6], %[f4] \n\t" + "swc1 %[f1], 0(%[a1]) \n\t" + "swc1 %[f2], 0(%[a2]) \n\t" + "swc1 %[f3], 4(%[a1]) \n\t" + "swc1 %[f4], 4(%[a2]) \n\t" + "addiu %[a1], %[a1], 8 \n\t" + "addiu %[a2], %[a2], -8 \n\t" + "addiu %[c1], %[c1], 4 \n\t" + "addiu %[c2], %[c2], -4 \n\t" + "1: \n\t" + "lwc1 %[f6], 0(%[c2]) \n\t" + "lwc1 %[f1], 0(%[a1]) \n\t" + "lwc1 %[f2], 0(%[a2]) \n\t" + "lwc1 %[f3], 4(%[a1]) \n\t" + "lwc1 %[f4], 4(%[a2]) \n\t" + "lwc1 %[f5], 0(%[c1]) \n\t" + "sub.s %[f6], %[f0], %[f6] \n\t" + "sub.s %[f7], %[f1], %[f2] \n\t" + "add.s %[f8], %[f3], %[f4] \n\t" + "lwc1 %[f10], -4(%[c2]) \n\t" + "lwc1 %[f11], 8(%[a1]) \n\t" + "lwc1 %[f12], -8(%[a2]) \n\t" + "mul.s %[f9], %[f6], %[f7] \n\t" + "mul.s %[f6], %[f6], %[f8] \n\t" +#if !defined(MIPS32_R2_LE) + "mul.s %[f8], %[f5], %[f8] \n\t" + "mul.s %[f5], %[f5], %[f7] \n\t" + "lwc1 %[f13], 12(%[a1]) \n\t" + "lwc1 %[f14], -4(%[a2]) \n\t" + "lwc1 %[f15], 4(%[c1]) \n\t" + "add.s %[f9], %[f9], %[f8] \n\t" + "sub.s %[f6], %[f6], %[f5] \n\t" +#else + "lwc1 %[f13], 12(%[a1]) \n\t" + "lwc1 %[f14], -4(%[a2]) \n\t" + "lwc1 %[f15], 4(%[c1]) \n\t" + "madd.s %[f9], %[f9], %[f5], %[f8] \n\t" + "nmsub.s %[f6], %[f6], %[f5], %[f7] \n\t" +#endif + "sub.s %[f10], %[f0], %[f10] \n\t" + "sub.s %[f5], %[f11], %[f12] \n\t" + "add.s %[f7], %[f13], %[f14] \n\t" + "sub.s %[f1], %[f1], %[f9] 
\n\t" + "add.s %[f2], %[f2], %[f9] \n\t" + "sub.s %[f3], %[f6], %[f3] \n\t" + "mul.s %[f8], %[f10], %[f5] \n\t" + "mul.s %[f10], %[f10], %[f7] \n\t" +#if !defined(MIPS32_R2_LE) + "mul.s %[f9], %[f15], %[f7] \n\t" + "mul.s %[f15], %[f15], %[f5] \n\t" + "sub.s %[f4], %[f6], %[f4] \n\t" + "swc1 %[f1], 0(%[a1]) \n\t" + "swc1 %[f2], 0(%[a2]) \n\t" + "add.s %[f8], %[f8], %[f9] \n\t" + "sub.s %[f10], %[f10], %[f15] \n\t" +#else + "swc1 %[f1], 0(%[a1]) \n\t" + "swc1 %[f2], 0(%[a2]) \n\t" + "sub.s %[f4], %[f6], %[f4] \n\t" + "madd.s %[f8], %[f8], %[f15], %[f7] \n\t" + "nmsub.s %[f10], %[f10], %[f15], %[f5] \n\t" +#endif + "swc1 %[f3], 4(%[a1]) \n\t" + "swc1 %[f4], 4(%[a2]) \n\t" + "sub.s %[f11], %[f11], %[f8] \n\t" + "add.s %[f12], %[f12], %[f8] \n\t" + "sub.s %[f13], %[f10], %[f13] \n\t" + "sub.s %[f14], %[f10], %[f14] \n\t" + "addiu %[c2], %[c2], -8 \n\t" + "addiu %[c1], %[c1], 8 \n\t" + "swc1 %[f11], 8(%[a1]) \n\t" + "swc1 %[f12], -8(%[a2]) \n\t" + "swc1 %[f13], 12(%[a1]) \n\t" + "swc1 %[f14], -4(%[a2]) \n\t" + "addiu %[a1], %[a1], 16 \n\t" + "addiu %[count], %[count], -1 \n\t" + "bgtz %[count], 1b \n\t" + " addiu %[a2], %[a2], -16 \n\t" + ".set pop \n\t" + : [a1] "+r"(a1), [a2] "+r"(a2), [c1] "+r"(c1), [c2] "+r"(c2), + [f1] "=&f"(f1), [f2] "=&f"(f2), [f3] "=&f"(f3), [f4] "=&f"(f4), + [f5] "=&f"(f5), [f6] "=&f"(f6), [f7] "=&f"(f7), [f8] "=&f"(f8), + [f9] "=&f"(f9), [f10] "=&f"(f10), [f11] "=&f"(f11), [f12] "=&f"(f12), + [f13] "=&f"(f13), [f14] "=&f"(f14), [f15] "=&f"(f15), + [count] "=&r"(count) + : [f0] "f"(f0) + : "memory"); +} +#endif + +} // namespace webrtc diff --git a/audio_processing/utility/ooura_fft_neon.cc b/audio_processing/utility/ooura_fft_neon.cc new file mode 100644 index 0000000..366fea7 --- /dev/null +++ b/audio_processing/utility/ooura_fft_neon.cc @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* + * The rdft AEC algorithm, neon version of speed-critical functions. + * + * Based on the sse2 version. 
+ */
+
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+
+#include "audio_processing/utility/ooura_fft.h"
+#include "audio_processing/utility/ooura_fft_tables_common.h"
+#include "audio_processing/utility/ooura_fft_tables_neon_sse2.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_HAS_NEON)
+void cft1st_128_neon(float* a) {
+  const float32x4_t vec_swap_sign = vld1q_f32((float32_t*)k_swap_sign);
+  int j, k2;
+
+  for (k2 = 0, j = 0; j < 128; j += 16, k2 += 4) {
+    float32x4_t a00v = vld1q_f32(&a[j + 0]);
+    float32x4_t a04v = vld1q_f32(&a[j + 4]);
+    float32x4_t a08v = vld1q_f32(&a[j + 8]);
+    float32x4_t a12v = vld1q_f32(&a[j + 12]);
+    float32x4_t a01v = vcombine_f32(vget_low_f32(a00v), vget_low_f32(a08v));
+    float32x4_t a23v = vcombine_f32(vget_high_f32(a00v), vget_high_f32(a08v));
+    float32x4_t a45v = vcombine_f32(vget_low_f32(a04v), vget_low_f32(a12v));
+    float32x4_t a67v = vcombine_f32(vget_high_f32(a04v), vget_high_f32(a12v));
+    const float32x4_t wk1rv = vld1q_f32(&rdft_wk1r[k2]);
+    const float32x4_t wk1iv = vld1q_f32(&rdft_wk1i[k2]);
+    const float32x4_t wk2rv = vld1q_f32(&rdft_wk2r[k2]);
+    const float32x4_t wk2iv = vld1q_f32(&rdft_wk2i[k2]);
+    const float32x4_t wk3rv = vld1q_f32(&rdft_wk3r[k2]);
+    const float32x4_t wk3iv = vld1q_f32(&rdft_wk3i[k2]);
+    float32x4_t x0v = vaddq_f32(a01v, a23v);
+    const float32x4_t x1v = vsubq_f32(a01v, a23v);
+    const float32x4_t x2v = vaddq_f32(a45v, a67v);
+    const float32x4_t x3v = vsubq_f32(a45v, a67v);
+    const float32x4_t x3w = vrev64q_f32(x3v);
+    float32x4_t x0w;
+    a01v = vaddq_f32(x0v, x2v);
+    x0v = vsubq_f32(x0v, x2v);
+    x0w = vrev64q_f32(x0v);
+    a45v = vmulq_f32(wk2rv, x0v);
+    a45v = vmlaq_f32(a45v, wk2iv, x0w);
+    x0v = vmlaq_f32(x1v, x3w, vec_swap_sign);
+    x0w = vrev64q_f32(x0v);
+    a23v = vmulq_f32(wk1rv, x0v);
+    a23v = vmlaq_f32(a23v, wk1iv, x0w);
+    x0v = vmlsq_f32(x1v, x3w, vec_swap_sign);
+    x0w = vrev64q_f32(x0v);
+    a67v = vmulq_f32(wk3rv, x0v);
+    a67v = vmlaq_f32(a67v, wk3iv, x0w);
+    a00v = vcombine_f32(vget_low_f32(a01v), vget_low_f32(a23v));
+    a04v = vcombine_f32(vget_low_f32(a45v), vget_low_f32(a67v));
+    a08v = vcombine_f32(vget_high_f32(a01v), vget_high_f32(a23v));
+    a12v = vcombine_f32(vget_high_f32(a45v), vget_high_f32(a67v));
+    vst1q_f32(&a[j + 0], a00v);
+    vst1q_f32(&a[j + 4], a04v);
+    vst1q_f32(&a[j + 8], a08v);
+    vst1q_f32(&a[j + 12], a12v);
+  }
+}
+
+void cftmdl_128_neon(float* a) {
+  int j;
+  const int l = 8;
+  const float32x4_t vec_swap_sign = vld1q_f32((float32_t*)k_swap_sign);
+  float32x4_t wk1rv = vld1q_f32(cftmdl_wk1r);
+
+  for (j = 0; j < l; j += 2) {
+    const float32x2_t a_00 = vld1_f32(&a[j + 0]);
+    const float32x2_t a_08 = vld1_f32(&a[j + 8]);
+    const float32x2_t a_32 = vld1_f32(&a[j + 32]);
+    const float32x2_t a_40 = vld1_f32(&a[j + 40]);
+    const float32x4_t a_00_32 = vcombine_f32(a_00, a_32);
+    const float32x4_t a_08_40 = vcombine_f32(a_08, a_40);
+    const float32x4_t x0r0_0i0_0r1_x0i1 = vaddq_f32(a_00_32, a_08_40);
+    const float32x4_t x1r0_1i0_1r1_x1i1 = vsubq_f32(a_00_32, a_08_40);
+    const float32x2_t a_16 = vld1_f32(&a[j + 16]);
+    const float32x2_t a_24 = vld1_f32(&a[j + 24]);
+    const float32x2_t a_48 = vld1_f32(&a[j + 48]);
+    const float32x2_t a_56 = vld1_f32(&a[j + 56]);
+    const float32x4_t a_16_48 = vcombine_f32(a_16, a_48);
+    const float32x4_t a_24_56 = vcombine_f32(a_24, a_56);
+    const float32x4_t x2r0_2i0_2r1_x2i1 = vaddq_f32(a_16_48, a_24_56);
+    const float32x4_t x3r0_3i0_3r1_x3i1 = vsubq_f32(a_16_48, a_24_56);
+    const float32x4_t xx0 = vaddq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+    const float32x4_t xx1 =
vsubq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const float32x4_t x3i0_3r0_3i1_x3r1 = vrev64q_f32(x3r0_3i0_3r1_x3i1); + const float32x4_t x1_x3_add = + vmlaq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1); + const float32x4_t x1_x3_sub = + vmlsq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1); + const float32x2_t yy0_a = vdup_lane_f32(vget_high_f32(x1_x3_add), 0); + const float32x2_t yy0_s = vdup_lane_f32(vget_high_f32(x1_x3_sub), 0); + const float32x4_t yy0_as = vcombine_f32(yy0_a, yy0_s); + const float32x2_t yy1_a = vdup_lane_f32(vget_high_f32(x1_x3_add), 1); + const float32x2_t yy1_s = vdup_lane_f32(vget_high_f32(x1_x3_sub), 1); + const float32x4_t yy1_as = vcombine_f32(yy1_a, yy1_s); + const float32x4_t yy0 = vmlaq_f32(yy0_as, vec_swap_sign, yy1_as); + const float32x4_t yy4 = vmulq_f32(wk1rv, yy0); + const float32x4_t xx1_rev = vrev64q_f32(xx1); + const float32x4_t yy4_rev = vrev64q_f32(yy4); + + vst1_f32(&a[j + 0], vget_low_f32(xx0)); + vst1_f32(&a[j + 32], vget_high_f32(xx0)); + vst1_f32(&a[j + 16], vget_low_f32(xx1)); + vst1_f32(&a[j + 48], vget_high_f32(xx1_rev)); + + a[j + 48] = -a[j + 48]; + + vst1_f32(&a[j + 8], vget_low_f32(x1_x3_add)); + vst1_f32(&a[j + 24], vget_low_f32(x1_x3_sub)); + vst1_f32(&a[j + 40], vget_low_f32(yy4)); + vst1_f32(&a[j + 56], vget_high_f32(yy4_rev)); + } + + { + const int k = 64; + const int k1 = 2; + const int k2 = 2 * k1; + const float32x4_t wk2rv = vld1q_f32(&rdft_wk2r[k2 + 0]); + const float32x4_t wk2iv = vld1q_f32(&rdft_wk2i[k2 + 0]); + const float32x4_t wk1iv = vld1q_f32(&rdft_wk1i[k2 + 0]); + const float32x4_t wk3rv = vld1q_f32(&rdft_wk3r[k2 + 0]); + const float32x4_t wk3iv = vld1q_f32(&rdft_wk3i[k2 + 0]); + wk1rv = vld1q_f32(&rdft_wk1r[k2 + 0]); + for (j = k; j < l + k; j += 2) { + const float32x2_t a_00 = vld1_f32(&a[j + 0]); + const float32x2_t a_08 = vld1_f32(&a[j + 8]); + const float32x2_t a_32 = vld1_f32(&a[j + 32]); + const float32x2_t a_40 = vld1_f32(&a[j + 40]); + const float32x4_t a_00_32 = vcombine_f32(a_00, a_32); + const float32x4_t a_08_40 = vcombine_f32(a_08, a_40); + const float32x4_t x0r0_0i0_0r1_x0i1 = vaddq_f32(a_00_32, a_08_40); + const float32x4_t x1r0_1i0_1r1_x1i1 = vsubq_f32(a_00_32, a_08_40); + const float32x2_t a_16 = vld1_f32(&a[j + 16]); + const float32x2_t a_24 = vld1_f32(&a[j + 24]); + const float32x2_t a_48 = vld1_f32(&a[j + 48]); + const float32x2_t a_56 = vld1_f32(&a[j + 56]); + const float32x4_t a_16_48 = vcombine_f32(a_16, a_48); + const float32x4_t a_24_56 = vcombine_f32(a_24, a_56); + const float32x4_t x2r0_2i0_2r1_x2i1 = vaddq_f32(a_16_48, a_24_56); + const float32x4_t x3r0_3i0_3r1_x3i1 = vsubq_f32(a_16_48, a_24_56); + const float32x4_t xx = vaddq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const float32x4_t xx1 = vsubq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const float32x4_t x3i0_3r0_3i1_x3r1 = vrev64q_f32(x3r0_3i0_3r1_x3i1); + const float32x4_t x1_x3_add = + vmlaq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1); + const float32x4_t x1_x3_sub = + vmlsq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1); + float32x4_t xx4 = vmulq_f32(wk2rv, xx1); + float32x4_t xx12 = vmulq_f32(wk1rv, x1_x3_add); + float32x4_t xx22 = vmulq_f32(wk3rv, x1_x3_sub); + xx4 = vmlaq_f32(xx4, wk2iv, vrev64q_f32(xx1)); + xx12 = vmlaq_f32(xx12, wk1iv, vrev64q_f32(x1_x3_add)); + xx22 = vmlaq_f32(xx22, wk3iv, vrev64q_f32(x1_x3_sub)); + + vst1_f32(&a[j + 0], vget_low_f32(xx)); + vst1_f32(&a[j + 32], vget_high_f32(xx)); + vst1_f32(&a[j + 16], vget_low_f32(xx4)); + vst1_f32(&a[j + 48], 
vget_high_f32(xx4)); + vst1_f32(&a[j + 8], vget_low_f32(xx12)); + vst1_f32(&a[j + 40], vget_high_f32(xx12)); + vst1_f32(&a[j + 24], vget_low_f32(xx22)); + vst1_f32(&a[j + 56], vget_high_f32(xx22)); + } + } +} + +__inline static float32x4_t reverse_order_f32x4(float32x4_t in) { + // A B C D -> C D A B + const float32x4_t rev = vcombine_f32(vget_high_f32(in), vget_low_f32(in)); + // C D A B -> D C B A + return vrev64q_f32(rev); +} + +void rftfsub_128_neon(float* a) { + const float* c = rdft_w + 32; + int j1, j2; + const float32x4_t mm_half = vdupq_n_f32(0.5f); + + // Vectorized code (four at once). + // Note: commented number are indexes for the first iteration of the loop. + for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) { + // Load 'wk'. + const float32x4_t c_j1 = vld1q_f32(&c[j1]); // 1, 2, 3, 4, + const float32x4_t c_k1 = vld1q_f32(&c[29 - j1]); // 28, 29, 30, 31, + const float32x4_t wkrt = vsubq_f32(mm_half, c_k1); // 28, 29, 30, 31, + const float32x4_t wkr_ = reverse_order_f32x4(wkrt); // 31, 30, 29, 28, + const float32x4_t wki_ = c_j1; // 1, 2, 3, 4, + // Load and shuffle 'a'. + // 2, 4, 6, 8, 3, 5, 7, 9 + float32x4x2_t a_j2_p = vld2q_f32(&a[0 + j2]); + // 120, 122, 124, 126, 121, 123, 125, 127, + const float32x4x2_t k2_0_4 = vld2q_f32(&a[122 - j2]); + // 126, 124, 122, 120 + const float32x4_t a_k2_p0 = reverse_order_f32x4(k2_0_4.val[0]); + // 127, 125, 123, 121 + const float32x4_t a_k2_p1 = reverse_order_f32x4(k2_0_4.val[1]); + // Calculate 'x'. + const float32x4_t xr_ = vsubq_f32(a_j2_p.val[0], a_k2_p0); + // 2-126, 4-124, 6-122, 8-120, + const float32x4_t xi_ = vaddq_f32(a_j2_p.val[1], a_k2_p1); + // 3-127, 5-125, 7-123, 9-121, + // Calculate product into 'y'. + // yr = wkr * xr - wki * xi; + // yi = wkr * xi + wki * xr; + const float32x4_t a_ = vmulq_f32(wkr_, xr_); + const float32x4_t b_ = vmulq_f32(wki_, xi_); + const float32x4_t c_ = vmulq_f32(wkr_, xi_); + const float32x4_t d_ = vmulq_f32(wki_, xr_); + const float32x4_t yr_ = vsubq_f32(a_, b_); // 2-126, 4-124, 6-122, 8-120, + const float32x4_t yi_ = vaddq_f32(c_, d_); // 3-127, 5-125, 7-123, 9-121, + // Update 'a'. + // a[j2 + 0] -= yr; + // a[j2 + 1] -= yi; + // a[k2 + 0] += yr; + // a[k2 + 1] -= yi; + // 126, 124, 122, 120, + const float32x4_t a_k2_p0n = vaddq_f32(a_k2_p0, yr_); + // 127, 125, 123, 121, + const float32x4_t a_k2_p1n = vsubq_f32(a_k2_p1, yi_); + // Shuffle in right order and store. + const float32x4_t a_k2_p0nr = vrev64q_f32(a_k2_p0n); + const float32x4_t a_k2_p1nr = vrev64q_f32(a_k2_p1n); + // 124, 125, 126, 127, 120, 121, 122, 123 + const float32x4x2_t a_k2_n = vzipq_f32(a_k2_p0nr, a_k2_p1nr); + // 2, 4, 6, 8, + a_j2_p.val[0] = vsubq_f32(a_j2_p.val[0], yr_); + // 3, 5, 7, 9, + a_j2_p.val[1] = vsubq_f32(a_j2_p.val[1], yi_); + // 2, 3, 4, 5, 6, 7, 8, 9, + vst2q_f32(&a[0 + j2], a_j2_p); + + vst1q_f32(&a[122 - j2], a_k2_n.val[1]); + vst1q_f32(&a[126 - j2], a_k2_n.val[0]); + } + + // Scalar code for the remaining items. + for (; j2 < 64; j1 += 1, j2 += 2) { + const int k2 = 128 - j2; + const int k1 = 32 - j1; + const float wkr = 0.5f - c[k1]; + const float wki = c[j1]; + const float xr = a[j2 + 0] - a[k2 + 0]; + const float xi = a[j2 + 1] + a[k2 + 1]; + const float yr = wkr * xr - wki * xi; + const float yi = wkr * xi + wki * xr; + a[j2 + 0] -= yr; + a[j2 + 1] -= yi; + a[k2 + 0] += yr; + a[k2 + 1] -= yi; + } +} + +void rftbsub_128_neon(float* a) { + const float* c = rdft_w + 32; + int j1, j2; + const float32x4_t mm_half = vdupq_n_f32(0.5f); + + a[1] = -a[1]; + // Vectorized code (four at once). 
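+  // Each iteration handles four (j2, k2 = 128 - j2) mirror pairs at once;
+  // the j2 + 7 < 64 bound stops while a whole group of four remains, and
+  // the scalar loop at the bottom finishes the leftover pairs.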
+ // Note: commented number are indexes for the first iteration of the loop. + for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) { + // Load 'wk'. + const float32x4_t c_j1 = vld1q_f32(&c[j1]); // 1, 2, 3, 4, + const float32x4_t c_k1 = vld1q_f32(&c[29 - j1]); // 28, 29, 30, 31, + const float32x4_t wkrt = vsubq_f32(mm_half, c_k1); // 28, 29, 30, 31, + const float32x4_t wkr_ = reverse_order_f32x4(wkrt); // 31, 30, 29, 28, + const float32x4_t wki_ = c_j1; // 1, 2, 3, 4, + // Load and shuffle 'a'. + // 2, 4, 6, 8, 3, 5, 7, 9 + float32x4x2_t a_j2_p = vld2q_f32(&a[0 + j2]); + // 120, 122, 124, 126, 121, 123, 125, 127, + const float32x4x2_t k2_0_4 = vld2q_f32(&a[122 - j2]); + // 126, 124, 122, 120 + const float32x4_t a_k2_p0 = reverse_order_f32x4(k2_0_4.val[0]); + // 127, 125, 123, 121 + const float32x4_t a_k2_p1 = reverse_order_f32x4(k2_0_4.val[1]); + // Calculate 'x'. + const float32x4_t xr_ = vsubq_f32(a_j2_p.val[0], a_k2_p0); + // 2-126, 4-124, 6-122, 8-120, + const float32x4_t xi_ = vaddq_f32(a_j2_p.val[1], a_k2_p1); + // 3-127, 5-125, 7-123, 9-121, + // Calculate product into 'y'. + // yr = wkr * xr - wki * xi; + // yi = wkr * xi + wki * xr; + const float32x4_t a_ = vmulq_f32(wkr_, xr_); + const float32x4_t b_ = vmulq_f32(wki_, xi_); + const float32x4_t c_ = vmulq_f32(wkr_, xi_); + const float32x4_t d_ = vmulq_f32(wki_, xr_); + const float32x4_t yr_ = vaddq_f32(a_, b_); // 2-126, 4-124, 6-122, 8-120, + const float32x4_t yi_ = vsubq_f32(c_, d_); // 3-127, 5-125, 7-123, 9-121, + // Update 'a'. + // a[j2 + 0] -= yr; + // a[j2 + 1] -= yi; + // a[k2 + 0] += yr; + // a[k2 + 1] -= yi; + // 126, 124, 122, 120, + const float32x4_t a_k2_p0n = vaddq_f32(a_k2_p0, yr_); + // 127, 125, 123, 121, + const float32x4_t a_k2_p1n = vsubq_f32(yi_, a_k2_p1); + // Shuffle in right order and store. + // 2, 3, 4, 5, 6, 7, 8, 9, + const float32x4_t a_k2_p0nr = vrev64q_f32(a_k2_p0n); + const float32x4_t a_k2_p1nr = vrev64q_f32(a_k2_p1n); + // 124, 125, 126, 127, 120, 121, 122, 123 + const float32x4x2_t a_k2_n = vzipq_f32(a_k2_p0nr, a_k2_p1nr); + // 2, 4, 6, 8, + a_j2_p.val[0] = vsubq_f32(a_j2_p.val[0], yr_); + // 3, 5, 7, 9, + a_j2_p.val[1] = vsubq_f32(yi_, a_j2_p.val[1]); + // 2, 3, 4, 5, 6, 7, 8, 9, + vst2q_f32(&a[0 + j2], a_j2_p); + + vst1q_f32(&a[122 - j2], a_k2_n.val[1]); + vst1q_f32(&a[126 - j2], a_k2_n.val[0]); + } + + // Scalar code for the remaining items. + for (; j2 < 64; j1 += 1, j2 += 2) { + const int k2 = 128 - j2; + const int k1 = 32 - j1; + const float wkr = 0.5f - c[k1]; + const float wki = c[j1]; + const float xr = a[j2 + 0] - a[k2 + 0]; + const float xi = a[j2 + 1] + a[k2 + 1]; + const float yr = wkr * xr + wki * xi; + const float yi = wkr * xi - wki * xr; + a[j2 + 0] = a[j2 + 0] - yr; + a[j2 + 1] = yi - a[j2 + 1]; + a[k2 + 0] = yr + a[k2 + 0]; + a[k2 + 1] = yi - a[k2 + 1]; + } + a[65] = -a[65]; +} +#endif + +} // namespace webrtc diff --git a/audio_processing/utility/ooura_fft_sse2.cc b/audio_processing/utility/ooura_fft_sse2.cc new file mode 100644 index 0000000..2b799d2 --- /dev/null +++ b/audio_processing/utility/ooura_fft_sse2.cc @@ -0,0 +1,439 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include <emmintrin.h>
+#include <xmmintrin.h>
+
+#include "audio_processing/utility/ooura_fft.h"
+#include "audio_processing/utility/ooura_fft_tables_common.h"
+#include "audio_processing/utility/ooura_fft_tables_neon_sse2.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+namespace {
+// These intrinsics were unavailable before VS 2008.
+// TODO(andrew): move to a common file.
+#if defined(_MSC_VER) && _MSC_VER < 1500
+static __inline __m128 _mm_castsi128_ps(__m128i a) {
+  return *(__m128*)&a;
+}
+static __inline __m128i _mm_castps_si128(__m128 a) {
+  return *(__m128i*)&a;
+}
+#endif
+
+}  // namespace
+
+void cft1st_128_SSE2(float* a) {
+  const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign);
+  int j, k2;
+
+  for (k2 = 0, j = 0; j < 128; j += 16, k2 += 4) {
+    __m128 a00v = _mm_loadu_ps(&a[j + 0]);
+    __m128 a04v = _mm_loadu_ps(&a[j + 4]);
+    __m128 a08v = _mm_loadu_ps(&a[j + 8]);
+    __m128 a12v = _mm_loadu_ps(&a[j + 12]);
+    __m128 a01v = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(1, 0, 1, 0));
+    __m128 a23v = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(3, 2, 3, 2));
+    __m128 a45v = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(1, 0, 1, 0));
+    __m128 a67v = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(3, 2, 3, 2));
+
+    const __m128 wk1rv = _mm_load_ps(&rdft_wk1r[k2]);
+    const __m128 wk1iv = _mm_load_ps(&rdft_wk1i[k2]);
+    const __m128 wk2rv = _mm_load_ps(&rdft_wk2r[k2]);
+    const __m128 wk2iv = _mm_load_ps(&rdft_wk2i[k2]);
+    const __m128 wk3rv = _mm_load_ps(&rdft_wk3r[k2]);
+    const __m128 wk3iv = _mm_load_ps(&rdft_wk3i[k2]);
+    __m128 x0v = _mm_add_ps(a01v, a23v);
+    const __m128 x1v = _mm_sub_ps(a01v, a23v);
+    const __m128 x2v = _mm_add_ps(a45v, a67v);
+    const __m128 x3v = _mm_sub_ps(a45v, a67v);
+    __m128 x0w;
+    a01v = _mm_add_ps(x0v, x2v);
+    x0v = _mm_sub_ps(x0v, x2v);
+    x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1));
+    {
+      const __m128 a45_0v = _mm_mul_ps(wk2rv, x0v);
+      const __m128 a45_1v = _mm_mul_ps(wk2iv, x0w);
+      a45v = _mm_add_ps(a45_0v, a45_1v);
+    }
+    {
+      __m128 a23_0v, a23_1v;
+      const __m128 x3w = _mm_shuffle_ps(x3v, x3v, _MM_SHUFFLE(2, 3, 0, 1));
+      const __m128 x3s = _mm_mul_ps(mm_swap_sign, x3w);
+      x0v = _mm_add_ps(x1v, x3s);
+      x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1));
+      a23_0v = _mm_mul_ps(wk1rv, x0v);
+      a23_1v = _mm_mul_ps(wk1iv, x0w);
+      a23v = _mm_add_ps(a23_0v, a23_1v);
+
+      x0v = _mm_sub_ps(x1v, x3s);
+      x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1));
+    }
+    {
+      const __m128 a67_0v = _mm_mul_ps(wk3rv, x0v);
+      const __m128 a67_1v = _mm_mul_ps(wk3iv, x0w);
+      a67v = _mm_add_ps(a67_0v, a67_1v);
+    }
+
+    a00v = _mm_shuffle_ps(a01v, a23v, _MM_SHUFFLE(1, 0, 1, 0));
+    a04v = _mm_shuffle_ps(a45v, a67v, _MM_SHUFFLE(1, 0, 1, 0));
+    a08v = _mm_shuffle_ps(a01v, a23v, _MM_SHUFFLE(3, 2, 3, 2));
+    a12v = _mm_shuffle_ps(a45v, a67v, _MM_SHUFFLE(3, 2, 3, 2));
+    _mm_storeu_ps(&a[j + 0], a00v);
+    _mm_storeu_ps(&a[j + 4], a04v);
+    _mm_storeu_ps(&a[j + 8], a08v);
+    _mm_storeu_ps(&a[j + 12], a12v);
+  }
+}
+
+void cftmdl_128_SSE2(float* a) {
+  const int l = 8;
+  const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign);
+  int j0;
+
+  __m128 wk1rv = _mm_load_ps(cftmdl_wk1r);
+  for (j0 = 0; j0 < l; j0 += 2) {
+    const __m128i a_00 = _mm_loadl_epi64((__m128i*)&a[j0 + 0]);
+    const __m128i a_08 = _mm_loadl_epi64((__m128i*)&a[j0 + 8]);
+    const __m128i a_32 = _mm_loadl_epi64((__m128i*)&a[j0 + 32]);
+    const __m128i a_40 = _mm_loadl_epi64((__m128i*)&a[j0 + 40]);
+    const __m128 a_00_32 =
+        _mm_shuffle_ps(_mm_castsi128_ps(a_00), _mm_castsi128_ps(a_32),
+                       _MM_SHUFFLE(1, 0, 1, 0));
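+    // _mm_loadl_epi64 fetches a single re/im pair (64 bits), and the
+    // shuffle packs the pair from row j0 together with the pair from row
+    // j0 + 32, so the low and high halves of each vector carry two
+    // independent radix-4 butterflies through the same arithmetic below.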
const __m128 a_08_40 = + _mm_shuffle_ps(_mm_castsi128_ps(a_08), _mm_castsi128_ps(a_40), + _MM_SHUFFLE(1, 0, 1, 0)); + __m128 x0r0_0i0_0r1_x0i1 = _mm_add_ps(a_00_32, a_08_40); + const __m128 x1r0_1i0_1r1_x1i1 = _mm_sub_ps(a_00_32, a_08_40); + + const __m128i a_16 = _mm_loadl_epi64((__m128i*)&a[j0 + 16]); + const __m128i a_24 = _mm_loadl_epi64((__m128i*)&a[j0 + 24]); + const __m128i a_48 = _mm_loadl_epi64((__m128i*)&a[j0 + 48]); + const __m128i a_56 = _mm_loadl_epi64((__m128i*)&a[j0 + 56]); + const __m128 a_16_48 = + _mm_shuffle_ps(_mm_castsi128_ps(a_16), _mm_castsi128_ps(a_48), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 a_24_56 = + _mm_shuffle_ps(_mm_castsi128_ps(a_24), _mm_castsi128_ps(a_56), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 x2r0_2i0_2r1_x2i1 = _mm_add_ps(a_16_48, a_24_56); + const __m128 x3r0_3i0_3r1_x3i1 = _mm_sub_ps(a_16_48, a_24_56); + + const __m128 xx0 = _mm_add_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const __m128 xx1 = _mm_sub_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + + const __m128 x3i0_3r0_3i1_x3r1 = _mm_castsi128_ps(_mm_shuffle_epi32( + _mm_castps_si128(x3r0_3i0_3r1_x3i1), _MM_SHUFFLE(2, 3, 0, 1))); + const __m128 x3_swapped = _mm_mul_ps(mm_swap_sign, x3i0_3r0_3i1_x3r1); + const __m128 x1_x3_add = _mm_add_ps(x1r0_1i0_1r1_x1i1, x3_swapped); + const __m128 x1_x3_sub = _mm_sub_ps(x1r0_1i0_1r1_x1i1, x3_swapped); + + const __m128 yy0 = + _mm_shuffle_ps(x1_x3_add, x1_x3_sub, _MM_SHUFFLE(2, 2, 2, 2)); + const __m128 yy1 = + _mm_shuffle_ps(x1_x3_add, x1_x3_sub, _MM_SHUFFLE(3, 3, 3, 3)); + const __m128 yy2 = _mm_mul_ps(mm_swap_sign, yy1); + const __m128 yy3 = _mm_add_ps(yy0, yy2); + const __m128 yy4 = _mm_mul_ps(wk1rv, yy3); + + _mm_storel_epi64((__m128i*)&a[j0 + 0], _mm_castps_si128(xx0)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 32], + _mm_shuffle_epi32(_mm_castps_si128(xx0), _MM_SHUFFLE(3, 2, 3, 2))); + + _mm_storel_epi64((__m128i*)&a[j0 + 16], _mm_castps_si128(xx1)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 48], + _mm_shuffle_epi32(_mm_castps_si128(xx1), _MM_SHUFFLE(2, 3, 2, 3))); + a[j0 + 48] = -a[j0 + 48]; + + _mm_storel_epi64((__m128i*)&a[j0 + 8], _mm_castps_si128(x1_x3_add)); + _mm_storel_epi64((__m128i*)&a[j0 + 24], _mm_castps_si128(x1_x3_sub)); + + _mm_storel_epi64((__m128i*)&a[j0 + 40], _mm_castps_si128(yy4)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 56], + _mm_shuffle_epi32(_mm_castps_si128(yy4), _MM_SHUFFLE(2, 3, 2, 3))); + } + + { + int k = 64; + int k1 = 2; + int k2 = 2 * k1; + const __m128 wk2rv = _mm_load_ps(&rdft_wk2r[k2 + 0]); + const __m128 wk2iv = _mm_load_ps(&rdft_wk2i[k2 + 0]); + const __m128 wk1iv = _mm_load_ps(&rdft_wk1i[k2 + 0]); + const __m128 wk3rv = _mm_load_ps(&rdft_wk3r[k2 + 0]); + const __m128 wk3iv = _mm_load_ps(&rdft_wk3i[k2 + 0]); + wk1rv = _mm_load_ps(&rdft_wk1r[k2 + 0]); + for (j0 = k; j0 < l + k; j0 += 2) { + const __m128i a_00 = _mm_loadl_epi64((__m128i*)&a[j0 + 0]); + const __m128i a_08 = _mm_loadl_epi64((__m128i*)&a[j0 + 8]); + const __m128i a_32 = _mm_loadl_epi64((__m128i*)&a[j0 + 32]); + const __m128i a_40 = _mm_loadl_epi64((__m128i*)&a[j0 + 40]); + const __m128 a_00_32 = + _mm_shuffle_ps(_mm_castsi128_ps(a_00), _mm_castsi128_ps(a_32), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 a_08_40 = + _mm_shuffle_ps(_mm_castsi128_ps(a_08), _mm_castsi128_ps(a_40), + _MM_SHUFFLE(1, 0, 1, 0)); + __m128 x0r0_0i0_0r1_x0i1 = _mm_add_ps(a_00_32, a_08_40); + const __m128 x1r0_1i0_1r1_x1i1 = _mm_sub_ps(a_00_32, a_08_40); + + const __m128i a_16 = _mm_loadl_epi64((__m128i*)&a[j0 + 16]); + const __m128i a_24 = _mm_loadl_epi64((__m128i*)&a[j0 + 24]); + const 
__m128i a_48 = _mm_loadl_epi64((__m128i*)&a[j0 + 48]); + const __m128i a_56 = _mm_loadl_epi64((__m128i*)&a[j0 + 56]); + const __m128 a_16_48 = + _mm_shuffle_ps(_mm_castsi128_ps(a_16), _mm_castsi128_ps(a_48), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 a_24_56 = + _mm_shuffle_ps(_mm_castsi128_ps(a_24), _mm_castsi128_ps(a_56), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 x2r0_2i0_2r1_x2i1 = _mm_add_ps(a_16_48, a_24_56); + const __m128 x3r0_3i0_3r1_x3i1 = _mm_sub_ps(a_16_48, a_24_56); + + const __m128 xx = _mm_add_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const __m128 xx1 = _mm_sub_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const __m128 xx2 = _mm_mul_ps(xx1, wk2rv); + const __m128 xx3 = _mm_mul_ps( + wk2iv, _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(xx1), + _MM_SHUFFLE(2, 3, 0, 1)))); + const __m128 xx4 = _mm_add_ps(xx2, xx3); + + const __m128 x3i0_3r0_3i1_x3r1 = _mm_castsi128_ps(_mm_shuffle_epi32( + _mm_castps_si128(x3r0_3i0_3r1_x3i1), _MM_SHUFFLE(2, 3, 0, 1))); + const __m128 x3_swapped = _mm_mul_ps(mm_swap_sign, x3i0_3r0_3i1_x3r1); + const __m128 x1_x3_add = _mm_add_ps(x1r0_1i0_1r1_x1i1, x3_swapped); + const __m128 x1_x3_sub = _mm_sub_ps(x1r0_1i0_1r1_x1i1, x3_swapped); + + const __m128 xx10 = _mm_mul_ps(x1_x3_add, wk1rv); + const __m128 xx11 = _mm_mul_ps( + wk1iv, _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(x1_x3_add), + _MM_SHUFFLE(2, 3, 0, 1)))); + const __m128 xx12 = _mm_add_ps(xx10, xx11); + + const __m128 xx20 = _mm_mul_ps(x1_x3_sub, wk3rv); + const __m128 xx21 = _mm_mul_ps( + wk3iv, _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(x1_x3_sub), + _MM_SHUFFLE(2, 3, 0, 1)))); + const __m128 xx22 = _mm_add_ps(xx20, xx21); + + _mm_storel_epi64((__m128i*)&a[j0 + 0], _mm_castps_si128(xx)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 32], + _mm_shuffle_epi32(_mm_castps_si128(xx), _MM_SHUFFLE(3, 2, 3, 2))); + + _mm_storel_epi64((__m128i*)&a[j0 + 16], _mm_castps_si128(xx4)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 48], + _mm_shuffle_epi32(_mm_castps_si128(xx4), _MM_SHUFFLE(3, 2, 3, 2))); + + _mm_storel_epi64((__m128i*)&a[j0 + 8], _mm_castps_si128(xx12)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 40], + _mm_shuffle_epi32(_mm_castps_si128(xx12), _MM_SHUFFLE(3, 2, 3, 2))); + + _mm_storel_epi64((__m128i*)&a[j0 + 24], _mm_castps_si128(xx22)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 56], + _mm_shuffle_epi32(_mm_castps_si128(xx22), _MM_SHUFFLE(3, 2, 3, 2))); + } + } +} + +void rftfsub_128_SSE2(float* a) { + const float* c = rdft_w + 32; + int j1, j2, k1, k2; + float wkr, wki, xr, xi, yr, yi; + + static const ALIGN16_BEG float ALIGN16_END k_half[4] = {0.5f, 0.5f, 0.5f, + 0.5f}; + const __m128 mm_half = _mm_load_ps(k_half); + + // Vectorized code (four at once). + // Note: commented number are indexes for the first iteration of the loop. + for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) { + // Load 'wk'. + const __m128 c_j1 = _mm_loadu_ps(&c[j1]); // 1, 2, 3, 4, + const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]); // 28, 29, 30, 31, + const __m128 wkrt = _mm_sub_ps(mm_half, c_k1); // 28, 29, 30, 31, + const __m128 wkr_ = + _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3)); // 31, 30, 29, 28, + const __m128 wki_ = c_j1; // 1, 2, 3, 4, + // Load and shuffle 'a'. 
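+    // The two mirrored loads below fetch a[122 - j2] .. a[129 - j2], i.e. the
+    // conjugate-symmetric counterparts a[k2 + 0], a[k2 + 1] (with
+    // k2 = 128 - j2) of the four complex bins handled per iteration; compare
+    // the scalar tail loop.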
+ const __m128 a_j2_0 = _mm_loadu_ps(&a[0 + j2]); // 2, 3, 4, 5, + const __m128 a_j2_4 = _mm_loadu_ps(&a[4 + j2]); // 6, 7, 8, 9, + const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]); // 120, 121, 122, 123, + const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]); // 124, 125, 126, 127, + const __m128 a_j2_p0 = _mm_shuffle_ps( + a_j2_0, a_j2_4, _MM_SHUFFLE(2, 0, 2, 0)); // 2, 4, 6, 8, + const __m128 a_j2_p1 = _mm_shuffle_ps( + a_j2_0, a_j2_4, _MM_SHUFFLE(3, 1, 3, 1)); // 3, 5, 7, 9, + const __m128 a_k2_p0 = _mm_shuffle_ps( + a_k2_4, a_k2_0, _MM_SHUFFLE(0, 2, 0, 2)); // 126, 124, 122, 120, + const __m128 a_k2_p1 = _mm_shuffle_ps( + a_k2_4, a_k2_0, _MM_SHUFFLE(1, 3, 1, 3)); // 127, 125, 123, 121, + // Calculate 'x'. + const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0); + // 2-126, 4-124, 6-122, 8-120, + const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1); + // 3-127, 5-125, 7-123, 9-121, + // Calculate product into 'y'. + // yr = wkr * xr - wki * xi; + // yi = wkr * xi + wki * xr; + const __m128 a_ = _mm_mul_ps(wkr_, xr_); + const __m128 b_ = _mm_mul_ps(wki_, xi_); + const __m128 c_ = _mm_mul_ps(wkr_, xi_); + const __m128 d_ = _mm_mul_ps(wki_, xr_); + const __m128 yr_ = _mm_sub_ps(a_, b_); // 2-126, 4-124, 6-122, 8-120, + const __m128 yi_ = _mm_add_ps(c_, d_); // 3-127, 5-125, 7-123, 9-121, + // Update 'a'. + // a[j2 + 0] -= yr; + // a[j2 + 1] -= yi; + // a[k2 + 0] += yr; + // a[k2 + 1] -= yi; + const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_); // 2, 4, 6, 8, + const __m128 a_j2_p1n = _mm_sub_ps(a_j2_p1, yi_); // 3, 5, 7, 9, + const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_); // 126, 124, 122, 120, + const __m128 a_k2_p1n = _mm_sub_ps(a_k2_p1, yi_); // 127, 125, 123, 121, + // Shuffle in right order and store. + const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n); + // 2, 3, 4, 5, + const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n); + // 6, 7, 8, 9, + const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n); + // 122, 123, 120, 121, + const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n); + // 126, 127, 124, 125, + const __m128 a_k2_0n = _mm_shuffle_ps( + a_k2_0nt, a_k2_0nt, _MM_SHUFFLE(1, 0, 3, 2)); // 120, 121, 122, 123, + const __m128 a_k2_4n = _mm_shuffle_ps( + a_k2_4nt, a_k2_4nt, _MM_SHUFFLE(1, 0, 3, 2)); // 124, 125, 126, 127, + _mm_storeu_ps(&a[0 + j2], a_j2_0n); + _mm_storeu_ps(&a[4 + j2], a_j2_4n); + _mm_storeu_ps(&a[122 - j2], a_k2_0n); + _mm_storeu_ps(&a[126 - j2], a_k2_4n); + } + // Scalar code for the remaining items. + for (; j2 < 64; j1 += 1, j2 += 2) { + k2 = 128 - j2; + k1 = 32 - j1; + wkr = 0.5f - c[k1]; + wki = c[j1]; + xr = a[j2 + 0] - a[k2 + 0]; + xi = a[j2 + 1] + a[k2 + 1]; + yr = wkr * xr - wki * xi; + yi = wkr * xi + wki * xr; + a[j2 + 0] -= yr; + a[j2 + 1] -= yi; + a[k2 + 0] += yr; + a[k2 + 1] -= yi; + } +} + +void rftbsub_128_SSE2(float* a) { + const float* c = rdft_w + 32; + int j1, j2, k1, k2; + float wkr, wki, xr, xi, yr, yi; + + static const ALIGN16_BEG float ALIGN16_END k_half[4] = {0.5f, 0.5f, 0.5f, + 0.5f}; + const __m128 mm_half = _mm_load_ps(k_half); + + a[1] = -a[1]; + // Vectorized code (four at once). + // Note: commented number are indexes for the first iteration of the loop. + for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) { + // Load 'wk'. 
+ const __m128 c_j1 = _mm_loadu_ps(&c[j1]); // 1, 2, 3, 4, + const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]); // 28, 29, 30, 31, + const __m128 wkrt = _mm_sub_ps(mm_half, c_k1); // 28, 29, 30, 31, + const __m128 wkr_ = + _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3)); // 31, 30, 29, 28, + const __m128 wki_ = c_j1; // 1, 2, 3, 4, + // Load and shuffle 'a'. + const __m128 a_j2_0 = _mm_loadu_ps(&a[0 + j2]); // 2, 3, 4, 5, + const __m128 a_j2_4 = _mm_loadu_ps(&a[4 + j2]); // 6, 7, 8, 9, + const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]); // 120, 121, 122, 123, + const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]); // 124, 125, 126, 127, + const __m128 a_j2_p0 = _mm_shuffle_ps( + a_j2_0, a_j2_4, _MM_SHUFFLE(2, 0, 2, 0)); // 2, 4, 6, 8, + const __m128 a_j2_p1 = _mm_shuffle_ps( + a_j2_0, a_j2_4, _MM_SHUFFLE(3, 1, 3, 1)); // 3, 5, 7, 9, + const __m128 a_k2_p0 = _mm_shuffle_ps( + a_k2_4, a_k2_0, _MM_SHUFFLE(0, 2, 0, 2)); // 126, 124, 122, 120, + const __m128 a_k2_p1 = _mm_shuffle_ps( + a_k2_4, a_k2_0, _MM_SHUFFLE(1, 3, 1, 3)); // 127, 125, 123, 121, + // Calculate 'x'. + const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0); + // 2-126, 4-124, 6-122, 8-120, + const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1); + // 3-127, 5-125, 7-123, 9-121, + // Calculate product into 'y'. + // yr = wkr * xr + wki * xi; + // yi = wkr * xi - wki * xr; + const __m128 a_ = _mm_mul_ps(wkr_, xr_); + const __m128 b_ = _mm_mul_ps(wki_, xi_); + const __m128 c_ = _mm_mul_ps(wkr_, xi_); + const __m128 d_ = _mm_mul_ps(wki_, xr_); + const __m128 yr_ = _mm_add_ps(a_, b_); // 2-126, 4-124, 6-122, 8-120, + const __m128 yi_ = _mm_sub_ps(c_, d_); // 3-127, 5-125, 7-123, 9-121, + // Update 'a'. + // a[j2 + 0] = a[j2 + 0] - yr; + // a[j2 + 1] = yi - a[j2 + 1]; + // a[k2 + 0] = yr + a[k2 + 0]; + // a[k2 + 1] = yi - a[k2 + 1]; + const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_); // 2, 4, 6, 8, + const __m128 a_j2_p1n = _mm_sub_ps(yi_, a_j2_p1); // 3, 5, 7, 9, + const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_); // 126, 124, 122, 120, + const __m128 a_k2_p1n = _mm_sub_ps(yi_, a_k2_p1); // 127, 125, 123, 121, + // Shuffle in right order and store. + const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n); + // 2, 3, 4, 5, + const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n); + // 6, 7, 8, 9, + const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n); + // 122, 123, 120, 121, + const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n); + // 126, 127, 124, 125, + const __m128 a_k2_0n = _mm_shuffle_ps( + a_k2_0nt, a_k2_0nt, _MM_SHUFFLE(1, 0, 3, 2)); // 120, 121, 122, 123, + const __m128 a_k2_4n = _mm_shuffle_ps( + a_k2_4nt, a_k2_4nt, _MM_SHUFFLE(1, 0, 3, 2)); // 124, 125, 126, 127, + _mm_storeu_ps(&a[0 + j2], a_j2_0n); + _mm_storeu_ps(&a[4 + j2], a_j2_4n); + _mm_storeu_ps(&a[122 - j2], a_k2_0n); + _mm_storeu_ps(&a[126 - j2], a_k2_4n); + } + // Scalar code for the remaining items. 
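+  // Only j2 = 58, 60 and 62 remain here: the vectorized loop above exits once
+  // fewer than four complex bins are left (j2 + 7 < 64 fails at j2 = 58).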
+  for (; j2 < 64; j1 += 1, j2 += 2) {
+    k2 = 128 - j2;
+    k1 = 32 - j1;
+    wkr = 0.5f - c[k1];
+    wki = c[j1];
+    xr = a[j2 + 0] - a[k2 + 0];
+    xi = a[j2 + 1] + a[k2 + 1];
+    yr = wkr * xr + wki * xi;
+    yi = wkr * xi - wki * xr;
+    a[j2 + 0] = a[j2 + 0] - yr;
+    a[j2 + 1] = yi - a[j2 + 1];
+    a[k2 + 0] = yr + a[k2 + 0];
+    a[k2 + 1] = yi - a[k2 + 1];
+  }
+  a[65] = -a[65];
+}
+#endif
+
+}  // namespace webrtc
diff --git a/audio_processing/utility/ooura_fft_tables_common.h b/audio_processing/utility/ooura_fft_tables_common.h
new file mode 100644
index 0000000..f4dc6f6
--- /dev/null
+++ b/audio_processing/utility/ooura_fft_tables_common.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_COMMON_H_
+
+#include "audio_processing/utility/ooura_fft.h"
+
+namespace webrtc {
+
+// These tables used to be computed at run-time. For example, refer to:
+// https://code.google.com/p/webrtc/source/browse/trunk/webrtc/audio_processing/utility/apm_rdft.c?r=6564
+// to see the initialization code.
+// Constants shared by all paths (C, SSE2, NEON).
+const float rdft_w[64] = {
+    1.0000000000f, 0.0000000000f, 0.7071067691f, 0.7071067691f, 0.9238795638f,
+    0.3826834559f, 0.3826834559f, 0.9238795638f, 0.9807852507f, 0.1950903237f,
+    0.5555702448f, 0.8314695954f, 0.8314695954f, 0.5555702448f, 0.1950903237f,
+    0.9807852507f, 0.9951847196f, 0.0980171412f, 0.6343933344f, 0.7730104327f,
+    0.8819212914f, 0.4713967443f, 0.2902846634f, 0.9569403529f, 0.9569403529f,
+    0.2902846634f, 0.4713967443f, 0.8819212914f, 0.7730104327f, 0.6343933344f,
+    0.0980171412f, 0.9951847196f, 0.7071067691f, 0.4993977249f, 0.4975923598f,
+    0.4945882559f, 0.4903926253f, 0.4850156307f, 0.4784701765f, 0.4707720280f,
+    0.4619397819f, 0.4519946277f, 0.4409606457f, 0.4288643003f, 0.4157347977f,
+    0.4016037583f, 0.3865052164f, 0.3704755902f, 0.3535533845f, 0.3357794881f,
+    0.3171966672f, 0.2978496552f, 0.2777851224f, 0.2570513785f, 0.2356983721f,
+    0.2137775421f, 0.1913417280f, 0.1684449315f, 0.1451423317f, 0.1214900985f,
+    0.0975451618f, 0.0733652338f, 0.0490085706f, 0.0245338380f,
+};
+
+// Constants used by the C and MIPS paths.
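+// (wk3 twiddle factors, stored with real and imaginary parts interleaved.)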
+const float rdft_wk3ri_first[16] = { + 1.000000000f, 0.000000000f, 0.382683456f, 0.923879564f, + 0.831469536f, 0.555570245f, -0.195090353f, 0.980785251f, + 0.956940353f, 0.290284693f, 0.098017156f, 0.995184720f, + 0.634393334f, 0.773010492f, -0.471396863f, 0.881921172f, +}; +const float rdft_wk3ri_second[16] = { + -0.707106769f, 0.707106769f, -0.923879564f, -0.382683456f, + -0.980785251f, 0.195090353f, -0.555570245f, -0.831469536f, + -0.881921172f, 0.471396863f, -0.773010492f, -0.634393334f, + -0.995184720f, -0.098017156f, -0.290284693f, -0.956940353f, +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_COMMON_H_ diff --git a/audio_processing/utility/ooura_fft_tables_neon_sse2.h b/audio_processing/utility/ooura_fft_tables_neon_sse2.h new file mode 100644 index 0000000..99bbd56 --- /dev/null +++ b/audio_processing/utility/ooura_fft_tables_neon_sse2.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_NEON_SSE2_H_ +#define MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_NEON_SSE2_H_ + +#include "audio_processing/utility/ooura_fft.h" +#include "rtc_base/system/arch.h" + +#ifdef _MSC_VER /* visual c++ */ +#define ALIGN16_BEG __declspec(align(16)) +#define ALIGN16_END +#else /* gcc or icc */ +#define ALIGN16_BEG +#define ALIGN16_END __attribute__((aligned(16))) +#endif + +namespace webrtc { + +// These tables used to be computed at run-time. For example, refer to: +// https://code.google.com/p/webrtc/source/browse/trunk/webrtc/audio_processing/utility/apm_rdft.c?r=6564 +// to see the initialization code. +#if defined(WEBRTC_ARCH_X86_FAMILY) || defined(WEBRTC_HAS_NEON) +// Constants used by SSE2 and NEON but initialized in the C path. 
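+// k_swap_sign flips the sign of alternate lanes, and each rdft_wk* twiddle
+// value below is stored twice in a row so that a single aligned 128-bit load
+// feeds two complex lanes at once.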
+const ALIGN16_BEG float ALIGN16_END k_swap_sign[4] = {-1.f, 1.f, -1.f, 1.f}; + +ALIGN16_BEG const float ALIGN16_END rdft_wk1r[32] = { + 1.000000000f, 1.000000000f, 0.707106769f, 0.707106769f, 0.923879564f, + 0.923879564f, 0.382683456f, 0.382683456f, 0.980785251f, 0.980785251f, + 0.555570245f, 0.555570245f, 0.831469595f, 0.831469595f, 0.195090324f, + 0.195090324f, 0.995184720f, 0.995184720f, 0.634393334f, 0.634393334f, + 0.881921291f, 0.881921291f, 0.290284663f, 0.290284663f, 0.956940353f, + 0.956940353f, 0.471396744f, 0.471396744f, 0.773010433f, 0.773010433f, + 0.098017141f, 0.098017141f, +}; +ALIGN16_BEG const float ALIGN16_END rdft_wk2r[32] = { + 1.000000000f, 1.000000000f, -0.000000000f, -0.000000000f, 0.707106769f, + 0.707106769f, -0.707106769f, -0.707106769f, 0.923879564f, 0.923879564f, + -0.382683456f, -0.382683456f, 0.382683456f, 0.382683456f, -0.923879564f, + -0.923879564f, 0.980785251f, 0.980785251f, -0.195090324f, -0.195090324f, + 0.555570245f, 0.555570245f, -0.831469595f, -0.831469595f, 0.831469595f, + 0.831469595f, -0.555570245f, -0.555570245f, 0.195090324f, 0.195090324f, + -0.980785251f, -0.980785251f, +}; +ALIGN16_BEG const float ALIGN16_END rdft_wk3r[32] = { + 1.000000000f, 1.000000000f, -0.707106769f, -0.707106769f, 0.382683456f, + 0.382683456f, -0.923879564f, -0.923879564f, 0.831469536f, 0.831469536f, + -0.980785251f, -0.980785251f, -0.195090353f, -0.195090353f, -0.555570245f, + -0.555570245f, 0.956940353f, 0.956940353f, -0.881921172f, -0.881921172f, + 0.098017156f, 0.098017156f, -0.773010492f, -0.773010492f, 0.634393334f, + 0.634393334f, -0.995184720f, -0.995184720f, -0.471396863f, -0.471396863f, + -0.290284693f, -0.290284693f, +}; +ALIGN16_BEG const float ALIGN16_END rdft_wk1i[32] = { + -0.000000000f, 0.000000000f, -0.707106769f, 0.707106769f, -0.382683456f, + 0.382683456f, -0.923879564f, 0.923879564f, -0.195090324f, 0.195090324f, + -0.831469595f, 0.831469595f, -0.555570245f, 0.555570245f, -0.980785251f, + 0.980785251f, -0.098017141f, 0.098017141f, -0.773010433f, 0.773010433f, + -0.471396744f, 0.471396744f, -0.956940353f, 0.956940353f, -0.290284663f, + 0.290284663f, -0.881921291f, 0.881921291f, -0.634393334f, 0.634393334f, + -0.995184720f, 0.995184720f, +}; +ALIGN16_BEG const float ALIGN16_END rdft_wk2i[32] = { + -0.000000000f, 0.000000000f, -1.000000000f, 1.000000000f, -0.707106769f, + 0.707106769f, -0.707106769f, 0.707106769f, -0.382683456f, 0.382683456f, + -0.923879564f, 0.923879564f, -0.923879564f, 0.923879564f, -0.382683456f, + 0.382683456f, -0.195090324f, 0.195090324f, -0.980785251f, 0.980785251f, + -0.831469595f, 0.831469595f, -0.555570245f, 0.555570245f, -0.555570245f, + 0.555570245f, -0.831469595f, 0.831469595f, -0.980785251f, 0.980785251f, + -0.195090324f, 0.195090324f, +}; +ALIGN16_BEG const float ALIGN16_END rdft_wk3i[32] = { + -0.000000000f, 0.000000000f, -0.707106769f, 0.707106769f, -0.923879564f, + 0.923879564f, 0.382683456f, -0.382683456f, -0.555570245f, 0.555570245f, + -0.195090353f, 0.195090353f, -0.980785251f, 0.980785251f, 0.831469536f, + -0.831469536f, -0.290284693f, 0.290284693f, -0.471396863f, 0.471396863f, + -0.995184720f, 0.995184720f, 0.634393334f, -0.634393334f, -0.773010492f, + 0.773010492f, 0.098017156f, -0.098017156f, -0.881921172f, 0.881921172f, + 0.956940353f, -0.956940353f, +}; +ALIGN16_BEG const float ALIGN16_END cftmdl_wk1r[4] = { + 0.707106769f, + 0.707106769f, + 0.707106769f, + -0.707106769f, +}; +#endif + +} // namespace webrtc + +#endif // MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_NEON_SSE2_H_ diff --git 
a/audio_processing/utility/pffft_wrapper.cc b/audio_processing/utility/pffft_wrapper.cc
new file mode 100644
index 0000000..a6965bb
--- /dev/null
+++ b/audio_processing/utility/pffft_wrapper.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/utility/pffft_wrapper.h"
+
+#include "rtc_base/checks.h"
+#include "third_party/pffft/src/pffft.h"
+
+namespace webrtc {
+namespace {
+
+size_t GetBufferSize(size_t fft_size, Pffft::FftType fft_type) {
+  return fft_size * (fft_type == Pffft::FftType::kReal ? 1 : 2);
+}
+
+float* AllocatePffftBuffer(size_t size) {
+  return static_cast<float*>(pffft_aligned_malloc(size * sizeof(float)));
+}
+
+}  // namespace
+
+Pffft::FloatBuffer::FloatBuffer(size_t fft_size, FftType fft_type)
+    : size_(GetBufferSize(fft_size, fft_type)),
+      data_(AllocatePffftBuffer(size_)) {}
+
+Pffft::FloatBuffer::~FloatBuffer() {
+  pffft_aligned_free(data_);
+}
+
+rtc::ArrayView<const float> Pffft::FloatBuffer::GetConstView() const {
+  return {data_, size_};
+}
+
+rtc::ArrayView<float> Pffft::FloatBuffer::GetView() {
+  return {data_, size_};
+}
+
+Pffft::Pffft(size_t fft_size, FftType fft_type)
+    : fft_size_(fft_size),
+      fft_type_(fft_type),
+      pffft_status_(pffft_new_setup(
+          fft_size_,
+          fft_type == Pffft::FftType::kReal ? PFFFT_REAL : PFFFT_COMPLEX)),
+      scratch_buffer_(
+          AllocatePffftBuffer(GetBufferSize(fft_size_, fft_type_))) {
+  RTC_DCHECK(pffft_status_);
+  RTC_DCHECK(scratch_buffer_);
+}
+
+Pffft::~Pffft() {
+  pffft_destroy_setup(pffft_status_);
+  pffft_aligned_free(scratch_buffer_);
+}
+
+bool Pffft::IsValidFftSize(size_t fft_size, FftType fft_type) {
+  if (fft_size == 0) {
+    return false;
+  }
+  // PFFFT only supports transforms for inputs of length N of the form
+  // N = (2^a)*(3^b)*(5^c) where b >= 0 and c >= 0 and a >= 5 for the real FFT
+  // and a >= 4 for the complex FFT.
+  constexpr int kFactors[] = {2, 3, 5};
+  int factorization[] = {0, 0, 0};
+  int n = static_cast<int>(fft_size);
+  for (int i = 0; i < 3; ++i) {
+    while (n % kFactors[i] == 0) {
+      n = n / kFactors[i];
+      factorization[i]++;
+    }
+  }
+  int a_min = (fft_type == Pffft::FftType::kReal) ? 5 : 4;
+  return factorization[0] >= a_min && n == 1;
+}
+
+bool Pffft::IsSimdEnabled() {
+  return pffft_simd_size() > 1;
+}
+
+std::unique_ptr<Pffft::FloatBuffer> Pffft::CreateBuffer() const {
+  // Cannot use make_unique from absl because Pffft is the only friend of
+  // Pffft::FloatBuffer.
+  std::unique_ptr<Pffft::FloatBuffer> buffer(
+      new Pffft::FloatBuffer(fft_size_, fft_type_));
+  return buffer;
+}
+
+void Pffft::ForwardTransform(const FloatBuffer& in,
+                             FloatBuffer* out,
+                             bool ordered) {
+  RTC_DCHECK_EQ(in.size(), GetBufferSize(fft_size_, fft_type_));
+  RTC_DCHECK_EQ(in.size(), out->size());
+  RTC_DCHECK(scratch_buffer_);
+  if (ordered) {
+    pffft_transform_ordered(pffft_status_, in.const_data(), out->data(),
+                            scratch_buffer_, PFFFT_FORWARD);
+  } else {
+    pffft_transform(pffft_status_, in.const_data(), out->data(),
+                    scratch_buffer_, PFFFT_FORWARD);
+  }
+}
+
+void Pffft::BackwardTransform(const FloatBuffer& in,
+                              FloatBuffer* out,
+                              bool ordered) {
+  RTC_DCHECK_EQ(in.size(), GetBufferSize(fft_size_, fft_type_));
+  RTC_DCHECK_EQ(in.size(), out->size());
+  RTC_DCHECK(scratch_buffer_);
+  if (ordered) {
+    pffft_transform_ordered(pffft_status_, in.const_data(), out->data(),
+                            scratch_buffer_, PFFFT_BACKWARD);
+  } else {
+    pffft_transform(pffft_status_, in.const_data(), out->data(),
+                    scratch_buffer_, PFFFT_BACKWARD);
+  }
+}
+
+void Pffft::FrequencyDomainConvolve(const FloatBuffer& fft_x,
+                                    const FloatBuffer& fft_y,
+                                    FloatBuffer* out,
+                                    float scaling) {
+  RTC_DCHECK_EQ(fft_x.size(), GetBufferSize(fft_size_, fft_type_));
+  RTC_DCHECK_EQ(fft_x.size(), fft_y.size());
+  RTC_DCHECK_EQ(fft_x.size(), out->size());
+  pffft_zconvolve_accumulate(pffft_status_, fft_x.const_data(),
+                             fft_y.const_data(), out->data(), scaling);
+}
+
+}  // namespace webrtc
diff --git a/audio_processing/utility/pffft_wrapper.h b/audio_processing/utility/pffft_wrapper.h
new file mode 100644
index 0000000..160f0da
--- /dev/null
+++ b/audio_processing/utility/pffft_wrapper.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_PFFFT_WRAPPER_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_PFFFT_WRAPPER_H_
+
+#include <memory>
+
+#include "api/array_view.h"
+
+// Forward declaration.
+struct PFFFT_Setup;
+
+namespace webrtc {
+
+// Pretty-Fast Fast Fourier Transform (PFFFT) wrapper class.
+// Not thread safe.
+class Pffft {
+ public:
+  enum class FftType { kReal, kComplex };
+
+  // 1D floating point buffer used as input/output data type for the FFT ops.
+  // It must be constructed using Pffft::CreateBuffer().
+  class FloatBuffer {
+   public:
+    FloatBuffer(const FloatBuffer&) = delete;
+    FloatBuffer& operator=(const FloatBuffer&) = delete;
+    ~FloatBuffer();
+
+    rtc::ArrayView<const float> GetConstView() const;
+    rtc::ArrayView<float> GetView();
+
+   private:
+    friend class Pffft;
+    FloatBuffer(size_t fft_size, FftType fft_type);
+    const float* const_data() const { return data_; }
+    float* data() { return data_; }
+    size_t size() const { return size_; }
+
+    const size_t size_;
+    float* const data_;
+  };
+
+  // TODO(https://crbug.com/webrtc/9577): Consider adding a factory and making
+  // the ctor private.
+  // static std::unique_ptr<Pffft> Create(size_t fft_size,
+  // FftType fft_type); Ctor. |fft_size| must be a supported size (see
+  // Pffft::IsValidFftSize()). If not supported, the code will crash.
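+  //
+  // Minimal usage sketch (illustrative only; 256 = 2^8 is one size accepted
+  // by IsValidFftSize() for the real FFT):
+  //   Pffft fft(/*fft_size=*/256, Pffft::FftType::kReal);
+  //   auto in = fft.CreateBuffer();
+  //   auto out = fft.CreateBuffer();
+  //   fft.ForwardTransform(*in, out.get(), /*ordered=*/true);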
+  Pffft(size_t fft_size, FftType fft_type);
+  Pffft(const Pffft&) = delete;
+  Pffft& operator=(const Pffft&) = delete;
+  ~Pffft();
+
+  // Returns true if the FFT size is supported.
+  static bool IsValidFftSize(size_t fft_size, FftType fft_type);
+
+  // Returns true if SIMD code optimizations are being used.
+  static bool IsSimdEnabled();
+
+  // Creates a buffer of the right size.
+  std::unique_ptr<FloatBuffer> CreateBuffer() const;
+
+  // TODO(https://crbug.com/webrtc/9577): Overload with rtc::ArrayView args.
+  // Computes the forward fast Fourier transform.
+  void ForwardTransform(const FloatBuffer& in, FloatBuffer* out, bool ordered);
+  // Computes the backward fast Fourier transform.
+  void BackwardTransform(const FloatBuffer& in, FloatBuffer* out, bool ordered);
+
+  // Multiplies the frequency components of |fft_x| and |fft_y| and accumulates
+  // them into |out|. The arrays must have been obtained with
+  // ForwardTransform(..., /*ordered=*/false) - i.e., |fft_x| and |fft_y| must
+  // not be ordered.
+  void FrequencyDomainConvolve(const FloatBuffer& fft_x,
+                               const FloatBuffer& fft_y,
+                               FloatBuffer* out,
+                               float scaling = 1.f);
+
+ private:
+  const size_t fft_size_;
+  const FftType fft_type_;
+  PFFFT_Setup* pffft_status_;
+  float* const scratch_buffer_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_UTILITY_PFFFT_WRAPPER_H_
diff --git a/audio_processing/utility/pffft_wrapper_unittest.cc b/audio_processing/utility/pffft_wrapper_unittest.cc
new file mode 100644
index 0000000..d42abaf
--- /dev/null
+++ b/audio_processing/utility/pffft_wrapper_unittest.cc
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing/utility/pffft_wrapper.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <memory>
+
+#include "test/gtest.h"
+#include "third_party/pffft/src/pffft.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+constexpr size_t kMaxValidSizeCheck = 1024;
+
+static constexpr int kFftSizes[] = {
+    16,  32,      64,  96,  128,  160,  192,  256,  288,  384,   5 * 96, 512,
+    576, 5 * 128, 800, 864, 1024, 2048, 2592, 4000, 4096, 12000, 36864};
+
+void CreatePffftWrapper(size_t fft_size, Pffft::FftType fft_type) {
+  Pffft pffft_wrapper(fft_size, fft_type);
+}
+
+float* AllocateScratchBuffer(size_t fft_size, bool complex_fft) {
+  return static_cast<float*>(
+      pffft_aligned_malloc(fft_size * (complex_fft ? 2 : 1) * sizeof(float)));
+}
+
+double frand() {
+  return std::rand() / static_cast<double>(RAND_MAX);
+}
+
+void ExpectArrayViewsEquality(rtc::ArrayView<const float> a,
+                              rtc::ArrayView<const float> b) {
+  ASSERT_EQ(a.size(), b.size());
+  for (size_t i = 0; i < a.size(); ++i) {
+    SCOPED_TRACE(i);
+    EXPECT_EQ(a[i], b[i]);
+  }
+}
+
+// Compares the output of the PFFFT C++ wrapper to that of the C PFFFT.
+// Bit-exactness is expected.
+void PffftValidateWrapper(size_t fft_size, bool complex_fft) {
+  // Always use the same seed to avoid flakiness.
+  std::srand(0);
+
+  // Init PFFFT.
+  PFFFT_Setup* pffft_status =
+      pffft_new_setup(fft_size, complex_fft ? PFFFT_COMPLEX : PFFFT_REAL);
+  ASSERT_TRUE(pffft_status) << "FFT size (" << fft_size << ") not supported.";
+  size_t num_floats = fft_size * (complex_fft ? 2 : 1);
+  int num_bytes = static_cast<int>(num_floats) * sizeof(float);
+  float* in = static_cast<float*>(pffft_aligned_malloc(num_bytes));
+  float* out = static_cast<float*>(pffft_aligned_malloc(num_bytes));
+  float* scratch = AllocateScratchBuffer(fft_size, complex_fft);
+
+  // Init PFFFT C++ wrapper.
+  Pffft::FftType fft_type =
+      complex_fft ? Pffft::FftType::kComplex : Pffft::FftType::kReal;
+  ASSERT_TRUE(Pffft::IsValidFftSize(fft_size, fft_type));
+  Pffft pffft_wrapper(fft_size, fft_type);
+  auto in_wrapper = pffft_wrapper.CreateBuffer();
+  auto out_wrapper = pffft_wrapper.CreateBuffer();
+
+  // Input and output buffers views.
+  rtc::ArrayView<float> in_view(in, num_floats);
+  rtc::ArrayView<float> out_view(out, num_floats);
+  auto in_wrapper_view = in_wrapper->GetView();
+  EXPECT_EQ(in_wrapper_view.size(), num_floats);
+  auto out_wrapper_view = out_wrapper->GetConstView();
+  EXPECT_EQ(out_wrapper_view.size(), num_floats);
+
+  // Random input data.
+  for (size_t i = 0; i < num_floats; ++i) {
+    in_wrapper_view[i] = in[i] = static_cast<float>(frand() * 2.0 - 1.0);
+  }
+
+  // Forward transform.
+  pffft_transform(pffft_status, in, out, scratch, PFFFT_FORWARD);
+  pffft_wrapper.ForwardTransform(*in_wrapper, out_wrapper.get(),
+                                 /*ordered=*/false);
+  ExpectArrayViewsEquality(out_view, out_wrapper_view);
+
+  // Copy the FFT results into the input buffers to compute the backward FFT.
+  std::copy(out_view.begin(), out_view.end(), in_view.begin());
+  std::copy(out_wrapper_view.begin(), out_wrapper_view.end(),
+            in_wrapper_view.begin());
+
+  // Backward transform.
+  pffft_transform(pffft_status, in, out, scratch, PFFFT_BACKWARD);
+  pffft_wrapper.BackwardTransform(*in_wrapper, out_wrapper.get(),
+                                  /*ordered=*/false);
+  ExpectArrayViewsEquality(out_view, out_wrapper_view);
+
+  pffft_destroy_setup(pffft_status);
+  pffft_aligned_free(in);
+  pffft_aligned_free(out);
+  pffft_aligned_free(scratch);
+}
+
+}  // namespace
+
+TEST(PffftTest, CreateWrapperWithValidSize) {
+  for (size_t fft_size = 0; fft_size < kMaxValidSizeCheck; ++fft_size) {
+    SCOPED_TRACE(fft_size);
+    if (Pffft::IsValidFftSize(fft_size, Pffft::FftType::kReal)) {
+      CreatePffftWrapper(fft_size, Pffft::FftType::kReal);
+    }
+    if (Pffft::IsValidFftSize(fft_size, Pffft::FftType::kComplex)) {
+      CreatePffftWrapper(fft_size, Pffft::FftType::kComplex);
+    }
+  }
+}
+
+#if !defined(NDEBUG) && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+class PffftInvalidSizeTest : public ::testing::Test,
+                             public ::testing::WithParamInterface<size_t> {};
+
+TEST_P(PffftInvalidSizeTest, DoNotCreateRealWrapper) {
+  size_t fft_size = GetParam();
+  ASSERT_FALSE(Pffft::IsValidFftSize(fft_size, Pffft::FftType::kReal));
+  EXPECT_DEATH(CreatePffftWrapper(fft_size, Pffft::FftType::kReal), "");
+}
+
+TEST_P(PffftInvalidSizeTest, DoNotCreateComplexWrapper) {
+  size_t fft_size = GetParam();
+  ASSERT_FALSE(Pffft::IsValidFftSize(fft_size, Pffft::FftType::kComplex));
+  EXPECT_DEATH(CreatePffftWrapper(fft_size, Pffft::FftType::kComplex), "");
+}
+
+INSTANTIATE_TEST_SUITE_P(PffftTest,
+                         PffftInvalidSizeTest,
+                         ::testing::Values(17,
+                                           33,
+                                           65,
+                                           97,
+                                           129,
+                                           161,
+                                           193,
+                                           257,
+                                           289,
+                                           385,
+                                           481,
+                                           513,
+                                           577,
+                                           641,
+                                           801,
+                                           865,
+                                           1025));
+
+#endif
+
+// TODO(https://crbug.com/webrtc/9577): Enable once SIMD is always enabled.
+TEST(PffftTest, DISABLED_CheckSimd) { + EXPECT_TRUE(Pffft::IsSimdEnabled()); +} + +TEST(PffftTest, FftBitExactness) { + for (int fft_size : kFftSizes) { + SCOPED_TRACE(fft_size); + if (fft_size != 16) { + PffftValidateWrapper(fft_size, /*complex_fft=*/false); + } + PffftValidateWrapper(fft_size, /*complex_fft=*/true); + } +} + +} // namespace test +} // namespace webrtc diff --git a/base.vcxproj b/base.vcxproj new file mode 100644 index 0000000..38734ad --- /dev/null +++ b/base.vcxproj @@ -0,0 +1,255 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {C0E01E32-A6A3-439D-A3D5-BECB3EFAE67F} + rtc_base + 8.1 + base + + + + StaticLibrary + true + v140 + MultiByte + + + StaticLibrary + false + v140 + true + MultiByte + + + StaticLibrary + true + v140 + MultiByte + + + StaticLibrary + false + v140 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)output\$(Configuration)_$(Platform)\ + $(OutDir)$(ProjectName)\ + + + $(SolutionDir)output\$(Configuration)_$(Platform)\ + $(OutDir)$(ProjectName)\ + + + $(SolutionDir)output\$(Configuration)_$(Platform)\ + $(OutDir)$(ProjectName)\ + + + $(SolutionDir)output\$(Configuration)_$(Platform)\ + $(OutDir)$(ProjectName)\ + + + + Level1 + Disabled + true + $(SolutionDir);$(SolutionDir)base\;$(SolutionDir)base\abseil;$(SolutionDir)base\jsoncpp\include;%(AdditionalIncludeDirectories) + WEBRTC_WIN;WIN32;_WINDOWS;_DEBUG;NOMINMAX;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;WIN32_LEAN_AND_MEAN + 4005;4068;4180;4244;4267;4503;4800 + + + $(SolutionDir)libs\$(Configuration)_$(Platform)\;%(AdditionalLibraryDirectories) + jsoncpp.lib;winmm.lib;%(AdditionalDependencies) + + + + + Level3 + Disabled + true + $(SolutionDir);$(SolutionDir)base\;$(SolutionDir)base\abseil;$(SolutionDir)base\jsoncpp\include;%(AdditionalIncludeDirectories) + WEBRTC_WIN;WIN32;_WINDOWS;_DEBUG;NOMINMAX;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;WIN32_LEAN_AND_MEAN + 4005;4068;4180;4244;4267;4503;4800 + + + + + Level3 + MaxSpeed + true + true + true + $(SolutionDir);$(SolutionDir)base\;$(SolutionDir)base\abseil;$(SolutionDir)base\jsoncpp\include;%(AdditionalIncludeDirectories) + WEBRTC_WIN;WIN32;_WINDOWS;DEBUG;NOMINMAX;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;WIN32_LEAN_AND_MEAN + 4005;4068;4180;4244;4267;4503;4800 + + + true + true + + + + + Level3 + MaxSpeed + true + true + true + $(SolutionDir);$(SolutionDir)base\;$(SolutionDir)base\abseil;$(SolutionDir)base\jsoncpp\include;%(AdditionalIncludeDirectories) + WEBRTC_WIN;WIN32;_WINDOWS;DEBUG;NOMINMAX;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;WIN32_LEAN_AND_MEAN + 4005;4068;4180;4244;4267;4503;4800 + + + true + true + + + + + + \ No newline at end of file diff --git a/base.vcxproj.filters b/base.vcxproj.filters new file mode 100644 index 0000000..1833f1c --- /dev/null +++ b/base.vcxproj.filters @@ -0,0 +1,366 @@ + + + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;xsd + + + {467defae-3b30-48b1-9e70-51348a6ac824} + + + {096c7b42-dd07-4d67-af55-7673f93573a2} + + + {92ba31d4-3aaa-4f96-8a9b-4f5ccb045100} + + + {5148e8e4-407f-4ee6-a06a-f49dd24649a0} + + + {87175455-5786-4208-83be-dad1baca05f7} + + + {2761aa9c-6b7f-4670-bdf7-edfd5c86aa22} + + + {99003496-e818-4b3d-92db-1cdecaf86260} + + + {57cb3bd2-ad46-49b8-8dfa-2d4e8e0d1a9a} + + + 
{b9652da2-4cca-49bd-ac31-c08b1c249e95} + + + {2de807a6-b817-490f-b20b-d6ff5bcae9be} + + + {00ca39fa-717a-49db-a255-a28d2840c1db} + + + {f242ba37-7766-43f9-91ee-5aa3eee664f8} + + + {92e24cf5-f0d4-47e2-8a8f-f1c7a5dcd38b} + + + {9ba374ad-9068-4b42-8ad4-99e3a2d444bf} + + + {74da6884-9209-4720-baf0-0f1ffe73ab86} + + + + + system_wrappers + + + system_wrappers + + + system_wrappers + + + rtc_base\memory + + + rtc_base\numerics + + + rtc_base\numerics + + + rtc_base\numerics + + + rtc_base\numerics + + + rtc_base\strings + + + rtc_base\strings + + + rtc_base\system + + + rtc_base\system + + + rtc_base\system + + + rtc_base\system + + + rtc_base\system + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + jsoncpp\Public API + + + jsoncpp\Public API + + + jsoncpp\Public API + + + jsoncpp\Public API + + + jsoncpp\Public API + + + jsoncpp\Public API + + + jsoncpp\Public API + + + jsoncpp\Public API + + + jsoncpp\Public API + + + jsoncpp\Public API + + + jsoncpp\Header Files + + + jsoncpp\Header Files + + + absl\bad_optional_access + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\strings\Header Files + + + absl\throw_delegate + + + + + system_wrappers + + + system_wrappers + + + system_wrappers + + + system_wrappers + + + system_wrappers + + + rtc_base\memory + + + rtc_base\strings + + + rtc_base\strings + + + rtc_base\system + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + rtc_base + + + jsoncpp\Source Files + + + jsoncpp\Source Files + + + jsoncpp\Source Files + + + absl\bad_optional_access + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\strings\Source Files + + + absl\throw_delegate + + + + + jsoncpp\Header Files + + + \ No newline at end of file diff --git a/base.vcxproj.user b/base.vcxproj.user new file mode 100644 index 0000000..abe8dd8 --- /dev/null +++ b/base.vcxproj.user @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/base/abseil/.clang-format b/base/abseil/.clang-format new file mode 100644 index 0000000..06ea346 --- /dev/null +++ b/base/abseil/.clang-format @@ -0,0 +1,4 @@ +--- +Language: Cpp +BasedOnStyle: Google +... diff --git a/base/abseil/.gitignore b/base/abseil/.gitignore new file mode 100644 index 0000000..d54fa5a --- /dev/null +++ b/base/abseil/.gitignore @@ -0,0 +1,15 @@ +# Ignore all bazel-* symlinks. 
+/bazel-* +# Ignore Bazel verbose explanations +--verbose_explanations +# Ignore CMake usual build directory +build +# Ignore Vim files +*.swp +# Ignore QtCreator Project file +CMakeLists.txt.user +# Ignore VS Code files +.vscode/* +# Ignore generated python artifacts +*.pyc +copts/__pycache__/ diff --git a/base/abseil/ABSEIL_ISSUE_TEMPLATE.md b/base/abseil/ABSEIL_ISSUE_TEMPLATE.md new file mode 100644 index 0000000..ed5461f --- /dev/null +++ b/base/abseil/ABSEIL_ISSUE_TEMPLATE.md @@ -0,0 +1,22 @@ +Please submit a new Abseil Issue using the template below: + +## [Short title of proposed API change(s)] + +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- + +## Background + +[Provide the background information that is required in order to evaluate the +proposed API changes. No controversial claims should be made here. If there are +design constraints that need to be considered, they should be presented here +**along with justification for those constraints**. Linking to other docs is +good, but please keep the **pertinent information as self contained** as +possible in this section.] + +## Proposed API Change (s) + +[Please clearly describe the API change(s) being proposed. If multiple changes, +please keep them clearly distinguished. When possible, **use example code +snippets to illustrate before-after API usages**. List pros-n-cons. Highlight +the main questions that you want to be answered. Given the Abseil project compatibility requirements, describe why the API change is safe.] diff --git a/base/abseil/AUTHORS b/base/abseil/AUTHORS new file mode 100644 index 0000000..976d31d --- /dev/null +++ b/base/abseil/AUTHORS @@ -0,0 +1,6 @@ +# This is the list of Abseil authors for copyright purposes. +# +# This does not necessarily list everyone who has contributed code, since in +# some cases, their employer may be the copyright holder. To see the full list +# of contributors, see the revision history in source control. +Google Inc. diff --git a/base/abseil/CMake/AbseilHelpers.cmake b/base/abseil/CMake/AbseilHelpers.cmake new file mode 100644 index 0000000..28fefaa --- /dev/null +++ b/base/abseil/CMake/AbseilHelpers.cmake @@ -0,0 +1,258 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +include(CMakeParseArguments) +include(AbseilConfigureCopts) +include(AbseilInstallDirs) + +# The IDE folder for Abseil that will be used if Abseil is included in a CMake +# project that sets +# set_property(GLOBAL PROPERTY USE_FOLDERS ON) +# For example, Visual Studio supports folders. +set(ABSL_IDE_FOLDER Abseil) + +# absl_cc_library() +# +# CMake function to imitate Bazel's cc_library rule. 
+# +# Parameters: +# NAME: name of target (see Note) +# HDRS: List of public header files for the library +# SRCS: List of source files for the library +# DEPS: List of other libraries to be linked in to the binary targets +# COPTS: List of private compile options +# DEFINES: List of public defines +# LINKOPTS: List of link options +# PUBLIC: Add this so that this library will be exported under absl:: +# Also in IDE, target will appear in Abseil folder while non PUBLIC will be in Abseil/internal. +# TESTONLY: When added, this target will only be built if user passes -DABSL_RUN_TESTS=ON to CMake. +# +# Note: +# By default, absl_cc_library will always create a library named absl_${NAME}, +# and alias target absl::${NAME}. The absl:: form should always be used. +# This is to reduce namespace pollution. +# +# absl_cc_library( +# NAME +# awesome +# HDRS +# "a.h" +# SRCS +# "a.cc" +# ) +# absl_cc_library( +# NAME +# fantastic_lib +# SRCS +# "b.cc" +# DEPS +# absl::awesome # not "awesome" ! +# PUBLIC +# ) +# +# absl_cc_library( +# NAME +# main_lib +# ... +# DEPS +# absl::fantastic_lib +# ) +# +# TODO: Implement "ALWAYSLINK" +function(absl_cc_library) + cmake_parse_arguments(ABSL_CC_LIB + "DISABLE_INSTALL;PUBLIC;TESTONLY" + "NAME" + "HDRS;SRCS;COPTS;DEFINES;LINKOPTS;DEPS" + ${ARGN} + ) + + if(NOT ABSL_CC_LIB_TESTONLY OR ABSL_RUN_TESTS) + if(ABSL_ENABLE_INSTALL) + set(_NAME "${ABSL_CC_LIB_NAME}") + else() + set(_NAME "absl_${ABSL_CC_LIB_NAME}") + endif() + + # Check if this is a header-only library + # Note that as of February 2019, many popular OS's (for example, Ubuntu + # 16.04 LTS) only come with cmake 3.5 by default. For this reason, we can't + # use list(FILTER...) + set(ABSL_CC_SRCS "${ABSL_CC_LIB_SRCS}") + foreach(src_file IN LISTS ABSL_CC_SRCS) + if(${src_file} MATCHES ".*\\.(h|inc)") + list(REMOVE_ITEM ABSL_CC_SRCS "${src_file}") + endif() + endforeach() + if("${ABSL_CC_SRCS}" STREQUAL "") + set(ABSL_CC_LIB_IS_INTERFACE 1) + else() + set(ABSL_CC_LIB_IS_INTERFACE 0) + endif() + + if(NOT ABSL_CC_LIB_IS_INTERFACE) + # CMake creates static libraries by default. Users can specify + # -DBUILD_SHARED_LIBS=ON during initial configuration to build shared + # libraries instead. + add_library(${_NAME} "") + target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS}) + target_include_directories(${_NAME} + PUBLIC + "$" + $ + ) + target_compile_options(${_NAME} + PRIVATE ${ABSL_CC_LIB_COPTS}) + target_link_libraries(${_NAME} + PUBLIC ${ABSL_CC_LIB_DEPS} + PRIVATE + ${ABSL_CC_LIB_LINKOPTS} + ${ABSL_DEFAULT_LINKOPTS} + ) + target_compile_definitions(${_NAME} PUBLIC ${ABSL_CC_LIB_DEFINES}) + + # Add all Abseil targets to a a folder in the IDE for organization. + if(ABSL_CC_LIB_PUBLIC) + set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}) + elseif(ABSL_CC_LIB_TESTONLY) + set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test) + else() + set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/internal) + endif() + + # INTERFACE libraries can't have the CXX_STANDARD property set + set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD}) + set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) + + # When being installed, we lose the absl_ prefix. We want to put it back + # to have properly named lib files. This is a no-op when we are not being + # installed. 
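+      # (For example, an installed "strings" target still produces
+      # libabsl_strings.a rather than libstrings.a.)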
+ set_target_properties(${_NAME} PROPERTIES + OUTPUT_NAME "absl_${_NAME}" + ) + else() + # Generating header-only library + add_library(${_NAME} INTERFACE) + target_include_directories(${_NAME} + INTERFACE + "$" + $ + ) + target_link_libraries(${_NAME} + INTERFACE + ${ABSL_CC_LIB_DEPS} + ${ABSL_CC_LIB_LINKOPTS} + ${ABSL_DEFAULT_LINKOPTS} + ) + target_compile_definitions(${_NAME} INTERFACE ${ABSL_CC_LIB_DEFINES}) + endif() + + # TODO currently we don't install googletest alongside abseil sources, so + # installed abseil can't be tested. + if(NOT ABSL_CC_LIB_TESTONLY AND ABSL_ENABLE_INSTALL) + install(TARGETS ${_NAME} EXPORT ${PROJECT_NAME}Targets + RUNTIME DESTINATION ${ABSL_INSTALL_BINDIR} + LIBRARY DESTINATION ${ABSL_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${ABSL_INSTALL_LIBDIR} + ) + endif() + + add_library(absl::${ABSL_CC_LIB_NAME} ALIAS ${_NAME}) + endif() +endfunction() + +# absl_cc_test() +# +# CMake function to imitate Bazel's cc_test rule. +# +# Parameters: +# NAME: name of target (see Usage below) +# SRCS: List of source files for the binary +# DEPS: List of other libraries to be linked in to the binary targets +# COPTS: List of private compile options +# DEFINES: List of public defines +# LINKOPTS: List of link options +# +# Note: +# By default, absl_cc_test will always create a binary named absl_${NAME}. +# This will also add it to ctest list as absl_${NAME}. +# +# Usage: +# absl_cc_library( +# NAME +# awesome +# HDRS +# "a.h" +# SRCS +# "a.cc" +# PUBLIC +# ) +# +# absl_cc_test( +# NAME +# awesome_test +# SRCS +# "awesome_test.cc" +# DEPS +# absl::awesome +# gmock +# gtest_main +# ) +function(absl_cc_test) + if(NOT ABSL_RUN_TESTS) + return() + endif() + + cmake_parse_arguments(ABSL_CC_TEST + "" + "NAME" + "SRCS;COPTS;DEFINES;LINKOPTS;DEPS" + ${ARGN} + ) + + set(_NAME "absl_${ABSL_CC_TEST_NAME}") + add_executable(${_NAME} "") + target_sources(${_NAME} PRIVATE ${ABSL_CC_TEST_SRCS}) + target_include_directories(${_NAME} + PUBLIC ${ABSL_COMMON_INCLUDE_DIRS} + PRIVATE ${GMOCK_INCLUDE_DIRS} ${GTEST_INCLUDE_DIRS} + ) + target_compile_definitions(${_NAME} + PUBLIC ${ABSL_CC_TEST_DEFINES} + ) + target_compile_options(${_NAME} + PRIVATE ${ABSL_CC_TEST_COPTS} + ) + target_link_libraries(${_NAME} + PUBLIC ${ABSL_CC_TEST_DEPS} + PRIVATE ${ABSL_CC_TEST_LINKOPTS} + ) + # Add all Abseil targets to a a folder in the IDE for organization. + set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test) + + set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD}) + set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) + + add_test(NAME ${_NAME} COMMAND ${_NAME}) +endfunction() + + +function(check_target my_target) + if(NOT TARGET ${my_target}) + message(FATAL_ERROR " ABSL: compiling absl requires a ${my_target} CMake target in your project, + see CMake/README.md for more details") + endif(NOT TARGET ${my_target}) +endfunction() diff --git a/base/abseil/CMake/AbseilInstallDirs.cmake b/base/abseil/CMake/AbseilInstallDirs.cmake new file mode 100644 index 0000000..b67272f --- /dev/null +++ b/base/abseil/CMake/AbseilInstallDirs.cmake @@ -0,0 +1,20 @@ +include(GNUInstallDirs) + +# absl_VERSION is only set if we are an LTS release being installed, in which +# case it may be into a system directory and so we need to make subdirectories +# for each installed version of Abseil. This mechanism is implemented in +# Abseil's internal Copybara (https://github.com/google/copybara) workflows and +# isn't visible in the CMake buildsystem itself. 
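+#
+# For example, with an assumed LTS version string "20190808", headers would be
+# installed under "${CMAKE_INSTALL_INCLUDEDIR}/absl_20190808".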
+
+if(absl_VERSION)
+  set(ABSL_SUBDIR "${PROJECT_NAME}_${PROJECT_VERSION}")
+  set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}/${ABSL_SUBDIR}")
+  set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${ABSL_SUBDIR}")
+  set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/${ABSL_SUBDIR}")
+  set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/${ABSL_SUBDIR}")
+else()
+  set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}")
+  set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
+  set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}")
+  set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}")
+endif()
\ No newline at end of file
diff --git a/base/abseil/CMake/Googletest/CMakeLists.txt.in b/base/abseil/CMake/Googletest/CMakeLists.txt.in
new file mode 100644
index 0000000..d60a33e
--- /dev/null
+++ b/base/abseil/CMake/Googletest/CMakeLists.txt.in
@@ -0,0 +1,15 @@
+cmake_minimum_required(VERSION 2.8.2)
+
+project(googletest-download NONE)
+
+include(ExternalProject)
+ExternalProject_Add(googletest
+  GIT_REPOSITORY    https://github.com/google/googletest.git
+  GIT_TAG           master
+  SOURCE_DIR        "${CMAKE_BINARY_DIR}/googletest-src"
+  BINARY_DIR        "${CMAKE_BINARY_DIR}/googletest-build"
+  CONFIGURE_COMMAND ""
+  BUILD_COMMAND     ""
+  INSTALL_COMMAND   ""
+  TEST_COMMAND      ""
+)
\ No newline at end of file
diff --git a/base/abseil/CMake/Googletest/DownloadGTest.cmake b/base/abseil/CMake/Googletest/DownloadGTest.cmake
new file mode 100644
index 0000000..3c682ae
--- /dev/null
+++ b/base/abseil/CMake/Googletest/DownloadGTest.cmake
@@ -0,0 +1,32 @@
+# Downloads and unpacks googletest at configure time. Based on the instructions
+# at https://github.com/google/googletest/tree/master/googletest#incorporating-into-an-existing-cmake-project
+
+# Download the latest googletest from Github master
+configure_file(
+  ${CMAKE_CURRENT_LIST_DIR}/CMakeLists.txt.in
+  ${CMAKE_BINARY_DIR}/googletest-download/CMakeLists.txt
+)
+
+# Configure and build the downloaded googletest source
+execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
+  RESULT_VARIABLE result
+  WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/googletest-download )
+if(result)
+  message(FATAL_ERROR "CMake step for googletest failed: ${result}")
+endif()
+
+execute_process(COMMAND ${CMAKE_COMMAND} --build .
+  RESULT_VARIABLE result
+  WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/googletest-download)
+if(result)
+  message(FATAL_ERROR "Build step for googletest failed: ${result}")
+endif()
+
+# Prevent overriding the parent project's compiler/linker settings on Windows
+set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
+
+# Add googletest directly to our build. This defines the gtest and gtest_main
+# targets.
+add_subdirectory(${CMAKE_BINARY_DIR}/googletest-src
+                 ${CMAKE_BINARY_DIR}/googletest-build
+                 EXCLUDE_FROM_ALL)
diff --git a/base/abseil/CMake/README.md b/base/abseil/CMake/README.md
new file mode 100644
index 0000000..04d5df3
--- /dev/null
+++ b/base/abseil/CMake/README.md
@@ -0,0 +1,101 @@
+# Abseil CMake Build Instructions
+
+Abseil comes with a CMake build script ([CMakeLists.txt](../CMakeLists.txt))
+that can be used on a wide range of platforms ("C" stands for cross-platform).
+If you don't have CMake installed already, you can download it for free from
+<https://www.cmake.org/>.
+
+CMake works by generating native makefiles or build projects that can
+be used in the compiler environment of your choice.
+
+For API/ABI compatibility reasons, we strongly recommend building Abseil in a
+subdirectory of your project or as an embedded dependency.
+
+## Incorporating Abseil Into a CMake Project
+
+The recommendations below are similar to those for using CMake within the
+googletest framework
+(<https://github.com/google/googletest/blob/master/googletest/README.md#incorporating-into-an-existing-cmake-project>)
+
+### Step-by-Step Instructions
+
+1. If you want to build the Abseil tests, integrate the Abseil dependency
+[Google Test](https://github.com/google/googletest) into your CMake project. To disable Abseil tests, you have to pass
+`-DBUILD_TESTING=OFF` when configuring your project with CMake.
+
+2. Download Abseil and copy it into a subdirectory in your CMake project or add
+Abseil as a [git submodule](https://git-scm.com/docs/git-submodule) in your
+CMake project.
+
+3. You can then use the CMake command
+[`add_subdirectory()`](https://cmake.org/cmake/help/latest/command/add_subdirectory.html)
+to include Abseil directly in your CMake project.
+
+4. Add the **absl::** target you wish to use to the
+[`target_link_libraries()`](https://cmake.org/cmake/help/latest/command/target_link_libraries.html)
+section of your executable or of your library.
+Here is a short CMakeLists.txt example of a project file using Abseil. + +```cmake +cmake_minimum_required(VERSION 3.5) +project(my_project) + +# Pick the C++ standard to compile with. +# Abseil currently supports C++11, C++14, and C++17. +set(CMAKE_CXX_STANDARD 11) + +add_subdirectory(abseil-cpp) + +add_executable(my_exe source.cpp) +target_link_libraries(my_exe absl::base absl::synchronization absl::strings) +``` + +### Running Abseil Tests with CMake + +Use the `-DABSL_RUN_TESTS=ON` flag to run Abseil tests. Note that if the `-DBUILD_TESTING=OFF` flag is passed then Abseil tests will not be run. + +You will need to provide Abseil with a Googletest dependency. There are two +options for how to do this: + +* Use `-DABSL_USE_GOOGLETEST_HEAD`. This will automatically download the latest +Googletest source into the build directory at configure time. Googletest will +then be compiled directly alongside Abseil's tests. +* Manually integrate Googletest with your build. See +https://github.com/google/googletest/blob/master/googletest/README.md#using-cmake +for more information on using Googletest in a CMake project. + +For example, to run just the Abseil tests, you could use this script: + +``` +cd path/to/abseil-cpp +mkdir build +cd build +cmake -DABSL_USE_GOOGLETEST_HEAD=ON -DABSL_RUN_TESTS=ON .. +make -j +ctest +``` + +Currently, we only run our tests with CMake in a Linux environment, but we are +working on the rest of our supported platforms. See +https://github.com/abseil/abseil-cpp/projects/1 and +https://github.com/abseil/abseil-cpp/issues/109 for more information. + +### Available Abseil CMake Public Targets + +Here's a non-exhaustive list of Abseil CMake public targets: + +```cmake +absl::algorithm +absl::base +absl::debugging +absl::flat_hash_map +absl::flags +absl::memory +absl::meta +absl::numeric +absl::random +absl::strings +absl::synchronization +absl::time +absl::utility +``` diff --git a/base/abseil/CMake/abslConfig.cmake.in b/base/abseil/CMake/abslConfig.cmake.in new file mode 100644 index 0000000..60847fa --- /dev/null +++ b/base/abseil/CMake/abslConfig.cmake.in @@ -0,0 +1,7 @@ +# absl CMake configuration file. + +include(FindThreads) + +@PACKAGE_INIT@ + +include ("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake") \ No newline at end of file diff --git a/base/abseil/CMake/install_test_project/CMakeLists.txt b/base/abseil/CMake/install_test_project/CMakeLists.txt new file mode 100644 index 0000000..06b797e --- /dev/null +++ b/base/abseil/CMake/install_test_project/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# Copyright 2019 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+# A simple CMakeLists.txt for testing cmake installation
+
+cmake_minimum_required(VERSION 3.5)
+project(absl_cmake_testing CXX)
+
+set(CMAKE_CXX_STANDARD 11)
+
+add_executable(simple simple.cc)
+
+find_package(absl REQUIRED)
+
+target_link_libraries(simple absl::strings)
diff --git a/base/abseil/CMake/install_test_project/simple.cc b/base/abseil/CMake/install_test_project/simple.cc
new file mode 100644
index 0000000..e9e3529
--- /dev/null
+++ b/base/abseil/CMake/install_test_project/simple.cc
@@ -0,0 +1,23 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <iostream>
+#include "absl/strings/substitute.h"
+
+int main(int argc, char** argv) {
+  for (int i = 0; i < argc; ++i) {
+    std::cout << absl::Substitute("Arg $0: $1\n", i, argv[i]);
+  }
+}
diff --git a/base/abseil/CMake/install_test_project/test.sh b/base/abseil/CMake/install_test_project/test.sh
new file mode 100644
index 0000000..99989b0
--- /dev/null
+++ b/base/abseil/CMake/install_test_project/test.sh
@@ -0,0 +1,144 @@
+#!/bin/bash
+#
+# Copyright 2019 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# "Unit" and integration tests for Absl CMake installation
+
+# TODO(absl-team): This script isn't fully hermetic because
+# -DABSL_USE_GOOGLETEST_HEAD=ON means that this script isn't pinned to a fixed
+# version of GoogleTest. This means that an upstream change to GoogleTest could
+# break this test. Fix this by allowing this script to pin to a known-good
+# version of GoogleTest.
+
+# Fail on any error. Treat unset variables as an error. Print commands as executed.
+set -euox pipefail
+
+install_absl() {
+  pushd "${absl_build_dir}"
+  if [[ "${#}" -eq 1 ]]; then
+    cmake -DCMAKE_INSTALL_PREFIX="${1}" "${absl_dir}"
+  else
+    cmake "${absl_dir}"
+  fi
+  cmake --build .
--target install -- -j + popd +} + +uninstall_absl() { + xargs rm < "${absl_build_dir}"/install_manifest.txt + rm -rf "${absl_build_dir}" + mkdir -p "${absl_build_dir}" +} + +lts_install="" + +while getopts ":l" lts; do + case "${lts}" in + l ) + lts_install="true" + ;; + esac +done + +absl_dir=/abseil-cpp +absl_build_dir=/buildfs/absl-build +project_dir="${absl_dir}"/CMake/install_test_project +project_build_dir=/buildfs/project-build + +mkdir -p "${absl_build_dir}" +mkdir -p "${project_build_dir}" + +if [[ "${lts_install}" ]]; then + install_dir="/usr/local" +else + install_dir="${project_build_dir}"/install +fi +mkdir -p "${install_dir}" + +# Test build, install, and link against installed abseil +pushd "${project_build_dir}" +if [[ "${lts_install}" ]]; then + install_absl + cmake "${project_dir}" +else + install_absl "${install_dir}" + cmake "${project_dir}" -DCMAKE_PREFIX_PATH="${install_dir}" +fi + +cmake --build . --target simple + +output="$(${project_build_dir}/simple "printme" 2>&1)" +if [[ "${output}" != *"Arg 1: printme"* ]]; then + echo "Faulty output on simple project:" + echo "${output}" + exit 1 +fi + +popd + +# Test that we haven't accidentally made absl::abslblah +pushd "${install_dir}" + +# Starting in CMake 3.12 the default install dir is lib$bit_width +if [[ -d lib64 ]]; then + libdir="lib64" +elif [[ -d lib ]]; then + libdir="lib" +else + echo "ls *, */*, */*/*:" + ls * + ls */* + ls */*/* + echo "unknown lib dir" +fi + +if [[ "${lts_install}" ]]; then + # LTS versions append the date of the release to the subdir. + # 9999/99/99 is the dummy date used in the local_lts workflow. + absl_subdir="absl_99999999" +else + absl_subdir="absl" +fi + +if ! grep absl::strings "${libdir}/cmake/${absl_subdir}/abslTargets.cmake"; then + cat "${libdir}"/cmake/absl/abslTargets.cmake + echo "CMake targets named incorrectly" + exit 1 +fi + +uninstall_absl +popd + +if [[ ! "${lts_install}" ]]; then + # Test that we warn if installed without a prefix or a system prefix + output="$(install_absl 2>&1)" + if [[ "${output}" != *"Please set CMAKE_INSTALL_PREFIX"* ]]; then + echo "Install without prefix didn't warn as expected. Output:" + echo "${output}" + exit 1 + fi + uninstall_absl + + output="$(install_absl /usr 2>&1)" + if [[ "${output}" != *"Please set CMAKE_INSTALL_PREFIX"* ]]; then + echo "Install with /usr didn't warn as expected. Output:" + echo "${output}" + exit 1 + fi + uninstall_absl +fi + +echo "Install test complete!" +exit 0 diff --git a/base/abseil/CMakeLists.txt b/base/abseil/CMakeLists.txt new file mode 100644 index 0000000..86f5634 --- /dev/null +++ b/base/abseil/CMakeLists.txt @@ -0,0 +1,160 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Most widely used distributions have cmake 3.5 or greater available as of March +# 2019. A notable exception is RHEL-7 (CentOS7). 
You can install a current +# version of CMake by first installing Extra Packages for Enterprise Linux +# (https://fedoraproject.org/wiki/EPEL#Extra_Packages_for_Enterprise_Linux_.28EPEL.29) +# and then issuing `yum install cmake3` on the command line. +cmake_minimum_required(VERSION 3.5) + +# Compiler id for Apple Clang is now AppleClang. +cmake_policy(SET CMP0025 NEW) + +# if command can use IN_LIST +cmake_policy(SET CMP0057 NEW) + +# Project version variables are the empty std::string if version is unspecified +cmake_policy(SET CMP0048 NEW) + +project(absl CXX) + +# when absl is included as subproject (i.e. using add_subdirectory(abseil-cpp)) +# in the source tree of a project that uses it, install rules are disabled. +if(NOT "^${CMAKE_SOURCE_DIR}$" STREQUAL "^${PROJECT_SOURCE_DIR}$") + set(ABSL_ENABLE_INSTALL FALSE) +else() + set(ABSL_ENABLE_INSTALL TRUE) +endif() + +list(APPEND CMAKE_MODULE_PATH + ${CMAKE_CURRENT_LIST_DIR}/CMake + ${CMAKE_CURRENT_LIST_DIR}/absl/copts +) + +include(AbseilInstallDirs) +include(CMakePackageConfigHelpers) +include(AbseilHelpers) + + +## +## Using absl targets +## +## all public absl targets are +## exported with the absl:: prefix +## +## e.g absl::base absl::synchronization absl::strings .... +## +## DO NOT rely on the internal targets outside of the prefix + + +# include current path +list(APPEND ABSL_COMMON_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}) + +if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") + set(ABSL_USING_CLANG ON) +else() + set(ABSL_USING_CLANG OFF) +endif() + +# find dependencies +## pthread +find_package(Threads REQUIRED) + +option(ABSL_USE_GOOGLETEST_HEAD + "If ON, abseil will download HEAD from googletest at config time." OFF) + +option(ABSL_RUN_TESTS "If ON, Abseil tests will be run." OFF) + +if(${ABSL_RUN_TESTS}) + # enable CTest. This will set BUILD_TESTING to ON unless otherwise specified + # on the command line + include(CTest) + enable_testing() +endif() + +## check targets +if(BUILD_TESTING) + + if(${ABSL_USE_GOOGLETEST_HEAD}) + include(CMake/Googletest/DownloadGTest.cmake) + set(absl_gtest_src_dir ${CMAKE_BINARY_DIR}/googletest-src) + set(absl_gtest_build_dir ${CMAKE_BINARY_DIR}/googletest-build) + endif() + + check_target(gtest) + check_target(gtest_main) + check_target(gmock) + + list(APPEND ABSL_TEST_COMMON_LIBRARIES + gtest_main + gtest + gmock + ${CMAKE_THREAD_LIBS_INIT} + ) +endif() + +add_subdirectory(absl) + +if(ABSL_ENABLE_INSTALL) + # absl:lts-remove-begin(system installation is supported for LTS releases) + # We don't support system-wide installation + list(APPEND SYSTEM_INSTALL_DIRS "/usr/local" "/usr" "/opt/" "/opt/local" "c:/Program Files/${PROJECT_NAME}") + if(NOT DEFINED CMAKE_INSTALL_PREFIX OR CMAKE_INSTALL_PREFIX IN_LIST SYSTEM_INSTALL_DIRS) + message(WARNING "\ + The default and system-level install directories are unsupported except in LTS \ + releases of Abseil. Please set CMAKE_INSTALL_PREFIX to install Abseil in your \ + source or build tree directly.\ + ") + endif() + # absl:lts-remove-end + + # install as a subdirectory only + install(EXPORT ${PROJECT_NAME}Targets + NAMESPACE absl:: + DESTINATION "${ABSL_INSTALL_CONFIGDIR}" + ) + + configure_package_config_file( + CMake/abslConfig.cmake.in + "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" + INSTALL_DESTINATION "${ABSL_INSTALL_CONFIGDIR}" + ) + install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" + DESTINATION "${ABSL_INSTALL_CONFIGDIR}" + ) + + # Abseil only has a version in LTS releases. 
This mechanism is accomplished via
+  # Abseil's internal Copybara (https://github.com/google/copybara) workflows
+  # and isn't visible in the CMake buildsystem itself.
+  if(absl_VERSION)
+    write_basic_package_version_file(
+      "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake"
+      COMPATIBILITY ExactVersion
+    )
+
+    install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake"
+      DESTINATION ${ABSL_INSTALL_CONFIGDIR}
+    )
+  endif()  # absl_VERSION
+
+  install(DIRECTORY absl
+    DESTINATION ${ABSL_INSTALL_INCLUDEDIR}
+    FILES_MATCHING
+      PATTERN "*.inc"
+      PATTERN "*.h"
+  )
+endif()  # ABSL_ENABLE_INSTALL
diff --git a/base/abseil/CONTRIBUTING.md b/base/abseil/CONTRIBUTING.md
new file mode 100644
index 0000000..f4cb4a2
--- /dev/null
+++ b/base/abseil/CONTRIBUTING.md
@@ -0,0 +1,138 @@
+# How to Contribute to Abseil
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+NOTE: If you are new to GitHub, please start by reading the [Pull Request
+howto](https://help.github.com/articles/about-pull-requests/).
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to https://cla.developers.google.com/ to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+## Contribution Guidelines
+
+Potential contributors sometimes ask us if the Abseil project is the
+appropriate home for their utility library code or for specific functions
+implementing missing portions of the standard. Often, the answer to this
+question is "no". We'd like to articulate our thinking on this issue so that
+our choices can be understood by everyone and so that contributors can have a
+better intuition about whether Abseil might be interested in adopting a new
+library.
+
+### Priorities
+
+Although our mission is to augment the C++ standard library, our goal is not to
+provide a full forward-compatible implementation of the latest standard. For us
+to consider a library for inclusion in Abseil, it is not enough that a library
+is useful. We generally choose to release a library when it meets at least one
+of the following criteria:
+
+*   **Widespread usage** - Using our internal codebase to help gauge usage, most
+    of the libraries we've released have tens of thousands of users.
+*   **Anticipated widespread usage** - Pre-adoption of some standard-compliant
+    APIs may not have broad adoption initially but can be expected to pick up
+    usage when it replaces legacy APIs. `absl::from_chars`, for example,
+    replaces existing code that converts strings to numbers and will therefore
+    likely see usage growth.
+*   **High impact** - APIs that provide a key solution to a specific problem,
+    such as `absl::FixedArray`, have higher impact than usage numbers may signal
+    and are released because of their importance.
+*   **Direct support for a library that falls under one of the above** - When we
+    want access to a smaller library as an implementation detail for a
+    higher-priority library we plan to release, we may release it, as we did
+    with portions of `absl/meta/type_traits.h`.
One consequence of this is that
+    the presence of a library in Abseil does not necessarily mean that other
+    similar libraries would be a high priority.
+
+### API Freeze Consequences
+
+Via the
+[Abseil Compatibility Guidelines](https://abseil.io/about/compatibility), we
+have promised a large degree of API stability. In particular, we will not make
+backward-incompatible changes to released APIs without also shipping a tool or
+process that can upgrade our users' code. We are not yet at the point of easily
+releasing such tools. Therefore, at this time, shipping a library establishes an
+API contract which is borderline unchangeable. (We can add new functionality,
+but we cannot easily change existing behavior.) This constraint forces us to
+very carefully review all APIs that we ship.
+
+
+## Coding Style
+
+To keep the source consistent, readable, diffable and easy to merge, we use a
+fairly rigid coding style, as defined by the
+[google-styleguide](https://github.com/google/styleguide) project. All patches
+will be expected to conform to the style outlined
+[here](https://google.github.io/styleguide/cppguide.html).
+
+## Guidelines for Pull Requests
+
+*   If you are a Googler, it is preferable to first create an internal CL and
+    have it reviewed and submitted. The code propagation process will deliver
+    the change to GitHub.
+
+*   Create **small PRs** that are narrowly focused on **addressing a single
+    concern**. We often receive PRs that are trying to fix several things at a
+    time, but if only one fix is considered acceptable, nothing gets merged and
+    both the author's and the reviewer's time is wasted. Create more PRs to
+    address different concerns and everyone will be happy.
+
+*   For speculative changes, consider opening an [Abseil
+    issue](https://github.com/abseil/abseil-cpp/issues) and discussing it first.
+    If you are suggesting a behavioral or API change, consider starting with an
+    [Abseil proposal template](ABSEIL_ISSUE_TEMPLATE.md).
+
+*   Provide a good **PR description** as a record of **what** change is being
+    made and **why** it was made. Link to a GitHub issue if it exists.
+
+*   Don't fix code style and formatting unless you are already changing that
+    line to address an issue. Formatting of modified lines may be done using
+    `git clang-format`. PRs with irrelevant changes won't be merged. If you do
+    want to fix formatting or style, do that in a separate PR.
+
+*   Unless your PR is trivial, you should expect there will be reviewer comments
+    that you'll need to address before merging. We expect you to be reasonably
+    responsive to those comments, otherwise the PR will be closed after 2-3
+    weeks of inactivity.
+
+*   Maintain a **clean commit history** and use **meaningful commit messages**.
+    PRs with messy commit history are difficult to review and won't be merged.
+    Use `rebase -i upstream/master` to curate your commit history and/or to
+    bring in latest changes from master (but avoid rebasing in the middle of a
+    code review).
+
+*   Keep your PR up to date with upstream/master (if there are merge conflicts,
+    we can't really merge your change).
+
+*   **All tests need to be passing** before your change can be merged. We
+    recommend you **run tests locally** (see below).
+
+*   Exceptions to the rules can be made if there's a compelling reason for doing
+    so. That is - the rules are here to serve us, not the other way around, and
+    the rules need to be serving their intended purpose to be valuable.
+
+*   All submissions, including submissions by project members, require review.
+ +## Running Tests + +Use "bazel test <>" functionality to run the unit tests. + +Prerequisites for building and running tests are listed in +[README.md](README.md) + +## Abseil Committers + +The current members of the Abseil engineering team are the only committers at +present. + +## Release Process + +Abseil lives at head, where latest-and-greatest code can be found. diff --git a/base/abseil/LICENSE b/base/abseil/LICENSE new file mode 100644 index 0000000..ccd61dc --- /dev/null +++ b/base/abseil/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/base/abseil/LTS.md b/base/abseil/LTS.md new file mode 100644 index 0000000..94363b6 --- /dev/null +++ b/base/abseil/LTS.md @@ -0,0 +1,15 @@ +# Long Term Support (LTS) Branches + +This repository contains periodic snapshots of the Abseil codebase that are +Long Term Support (LTS) branches. An LTS branch allows you to use a known +version of Abseil without interfering with other projects which may also, in +turn, use Abseil. (For more information about our releases, see the +[Abseil Release Management](https://abseil.io/about/releases) guide.) + +## LTS Branches + +The following lists LTS branches and the dates on which they have been released: + +* [LTS Branch December 18, 2018](https://github.com/abseil/abseil-cpp/tree/lts_2018_12_18/) +* [LTS Branch June 20, 2018](https://github.com/abseil/abseil-cpp/tree/lts_2018_06_20/) +* [LTS Branch August 8, 2019](https://github.com/abseil/abseil-cpp/tree/lts_2019_08_08/) diff --git a/base/abseil/README.md b/base/abseil/README.md new file mode 100644 index 0000000..85de569 --- /dev/null +++ b/base/abseil/README.md @@ -0,0 +1,114 @@ +# Abseil - C++ Common Libraries + +The repository contains the Abseil C++ library code. Abseil is an open-source +collection of C++ code (compliant to C++11) designed to augment the C++ +standard library. + +## Table of Contents + +- [About Abseil](#about) +- [Quickstart](#quickstart) +- [Building Abseil](#build) +- [Codemap](#codemap) +- [License](#license) +- [Links](#links) + + +## About Abseil + +Abseil is an open-source collection of C++ library code designed to augment +the C++ standard library. The Abseil library code is collected from Google's +own C++ code base, has been extensively tested and used in production, and +is the same code we depend on in our daily coding lives. + +In some cases, Abseil provides pieces missing from the C++ standard; in +others, Abseil provides alternatives to the standard for special needs +we've found through usage in the Google code base. We denote those cases +clearly within the library code we provide you. + +Abseil is not meant to be a competitor to the standard library; we've +just found that many of these utilities serve a purpose within our code +base, and we now want to provide those resources to the C++ community as +a whole. 
+ + +## Quickstart + +If you want to just get started, make sure you at least run through the +[Abseil Quickstart](https://abseil.io/docs/cpp/quickstart). The Quickstart +contains information about setting up your development environment, downloading +the Abseil code, running tests, and getting a simple binary working. + + +## Building Abseil + +[Bazel](https://bazel.build) is the official build system for Abseil, +which is supported on most major platforms (Linux, Windows, macOS, for example) +and compilers. See the [quickstart](https://abseil.io/docs/cpp/quickstart) for +more information on building Abseil using the Bazel build system. + + +If you require CMake support, please check the +[CMake build instructions](CMake/README.md). + +## Codemap + +Abseil contains the following C++ library components: + +* [`base`](absl/base/) Abseil Fundamentals +
The `base` library contains initialization code and other code which + all other Abseil code depends on. Code within `base` may not depend on any + other code (other than the C++ standard library). +* [`algorithm`](absl/algorithm/) +
The `algorithm` library contains additions to the C++ `` + library and container-based versions of such algorithms. +* [`container`](absl/container/) +
The `container` library contains additional STL-style containers, + including Abseil's unordered "Swiss table" containers. +* [`debugging`](absl/debugging/) +
The `debugging` library contains code useful for enabling leak + checks, and stacktrace and symbolization utilities. +* [`hash`](absl/hash/) +
The `hash` library contains the hashing framework and default hash + functor implementations for hashable types in Abseil. +* [`memory`](absl/memory/) +
The `memory` library contains C++11-compatible versions of + `std::make_unique()` and related memory management facilities. +* [`meta`](absl/meta/) +
The `meta` library contains C++11-compatible versions of type checks + available within C++14 and C++17 versions of the C++ `` library. +* [`numeric`](absl/numeric/) +
The `numeric` library contains C++11-compatible 128-bit integers. +* [`strings`](absl/strings/) +
The `strings` library contains a variety of strings routines and + utilities, including a C++11-compatible version of the C++17 + `std::string_view` type. +* [`synchronization`](absl/synchronization/) +
The `synchronization` library contains concurrency primitives (Abseil's + `absl::Mutex` class, an alternative to `std::mutex`) and a variety of + synchronization abstractions. +* [`time`](absl/time/) +
The `time` library contains abstractions for computing with absolute + points in time, durations of time, and formatting and parsing time within + time zones. +* [`types`](absl/types/) +
The `types` library contains non-container utility types, like a + C++11-compatible version of the C++17 `std::optional` type. +* [`utility`](absl/utility/) +
The `utility` library contains utility and helper code.
+
+## License
+
+The Abseil C++ library is licensed under the terms of the Apache
+license. See [LICENSE](LICENSE) for more information.
+
+## Links
+
+For more information about Abseil:
+
+* Consult our [Abseil Introduction](https://abseil.io/about/intro)
+* Read [Why Adopt Abseil](https://abseil.io/about/philosophy) to understand our
+  design philosophy.
+* Peruse our
+  [Abseil Compatibility Guarantees](https://abseil.io/about/compatibility) to
+  understand both what we promise to you, and what we expect of you in return.
diff --git a/base/abseil/UPGRADES.md b/base/abseil/UPGRADES.md
new file mode 100644
index 0000000..35599d0
--- /dev/null
+++ b/base/abseil/UPGRADES.md
@@ -0,0 +1,17 @@
+# C++ Upgrade Tools
+
+Abseil may occasionally release API-breaking changes. As noted in our
+[Compatibility Guidelines][compatibility-guide], we will aim to provide a tool
+to do the work of effecting such API-breaking changes, when absolutely
+necessary.
+
+These tools will be listed on the [C++ Upgrade Tools][upgrade-tools] guide on
+https://abseil.io.
+
+For more information, the [C++ Automated Upgrade Guide][api-upgrades-guide]
+outlines this process.
+
+[compatibility-guide]: https://abseil.io/about/compatibility
+[api-upgrades-guide]: https://abseil.io/docs/cpp/tools/api-upgrades
+[upgrade-tools]: https://abseil.io/docs/cpp/tools/upgrades/
+
diff --git a/base/abseil/WORKSPACE b/base/abseil/WORKSPACE
new file mode 100644
index 0000000..f2b1046
--- /dev/null
+++ b/base/abseil/WORKSPACE
@@ -0,0 +1,45 @@
+#
+# Copyright 2019 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+workspace(name = "com_google_absl")
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+# GoogleTest/GoogleMock framework. Used by most unit-tests.
+http_archive(
+    name = "com_google_googletest",
+    urls = ["https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip"],  # 2019-01-07
+    strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb",
+    sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86",
+)
+
+# Google benchmark.
+http_archive(
+    name = "com_github_google_benchmark",
+    urls = ["https://github.com/google/benchmark/archive/16703ff83c1ae6d53e5155df3bb3ab0bc96083be.zip"],
+    strip_prefix = "benchmark-16703ff83c1ae6d53e5155df3bb3ab0bc96083be",
+    sha256 = "59f918c8ccd4d74b6ac43484467b500f1d64b40cc1010daa055375b322a43ba3",
+)
+
+# C++ rules for Bazel.
+http_archive( + name = "rules_cc", + sha256 = "9a446e9dd9c1bb180c86977a8dc1e9e659550ae732ae58bd2e8fd51e15b2c91d", + strip_prefix = "rules_cc-262ebec3c2296296526740db4aefce68c80de7fa", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_cc/archive/262ebec3c2296296526740db4aefce68c80de7fa.zip", + "https://github.com/bazelbuild/rules_cc/archive/262ebec3c2296296526740db4aefce68c80de7fa.zip", + ], +) diff --git a/base/abseil/absl/BUILD.bazel b/base/abseil/absl/BUILD.bazel new file mode 100644 index 0000000..5a03acf --- /dev/null +++ b/base/abseil/absl/BUILD.bazel @@ -0,0 +1,59 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +load( + ":compiler_config_setting.bzl", + "create_llvm_config", +) + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +create_llvm_config( + name = "llvm_compiler", + visibility = [":__subpackages__"], +) + +config_setting( + name = "osx", + constraint_values = [ + "@bazel_tools//platforms:osx", + ], +) + +config_setting( + name = "ios", + constraint_values = [ + "@bazel_tools//platforms:ios", + ], +) + +config_setting( + name = "windows", + values = { + "cpu": "x64_windows", + }, + visibility = [":__subpackages__"], +) + +config_setting( + name = "ppc", + values = { + "cpu": "ppc", + }, + visibility = [":__subpackages__"], +) diff --git a/base/abseil/absl/CMakeLists.txt b/base/abseil/absl/CMakeLists.txt new file mode 100644 index 0000000..3e78397 --- /dev/null +++ b/base/abseil/absl/CMakeLists.txt @@ -0,0 +1,33 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +add_subdirectory(base) +add_subdirectory(algorithm) +add_subdirectory(container) +add_subdirectory(debugging) +add_subdirectory(flags) +add_subdirectory(hash) +add_subdirectory(memory) +add_subdirectory(meta) +add_subdirectory(numeric) +add_subdirectory(random) +add_subdirectory(strings) +add_subdirectory(synchronization) +add_subdirectory(time) +add_subdirectory(types) +add_subdirectory(utility) diff --git a/base/abseil/absl/abseil.podspec.gen.py b/base/abseil/absl/abseil.podspec.gen.py new file mode 100644 index 0000000..2bf153c --- /dev/null +++ b/base/abseil/absl/abseil.podspec.gen.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +"""This script generates abseil.podspec from all BUILD.bazel files. + +This is expected to run on abseil git repository with Bazel 1.0 on Linux. 
+It recursively analyzes BUILD.bazel files using Bazel's query command to
+dump its build rules in XML format. From these rules, it constructs the
+podspec structure.
+"""
+
+import argparse
+import collections
+import os
+import re
+import subprocess
+import xml.etree.ElementTree
+
+# Template of root podspec.
+SPEC_TEMPLATE = """
+# This file has been automatically generated from a script.
+# Please make modifications to `abseil.podspec.gen.py` instead.
+Pod::Spec.new do |s|
+  s.name = 'abseil'
+  s.version = '${version}'
+  s.summary = 'Abseil Common Libraries (C++) from Google'
+  s.homepage = 'https://abseil.io'
+  s.license = 'Apache License, Version 2.0'
+  s.authors = { 'Abseil Team' => 'abseil-io@googlegroups.com' }
+  s.source = {
+    :git => 'https://github.com/abseil/abseil-cpp.git',
+    :tag => '${tag}',
+  }
+  s.module_name = 'absl'
+  s.header_mappings_dir = 'absl'
+  s.header_dir = 'absl'
+  s.libraries = 'c++'
+  s.compiler_flags = '-Wno-everything'
+  s.pod_target_xcconfig = {
+    'USER_HEADER_SEARCH_PATHS' => '$(inherited) "$(PODS_TARGET_SRCROOT)"',
+    'USE_HEADERMAP' => 'NO',
+    'ALWAYS_SEARCH_USER_PATHS' => 'NO',
+  }
+  s.ios.deployment_target = '7.0'
+  s.osx.deployment_target = '10.9'
+  s.tvos.deployment_target = '9.0'
+  s.watchos.deployment_target = '2.0'
+"""
+
+# Limited platforms that abseil supports.
+# This is mainly because of sigaltstack unavailable on watchOS.
+LIMITED_SUPPORT_PLATFORMS = [
+    "ios.deployment_target = '7.0'",
+    "osx.deployment_target = '10.9'",
+]
+
+# Custom specification per rule.
+CUSTOM_SPEC_MAP = {
+    "//absl/debugging:failure_signal_handler": LIMITED_SUPPORT_PLATFORMS,
+}
+
+# Rule object representing the rule of Bazel BUILD.
+Rule = collections.namedtuple(
+    "Rule", "type name package srcs hdrs textual_hdrs deps visibility testonly")
+
+
+def get_elem_value(elem, name):
+  """Returns the value of XML element with the given name."""
+  for child in elem:
+    if child.attrib.get("name") != name:
+      continue
+    if child.tag == "string":
+      return child.attrib.get("value")
+    if child.tag == "boolean":
+      return child.attrib.get("value") == "true"
+    if child.tag == "list":
+      return [nested_child.attrib.get("value") for nested_child in child]
+    # Raise a proper exception type; raising a plain string is invalid in
+    # Python 3.
+    raise ValueError("Cannot recognize tag: " + child.tag)
+  return None
+
+
+def normalize_paths(paths):
+  """Returns the list of normalized paths."""
+  # e.g.
["//absl/strings:dir/header.h"] -> ["absl/strings/dir/header.h"] + return [path.lstrip("/").replace(":", "/") for path in paths] + + +def parse_rule(elem, package): + """Returns a rule from bazel XML rule.""" + return Rule( + type=elem.attrib["class"], + name=get_elem_value(elem, "name"), + package=package, + srcs=normalize_paths(get_elem_value(elem, "srcs") or []), + hdrs=normalize_paths(get_elem_value(elem, "hdrs") or []), + textual_hdrs=normalize_paths(get_elem_value(elem, "textual_hdrs") or []), + deps=get_elem_value(elem, "deps") or [], + visibility=get_elem_value(elem, "visibility") or [], + testonly=get_elem_value(elem, "testonly") or False) + + +def read_build(package): + """Runs bazel query on given package file and returns all cc rules.""" + result = subprocess.check_output( + ["bazel", "query", package + ":all", "--output", "xml"]) + root = xml.etree.ElementTree.fromstring(result) + return [ + parse_rule(elem, package) + for elem in root + if elem.tag == "rule" and elem.attrib["class"].startswith("cc_") + ] + + +def collect_rules(root_path): + """Collects and returns all rules from root path recursively.""" + rules = [] + for cur, _, _ in os.walk(root_path): + build_path = os.path.join(cur, "BUILD.bazel") + if os.path.exists(build_path): + rules.extend(read_build("//" + cur)) + return rules + + +def relevant_rule(rule): + """Returns true if a given rule is relevant when generating a podspec.""" + return ( + # cc_library only (ignore cc_test, cc_binary) + rule.type == "cc_library" and + # ignore empty rule + (rule.hdrs + rule.textual_hdrs + rule.srcs) and + # ignore test-only rule + not rule.testonly) + + +def get_spec_var(depth): + """Returns the name of variable for spec with given depth.""" + return "s" if depth == 0 else "s{}".format(depth) + + +def get_spec_name(label): + """Converts the label of bazel rule to the name of podspec.""" + assert label.startswith("//absl/"), "{} doesn't start with //absl/".format( + label) + # e.g. //absl/apple/banana -> abseil/apple/banana + return "abseil/" + label[7:] + + +def write_podspec(f, rules, args): + """Writes a podspec from given rules and args.""" + rule_dir = build_rule_directory(rules)["abseil"] + # Write root part with given arguments + spec = re.sub(r"\$\{(\w+)\}", lambda x: args[x.group(1)], + SPEC_TEMPLATE).lstrip() + f.write(spec) + # Write all target rules + write_podspec_map(f, rule_dir, 0) + f.write("end\n") + + +def build_rule_directory(rules): + """Builds a tree-style rule directory from given rules.""" + rule_dir = {} + for rule in rules: + cur = rule_dir + for frag in get_spec_name(rule.package).split("/"): + cur = cur.setdefault(frag, {}) + cur[rule.name] = rule + return rule_dir + + +def write_podspec_map(f, cur_map, depth): + """Writes podspec from rule map recursively.""" + for key, value in sorted(cur_map.items()): + indent = " " * (depth + 1) + f.write("{indent}{var0}.subspec '{key}' do |{var1}|\n".format( + indent=indent, + key=key, + var0=get_spec_var(depth), + var1=get_spec_var(depth + 1))) + if isinstance(value, dict): + write_podspec_map(f, value, depth + 1) + else: + write_podspec_rule(f, value, depth + 1) + f.write("{indent}end\n".format(indent=indent)) + + +def write_podspec_rule(f, rule, depth): + """Writes podspec from given rule.""" + indent = " " * (depth + 1) + spec_var = get_spec_var(depth) + # Puts all files in hdrs, textual_hdrs, and srcs into source_files. 
+ # Since CocoaPods treats header_files a bit differently from bazel, + # this won't generate a header_files field so that all source_files + # are considered as header files. + srcs = sorted(set(rule.hdrs + rule.textual_hdrs + rule.srcs)) + write_indented_list( + f, "{indent}{var}.source_files = ".format(indent=indent, var=spec_var), + srcs) + # Writes dependencies of this rule. + for dep in sorted(rule.deps): + name = get_spec_name(dep.replace(":", "/")) + f.write("{indent}{var}.dependency '{dep}'\n".format( + indent=indent, var=spec_var, dep=name)) + # Writes custom specification. + custom_spec = CUSTOM_SPEC_MAP.get(rule.package + ":" + rule.name) + if custom_spec: + for spec in custom_spec: + f.write("{indent}{var}.{spec}\n".format( + indent=indent, var=spec_var, spec=spec)) + + +def write_indented_list(f, leading, values): + """Writes leading values in an indented style.""" + f.write(leading) + f.write((",\n" + " " * len(leading)).join("'{}'".format(v) for v in values)) + f.write("\n") + + +def generate(args): + """Generates a podspec file from all BUILD files under absl directory.""" + rules = filter(relevant_rule, collect_rules("absl")) + with open(args.output, "wt") as f: + write_podspec(f, rules, vars(args)) + + +def main(): + parser = argparse.ArgumentParser( + description="Generates abseil.podspec from BUILD.bazel") + parser.add_argument( + "-v", "--version", help="The version of podspec", required=True) + parser.add_argument( + "-t", + "--tag", + default=None, + help="The name of git tag (default: version)") + parser.add_argument( + "-o", + "--output", + default="abseil.podspec", + help="The name of output file (default: abseil.podspec)") + args = parser.parse_args() + if args.tag is None: + args.tag = args.version + generate(args) + + +if __name__ == "__main__": + main() diff --git a/base/abseil/absl/algorithm/BUILD.bazel b/base/abseil/absl/algorithm/BUILD.bazel new file mode 100644 index 0000000..6a96420 --- /dev/null +++ b/base/abseil/absl/algorithm/BUILD.bazel @@ -0,0 +1,89 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") +load( + "//absl:copts/configure_copts.bzl", + "ABSL_DEFAULT_COPTS", + "ABSL_DEFAULT_LINKOPTS", + "ABSL_TEST_COPTS", +) + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +cc_library( + name = "algorithm", + hdrs = ["algorithm.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = ["//absl/base:config"], +) + +cc_test( + name = "algorithm_test", + size = "small", + srcs = ["algorithm_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":algorithm", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "algorithm_benchmark", + srcs = ["equal_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + deps = [ + ":algorithm", + "//absl/base:core_headers", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_library( + name = "container", + hdrs = [ + "container.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":algorithm", + "//absl/base:core_headers", + "//absl/meta:type_traits", + ], +) + +cc_test( + name = "container_test", + srcs = ["container_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":container", + "//absl/base", + "//absl/base:core_headers", + "//absl/memory", + "//absl/types:span", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/base/abseil/absl/algorithm/CMakeLists.txt b/base/abseil/absl/algorithm/CMakeLists.txt new file mode 100644 index 0000000..56cd0fb --- /dev/null +++ b/base/abseil/absl/algorithm/CMakeLists.txt @@ -0,0 +1,69 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +absl_cc_library( + NAME + algorithm + HDRS + "algorithm.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_test( + NAME + algorithm_test + SRCS + "algorithm_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::algorithm + gmock_main +) + +absl_cc_library( + NAME + algorithm_container + HDRS + "container.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::meta + PUBLIC +) + +absl_cc_test( + NAME + container_test + SRCS + "container_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::algorithm_container + absl::base + absl::core_headers + absl::memory + absl::span + gmock_main +) diff --git a/base/abseil/absl/algorithm/algorithm.h b/base/abseil/absl/algorithm/algorithm.h new file mode 100644 index 0000000..e9b4733 --- /dev/null +++ b/base/abseil/absl/algorithm/algorithm.h @@ -0,0 +1,159 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: algorithm.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains Google extensions to the standard <algorithm> C++
+// header.
+
+#ifndef ABSL_ALGORITHM_ALGORITHM_H_
+#define ABSL_ALGORITHM_ALGORITHM_H_
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace algorithm_internal {
+
+// Performs comparisons with operator==, similar to C++14's `std::equal_to<>`.
+struct EqualTo {
+  template <typename T, typename U>
+  bool operator()(const T& a, const U& b) const {
+    return a == b;
+  }
+};
+
+template <typename InputIter1, typename InputIter2, typename Pred>
+bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+               InputIter2 last2, Pred pred, std::input_iterator_tag,
+               std::input_iterator_tag) {
+  while (true) {
+    if (first1 == last1) return first2 == last2;
+    if (first2 == last2) return false;
+    if (!pred(*first1, *first2)) return false;
+    ++first1;
+    ++first2;
+  }
+}
+
+template <typename InputIter1, typename InputIter2, typename Pred>
+bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+               InputIter2 last2, Pred&& pred, std::random_access_iterator_tag,
+               std::random_access_iterator_tag) {
+  return (last1 - first1 == last2 - first2) &&
+         std::equal(first1, last1, first2, std::forward<Pred>(pred));
+}
+
+// When we are using our own internal predicate that just applies operator==,
+// we forward to the non-predicate form of std::equal. This enables an
+// optimization in libstdc++ that can result in std::memcmp being used for
+// integer types.
+template <typename InputIter1, typename InputIter2>
+bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+               InputIter2 last2, algorithm_internal::EqualTo /* unused */,
+               std::random_access_iterator_tag,
+               std::random_access_iterator_tag) {
+  return (last1 - first1 == last2 - first2) &&
+         std::equal(first1, last1, first2);
+}
+
+template <typename It>
+It RotateImpl(It first, It middle, It last, std::true_type) {
+  return std::rotate(first, middle, last);
+}
+
+template <typename It>
+It RotateImpl(It first, It middle, It last, std::false_type) {
+  std::rotate(first, middle, last);
+  return std::next(first, std::distance(middle, last));
+}
+
+}  // namespace algorithm_internal
+
+// equal()
+//
+// Compares the equality of two ranges specified by pairs of iterators, using
+// the given predicate, returning true iff for each corresponding iterator i1
+// and i2 in the first and second range respectively, pred(*i1, *i2) == true
+//
+// This comparison takes at most min(`last1` - `first1`, `last2` - `first2`)
+// invocations of the predicate. Additionally, if InputIter1 and InputIter2 are
+// both random-access iterators, and `last1` - `first1` != `last2` - `first2`,
+// then the predicate is never invoked and the function returns false.
+//
+// This is a C++11-compatible implementation of C++14 `std::equal`. See
+// https://en.cppreference.com/w/cpp/algorithm/equal for more information.
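+//
+// Example (an editor's illustration, not upstream text; it assumes only the
+// absl::equal() overloads declared below):
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::list<int> b = {1, 2, 3};
+//   // Ranges of different iterator categories and lengths may be compared.
+//   bool same = absl::equal(a.begin(), a.end(), b.begin(), b.end());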
+template <typename InputIter1, typename InputIter2, typename Pred>
+bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+           InputIter2 last2, Pred&& pred) {
+  return algorithm_internal::EqualImpl(
+      first1, last1, first2, last2, std::forward<Pred>(pred),
+      typename std::iterator_traits<InputIter1>::iterator_category{},
+      typename std::iterator_traits<InputIter2>::iterator_category{});
+}
+
+// Overload of equal() that performs comparison of two ranges specified by
+// pairs of iterators using operator==.
+template <typename InputIter1, typename InputIter2>
+bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+           InputIter2 last2) {
+  return absl::equal(first1, last1, first2, last2,
+                     algorithm_internal::EqualTo{});
+}
+
+// linear_search()
+//
+// Performs a linear search for `value` using the iterator `first` up to
+// but not including `last`, returning true if [`first`, `last`) contains an
+// element equal to `value`.
+//
+// A linear search is of O(n) complexity which is guaranteed to make at most
+// n = (`last` - `first`) comparisons. A linear search over short containers
+// may be faster than a binary search, even when the container is sorted.
+template <typename InputIterator, typename EqualityComparable>
+bool linear_search(InputIterator first, InputIterator last,
+                   const EqualityComparable& value) {
+  return std::find(first, last, value) != last;
+}
+
+// rotate()
+//
+// Performs a left rotation on a range of elements (`first`, `last`) such that
+// `middle` is now the first element. `rotate()` returns an iterator pointing
+// to the first element before rotation. This function is exactly the same as
+// `std::rotate`, but fixes a bug in gcc
+// <= 4.9 where `std::rotate` returns `void` instead of an iterator.
+//
+// The complexity of this algorithm is the same as that of `std::rotate`, but
+// if `ForwardIterator` is not a random-access iterator, then `absl::rotate`
+// performs an additional pass over the range to construct the return value.
+template <typename ForwardIterator>
+ForwardIterator rotate(ForwardIterator first, ForwardIterator middle,
+                       ForwardIterator last) {
+  return algorithm_internal::RotateImpl(
+      first, middle, last,
+      std::is_same<decltype(std::rotate(first, middle, last)),
+                   ForwardIterator>());
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_ALGORITHM_ALGORITHM_H_
diff --git a/base/abseil/absl/algorithm/algorithm_test.cc b/base/abseil/absl/algorithm/algorithm_test.cc
new file mode 100644
index 0000000..81fccb6
--- /dev/null
+++ b/base/abseil/absl/algorithm/algorithm_test.cc
@@ -0,0 +1,182 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/algorithm/algorithm.h"
+
+#include <algorithm>
+#include <list>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+TEST(EqualTest, DefaultComparisonRandomAccess) {
+  std::vector<int> v1{1, 2, 3};
+  std::vector<int> v2 = v1;
+  std::vector<int> v3 = {1, 2};
+  std::vector<int> v4 = {1, 2, 4};
+
+  EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end()));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end()));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end()));
+}
+
+TEST(EqualTest, DefaultComparison) {
+  std::list<int> lst1{1, 2, 3};
+  std::list<int> lst2 = lst1;
+  std::list<int> lst3{1, 2};
+  std::list<int> lst4{1, 2, 4};
+
+  EXPECT_TRUE(absl::equal(lst1.begin(), lst1.end(), lst2.begin(), lst2.end()));
+  EXPECT_FALSE(absl::equal(lst1.begin(), lst1.end(), lst3.begin(), lst3.end()));
+  EXPECT_FALSE(absl::equal(lst1.begin(), lst1.end(), lst4.begin(), lst4.end()));
+}
+
+TEST(EqualTest, EmptyRange) {
+  std::vector<int> v1{1, 2, 3};
+  std::vector<int> empty1;
+  std::vector<int> empty2;
+
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), empty1.begin(), empty1.end()));
+  EXPECT_FALSE(absl::equal(empty1.begin(), empty1.end(), v1.begin(), v1.end()));
+  EXPECT_TRUE(
+      absl::equal(empty1.begin(), empty1.end(), empty2.begin(), empty2.end()));
+}
+
+TEST(EqualTest, MixedIterTypes) {
+  std::vector<int> v1{1, 2, 3};
+  std::list<int> lst1{v1.begin(), v1.end()};
+  std::list<int> lst2{1, 2, 4};
+  std::list<int> lst3{1, 2};
+
+  EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), lst1.begin(), lst1.end()));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), lst2.begin(), lst2.end()));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), lst3.begin(), lst3.end()));
+}
+
+TEST(EqualTest, MixedValueTypes) {
+  std::vector<int> v1{1, 2, 3};
+  std::vector<long> v2{1, 2, 3};
+  std::vector<long> v3{1, 2};
+  std::vector<long> v4{1, 2, 4};
+
+  EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end()));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end()));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end()));
+}
+
+TEST(EqualTest, WeirdIterators) {
+  std::vector<bool> v1{true, false};
+  std::vector<bool> v2 = v1;
+  std::vector<bool> v3{true};
+  std::vector<bool> v4{true, true, true};
+
+  EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end()));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end()));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end()));
+}
+
+TEST(EqualTest, CustomComparison) {
+  int n[] = {1, 2, 3, 4};
+  std::vector<int*> v1{&n[0], &n[1], &n[2]};
+  std::vector<int*> v2 = v1;
+  std::vector<int*> v3{&n[0], &n[1], &n[3]};
+  std::vector<int*> v4{&n[0], &n[1]};
+
+  auto eq = [](int* a, int* b) { return *a == *b; };
+
+  EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(), eq));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end(), eq));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end(), eq));
+}
+
+TEST(EqualTest, MoveOnlyPredicate) {
+  std::vector<int> v1{1, 2, 3};
+  std::vector<int> v2{4, 5, 6};
+
+  // move-only equality predicate
+  struct Eq {
+    Eq() = default;
+    Eq(Eq&&) = default;
+    Eq(const Eq&) = delete;
+    Eq& operator=(const Eq&) = delete;
+    bool operator()(const int a, const int b) const { return a == b; }
+  };
+
+  EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v1.begin(), v1.end(), Eq()));
+  EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(), Eq()));
+}
+
+struct CountingTrivialPred {
+  int* count;
+  bool operator()(int, int) const {
+    ++*count;
+    return true;
+  }
+};
+
+TEST(EqualTest, RandomAccessComplexity) {
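+  // As documented in algorithm.h, the predicate runs at most
+  // min(|range1|, |range2|) times, and with random-access iterators whose
+  // lengths differ it is never invoked at all. (Comment added for clarity.)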
+  std::vector<int> v1{1, 1, 3};
+  std::vector<int> v2 = v1;
+  std::vector<int> v3{1, 2};
+
+  do {
+    int count = 0;
+    absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(),
+                CountingTrivialPred{&count});
+    EXPECT_LE(count, 3);
+  } while (std::next_permutation(v2.begin(), v2.end()));
+
+  int count = 0;
+  absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end(),
+              CountingTrivialPred{&count});
+  EXPECT_EQ(count, 0);
+}
+
+class LinearSearchTest : public testing::Test {
+ protected:
+  LinearSearchTest() : container_{1, 2, 3} {}
+
+  static bool Is3(int n) { return n == 3; }
+  static bool Is4(int n) { return n == 4; }
+
+  std::vector<int> container_;
+};
+
+TEST_F(LinearSearchTest, linear_search) {
+  EXPECT_TRUE(absl::linear_search(container_.begin(), container_.end(), 3));
+  EXPECT_FALSE(absl::linear_search(container_.begin(), container_.end(), 4));
+}
+
+TEST_F(LinearSearchTest, linear_searchConst) {
+  const std::vector<int>* const const_container = &container_;
+  EXPECT_TRUE(
+      absl::linear_search(const_container->begin(), const_container->end(), 3));
+  EXPECT_FALSE(
+      absl::linear_search(const_container->begin(), const_container->end(), 4));
+}
+
+TEST(RotateTest, Rotate) {
+  std::vector<int> v{0, 1, 2, 3, 4};
+  EXPECT_EQ(*absl::rotate(v.begin(), v.begin() + 2, v.end()), 0);
+  EXPECT_THAT(v, testing::ElementsAreArray({2, 3, 4, 0, 1}));
+
+  std::list<int> l{0, 1, 2, 3, 4};
+  EXPECT_EQ(*absl::rotate(l.begin(), std::next(l.begin(), 3), l.end()), 0);
+  EXPECT_THAT(l, testing::ElementsAreArray({3, 4, 0, 1, 2}));
+}
+
+}  // namespace
diff --git a/base/abseil/absl/algorithm/container.h b/base/abseil/absl/algorithm/container.h
new file mode 100644
index 0000000..d72532d
--- /dev/null
+++ b/base/abseil/absl/algorithm/container.h
@@ -0,0 +1,1727 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: container.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides Container-based versions of algorithmic functions
+// within the C++ standard library. The following standard library sets of
+// functions are covered within this file:
+//
+//   * Algorithmic <iterator> functions
+//   * Algorithmic <numeric> functions
+//   * <algorithm> functions
+//
+// The standard library functions operate on iterator ranges; the functions
+// within this API operate on containers, though many return iterator ranges.
+//
+// All functions within this API are named with a `c_` prefix. Calls such as
+// `absl::c_xx(container, ...)` are equivalent to std:: functions such as
+// `std::xx(std::begin(cont), std::end(cont), ...)`. Functions that act on
+// iterators but not conceptually on iterator ranges (e.g. `std::iter_swap`)
+// have no equivalent here.
+//
+// For template parameter and variable naming, `C` indicates the container type
+// to which the function is applied, `Pred` indicates the predicate object type
+// to be used by the function and `T` indicates the applicable element type.
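+//
+// For example (an illustrative sketch added here, not upstream text):
+//
+//   std::vector<int> v = {1, 2, 3};
+//   absl::c_find(v, 2);                         // is equivalent to
+//   std::find(std::begin(v), std::end(v), 2);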
+
+#ifndef ABSL_ALGORITHM_CONTAINER_H_
+#define ABSL_ALGORITHM_CONTAINER_H_
+
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <numeric>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_algorithm_internal {
+
+// NOTE: it is important to defer to ADL lookup for building with C++ modules,
+// especially for headers like <valarray> which are not visible from this file
+// but specialize std::begin and std::end.
+using std::begin;
+using std::end;
+
+// The type of the iterator given by begin(c) (possibly std::begin(c)).
+// ContainerIter<const vector<int>> gives vector<int>::const_iterator,
+// while ContainerIter<vector<int>> gives vector<int>::iterator.
+template <typename C>
+using ContainerIter = decltype(begin(std::declval<C&>()));
+
+// An MSVC bug involving template parameter substitution requires us to use
+// decltype() here instead of just std::pair.
+template <typename C1, typename C2>
+using ContainerIterPairType =
+    decltype(std::make_pair(ContainerIter<C1>(), ContainerIter<C2>()));
+
+template <typename C>
+using ContainerDifferenceType =
+    decltype(std::distance(std::declval<ContainerIter<C>>(),
+                           std::declval<ContainerIter<C>>()));
+
+template <typename C>
+using ContainerPointerType =
+    typename std::iterator_traits<ContainerIter<C>>::pointer;
+
+// container_algorithm_internal::c_begin and
+// container_algorithm_internal::c_end are abbreviations for proper ADL
+// lookup of std::begin and std::end, i.e.
+//   using std::begin;
+//   using std::end;
+//   std::foo(begin(c), end(c));
+// becomes
+//   std::foo(container_algorithm_internal::begin(c),
+//            container_algorithm_internal::end(c));
+// These are meant for internal use only.
+
+template <typename C>
+ContainerIter<C> c_begin(C& c) { return begin(c); }
+
+template <typename C>
+ContainerIter<C> c_end(C& c) { return end(c); }
+
+template <typename T>
+struct IsUnorderedContainer : std::false_type {};
+
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+    std::unordered_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+    std::unordered_set<Key, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+// container_algorithm_internal::c_size. It is meant for internal use only.
+
+template <class C>
+auto c_size(C& c) -> decltype(c.size()) {
+  return c.size();
+}
+
+template <class T, std::size_t N>
+constexpr std::size_t c_size(T (&)[N]) {
+  return N;
+}
+
+}  // namespace container_algorithm_internal
+
+// PUBLIC API
+
+//------------------------------------------------------------------------------
+// Abseil algorithm.h functions
+//------------------------------------------------------------------------------
+
+// c_linear_search()
+//
+// Container-based version of absl::linear_search() for performing a linear
+// search within a container.
+template <typename C, typename EqualityComparable>
+bool c_linear_search(const C& c, EqualityComparable&& value) {
+  return linear_search(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<EqualityComparable>(value));
+}
+
+//------------------------------------------------------------------------------
+// <iterator> algorithms
+//------------------------------------------------------------------------------
+
+// c_distance()
+//
+// Container-based version of the <iterator> `std::distance()` function to
+// return the number of elements within a container.
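+//
+// Example (an added sketch; the `assert` usage is illustrative only):
+//
+//   std::list<int> l = {1, 2, 3};
+//   assert(absl::c_distance(l) == 3);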
+template <typename C>
+container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
+    const C& c) {
+  return std::distance(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c));
+}
+
+//------------------------------------------------------------------------------
+// Non-modifying sequence operations
+//------------------------------------------------------------------------------
+
+// c_all_of()
+//
+// Container-based version of the <algorithm> `std::all_of()` function to
+// test a condition on all elements within a container.
+template <typename C, typename Pred>
+bool c_all_of(const C& c, Pred&& pred) {
+  return std::all_of(container_algorithm_internal::c_begin(c),
+                     container_algorithm_internal::c_end(c),
+                     std::forward<Pred>(pred));
+}
+
+// c_any_of()
+//
+// Container-based version of the <algorithm> `std::any_of()` function to
+// test if any element in a container fulfills a condition.
+template <typename C, typename Pred>
+bool c_any_of(const C& c, Pred&& pred) {
+  return std::any_of(container_algorithm_internal::c_begin(c),
+                     container_algorithm_internal::c_end(c),
+                     std::forward<Pred>(pred));
+}
+
+// c_none_of()
+//
+// Container-based version of the <algorithm> `std::none_of()` function to
+// test if no elements in a container fulfill a condition.
+template <typename C, typename Pred>
+bool c_none_of(const C& c, Pred&& pred) {
+  return std::none_of(container_algorithm_internal::c_begin(c),
+                      container_algorithm_internal::c_end(c),
+                      std::forward<Pred>(pred));
+}
+
+// c_for_each()
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+template <typename C, typename Function>
+decay_t<Function> c_for_each(C&& c, Function&& f) {
+  return std::for_each(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<Function>(f));
+}
+
+// c_find()
+//
+// Container-based version of the <algorithm> `std::find()` function to find
+// the first element matching the given value within a container.
+template <typename C, typename T>
+container_algorithm_internal::ContainerIter<C> c_find(C& c, T&& value) {
+  return std::find(container_algorithm_internal::c_begin(c),
+                   container_algorithm_internal::c_end(c),
+                   std::forward<T>(value));
+}
+
+// c_find_if()
+//
+// Container-based version of the <algorithm> `std::find_if()` function to find
+// the first element in a container matching the given condition.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_find_if(C& c, Pred&& pred) {
+  return std::find_if(container_algorithm_internal::c_begin(c),
+                      container_algorithm_internal::c_end(c),
+                      std::forward<Pred>(pred));
+}
+
+// c_find_if_not()
+//
+// Container-based version of the <algorithm> `std::find_if_not()` function to
+// find the first element in a container not matching the given condition.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_find_if_not(C& c,
+                                                             Pred&& pred) {
+  return std::find_if_not(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c),
+                          std::forward<Pred>(pred));
+}
+
+// c_find_end()
+//
+// Container-based version of the <algorithm> `std::find_end()` function to
+// find the last subsequence within a container.
+template <typename Sequence1, typename Sequence2>
+container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
+    Sequence1& sequence, Sequence2& subsequence) {
+  return std::find_end(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence),
+                       container_algorithm_internal::c_begin(subsequence),
+                       container_algorithm_internal::c_end(subsequence));
+}
+
+// Overload of c_find_end() for using a predicate evaluation other than `==` as
+// the function's test condition.
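+//
+// Example with a custom predicate (an added sketch):
+//
+//   std::vector<int> seq = {11, 12, 21, 22};
+//   std::vector<int> sub = {1, 2};
+//   // Compares last digits, so this finds the {21, 22} suffix.
+//   auto it = absl::c_find_end(seq, sub,
+//                              [](int a, int b) { return a % 10 == b % 10; });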
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
+    Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
+  return std::find_end(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence),
+                       container_algorithm_internal::c_begin(subsequence),
+                       container_algorithm_internal::c_end(subsequence),
+                       std::forward<BinaryPredicate>(pred));
+}
+
+// c_find_first_of()
+//
+// Container-based version of the <algorithm> `std::find_first_of()` function
+// to find the first element within the container that is also within the
+// options container.
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIter<C1> c_find_first_of(C1& container,
+                                                                C2& options) {
+  return std::find_first_of(container_algorithm_internal::c_begin(container),
+                            container_algorithm_internal::c_end(container),
+                            container_algorithm_internal::c_begin(options),
+                            container_algorithm_internal::c_end(options));
+}
+
+// Overload of c_find_first_of() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<C1> c_find_first_of(
+    C1& container, C2& options, BinaryPredicate&& pred) {
+  return std::find_first_of(container_algorithm_internal::c_begin(container),
+                            container_algorithm_internal::c_end(container),
+                            container_algorithm_internal::c_begin(options),
+                            container_algorithm_internal::c_end(options),
+                            std::forward<BinaryPredicate>(pred));
+}
+
+// c_adjacent_find()
+//
+// Container-based version of the <algorithm> `std::adjacent_find()` function
+// to find equal adjacent elements within a container.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
+    Sequence& sequence) {
+  return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_adjacent_find() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
+    Sequence& sequence, BinaryPredicate&& pred) {
+  return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<BinaryPredicate>(pred));
+}
+
+// c_count()
+//
+// Container-based version of the <algorithm> `std::count()` function to count
+// values that match within a container.
+template <typename C, typename T>
+container_algorithm_internal::ContainerDifferenceType<const C> c_count(
+    const C& c, T&& value) {
+  return std::count(container_algorithm_internal::c_begin(c),
+                    container_algorithm_internal::c_end(c),
+                    std::forward<T>(value));
+}
+
+// c_count_if()
+//
+// Container-based version of the <algorithm> `std::count_if()` function to
+// count values matching a condition within a container.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerDifferenceType<const C> c_count_if(
+    const C& c, Pred&& pred) {
+  return std::count_if(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<Pred>(pred));
+}
+
+// c_mismatch()
+//
+// Container-based version of the <algorithm> `std::mismatch()` function to
+// return the first element where two ordered containers differ.
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIterPairType<C1, C2>
+c_mismatch(C1& c1, C2& c2) {
+  return std::mismatch(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2));
+}
+
+// Overload of c_mismatch() for using a predicate evaluation other than `==` as
+// the function's test condition.
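+//
+// Example (an added sketch, using the default `==` form):
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {1, 9, 3};
+//   auto diff = absl::c_mismatch(a, b);  // diff.first points at a[1],
+//                                        // diff.second at b[1].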
+template <typename C1, typename C2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIterPairType<C1, C2>
+c_mismatch(C1& c1, C2& c2, BinaryPredicate&& pred) {
+  return std::mismatch(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       std::forward<BinaryPredicate>(pred));
+}
+
+// c_equal()
+//
+// Container-based version of the <algorithm> `std::equal()` function to
+// test whether two containers are equal.
+//
+// NOTE: the semantics of c_equal() are slightly different than those of
+// equal(): while the latter iterates over the second container only up to the
+// size of the first container, c_equal() also checks whether the container
+// sizes are equal. This better matches expectations about c_equal() based on
+// its signature.
+//
+// Example:
+//
+//   std::vector<int> v1 = {1, 2, 3};
+//   std::vector<int> v2 = {1, 2, 3, 4};
+//   equal(std::begin(v1), std::end(v1), std::begin(v2))  // returns true
+//   c_equal(v1, v2)                                      // returns false
+
+template <typename C1, typename C2>
+bool c_equal(const C1& c1, const C2& c2) {
+  return ((container_algorithm_internal::c_size(c1) ==
+           container_algorithm_internal::c_size(c2)) &&
+          std::equal(container_algorithm_internal::c_begin(c1),
+                     container_algorithm_internal::c_end(c1),
+                     container_algorithm_internal::c_begin(c2)));
+}
+
+// Overload of c_equal() for using a predicate evaluation other than `==` as
+// the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
+  return ((container_algorithm_internal::c_size(c1) ==
+           container_algorithm_internal::c_size(c2)) &&
+          std::equal(container_algorithm_internal::c_begin(c1),
+                     container_algorithm_internal::c_end(c1),
+                     container_algorithm_internal::c_begin(c2),
+                     std::forward<BinaryPredicate>(pred)));
+}
+
+// c_is_permutation()
+//
+// Container-based version of the <algorithm> `std::is_permutation()` function
+// to test whether a container is a permutation of another.
+template <typename C1, typename C2>
+bool c_is_permutation(const C1& c1, const C2& c2) {
+  using std::begin;
+  using std::end;
+  return c1.size() == c2.size() &&
+         std::is_permutation(begin(c1), end(c1), begin(c2));
+}
+
+// Overload of c_is_permutation() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
+  using std::begin;
+  using std::end;
+  return c1.size() == c2.size() &&
+         std::is_permutation(begin(c1), end(c1), begin(c2),
+                             std::forward<BinaryPredicate>(pred));
+}
+
+// c_search()
+//
+// Container-based version of the <algorithm> `std::search()` function to
+// search a container for a subsequence.
+template <typename Sequence1, typename Sequence2>
+container_algorithm_internal::ContainerIter<Sequence1> c_search(
+    Sequence1& sequence, Sequence2& subsequence) {
+  return std::search(container_algorithm_internal::c_begin(sequence),
+                     container_algorithm_internal::c_end(sequence),
+                     container_algorithm_internal::c_begin(subsequence),
+                     container_algorithm_internal::c_end(subsequence));
+}
+
+// Overload of c_search() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence1> c_search(
+    Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
+  return std::search(container_algorithm_internal::c_begin(sequence),
+                     container_algorithm_internal::c_end(sequence),
+                     container_algorithm_internal::c_begin(subsequence),
+                     container_algorithm_internal::c_end(subsequence),
+                     std::forward<BinaryPredicate>(pred));
+}
+
+// c_search_n()
+//
+// Container-based version of the <algorithm> `std::search_n()` function to
+// search a container for the first sequence of N elements.
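+//
+// Example (an added sketch):
+//
+//   std::vector<int> v = {0, 7, 7, 7, 1};
+//   auto it = absl::c_search_n(v, 3, 7);  // points at v[1], the start of
+//                                         // three consecutive 7s.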
+template <typename Sequence, typename Size, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_search_n(
+    Sequence& sequence, Size count, T&& value) {
+  return std::search_n(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence), count,
+                       std::forward<T>(value));
+}
+
+// Overload of c_search_n() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence, typename Size, typename T,
+          typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence> c_search_n(
+    Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred) {
+  return std::search_n(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence), count,
+                       std::forward<T>(value),
+                       std::forward<BinaryPredicate>(pred));
+}
+
+//------------------------------------------------------------------------------
+// Modifying sequence operations
+//------------------------------------------------------------------------------
+
+// c_copy()
+//
+// Container-based version of the <algorithm> `std::copy()` function to copy a
+// container's elements into an iterator.
+template <typename InputSequence, typename OutputIterator>
+OutputIterator c_copy(const InputSequence& input, OutputIterator output) {
+  return std::copy(container_algorithm_internal::c_begin(input),
+                   container_algorithm_internal::c_end(input), output);
+}
+
+// c_copy_n()
+//
+// Container-based version of the <algorithm> `std::copy_n()` function to copy
+// a container's first N elements into an iterator.
+template <typename C, typename Size, typename OutputIterator>
+OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) {
+  return std::copy_n(container_algorithm_internal::c_begin(input), n, output);
+}
+
+// c_copy_if()
+//
+// Container-based version of the <algorithm> `std::copy_if()` function to
+// copy a container's elements satisfying some condition into an iterator.
+template <typename InputSequence, typename OutputIterator, typename Pred>
+OutputIterator c_copy_if(const InputSequence& input, OutputIterator output,
+                         Pred&& pred) {
+  return std::copy_if(container_algorithm_internal::c_begin(input),
+                      container_algorithm_internal::c_end(input), output,
+                      std::forward<Pred>(pred));
+}
+
+// c_copy_backward()
+//
+// Container-based version of the <algorithm> `std::copy_backward()` function
+// to copy a container's elements in reverse order into an iterator.
+template <typename C, typename BidirectionalIterator>
+BidirectionalIterator c_copy_backward(const C& src,
+                                      BidirectionalIterator dest) {
+  return std::copy_backward(container_algorithm_internal::c_begin(src),
+                            container_algorithm_internal::c_end(src), dest);
+}
+
+// c_move()
+//
+// Container-based version of the <algorithm> `std::move()` function to move
+// a container's elements into an iterator.
+template <typename C, typename OutputIterator>
+OutputIterator c_move(C&& src, OutputIterator dest) {
+  return std::move(container_algorithm_internal::c_begin(src),
+                   container_algorithm_internal::c_end(src), dest);
+}
+
+// c_move_backward()
+//
+// Container-based version of the <algorithm> `std::move_backward()` function
+// to move a container's elements into an iterator in reverse order.
+template <typename C, typename BidirectionalIterator>
+BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) {
+  return std::move_backward(container_algorithm_internal::c_begin(src),
+                            container_algorithm_internal::c_end(src), dest);
+}
+
+// c_swap_ranges()
+//
+// Container-based version of the <algorithm> `std::swap_ranges()` function to
+// swap a container's elements with another container's elements.
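+//
+// Example (an added sketch):
+//
+//   std::vector<int> a = {1, 2};
+//   std::vector<int> b = {3, 4};
+//   absl::c_swap_ranges(a, b);  // a == {3, 4}, b == {1, 2}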
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIter<C2> c_swap_ranges(C1& c1, C2& c2) {
+  return std::swap_ranges(container_algorithm_internal::c_begin(c1),
+                          container_algorithm_internal::c_end(c1),
+                          container_algorithm_internal::c_begin(c2));
+}
+
+// c_transform()
+//
+// Container-based version of the <algorithm> `std::transform()` function to
+// transform a container's elements using the unary operation, storing the
+// result in an iterator pointing to the last transformed element in the
+// output range.
+template <typename InputSequence, typename OutputIterator, typename UnaryOp>
+OutputIterator c_transform(const InputSequence& input, OutputIterator output,
+                           UnaryOp&& unary_op) {
+  return std::transform(container_algorithm_internal::c_begin(input),
+                        container_algorithm_internal::c_end(input), output,
+                        std::forward<UnaryOp>(unary_op));
+}
+
+// Overload of c_transform() for performing a transformation using a binary
+// operation, taking inputs from two containers.
+template <typename InputSequence1, typename InputSequence2,
+          typename OutputIterator, typename BinaryOp>
+OutputIterator c_transform(const InputSequence1& input1,
+                           const InputSequence2& input2, OutputIterator output,
+                           BinaryOp&& binary_op) {
+  return std::transform(container_algorithm_internal::c_begin(input1),
+                        container_algorithm_internal::c_end(input1),
+                        container_algorithm_internal::c_begin(input2), output,
+                        std::forward<BinaryOp>(binary_op));
+}
+
+// c_replace()
+//
+// Container-based version of the <algorithm> `std::replace()` function to
+// replace a container's elements of some value with a new value. The container
+// is modified in place.
+template <typename Sequence, typename T>
+void c_replace(Sequence& sequence, const T& old_value, const T& new_value) {
+  std::replace(container_algorithm_internal::c_begin(sequence),
+               container_algorithm_internal::c_end(sequence), old_value,
+               new_value);
+}
+
+// c_replace_if()
+//
+// Container-based version of the <algorithm> `std::replace_if()` function to
+// replace a container's elements of some value with a new value based on some
+// condition. The container is modified in place.
+template <typename C, typename Pred, typename T>
+void c_replace_if(C& c, Pred&& pred, T&& new_value) {
+  std::replace_if(container_algorithm_internal::c_begin(c),
+                  container_algorithm_internal::c_end(c),
+                  std::forward<Pred>(pred), std::forward<T>(new_value));
+}
+
+// c_replace_copy()
+//
+// Container-based version of the <algorithm> `std::replace_copy()` function
+// to replace a container's elements of some value with a new value and return
+// the results within an iterator.
+template <typename C, typename OutputIterator, typename T>
+OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value,
+                              T&& new_value) {
+  return std::replace_copy(container_algorithm_internal::c_begin(c),
+                           container_algorithm_internal::c_end(c), result,
+                           std::forward<T>(old_value),
+                           std::forward<T>(new_value));
+}
+
+// c_replace_copy_if()
+//
+// Container-based version of the <algorithm> `std::replace_copy_if()` function
+// to replace a container's elements of some value with a new value based on
+// some condition, and return the results within an iterator.
+template <typename C, typename OutputIterator, typename Pred, typename T>
+OutputIterator c_replace_copy_if(const C& c, OutputIterator result,
+                                 Pred&& pred, T&& new_value) {
+  return std::replace_copy_if(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c), result,
+                              std::forward<Pred>(pred),
+                              std::forward<T>(new_value));
+}
+
+// c_fill()
+//
+// Container-based version of the <algorithm> `std::fill()` function to fill a
+// container with some value.
+template <typename C, typename T>
+void c_fill(C& c, T&& value) {
+  std::fill(container_algorithm_internal::c_begin(c),
+            container_algorithm_internal::c_end(c), std::forward<T>(value));
+}
+
+// c_fill_n()
+//
+// Container-based version of the <algorithm> `std::fill_n()` function to fill
+// the first N elements in a container with some value.
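+//
+// Example (an added sketch):
+//
+//   std::vector<int> v(5, 0);
+//   absl::c_fill_n(v, 3, 9);  // v == {9, 9, 9, 0, 0}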
+template <typename C, typename Size, typename T>
+void c_fill_n(C& c, Size n, T&& value) {
+  std::fill_n(container_algorithm_internal::c_begin(c), n,
+              std::forward<T>(value));
+}
+
+// c_generate()
+//
+// Container-based version of the <algorithm> `std::generate()` function to
+// assign a container's elements to the values provided by the given generator.
+template <typename C, typename Generator>
+void c_generate(C& c, Generator&& gen) {
+  std::generate(container_algorithm_internal::c_begin(c),
+                container_algorithm_internal::c_end(c),
+                std::forward<Generator>(gen));
+}
+
+// c_generate_n()
+//
+// Container-based version of the <algorithm> `std::generate_n()` function to
+// assign a container's first N elements to the values provided by the given
+// generator.
+template <typename C, typename Size, typename Generator>
+container_algorithm_internal::ContainerIter<C> c_generate_n(C& c, Size n,
+                                                            Generator&& gen) {
+  return std::generate_n(container_algorithm_internal::c_begin(c), n,
+                         std::forward<Generator>(gen));
+}
+
+// Note: `c_xx()` container versions for `remove()`, `remove_if()`,
+// and `unique()` are omitted, because it's not clear whether or not such
+// functions should call erase on their supplied sequences afterwards. Either
+// behavior would be surprising for a different set of users.
+
+// c_remove_copy()
+//
+// Container-based version of the <algorithm> `std::remove_copy()` function to
+// copy a container's elements while removing any elements matching the given
+// `value`.
+template <typename C, typename OutputIterator, typename T>
+OutputIterator c_remove_copy(const C& c, OutputIterator result, T&& value) {
+  return std::remove_copy(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c), result,
+                          std::forward<T>(value));
+}
+
+// c_remove_copy_if()
+//
+// Container-based version of the <algorithm> `std::remove_copy_if()` function
+// to copy a container's elements while removing any elements matching the
+// given condition.
+template <typename C, typename OutputIterator, typename Pred>
+OutputIterator c_remove_copy_if(const C& c, OutputIterator result,
+                                Pred&& pred) {
+  return std::remove_copy_if(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c), result,
+                             std::forward<Pred>(pred));
+}
+
+// c_unique_copy()
+//
+// Container-based version of the <algorithm> `std::unique_copy()` function to
+// copy a container's elements while removing any elements containing duplicate
+// values.
+template <typename C, typename OutputIterator>
+OutputIterator c_unique_copy(const C& c, OutputIterator result) {
+  return std::unique_copy(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c), result);
+}
+
+// Overload of c_unique_copy() for using a predicate evaluation other than
+// `==` for comparing uniqueness of the element values.
+template <typename C, typename OutputIterator, typename BinaryPredicate>
+OutputIterator c_unique_copy(const C& c, OutputIterator result,
+                             BinaryPredicate&& pred) {
+  return std::unique_copy(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c), result,
+                          std::forward<BinaryPredicate>(pred));
+}
+
+// c_reverse()
+//
+// Container-based version of the <algorithm> `std::reverse()` function to
+// reverse a container's elements.
+template <typename Sequence>
+void c_reverse(Sequence& sequence) {
+  std::reverse(container_algorithm_internal::c_begin(sequence),
+               container_algorithm_internal::c_end(sequence));
+}
+
+// c_reverse_copy()
+//
+// Container-based version of the <algorithm> `std::reverse_copy()` function to
+// reverse a container's elements and write them to an iterator range.
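+//
+// Example (an added sketch):
+//
+//   std::vector<int> v = {1, 2, 3};
+//   std::vector<int> out(3);
+//   absl::c_reverse_copy(v, out.begin());  // out == {3, 2, 1}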
+template <typename C, typename OutputIterator>
+OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) {
+  return std::reverse_copy(container_algorithm_internal::c_begin(sequence),
+                           container_algorithm_internal::c_end(sequence),
+                           result);
+}
+
+// c_rotate()
+//
+// Container-based version of the <algorithm> `std::rotate()` function to
+// shift a container's elements leftward such that the `middle` element becomes
+// the first element in the container.
+template <typename C,
+          typename Iterator = container_algorithm_internal::ContainerIter<C>>
+Iterator c_rotate(C& sequence, Iterator middle) {
+  return absl::rotate(container_algorithm_internal::c_begin(sequence), middle,
+                      container_algorithm_internal::c_end(sequence));
+}
+
+// c_rotate_copy()
+//
+// Container-based version of the <algorithm> `std::rotate_copy()` function to
+// shift a container's elements leftward such that the `middle` element becomes
+// the first element in a new iterator range.
+template <typename C, typename OutputIterator>
+OutputIterator c_rotate_copy(
+    const C& sequence,
+    container_algorithm_internal::ContainerIter<const C> middle,
+    OutputIterator result) {
+  return std::rotate_copy(container_algorithm_internal::c_begin(sequence),
+                          middle,
+                          container_algorithm_internal::c_end(sequence),
+                          result);
+}
+
+// c_shuffle()
+//
+// Container-based version of the <algorithm> `std::shuffle()` function to
+// randomly shuffle elements within the container using a `gen()` uniform
+// random number generator.
+template <typename RandomAccessContainer, typename UniformRandomBitGenerator>
+void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) {
+  std::shuffle(container_algorithm_internal::c_begin(c),
+               container_algorithm_internal::c_end(c),
+               std::forward<UniformRandomBitGenerator>(gen));
+}
+
+//------------------------------------------------------------------------------
+// Partition functions
+//------------------------------------------------------------------------------
+
+// c_is_partitioned()
+//
+// Container-based version of the <algorithm> `std::is_partitioned()` function
+// to test whether all elements in the container for which `pred` returns
+// `true` precede those for which `pred` is `false`.
+template <typename C, typename Pred>
+bool c_is_partitioned(const C& c, Pred&& pred) {
+  return std::is_partitioned(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c),
+                             std::forward<Pred>(pred));
+}
+
+// c_partition()
+//
+// Container-based version of the <algorithm> `std::partition()` function
+// to rearrange all elements in a container in such a way that all elements for
+// which `pred` returns `true` precede all those for which it returns `false`,
+// returning an iterator to the first element of the second group.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_partition(C& c, Pred&& pred) {
+  return std::partition(container_algorithm_internal::c_begin(c),
+                        container_algorithm_internal::c_end(c),
+                        std::forward<Pred>(pred));
+}
+
+// c_stable_partition()
+//
+// Container-based version of the <algorithm> `std::stable_partition()`
+// function to rearrange all elements in a container in such a way that all
+// elements for which `pred` returns `true` precede all those for which it
+// returns `false`, preserving the relative ordering between the two groups.
+// The function returns an iterator to the first element of the second group.
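+//
+// Example (an added sketch):
+//
+//   std::vector<int> v = {1, 2, 3, 4};
+//   auto is_even = [](int x) { return x % 2 == 0; };
+//   absl::c_stable_partition(v, is_even);  // v == {2, 4, 1, 3}; the relative
+//                                          // order within each group is kept.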
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_stable_partition(C& c,
+                                                                  Pred&& pred) {
+  return std::stable_partition(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<Pred>(pred));
+}
+
+// c_partition_copy()
+//
+// Container-based version of the <algorithm> `std::partition_copy()` function
+// to partition a container's elements and return them into two iterators: one
+// for which `pred` returns `true`, and one for which `pred` returns `false`.
+template <typename C, typename OutputIterator1, typename OutputIterator2,
+          typename Pred>
+std::pair<OutputIterator1, OutputIterator2> c_partition_copy(
+    const C& c, OutputIterator1 out_true, OutputIterator2 out_false,
+    Pred&& pred) {
+  return std::partition_copy(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c), out_true,
+                             out_false, std::forward<Pred>(pred));
+}
+
+// c_partition_point()
+//
+// Container-based version of the <algorithm> `std::partition_point()` function
+// to return the first element of an already partitioned container for which
+// the given `pred` is not `true`.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_partition_point(C& c,
+                                                                 Pred&& pred) {
+  return std::partition_point(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c),
+                              std::forward<Pred>(pred));
+}
+
+//------------------------------------------------------------------------------
+// Sorting functions
+//------------------------------------------------------------------------------
+
+// c_sort()
+//
+// Container-based version of the <algorithm> `std::sort()` function
+// to sort elements in ascending order of their values.
+template <typename C>
+void c_sort(C& c) {
+  std::sort(container_algorithm_internal::c_begin(c),
+            container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_sort() for performing a `comp` comparison other than the
+// default `operator<`.
+template <typename C, typename Compare>
+void c_sort(C& c, Compare&& comp) {
+  std::sort(container_algorithm_internal::c_begin(c),
+            container_algorithm_internal::c_end(c),
+            std::forward<Compare>(comp));
+}
+
+// c_stable_sort()
+//
+// Container-based version of the <algorithm> `std::stable_sort()` function
+// to sort elements in ascending order of their values, preserving the order
+// of equivalents.
+template <typename C>
+void c_stable_sort(C& c) {
+  std::stable_sort(container_algorithm_internal::c_begin(c),
+                   container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_stable_sort() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename C, typename Compare>
+void c_stable_sort(C& c, Compare&& comp) {
+  std::stable_sort(container_algorithm_internal::c_begin(c),
+                   container_algorithm_internal::c_end(c),
+                   std::forward<Compare>(comp));
+}
+
+// c_is_sorted()
+//
+// Container-based version of the <algorithm> `std::is_sorted()` function
+// to evaluate whether the given container is sorted in ascending order.
+template <typename C>
+bool c_is_sorted(const C& c) {
+  return std::is_sorted(container_algorithm_internal::c_begin(c),
+                        container_algorithm_internal::c_end(c));
+}
+
+// c_is_sorted() overload for performing a `comp` comparison other than the
+// default `operator<`.
+template <typename C, typename Compare>
+bool c_is_sorted(const C& c, Compare&& comp) {
+  return std::is_sorted(container_algorithm_internal::c_begin(c),
+                        container_algorithm_internal::c_end(c),
+                        std::forward<Compare>(comp));
+}
+
+// c_partial_sort()
+//
+// Container-based version of the <algorithm> `std::partial_sort()` function
+// to rearrange elements within a container such that elements before `middle`
+// are sorted in ascending order.
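+//
+// Example (an added sketch):
+//
+//   std::vector<int> v = {5, 1, 4, 2, 3};
+//   absl::c_partial_sort(v, v.begin() + 2);  // v[0] == 1, v[1] == 2; the
+//                                            // order of the rest is
+//                                            // unspecified.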
+template <typename RandomAccessContainer>
+void c_partial_sort(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer>
+        middle) {
+  std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
+                    container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_partial_sort() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename RandomAccessContainer, typename Compare>
+void c_partial_sort(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer> middle,
+    Compare&& comp) {
+  std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
+                    container_algorithm_internal::c_end(sequence),
+                    std::forward<Compare>(comp));
+}
+
+// c_partial_sort_copy()
+//
+// Container-based version of the <algorithm> `std::partial_sort_copy()`
+// function to sort elements within a container such that elements before
+// `middle` are sorted in ascending order, and return the result within an
+// iterator.
+template <typename C, typename RandomAccessContainer>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) {
+  return std::partial_sort_copy(
+      container_algorithm_internal::c_begin(sequence),
+      container_algorithm_internal::c_end(sequence),
+      container_algorithm_internal::c_begin(result),
+      container_algorithm_internal::c_end(result));
+}
+
+// Overload of c_partial_sort_copy() for performing a `comp` comparison other
+// than the default `operator<`.
+template <typename C, typename RandomAccessContainer, typename Compare>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_partial_sort_copy(const C& sequence, RandomAccessContainer& result,
+                    Compare&& comp) {
+  return std::partial_sort_copy(
+      container_algorithm_internal::c_begin(sequence),
+      container_algorithm_internal::c_end(sequence),
+      container_algorithm_internal::c_begin(result),
+      container_algorithm_internal::c_end(result),
+      std::forward<Compare>(comp));
+}
+
+// c_is_sorted_until()
+//
+// Container-based version of the <algorithm> `std::is_sorted_until()` function
+// to return the first element within a container that is not sorted in
+// ascending order as an iterator.
+template <typename C>
+container_algorithm_internal::ContainerIter<C> c_is_sorted_until(C& c) {
+  return std::is_sorted_until(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_is_sorted_until() for performing a `comp` comparison other
+// than the default `operator<`.
+template <typename C, typename Compare>
+container_algorithm_internal::ContainerIter<C> c_is_sorted_until(
+    C& c, Compare&& comp) {
+  return std::is_sorted_until(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c),
+                              std::forward<Compare>(comp));
+}
+
+// c_nth_element()
+//
+// Container-based version of the <algorithm> `std::nth_element()` function
+// to rearrange the elements within a container such that the `nth` element
+// would be in that position in an ordered sequence; other elements may be in
+// any order, except that all preceding `nth` will be less than that element,
+// and all following `nth` will be greater than that element.
+template <typename RandomAccessContainer>
+void c_nth_element(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer> nth) {
+  std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
+                   container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_nth_element() for performing a `comp` comparison other than
+// the default `operator<`.
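+//
+// Example with a custom comparator (an added sketch):
+//
+//   std::vector<int> v = {5, 1, 4, 2, 3};
+//   absl::c_nth_element(v, v.begin() + 2, std::greater<int>());
+//   // v[2] == 3; larger values now precede it, smaller values follow.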
+template <typename RandomAccessContainer, typename Compare>
+void c_nth_element(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer> nth,
+    Compare&& comp) {
+  std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
+                   container_algorithm_internal::c_end(sequence),
+                   std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// Binary Search
+//------------------------------------------------------------------------------
+
+// c_lower_bound()
+//
+// Container-based version of the <algorithm> `std::lower_bound()` function
+// to return an iterator pointing to the first element in a sorted container
+// which does not compare less than `value`.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
+    Sequence& sequence, T&& value) {
+  return std::lower_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value));
+}
+
+// Overload of c_lower_bound() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename Compare>
+container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
+    Sequence& sequence, T&& value, Compare&& comp) {
+  return std::lower_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value), std::forward<Compare>(comp));
+}
+
+// c_upper_bound()
+//
+// Container-based version of the <algorithm> `std::upper_bound()` function
+// to return an iterator pointing to the first element in a sorted container
+// which is greater than `value`.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
+    Sequence& sequence, T&& value) {
+  return std::upper_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value));
+}
+
+// Overload of c_upper_bound() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename Compare>
+container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
+    Sequence& sequence, T&& value, Compare&& comp) {
+  return std::upper_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value), std::forward<Compare>(comp));
+}
+
+// c_equal_range()
+//
+// Container-based version of the <algorithm> `std::equal_range()` function
+// to return an iterator pair pointing to the first and last elements in a
+// sorted container which compare equal to `value`.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
+c_equal_range(Sequence& sequence, T&& value) {
+  return std::equal_range(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value));
+}
+
+// Overload of c_equal_range() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename Compare>
+container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
+c_equal_range(Sequence& sequence, T&& value, Compare&& comp) {
+  return std::equal_range(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value), std::forward<Compare>(comp));
+}
+
+// c_binary_search()
+//
+// Container-based version of the <algorithm> `std::binary_search()` function
+// to test whether a sorted container contains an element equivalent to
+// `value`.
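+//
+// Example (an added sketch; the container must already be sorted):
+//
+//   std::vector<int> v = {1, 3, 5, 7};
+//   bool found = absl::c_binary_search(v, 5);  // true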
+template <typename Sequence, typename T>
+bool c_binary_search(Sequence&& sequence, T&& value) {
+  return std::binary_search(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<T>(value));
+}
+
+// Overload of c_binary_search() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename Compare>
+bool c_binary_search(Sequence&& sequence, T&& value, Compare&& comp) {
+  return std::binary_search(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<T>(value),
+                            std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// Merge functions
+//------------------------------------------------------------------------------
+
+// c_merge()
+//
+// Container-based version of the <algorithm> `std::merge()` function
+// to merge two sorted containers into a single sorted iterator.
+template <typename C1, typename C2, typename OutputIterator>
+OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) {
+  return std::merge(container_algorithm_internal::c_begin(c1),
+                    container_algorithm_internal::c_end(c1),
+                    container_algorithm_internal::c_begin(c2),
+                    container_algorithm_internal::c_end(c2), result);
+}
+
+// Overload of c_merge() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename Compare>
+OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result,
+                       Compare&& comp) {
+  return std::merge(container_algorithm_internal::c_begin(c1),
+                    container_algorithm_internal::c_end(c1),
+                    container_algorithm_internal::c_begin(c2),
+                    container_algorithm_internal::c_end(c2), result,
+                    std::forward<Compare>(comp));
+}
+
+// c_inplace_merge()
+//
+// Container-based version of the <algorithm> `std::inplace_merge()` function
+// to merge a supplied iterator `middle` into a container.
+template <typename C>
+void c_inplace_merge(C& c,
+                     container_algorithm_internal::ContainerIter<C> middle) {
+  std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
+                     container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_inplace_merge() for performing a merge using a `comp` other
+// than `operator<`.
+template <typename C, typename Compare>
+void c_inplace_merge(C& c,
+                     container_algorithm_internal::ContainerIter<C> middle,
+                     Compare&& comp) {
+  std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
+                     container_algorithm_internal::c_end(c),
+                     std::forward<Compare>(comp));
+}
+
+// c_includes()
+//
+// Container-based version of the <algorithm> `std::includes()` function
+// to test whether a sorted container `c1` entirely contains another sorted
+// container `c2`.
+template <typename C1, typename C2>
+bool c_includes(const C1& c1, const C2& c2) {
+  return std::includes(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       container_algorithm_internal::c_end(c2));
+}
+
+// Overload of c_includes() for performing a merge using a `comp` other than
+// `operator<`.
+template <typename C1, typename C2, typename Compare>
+bool c_includes(const C1& c1, const C2& c2, Compare&& comp) {
+  return std::includes(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       container_algorithm_internal::c_end(c2),
+                       std::forward<Compare>(comp));
+}
+
+// c_set_union()
+//
+// Container-based version of the <algorithm> `std::set_union()` function
+// to return an iterator containing the union of two containers; duplicate
+// values are not copied into the output.
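+//
+// Example (an added sketch; both inputs must be sorted):
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {2, 3, 4};
+//   std::vector<int> out;
+//   absl::c_set_union(a, b, std::back_inserter(out));  // out == {1, 2, 3, 4}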
+template <typename C1, typename C2, typename OutputIterator,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) {
+  return std::set_union(container_algorithm_internal::c_begin(c1),
+                        container_algorithm_internal::c_end(c1),
+                        container_algorithm_internal::c_begin(c2),
+                        container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_union() for performing a merge using a `comp` other than
+// `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename Compare,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output,
+                           Compare&& comp) {
+  return std::set_union(container_algorithm_internal::c_begin(c1),
+                        container_algorithm_internal::c_end(c1),
+                        container_algorithm_internal::c_begin(c2),
+                        container_algorithm_internal::c_end(c2), output,
+                        std::forward<Compare>(comp));
+}
+
+// c_set_intersection()
+//
+// Container-based version of the <algorithm> `std::set_intersection()`
+// function to return an iterator containing the intersection of two
+// containers.
+template <typename C1, typename C2, typename OutputIterator,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_intersection(const C1& c1, const C2& c2,
+                                  OutputIterator output) {
+  return std::set_intersection(container_algorithm_internal::c_begin(c1),
+                               container_algorithm_internal::c_end(c1),
+                               container_algorithm_internal::c_begin(c2),
+                               container_algorithm_internal::c_end(c2),
+                               output);
+}
+
+// Overload of c_set_intersection() for performing a merge using a `comp` other
+// than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename Compare,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_intersection(const C1& c1, const C2& c2,
+                                  OutputIterator output, Compare&& comp) {
+  return std::set_intersection(container_algorithm_internal::c_begin(c1),
+                               container_algorithm_internal::c_end(c1),
+                               container_algorithm_internal::c_begin(c2),
+                               container_algorithm_internal::c_end(c2),
+                               output, std::forward<Compare>(comp));
+}
+
+// c_set_difference()
+//
+// Container-based version of the <algorithm> `std::set_difference()` function
+// to return an iterator containing elements present in the first container but
+// not in the second.
+template <typename C1, typename C2, typename OutputIterator,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_difference(const C1& c1, const C2& c2,
+                                OutputIterator output) {
+  return std::set_difference(container_algorithm_internal::c_begin(c1),
+                             container_algorithm_internal::c_end(c1),
+                             container_algorithm_internal::c_begin(c2),
+                             container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_difference() for performing a merge using a `comp` other
+// than `operator<`.
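+//
+// Example (an added sketch; both inputs must be sorted):
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {2};
+//   std::vector<int> out;
+//   absl::c_set_difference(a, b, std::back_inserter(out));  // out == {1, 3}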
+template <typename C1, typename C2, typename OutputIterator, typename Compare,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_difference(const C1& c1, const C2& c2,
+                                OutputIterator output, Compare&& comp) {
+  return std::set_difference(container_algorithm_internal::c_begin(c1),
+                             container_algorithm_internal::c_end(c1),
+                             container_algorithm_internal::c_begin(c2),
+                             container_algorithm_internal::c_end(c2), output,
+                             std::forward<Compare>(comp));
+}
+
+// c_set_symmetric_difference()
+//
+// Container-based version of the <algorithm> `std::set_symmetric_difference()`
+// function to return an iterator containing elements present in either one
+// container or the other, but not both.
+template <typename C1, typename C2, typename OutputIterator,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
+                                          OutputIterator output) {
+  return std::set_symmetric_difference(
+      container_algorithm_internal::c_begin(c1),
+      container_algorithm_internal::c_end(c1),
+      container_algorithm_internal::c_begin(c2),
+      container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_symmetric_difference() for performing a merge using a
+// `comp` other than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename Compare,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
+                                          OutputIterator output,
+                                          Compare&& comp) {
+  return std::set_symmetric_difference(
+      container_algorithm_internal::c_begin(c1),
+      container_algorithm_internal::c_end(c1),
+      container_algorithm_internal::c_begin(c2),
+      container_algorithm_internal::c_end(c2), output,
+      std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// Heap functions
+//------------------------------------------------------------------------------
+
+// c_push_heap()
+//
+// Container-based version of the <algorithm> `std::push_heap()` function
+// to push a value onto a container heap.
+template <typename RandomAccessContainer>
+void c_push_heap(RandomAccessContainer& sequence) {
+  std::push_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_push_heap() for performing a push operation on a heap using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename Compare>
+void c_push_heap(RandomAccessContainer& sequence, Compare&& comp) {
+  std::push_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence),
+                 std::forward<Compare>(comp));
+}
+
+// c_pop_heap()
+//
+// Container-based version of the <algorithm> `std::pop_heap()` function
+// to pop a value from a heap container.
+template <typename RandomAccessContainer>
+void c_pop_heap(RandomAccessContainer& sequence) {
+  std::pop_heap(container_algorithm_internal::c_begin(sequence),
+                container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_pop_heap() for performing a pop operation on a heap using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename Compare>
+void c_pop_heap(RandomAccessContainer& sequence, Compare&& comp) {
+  std::pop_heap(container_algorithm_internal::c_begin(sequence),
+                container_algorithm_internal::c_end(sequence),
+                std::forward<Compare>(comp));
+}
+
+// c_make_heap()
+//
+// Container-based version of the <algorithm> `std::make_heap()` function
+// to make a container a heap.
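+//
+// Example of a typical heap workflow (an added sketch):
+//
+//   std::vector<int> v = {3, 1, 4};
+//   absl::c_make_heap(v);   // max-heap: v.front() == 4
+//   v.push_back(9);
+//   absl::c_push_heap(v);   // v.front() == 9
+//   absl::c_pop_heap(v);    // moves 9 to v.back()
+//   v.pop_back();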
+template <typename RandomAccessContainer>
+void c_make_heap(RandomAccessContainer& sequence) {
+  std::make_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_make_heap() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename Compare>
+void c_make_heap(RandomAccessContainer& sequence, Compare&& comp) {
+  std::make_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence),
+                 std::forward<Compare>(comp));
+}
+
+// c_sort_heap()
+//
+// Container-based version of the <algorithm> `std::sort_heap()` function
+// to sort a heap into ascending order (after which it is no longer a heap).
+template <typename RandomAccessContainer>
+void c_sort_heap(RandomAccessContainer& sequence) {
+  std::sort_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_sort_heap() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename Compare>
+void c_sort_heap(RandomAccessContainer& sequence, Compare&& comp) {
+  std::sort_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence),
+                 std::forward<Compare>(comp));
+}
+
+// c_is_heap()
+//
+// Container-based version of the <algorithm> `std::is_heap()` function
+// to check whether the given container is a heap.
+template <typename RandomAccessContainer>
+bool c_is_heap(const RandomAccessContainer& sequence) {
+  return std::is_heap(container_algorithm_internal::c_begin(sequence),
+                      container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_is_heap() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename Compare>
+bool c_is_heap(const RandomAccessContainer& sequence, Compare&& comp) {
+  return std::is_heap(container_algorithm_internal::c_begin(sequence),
+                      container_algorithm_internal::c_end(sequence),
+                      std::forward<Compare>(comp));
+}
+
+// c_is_heap_until()
+//
+// Container-based version of the <algorithm> `std::is_heap_until()` function
+// to find the first element in a given container which is not in heap order.
+template <typename RandomAccessContainer>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_is_heap_until(RandomAccessContainer& sequence) {
+  return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_is_heap_until() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename Compare>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_is_heap_until(RandomAccessContainer& sequence, Compare&& comp) {
+  return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// Min/max
+//------------------------------------------------------------------------------
+
+// c_min_element()
+//
+// Container-based version of the <algorithm> `std::min_element()` function
+// to return an iterator pointing to the element with the smallest value, using
+// `operator<` to make the comparisons.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_min_element(
+    Sequence& sequence) {
+  return std::min_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_min_element() for performing a `comp` comparison other than
+// `operator<`.
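+//
+// Example (an added sketch):
+//
+//   std::vector<int> v = {3, 1, 4};
+//   auto smallest = absl::c_min_element(v);                      // *smallest == 1
+//   auto largest = absl::c_min_element(v, std::greater<int>());  // *largest == 4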
+template <typename Sequence, typename Compare>
+container_algorithm_internal::ContainerIter<Sequence> c_min_element(
+    Sequence& sequence, Compare&& comp) {
+  return std::min_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<Compare>(comp));
+}
+
+// c_max_element()
+//
+// Container-based version of the `std::max_element()` function
+// to return an iterator pointing to the element with the largest value, using
+// `operator<` to make the comparisons.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_max_element(
+    Sequence& sequence) {
+  return std::max_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_max_element() for performing a `comp` comparison other than
+// `operator<`.
+template <typename Sequence, typename Compare>
+container_algorithm_internal::ContainerIter<Sequence> c_max_element(
+    Sequence& sequence, Compare&& comp) {
+  return std::max_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<Compare>(comp));
+}
+
+// c_minmax_element()
+//
+// Container-based version of the `std::minmax_element()` function
+// to return a pair of iterators pointing to the elements containing the
+// smallest and largest values, respectively, using `operator<` to make the
+// comparisons.
+template <typename C>
+container_algorithm_internal::ContainerIterPairType<C, C>
+c_minmax_element(C& c) {
+  return std::minmax_element(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_minmax_element() for performing `comp` comparisons other than
+// `operator<`.
+template <typename C, typename Compare>
+container_algorithm_internal::ContainerIterPairType<C, C>
+c_minmax_element(C& c, Compare&& comp) {
+  return std::minmax_element(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c),
+                             std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// Lexicographical Comparisons
+//------------------------------------------------------------------------------
+
+// c_lexicographical_compare()
+//
+// Container-based version of the `std::lexicographical_compare()`
+// function to lexicographically compare (e.g. sort words alphabetically) two
+// container sequences. The comparison is performed using `operator<`. Note
+// that capital letters ("A-Z") have ASCII values less than lowercase letters
+// ("a-z").
+template <typename Sequence1, typename Sequence2>
+bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2) {
+  return std::lexicographical_compare(
+      container_algorithm_internal::c_begin(sequence1),
+      container_algorithm_internal::c_end(sequence1),
+      container_algorithm_internal::c_begin(sequence2),
+      container_algorithm_internal::c_end(sequence2));
+}
+
+// Overload of c_lexicographical_compare() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename Sequence1, typename Sequence2, typename Compare>
+bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2,
+                               Compare&& comp) {
+  return std::lexicographical_compare(
+      container_algorithm_internal::c_begin(sequence1),
+      container_algorithm_internal::c_end(sequence1),
+      container_algorithm_internal::c_begin(sequence2),
+      container_algorithm_internal::c_end(sequence2),
+      std::forward<Compare>(comp));
+}
+
+// c_next_permutation()
+//
+// Container-based version of the `std::next_permutation()` function
+// to rearrange a container's elements into the next lexicographically greater
+// permutation.
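+//
+// Example (illustrative sketch): starting from a sorted sequence, repeated
+// calls visit every permutation and return false once the sequence wraps
+// back around to sorted order:
+//
+//   std::vector<int> v = {1, 2, 3};
+//   do {
+//     // v is {1,2,3}, then {1,3,2}, {2,1,3}, ... on successive iterations.
+//   } while (absl::c_next_permutation(v));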
+template <typename C>
+bool c_next_permutation(C& c) {
+  return std::next_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_next_permutation() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename C, typename Compare>
+bool c_next_permutation(C& c, Compare&& comp) {
+  return std::next_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<Compare>(comp));
+}
+
+// c_prev_permutation()
+//
+// Container-based version of the `std::prev_permutation()` function
+// to rearrange a container's elements into the next lexicographically lesser
+// permutation.
+template <typename C>
+bool c_prev_permutation(C& c) {
+  return std::prev_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_prev_permutation() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename C, typename Compare>
+bool c_prev_permutation(C& c, Compare&& comp) {
+  return std::prev_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <numeric> algorithms
+//------------------------------------------------------------------------------
+
+// c_iota()
+//
+// Container-based version of the `std::iota()` function
+// to compute successive values of `value`, as if incremented with `++value`
+// after each element is written, and write them to the container.
+template <typename Sequence, typename T>
+void c_iota(Sequence& sequence, T&& value) {
+  std::iota(container_algorithm_internal::c_begin(sequence),
+            container_algorithm_internal::c_end(sequence),
+            std::forward<T>(value));
+}
+
+// c_accumulate()
+//
+// Container-based version of the `std::accumulate()` function
+// to accumulate the element values of a container to `init` and return that
+// accumulation by value.
+//
+// Note: Due to a language technicality this function has return type
+// absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
+template <typename Sequence, typename T>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init) {
+  return std::accumulate(container_algorithm_internal::c_begin(sequence),
+                         container_algorithm_internal::c_end(sequence),
+                         std::forward<T>(init));
+}
+
+// Overload of c_accumulate() for using a binary operation other than
+// addition for computing the accumulation.
+template <typename Sequence, typename T, typename BinaryOp>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init,
+                        BinaryOp&& binary_op) {
+  return std::accumulate(container_algorithm_internal::c_begin(sequence),
+                         container_algorithm_internal::c_end(sequence),
+                         std::forward<T>(init),
+                         std::forward<BinaryOp>(binary_op));
+}
+
+// c_inner_product()
+//
+// Container-based version of the `std::inner_product()` function
+// to compute the cumulative inner product of container element pairs.
+//
+// Note: Due to a language technicality this function has return type
+// absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
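+//
+// Example (illustrative sketch): the default overload accumulates with `+`
+// and combines element pairs with `*`, i.e. a dot product:
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {4, 5, 6};
+//   int dot = absl::c_inner_product(a, b, 0);  // 1*4 + 2*5 + 3*6 == 32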
+template <typename Sequence1, typename Sequence2, typename T>
+decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
+                           T&& sum) {
+  return std::inner_product(container_algorithm_internal::c_begin(factors1),
+                            container_algorithm_internal::c_end(factors1),
+                            container_algorithm_internal::c_begin(factors2),
+                            std::forward<T>(sum));
+}
+
+// Overload of c_inner_product() for using binary operations other than
+// `operator+` (for computing the accumulation) and `operator*` (for computing
+// the product between the two container's element pair).
+template <typename Sequence1, typename Sequence2, typename T,
+          typename BinaryOp1, typename BinaryOp2>
+decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
+                           T&& sum, BinaryOp1&& op1, BinaryOp2&& op2) {
+  return std::inner_product(container_algorithm_internal::c_begin(factors1),
+                            container_algorithm_internal::c_end(factors1),
+                            container_algorithm_internal::c_begin(factors2),
+                            std::forward<T>(sum), std::forward<BinaryOp1>(op1),
+                            std::forward<BinaryOp2>(op2));
+}
+
+// c_adjacent_difference()
+//
+// Container-based version of the `std::adjacent_difference()` function
+// to compute the difference between each element and the one preceding it
+// and write it to an iterator.
+template <typename InputSequence, typename OutputIt>
+OutputIt c_adjacent_difference(const InputSequence& input,
+                               OutputIt output_first) {
+  return std::adjacent_difference(container_algorithm_internal::c_begin(input),
+                                  container_algorithm_internal::c_end(input),
+                                  output_first);
+}
+
+// Overload of c_adjacent_difference() for using a binary operation other than
+// subtraction to compute the adjacent difference.
+template <typename InputSequence, typename OutputIt, typename BinaryOp>
+OutputIt c_adjacent_difference(const InputSequence& input,
+                               OutputIt output_first, BinaryOp&& op) {
+  return std::adjacent_difference(container_algorithm_internal::c_begin(input),
+                                  container_algorithm_internal::c_end(input),
+                                  output_first, std::forward<BinaryOp>(op));
+}
+
+// c_partial_sum()
+//
+// Container-based version of the `std::partial_sum()` function
+// to compute the partial sum of the elements in a sequence and write them
+// to an iterator. The partial sum is the sum of all element values so far in
+// the sequence.
+template <typename InputSequence, typename OutputIt>
+OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) {
+  return std::partial_sum(container_algorithm_internal::c_begin(input),
+                          container_algorithm_internal::c_end(input),
+                          output_first);
+}
+
+// Overload of c_partial_sum() for using a binary operation other than addition
+// to compute the "partial sum".
+template <typename InputSequence, typename OutputIt, typename BinaryOp>
+OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first,
+                       BinaryOp&& op) {
+  return std::partial_sum(container_algorithm_internal::c_begin(input),
+                          container_algorithm_internal::c_end(input),
+                          output_first, std::forward<BinaryOp>(op));
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_ALGORITHM_CONTAINER_H_
diff --git a/base/abseil/absl/algorithm/container_test.cc b/base/abseil/absl/algorithm/container_test.cc
new file mode 100644
index 0000000..0a4abe9
--- /dev/null
+++ b/base/abseil/absl/algorithm/container_test.cc
@@ -0,0 +1,1031 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "absl/algorithm/container.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/casts.h" +#include "absl/base/macros.h" +#include "absl/memory/memory.h" +#include "absl/types/span.h" + +namespace { + +using ::testing::Each; +using ::testing::ElementsAre; +using ::testing::Gt; +using ::testing::IsNull; +using ::testing::Lt; +using ::testing::Pointee; +using ::testing::Truly; +using ::testing::UnorderedElementsAre; + +// Most of these tests just check that the code compiles, not that it +// does the right thing. That's fine since the functions just forward +// to the STL implementation. +class NonMutatingTest : public testing::Test { + protected: + std::unordered_set container_ = {1, 2, 3}; + std::list sequence_ = {1, 2, 3}; + std::vector vector_ = {1, 2, 3}; + int array_[3] = {1, 2, 3}; +}; + +struct AccumulateCalls { + void operator()(int value) { + calls.push_back(value); + } + std::vector calls; +}; + +bool Predicate(int value) { return value < 3; } +bool BinPredicate(int v1, int v2) { return v1 < v2; } +bool Equals(int v1, int v2) { return v1 == v2; } +bool IsOdd(int x) { return x % 2 != 0; } + + +TEST_F(NonMutatingTest, Distance) { + EXPECT_EQ(container_.size(), absl::c_distance(container_)); + EXPECT_EQ(sequence_.size(), absl::c_distance(sequence_)); + EXPECT_EQ(vector_.size(), absl::c_distance(vector_)); + EXPECT_EQ(ABSL_ARRAYSIZE(array_), absl::c_distance(array_)); + + // Works with a temporary argument. + EXPECT_EQ(vector_.size(), absl::c_distance(std::vector(vector_))); +} + +TEST_F(NonMutatingTest, Distance_OverloadedBeginEnd) { + // Works with classes which have custom ADL-selected overloads of std::begin + // and std::end. + std::initializer_list a = {1, 2, 3}; + std::valarray b = {1, 2, 3}; + EXPECT_EQ(3, absl::c_distance(a)); + EXPECT_EQ(3, absl::c_distance(b)); + + // It is assumed that other c_* functions use the same mechanism for + // ADL-selecting begin/end overloads. +} + +TEST_F(NonMutatingTest, ForEach) { + AccumulateCalls c = absl::c_for_each(container_, AccumulateCalls()); + // Don't rely on the unordered_set's order. + std::sort(c.calls.begin(), c.calls.end()); + EXPECT_EQ(vector_, c.calls); + + // Works with temporary container, too. 
+ AccumulateCalls c2 = + absl::c_for_each(std::unordered_set(container_), AccumulateCalls()); + std::sort(c2.calls.begin(), c2.calls.end()); + EXPECT_EQ(vector_, c2.calls); +} + +TEST_F(NonMutatingTest, FindReturnsCorrectType) { + auto it = absl::c_find(container_, 3); + EXPECT_EQ(3, *it); + absl::c_find(absl::implicit_cast&>(sequence_), 3); +} + +TEST_F(NonMutatingTest, FindIf) { absl::c_find_if(container_, Predicate); } + +TEST_F(NonMutatingTest, FindIfNot) { + absl::c_find_if_not(container_, Predicate); +} + +TEST_F(NonMutatingTest, FindEnd) { + absl::c_find_end(sequence_, vector_); + absl::c_find_end(vector_, sequence_); +} + +TEST_F(NonMutatingTest, FindEndWithPredicate) { + absl::c_find_end(sequence_, vector_, BinPredicate); + absl::c_find_end(vector_, sequence_, BinPredicate); +} + +TEST_F(NonMutatingTest, FindFirstOf) { + absl::c_find_first_of(container_, sequence_); + absl::c_find_first_of(sequence_, container_); +} + +TEST_F(NonMutatingTest, FindFirstOfWithPredicate) { + absl::c_find_first_of(container_, sequence_, BinPredicate); + absl::c_find_first_of(sequence_, container_, BinPredicate); +} + +TEST_F(NonMutatingTest, AdjacentFind) { absl::c_adjacent_find(sequence_); } + +TEST_F(NonMutatingTest, AdjacentFindWithPredicate) { + absl::c_adjacent_find(sequence_, BinPredicate); +} + +TEST_F(NonMutatingTest, Count) { EXPECT_EQ(1, absl::c_count(container_, 3)); } + +TEST_F(NonMutatingTest, CountIf) { + EXPECT_EQ(2, absl::c_count_if(container_, Predicate)); + const std::unordered_set& const_container = container_; + EXPECT_EQ(2, absl::c_count_if(const_container, Predicate)); +} + +TEST_F(NonMutatingTest, Mismatch) { + absl::c_mismatch(container_, sequence_); + absl::c_mismatch(sequence_, container_); +} + +TEST_F(NonMutatingTest, MismatchWithPredicate) { + absl::c_mismatch(container_, sequence_, BinPredicate); + absl::c_mismatch(sequence_, container_, BinPredicate); +} + +TEST_F(NonMutatingTest, Equal) { + EXPECT_TRUE(absl::c_equal(vector_, sequence_)); + EXPECT_TRUE(absl::c_equal(sequence_, vector_)); + EXPECT_TRUE(absl::c_equal(sequence_, array_)); + EXPECT_TRUE(absl::c_equal(array_, vector_)); + + // Test that behavior appropriately differs from that of equal(). + std::vector vector_plus = {1, 2, 3}; + vector_plus.push_back(4); + EXPECT_FALSE(absl::c_equal(vector_plus, sequence_)); + EXPECT_FALSE(absl::c_equal(sequence_, vector_plus)); + EXPECT_FALSE(absl::c_equal(array_, vector_plus)); +} + +TEST_F(NonMutatingTest, EqualWithPredicate) { + EXPECT_TRUE(absl::c_equal(vector_, sequence_, Equals)); + EXPECT_TRUE(absl::c_equal(sequence_, vector_, Equals)); + EXPECT_TRUE(absl::c_equal(array_, sequence_, Equals)); + EXPECT_TRUE(absl::c_equal(vector_, array_, Equals)); + + // Test that behavior appropriately differs from that of equal(). + std::vector vector_plus = {1, 2, 3}; + vector_plus.push_back(4); + EXPECT_FALSE(absl::c_equal(vector_plus, sequence_, Equals)); + EXPECT_FALSE(absl::c_equal(sequence_, vector_plus, Equals)); + EXPECT_FALSE(absl::c_equal(vector_plus, array_, Equals)); +} + +TEST_F(NonMutatingTest, IsPermutation) { + auto vector_permut_ = vector_; + std::next_permutation(vector_permut_.begin(), vector_permut_.end()); + EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_)); + EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_)); + + // Test that behavior appropriately differs from that of is_permutation(). 
+ std::vector vector_plus = {1, 2, 3}; + vector_plus.push_back(4); + EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_)); + EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus)); +} + +TEST_F(NonMutatingTest, IsPermutationWithPredicate) { + auto vector_permut_ = vector_; + std::next_permutation(vector_permut_.begin(), vector_permut_.end()); + EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_, Equals)); + EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_, Equals)); + + // Test that behavior appropriately differs from that of is_permutation(). + std::vector vector_plus = {1, 2, 3}; + vector_plus.push_back(4); + EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_, Equals)); + EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus, Equals)); +} + +TEST_F(NonMutatingTest, Search) { + absl::c_search(sequence_, vector_); + absl::c_search(vector_, sequence_); + absl::c_search(array_, sequence_); +} + +TEST_F(NonMutatingTest, SearchWithPredicate) { + absl::c_search(sequence_, vector_, BinPredicate); + absl::c_search(vector_, sequence_, BinPredicate); +} + +TEST_F(NonMutatingTest, SearchN) { absl::c_search_n(sequence_, 3, 1); } + +TEST_F(NonMutatingTest, SearchNWithPredicate) { + absl::c_search_n(sequence_, 3, 1, BinPredicate); +} + +TEST_F(NonMutatingTest, LowerBound) { + std::list::iterator i = absl::c_lower_bound(sequence_, 3); + ASSERT_TRUE(i != sequence_.end()); + EXPECT_EQ(2, std::distance(sequence_.begin(), i)); + EXPECT_EQ(3, *i); +} + +TEST_F(NonMutatingTest, LowerBoundWithPredicate) { + std::vector v(vector_); + std::sort(v.begin(), v.end(), std::greater()); + std::vector::iterator i = absl::c_lower_bound(v, 3, std::greater()); + EXPECT_TRUE(i == v.begin()); + EXPECT_EQ(3, *i); +} + +TEST_F(NonMutatingTest, UpperBound) { + std::list::iterator i = absl::c_upper_bound(sequence_, 1); + ASSERT_TRUE(i != sequence_.end()); + EXPECT_EQ(1, std::distance(sequence_.begin(), i)); + EXPECT_EQ(2, *i); +} + +TEST_F(NonMutatingTest, UpperBoundWithPredicate) { + std::vector v(vector_); + std::sort(v.begin(), v.end(), std::greater()); + std::vector::iterator i = absl::c_upper_bound(v, 1, std::greater()); + EXPECT_EQ(3, i - v.begin()); + EXPECT_TRUE(i == v.end()); +} + +TEST_F(NonMutatingTest, EqualRange) { + std::pair::iterator, std::list::iterator> p = + absl::c_equal_range(sequence_, 2); + EXPECT_EQ(1, std::distance(sequence_.begin(), p.first)); + EXPECT_EQ(2, std::distance(sequence_.begin(), p.second)); +} + +TEST_F(NonMutatingTest, EqualRangeArray) { + auto p = absl::c_equal_range(array_, 2); + EXPECT_EQ(1, std::distance(std::begin(array_), p.first)); + EXPECT_EQ(2, std::distance(std::begin(array_), p.second)); +} + +TEST_F(NonMutatingTest, EqualRangeWithPredicate) { + std::vector v(vector_); + std::sort(v.begin(), v.end(), std::greater()); + std::pair::iterator, std::vector::iterator> p = + absl::c_equal_range(v, 2, std::greater()); + EXPECT_EQ(1, std::distance(v.begin(), p.first)); + EXPECT_EQ(2, std::distance(v.begin(), p.second)); +} + +TEST_F(NonMutatingTest, BinarySearch) { + EXPECT_TRUE(absl::c_binary_search(vector_, 2)); + EXPECT_TRUE(absl::c_binary_search(std::vector(vector_), 2)); +} + +TEST_F(NonMutatingTest, BinarySearchWithPredicate) { + std::vector v(vector_); + std::sort(v.begin(), v.end(), std::greater()); + EXPECT_TRUE(absl::c_binary_search(v, 2, std::greater())); + EXPECT_TRUE( + absl::c_binary_search(std::vector(v), 2, std::greater())); +} + +TEST_F(NonMutatingTest, MinElement) { + std::list::iterator i = 
absl::c_min_element(sequence_); + ASSERT_TRUE(i != sequence_.end()); + EXPECT_EQ(*i, 1); +} + +TEST_F(NonMutatingTest, MinElementWithPredicate) { + std::list::iterator i = + absl::c_min_element(sequence_, std::greater()); + ASSERT_TRUE(i != sequence_.end()); + EXPECT_EQ(*i, 3); +} + +TEST_F(NonMutatingTest, MaxElement) { + std::list::iterator i = absl::c_max_element(sequence_); + ASSERT_TRUE(i != sequence_.end()); + EXPECT_EQ(*i, 3); +} + +TEST_F(NonMutatingTest, MaxElementWithPredicate) { + std::list::iterator i = + absl::c_max_element(sequence_, std::greater()); + ASSERT_TRUE(i != sequence_.end()); + EXPECT_EQ(*i, 1); +} + +TEST_F(NonMutatingTest, LexicographicalCompare) { + EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_)); + + std::vector v; + v.push_back(1); + v.push_back(2); + v.push_back(4); + + EXPECT_TRUE(absl::c_lexicographical_compare(sequence_, v)); + EXPECT_TRUE(absl::c_lexicographical_compare(std::list(sequence_), v)); +} + +TEST_F(NonMutatingTest, LexicographicalCopmareWithPredicate) { + EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_, + std::greater())); + + std::vector v; + v.push_back(1); + v.push_back(2); + v.push_back(4); + + EXPECT_TRUE( + absl::c_lexicographical_compare(v, sequence_, std::greater())); + EXPECT_TRUE(absl::c_lexicographical_compare( + std::vector(v), std::list(sequence_), std::greater())); +} + +TEST_F(NonMutatingTest, Includes) { + std::set s(vector_.begin(), vector_.end()); + s.insert(4); + EXPECT_TRUE(absl::c_includes(s, vector_)); +} + +TEST_F(NonMutatingTest, IncludesWithPredicate) { + std::vector v = {3, 2, 1}; + std::set> s(v.begin(), v.end()); + s.insert(4); + EXPECT_TRUE(absl::c_includes(s, v, std::greater())); +} + +class NumericMutatingTest : public testing::Test { + protected: + std::list list_ = {1, 2, 3}; + std::vector output_; +}; + +TEST_F(NumericMutatingTest, Iota) { + absl::c_iota(list_, 5); + std::list expected{5, 6, 7}; + EXPECT_EQ(list_, expected); +} + +TEST_F(NonMutatingTest, Accumulate) { + EXPECT_EQ(absl::c_accumulate(sequence_, 4), 1 + 2 + 3 + 4); +} + +TEST_F(NonMutatingTest, AccumulateWithBinaryOp) { + EXPECT_EQ(absl::c_accumulate(sequence_, 4, std::multiplies()), + 1 * 2 * 3 * 4); +} + +TEST_F(NonMutatingTest, AccumulateLvalueInit) { + int lvalue = 4; + EXPECT_EQ(absl::c_accumulate(sequence_, lvalue), 1 + 2 + 3 + 4); +} + +TEST_F(NonMutatingTest, AccumulateWithBinaryOpLvalueInit) { + int lvalue = 4; + EXPECT_EQ(absl::c_accumulate(sequence_, lvalue, std::multiplies()), + 1 * 2 * 3 * 4); +} + +TEST_F(NonMutatingTest, InnerProduct) { + EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 1000), + 1000 + 1 * 1 + 2 * 2 + 3 * 3); +} + +TEST_F(NonMutatingTest, InnerProductWithBinaryOps) { + EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 10, + std::multiplies(), std::plus()), + 10 * (1 + 1) * (2 + 2) * (3 + 3)); +} + +TEST_F(NonMutatingTest, InnerProductLvalueInit) { + int lvalue = 1000; + EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue), + 1000 + 1 * 1 + 2 * 2 + 3 * 3); +} + +TEST_F(NonMutatingTest, InnerProductWithBinaryOpsLvalueInit) { + int lvalue = 10; + EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue, + std::multiplies(), std::plus()), + 10 * (1 + 1) * (2 + 2) * (3 + 3)); +} + +TEST_F(NumericMutatingTest, AdjacentDifference) { + auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_)); + *last = 1000; + std::vector expected{1, 2 - 1, 3 - 2, 1000}; + EXPECT_EQ(output_, expected); +} + +TEST_F(NumericMutatingTest, AdjacentDifferenceWithBinaryOp) { + 
auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_), + std::multiplies()); + *last = 1000; + std::vector expected{1, 2 * 1, 3 * 2, 1000}; + EXPECT_EQ(output_, expected); +} + +TEST_F(NumericMutatingTest, PartialSum) { + auto last = absl::c_partial_sum(list_, std::back_inserter(output_)); + *last = 1000; + std::vector expected{1, 1 + 2, 1 + 2 + 3, 1000}; + EXPECT_EQ(output_, expected); +} + +TEST_F(NumericMutatingTest, PartialSumWithBinaryOp) { + auto last = absl::c_partial_sum(list_, std::back_inserter(output_), + std::multiplies()); + *last = 1000; + std::vector expected{1, 1 * 2, 1 * 2 * 3, 1000}; + EXPECT_EQ(output_, expected); +} + +TEST_F(NonMutatingTest, LinearSearch) { + EXPECT_TRUE(absl::c_linear_search(container_, 3)); + EXPECT_FALSE(absl::c_linear_search(container_, 4)); +} + +TEST_F(NonMutatingTest, AllOf) { + const std::vector& v = vector_; + EXPECT_FALSE(absl::c_all_of(v, [](int x) { return x > 1; })); + EXPECT_TRUE(absl::c_all_of(v, [](int x) { return x > 0; })); +} + +TEST_F(NonMutatingTest, AnyOf) { + const std::vector& v = vector_; + EXPECT_TRUE(absl::c_any_of(v, [](int x) { return x > 2; })); + EXPECT_FALSE(absl::c_any_of(v, [](int x) { return x > 5; })); +} + +TEST_F(NonMutatingTest, NoneOf) { + const std::vector& v = vector_; + EXPECT_FALSE(absl::c_none_of(v, [](int x) { return x > 2; })); + EXPECT_TRUE(absl::c_none_of(v, [](int x) { return x > 5; })); +} + +TEST_F(NonMutatingTest, MinMaxElementLess) { + std::pair::const_iterator, std::vector::const_iterator> + p = absl::c_minmax_element(vector_, std::less()); + EXPECT_TRUE(p.first == vector_.begin()); + EXPECT_TRUE(p.second == vector_.begin() + 2); +} + +TEST_F(NonMutatingTest, MinMaxElementGreater) { + std::pair::const_iterator, std::vector::const_iterator> + p = absl::c_minmax_element(vector_, std::greater()); + EXPECT_TRUE(p.first == vector_.begin() + 2); + EXPECT_TRUE(p.second == vector_.begin()); +} + +TEST_F(NonMutatingTest, MinMaxElementNoPredicate) { + std::pair::const_iterator, std::vector::const_iterator> + p = absl::c_minmax_element(vector_); + EXPECT_TRUE(p.first == vector_.begin()); + EXPECT_TRUE(p.second == vector_.begin() + 2); +} + +class SortingTest : public testing::Test { + protected: + std::list sorted_ = {1, 2, 3, 4}; + std::list unsorted_ = {2, 4, 1, 3}; + std::list reversed_ = {4, 3, 2, 1}; +}; + +TEST_F(SortingTest, IsSorted) { + EXPECT_TRUE(absl::c_is_sorted(sorted_)); + EXPECT_FALSE(absl::c_is_sorted(unsorted_)); + EXPECT_FALSE(absl::c_is_sorted(reversed_)); +} + +TEST_F(SortingTest, IsSortedWithPredicate) { + EXPECT_FALSE(absl::c_is_sorted(sorted_, std::greater())); + EXPECT_FALSE(absl::c_is_sorted(unsorted_, std::greater())); + EXPECT_TRUE(absl::c_is_sorted(reversed_, std::greater())); +} + +TEST_F(SortingTest, IsSortedUntil) { + EXPECT_EQ(1, *absl::c_is_sorted_until(unsorted_)); + EXPECT_EQ(4, *absl::c_is_sorted_until(unsorted_, std::greater())); +} + +TEST_F(SortingTest, NthElement) { + std::vector unsorted = {2, 4, 1, 3}; + absl::c_nth_element(unsorted, unsorted.begin() + 2); + EXPECT_THAT(unsorted, + ElementsAre(Lt(3), Lt(3), 3, Gt(3))); + absl::c_nth_element(unsorted, unsorted.begin() + 2, std::greater()); + EXPECT_THAT(unsorted, + ElementsAre(Gt(2), Gt(2), 2, Lt(2))); +} + +TEST(MutatingTest, IsPartitioned) { + EXPECT_TRUE( + absl::c_is_partitioned(std::vector{1, 3, 5, 2, 4, 6}, IsOdd)); + EXPECT_FALSE( + absl::c_is_partitioned(std::vector{1, 2, 3, 4, 5, 6}, IsOdd)); + EXPECT_FALSE( + absl::c_is_partitioned(std::vector{2, 4, 6, 1, 3, 5}, IsOdd)); +} + 
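+// A sequence is "partitioned" with respect to a predicate when every element
+// satisfying it precedes every element that does not; c_partition_point()
+// then returns the first element of the second group. Illustrative sketch
+// (hypothetical values):
+//
+//   std::vector<int> v = {1, 3, 2, 4};            // odds before evens
+//   auto it = absl::c_partition_point(v, IsOdd);  // *it == 2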
+TEST(MutatingTest, Partition) { + std::vector actual = {1, 2, 3, 4, 5}; + absl::c_partition(actual, IsOdd); + EXPECT_THAT(actual, Truly([](const std::vector& c) { + return absl::c_is_partitioned(c, IsOdd); + })); +} + +TEST(MutatingTest, StablePartition) { + std::vector actual = {1, 2, 3, 4, 5}; + absl::c_stable_partition(actual, IsOdd); + EXPECT_THAT(actual, ElementsAre(1, 3, 5, 2, 4)); +} + +TEST(MutatingTest, PartitionCopy) { + const std::vector initial = {1, 2, 3, 4, 5}; + std::vector odds, evens; + auto ends = absl::c_partition_copy(initial, back_inserter(odds), + back_inserter(evens), IsOdd); + *ends.first = 7; + *ends.second = 6; + EXPECT_THAT(odds, ElementsAre(1, 3, 5, 7)); + EXPECT_THAT(evens, ElementsAre(2, 4, 6)); +} + +TEST(MutatingTest, PartitionPoint) { + const std::vector initial = {1, 3, 5, 2, 4}; + auto middle = absl::c_partition_point(initial, IsOdd); + EXPECT_EQ(2, *middle); +} + +TEST(MutatingTest, CopyMiddle) { + const std::vector initial = {4, -1, -2, -3, 5}; + const std::list input = {1, 2, 3}; + const std::vector expected = {4, 1, 2, 3, 5}; + + std::list test_list(initial.begin(), initial.end()); + absl::c_copy(input, ++test_list.begin()); + EXPECT_EQ(std::list(expected.begin(), expected.end()), test_list); + + std::vector test_vector = initial; + absl::c_copy(input, test_vector.begin() + 1); + EXPECT_EQ(expected, test_vector); +} + +TEST(MutatingTest, CopyFrontInserter) { + const std::list initial = {4, 5}; + const std::list input = {1, 2, 3}; + const std::list expected = {3, 2, 1, 4, 5}; + + std::list test_list = initial; + absl::c_copy(input, std::front_inserter(test_list)); + EXPECT_EQ(expected, test_list); +} + +TEST(MutatingTest, CopyBackInserter) { + const std::vector initial = {4, 5}; + const std::list input = {1, 2, 3}; + const std::vector expected = {4, 5, 1, 2, 3}; + + std::list test_list(initial.begin(), initial.end()); + absl::c_copy(input, std::back_inserter(test_list)); + EXPECT_EQ(std::list(expected.begin(), expected.end()), test_list); + + std::vector test_vector = initial; + absl::c_copy(input, std::back_inserter(test_vector)); + EXPECT_EQ(expected, test_vector); +} + +TEST(MutatingTest, CopyN) { + const std::vector initial = {1, 2, 3, 4, 5}; + const std::vector expected = {1, 2}; + std::vector actual; + absl::c_copy_n(initial, 2, back_inserter(actual)); + EXPECT_EQ(expected, actual); +} + +TEST(MutatingTest, CopyIf) { + const std::list input = {1, 2, 3}; + std::vector output; + absl::c_copy_if(input, std::back_inserter(output), + [](int i) { return i != 2; }); + EXPECT_THAT(output, ElementsAre(1, 3)); +} + +TEST(MutatingTest, CopyBackward) { + std::vector actual = {1, 2, 3, 4, 5}; + std::vector expected = {1, 2, 1, 2, 3}; + absl::c_copy_backward(absl::MakeSpan(actual.data(), 3), actual.end()); + EXPECT_EQ(expected, actual); +} + +TEST(MutatingTest, Move) { + std::vector> src; + src.emplace_back(absl::make_unique(1)); + src.emplace_back(absl::make_unique(2)); + src.emplace_back(absl::make_unique(3)); + src.emplace_back(absl::make_unique(4)); + src.emplace_back(absl::make_unique(5)); + + std::vector> dest = {}; + absl::c_move(src, std::back_inserter(dest)); + EXPECT_THAT(src, Each(IsNull())); + EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(4), + Pointee(5))); +} + +TEST(MutatingTest, MoveBackward) { + std::vector> actual; + actual.emplace_back(absl::make_unique(1)); + actual.emplace_back(absl::make_unique(2)); + actual.emplace_back(absl::make_unique(3)); + actual.emplace_back(absl::make_unique(4)); + 
actual.emplace_back(absl::make_unique(5)); + auto subrange = absl::MakeSpan(actual.data(), 3); + absl::c_move_backward(subrange, actual.end()); + EXPECT_THAT(actual, ElementsAre(IsNull(), IsNull(), Pointee(1), Pointee(2), + Pointee(3))); +} + +TEST(MutatingTest, MoveWithRvalue) { + auto MakeRValueSrc = [] { + std::vector> src; + src.emplace_back(absl::make_unique(1)); + src.emplace_back(absl::make_unique(2)); + src.emplace_back(absl::make_unique(3)); + return src; + }; + + std::vector> dest = MakeRValueSrc(); + absl::c_move(MakeRValueSrc(), std::back_inserter(dest)); + EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(1), + Pointee(2), Pointee(3))); +} + +TEST(MutatingTest, SwapRanges) { + std::vector odds = {2, 4, 6}; + std::vector evens = {1, 3, 5}; + absl::c_swap_ranges(odds, evens); + EXPECT_THAT(odds, ElementsAre(1, 3, 5)); + EXPECT_THAT(evens, ElementsAre(2, 4, 6)); +} + +TEST_F(NonMutatingTest, Transform) { + std::vector x{0, 2, 4}, y, z; + auto end = absl::c_transform(x, back_inserter(y), std::negate()); + EXPECT_EQ(std::vector({0, -2, -4}), y); + *end = 7; + EXPECT_EQ(std::vector({0, -2, -4, 7}), y); + + y = {1, 3, 0}; + end = absl::c_transform(x, y, back_inserter(z), std::plus()); + EXPECT_EQ(std::vector({1, 5, 4}), z); + *end = 7; + EXPECT_EQ(std::vector({1, 5, 4, 7}), z); +} + +TEST(MutatingTest, Replace) { + const std::vector initial = {1, 2, 3, 1, 4, 5}; + const std::vector expected = {4, 2, 3, 4, 4, 5}; + + std::vector test_vector = initial; + absl::c_replace(test_vector, 1, 4); + EXPECT_EQ(expected, test_vector); + + std::list test_list(initial.begin(), initial.end()); + absl::c_replace(test_list, 1, 4); + EXPECT_EQ(std::list(expected.begin(), expected.end()), test_list); +} + +TEST(MutatingTest, ReplaceIf) { + std::vector actual = {1, 2, 3, 4, 5}; + const std::vector expected = {0, 2, 0, 4, 0}; + + absl::c_replace_if(actual, IsOdd, 0); + EXPECT_EQ(expected, actual); +} + +TEST(MutatingTest, ReplaceCopy) { + const std::vector initial = {1, 2, 3, 1, 4, 5}; + const std::vector expected = {4, 2, 3, 4, 4, 5}; + + std::vector actual; + absl::c_replace_copy(initial, back_inserter(actual), 1, 4); + EXPECT_EQ(expected, actual); +} + +TEST(MutatingTest, Sort) { + std::vector test_vector = {2, 3, 1, 4}; + absl::c_sort(test_vector); + EXPECT_THAT(test_vector, ElementsAre(1, 2, 3, 4)); +} + +TEST(MutatingTest, SortWithPredicate) { + std::vector test_vector = {2, 3, 1, 4}; + absl::c_sort(test_vector, std::greater()); + EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1)); +} + +// For absl::c_stable_sort tests. Needs an operator< that does not cover all +// fields so that the test can check the sort preserves order of equal elements. +struct Element { + int key; + int value; + friend bool operator<(const Element& e1, const Element& e2) { + return e1.key < e2.key; + } + // Make gmock print useful diagnostics. 
+ friend std::ostream& operator<<(std::ostream& o, const Element& e) { + return o << "{" << e.key << ", " << e.value << "}"; + } +}; + +MATCHER_P2(IsElement, key, value, "") { + return arg.key == key && arg.value == value; +} + +TEST(MutatingTest, StableSort) { + std::vector test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}}; + absl::c_stable_sort(test_vector); + EXPECT_THAT( + test_vector, + ElementsAre(IsElement(1, 1), IsElement(1, 0), IsElement(2, 1), + IsElement(2, 0), IsElement(2, 2))); +} + +TEST(MutatingTest, StableSortWithPredicate) { + std::vector test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}}; + absl::c_stable_sort(test_vector, [](const Element& e1, const Element& e2) { + return e2 < e1; + }); + EXPECT_THAT( + test_vector, + ElementsAre(IsElement(2, 1), IsElement(2, 0), IsElement(2, 2), + IsElement(1, 1), IsElement(1, 0))); +} + +TEST(MutatingTest, ReplaceCopyIf) { + const std::vector initial = {1, 2, 3, 4, 5}; + const std::vector expected = {0, 2, 0, 4, 0}; + + std::vector actual; + absl::c_replace_copy_if(initial, back_inserter(actual), IsOdd, 0); + EXPECT_EQ(expected, actual); +} + +TEST(MutatingTest, Fill) { + std::vector actual(5); + absl::c_fill(actual, 1); + EXPECT_THAT(actual, ElementsAre(1, 1, 1, 1, 1)); +} + +TEST(MutatingTest, FillN) { + std::vector actual(5, 0); + absl::c_fill_n(actual, 2, 1); + EXPECT_THAT(actual, ElementsAre(1, 1, 0, 0, 0)); +} + +TEST(MutatingTest, Generate) { + std::vector actual(5); + int x = 0; + absl::c_generate(actual, [&x]() { return ++x; }); + EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(MutatingTest, GenerateN) { + std::vector actual(5, 0); + int x = 0; + absl::c_generate_n(actual, 3, [&x]() { return ++x; }); + EXPECT_THAT(actual, ElementsAre(1, 2, 3, 0, 0)); +} + +TEST(MutatingTest, RemoveCopy) { + std::vector actual; + absl::c_remove_copy(std::vector{1, 2, 3}, back_inserter(actual), 2); + EXPECT_THAT(actual, ElementsAre(1, 3)); +} + +TEST(MutatingTest, RemoveCopyIf) { + std::vector actual; + absl::c_remove_copy_if(std::vector{1, 2, 3}, back_inserter(actual), + IsOdd); + EXPECT_THAT(actual, ElementsAre(2)); +} + +TEST(MutatingTest, UniqueCopy) { + std::vector actual; + absl::c_unique_copy(std::vector{1, 2, 2, 2, 3, 3, 2}, + back_inserter(actual)); + EXPECT_THAT(actual, ElementsAre(1, 2, 3, 2)); +} + +TEST(MutatingTest, UniqueCopyWithPredicate) { + std::vector actual; + absl::c_unique_copy(std::vector{1, 2, 3, -1, -2, -3, 1}, + back_inserter(actual), + [](int x, int y) { return (x < 0) == (y < 0); }); + EXPECT_THAT(actual, ElementsAre(1, -1, 1)); +} + +TEST(MutatingTest, Reverse) { + std::vector test_vector = {1, 2, 3, 4}; + absl::c_reverse(test_vector); + EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1)); + + std::list test_list = {1, 2, 3, 4}; + absl::c_reverse(test_list); + EXPECT_THAT(test_list, ElementsAre(4, 3, 2, 1)); +} + +TEST(MutatingTest, ReverseCopy) { + std::vector actual; + absl::c_reverse_copy(std::vector{1, 2, 3, 4}, back_inserter(actual)); + EXPECT_THAT(actual, ElementsAre(4, 3, 2, 1)); +} + +TEST(MutatingTest, Rotate) { + std::vector actual = {1, 2, 3, 4}; + auto it = absl::c_rotate(actual, actual.begin() + 2); + EXPECT_THAT(actual, testing::ElementsAreArray({3, 4, 1, 2})); + EXPECT_EQ(*it, 1); +} + +TEST(MutatingTest, RotateCopy) { + std::vector initial = {1, 2, 3, 4}; + std::vector actual; + auto end = + absl::c_rotate_copy(initial, initial.begin() + 2, back_inserter(actual)); + *end = 5; + EXPECT_THAT(actual, ElementsAre(3, 4, 1, 2, 5)); +} + +TEST(MutatingTest, Shuffle) { + std::vector actual 
= {1, 2, 3, 4, 5}; + absl::c_shuffle(actual, std::random_device()); + EXPECT_THAT(actual, UnorderedElementsAre(1, 2, 3, 4, 5)); +} + +TEST(MutatingTest, PartialSort) { + std::vector sequence{5, 3, 42, 0}; + absl::c_partial_sort(sequence, sequence.begin() + 2); + EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(0, 3)); + absl::c_partial_sort(sequence, sequence.begin() + 2, std::greater()); + EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(42, 5)); +} + +TEST(MutatingTest, PartialSortCopy) { + const std::vector initial = {5, 3, 42, 0}; + std::vector actual(2); + absl::c_partial_sort_copy(initial, actual); + EXPECT_THAT(actual, ElementsAre(0, 3)); + absl::c_partial_sort_copy(initial, actual, std::greater()); + EXPECT_THAT(actual, ElementsAre(42, 5)); +} + +TEST(MutatingTest, Merge) { + std::vector actual; + absl::c_merge(std::vector{1, 3, 5}, std::vector{2, 4}, + back_inserter(actual)); + EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(MutatingTest, MergeWithComparator) { + std::vector actual; + absl::c_merge(std::vector{5, 3, 1}, std::vector{4, 2}, + back_inserter(actual), std::greater()); + EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1)); +} + +TEST(MutatingTest, InplaceMerge) { + std::vector actual = {1, 3, 5, 2, 4}; + absl::c_inplace_merge(actual, actual.begin() + 3); + EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(MutatingTest, InplaceMergeWithComparator) { + std::vector actual = {5, 3, 1, 4, 2}; + absl::c_inplace_merge(actual, actual.begin() + 3, std::greater()); + EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1)); +} + +class SetOperationsTest : public testing::Test { + protected: + std::vector a_ = {1, 2, 3}; + std::vector b_ = {1, 3, 5}; + + std::vector a_reversed_ = {3, 2, 1}; + std::vector b_reversed_ = {5, 3, 1}; +}; + +TEST_F(SetOperationsTest, SetUnion) { + std::vector actual; + absl::c_set_union(a_, b_, back_inserter(actual)); + EXPECT_THAT(actual, ElementsAre(1, 2, 3, 5)); +} + +TEST_F(SetOperationsTest, SetUnionWithComparator) { + std::vector actual; + absl::c_set_union(a_reversed_, b_reversed_, back_inserter(actual), + std::greater()); + EXPECT_THAT(actual, ElementsAre(5, 3, 2, 1)); +} + +TEST_F(SetOperationsTest, SetIntersection) { + std::vector actual; + absl::c_set_intersection(a_, b_, back_inserter(actual)); + EXPECT_THAT(actual, ElementsAre(1, 3)); +} + +TEST_F(SetOperationsTest, SetIntersectionWithComparator) { + std::vector actual; + absl::c_set_intersection(a_reversed_, b_reversed_, back_inserter(actual), + std::greater()); + EXPECT_THAT(actual, ElementsAre(3, 1)); +} + +TEST_F(SetOperationsTest, SetDifference) { + std::vector actual; + absl::c_set_difference(a_, b_, back_inserter(actual)); + EXPECT_THAT(actual, ElementsAre(2)); +} + +TEST_F(SetOperationsTest, SetDifferenceWithComparator) { + std::vector actual; + absl::c_set_difference(a_reversed_, b_reversed_, back_inserter(actual), + std::greater()); + EXPECT_THAT(actual, ElementsAre(2)); +} + +TEST_F(SetOperationsTest, SetSymmetricDifference) { + std::vector actual; + absl::c_set_symmetric_difference(a_, b_, back_inserter(actual)); + EXPECT_THAT(actual, ElementsAre(2, 5)); +} + +TEST_F(SetOperationsTest, SetSymmetricDifferenceWithComparator) { + std::vector actual; + absl::c_set_symmetric_difference(a_reversed_, b_reversed_, + back_inserter(actual), std::greater()); + EXPECT_THAT(actual, ElementsAre(5, 2)); +} + +TEST(HeapOperationsTest, WithoutComparator) { + std::vector heap = {1, 2, 3}; + EXPECT_FALSE(absl::c_is_heap(heap)); + absl::c_make_heap(heap); + 
EXPECT_TRUE(absl::c_is_heap(heap)); + heap.push_back(4); + EXPECT_EQ(3, absl::c_is_heap_until(heap) - heap.begin()); + absl::c_push_heap(heap); + EXPECT_EQ(4, heap[0]); + absl::c_pop_heap(heap); + EXPECT_EQ(4, heap[3]); + absl::c_make_heap(heap); + absl::c_sort_heap(heap); + EXPECT_THAT(heap, ElementsAre(1, 2, 3, 4)); + EXPECT_FALSE(absl::c_is_heap(heap)); +} + +TEST(HeapOperationsTest, WithComparator) { + using greater = std::greater; + std::vector heap = {3, 2, 1}; + EXPECT_FALSE(absl::c_is_heap(heap, greater())); + absl::c_make_heap(heap, greater()); + EXPECT_TRUE(absl::c_is_heap(heap, greater())); + heap.push_back(0); + EXPECT_EQ(3, absl::c_is_heap_until(heap, greater()) - heap.begin()); + absl::c_push_heap(heap, greater()); + EXPECT_EQ(0, heap[0]); + absl::c_pop_heap(heap, greater()); + EXPECT_EQ(0, heap[3]); + absl::c_make_heap(heap, greater()); + absl::c_sort_heap(heap, greater()); + EXPECT_THAT(heap, ElementsAre(3, 2, 1, 0)); + EXPECT_FALSE(absl::c_is_heap(heap, greater())); +} + +TEST(MutatingTest, PermutationOperations) { + std::vector initial = {1, 2, 3, 4}; + std::vector permuted = initial; + + absl::c_next_permutation(permuted); + EXPECT_TRUE(absl::c_is_permutation(initial, permuted)); + EXPECT_TRUE(absl::c_is_permutation(initial, permuted, std::equal_to())); + + std::vector permuted2 = initial; + absl::c_prev_permutation(permuted2, std::greater()); + EXPECT_EQ(permuted, permuted2); + + absl::c_prev_permutation(permuted); + EXPECT_EQ(initial, permuted); +} + +} // namespace diff --git a/base/abseil/absl/algorithm/equal_benchmark.cc b/base/abseil/absl/algorithm/equal_benchmark.cc new file mode 100644 index 0000000..7bf62c9 --- /dev/null +++ b/base/abseil/absl/algorithm/equal_benchmark.cc @@ -0,0 +1,126 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "benchmark/benchmark.h" +#include "absl/algorithm/algorithm.h" + +namespace { + +// The range of sequence sizes to benchmark. +constexpr int kMinBenchmarkSize = 1024; +constexpr int kMaxBenchmarkSize = 8 * 1024 * 1024; + +// A user-defined type for use in equality benchmarks. Note that we expect +// std::memcmp to win for this type: libstdc++'s std::equal only defers to +// memcmp for integral types. This is because it is not straightforward to +// guarantee that std::memcmp would produce a result "as-if" compared by +// operator== for other types (example gotchas: NaN floats, structs with +// padding). 
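+// For instance (illustrative sketch): a type such as
+//
+//   struct Padded { char c; int i; };
+//
+// typically has padding bytes after `c` that operator== never reads, so two
+// memberwise-equal values may still differ under std::memcmp.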
+struct EightBits { + explicit EightBits(int /* unused */) : data(0) {} + bool operator==(const EightBits& rhs) const { return data == rhs.data; } + uint8_t data; +}; + +template +void BM_absl_equal_benchmark(benchmark::State& state) { + std::vector xs(state.range(0), T(0)); + std::vector ys = xs; + while (state.KeepRunning()) { + const bool same = absl::equal(xs.begin(), xs.end(), ys.begin(), ys.end()); + benchmark::DoNotOptimize(same); + } +} + +template +void BM_std_equal_benchmark(benchmark::State& state) { + std::vector xs(state.range(0), T(0)); + std::vector ys = xs; + while (state.KeepRunning()) { + const bool same = std::equal(xs.begin(), xs.end(), ys.begin()); + benchmark::DoNotOptimize(same); + } +} + +template +void BM_memcmp_benchmark(benchmark::State& state) { + std::vector xs(state.range(0), T(0)); + std::vector ys = xs; + while (state.KeepRunning()) { + const bool same = + std::memcmp(xs.data(), ys.data(), xs.size() * sizeof(T)) == 0; + benchmark::DoNotOptimize(same); + } +} + +// The expectation is that the compiler should be able to elide the equality +// comparison altogether for sufficiently simple types. +template +void BM_absl_equal_self_benchmark(benchmark::State& state) { + std::vector xs(state.range(0), T(0)); + while (state.KeepRunning()) { + const bool same = absl::equal(xs.begin(), xs.end(), xs.begin(), xs.end()); + benchmark::DoNotOptimize(same); + } +} + +BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint8_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint8_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint8_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint8_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); + +BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint16_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint16_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint16_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint16_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); + +BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint32_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint32_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint32_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint32_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); + +BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint64_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint64_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint64_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint64_t) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); + +BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, EightBits) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_std_equal_benchmark, EightBits) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_memcmp_benchmark, EightBits) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); +BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, EightBits) + ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); + +} // namespace diff --git 
a/base/abseil/absl/base/BUILD.bazel b/base/abseil/absl/base/BUILD.bazel new file mode 100644 index 0000000..e2068f5 --- /dev/null +++ b/base/abseil/absl/base/BUILD.bazel @@ -0,0 +1,679 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") +load( + "//absl:copts/configure_copts.bzl", + "ABSL_DEFAULT_COPTS", + "ABSL_DEFAULT_LINKOPTS", + "ABSL_TEST_COPTS", +) + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +cc_library( + name = "atomic_hook", + hdrs = ["internal/atomic_hook.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [":config"], +) + +cc_library( + name = "log_severity", + srcs = ["log_severity.cc"], + hdrs = ["log_severity.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":core_headers", + ], +) + +cc_library( + name = "raw_logging_internal", + srcs = ["internal/raw_logging.cc"], + hdrs = ["internal/raw_logging.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":atomic_hook", + ":config", + ":core_headers", + ":log_severity", + ], +) + +cc_library( + name = "spinlock_wait", + srcs = [ + "internal/spinlock_akaros.inc", + "internal/spinlock_linux.inc", + "internal/spinlock_posix.inc", + "internal/spinlock_wait.cc", + "internal/spinlock_win32.inc", + ], + hdrs = ["internal/spinlock_wait.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl/base:__pkg__", + ], + deps = [ + ":base_internal", + ":core_headers", + ], +) + +cc_library( + name = "config", + hdrs = [ + "config.h", + "options.h", + "policy_checks.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, +) + +cc_library( + name = "dynamic_annotations", + srcs = ["dynamic_annotations.cc"], + hdrs = ["dynamic_annotations.h"], + copts = ABSL_DEFAULT_COPTS, + defines = ["__CLANG_SUPPORT_DYN_ANNOTATION__"], + linkopts = ABSL_DEFAULT_LINKOPTS, +) + +cc_library( + name = "core_headers", + srcs = [ + "internal/thread_annotations.h", + ], + hdrs = [ + "attributes.h", + "const_init.h", + "macros.h", + "optimization.h", + "port.h", + "thread_annotations.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ], +) + +cc_library( + name = "malloc_internal", + srcs = [ + "internal/low_level_alloc.cc", + ], + hdrs = [ + "internal/direct_mmap.h", + "internal/low_level_alloc.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = select({ + "//absl:windows": [], + "//conditions:default": ["-pthread"], + }) + ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//visibility:public", + ], + deps = [ + ":base", + ":base_internal", + ":config", + ":core_headers", + ":dynamic_annotations", + ":raw_logging_internal", + ], +) + +cc_library( + name = "base_internal", + hdrs = [ + 
"internal/hide_ptr.h", + "internal/identity.h", + "internal/inline_variable.h", + "internal/invoke.h", + "internal/scheduling_mode.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + "//absl/meta:type_traits", + ], +) + +cc_library( + name = "base", + srcs = [ + "internal/cycleclock.cc", + "internal/spinlock.cc", + "internal/sysinfo.cc", + "internal/thread_identity.cc", + "internal/unscaledcycleclock.cc", + ], + hdrs = [ + "call_once.h", + "casts.h", + "internal/cycleclock.h", + "internal/low_level_scheduling.h", + "internal/per_thread_tls.h", + "internal/spinlock.h", + "internal/sysinfo.h", + "internal/thread_identity.h", + "internal/tsan_mutex_interface.h", + "internal/unscaledcycleclock.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = select({ + "//absl:windows": [ + "-DEFAULTLIB:advapi32.lib", + ], + "//conditions:default": ["-pthread"], + }) + ABSL_DEFAULT_LINKOPTS, + deps = [ + ":atomic_hook", + ":base_internal", + ":config", + ":core_headers", + ":dynamic_annotations", + ":log_severity", + ":raw_logging_internal", + ":spinlock_wait", + "//absl/meta:type_traits", + ], +) + +cc_library( + name = "atomic_hook_test_helper", + testonly = 1, + srcs = ["internal/atomic_hook_test_helper.cc"], + hdrs = ["internal/atomic_hook_test_helper.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":atomic_hook", + ":core_headers", + ], +) + +cc_test( + name = "atomic_hook_test", + size = "small", + srcs = ["internal/atomic_hook_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":atomic_hook", + ":atomic_hook_test_helper", + ":core_headers", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "bit_cast_test", + size = "small", + srcs = [ + "bit_cast_test.cc", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":core_headers", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "throw_delegate", + srcs = ["internal/throw_delegate.cc"], + hdrs = ["internal/throw_delegate.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":raw_logging_internal", + ], +) + +cc_test( + name = "throw_delegate_test", + srcs = ["throw_delegate_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":throw_delegate", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "exception_testing", + testonly = 1, + hdrs = ["internal/exception_testing.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "pretty_function", + hdrs = ["internal/pretty_function.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//absl:__subpackages__"], +) + +cc_library( + name = "exception_safety_testing", + testonly = 1, + srcs = ["internal/exception_safety_testing.cc"], + hdrs = ["internal/exception_safety_testing.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":pretty_function", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/utility", + "@com_google_googletest//:gtest", + ], +) + +cc_test( + name = "exception_safety_testing_test", + srcs = ["exception_safety_testing_test.cc"], + copts = ABSL_TEST_COPTS, + 
linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":exception_safety_testing", + "//absl/memory", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "inline_variable_test", + size = "small", + srcs = [ + "inline_variable_test.cc", + "inline_variable_test_a.cc", + "inline_variable_test_b.cc", + "internal/inline_variable_testing.h", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base_internal", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "invoke_test", + size = "small", + srcs = ["invoke_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base_internal", + "//absl/memory", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +# Common test library made available for use in non-absl code that overrides +# AbslInternalSpinLockDelay and AbslInternalSpinLockWake. +cc_library( + name = "spinlock_test_common", + testonly = 1, + srcs = ["spinlock_test_common.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":base_internal", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest", + ], + alwayslink = 1, +) + +cc_test( + name = "spinlock_test", + size = "medium", + srcs = ["spinlock_test_common.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":base_internal", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "spinlock_benchmark_common", + testonly = 1, + srcs = ["internal/spinlock_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl/base:__pkg__", + ], + deps = [ + ":base", + ":base_internal", + ":raw_logging_internal", + "//absl/synchronization", + "@com_github_google_benchmark//:benchmark_main", + ], + alwayslink = 1, +) + +cc_binary( + name = "spinlock_benchmark", + testonly = 1, + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + ":spinlock_benchmark_common", + ], +) + +cc_library( + name = "endian", + hdrs = [ + "internal/endian.h", + "internal/unaligned_access.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":core_headers", + ], +) + +cc_test( + name = "endian_test", + srcs = ["internal/endian_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":config", + ":endian", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "config_test", + srcs = ["config_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + "//absl/synchronization:thread_pool", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "call_once_test", + srcs = ["call_once_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "raw_logging_test", + srcs = ["raw_logging_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":raw_logging_internal", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "sysinfo_test", + size = "small", + srcs = ["internal/sysinfo_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + "//absl/synchronization", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = 
"low_level_alloc_test", + size = "medium", + srcs = ["internal/low_level_alloc_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["no_test_ios_x86_64"], + deps = [":malloc_internal"], +) + +cc_test( + name = "thread_identity_test", + size = "small", + srcs = ["internal/thread_identity_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "thread_identity_benchmark", + srcs = ["internal/thread_identity_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":base", + "//absl/synchronization", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_library( + name = "bits", + hdrs = ["internal/bits.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":core_headers", + ], +) + +cc_test( + name = "bits_test", + size = "small", + srcs = ["internal/bits_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":bits", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "exponential_biased", + srcs = ["internal/exponential_biased.cc"], + hdrs = ["internal/exponential_biased.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":core_headers", + ], +) + +cc_test( + name = "exponential_biased_test", + size = "small", + srcs = ["internal/exponential_biased_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + ":exponential_biased", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "periodic_sampler", + srcs = ["internal/periodic_sampler.cc"], + hdrs = ["internal/periodic_sampler.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":core_headers", + ":exponential_biased", + ], +) + +cc_test( + name = "periodic_sampler_test", + size = "small", + srcs = ["internal/periodic_sampler_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + ":core_headers", + ":periodic_sampler", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "periodic_sampler_benchmark", + testonly = 1, + srcs = ["internal/periodic_sampler_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":core_headers", + ":periodic_sampler", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_library( + name = "scoped_set_env", + testonly = 1, + srcs = ["internal/scoped_set_env.cc"], + hdrs = ["internal/scoped_set_env.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":raw_logging_internal", + ], +) + +cc_test( + name = "scoped_set_env_test", + size = "small", + srcs = ["internal/scoped_set_env_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":scoped_set_env", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "log_severity_test", + size = "small", + srcs = ["log_severity_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":log_severity", + 
"//absl/flags:marshalling", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/base/abseil/absl/base/CMakeLists.txt b/base/abseil/absl/base/CMakeLists.txt new file mode 100644 index 0000000..60fc1a8 --- /dev/null +++ b/base/abseil/absl/base/CMakeLists.txt @@ -0,0 +1,617 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +find_library(LIBRT rt) + +absl_cc_library( + NAME + atomic_hook + HDRS + "internal/atomic_hook.h" + DEPS + absl::config + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + log_severity + HDRS + "log_severity.h" + SRCS + "log_severity.cc" + DEPS + absl::core_headers + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + raw_logging_internal + HDRS + "internal/raw_logging.h" + SRCS + "internal/raw_logging.cc" + DEPS + absl::atomic_hook + absl::config + absl::core_headers + absl::log_severity + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + spinlock_wait + HDRS + "internal/spinlock_wait.h" + SRCS + "internal/spinlock_akaros.inc" + "internal/spinlock_linux.inc" + "internal/spinlock_posix.inc" + "internal/spinlock_wait.cc" + "internal/spinlock_win32.inc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::core_headers +) + +absl_cc_library( + NAME + config + HDRS + "config.h" + "options.h" + "policy_checks.h" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC +) + +absl_cc_library( + NAME + dynamic_annotations + HDRS + "dynamic_annotations.h" + SRCS + "dynamic_annotations.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEFINES + "__CLANG_SUPPORT_DYN_ANNOTATION__" + PUBLIC +) + +absl_cc_library( + NAME + core_headers + HDRS + "attributes.h" + "const_init.h" + "macros.h" + "optimization.h" + "port.h" + "thread_annotations.h" + "internal/thread_annotations.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_library( + NAME + malloc_internal + HDRS + "internal/direct_mmap.h" + "internal/low_level_alloc.h" + SRCS + "internal/low_level_alloc.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::base_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::raw_logging_internal + Threads::Threads +) + +absl_cc_library( + NAME + base_internal + HDRS + "internal/hide_ptr.h" + "internal/identity.h" + "internal/inline_variable.h" + "internal/invoke.h" + "internal/scheduling_mode.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::type_traits +) + +absl_cc_library( + NAME + base + HDRS + "call_once.h" + "casts.h" + "internal/cycleclock.h" + "internal/low_level_scheduling.h" + "internal/per_thread_tls.h" + "internal/spinlock.h" + "internal/sysinfo.h" + "internal/thread_identity.h" + "internal/tsan_mutex_interface.h" + "internal/unscaledcycleclock.h" + SRCS + "internal/cycleclock.cc" + "internal/spinlock.cc" + "internal/sysinfo.cc" + "internal/thread_identity.cc" + "internal/unscaledcycleclock.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + $<$:${LIBRT}> + $<$:"advapi32"> + DEPS + 
absl::atomic_hook + absl::base_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::log_severity + absl::raw_logging_internal + absl::spinlock_wait + absl::type_traits + Threads::Threads + PUBLIC +) + +absl_cc_library( + NAME + throw_delegate + HDRS + "internal/throw_delegate.h" + SRCS + "internal/throw_delegate.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +absl_cc_library( + NAME + exception_testing + HDRS + "internal/exception_testing.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + gtest + TESTONLY +) + +absl_cc_library( + NAME + pretty_function + HDRS + "internal/pretty_function.h" + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + exception_safety_testing + HDRS + "internal/exception_safety_testing.h" + SRCS + "internal/exception_safety_testing.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::pretty_function + absl::memory + absl::meta + absl::strings + absl::utility + gtest + TESTONLY +) + +absl_cc_test( + NAME + absl_exception_safety_testing_test + SRCS + "exception_safety_testing_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::exception_safety_testing + absl::memory + gtest_main +) + +absl_cc_library( + NAME + atomic_hook_test_helper + SRCS + "internal/atomic_hook_test_helper.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::atomic_hook + absl::core_headers + TESTONLY +) + +absl_cc_test( + NAME + atomic_hook_test + SRCS + "internal/atomic_hook_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::atomic_hook_test_helper + absl::atomic_hook + absl::core_headers + gmock + gtest_main +) + +absl_cc_test( + NAME + bit_cast_test + SRCS + "bit_cast_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::core_headers + gtest_main +) + +absl_cc_test( + NAME + throw_delegate_test + SRCS + "throw_delegate_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::config + absl::throw_delegate + gtest_main +) + +absl_cc_test( + NAME + inline_variable_test + SRCS + "internal/inline_variable_testing.h" + "inline_variable_test.cc" + "inline_variable_test_a.cc" + "inline_variable_test_b.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base_internal + gtest_main +) + +absl_cc_test( + NAME + invoke_test + SRCS + "invoke_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base_internal + absl::memory + absl::strings + gmock + gtest_main +) + +absl_cc_library( + NAME + spinlock_test_common + SRCS + "spinlock_test_common.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::base_internal + absl::core_headers + absl::synchronization + gtest + TESTONLY +) + +# On bazel BUILD this target use "alwayslink = 1" which is not implemented here +absl_cc_test( + NAME + spinlock_test + SRCS + "spinlock_test_common.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::base_internal + absl::core_headers + absl::synchronization + gtest_main +) + +absl_cc_library( + NAME + endian + HDRS + "internal/endian.h" + "internal/unaligned_access.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + PUBLIC +) + +absl_cc_test( + NAME + endian_test + SRCS + "internal/endian_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::config + absl::endian + gtest_main +) + +absl_cc_test( + NAME + config_test + SRCS + "config_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::synchronization + gtest_main +) + +absl_cc_test( + NAME + call_once_test + SRCS + "call_once_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + 
absl::core_headers + absl::synchronization + gtest_main +) + +absl_cc_test( + NAME + raw_logging_test + SRCS + "raw_logging_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::raw_logging_internal + absl::strings + gtest_main +) + +absl_cc_test( + NAME + sysinfo_test + SRCS + "internal/sysinfo_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::synchronization + gtest_main +) + +absl_cc_test( + NAME + low_level_alloc_test + SRCS + "internal/low_level_alloc_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::malloc_internal + Threads::Threads +) + +absl_cc_test( + NAME + thread_identity_test + SRCS + "internal/thread_identity_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::core_headers + absl::synchronization + Threads::Threads + gtest_main +) + +absl_cc_library( + NAME + bits + HDRS + "internal/bits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_test( + NAME + bits_test + SRCS + "internal/bits_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::bits + gtest_main +) + +absl_cc_library( + NAME + exponential_biased + SRCS + "internal/exponential_biased.cc" + HDRS + "internal/exponential_biased.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_test( + NAME + exponential_biased_test + SRCS + "internal/exponential_biased_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::exponential_biased + absl::strings + gmock_main +) + +absl_cc_library( + NAME + periodic_sampler + SRCS + "internal/periodic_sampler.cc" + HDRS + "internal/periodic_sampler.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::exponential_biased +) + +absl_cc_test( + NAME + periodic_sampler_test + SRCS + "internal/periodic_sampler_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::core_headers + absl::periodic_sampler + gmock_main +) + +absl_cc_library( + NAME + scoped_set_env + SRCS + "internal/scoped_set_env.cc" + HDRS + "internal/scoped_set_env.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +absl_cc_test( + NAME + scoped_set_env_test + SRCS + "internal/scoped_set_env_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::scoped_set_env + gtest_main +) + +absl_cc_test( + NAME + cmake_thread_test + SRCS + "internal/cmake_thread_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base +) + +absl_cc_test( + NAME + log_severity_test + SRCS + "log_severity_test.cc" + DEPS + absl::flags_marshalling + absl::log_severity + absl::strings + gmock + gtest_main +) diff --git a/base/abseil/absl/base/attributes.h b/base/abseil/absl/base/attributes.h new file mode 100644 index 0000000..acd1c52 --- /dev/null +++ b/base/abseil/absl/base/attributes.h @@ -0,0 +1,609 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This header file defines macros for declaring attributes for functions, +// types, and variables. 
+// +// These macros are used within Abseil and allow the compiler to optimize, where +// applicable, certain function calls. +// +// This file is used for both C and C++! +// +// Most macros here are exposing GCC or Clang features, and are stubbed out for +// other compilers. +// +// GCC attributes documentation: +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html +// +// Most attributes in this file are already supported by GCC 4.7. However, some +// of them are not supported in older version of Clang. Thus, we check +// `__has_attribute()` first. If the check fails, we check if we are on GCC and +// assume the attribute exists on GCC (which is verified on GCC 4.7). +// +// ----------------------------------------------------------------------------- +// Sanitizer Attributes +// ----------------------------------------------------------------------------- +// +// Sanitizer-related attributes are not "defined" in this file (and indeed +// are not defined as such in any file). To utilize the following +// sanitizer-related attributes within your builds, define the following macros +// within your build using a `-D` flag, along with the given value for +// `-fsanitize`: +// +// * `ADDRESS_SANITIZER` + `-fsanitize=address` (Clang, GCC 4.8) +// * `MEMORY_SANITIZER` + `-fsanitize=memory` (Clang-only) +// * `THREAD_SANITIZER + `-fsanitize=thread` (Clang, GCC 4.8+) +// * `UNDEFINED_BEHAVIOR_SANITIZER` + `-fsanitize=undefined` (Clang, GCC 4.9+) +// * `CONTROL_FLOW_INTEGRITY` + -fsanitize=cfi (Clang-only) +// +// Example: +// +// // Enable branches in the Abseil code that are tagged for ASan: +// $ bazel build --copt=-DADDRESS_SANITIZER --copt=-fsanitize=address +// --linkopt=-fsanitize=address *target* +// +// Since these macro names are only supported by GCC and Clang, we only check +// for `__GNUC__` (GCC or Clang) and the above macros. +#ifndef ABSL_BASE_ATTRIBUTES_H_ +#define ABSL_BASE_ATTRIBUTES_H_ + +// ABSL_HAVE_ATTRIBUTE +// +// A function-like feature checking macro that is a wrapper around +// `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a +// nonzero constant integer if the attribute is supported or 0 if not. +// +// It evaluates to zero if `__has_attribute` is not defined by the compiler. +// +// GCC: https://gcc.gnu.org/gcc-5/changes.html +// Clang: https://clang.llvm.org/docs/LanguageExtensions.html +#ifdef __has_attribute +#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x) +#else +#define ABSL_HAVE_ATTRIBUTE(x) 0 +#endif + +// ABSL_HAVE_CPP_ATTRIBUTE +// +// A function-like feature checking macro that accepts C++11 style attributes. +// It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6 +// (https://en.cppreference.com/w/cpp/experimental/feature_test). If we don't +// find `__has_cpp_attribute`, will evaluate to 0. +#if defined(__cplusplus) && defined(__has_cpp_attribute) +// NOTE: requiring __cplusplus above should not be necessary, but +// works around https://bugs.llvm.org/show_bug.cgi?id=23435. 
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +#define ABSL_HAVE_CPP_ATTRIBUTE(x) 0 +#endif + +// ----------------------------------------------------------------------------- +// Function Attributes +// ----------------------------------------------------------------------------- +// +// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html +// Clang: https://clang.llvm.org/docs/AttributeReference.html + +// ABSL_PRINTF_ATTRIBUTE +// ABSL_SCANF_ATTRIBUTE +// +// Tells the compiler to perform `printf` format string checking if the +// compiler supports it; see the 'format' attribute in +// . +// +// Note: As the GCC manual states, "[s]ince non-static C++ methods +// have an implicit 'this' argument, the arguments of such methods +// should be counted from two, not one." +#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \ + __attribute__((__format__(__printf__, string_index, first_to_check))) +#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \ + __attribute__((__format__(__scanf__, string_index, first_to_check))) +#else +#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) +#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) +#endif + +// ABSL_ATTRIBUTE_ALWAYS_INLINE +// ABSL_ATTRIBUTE_NOINLINE +// +// Forces functions to either inline or not inline. Introduced in gcc 3.1. +#if ABSL_HAVE_ATTRIBUTE(always_inline) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) +#define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1 +#else +#define ABSL_ATTRIBUTE_ALWAYS_INLINE +#endif + +#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline)) +#define ABSL_HAVE_ATTRIBUTE_NOINLINE 1 +#else +#define ABSL_ATTRIBUTE_NOINLINE +#endif + +// ABSL_ATTRIBUTE_NO_TAIL_CALL +// +// Prevents the compiler from optimizing away stack frames for functions which +// end in a call to another function. +#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls) +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 +#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls)) +#elif defined(__GNUC__) && !defined(__clang__) +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 +#define ABSL_ATTRIBUTE_NO_TAIL_CALL \ + __attribute__((optimize("no-optimize-sibling-calls"))) +#else +#define ABSL_ATTRIBUTE_NO_TAIL_CALL +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0 +#endif + +// ABSL_ATTRIBUTE_WEAK +// +// Tags a function as weak for the purposes of compilation and linking. +// Weak attributes currently do not work properly in LLVM's Windows backend, +// so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 +// for further information. +// The MinGW compiler doesn't complain about the weak attribute until the link +// step, presumably because Windows doesn't use ELF binaries. +#if (ABSL_HAVE_ATTRIBUTE(weak) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + !(defined(__llvm__) && defined(_WIN32)) && !defined(__MINGW32__) +#undef ABSL_ATTRIBUTE_WEAK +#define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) +#define ABSL_HAVE_ATTRIBUTE_WEAK 1 +#else +#define ABSL_ATTRIBUTE_WEAK +#define ABSL_HAVE_ATTRIBUTE_WEAK 0 +#endif + +// ABSL_ATTRIBUTE_NONNULL +// +// Tells the compiler either (a) that a particular function parameter +// should be a non-null pointer, or (b) that all pointer arguments should +// be non-null. 
+// +// Note: As the GCC manual states, "[s]ince non-static C++ methods +// have an implicit 'this' argument, the arguments of such methods +// should be counted from two, not one." +// +// Args are indexed starting at 1. +// +// For non-static class member functions, the implicit `this` argument +// is arg 1, and the first explicit argument is arg 2. For static class member +// functions, there is no implicit `this`, and the first explicit argument is +// arg 1. +// +// Example: +// +// /* arg_a cannot be null, but arg_b can */ +// void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1); +// +// class C { +// /* arg_a cannot be null, but arg_b can */ +// void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2); +// +// /* arg_a cannot be null, but arg_b can */ +// static void StaticMethod(void* arg_a, void* arg_b) +// ABSL_ATTRIBUTE_NONNULL(1); +// }; +// +// If no arguments are provided, then all pointer arguments should be non-null. +// +// /* No pointer arguments may be null. */ +// void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL(); +// +// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but +// ABSL_ATTRIBUTE_NONNULL does not. +#if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index))) +#else +#define ABSL_ATTRIBUTE_NONNULL(...) +#endif + +// ABSL_ATTRIBUTE_NORETURN +// +// Tells the compiler that a given function never returns. +#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn)) +#elif defined(_MSC_VER) +#define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn) +#else +#define ABSL_ATTRIBUTE_NORETURN +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS +// +// Tells the AddressSanitizer (or other memory testing tools) to ignore a given +// function. Useful for cases when a function reads random locations on stack, +// calls _exit from a cloned subprocess, deliberately accesses buffer +// out of bounds or does other scary things with memory. +// NOTE: GCC supports AddressSanitizer(asan) since 4.8. +// https://gcc.gnu.org/gcc-4.8/changes.html +#if defined(__GNUC__) +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY +// +// Tells the MemorySanitizer to relax the handling of a given function. All +// "Use of uninitialized value" warnings from such functions will be suppressed, +// and all values loaded from memory will be considered fully initialized. +// This attribute is similar to the ADDRESS_SANITIZER attribute above, but deals +// with initialized-ness rather than addressability issues. +// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC. +#if defined(__clang__) +#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_THREAD +// +// Tells the ThreadSanitizer to not instrument a given function. +// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8. +// https://gcc.gnu.org/gcc-4.8/changes.html +#if defined(__GNUC__) +#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread)) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED +// +// Tells the UndefinedSanitizer to ignore a given function. 
Useful for cases +// where certain behavior (eg. division by zero) is being used intentionally. +// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9. +// https://gcc.gnu.org/gcc-4.9/changes.html +#if defined(__GNUC__) && \ + (defined(UNDEFINED_BEHAVIOR_SANITIZER) || defined(ADDRESS_SANITIZER)) +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ + __attribute__((no_sanitize("undefined"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_CFI +// +// Tells the ControlFlowIntegrity sanitizer to not instrument a given function. +// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details. +#if defined(__GNUC__) && defined(CONTROL_FLOW_INTEGRITY) +#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK +// +// Tells the SafeStack to not instrument a given function. +// See https://clang.llvm.org/docs/SafeStack.html for details. +#if defined(__GNUC__) && defined(SAFESTACK_SANITIZER) +#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \ + __attribute__((no_sanitize("safe-stack"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK +#endif + +// ABSL_ATTRIBUTE_RETURNS_NONNULL +// +// Tells the compiler that a particular function never returns a null pointer. +#if ABSL_HAVE_ATTRIBUTE(returns_nonnull) || \ + (defined(__GNUC__) && \ + (__GNUC__ > 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && \ + !defined(__clang__)) +#define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull)) +#else +#define ABSL_ATTRIBUTE_RETURNS_NONNULL +#endif + +// ABSL_HAVE_ATTRIBUTE_SECTION +// +// Indicates whether labeled sections are supported. Weak symbol support is +// a prerequisite. Labeled sections are not supported on Darwin/iOS. +#ifdef ABSL_HAVE_ATTRIBUTE_SECTION +#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set +#elif (ABSL_HAVE_ATTRIBUTE(section) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK +#define ABSL_HAVE_ATTRIBUTE_SECTION 1 + +// ABSL_ATTRIBUTE_SECTION +// +// Tells the compiler/linker to put a given function into a section and define +// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. +// This functionality is supported by GNU linker. Any function annotated with +// `ABSL_ATTRIBUTE_SECTION` must not be inlined, or it will be placed into +// whatever section its caller is placed into. +// +#ifndef ABSL_ATTRIBUTE_SECTION +#define ABSL_ATTRIBUTE_SECTION(name) \ + __attribute__((section(#name))) __attribute__((noinline)) +#endif + + +// ABSL_ATTRIBUTE_SECTION_VARIABLE +// +// Tells the compiler/linker to put a given variable into a section and define +// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. +// This functionality is supported by GNU linker. +#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE +#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name))) +#endif + +// ABSL_DECLARE_ATTRIBUTE_SECTION_VARS +// +// A weak section declaration to be used as a global declaration +// for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link +// even without functions with ABSL_ATTRIBUTE_SECTION(name). +// ABSL_DEFINE_ATTRIBUTE_SECTION should be in the exactly one file; it's +// a no-op on ELF but not on Mach-O. 
+//
+#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
+  extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
+  extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
+#endif
+#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#endif
+
+// ABSL_ATTRIBUTE_SECTION_START
+//
+// Returns `void*` pointers to start/end of a section of code with
+// functions having ABSL_ATTRIBUTE_SECTION(name).
+// Returns 0 if no such functions exist.
+// One must ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and
+// link.
+//
+#define ABSL_ATTRIBUTE_SECTION_START(name) \
+  (reinterpret_cast<void*>(__start_##name))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) \
+  (reinterpret_cast<void*>(__stop_##name))
+
+#else  // !ABSL_HAVE_ATTRIBUTE_SECTION
+
+#define ABSL_HAVE_ATTRIBUTE_SECTION 0
+
+// provide dummy definitions
+#define ABSL_ATTRIBUTE_SECTION(name)
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(0))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(0))
+
+#endif  // ABSL_ATTRIBUTE_SECTION
+
+// ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+//
+// Support for aligning the stack on 32-bit x86.
+#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#if defined(__i386__)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \
+  __attribute__((force_align_arg_pointer))
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#elif defined(__x86_64__)
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#else  // !__i386__ && !__x86_64
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#endif  // __i386__
+#else
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#endif
+
+// ABSL_MUST_USE_RESULT
+//
+// Tells the compiler to warn about unused results.
+//
+// When annotating a function, it must appear as the first part of the
+// declaration or definition. The compiler will warn if the return value from
+// such a function is unused:
+//
+//   ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket();
+//   AllocateSprocket();  // Triggers a warning.
+//
+// When annotating a class, it is equivalent to annotating every function which
+// returns an instance.
+//
+//   class ABSL_MUST_USE_RESULT Sprocket {};
+//   Sprocket();  // Triggers a warning.
+//
+//   Sprocket MakeSprocket();
+//   MakeSprocket();  // Triggers a warning.
+//
+// Note that references and pointers are not instances:
+//
+//   Sprocket* SprocketPointer();
+//   SprocketPointer();  // Does *not* trigger a warning.
+//
+// ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result
+// warning. For that, warn_unused_result is used only for clang but not for gcc.
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425
+//
+// Note: past advice was to place the macro after the argument list.
+#if ABSL_HAVE_ATTRIBUTE(nodiscard)
+#define ABSL_MUST_USE_RESULT [[nodiscard]]
+#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
+#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result))
+#else
+#define ABSL_MUST_USE_RESULT
+#endif
+
+// ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD
+//
+// Tells GCC that a function is hot or cold.
GCC can use this information to +// improve static analysis, i.e. a conditional branch to a cold function +// is likely to be not-taken. +// This annotation is used for function declarations. +// +// Example: +// +// int foo() ABSL_ATTRIBUTE_HOT; +#if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_HOT __attribute__((hot)) +#else +#define ABSL_ATTRIBUTE_HOT +#endif + +#if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_COLD __attribute__((cold)) +#else +#define ABSL_ATTRIBUTE_COLD +#endif + +// ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS +// +// We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT +// macro used as an attribute to mark functions that must always or never be +// instrumented by XRay. Currently, this is only supported in Clang/LLVM. +// +// For reference on the LLVM XRay instrumentation, see +// http://llvm.org/docs/XRay.html. +// +// A function with the XRAY_ALWAYS_INSTRUMENT macro attribute in its declaration +// will always get the XRay instrumentation sleds. These sleds may introduce +// some binary size and runtime overhead and must be used sparingly. +// +// These attributes only take effect when the following conditions are met: +// +// * The file/target is built in at least C++11 mode, with a Clang compiler +// that supports XRay attributes. +// * The file/target is built with the -fxray-instrument flag set for the +// Clang/LLVM compiler. +// * The function is defined in the translation unit (the compiler honors the +// attribute in either the definition or the declaration, and must match). +// +// There are cases when, even when building with XRay instrumentation, users +// might want to control specifically which functions are instrumented for a +// particular build using special-case lists provided to the compiler. These +// special case lists are provided to Clang via the +// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The +// attributes in source take precedence over these special-case lists. +// +// To disable the XRay attributes at build-time, users may define +// ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific +// packages/targets, as this may lead to conflicting definitions of functions at +// link-time. +// +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \ + !defined(ABSL_NO_XRAY_ATTRIBUTES) +#define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]] +#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) +#define ABSL_XRAY_LOG_ARGS(N) \ + [[clang::xray_always_instrument, clang::xray_log_args(N)]] +#else +#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] +#endif +#else +#define ABSL_XRAY_ALWAYS_INSTRUMENT +#define ABSL_XRAY_NEVER_INSTRUMENT +#define ABSL_XRAY_LOG_ARGS(N) +#endif + +// ABSL_ATTRIBUTE_REINITIALIZES +// +// Indicates that a member function reinitializes the entire object to a known +// state, independent of the previous state of the object. +// +// The clang-tidy check bugprone-use-after-move allows member functions marked +// with this attribute to be called on objects that have been moved from; +// without the attribute, this would result in a use-after-move warning. 
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes) +#define ABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]] +#else +#define ABSL_ATTRIBUTE_REINITIALIZES +#endif + +// ----------------------------------------------------------------------------- +// Variable Attributes +// ----------------------------------------------------------------------------- + +// ABSL_ATTRIBUTE_UNUSED +// +// Prevents the compiler from complaining about variables that appear unused. +#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) +#undef ABSL_ATTRIBUTE_UNUSED +#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__)) +#else +#define ABSL_ATTRIBUTE_UNUSED +#endif + +// ABSL_ATTRIBUTE_INITIAL_EXEC +// +// Tells the compiler to use "initial-exec" mode for a thread-local variable. +// See http://people.redhat.com/drepper/tls.pdf for the gory details. +#if ABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec"))) +#else +#define ABSL_ATTRIBUTE_INITIAL_EXEC +#endif + +// ABSL_ATTRIBUTE_PACKED +// +// Prevents the compiler from padding a structure to natural alignment +#if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__)) +#else +#define ABSL_ATTRIBUTE_PACKED +#endif + +// ABSL_ATTRIBUTE_FUNC_ALIGN +// +// Tells the compiler to align the function start at least to certain +// alignment boundary +#if ABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__((aligned(bytes))) +#else +#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) +#endif + +// ABSL_CONST_INIT +// +// A variable declaration annotated with the `ABSL_CONST_INIT` attribute will +// not compile (on supported platforms) unless the variable has a constant +// initializer. This is useful for variables with static and thread storage +// duration, because it guarantees that they will not suffer from the so-called +// "static init order fiasco". Prefer to put this attribute on the most visible +// declaration of the variable, if there's more than one, because code that +// accesses the variable can then use the attribute for optimization. +// +// Example: +// +// class MyClass { +// public: +// ABSL_CONST_INIT static MyType my_var; +// }; +// +// MyType MyClass::my_var = MakeMyType(...); +// +// Note that this attribute is redundant if the variable is declared constexpr. +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) +#define ABSL_CONST_INIT [[clang::require_constant_initialization]] +#else +#define ABSL_CONST_INIT +#endif // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) + +#endif // ABSL_BASE_ATTRIBUTES_H_ diff --git a/base/abseil/absl/base/bit_cast_test.cc b/base/abseil/absl/base/bit_cast_test.cc new file mode 100644 index 0000000..8a3a41e --- /dev/null +++ b/base/abseil/absl/base/bit_cast_test.cc @@ -0,0 +1,109 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Unit test for bit_cast template.
+
+#include <cstdint>
+#include <cstring>
+
+#include "gtest/gtest.h"
+#include "absl/base/casts.h"
+#include "absl/base/macros.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+template <int N>
+struct marshall { char buf[N]; };
+
+template <typename T>
+void TestMarshall(const T values[], int num_values) {
+  for (int i = 0; i < num_values; ++i) {
+    T t0 = values[i];
+    marshall<sizeof(T)> m0 = absl::bit_cast<marshall<sizeof(T)> >(t0);
+    T t1 = absl::bit_cast<T>(m0);
+    marshall<sizeof(T)> m1 = absl::bit_cast<marshall<sizeof(T)> >(t1);
+    ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
+    ASSERT_EQ(0, memcmp(&m0, &m1, sizeof(T)));
+  }
+}
+
+// Convert back and forth to an integral type. The C++ standard does
+// not guarantee this will work, but we test that this works on all the
+// platforms we support.
+//
+// Likewise, we below make assumptions about sizeof(float) and
+// sizeof(double) which the standard does not guarantee, but which hold on the
+// platforms we support.
+
+template <typename T, typename I>
+void TestIntegral(const T values[], int num_values) {
+  for (int i = 0; i < num_values; ++i) {
+    T t0 = values[i];
+    I i0 = absl::bit_cast<I>(t0);
+    T t1 = absl::bit_cast<T>(i0);
+    I i1 = absl::bit_cast<I>(t1);
+    ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
+    ASSERT_EQ(i0, i1);
+  }
+}
+
+TEST(BitCast, Bool) {
+  static const bool bool_list[] = { false, true };
+  TestMarshall<bool>(bool_list, ABSL_ARRAYSIZE(bool_list));
+}
+
+TEST(BitCast, Int32) {
+  static const int32_t int_list[] =
+    { 0, 1, 100, 2147483647, -1, -100, -2147483647, -2147483647-1 };
+  TestMarshall<int32_t>(int_list, ABSL_ARRAYSIZE(int_list));
+}
+
+TEST(BitCast, Int64) {
+  static const int64_t int64_list[] =
+    { 0, 1, 1LL << 40, -1, -(1LL<<40) };
+  TestMarshall<int64_t>(int64_list, ABSL_ARRAYSIZE(int64_list));
+}
+
+TEST(BitCast, Uint64) {
+  static const uint64_t uint64_list[] =
+    { 0, 1, 1LLU << 40, 1LLU << 63 };
+  TestMarshall<uint64_t>(uint64_list, ABSL_ARRAYSIZE(uint64_list));
+}
+
+TEST(BitCast, Float) {
+  static const float float_list[] =
+    { 0.0f, 1.0f, -1.0f, 10.0f, -10.0f,
+      1e10f, 1e20f, 1e-10f, 1e-20f,
+      2.71828f, 3.14159f };
+  TestMarshall<float>(float_list, ABSL_ARRAYSIZE(float_list));
+  TestIntegral<float, int>(float_list, ABSL_ARRAYSIZE(float_list));
+  TestIntegral<float, unsigned>(float_list, ABSL_ARRAYSIZE(float_list));
+}
+
+TEST(BitCast, Double) {
+  static const double double_list[] =
+    { 0.0, 1.0, -1.0, 10.0, -10.0,
+      1e10, 1e100, 1e-10, 1e-100,
+      2.718281828459045,
+      3.141592653589793238462643383279502884197169399375105820974944 };
+  TestMarshall<double>(double_list, ABSL_ARRAYSIZE(double_list));
+  TestIntegral<double, int64_t>(double_list, ABSL_ARRAYSIZE(double_list));
+  TestIntegral<double, uint64_t>(double_list, ABSL_ARRAYSIZE(double_list));
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl
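The tests above all check the same invariant: `absl::bit_cast` preserves the object representation exactly, so a value survives a round trip through any same-sized type. A minimal standalone illustration of that property (editorial sketch, not part of the patch):

    #include <cassert>
    #include <cstdint>
    #include "absl/base/casts.h"

    int main() {
      float f = 3.14159f;
      uint32_t bits = absl::bit_cast<uint32_t>(f);  // reinterpret the 4 bytes
      float back = absl::bit_cast<float>(bits);     // and reinterpret them back
      assert(back == f);  // bit-for-bit round trip, no undefined behavior
      return 0;
    }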
diff --git a/base/abseil/absl/base/call_once.h b/base/abseil/absl/base/call_once.h
new file mode 100644
index 0000000..bc5ec93
--- /dev/null
+++ b/base/abseil/absl/base/call_once.h
@@ -0,0 +1,226 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: call_once.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides an Abseil version of `std::call_once` for invoking
+// a given function at most once, across all threads. This Abseil version is
+// faster than the C++11 version and incorporates the C++17 argument-passing
+// fix, so that (for example) non-const references may be passed to the invoked
+// function.
+
+#ifndef ABSL_BASE_CALL_ONCE_H_
+#define ABSL_BASE_CALL_ONCE_H_
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/invoke.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+class once_flag;
+
+namespace base_internal {
+std::atomic<uint32_t>* ControlWord(absl::once_flag* flag);
+}  // namespace base_internal
+
+// call_once()
+//
+// For all invocations using a given `once_flag`, invokes a given `fn` exactly
+// once across all threads. The first call to `call_once()` with a particular
+// `once_flag` argument (that does not throw an exception) will run the
+// specified function with the provided `args`; other calls with the same
+// `once_flag` argument will not run the function, but will wait
+// for the provided function to finish running (if it is still running).
+//
+// This mechanism provides a safe, simple, and fast mechanism for one-time
+// initialization in a multi-threaded process.
+//
+// Example:
+//
+// class MyInitClass {
+//  public:
+//  ...
+//  mutable absl::once_flag once_;
+//
+//  MyInitClass* init() const {
+//    absl::call_once(once_, &MyInitClass::Init, this);
+//    return ptr_;
+//  }
+//
+template <typename Callable, typename... Args>
+void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args);
+
+// once_flag
+//
+// Objects of this type are used to distinguish calls to `call_once()` and
+// ensure the provided function is only invoked once across all threads. This
+// type is not copyable or movable. However, it has a `constexpr`
+// constructor, and is safe to use as a namespace-scoped global variable.
+class once_flag {
+ public:
+  constexpr once_flag() : control_(0) {}
+  once_flag(const once_flag&) = delete;
+  once_flag& operator=(const once_flag&) = delete;
+
+ private:
+  friend std::atomic<uint32_t>* base_internal::ControlWord(once_flag* flag);
+  std::atomic<uint32_t> control_;
+};
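Beyond the member-function example in the header comment, `call_once` is commonly used with a namespace-scoped flag. A minimal usage sketch (hypothetical names; assumes only this header and the C++ standard library):

    #include "absl/base/call_once.h"

    namespace {
    absl::once_flag init_once;  // constexpr constructor: safe as a global
    int* cache = nullptr;

    void InitCache(int size) { cache = new int[size](); }
    }  // namespace

    int* GetCache() {
      // Racing callers all pass through here; exactly one runs
      // InitCache(1024), and the rest block until it finishes.
      absl::call_once(init_once, InitCache, 1024);
      return cache;
    }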
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+// Implementation details follow.
+//------------------------------------------------------------------------------
+
+namespace base_internal {
+
+// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
+// initialize entities used by the scheduler implementation.
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args);
+
+// Disables scheduling while on stack when scheduling mode is non-cooperative.
+// No effect for cooperative scheduling modes.
+class SchedulingHelper {
+ public:
+  explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) {
+    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+      guard_result_ = base_internal::SchedulingGuard::DisableRescheduling();
+    }
+  }
+
+  ~SchedulingHelper() {
+    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+      base_internal::SchedulingGuard::EnableRescheduling(guard_result_);
+    }
+  }
+
+ private:
+  base_internal::SchedulingMode mode_;
+  bool guard_result_;
+};
+
+// Bit patterns for call_once state machine values. Internal implementation
+// detail, not for use by clients.
+//
+// The bit patterns are arbitrarily chosen from unlikely values, to aid in
+// debugging. However, kOnceInit must be 0, so that a zero-initialized
+// once_flag will be valid for immediate use.
+enum {
+  kOnceInit = 0,
+  kOnceRunning = 0x65C2937B,
+  kOnceWaiter = 0x05A308D2,
+  // A very small constant is chosen for kOnceDone so that it fits in a single
+  // compare with immediate instruction for most common ISAs. This is verified
+  // for x86, POWER and ARM.
+  kOnceDone = 221,  // Random Number
+};
+
+template <typename Callable, typename... Args>
+ABSL_ATTRIBUTE_NOINLINE
+void CallOnceImpl(std::atomic<uint32_t>* control,
+                  base_internal::SchedulingMode scheduling_mode, Callable&& fn,
+                  Args&&... args) {
+#ifndef NDEBUG
+  {
+    uint32_t old_control = control->load(std::memory_order_relaxed);
+    if (old_control != kOnceInit &&
+        old_control != kOnceRunning &&
+        old_control != kOnceWaiter &&
+        old_control != kOnceDone) {
+      ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx",
+                   static_cast<unsigned long>(old_control));  // NOLINT
+    }
+  }
+#endif  // NDEBUG
+  static const base_internal::SpinLockWaitTransition trans[] = {
+      {kOnceInit, kOnceRunning, true},
+      {kOnceRunning, kOnceWaiter, false},
+      {kOnceDone, kOnceDone, true}};
+
+  // Must do this before potentially modifying control word's state.
+  base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
+  // Short circuit the simplest case to avoid procedure call overhead.
+  // The base_internal::SpinLockWait() call returns either kOnceInit or
+  // kOnceDone. If it returns kOnceDone, it must have loaded the control word
+  // with std::memory_order_acquire and seen a value of kOnceDone.
+  uint32_t old_control = kOnceInit;
+  if (control->compare_exchange_strong(old_control, kOnceRunning,
+                                       std::memory_order_relaxed) ||
+      base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
+                                  scheduling_mode) == kOnceInit) {
+    base_internal::Invoke(std::forward<Callable>(fn),
+                          std::forward<Args>(args)...);
+    // The call to SpinLockWake below is an optimization, because the waiter
+    // in SpinLockWait is waiting with a short timeout. The atomic load/store
+    // sequence is slightly faster than an atomic exchange:
+    //   old_control = control->exchange(base_internal::kOnceDone,
+    //                                   std::memory_order_release);
+    // We opt for a slightly faster case when there are no waiters, in spite
+    // of longer tail latency when there are waiters.
+    old_control = control->load(std::memory_order_relaxed);
+    control->store(base_internal::kOnceDone, std::memory_order_release);
+    if (old_control == base_internal::kOnceWaiter) {
+      base_internal::SpinLockWake(control, true);
+    }
+  }  // else *control is already kOnceDone
+}
+
+inline std::atomic<uint32_t>* ControlWord(once_flag* flag) {
+  return &flag->control_;
+}
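To make the control-word protocol above easier to follow, here is a deliberately simplified sketch of the same idea using only `std::atomic` (editorial; it spins where Abseil's real implementation blocks in `SpinLockWait`, and it omits the waiter/wake handshake):

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kInit = 0, kRunning = 1, kDone = 2;

    template <typename Fn>
    void SketchCallOnce(std::atomic<uint32_t>& control, Fn fn) {
      uint32_t expected = kInit;
      if (control.compare_exchange_strong(expected, kRunning,
                                          std::memory_order_relaxed)) {
        fn();  // this thread won the race; run the initializer exactly once
        control.store(kDone, std::memory_order_release);  // publish effects
      } else {
        // Another thread is running (or already ran) fn: wait for its
        // release-store so fn's side effects become visible here.
        while (control.load(std::memory_order_acquire) != kDone) {
        }
      }
    }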
+
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) {
+  std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
+  uint32_t s = once->load(std::memory_order_acquire);
+  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+    base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY,
+                                std::forward<Callable>(fn),
+                                std::forward<Args>(args)...);
+  }
+}
+
+}  // namespace base_internal
+
+template <typename Callable, typename... Args>
+void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) {
+  std::atomic<uint32_t>* once = base_internal::ControlWord(&flag);
+  uint32_t s = once->load(std::memory_order_acquire);
+  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+    base_internal::CallOnceImpl(
+        once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL,
+        std::forward<Callable>(fn), std::forward<Args>(args)...);
+  }
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_CALL_ONCE_H_
diff --git a/base/abseil/absl/base/call_once_test.cc b/base/abseil/absl/base/call_once_test.cc
new file mode 100644
index 0000000..11d26c4
--- /dev/null
+++ b/base/abseil/absl/base/call_once_test.cc
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/call_once.h"
+
+#include <thread>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/const_init.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+absl::once_flag once;
+
+ABSL_CONST_INIT Mutex counters_mu(absl::kConstInit);
+
+int running_thread_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_invoke_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_finished_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_return_count ABSL_GUARDED_BY(counters_mu) = 0;
+bool done_blocking ABSL_GUARDED_BY(counters_mu) = false;
+
+// Function to be called from absl::call_once. Waits for a notification.
+void WaitAndIncrement() {
+  counters_mu.Lock();
+  ++call_once_invoke_count;
+  counters_mu.Unlock();
+
+  counters_mu.LockWhen(Condition(&done_blocking));
+  ++call_once_finished_count;
+  counters_mu.Unlock();
+}
+
+void ThreadBody() {
+  counters_mu.Lock();
+  ++running_thread_count;
+  counters_mu.Unlock();
+
+  absl::call_once(once, WaitAndIncrement);
+
+  counters_mu.Lock();
+  ++call_once_return_count;
+  counters_mu.Unlock();
+}
+
+// Returns true if all threads are set up for the test.
+bool ThreadsAreSetup(void*) ABSL_EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
+  // All ten threads must be running, and WaitAndIncrement should be blocked.
+  return running_thread_count == 10 && call_once_invoke_count == 1;
+}
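The test below coordinates its threads with `absl::Mutex`'s condition-based waiting rather than a condition variable: `LockWhen` blocks until the predicate holds, with no explicit notify step. A minimal sketch of the idiom (editorial, standalone):

    #include "absl/synchronization/mutex.h"

    absl::Mutex mu;
    bool ready = false;  // guarded by mu

    void Waiter() {
      mu.LockWhen(absl::Condition(&ready));  // blocks until ready == true
      // ... read state protected by mu ...
      mu.Unlock();
    }

    void Setter() {
      mu.Lock();
      ready = true;  // LockWhen waiters wake up; no separate notify needed
      mu.Unlock();
    }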
+
+TEST(CallOnceTest, ExecutionCount) {
+  std::vector<std::thread> threads;
+
+  // Start 10 threads all calling call_once on the same once_flag.
+  for (int i = 0; i < 10; ++i) {
+    threads.emplace_back(ThreadBody);
+  }
+
+  // Wait until all ten threads have started, and WaitAndIncrement has been
+  // invoked.
+  counters_mu.LockWhen(Condition(ThreadsAreSetup, nullptr));
+
+  // WaitAndIncrement should have been invoked by exactly one call_once()
+  // instance. That thread should be blocking on a notification, and all other
+  // call_once instances should be blocking as well.
+  EXPECT_EQ(call_once_invoke_count, 1);
+  EXPECT_EQ(call_once_finished_count, 0);
+  EXPECT_EQ(call_once_return_count, 0);
+
+  // Allow WaitAndIncrement to finish executing. Once it does, the other
+  // call_once waiters will be unblocked.
+  done_blocking = true;
+  counters_mu.Unlock();
+
+  for (std::thread& thread : threads) {
+    thread.join();
+  }
+
+  counters_mu.Lock();
+  EXPECT_EQ(call_once_invoke_count, 1);
+  EXPECT_EQ(call_once_finished_count, 1);
+  EXPECT_EQ(call_once_return_count, 10);
+  counters_mu.Unlock();
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/casts.h b/base/abseil/absl/base/casts.h
new file mode 100644
index 0000000..322cc1d
--- /dev/null
+++ b/base/abseil/absl/base/casts.h
@@ -0,0 +1,184 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: casts.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines casting templates to fit use cases not covered by
+// the standard casts provided in the C++ standard. As with all cast operations,
+// use these with caution and only if alternatives do not exist.
+
+#ifndef ABSL_BASE_CASTS_H_
+#define ABSL_BASE_CASTS_H_
+
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/identity.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace internal_casts {
+
+template <class Dest, class Source>
+struct is_bitcastable
+    : std::integral_constant<
+          bool,
+          sizeof(Dest) == sizeof(Source) &&
+              type_traits_internal::is_trivially_copyable<Source>::value &&
+              type_traits_internal::is_trivially_copyable<Dest>::value &&
+              std::is_default_constructible<Dest>::value> {};
+
+}  // namespace internal_casts
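The trait above gates the checked `bit_cast` overload: the types must have equal sizes, both must be trivially copyable, and `Dest` must be default-constructible. For illustration only (editorial; `internal_casts` is an implementation-detail namespace, not a public API):

    #include <cstdint>
    #include "absl/base/casts.h"

    // float and uint32_t: same size, both trivially copyable -> accepted.
    static_assert(
        absl::internal_casts::is_bitcastable<uint32_t, float>::value,
        "float <-> uint32_t is bit-castable");

    // Sizes differ (8 vs. 4 bytes) -> rejected by the trait.
    static_assert(
        !absl::internal_casts::is_bitcastable<uint64_t, float>::value,
        "size mismatch is not bit-castable");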
+
+// implicit_cast()
+//
+// Performs an implicit conversion between types following the language
+// rules for implicit conversion; if an implicit conversion is otherwise
+// allowed by the language in the given context, this function performs such an
+// implicit conversion.
+//
+// Example:
+//
+//   // If the context allows implicit conversion:
+//   From from;
+//   To to = from;
+//
+//   // Such code can be replaced by:
+//   implicit_cast<To>(from);
+//
+// An `implicit_cast()` may also be used to annotate numeric type conversions
+// that, although safe, may produce compiler warnings (such as `long` to `int`).
+// Additionally, an `implicit_cast()` is also useful within return statements to
+// indicate a specific implicit conversion is being undertaken.
+//
+// Example:
+//
+//   return implicit_cast<double>(size_in_bytes) / capacity_;
+//
+// Annotating code with `implicit_cast()` allows you to explicitly select
+// particular overloads and template instantiations, while providing a safer
+// cast than `reinterpret_cast()` or `static_cast()`.
+//
+// Additionally, an `implicit_cast()` can be used to allow upcasting within a
+// type hierarchy where incorrect use of `static_cast()` could accidentally
+// allow downcasting.
+//
+// Finally, an `implicit_cast()` can be used to perform implicit conversions
+// from unrelated types that otherwise couldn't be implicitly cast directly;
+// C++ will normally only implicitly cast "one step" in such conversions.
+//
+// That is, if C is a type which can be implicitly converted to B, with B being
+// a type that can be implicitly converted to A, an `implicit_cast()` can be
+// used to convert C to B (which the compiler can then implicitly convert to A
+// using language rules).
+//
+// Example:
+//
+//   // Assume an object C is convertible to B, which is implicitly convertible
+//   // to A
+//   A a = implicit_cast<B>(C);
+//
+// Such implicit cast chaining may be useful within template logic.
+template <typename To>
+constexpr To implicit_cast(typename absl::internal::identity_t<To> to) {
+  return to;
+}
+
+// bit_cast()
+//
+// Performs a bitwise cast on a type without changing the underlying bit
+// representation of that type's value. The two types must be of the same size
+// and both types must be trivially copyable. As with most casts, use with
+// caution. A `bit_cast()` might be needed when you need to temporarily treat a
+// type as some other type, such as in the following cases:
+//
+//    * Serialization (casting temporarily to `char *` for those purposes is
+//      always allowed by the C++ standard)
+//    * Managing the individual bits of a type within mathematical operations
+//      that are not normally accessible through that type
+//    * Casting non-pointer types to pointer types (casting the other way is
+//      allowed by `reinterpret_cast()` but round-trips cannot occur the other
+//      way).
+//
+// Example:
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int32_t>(f);
+//   // i = 0x40490fdb
+//
+// Casting non-pointer types to pointer types and then dereferencing them
+// traditionally produces undefined behavior.
+//
+// Example:
+//
+//   // WRONG
+//   float f = 3.14159265358979;            // WRONG
+//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//
+// The address-casting method produces undefined behavior according to the ISO
+// C++ specification section [basic.lval]. Roughly, this section says: if an
+// object in memory has one type, and a program accesses it with a different
+// type, the result is undefined behavior for most values of "different type".
+//
+// Such casting results in type punning: holding an object in memory of one type
+// and reading its bits back using a different type. A `bit_cast()` avoids this
+// issue by implementing its casts using `memcpy()`, which avoids introducing
+// this undefined behavior.
+//
+// NOTE: The requirements here are more strict than the bit_cast of standard
+// proposal p0476 due to the need for workarounds and lack of intrinsics.
+// Specifically, this implementation also requires `Dest` to be
+// default-constructible.
+template <
+    typename Dest, typename Source,
+    typename std::enable_if<internal_casts::is_bitcastable<Dest, Source>::value,
+                            int>::type = 0>
+inline Dest bit_cast(const Source& source) {
+  Dest dest;
+  memcpy(static_cast<void*>(std::addressof(dest)),
+         static_cast<const void*>(std::addressof(source)), sizeof(dest));
+  return dest;
+}
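As a usage note, the checked overload above is the well-defined replacement for the pointer-punning pattern the comment warns against (editorial sketch, standalone):

    #include <cstdint>
    #include <cstdio>
    #include "absl/base/casts.h"

    void DumpBits(float f) {
      // Well-defined alternative to *reinterpret_cast<uint32_t*>(&f).
      uint32_t bits = absl::bit_cast<uint32_t>(f);
      std::printf("%f -> 0x%08x\n", f, static_cast<unsigned>(bits));
    }
    // DumpBits(3.14159265f) prints "3.141593 -> 0x40490fdb".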
+template <
+    typename Dest, typename Source,
+    typename std::enable_if<
+        !internal_casts::is_bitcastable<Dest, Source>::value, int>::type = 0>
+ABSL_DEPRECATED(
+    "absl::bit_cast type requirements were violated. Update the types being "
+    "used such that they are the same size and are both TriviallyCopyable.")
+inline Dest bit_cast(const Source& source) {
+  static_assert(sizeof(Dest) == sizeof(Source),
+                "Source and destination types should have equal sizes.");
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_CASTS_H_
diff --git a/base/abseil/absl/base/config.h b/base/abseil/absl/base/config.h
new file mode 100644
index 0000000..87f5b4a
--- /dev/null
+++ b/base/abseil/absl/base/config.h
@@ -0,0 +1,588 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: config.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of macros for checking the presence of
+// important compiler and platform features. Such macros can be used to
+// produce portable code by parameterizing compilation based on the presence or
+// lack of a given feature.
+//
+// We define a "feature" as some interface we wish to program to: for example,
+// a library function or system call. A value of `1` indicates support for
+// that feature; any other value indicates the feature support is undefined.
+//
+// Example:
+//
+// Suppose a programmer wants to write a program that uses the 'mmap()' system
+// call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to
+// selectively include the `mmap.h` header and bracket code using that feature
+// in the macro:
+//
+//   #include "absl/base/config.h"
+//
+//   #ifdef ABSL_HAVE_MMAP
+//   #include "sys/mman.h"
+//   #endif  //ABSL_HAVE_MMAP
+//
+//   ...
+//   #ifdef ABSL_HAVE_MMAP
+//   void *ptr = mmap(...);
+//   ...
+//   #endif  // ABSL_HAVE_MMAP
+
+#ifndef ABSL_BASE_CONFIG_H_
+#define ABSL_BASE_CONFIG_H_
+
+// Included for the __GLIBC__ macro (or similar macros on other systems).
+#include <limits.h>
+
+#ifdef __cplusplus
+// Included for __GLIBCXX__, _LIBCPP_VERSION
+#include <cstddef>
+#endif  // __cplusplus
+
+#if defined(__APPLE__)
+// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
+// __IPHONE_8_0.
+#include <Availability.h>
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/options.h"
+#include "absl/base/policy_checks.h"
+
+// -----------------------------------------------------------------------------
+// Abseil namespace annotations
+// -----------------------------------------------------------------------------
+
+// ABSL_NAMESPACE_BEGIN/ABSL_NAMESPACE_END
+//
+// An annotation placed at the beginning/end of each `namespace absl` scope.
+// This is used to inject an inline namespace.
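+// (A note on this vendored copy: the macros below expand to nothing here;
+// upstream Abseil can instead expand them to an inline versioning namespace,
+// configured through options.h.)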
+// +// The proper way to write Abseil code in the `absl` namespace is: +// +// namespace absl { +// ABSL_NAMESPACE_BEGIN +// +// void Foo(); // absl::Foo(). +// +// ABSL_NAMESPACE_END +// } // namespace absl +// +// Users of Abseil should not use these macros, because users of Abseil should +// not write `namespace absl {` in their own code for any reason. (Abseil does +// not support forward declarations of its own types, nor does it support +// user-provided specialization of Abseil templates. Code that violates these +// rules may be broken without warning.) +#define ABSL_NAMESPACE_BEGIN +#define ABSL_NAMESPACE_END + +// ----------------------------------------------------------------------------- +// Compiler Feature Checks +// ----------------------------------------------------------------------------- + +// ABSL_HAVE_BUILTIN() +// +// Checks whether the compiler supports a Clang Feature Checking Macro, and if +// so, checks whether it supports the provided builtin function "x" where x +// is one of the functions noted in +// https://clang.llvm.org/docs/LanguageExtensions.html +// +// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check. +// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html +#ifdef __has_builtin +#define ABSL_HAVE_BUILTIN(x) __has_builtin(x) +#else +#define ABSL_HAVE_BUILTIN(x) 0 +#endif + +#if defined(__is_identifier) +#define ABSL_INTERNAL_HAS_KEYWORD(x) !(__is_identifier(x)) +#else +#define ABSL_INTERNAL_HAS_KEYWORD(x) 0 +#endif + +// ABSL_HAVE_TLS is defined to 1 when __thread should be supported. +// We assume __thread is supported on Linux when compiled with Clang or compiled +// against libstdc++ with _GLIBCXX_HAVE_TLS defined. +#ifdef ABSL_HAVE_TLS +#error ABSL_HAVE_TLS cannot be directly set +#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) +#define ABSL_HAVE_TLS 1 +#endif + +// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE +// +// Checks whether `std::is_trivially_destructible` is supported. +// +// Notes: All supported compilers using libc++ support this feature, as does +// gcc >= 4.8.1 using libstdc++, and Visual Studio. +#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE +#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set +#elif defined(_LIBCPP_VERSION) || \ + (!defined(__clang__) && defined(__GNUC__) && defined(__GLIBCXX__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) || \ + defined(_MSC_VER) +#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1 +#endif + +// ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE +// +// Checks whether `std::is_trivially_default_constructible` and +// `std::is_trivially_copy_constructible` are supported. + +// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE +// +// Checks whether `std::is_trivially_copy_assignable` is supported. + +// Notes: Clang with libc++ supports these features, as does gcc >= 5.1 with +// either libc++ or libstdc++, and Visual Studio (but not NVCC). 
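+//
+// Illustrative use (a sketch, not part of the upstream header):
+//
+//   #ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+//   static_assert(std::is_trivially_copy_assignable<int>::value,
+//                 "int should be trivially copy-assignable");
+//   #endif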
+#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
+#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
+#elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE)
+#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
+#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) ||        \
+    (!defined(__clang__) && defined(__GNUC__) &&                 \
+     (__GNUC__ > 7 || (__GNUC__ == 7 && __GNUC_MINOR__ >= 4)) && \
+     (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) ||      \
+    (defined(_MSC_VER) && !defined(__NVCC__))
+#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
+#endif
+
+// ABSL_HAVE_SOURCE_LOCATION_CURRENT
+//
+// Indicates whether `absl::SourceLocation::current()` will return useful
+// information in some contexts.
+#ifndef ABSL_HAVE_SOURCE_LOCATION_CURRENT
+#if ABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \
+    ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE)
+#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
+#endif
+#endif
+
+// ABSL_HAVE_THREAD_LOCAL
+//
+// Checks whether C++11's `thread_local` storage duration specifier is
+// supported.
+#ifdef ABSL_HAVE_THREAD_LOCAL
+#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
+#elif defined(__APPLE__)
+// Notes:
+// * Xcode's clang did not support `thread_local` until version 8, and
+//   even then not for all iOS < 9.0.
+// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
+//   targeting iOS 9.x.
+// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
+//   making __has_feature unreliable there.
+//
+// Otherwise, `__has_feature` is only supported by Clang so it has to be
+// inside the `defined(__APPLE__)` check.
+#if __has_feature(cxx_thread_local) && \
+    !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+#else  // !defined(__APPLE__)
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+
+// There are platforms for which TLS should not be used even though the compiler
+// makes it seem like it's supported (Android NDK < r12b for example).
+// This is primarily because of linker problems and toolchain misconfiguration:
+// Abseil does not intend to support this indefinitely. Currently, the newest
+// toolchain that we intend to support that requires this behavior is the
+// r11 NDK - allowing for a 5 year support window on that means this option
+// is likely to be removed around June of 2021.
+// TLS isn't supported until NDK r12b per
+// https://developer.android.com/ndk/downloads/revision_history.html
+// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
+// <android/ndk-version.h>. For NDK < r16, users should define these macros,
+// e.g. `-D__NDK_MAJOR__=11 -D__NDK_MINOR__=0` for NDK r11.
+#if defined(__ANDROID__) && defined(__clang__)
+#if __has_include(<android/ndk-version.h>)
+#include <android/ndk-version.h>
+#endif  // __has_include(<android/ndk-version.h>)
+#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
+    defined(__NDK_MINOR__) &&                                               \
+    ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
+#undef ABSL_HAVE_TLS
+#undef ABSL_HAVE_THREAD_LOCAL
+#endif
+#endif  // defined(__ANDROID__) && defined(__clang__)
+
+// Emscripten doesn't yet support `thread_local` or `__thread`.
+// https://github.com/emscripten-core/emscripten/issues/3502
+#if defined(__EMSCRIPTEN__)
+#undef ABSL_HAVE_TLS
+#undef ABSL_HAVE_THREAD_LOCAL
+#endif  // defined(__EMSCRIPTEN__)
+
+// ABSL_HAVE_INTRINSIC_INT128
+//
+// Checks whether the __int128 compiler extension for a 128-bit integral type is
+// supported.
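+//
+// Illustrative use (a sketch, not part of the upstream header; `a` and `b`
+// are hypothetical uint64_t values):
+//
+//   #ifdef ABSL_HAVE_INTRINSIC_INT128
+//   unsigned __int128 wide = static_cast<unsigned __int128>(a) * b;
+//   #endif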
+// +// Note: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is +// supported, but we avoid using it in certain cases: +// * On Clang: +// * Building using Clang for Windows, where the Clang runtime library has +// 128-bit support only on LP64 architectures, but Windows is LLP64. +// * On Nvidia's nvcc: +// * nvcc also defines __GNUC__ and __SIZEOF_INT128__, but not all versions +// actually support __int128. +#ifdef ABSL_HAVE_INTRINSIC_INT128 +#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set +#elif defined(__SIZEOF_INT128__) +#if (defined(__clang__) && !defined(_WIN32)) || \ + (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \ + (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__)) +#define ABSL_HAVE_INTRINSIC_INT128 1 +#elif defined(__CUDACC__) +// __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a +// string explaining that it has been removed starting with CUDA 9. We use +// nested #ifs because there is no short-circuiting in the preprocessor. +// NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined. +#if __CUDACC_VER__ >= 70000 +#define ABSL_HAVE_INTRINSIC_INT128 1 +#endif // __CUDACC_VER__ >= 70000 +#endif // defined(__CUDACC__) +#endif // ABSL_HAVE_INTRINSIC_INT128 + +// ABSL_HAVE_EXCEPTIONS +// +// Checks whether the compiler both supports and enables exceptions. Many +// compilers support a "no exceptions" mode that disables exceptions. +// +// Generally, when ABSL_HAVE_EXCEPTIONS is not defined: +// +// * Code using `throw` and `try` may not compile. +// * The `noexcept` specifier will still compile and behave as normal. +// * The `noexcept` operator may still return `false`. +// +// For further details, consult the compiler's documentation. +#ifdef ABSL_HAVE_EXCEPTIONS +#error ABSL_HAVE_EXCEPTIONS cannot be directly set. + +#elif defined(__clang__) +// TODO(calabrese) +// Switch to using __cpp_exceptions when we no longer support versions < 3.6. +// For details on this check, see: +// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro +#if defined(__EXCEPTIONS) && __has_feature(cxx_exceptions) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif // defined(__EXCEPTIONS) && __has_feature(cxx_exceptions) + +// Handle remaining special cases and default to exceptions being supported. +#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ + !(defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__cpp_exceptions)) && \ + !(defined(_MSC_VER) && !defined(_CPPUNWIND)) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif + +// ----------------------------------------------------------------------------- +// Platform Feature Checks +// ----------------------------------------------------------------------------- + +// Currently supported operating systems and associated preprocessor +// symbols: +// +// Linux and Linux-derived __linux__ +// Android __ANDROID__ (implies __linux__) +// Linux (non-Android) __linux__ && !__ANDROID__ +// Darwin (macOS and iOS) __APPLE__ +// Akaros (http://akaros.org) __ros__ +// Windows _WIN32 +// NaCL __native_client__ +// AsmJS __asmjs__ +// WebAssembly __wasm__ +// Fuchsia __Fuchsia__ +// +// Note that since Android defines both __ANDROID__ and __linux__, one +// may probe for either Linux or Android by simply testing for __linux__. + +// ABSL_HAVE_MMAP +// +// Checks whether the platform has an mmap(2) implementation as defined in +// POSIX.1-2001. 
+#ifdef ABSL_HAVE_MMAP +#error ABSL_HAVE_MMAP cannot be directly set +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \ + defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \ + defined(__ASYLO__) +#define ABSL_HAVE_MMAP 1 +#endif + +// ABSL_HAVE_PTHREAD_GETSCHEDPARAM +// +// Checks whether the platform implements the pthread_(get|set)schedparam(3) +// functions as defined in POSIX.1-2001. +#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM +#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(__ros__) +#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1 +#endif + +// ABSL_HAVE_SCHED_YIELD +// +// Checks whether the platform implements sched_yield(2) as defined in +// POSIX.1-2001. +#ifdef ABSL_HAVE_SCHED_YIELD +#error ABSL_HAVE_SCHED_YIELD cannot be directly set +#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) +#define ABSL_HAVE_SCHED_YIELD 1 +#endif + +// ABSL_HAVE_SEMAPHORE_H +// +// Checks whether the platform supports the header and sem_init(3) +// family of functions as standardized in POSIX.1-2001. +// +// Note: While Apple provides for both iOS and macOS, it is +// explicitly deprecated and will cause build failures if enabled for those +// platforms. We side-step the issue by not defining it here for Apple +// platforms. +#ifdef ABSL_HAVE_SEMAPHORE_H +#error ABSL_HAVE_SEMAPHORE_H cannot be directly set +#elif defined(__linux__) || defined(__ros__) +#define ABSL_HAVE_SEMAPHORE_H 1 +#endif + +// ABSL_HAVE_ALARM +// +// Checks whether the platform supports the header and alarm(2) +// function as standardized in POSIX.1-2001. +#ifdef ABSL_HAVE_ALARM +#error ABSL_HAVE_ALARM cannot be directly set +#elif defined(__GOOGLE_GRTE_VERSION__) +// feature tests for Google's GRTE +#define ABSL_HAVE_ALARM 1 +#elif defined(__GLIBC__) +// feature test for glibc +#define ABSL_HAVE_ALARM 1 +#elif defined(_MSC_VER) +// feature tests for Microsoft's library +#elif defined(__MINGW32__) +// mingw32 doesn't provide alarm(2): +// https://osdn.net/projects/mingw/scm/git/mingw-org-wsl/blobs/5.2-trunk/mingwrt/include/unistd.h +// mingw-w64 provides a no-op implementation: +// https://sourceforge.net/p/mingw-w64/mingw-w64/ci/master/tree/mingw-w64-crt/misc/alarm.c +#elif defined(__EMSCRIPTEN__) +// emscripten doesn't support signals +#elif defined(__Fuchsia__) +// Signals don't exist on fuchsia. +#elif defined(__native_client__) +#else +// other standard libraries +#define ABSL_HAVE_ALARM 1 +#endif + +// ABSL_IS_LITTLE_ENDIAN +// ABSL_IS_BIG_ENDIAN +// +// Checks the endianness of the platform. +// +// Notes: uses the built in endian macros provided by GCC (since 4.6) and +// Clang (since 3.2); see +// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html. +// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error. +#if defined(ABSL_IS_BIG_ENDIAN) +#error "ABSL_IS_BIG_ENDIAN cannot be directly set." +#endif +#if defined(ABSL_IS_LITTLE_ENDIAN) +#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set." 
+#endif
+
+#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+     __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define ABSL_IS_BIG_ENDIAN 1
+#elif defined(_WIN32)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#else
+#error "absl endian detection needs to be set up for your compiler"
+#endif
+
+// macOS 10.13 and iOS 10.11 don't let you use <any>, <optional>, or <variant>
+// even though the headers exist and are publicly noted to work. See
+// https://github.com/abseil/abseil-cpp/issues/207 and
+// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
+// libc++ spells out the availability requirements in the file
+// llvm-project/libcxx/include/__config via the #define
+// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS.
+#if defined(__APPLE__) && defined(_LIBCPP_VERSION) &&            \
+    ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) &&  \
+      __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
+     (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+      __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) ||\
+     (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) &&  \
+      __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 120000) || \
+     (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) &&     \
+      __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 50000))
+#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
+#else
+#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
+#endif
+
+// ABSL_HAVE_STD_ANY
+//
+// Checks whether C++17 std::any is available by checking whether <any> exists.
+#ifdef ABSL_HAVE_STD_ANY
+#error "ABSL_HAVE_STD_ANY cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<any>) && __cplusplus >= 201703L && \
+    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_ANY 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_OPTIONAL
+//
+// Checks whether C++17 std::optional is available.
+#ifdef ABSL_HAVE_STD_OPTIONAL
+#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<optional>) && __cplusplus >= 201703L && \
+    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_OPTIONAL 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_VARIANT
+//
+// Checks whether C++17 std::variant is available.
+#ifdef ABSL_HAVE_STD_VARIANT
+#error "ABSL_HAVE_STD_VARIANT cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<variant>) && __cplusplus >= 201703L && \
+    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_VARIANT 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_STRING_VIEW
+//
+// Checks whether C++17 std::string_view is available.
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<string_view>) && __cplusplus >= 201703L
+#define ABSL_HAVE_STD_STRING_VIEW 1
+#endif
+#endif
+
+// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than
+// the support for <optional>, <any>, <string_view>, <variant>. So we use
+// _MSC_VER to check whether we have VS 2017 RTM (when <optional>, <any>,
+// <string_view>, <variant> is implemented) or higher. Also, `__cplusplus` is
+// not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
+// version.
+// TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
+#if defined(_MSC_VER) && _MSC_VER >= 1910 && \ + ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402) +// #define ABSL_HAVE_STD_ANY 1 +#define ABSL_HAVE_STD_OPTIONAL 1 +#define ABSL_HAVE_STD_VARIANT 1 +#define ABSL_HAVE_STD_STRING_VIEW 1 +#endif + +// ABSL_USES_STD_ANY +// +// Indicates whether absl::any is an alias for std::any. +#if !defined(ABSL_OPTION_USE_STD_ANY) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_ANY == 0 || \ + (ABSL_OPTION_USE_STD_ANY == 2 && !defined(ABSL_HAVE_STD_ANY)) +#undef ABSL_USES_STD_ANY +#elif ABSL_OPTION_USE_STD_ANY == 1 || \ + (ABSL_OPTION_USE_STD_ANY == 2 && defined(ABSL_HAVE_STD_ANY)) +#define ABSL_USES_STD_ANY 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_OPTIONAL +// +// Indicates whether absl::optional is an alias for std::optional. +#if !defined(ABSL_OPTION_USE_STD_OPTIONAL) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_OPTIONAL == 0 || \ + (ABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(ABSL_HAVE_STD_OPTIONAL)) +#undef ABSL_USES_STD_OPTIONAL +#elif ABSL_OPTION_USE_STD_OPTIONAL == 1 || \ + (ABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(ABSL_HAVE_STD_OPTIONAL)) +#define ABSL_USES_STD_OPTIONAL 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_VARIANT +// +// Indicates whether absl::variant is an alias for std::variant. +#if !defined(ABSL_OPTION_USE_STD_VARIANT) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_VARIANT == 0 || \ + (ABSL_OPTION_USE_STD_VARIANT == 2 && !defined(ABSL_HAVE_STD_VARIANT)) +#undef ABSL_USES_STD_VARIANT +#elif ABSL_OPTION_USE_STD_VARIANT == 1 || \ + (ABSL_OPTION_USE_STD_VARIANT == 2 && defined(ABSL_HAVE_STD_VARIANT)) +#define ABSL_USES_STD_VARIANT 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_STRING_VIEW +// +// Indicates whether absl::string_view is an alias for std::string_view. +#if !defined(ABSL_OPTION_USE_STD_STRING_VIEW) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \ + (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + !defined(ABSL_HAVE_STD_STRING_VIEW)) +#undef ABSL_USES_STD_STRING_VIEW +#elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \ + (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + defined(ABSL_HAVE_STD_STRING_VIEW)) +#define ABSL_USES_STD_STRING_VIEW 1 +#else +#error options.h is misconfigured. +#endif + +// In debug mode, MSVC 2017's std::variant throws a EXCEPTION_ACCESS_VIOLATION +// SEH exception from emplace for variant when constructing the +// struct can throw. This defeats some of variant_test and +// variant_exception_safety_test. +#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG) +#define ABSL_INTERNAL_MSVC_2017_DBG_MODE +#endif + +#undef ABSL_INTERNAL_HAS_KEYWORD + +#endif // ABSL_BASE_CONFIG_H_ diff --git a/base/abseil/absl/base/config_test.cc b/base/abseil/absl/base/config_test.cc new file mode 100644 index 0000000..7e0c033 --- /dev/null +++ b/base/abseil/absl/base/config_test.cc @@ -0,0 +1,60 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/config.h" + +#include + +#include "gtest/gtest.h" +#include "absl/synchronization/internal/thread_pool.h" + +namespace { + +TEST(ConfigTest, Endianness) { + union { + uint32_t value; + uint8_t data[sizeof(uint32_t)]; + } number; + number.data[0] = 0x00; + number.data[1] = 0x01; + number.data[2] = 0x02; + number.data[3] = 0x03; +#if defined(ABSL_IS_LITTLE_ENDIAN) && defined(ABSL_IS_BIG_ENDIAN) +#error Both ABSL_IS_LITTLE_ENDIAN and ABSL_IS_BIG_ENDIAN are defined +#elif defined(ABSL_IS_LITTLE_ENDIAN) + EXPECT_EQ(UINT32_C(0x03020100), number.value); +#elif defined(ABSL_IS_BIG_ENDIAN) + EXPECT_EQ(UINT32_C(0x00010203), number.value); +#else +#error Unknown endianness +#endif +} + +#if defined(ABSL_HAVE_THREAD_LOCAL) +TEST(ConfigTest, ThreadLocal) { + static thread_local int mine_mine_mine = 16; + EXPECT_EQ(16, mine_mine_mine); + { + absl::synchronization_internal::ThreadPool pool(1); + pool.Schedule([&] { + EXPECT_EQ(16, mine_mine_mine); + mine_mine_mine = 32; + EXPECT_EQ(32, mine_mine_mine); + }); + } + EXPECT_EQ(16, mine_mine_mine); +} +#endif + +} // namespace diff --git a/base/abseil/absl/base/const_init.h b/base/abseil/absl/base/const_init.h new file mode 100644 index 0000000..16520b6 --- /dev/null +++ b/base/abseil/absl/base/const_init.h @@ -0,0 +1,76 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// kConstInit +// ----------------------------------------------------------------------------- +// +// A constructor tag used to mark an object as safe for use as a global +// variable, avoiding the usual lifetime issues that can affect globals. + +#ifndef ABSL_BASE_CONST_INIT_H_ +#define ABSL_BASE_CONST_INIT_H_ + +#include "absl/base/config.h" + +// In general, objects with static storage duration (such as global variables) +// can trigger tricky object lifetime situations. Attempting to access them +// from the constructors or destructors of other global objects can result in +// undefined behavior, unless their constructors and destructors are designed +// with this issue in mind. +// +// The normal way to deal with this issue in C++11 is to use constant +// initialization and trivial destructors. +// +// Constant initialization is guaranteed to occur before any other code +// executes. Constructors that are declared 'constexpr' are eligible for +// constant initialization. You can annotate a variable declaration with the +// ABSL_CONST_INIT macro to express this intent. For compilers that support +// it, this annotation will cause a compilation error for declarations that +// aren't subject to constant initialization (perhaps because a runtime value +// was passed as a constructor argument). +// +// On program shutdown, lifetime issues can be avoided on global objects by +// ensuring that they contain trivial destructors. 
A class has a trivial
+// destructor unless it has a user-defined destructor, a virtual method or base
+// class, or a data member or base class with a non-trivial destructor of its
+// own. Objects with static storage duration and a trivial destructor are not
+// cleaned up on program shutdown, and are thus safe to access from other code
+// running during shutdown.
+//
+// For a few core Abseil classes, we make a best effort to allow for safe global
+// instances, even though these classes have non-trivial destructors. These
+// objects can be created with the absl::kConstInit tag. For example:
+//   ABSL_CONST_INIT absl::Mutex global_mutex(absl::kConstInit);
+//
+// The line above declares a global variable of type absl::Mutex which can be
+// accessed at any point during startup or shutdown. global_mutex's destructor
+// will still run, but will not invalidate the object. Note that C++ specifies
+// that accessing an object after its destructor has run results in undefined
+// behavior, but this pattern works on the toolchains we support.
+//
+// The absl::kConstInit tag should only be used to define objects with static
+// or thread_local storage duration.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+enum ConstInitType {
+  kConstInit,
+};
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_CONST_INIT_H_
diff --git a/base/abseil/absl/base/dynamic_annotations.cc b/base/abseil/absl/base/dynamic_annotations.cc
new file mode 100644
index 0000000..21e822e
--- /dev/null
+++ b/base/abseil/absl/base/dynamic_annotations.cc
@@ -0,0 +1,129 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "absl/base/dynamic_annotations.h"
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+/* Compiler-based ThreadSanitizer defines
+   DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
+   and provides its own definitions of the functions. */
+
+#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
+# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
+#endif
+
+/* Each function is empty and called (via a macro) only in debug mode.
+   The arguments are captured by dynamic tools at runtime.
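+
+   For example (an illustrative note), a compiler-based ThreadSanitizer build
+   sets DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL to 1 and links its own definitions,
+   while the no-op bodies below keep un-instrumented builds self-contained.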
*/
+
+#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 && !defined(__native_client__)
+
+#if __has_feature(memory_sanitizer)
+#include <sanitizer/msan_interface.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void AnnotateRWLockCreate(const char *, int,
+                          const volatile void *){}
+void AnnotateRWLockDestroy(const char *, int,
+                           const volatile void *){}
+void AnnotateRWLockAcquired(const char *, int,
+                            const volatile void *, long){}
+void AnnotateRWLockReleased(const char *, int,
+                            const volatile void *, long){}
+void AnnotateBenignRace(const char *, int,
+                        const volatile void *,
+                        const char *){}
+void AnnotateBenignRaceSized(const char *, int,
+                             const volatile void *,
+                             size_t,
+                             const char *) {}
+void AnnotateThreadName(const char *, int,
+                        const char *){}
+void AnnotateIgnoreReadsBegin(const char *, int){}
+void AnnotateIgnoreReadsEnd(const char *, int){}
+void AnnotateIgnoreWritesBegin(const char *, int){}
+void AnnotateIgnoreWritesEnd(const char *, int){}
+void AnnotateEnableRaceDetection(const char *, int, int){}
+void AnnotateMemoryIsInitialized(const char *, int,
+                                 const volatile void *mem, size_t size) {
+#if __has_feature(memory_sanitizer)
+  __msan_unpoison(mem, size);
+#else
+  (void)mem;
+  (void)size;
+#endif
+}
+
+void AnnotateMemoryIsUninitialized(const char *, int,
+                                   const volatile void *mem, size_t size) {
+#if __has_feature(memory_sanitizer)
+  __msan_allocated_memory(mem, size);
+#else
+  (void)mem;
+  (void)size;
+#endif
+}
+
+static int GetRunningOnValgrind(void) {
+#ifdef RUNNING_ON_VALGRIND
+  if (RUNNING_ON_VALGRIND) return 1;
+#endif
+  char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND");
+  if (running_on_valgrind_str) {
+    return strcmp(running_on_valgrind_str, "0") != 0;
+  }
+  return 0;
+}
+
+/* See the comments in dynamic_annotations.h */
+int RunningOnValgrind(void) {
+  static volatile int running_on_valgrind = -1;
+  int local_running_on_valgrind = running_on_valgrind;
+  /* C doesn't have thread-safe initialization of statics, and we
+     don't want to depend on pthread_once here, so hack it. */
+  ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack");
+  if (local_running_on_valgrind == -1)
+    running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
+  return local_running_on_valgrind;
+}
+
+/* See the comments in dynamic_annotations.h */
+double ValgrindSlowdown(void) {
+  /* Same initialization hack as in RunningOnValgrind(). */
+  static volatile double slowdown = 0.0;
+  double local_slowdown = slowdown;
+  ANNOTATE_BENIGN_RACE(&slowdown, "safe hack");
+  if (RunningOnValgrind() == 0) {
+    return 1.0;
+  }
+  if (local_slowdown == 0.0) {
+    char *env = getenv("VALGRIND_SLOWDOWN");
+    slowdown = local_slowdown = env ? atof(env) : 50.0;
+  }
+  return local_slowdown;
}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+#endif  /* DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
diff --git a/base/abseil/absl/base/dynamic_annotations.h b/base/abseil/absl/base/dynamic_annotations.h
new file mode 100644
index 0000000..65a54b4
--- /dev/null
+++ b/base/abseil/absl/base/dynamic_annotations.h
@@ -0,0 +1,389 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* This file defines dynamic annotations for use with dynamic analysis + tool such as valgrind, PIN, etc. + + Dynamic annotation is a source code annotation that affects + the generated code (that is, the annotation is not a comment). + Each such annotation is attached to a particular + instruction and/or to a particular object (address) in the program. + + The annotations that should be used by users are macros in all upper-case + (e.g., ANNOTATE_THREAD_NAME). + + Actual implementation of these macros may differ depending on the + dynamic analysis tool being used. + + This file supports the following configurations: + - Dynamic Annotations enabled (with static thread-safety warnings disabled). + In this case, macros expand to functions implemented by Thread Sanitizer, + when building with TSan. When not provided an external implementation, + dynamic_annotations.cc provides no-op implementations. + + - Static Clang thread-safety warnings enabled. + When building with a Clang compiler that supports thread-safety warnings, + a subset of annotations can be statically-checked at compile-time. We + expand these macros to static-inline functions that can be analyzed for + thread-safety, but afterwards elided when building the final binary. + + - All annotations are disabled. + If neither Dynamic Annotations nor Clang thread-safety warnings are + enabled, then all annotation-macros expand to empty. */ + +#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ +#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ + +#ifndef DYNAMIC_ANNOTATIONS_ENABLED +# define DYNAMIC_ANNOTATIONS_ENABLED 0 +#endif + +#if DYNAMIC_ANNOTATIONS_ENABLED != 0 + + /* ------------------------------------------------------------- + Annotations that suppress errors. It is usually better to express the + program's synchronization using the other annotations, but these can + be used when all else fails. */ + + /* Report that we may have a benign race at "pointer", with size + "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the + point where "pointer" has been allocated, preferably close to the point + where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */ + #define ANNOTATE_BENIGN_RACE(pointer, description) \ + AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \ + sizeof(*(pointer)), description) + + /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to + the memory range [address, address+size). */ + #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description) + + /* Enable (enable!=0) or disable (enable==0) race detection for all threads. + This annotation could be useful if you want to skip expensive race analysis + during some period of program execution, e.g. during initialization. */ + #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + AnnotateEnableRaceDetection(__FILE__, __LINE__, enable) + + /* ------------------------------------------------------------- + Annotations useful for debugging. */ + + /* Report the current thread name to a race detector. 
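+     For example (an illustrative sketch), near the top of a thread's body:
+       ANNOTATE_THREAD_NAME("io_worker");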
*/ + #define ANNOTATE_THREAD_NAME(name) \ + AnnotateThreadName(__FILE__, __LINE__, name) + + /* ------------------------------------------------------------- + Annotations useful when implementing locks. They are not + normally needed by modules that merely use locks. + The "lock" argument is a pointer to the lock object. */ + + /* Report that a lock has been created at address "lock". */ + #define ANNOTATE_RWLOCK_CREATE(lock) \ + AnnotateRWLockCreate(__FILE__, __LINE__, lock) + + /* Report that a linker initialized lock has been created at address "lock". + */ +#ifdef THREAD_SANITIZER + #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + AnnotateRWLockCreateStatic(__FILE__, __LINE__, lock) +#else + #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock) +#endif + + /* Report that the lock at address "lock" is about to be destroyed. */ + #define ANNOTATE_RWLOCK_DESTROY(lock) \ + AnnotateRWLockDestroy(__FILE__, __LINE__, lock) + + /* Report that the lock at address "lock" has been acquired. + is_w=1 for writer lock, is_w=0 for reader lock. */ + #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w) + + /* Report that the lock at address "lock" is about to be released. */ + #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w) + +#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ + + #define ANNOTATE_RWLOCK_CREATE(lock) /* empty */ + #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) /* empty */ + #define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */ + #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */ + #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */ + #define ANNOTATE_BENIGN_RACE(address, description) /* empty */ + #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */ + #define ANNOTATE_THREAD_NAME(name) /* empty */ + #define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */ + +#endif /* DYNAMIC_ANNOTATIONS_ENABLED */ + +/* These annotations are also made available to LLVM's Memory Sanitizer */ +#if DYNAMIC_ANNOTATIONS_ENABLED == 1 || defined(MEMORY_SANITIZER) + #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + AnnotateMemoryIsInitialized(__FILE__, __LINE__, address, size) + + #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + AnnotateMemoryIsUninitialized(__FILE__, __LINE__, address, size) +#else + #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */ + #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) /* empty */ +#endif /* DYNAMIC_ANNOTATIONS_ENABLED || MEMORY_SANITIZER */ + +/* TODO(delesley) -- Replace __CLANG_SUPPORT_DYN_ANNOTATION__ with the + appropriate feature ID. */ +#if defined(__clang__) && (!defined(SWIG)) \ + && defined(__CLANG_SUPPORT_DYN_ANNOTATION__) + + #if DYNAMIC_ANNOTATIONS_ENABLED == 0 + #define ANNOTALYSIS_ENABLED + #endif + + /* When running in opt-mode, GCC will issue a warning, if these attributes are + compiled. Only include them when compiling using Clang. */ + #define ATTRIBUTE_IGNORE_READS_BEGIN \ + __attribute((exclusive_lock_function("*"))) + #define ATTRIBUTE_IGNORE_READS_END \ + __attribute((unlock_function("*"))) +#else + #define ATTRIBUTE_IGNORE_READS_BEGIN /* empty */ + #define ATTRIBUTE_IGNORE_READS_END /* empty */ +#endif /* defined(__clang__) && ... 
*/ + +#if (DYNAMIC_ANNOTATIONS_ENABLED != 0) || defined(ANNOTALYSIS_ENABLED) + #define ANNOTATIONS_ENABLED +#endif + +#if (DYNAMIC_ANNOTATIONS_ENABLED != 0) + + /* Request the analysis tool to ignore all reads in the current thread + until ANNOTATE_IGNORE_READS_END is called. + Useful to ignore intentional racey reads, while still checking + other reads and all writes. + See also ANNOTATE_UNPROTECTED_READ. */ + #define ANNOTATE_IGNORE_READS_BEGIN() \ + AnnotateIgnoreReadsBegin(__FILE__, __LINE__) + + /* Stop ignoring reads. */ + #define ANNOTATE_IGNORE_READS_END() \ + AnnotateIgnoreReadsEnd(__FILE__, __LINE__) + + /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. */ + #define ANNOTATE_IGNORE_WRITES_BEGIN() \ + AnnotateIgnoreWritesBegin(__FILE__, __LINE__) + + /* Stop ignoring writes. */ + #define ANNOTATE_IGNORE_WRITES_END() \ + AnnotateIgnoreWritesEnd(__FILE__, __LINE__) + +/* Clang provides limited support for static thread-safety analysis + through a feature called Annotalysis. We configure macro-definitions + according to whether Annotalysis support is available. */ +#elif defined(ANNOTALYSIS_ENABLED) + + #define ANNOTATE_IGNORE_READS_BEGIN() \ + StaticAnnotateIgnoreReadsBegin(__FILE__, __LINE__) + + #define ANNOTATE_IGNORE_READS_END() \ + StaticAnnotateIgnoreReadsEnd(__FILE__, __LINE__) + + #define ANNOTATE_IGNORE_WRITES_BEGIN() \ + StaticAnnotateIgnoreWritesBegin(__FILE__, __LINE__) + + #define ANNOTATE_IGNORE_WRITES_END() \ + StaticAnnotateIgnoreWritesEnd(__FILE__, __LINE__) + +#else + #define ANNOTATE_IGNORE_READS_BEGIN() /* empty */ + #define ANNOTATE_IGNORE_READS_END() /* empty */ + #define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */ + #define ANNOTATE_IGNORE_WRITES_END() /* empty */ +#endif + +/* Implement the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more + primitive annotations defined above. */ +#if defined(ANNOTATIONS_ENABLED) + + /* Start ignoring all memory accesses (both reads and writes). */ + #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do { \ + ANNOTATE_IGNORE_READS_BEGIN(); \ + ANNOTATE_IGNORE_WRITES_BEGIN(); \ + }while (0) + + /* Stop ignoring both reads and writes. */ + #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do { \ + ANNOTATE_IGNORE_WRITES_END(); \ + ANNOTATE_IGNORE_READS_END(); \ + }while (0) + +#else + #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */ + #define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */ +#endif + +/* Use the macros above rather than using these functions directly. 
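+
+   For example (an illustrative sketch), prefer
+     ANNOTATE_IGNORE_READS_BEGIN();
+   over calling AnnotateIgnoreReadsBegin(__FILE__, __LINE__) yourself, so the
+   call compiles away entirely when annotations are disabled.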
*/
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void AnnotateRWLockCreate(const char *file, int line,
+                          const volatile void *lock);
+void AnnotateRWLockCreateStatic(const char *file, int line,
+                                const volatile void *lock);
+void AnnotateRWLockDestroy(const char *file, int line,
+                           const volatile void *lock);
+void AnnotateRWLockAcquired(const char *file, int line,
+                            const volatile void *lock, long is_w);  /* NOLINT */
+void AnnotateRWLockReleased(const char *file, int line,
+                            const volatile void *lock, long is_w);  /* NOLINT */
+void AnnotateBenignRace(const char *file, int line,
+                        const volatile void *address,
+                        const char *description);
+void AnnotateBenignRaceSized(const char *file, int line,
+                             const volatile void *address,
+                             size_t size,
+                             const char *description);
+void AnnotateThreadName(const char *file, int line,
+                        const char *name);
+void AnnotateEnableRaceDetection(const char *file, int line, int enable);
+void AnnotateMemoryIsInitialized(const char *file, int line,
+                                 const volatile void *mem, size_t size);
+void AnnotateMemoryIsUninitialized(const char *file, int line,
+                                   const volatile void *mem, size_t size);
+
+/* Annotations expand to these functions, when Dynamic Annotations are enabled.
+   These functions are either implemented as no-op calls, if no Sanitizer is
+   attached, or provided with externally-linked implementations by a library
+   like ThreadSanitizer. */
+void AnnotateIgnoreReadsBegin(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_BEGIN;
+void AnnotateIgnoreReadsEnd(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_END;
+void AnnotateIgnoreWritesBegin(const char *file, int line);
+void AnnotateIgnoreWritesEnd(const char *file, int line);
+
+#if defined(ANNOTALYSIS_ENABLED)
+/* When Annotalysis is enabled without Dynamic Annotations, the use of
+   static-inline functions allows the annotations to be read at compile-time,
+   while still letting the compiler elide the functions from the final build.
+
+   TODO(delesley) -- The exclusive lock here ignores writes as well, but
+   allows IGNORE_READS_AND_WRITES to work properly. */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-function"
+static inline void StaticAnnotateIgnoreReadsBegin(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_BEGIN { (void)file; (void)line; }
+static inline void StaticAnnotateIgnoreReadsEnd(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_END { (void)file; (void)line; }
+static inline void StaticAnnotateIgnoreWritesBegin(
+    const char *file, int line) { (void)file; (void)line; }
+static inline void StaticAnnotateIgnoreWritesEnd(
+    const char *file, int line) { (void)file; (void)line; }
+#pragma GCC diagnostic pop
+#endif
+
+/* Return non-zero value if running under valgrind.
+
+   If "valgrind.h" is included into dynamic_annotations.cc,
+   the regular valgrind mechanism will be used.
+   See http://valgrind.org/docs/manual/manual-core-adv.html about
+   RUNNING_ON_VALGRIND and other valgrind "client requests".
+   The file "valgrind.h" may be obtained by doing
+     svn co svn://svn.valgrind.org/valgrind/trunk/include
+
+   If for some reason you can't use "valgrind.h" or want to fake valgrind,
+   there are two ways to make this function return non-zero:
+     - Use environment variable: export RUNNING_ON_VALGRIND=1
+     - Make your tool intercept the function RunningOnValgrind() and
+       change its return value.
+ */ +int RunningOnValgrind(void); + +/* ValgrindSlowdown returns: + * 1.0, if (RunningOnValgrind() == 0) + * 50.0, if (RunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") == NULL) + * atof(getenv("VALGRIND_SLOWDOWN")) otherwise + This function can be used to scale timeout values: + EXAMPLE: + for (;;) { + DoExpensiveBackgroundTask(); + SleepForSeconds(5 * ValgrindSlowdown()); + } + */ +double ValgrindSlowdown(void); + +#ifdef __cplusplus +} +#endif + +/* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. + + Instead of doing + ANNOTATE_IGNORE_READS_BEGIN(); + ... = x; + ANNOTATE_IGNORE_READS_END(); + one can use + ... = ANNOTATE_UNPROTECTED_READ(x); */ +#if defined(__cplusplus) && defined(ANNOTATIONS_ENABLED) +template +inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) { /* NOLINT */ + ANNOTATE_IGNORE_READS_BEGIN(); + T res = x; + ANNOTATE_IGNORE_READS_END(); + return res; + } +#else + #define ANNOTATE_UNPROTECTED_READ(x) (x) +#endif + +#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus) + /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */ + #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + class static_var ## _annotator { \ + public: \ + static_var ## _annotator() { \ + ANNOTATE_BENIGN_RACE_SIZED(&static_var, \ + sizeof(static_var), \ + # static_var ": " description); \ + } \ + }; \ + static static_var ## _annotator the ## static_var ## _annotator;\ + } // namespace +#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ + #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */ +#endif /* DYNAMIC_ANNOTATIONS_ENABLED */ + +#ifdef ADDRESS_SANITIZER +/* Describe the current state of a contiguous container such as e.g. + * std::vector or std::string. For more details see + * sanitizer/common_interface_defs.h, which is provided by the compiler. */ +#include +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) \ + struct { char x[8] __attribute__ ((aligned (8))); } name +#else +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") +#endif // ADDRESS_SANITIZER + +/* Undefine the macros intended only in this file. */ +#undef ANNOTALYSIS_ENABLED +#undef ANNOTATIONS_ENABLED +#undef ATTRIBUTE_IGNORE_READS_BEGIN +#undef ATTRIBUTE_IGNORE_READS_END + +#endif /* ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ */ diff --git a/base/abseil/absl/base/exception_safety_testing_test.cc b/base/abseil/absl/base/exception_safety_testing_test.cc new file mode 100644 index 0000000..575b535 --- /dev/null +++ b/base/abseil/absl/base/exception_safety_testing_test.cc @@ -0,0 +1,958 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/base/internal/exception_safety_testing.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + +#include +#include +#include +#include +#include +#include + +#include "gtest/gtest-spi.h" +#include "gtest/gtest.h" +#include "absl/memory/memory.h" + +namespace testing { + +namespace { + +using ::testing::exceptions_internal::SetCountdown; +using ::testing::exceptions_internal::TestException; +using ::testing::exceptions_internal::UnsetCountdown; + +// EXPECT_NO_THROW can't inspect the thrown inspection in general. +template +void ExpectNoThrow(const F& f) { + try { + f(); + } catch (const TestException& e) { + ADD_FAILURE() << "Unexpected exception thrown from " << e.what(); + } +} + +TEST(ThrowingValueTest, Throws) { + SetCountdown(); + EXPECT_THROW(ThrowingValue<> bomb, TestException); + + // It's not guaranteed that every operator only throws *once*. The default + // ctor only throws once, though, so use it to make sure we only throw when + // the countdown hits 0 + SetCountdown(2); + ExpectNoThrow([]() { ThrowingValue<> bomb; }); + ExpectNoThrow([]() { ThrowingValue<> bomb; }); + EXPECT_THROW(ThrowingValue<> bomb, TestException); + + UnsetCountdown(); +} + +// Tests that an operation throws when the countdown is at 0, doesn't throw when +// the countdown doesn't hit 0, and doesn't modify the state of the +// ThrowingValue if it throws +template +void TestOp(const F& f) { + ExpectNoThrow(f); + + SetCountdown(); + EXPECT_THROW(f(), TestException); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, ThrowingCtors) { + ThrowingValue<> bomb; + + TestOp([]() { ThrowingValue<> bomb(1); }); + TestOp([&]() { ThrowingValue<> bomb1 = bomb; }); + TestOp([&]() { ThrowingValue<> bomb1 = std::move(bomb); }); +} + +TEST(ThrowingValueTest, ThrowingAssignment) { + ThrowingValue<> bomb, bomb1; + + TestOp([&]() { bomb = bomb1; }); + TestOp([&]() { bomb = std::move(bomb1); }); + + // Test that when assignment throws, the assignment should fail (lhs != rhs) + // and strong guarantee fails (lhs != lhs_copy). 
+ { + ThrowingValue<> lhs(39), rhs(42); + ThrowingValue<> lhs_copy(lhs); + SetCountdown(); + EXPECT_THROW(lhs = rhs, TestException); + UnsetCountdown(); + EXPECT_NE(lhs, rhs); + EXPECT_NE(lhs_copy, lhs); + } + { + ThrowingValue<> lhs(39), rhs(42); + ThrowingValue<> lhs_copy(lhs), rhs_copy(rhs); + SetCountdown(); + EXPECT_THROW(lhs = std::move(rhs), TestException); + UnsetCountdown(); + EXPECT_NE(lhs, rhs_copy); + EXPECT_NE(lhs_copy, lhs); + } +} + +TEST(ThrowingValueTest, ThrowingComparisons) { + ThrowingValue<> bomb1, bomb2; + TestOp([&]() { return bomb1 == bomb2; }); + TestOp([&]() { return bomb1 != bomb2; }); + TestOp([&]() { return bomb1 < bomb2; }); + TestOp([&]() { return bomb1 <= bomb2; }); + TestOp([&]() { return bomb1 > bomb2; }); + TestOp([&]() { return bomb1 >= bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingArithmeticOps) { + ThrowingValue<> bomb1(1), bomb2(2); + + TestOp([&bomb1]() { +bomb1; }); + TestOp([&bomb1]() { -bomb1; }); + TestOp([&bomb1]() { ++bomb1; }); + TestOp([&bomb1]() { bomb1++; }); + TestOp([&bomb1]() { --bomb1; }); + TestOp([&bomb1]() { bomb1--; }); + + TestOp([&]() { bomb1 + bomb2; }); + TestOp([&]() { bomb1 - bomb2; }); + TestOp([&]() { bomb1* bomb2; }); + TestOp([&]() { bomb1 / bomb2; }); + TestOp([&]() { bomb1 << 1; }); + TestOp([&]() { bomb1 >> 1; }); +} + +TEST(ThrowingValueTest, ThrowingLogicalOps) { + ThrowingValue<> bomb1, bomb2; + + TestOp([&bomb1]() { !bomb1; }); + TestOp([&]() { bomb1&& bomb2; }); + TestOp([&]() { bomb1 || bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingBitwiseOps) { + ThrowingValue<> bomb1, bomb2; + + TestOp([&bomb1]() { ~bomb1; }); + TestOp([&]() { bomb1& bomb2; }); + TestOp([&]() { bomb1 | bomb2; }); + TestOp([&]() { bomb1 ^ bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingCompoundAssignmentOps) { + ThrowingValue<> bomb1(1), bomb2(2); + + TestOp([&]() { bomb1 += bomb2; }); + TestOp([&]() { bomb1 -= bomb2; }); + TestOp([&]() { bomb1 *= bomb2; }); + TestOp([&]() { bomb1 /= bomb2; }); + TestOp([&]() { bomb1 %= bomb2; }); + TestOp([&]() { bomb1 &= bomb2; }); + TestOp([&]() { bomb1 |= bomb2; }); + TestOp([&]() { bomb1 ^= bomb2; }); + TestOp([&]() { bomb1 *= bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingStreamOps) { + ThrowingValue<> bomb; + + TestOp([&]() { + std::istringstream stream; + stream >> bomb; + }); + TestOp([&]() { + std::stringstream stream; + stream << bomb; + }); +} + +// Tests the operator<< of ThrowingValue by forcing ConstructorTracker to emit +// a nonfatal failure that contains the string representation of the Thrower +TEST(ThrowingValueTest, StreamOpsOutput) { + using ::testing::TypeSpec; + exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); + + // Test default spec list (kEverythingThrows) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = ThrowingValue; + auto thrower = Thrower(123); + thrower.~Thrower(); + }, + "ThrowingValue<>(123)"); + + // Test with one item in spec list (kNoThrowCopy) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = ThrowingValue; + auto thrower = Thrower(234); + thrower.~Thrower(); + }, + "ThrowingValue(234)"); + + // Test with multiple items in spec list (kNoThrowMove, kNoThrowNew) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = + ThrowingValue; + auto thrower = Thrower(345); + thrower.~Thrower(); + }, + "ThrowingValue(345)"); + + // Test with all items in spec list (kNoThrowCopy, kNoThrowMove, kNoThrowNew) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = ThrowingValue(-1)>; + auto thrower = Thrower(456); + thrower.~Thrower(); + }, + "ThrowingValue(456)"); +} + 
+template <typename F>
+void TestAllocatingOp(const F& f) {
+  ExpectNoThrow(f);
+
+  SetCountdown();
+  EXPECT_THROW(f(), exceptions_internal::TestBadAllocException);
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, ThrowingAllocatingOps) {
+  // make_unique calls unqualified operator new, so these exercise the
+  // ThrowingValue overloads.
+  TestAllocatingOp([]() { return absl::make_unique<ThrowingValue<>>(1); });
+  TestAllocatingOp([]() { return absl::make_unique<ThrowingValue<>[]>(2); });
+}
+
+TEST(ThrowingValueTest, NonThrowingMoveCtor) {
+  ThrowingValue<TypeSpec::kNoThrowMove> nothrow_ctor;
+
+  SetCountdown();
+  ExpectNoThrow([&nothrow_ctor]() {
+    ThrowingValue<TypeSpec::kNoThrowMove> nothrow1 = std::move(nothrow_ctor);
+  });
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, NonThrowingMoveAssign) {
+  ThrowingValue<TypeSpec::kNoThrowMove> nothrow_assign1, nothrow_assign2;
+
+  SetCountdown();
+  ExpectNoThrow([&nothrow_assign1, &nothrow_assign2]() {
+    nothrow_assign1 = std::move(nothrow_assign2);
+  });
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, ThrowingCopyCtor) {
+  ThrowingValue<> tv;
+
+  TestOp([&]() { ThrowingValue<> tv_copy(tv); });
+}
+
+TEST(ThrowingValueTest, ThrowingCopyAssign) {
+  ThrowingValue<> tv1, tv2;
+
+  TestOp([&]() { tv1 = tv2; });
+}
+
+TEST(ThrowingValueTest, NonThrowingCopyCtor) {
+  ThrowingValue<TypeSpec::kNoThrowCopy> nothrow_ctor;
+
+  SetCountdown();
+  ExpectNoThrow([&nothrow_ctor]() {
+    ThrowingValue<TypeSpec::kNoThrowCopy> nothrow1(nothrow_ctor);
+  });
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, NonThrowingCopyAssign) {
+  ThrowingValue<TypeSpec::kNoThrowCopy> nothrow_assign1, nothrow_assign2;
+
+  SetCountdown();
+  ExpectNoThrow([&nothrow_assign1, &nothrow_assign2]() {
+    nothrow_assign1 = nothrow_assign2;
+  });
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, ThrowingSwap) {
+  ThrowingValue<> bomb1, bomb2;
+  TestOp([&]() { std::swap(bomb1, bomb2); });
+}
+
+TEST(ThrowingValueTest, NonThrowingSwap) {
+  ThrowingValue<TypeSpec::kNoThrowMove> bomb1, bomb2;
+  ExpectNoThrow([&]() { std::swap(bomb1, bomb2); });
+}
+
+TEST(ThrowingValueTest, NonThrowingAllocation) {
+  ThrowingValue<TypeSpec::kNoThrowNew>* allocated;
+  ThrowingValue<TypeSpec::kNoThrowNew>* array;
+
+  ExpectNoThrow([&allocated]() {
+    allocated = new ThrowingValue<TypeSpec::kNoThrowNew>(1);
+    delete allocated;
+  });
+  ExpectNoThrow([&array]() {
+    array = new ThrowingValue<TypeSpec::kNoThrowNew>[2];
+    delete[] array;
+  });
}
+
+TEST(ThrowingValueTest, NonThrowingDelete) {
+  auto* allocated = new ThrowingValue<>(1);
+  auto* array = new ThrowingValue<>[2];
+
+  SetCountdown();
+  ExpectNoThrow([allocated]() { delete allocated; });
+  SetCountdown();
+  ExpectNoThrow([array]() { delete[] array; });
+
+  UnsetCountdown();
+}
+
+using Storage =
+    absl::aligned_storage_t<sizeof(ThrowingValue<>), alignof(ThrowingValue<>)>;
+
+TEST(ThrowingValueTest, NonThrowingPlacementDelete) {
+  constexpr int kArrayLen = 2;
+  // We intentionally create extra space to store the tag allocated by placement
+  // new[].
+  constexpr int kStorageLen = 4;
+
+  Storage buf;
+  Storage array_buf[kStorageLen];
+  auto* placed = new (&buf) ThrowingValue<>(1);
+  auto placed_array = new (&array_buf) ThrowingValue<>[kArrayLen];
+
+  SetCountdown();
+  ExpectNoThrow([placed, &buf]() {
+    placed->~ThrowingValue<>();
+    ThrowingValue<>::operator delete(placed, &buf);
+  });
+
+  SetCountdown();
+  ExpectNoThrow([&, placed_array]() {
+    for (int i = 0; i < kArrayLen; ++i) placed_array[i].~ThrowingValue<>();
+    ThrowingValue<>::operator delete[](placed_array, &array_buf);
+  });
+
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, NonThrowingDestructor) {
+  auto* allocated = new ThrowingValue<>();
+
+  SetCountdown();
+  ExpectNoThrow([allocated]() { delete allocated; });
+  UnsetCountdown();
+}
+
+TEST(ThrowingBoolTest, ThrowingBool) {
+  ThrowingBool t = true;
+
+  // Test that it's contextually convertible to bool
+  if (t) {  // NOLINT(whitespace/empty_if_body)
+  }
+  EXPECT_TRUE(t);
+
+  TestOp([&]() { (void)!t; });
+}
+
+TEST(ThrowingAllocatorTest, MemoryManagement) {
+  // Just exercise the memory management capabilities under LSan to make sure
+  // we don't leak.
+  ThrowingAllocator<int> int_alloc;
+  int* ip = int_alloc.allocate(1);
+  int_alloc.deallocate(ip, 1);
+  int* i_array = int_alloc.allocate(2);
+  int_alloc.deallocate(i_array, 2);
+
+  ThrowingAllocator<ThrowingValue<>> tv_alloc;
+  ThrowingValue<>* ptr = tv_alloc.allocate(1);
+  tv_alloc.deallocate(ptr, 1);
+  ThrowingValue<>* tv_array = tv_alloc.allocate(2);
+  tv_alloc.deallocate(tv_array, 2);
+}
+
+TEST(ThrowingAllocatorTest, CallsGlobalNew) {
+  ThrowingAllocator<ThrowingValue<>, AllocSpec::kNoThrowAllocate> nothrow_alloc;
+  ThrowingValue<>* ptr;
+
+  SetCountdown();
+  // This will only throw if ThrowingValue::new is called.
+  ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); });
+  nothrow_alloc.deallocate(ptr, 1);
+
+  UnsetCountdown();
+}
+
+TEST(ThrowingAllocatorTest, ThrowingConstructors) {
+  ThrowingAllocator<int> int_alloc;
+  int* ip = nullptr;
+
+  SetCountdown();
+  EXPECT_THROW(ip = int_alloc.allocate(1), TestException);
+  ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
+
+  *ip = 1;
+  SetCountdown();
+  EXPECT_THROW(int_alloc.construct(ip, 2), TestException);
+  EXPECT_EQ(*ip, 1);
+  int_alloc.deallocate(ip, 1);
+
+  UnsetCountdown();
+}
+
+TEST(ThrowingAllocatorTest, NonThrowingConstruction) {
+  {
+    ThrowingAllocator<int, AllocSpec::kNoThrowAllocate> int_alloc;
+    int* ip = nullptr;
+
+    SetCountdown();
+    ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
+
+    SetCountdown();
+    ExpectNoThrow([&]() { int_alloc.construct(ip, 2); });
+
+    EXPECT_EQ(*ip, 2);
+    int_alloc.deallocate(ip, 1);
+
+    UnsetCountdown();
+  }
+
+  {
+    ThrowingAllocator<int> int_alloc;
+    int* ip = nullptr;
+    ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
+    ExpectNoThrow([&]() { int_alloc.construct(ip, 2); });
+    EXPECT_EQ(*ip, 2);
+    int_alloc.deallocate(ip, 1);
+  }
+
+  {
+    ThrowingAllocator<ThrowingValue<>, AllocSpec::kNoThrowAllocate>
+        nothrow_alloc;
+    ThrowingValue<>* ptr;
+
+    SetCountdown();
+    ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); });
+
+    SetCountdown();
+    ExpectNoThrow(
+        [&]() { nothrow_alloc.construct(ptr, 2, testing::nothrow_ctor); });
+
+    EXPECT_EQ(ptr->Get(), 2);
+    nothrow_alloc.destroy(ptr);
+    nothrow_alloc.deallocate(ptr, 1);
+
+    UnsetCountdown();
+  }
+
+  {
+    ThrowingAllocator<int> a;
+
+    SetCountdown();
+    ExpectNoThrow([&]() { ThrowingAllocator<int> a1 = a; });
+
+    SetCountdown();
+    ExpectNoThrow([&]() { ThrowingAllocator<int> a1 = std::move(a); });
+
+    UnsetCountdown();
+  }
+}
+
+TEST(ThrowingAllocatorTest, ThrowingAllocatorConstruction) {
+  ThrowingAllocator<int> a;
+  TestOp([]() { ThrowingAllocator<int> a; });
+  TestOp([&]() { a.select_on_container_copy_construction(); });
+}
+
+TEST(ThrowingAllocatorTest, State) {
+  ThrowingAllocator<int> a1, a2;
+  EXPECT_NE(a1, a2);
+
+  auto a3 = a1;
+  EXPECT_EQ(a3, a1);
+  int* ip = a1.allocate(1);
+  EXPECT_EQ(a3, a1);
+  a3.deallocate(ip, 1);
+  EXPECT_EQ(a3, a1);
+}
+
+TEST(ThrowingAllocatorTest, InVector) {
+  std::vector<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> v;
+  for (int i = 0; i < 20; ++i) v.push_back({});
+  for (int i = 0; i < 20; ++i) v.pop_back();
+}
+
+TEST(ThrowingAllocatorTest, InList) {
+  std::list<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> l;
+  for (int i = 0; i < 20; ++i) l.push_back({});
+  for (int i = 0; i < 20; ++i) l.pop_back();
+  for (int i = 0; i < 20; ++i) l.push_front({});
+  for (int i = 0; i < 20; ++i) l.pop_front();
+}
+
+template <typename TesterInstance, typename = void>
+struct NullaryTestValidator : public std::false_type {};
+
+template <typename TesterInstance>
+struct NullaryTestValidator<
+    TesterInstance,
+    absl::void_t<decltype(std::declval<TesterInstance>().Test())>>
+    : public std::true_type {};
+
+template <typename TesterInstance>
+bool HasNullaryTest(const TesterInstance&) {
+  return NullaryTestValidator<TesterInstance>::value;
+}
+
+void DummyOp(void*) {}
+
+template <typename TesterInstance, typename = void>
+struct UnaryTestValidator : public std::false_type {};
+
+template <typename TesterInstance>
+struct UnaryTestValidator<
+    TesterInstance,
+    absl::void_t<decltype(std::declval<TesterInstance>().Test(DummyOp))>>
+    : public std::true_type {};
+
+template <typename TesterInstance>
+bool HasUnaryTest(const TesterInstance&) {
+  return UnaryTestValidator<TesterInstance>::value;
+}
+
+TEST(ExceptionSafetyTesterTest, IncompleteTypesAreNotTestable) {
+  using T = exceptions_internal::UninitializedT;
+  auto op = [](T* t) {};
+  auto inv = [](T*) { return testing::AssertionSuccess(); };
+  auto fac = []() { return absl::make_unique<T>(); };
+
+  // Test that providing operation and invariants still does not allow for
+  // the invocation of .Test() and .Test(op) because it lacks a factory
+  auto without_fac =
+      testing::MakeExceptionSafetyTester().WithOperation(op).WithContracts(
+          inv, testing::strong_guarantee);
+  EXPECT_FALSE(HasNullaryTest(without_fac));
+  EXPECT_FALSE(HasUnaryTest(without_fac));
+
+  // Test that providing contracts and factory allows the invocation of
+  // .Test(op) but does not allow for .Test() because it lacks an operation
+  auto without_op = testing::MakeExceptionSafetyTester()
+                        .WithContracts(inv, testing::strong_guarantee)
+                        .WithFactory(fac);
+  EXPECT_FALSE(HasNullaryTest(without_op));
+  EXPECT_TRUE(HasUnaryTest(without_op));
+
+  // Test that providing operation and factory still does not allow for the
+  // invocation of .Test() and .Test(op) because it lacks contracts
+  auto without_inv =
+      testing::MakeExceptionSafetyTester().WithOperation(op).WithFactory(fac);
+  EXPECT_FALSE(HasNullaryTest(without_inv));
+  EXPECT_FALSE(HasUnaryTest(without_inv));
+}
+
+struct ExampleStruct {};
+
+std::unique_ptr<ExampleStruct> ExampleFunctionFactory() {
+  return absl::make_unique<ExampleStruct>();
+}
+
+void ExampleFunctionOperation(ExampleStruct*) {}
+
+testing::AssertionResult ExampleFunctionContract(ExampleStruct*) {
+  return testing::AssertionSuccess();
+}
+
+struct {
+  std::unique_ptr<ExampleStruct> operator()() const {
+    return ExampleFunctionFactory();
+  }
+} example_struct_factory;
+
+struct {
+  void operator()(ExampleStruct*) const {}
+} example_struct_operation;
+
+struct {
+  testing::AssertionResult operator()(ExampleStruct* example_struct) const {
+    return ExampleFunctionContract(example_struct);
+  }
+} example_struct_contract;
+
+auto example_lambda_factory = []() { return ExampleFunctionFactory(); };
+
+auto example_lambda_operation = [](ExampleStruct*) {};
+
+auto example_lambda_contract = [](ExampleStruct* example_struct) {
+  return
ExampleFunctionContract(example_struct); +}; + +// Testing that function references, pointers, structs with operator() and +// lambdas can all be used with ExceptionSafetyTester +TEST(ExceptionSafetyTesterTest, MixedFunctionTypes) { + // function reference + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(ExampleFunctionFactory) + .WithOperation(ExampleFunctionOperation) + .WithContracts(ExampleFunctionContract) + .Test()); + + // function pointer + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(&ExampleFunctionFactory) + .WithOperation(&ExampleFunctionOperation) + .WithContracts(&ExampleFunctionContract) + .Test()); + + // struct + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(example_struct_factory) + .WithOperation(example_struct_operation) + .WithContracts(example_struct_contract) + .Test()); + + // lambda + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(example_lambda_factory) + .WithOperation(example_lambda_operation) + .WithContracts(example_lambda_contract) + .Test()); +} + +struct NonNegative { + bool operator==(const NonNegative& other) const { return i == other.i; } + int i; +}; + +testing::AssertionResult CheckNonNegativeInvariants(NonNegative* g) { + if (g->i >= 0) { + return testing::AssertionSuccess(); + } + return testing::AssertionFailure() + << "i should be non-negative but is " << g->i; +} + +struct { + template + void operator()(T* t) const { + (*t)(); + } +} invoker; + +auto tester = + testing::MakeExceptionSafetyTester().WithOperation(invoker).WithContracts( + CheckNonNegativeInvariants); +auto strong_tester = tester.WithContracts(testing::strong_guarantee); + +struct FailsBasicGuarantee : public NonNegative { + void operator()() { + --i; + ThrowingValue<> bomb; + ++i; + } +}; + +TEST(ExceptionCheckTest, BasicGuaranteeFailure) { + EXPECT_FALSE(tester.WithInitialValue(FailsBasicGuarantee{}).Test()); +} + +struct FollowsBasicGuarantee : public NonNegative { + void operator()() { + ++i; + ThrowingValue<> bomb; + } +}; + +TEST(ExceptionCheckTest, BasicGuarantee) { + EXPECT_TRUE(tester.WithInitialValue(FollowsBasicGuarantee{}).Test()); +} + +TEST(ExceptionCheckTest, StrongGuaranteeFailure) { + EXPECT_FALSE(strong_tester.WithInitialValue(FailsBasicGuarantee{}).Test()); + EXPECT_FALSE(strong_tester.WithInitialValue(FollowsBasicGuarantee{}).Test()); +} + +struct BasicGuaranteeWithExtraContracts : public NonNegative { + // After operator(), i is incremented. 
If operator() throws, i is set to 9999 + void operator()() { + int old_i = i; + i = kExceptionSentinel; + ThrowingValue<> bomb; + i = ++old_i; + } + + static constexpr int kExceptionSentinel = 9999; +}; +constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel; + +TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) { + auto tester_with_val = + tester.WithInitialValue(BasicGuaranteeWithExtraContracts{}); + EXPECT_TRUE(tester_with_val.Test()); + EXPECT_TRUE( + tester_with_val + .WithContracts([](BasicGuaranteeWithExtraContracts* o) { + if (o->i == BasicGuaranteeWithExtraContracts::kExceptionSentinel) { + return testing::AssertionSuccess(); + } + return testing::AssertionFailure() + << "i should be " + << BasicGuaranteeWithExtraContracts::kExceptionSentinel + << ", but is " << o->i; + }) + .Test()); +} + +struct FollowsStrongGuarantee : public NonNegative { + void operator()() { ThrowingValue<> bomb; } +}; + +TEST(ExceptionCheckTest, StrongGuarantee) { + EXPECT_TRUE(tester.WithInitialValue(FollowsStrongGuarantee{}).Test()); + EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}).Test()); +} + +struct HasReset : public NonNegative { + void operator()() { + i = -1; + ThrowingValue<> bomb; + i = 1; + } + + void reset() { i = 0; } +}; + +testing::AssertionResult CheckHasResetContracts(HasReset* h) { + h->reset(); + return testing::AssertionResult(h->i == 0); +} + +TEST(ExceptionCheckTest, ModifyingChecker) { + auto set_to_1000 = [](FollowsBasicGuarantee* g) { + g->i = 1000; + return testing::AssertionSuccess(); + }; + auto is_1000 = [](FollowsBasicGuarantee* g) { + return testing::AssertionResult(g->i == 1000); + }; + auto increment = [](FollowsStrongGuarantee* g) { + ++g->i; + return testing::AssertionSuccess(); + }; + + EXPECT_FALSE(tester.WithInitialValue(FollowsBasicGuarantee{}) + .WithContracts(set_to_1000, is_1000) + .Test()); + EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}) + .WithContracts(increment) + .Test()); + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithInitialValue(HasReset{}) + .WithContracts(CheckHasResetContracts) + .Test(invoker)); +} + +TEST(ExceptionSafetyTesterTest, ResetsCountdown) { + auto test = + testing::MakeExceptionSafetyTester() + .WithInitialValue(ThrowingValue<>()) + .WithContracts([](ThrowingValue<>*) { return AssertionSuccess(); }) + .WithOperation([](ThrowingValue<>*) {}); + ASSERT_TRUE(test.Test()); + // If the countdown isn't reset because there were no exceptions thrown, then + // this will fail with a termination from an unhandled exception + EXPECT_TRUE(test.Test()); +} + +struct NonCopyable : public NonNegative { + NonCopyable(const NonCopyable&) = delete; + NonCopyable() : NonNegative{0} {} + + void operator()() { ThrowingValue<> bomb; } +}; + +TEST(ExceptionCheckTest, NonCopyable) { + auto factory = []() { return absl::make_unique(); }; + EXPECT_TRUE(tester.WithFactory(factory).Test()); + EXPECT_TRUE(strong_tester.WithFactory(factory).Test()); +} + +struct NonEqualityComparable : public NonNegative { + void operator()() { ThrowingValue<> bomb; } + + void ModifyOnThrow() { + ++i; + ThrowingValue<> bomb; + static_cast(bomb); + --i; + } +}; + +TEST(ExceptionCheckTest, NonEqualityComparable) { + auto nec_is_strong = [](NonEqualityComparable* nec) { + return testing::AssertionResult(nec->i == NonEqualityComparable().i); + }; + auto strong_nec_tester = tester.WithInitialValue(NonEqualityComparable{}) + .WithContracts(nec_is_strong); + + EXPECT_TRUE(strong_nec_tester.Test()); + 
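+  // ModifyOnThrow() increments i and only decrements it after the
+  // ThrowingValue constructor succeeds, so when that constructor throws under
+  // the countdown, i stays modified and the nec_is_strong contract is
+  // expected to fail: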
+  EXPECT_FALSE(strong_nec_tester.Test(
+      [](NonEqualityComparable* n) { n->ModifyOnThrow(); }));
+}
+
+template <typename T>
+struct ExhaustivenessTester {
+  void operator()() {
+    successes |= 1;
+    T b1;
+    static_cast<void>(b1);
+    successes |= (1 << 1);
+    T b2;
+    static_cast<void>(b2);
+    successes |= (1 << 2);
+    T b3;
+    static_cast<void>(b3);
+    successes |= (1 << 3);
+  }
+
+  bool operator==(const ExhaustivenessTester<ThrowingValue<>>&) const {
+    return true;
+  }
+
+  static unsigned char successes;
+};
+
+struct {
+  template <typename T>
+  testing::AssertionResult operator()(ExhaustivenessTester<T>*) const {
+    return testing::AssertionSuccess();
+  }
+} CheckExhaustivenessTesterContracts;
+
+template <typename T>
+unsigned char ExhaustivenessTester<T>::successes = 0;
+
+TEST(ExceptionCheckTest, Exhaustiveness) {
+  auto exhaust_tester = testing::MakeExceptionSafetyTester()
+                            .WithContracts(CheckExhaustivenessTesterContracts)
+                            .WithOperation(invoker);
+
+  EXPECT_TRUE(
+      exhaust_tester.WithInitialValue(ExhaustivenessTester<int>{}).Test());
+  EXPECT_EQ(ExhaustivenessTester<int>::successes, 0xF);
+
+  EXPECT_TRUE(
+      exhaust_tester.WithInitialValue(ExhaustivenessTester<ThrowingValue<>>{})
+          .WithContracts(testing::strong_guarantee)
+          .Test());
+  EXPECT_EQ(ExhaustivenessTester<ThrowingValue<>>::successes, 0xF);
+}
+
+struct LeaksIfCtorThrows : private exceptions_internal::TrackedObject {
+  LeaksIfCtorThrows() : TrackedObject(ABSL_PRETTY_FUNCTION) {
+    ++counter;
+    ThrowingValue<> v;
+    static_cast<void>(v);
+    --counter;
+  }
+  LeaksIfCtorThrows(const LeaksIfCtorThrows&) noexcept
+      : TrackedObject(ABSL_PRETTY_FUNCTION) {}
+  static int counter;
+};
+int LeaksIfCtorThrows::counter = 0;
+
+TEST(ExceptionCheckTest, TestLeakyCtor) {
+  testing::TestThrowingCtor<LeaksIfCtorThrows>();
+  EXPECT_EQ(LeaksIfCtorThrows::counter, 1);
+  LeaksIfCtorThrows::counter = 0;
+}
+
+struct Tracked : private exceptions_internal::TrackedObject {
+  Tracked() : TrackedObject(ABSL_PRETTY_FUNCTION) {}
+};
+
+TEST(ConstructorTrackerTest, CreatedBefore) {
+  Tracked a, b, c;
+  exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
+}
+
+TEST(ConstructorTrackerTest, CreatedAfter) {
+  exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
+  Tracked a, b, c;
+}
+
+TEST(ConstructorTrackerTest, NotDestroyedAfter) {
+  absl::aligned_storage_t<sizeof(Tracked), alignof(Tracked)> storage;
+  EXPECT_NONFATAL_FAILURE(
+      {
+        exceptions_internal::ConstructorTracker ct(
+            exceptions_internal::countdown);
+        new (&storage) Tracked;
+      },
+      "not destroyed");
+}
+
+TEST(ConstructorTrackerTest, DestroyedTwice) {
+  exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
+  EXPECT_NONFATAL_FAILURE(
+      {
+        Tracked t;
+        t.~Tracked();
+      },
+      "re-destroyed");
+}
+
+TEST(ConstructorTrackerTest, ConstructedTwice) {
+  exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
+  absl::aligned_storage_t<sizeof(Tracked), alignof(Tracked)> storage;
+  EXPECT_NONFATAL_FAILURE(
+      {
+        new (&storage) Tracked;
+        new (&storage) Tracked;
+        reinterpret_cast<Tracked*>(&storage)->~Tracked();
+      },
+      "re-constructed");
+}
+
+TEST(ThrowingValueTraitsTest, RelationalOperators) {
+  ThrowingValue<> a, b;
+  EXPECT_TRUE((std::is_convertible<decltype(a == b), bool>::value));
+  EXPECT_TRUE((std::is_convertible<decltype(a != b), bool>::value));
+  EXPECT_TRUE((std::is_convertible<decltype(a < b), bool>::value));
+  EXPECT_TRUE((std::is_convertible<decltype(a <= b), bool>::value));
+  EXPECT_TRUE((std::is_convertible<decltype(a > b), bool>::value));
+  EXPECT_TRUE((std::is_convertible<decltype(a >= b), bool>::value));
+}
+
+TEST(ThrowingAllocatorTraitsTest, Assignablility) {
+  EXPECT_TRUE(absl::is_move_assignable<ThrowingAllocator<int>>::value);
+  EXPECT_TRUE(absl::is_copy_assignable<ThrowingAllocator<int>>::value);
+  EXPECT_TRUE(std::is_nothrow_move_assignable<ThrowingAllocator<int>>::value);
+  EXPECT_TRUE(std::is_nothrow_copy_assignable<ThrowingAllocator<int>>::value);
+}
+
+}  // namespace
+
+}  // namespace testing
+
+#endif  // ABSL_HAVE_EXCEPTIONS
diff --git a/base/abseil/absl/base/inline_variable_test.cc b/base/abseil/absl/base/inline_variable_test.cc
new file mode 100644
index 0000000..37a40e1
--- /dev/null
+++ b/base/abseil/absl/base/inline_variable_test.cc
@@ -0,0 +1,64 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <type_traits>
+
+#include "absl/base/internal/inline_variable.h"
+#include "absl/base/internal/inline_variable_testing.h"
+
+#include "gtest/gtest.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inline_variable_testing_internal {
+namespace {
+
+TEST(InlineVariableTest, Constexpr) {
+  static_assert(inline_variable_foo.value == 5, "");
+  static_assert(other_inline_variable_foo.value == 5, "");
+  static_assert(inline_variable_int == 5, "");
+  static_assert(other_inline_variable_int == 5, "");
+}
+
+TEST(InlineVariableTest, DefaultConstructedIdentityEquality) {
+  EXPECT_EQ(get_foo_a().value, 5);
+  EXPECT_EQ(get_foo_b().value, 5);
+  EXPECT_EQ(&get_foo_a(), &get_foo_b());
+}
+
+TEST(InlineVariableTest, DefaultConstructedIdentityInequality) {
+  EXPECT_NE(&inline_variable_foo, &other_inline_variable_foo);
+}
+
+TEST(InlineVariableTest, InitializedIdentityEquality) {
+  EXPECT_EQ(get_int_a(), 5);
+  EXPECT_EQ(get_int_b(), 5);
+  EXPECT_EQ(&get_int_a(), &get_int_b());
+}
+
+TEST(InlineVariableTest, InitializedIdentityInequality) {
+  EXPECT_NE(&inline_variable_int, &other_inline_variable_int);
+}
+
+TEST(InlineVariableTest, FunPtrType) {
+  static_assert(
+      std::is_same<void(*)(),
+                   std::decay<decltype(inline_variable_fun_ptr)>::type>::value,
+      "");
+}
+
+}  // namespace
+}  // namespace inline_variable_testing_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/inline_variable_test_a.cc b/base/abseil/absl/base/inline_variable_test_a.cc
new file mode 100644
index 0000000..f96a58d
--- /dev/null
+++ b/base/abseil/absl/base/inline_variable_test_a.cc
@@ -0,0 +1,27 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
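+
+// The identity tests above hinge on ABSL_INTERNAL_INLINE_CONSTEXPR emulating
+// C++17 inline variables: a single definition shared by every translation
+// unit. A sketch of the idiom (illustrative only, assuming the macro's
+// (type, name, init) form from inline_variable.h):
+//
+//   // In a header included by several .cc files:
+//   ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
+//   // Every TU that takes &inline_variable_int then sees the same address,
+//   // which is why &get_int_a() == &get_int_b() holds across
+//   // inline_variable_test_a.cc and inline_variable_test_b.cc.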
+ +#include "absl/base/internal/inline_variable_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { + +const Foo& get_foo_a() { return inline_variable_foo; } + +const int& get_int_a() { return inline_variable_int; } + +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/base/inline_variable_test_b.cc b/base/abseil/absl/base/inline_variable_test_b.cc new file mode 100644 index 0000000..038adc3 --- /dev/null +++ b/base/abseil/absl/base/inline_variable_test_b.cc @@ -0,0 +1,27 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/inline_variable_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { + +const Foo& get_foo_b() { return inline_variable_foo; } + +const int& get_int_b() { return inline_variable_int; } + +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/base/internal/atomic_hook.h b/base/abseil/absl/base/internal/atomic_hook.h new file mode 100644 index 0000000..d885bc0 --- /dev/null +++ b/base/abseil/absl/base/internal/atomic_hook.h @@ -0,0 +1,179 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ +#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" + +#ifdef _MSC_FULL_VER +#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0 +#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0 +#else +#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1 +#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1 +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +template +class AtomicHook; + +// `AtomicHook` is a helper class, templatized on a raw function pointer type, +// for implementing Abseil customization hooks. It is a callable object that +// dispatches to the registered hook. Objects of type `AtomicHook` must have +// static or thread storage duration. +// +// A default constructed object performs a no-op (and returns a default +// constructed object) if no hook has been registered. +// +// Hooks can be pre-registered via constant initialization, for example, +// `ABSL_CONST_INIT static AtomicHook my_hook(DefaultAction);` +// and then changed at runtime via a call to `Store()`. 
+// +// Reads and writes guarantee memory_order_acquire/memory_order_release +// semantics. +template +class AtomicHook { + public: + using FnPtr = ReturnType (*)(Args...); + + // Constructs an object that by default performs a no-op (and + // returns a default constructed object) when no hook as been registered. + constexpr AtomicHook() : AtomicHook(DummyFunction) {} + + // Constructs an object that by default dispatches to/returns the + // pre-registered default_fn when no hook has been registered at runtime. +#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT + explicit constexpr AtomicHook(FnPtr default_fn) + : hook_(default_fn), default_fn_(default_fn) {} +#else + // On MSVC, this function sometimes executes after dynamic initialization =(. + // If a non-zero `hook_` has been installed by a dynamic initializer, we want + // to preserve it. If not, `hook_` will be zero initialized and we have no + // need to set it to `kUninitialized`. + // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html + explicit constexpr AtomicHook(FnPtr default_fn) + : /* hook_(deliberately omitted), */ default_fn_(default_fn) { + static_assert(kUninitialized == 0, "here we rely on zero-initialization"); + } +#endif + + // Stores the provided function pointer as the value for this hook. + // + // This is intended to be called once. Multiple calls are legal only if the + // same function pointer is provided for each call. The store is implemented + // as a memory_order_release operation, and read accesses are implemented as + // memory_order_acquire. + void Store(FnPtr fn) { + bool success = DoStore(fn); + static_cast(success); + assert(success); + } + + // Invokes the registered callback. If no callback has yet been registered, a + // default-constructed object of the appropriate type is returned instead. + template + ReturnType operator()(CallArgs&&... args) const { + return DoLoad()(std::forward(args)...); + } + + // Returns the registered callback, or nullptr if none has been registered. + // Useful if client code needs to conditionalize behavior based on whether a + // callback was registered. + // + // Note that atomic_hook.Load()() and atomic_hook() have different semantics: + // operator()() will perform a no-op if no callback was registered, while + // Load()() will dereference a null function pointer. Prefer operator()() to + // Load()() unless you must conditionalize behavior on whether a hook was + // registered. + FnPtr Load() const { + FnPtr ptr = DoLoad(); + return (ptr == DummyFunction) ? nullptr : ptr; + } + + private: + static ReturnType DummyFunction(Args...) { + return ReturnType(); + } + + // Current versions of MSVC (as of September 2017) have a broken + // implementation of std::atomic: Its constructor attempts to do the + // equivalent of a reinterpret_cast in a constexpr context, which is not + // allowed. + // + // This causes an issue when building with LLVM under Windows. To avoid this, + // we use a less-efficient, intptr_t-based implementation on Windows. +#if ABSL_HAVE_WORKING_ATOMIC_POINTER + // Return the stored value, or DummyFunction if no value has been stored. + FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); } + + // Store the given value. Returns false if a different value was already + // stored to this object. 
+ bool DoStore(FnPtr fn) { + assert(fn); + FnPtr expected = default_fn_; + const bool store_succeeded = hook_.compare_exchange_strong( + expected, fn, std::memory_order_acq_rel, std::memory_order_acquire); + const bool same_value_already_stored = (expected == fn); + return store_succeeded || same_value_already_stored; + } + + std::atomic hook_; +#else // !ABSL_HAVE_WORKING_ATOMIC_POINTER + // Use a sentinel value unlikely to be the address of an actual function. + static constexpr intptr_t kUninitialized = 0; + + static_assert(sizeof(intptr_t) >= sizeof(FnPtr), + "intptr_t can't contain a function pointer"); + + FnPtr DoLoad() const { + const intptr_t value = hook_.load(std::memory_order_acquire); + if (value == kUninitialized) { + return default_fn_; + } + return reinterpret_cast(value); + } + + bool DoStore(FnPtr fn) { + assert(fn); + const auto value = reinterpret_cast(fn); + intptr_t expected = kUninitialized; + const bool store_succeeded = hook_.compare_exchange_strong( + expected, value, std::memory_order_acq_rel, std::memory_order_acquire); + const bool same_value_already_stored = (expected == value); + return store_succeeded || same_value_already_stored; + } + + std::atomic hook_; +#endif + + const FnPtr default_fn_; +}; + +#undef ABSL_HAVE_WORKING_ATOMIC_POINTER +#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ diff --git a/base/abseil/absl/base/internal/atomic_hook_test.cc b/base/abseil/absl/base/internal/atomic_hook_test.cc new file mode 100644 index 0000000..794072e --- /dev/null +++ b/base/abseil/absl/base/internal/atomic_hook_test.cc @@ -0,0 +1,94 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/atomic_hook.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/atomic_hook_test_helper.h" + +namespace { + +using ::testing::Eq; + +int value = 0; +void TestHook(int x) { value = x; } + +TEST(AtomicHookTest, NoDefaultFunction) { + ABSL_CONST_INIT static absl::base_internal::AtomicHook hook; + value = 0; + + // Test the default DummyFunction. + EXPECT_TRUE(hook.Load() == nullptr); + EXPECT_EQ(value, 0); + hook(1); + EXPECT_EQ(value, 0); + + // Test a stored hook. + hook.Store(TestHook); + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 0); + hook(1); + EXPECT_EQ(value, 1); + + // Calling Store() with the same hook should not crash. + hook.Store(TestHook); + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 1); + hook(2); + EXPECT_EQ(value, 2); +} + +TEST(AtomicHookTest, WithDefaultFunction) { + // Set the default value to TestHook at compile-time. + ABSL_CONST_INIT static absl::base_internal::AtomicHook hook( + TestHook); + value = 0; + + // Test the default value is TestHook. 
+ EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 0); + hook(1); + EXPECT_EQ(value, 1); + + // Calling Store() with the same hook should not crash. + hook.Store(TestHook); + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 1); + hook(2); + EXPECT_EQ(value, 2); +} + +ABSL_CONST_INIT int override_func_calls = 0; +void OverrideFunc() { override_func_calls++; } +static struct OverrideInstaller { + OverrideInstaller() { absl::atomic_hook_internal::func.Store(OverrideFunc); } +} override_installer; + +TEST(AtomicHookTest, DynamicInitFromAnotherTU) { + // MSVC 14.2 doesn't do constexpr static init correctly; in particular it + // tends to sequence static init (i.e. defaults) of `AtomicHook` objects + // after their dynamic init (i.e. overrides), overwriting whatever value was + // written during dynamic init. This regression test validates the fix. + // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html + EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0)); + EXPECT_THAT(override_func_calls, Eq(0)); + absl::atomic_hook_internal::func(); + EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0)); + EXPECT_THAT(override_func_calls, Eq(1)); + EXPECT_THAT(absl::atomic_hook_internal::func.Load(), Eq(OverrideFunc)); +} + +} // namespace diff --git a/base/abseil/absl/base/internal/atomic_hook_test_helper.cc b/base/abseil/absl/base/internal/atomic_hook_test_helper.cc new file mode 100644 index 0000000..f95d962 --- /dev/null +++ b/base/abseil/absl/base/internal/atomic_hook_test_helper.cc @@ -0,0 +1,31 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/atomic_hook_test_helper.h" + +#include "absl/base/attributes.h" +#include "absl/base/internal/atomic_hook.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace atomic_hook_internal { + +ABSL_CONST_INIT absl::base_internal::AtomicHook func(DefaultFunc); +ABSL_CONST_INIT int default_func_calls = 0; +void DefaultFunc() { default_func_calls++; } +void RegisterFunc(VoidF f) { func.Store(f); } + +} // namespace atomic_hook_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/base/internal/atomic_hook_test_helper.h b/base/abseil/absl/base/internal/atomic_hook_test_helper.h new file mode 100644 index 0000000..3e72b49 --- /dev/null +++ b/base/abseil/absl/base/internal/atomic_hook_test_helper.h @@ -0,0 +1,34 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ +#define ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ + +#include "absl/base/internal/atomic_hook.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace atomic_hook_internal { + +using VoidF = void (*)(); +extern absl::base_internal::AtomicHook func; +extern int default_func_calls; +void DefaultFunc(); +void RegisterFunc(VoidF func); + +} // namespace atomic_hook_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ diff --git a/base/abseil/absl/base/internal/bits.h b/base/abseil/absl/base/internal/bits.h new file mode 100644 index 0000000..8b03453 --- /dev/null +++ b/base/abseil/absl/base/internal/bits.h @@ -0,0 +1,218 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_BITS_H_ +#define ABSL_BASE_INTERNAL_BITS_H_ + +// This file contains bitwise ops which are implementation details of various +// absl libraries. + +#include + +#include "absl/base/config.h" + +// Clang on Windows has __builtin_clzll; otherwise we need to use the +// windows intrinsic functions. +#if defined(_MSC_VER) +#include +#if defined(_M_X64) +#pragma intrinsic(_BitScanReverse64) +#pragma intrinsic(_BitScanForward64) +#endif +#pragma intrinsic(_BitScanReverse) +#pragma intrinsic(_BitScanForward) +#endif + +#include "absl/base/attributes.h" + +#if defined(_MSC_VER) +// We can achieve something similar to attribute((always_inline)) with MSVC by +// using the __forceinline keyword, however this is not perfect. MSVC is +// much less aggressive about inlining, and even with the __forceinline keyword. +#define ABSL_BASE_INTERNAL_FORCEINLINE __forceinline +#else +// Use default attribute inline. +#define ABSL_BASE_INTERNAL_FORCEINLINE inline ABSL_ATTRIBUTE_ALWAYS_INLINE +#endif + + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) { + int zeroes = 60; + if (n >> 32) { + zeroes -= 32; + n >>= 32; + } + if (n >> 16) { + zeroes -= 16; + n >>= 16; + } + if (n >> 8) { + zeroes -= 8; + n >>= 8; + } + if (n >> 4) { + zeroes -= 4; + n >>= 4; + } + return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) { +#if defined(_MSC_VER) && defined(_M_X64) + // MSVC does not have __buitin_clzll. Use _BitScanReverse64. + unsigned long result = 0; // NOLINT(runtime/int) + if (_BitScanReverse64(&result, n)) { + return 63 - result; + } + return 64; +#elif defined(_MSC_VER) + // MSVC does not have __buitin_clzll. 
Compose two calls to _BitScanReverse + unsigned long result = 0; // NOLINT(runtime/int) + if ((n >> 32) && _BitScanReverse(&result, n >> 32)) { + return 31 - result; + } + if (_BitScanReverse(&result, n)) { + return 63 - result; + } + return 64; +#elif defined(__GNUC__) + // Use __builtin_clzll, which uses the following instructions: + // x86: bsr + // ARM64: clz + // PPC: cntlzd + static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) + "__builtin_clzll does not take 64-bit arg"); + + // Handle 0 as a special case because __builtin_clzll(0) is undefined. + if (n == 0) { + return 64; + } + return __builtin_clzll(n); +#else + return CountLeadingZeros64Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) { + int zeroes = 28; + if (n >> 16) { + zeroes -= 16; + n >>= 16; + } + if (n >> 8) { + zeroes -= 8; + n >>= 8; + } + if (n >> 4) { + zeroes -= 4; + n >>= 4; + } + return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) { +#if defined(_MSC_VER) + unsigned long result = 0; // NOLINT(runtime/int) + if (_BitScanReverse(&result, n)) { + return 31 - result; + } + return 32; +#elif defined(__GNUC__) + // Use __builtin_clz, which uses the following instructions: + // x86: bsr + // ARM64: clz + // PPC: cntlzd + static_assert(sizeof(int) == sizeof(n), + "__builtin_clz does not take 32-bit arg"); + + // Handle 0 as a special case because __builtin_clz(0) is undefined. + if (n == 0) { + return 32; + } + return __builtin_clz(n); +#else + return CountLeadingZeros32Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) { + int c = 63; + n &= ~n + 1; + if (n & 0x00000000FFFFFFFF) c -= 32; + if (n & 0x0000FFFF0000FFFF) c -= 16; + if (n & 0x00FF00FF00FF00FF) c -= 8; + if (n & 0x0F0F0F0F0F0F0F0F) c -= 4; + if (n & 0x3333333333333333) c -= 2; + if (n & 0x5555555555555555) c -= 1; + return c; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) { +#if defined(_MSC_VER) && defined(_M_X64) + unsigned long result = 0; // NOLINT(runtime/int) + _BitScanForward64(&result, n); + return result; +#elif defined(_MSC_VER) + unsigned long result = 0; // NOLINT(runtime/int) + if (static_cast(n) == 0) { + _BitScanForward(&result, n >> 32); + return result + 32; + } + _BitScanForward(&result, n); + return result; +#elif defined(__GNUC__) + static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) + "__builtin_ctzll does not take 64-bit arg"); + return __builtin_ctzll(n); +#else + return CountTrailingZerosNonZero64Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) { + int c = 31; + n &= ~n + 1; + if (n & 0x0000FFFF) c -= 16; + if (n & 0x00FF00FF) c -= 8; + if (n & 0x0F0F0F0F) c -= 4; + if (n & 0x33333333) c -= 2; + if (n & 0x55555555) c -= 1; + return c; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) { +#if defined(_MSC_VER) + unsigned long result = 0; // NOLINT(runtime/int) + _BitScanForward(&result, n); + return result; +#elif defined(__GNUC__) + static_assert(sizeof(int) == sizeof(n), + "__builtin_ctz does not take 32-bit arg"); + return __builtin_ctz(n); +#else + return CountTrailingZerosNonZero32Slow(n); +#endif +} + +#undef ABSL_BASE_INTERNAL_FORCEINLINE + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_BITS_H_ diff --git 
a/base/abseil/absl/base/internal/bits_test.cc b/base/abseil/absl/base/internal/bits_test.cc new file mode 100644 index 0000000..7855fa6 --- /dev/null +++ b/base/abseil/absl/base/internal/bits_test.cc @@ -0,0 +1,97 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/bits.h" + +#include "gtest/gtest.h" + +namespace { + +int CLZ64(uint64_t n) { + int fast = absl::base_internal::CountLeadingZeros64(n); + int slow = absl::base_internal::CountLeadingZeros64Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountLeadingZeros64) { + EXPECT_EQ(64, CLZ64(uint64_t{})); + EXPECT_EQ(0, CLZ64(~uint64_t{})); + + for (int index = 0; index < 64; index++) { + uint64_t x = static_cast(1) << index; + const auto cnt = 63 - index; + ASSERT_EQ(cnt, CLZ64(x)) << index; + ASSERT_EQ(cnt, CLZ64(x + x - 1)) << index; + } +} + +int CLZ32(uint32_t n) { + int fast = absl::base_internal::CountLeadingZeros32(n); + int slow = absl::base_internal::CountLeadingZeros32Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountLeadingZeros32) { + EXPECT_EQ(32, CLZ32(uint32_t{})); + EXPECT_EQ(0, CLZ32(~uint32_t{})); + + for (int index = 0; index < 32; index++) { + uint32_t x = static_cast(1) << index; + const auto cnt = 31 - index; + ASSERT_EQ(cnt, CLZ32(x)) << index; + ASSERT_EQ(cnt, CLZ32(x + x - 1)) << index; + ASSERT_EQ(CLZ64(x), CLZ32(x) + 32); + } +} + +int CTZ64(uint64_t n) { + int fast = absl::base_internal::CountTrailingZerosNonZero64(n); + int slow = absl::base_internal::CountTrailingZerosNonZero64Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountTrailingZerosNonZero64) { + EXPECT_EQ(0, CTZ64(~uint64_t{})); + + for (int index = 0; index < 64; index++) { + uint64_t x = static_cast(1) << index; + const auto cnt = index; + ASSERT_EQ(cnt, CTZ64(x)) << index; + ASSERT_EQ(cnt, CTZ64(~(x - 1))) << index; + } +} + +int CTZ32(uint32_t n) { + int fast = absl::base_internal::CountTrailingZerosNonZero32(n); + int slow = absl::base_internal::CountTrailingZerosNonZero32Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountTrailingZerosNonZero32) { + EXPECT_EQ(0, CTZ32(~uint32_t{})); + + for (int index = 0; index < 32; index++) { + uint32_t x = static_cast(1) << index; + const auto cnt = index; + ASSERT_EQ(cnt, CTZ32(x)) << index; + ASSERT_EQ(cnt, CTZ32(~(x - 1))) << index; + } +} + + +} // namespace diff --git a/base/abseil/absl/base/internal/cmake_thread_test.cc b/base/abseil/absl/base/internal/cmake_thread_test.cc new file mode 100644 index 0000000..f70bb24 --- /dev/null +++ b/base/abseil/absl/base/internal/cmake_thread_test.cc @@ -0,0 +1,22 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include "absl/base/internal/thread_identity.h" + +int main() { + auto* tid = absl::base_internal::CurrentThreadIdentityIfPresent(); + // Make sure the above call can't be optimized out + std::cout << (void*)tid << std::endl; +} diff --git a/base/abseil/absl/base/internal/cycleclock.cc b/base/abseil/absl/base/internal/cycleclock.cc new file mode 100644 index 0000000..0e65005 --- /dev/null +++ b/base/abseil/absl/base/internal/cycleclock.cc @@ -0,0 +1,107 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The implementation of CycleClock::Frequency. +// +// NOTE: only i386 and x86_64 have been well tested. +// PPC, sparc, alpha, and ia64 are based on +// http://peter.kuscsik.com/wordpress/?p=14 +// with modifications by m3b. See also +// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h + +#include "absl/base/internal/cycleclock.h" + +#include +#include // NOLINT(build/c++11) + +#include "absl/base/internal/unscaledcycleclock.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +namespace { + +#ifdef NDEBUG +#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +// Not debug mode and the UnscaledCycleClock frequency is the CPU +// frequency. Scale the CycleClock to prevent overflow if someone +// tries to represent the time as cycles since the Unix epoch. +static constexpr int32_t kShift = 1; +#else +// Not debug mode and the UnscaledCycleClock isn't operating at the +// raw CPU frequency. There is no need to do any scaling, so don't +// needlessly sacrifice precision. +static constexpr int32_t kShift = 0; +#endif +#else +// In debug mode use a different shift to discourage depending on a +// particular shift value. +static constexpr int32_t kShift = 2; +#endif + +static constexpr double kFrequencyScale = 1.0 / (1 << kShift); +static std::atomic cycle_clock_source; + +CycleClockSourceFunc LoadCycleClockSource() { + // Optimize for the common case (no callback) by first doing a relaxed load; + // this is significantly faster on non-x86 platforms. + if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) { + return nullptr; + } + // This corresponds to the store(std::memory_order_release) in + // CycleClockSource::Register, and makes sure that any updates made prior to + // registering the callback are visible to this thread before the callback is + // invoked. 
+ return cycle_clock_source.load(std::memory_order_acquire); +} + +} // namespace + +int64_t CycleClock::Now() { + auto fn = LoadCycleClockSource(); + if (fn == nullptr) { + return base_internal::UnscaledCycleClock::Now() >> kShift; + } + return fn() >> kShift; +} + +double CycleClock::Frequency() { + return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); +} + +void CycleClockSource::Register(CycleClockSourceFunc source) { + // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource. + cycle_clock_source.store(source, std::memory_order_release); +} + +#else + +int64_t CycleClock::Now() { + return std::chrono::duration_cast( + std::chrono::steady_clock::now().time_since_epoch()) + .count(); +} + +double CycleClock::Frequency() { + return 1e9; +} + +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/base/internal/cycleclock.h b/base/abseil/absl/base/internal/cycleclock.h new file mode 100644 index 0000000..a18b584 --- /dev/null +++ b/base/abseil/absl/base/internal/cycleclock.h @@ -0,0 +1,94 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// ----------------------------------------------------------------------------- +// File: cycleclock.h +// ----------------------------------------------------------------------------- +// +// This header file defines a `CycleClock`, which yields the value and frequency +// of a cycle counter that increments at a rate that is approximately constant. +// +// NOTE: +// +// The cycle counter frequency is not necessarily related to the core clock +// frequency and should not be treated as such. That is, `CycleClock` cycles are +// not necessarily "CPU cycles" and code should not rely on that behavior, even +// if experimentally observed. +// +// An arbitrary offset may have been added to the counter at power on. +// +// On some platforms, the rate and offset of the counter may differ +// slightly when read from different CPUs of a multiprocessor. Usually, +// we try to ensure that the operating system adjusts values periodically +// so that values agree approximately. If you need stronger guarantees, +// consider using alternate interfaces. +// +// The CPU is not required to maintain the ordering of a cycle counter read +// with respect to surrounding instructions. + +#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_ +#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_ + +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// ----------------------------------------------------------------------------- +// CycleClock +// ----------------------------------------------------------------------------- +class CycleClock { + public: + // CycleClock::Now() + // + // Returns the value of a cycle counter that counts at a rate that is + // approximately constant. 
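+  //
+  // A sketch of timing a code section with this pair of calls (Frequency() is
+  // declared just below; elapsed cycles divided by cycles-per-second gives
+  // seconds):
+  //
+  //   const int64_t start = CycleClock::Now();
+  //   // ... code under measurement ...
+  //   const double elapsed_seconds =
+  //       (CycleClock::Now() - start) / CycleClock::Frequency();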
+ static int64_t Now(); + + // CycleClock::Frequency() + // + // Returns the amount by which `CycleClock::Now()` increases per second. Note + // that this value may not necessarily match the core CPU clock frequency. + static double Frequency(); + + private: + CycleClock() = delete; // no instances + CycleClock(const CycleClock&) = delete; + CycleClock& operator=(const CycleClock&) = delete; +}; + +using CycleClockSourceFunc = int64_t (*)(); + +class CycleClockSource { + private: + // CycleClockSource::Register() + // + // Register a function that provides an alternate source for the unscaled CPU + // cycle count value. The source function must be async signal safe, must not + // call CycleClock::Now(), and must have a frequency that matches that of the + // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use + // the default source. + static void Register(CycleClockSourceFunc source); +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_ diff --git a/base/abseil/absl/base/internal/direct_mmap.h b/base/abseil/absl/base/internal/direct_mmap.h new file mode 100644 index 0000000..5618867 --- /dev/null +++ b/base/abseil/absl/base/internal/direct_mmap.h @@ -0,0 +1,161 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for directly invoking mmap() via syscall, avoiding the case where +// mmap() has been locally overridden. + +#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ +#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ + +#include "absl/base/config.h" + +#if ABSL_HAVE_MMAP + +#include + +#ifdef __linux__ + +#include +#ifdef __BIONIC__ +#include +#else +#include +#endif + +#include +#include +#include +#include +#include + +#ifdef __mips__ +// Include definitions of the ABI currently in use. +#ifdef __BIONIC__ +// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the +// definitions we need. +#include +#else +#include +#endif // __BIONIC__ +#endif // __mips__ + +// SYS_mmap and SYS_munmap are not defined in Android. +#ifdef __BIONIC__ +extern "C" void* __mmap2(void*, size_t, int, int, int, size_t); +#if defined(__NR_mmap) && !defined(SYS_mmap) +#define SYS_mmap __NR_mmap +#endif +#ifndef SYS_munmap +#define SYS_munmap __NR_munmap +#endif +#endif // __BIONIC__ + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Platform specific logic extracted from +// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h +inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, + off64_t offset) noexcept { +#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ + (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ + (defined(__PPC__) && !defined(__PPC64__)) || \ + (defined(__s390__) && !defined(__s390x__)) + // On these architectures, implement mmap with mmap2. 
+ static int pagesize = 0; + if (pagesize == 0) { +#if defined(__wasm__) || defined(__asmjs__) + pagesize = getpagesize(); +#else + pagesize = sysconf(_SC_PAGESIZE); +#endif + } + if (offset < 0 || offset % pagesize != 0) { + errno = EINVAL; + return MAP_FAILED; + } +#ifdef __BIONIC__ + // SYS_mmap2 has problems on Android API level <= 16. + // Workaround by invoking __mmap2() instead. + return __mmap2(start, length, prot, flags, fd, offset / pagesize); +#else + return reinterpret_cast( + syscall(SYS_mmap2, start, length, prot, flags, fd, + static_cast(offset / pagesize))); +#endif +#elif defined(__s390x__) + // On s390x, mmap() arguments are passed in memory. + unsigned long buf[6] = {reinterpret_cast(start), // NOLINT + static_cast(length), // NOLINT + static_cast(prot), // NOLINT + static_cast(flags), // NOLINT + static_cast(fd), // NOLINT + static_cast(offset)}; // NOLINT + return reinterpret_cast(syscall(SYS_mmap, buf)); +#elif defined(__x86_64__) +// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit. +// We need to explicitly cast to an unsigned 64 bit type to avoid implicit +// sign extension. We can't cast pointers directly because those are +// 32 bits, and gcc will dump ugly warnings about casting from a pointer +// to an integer of a different size. We also need to make sure __off64_t +// isn't truncated to 32-bits under x32. +#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x)) + return reinterpret_cast( + syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length), + MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags), + MMAP_SYSCALL_ARG(fd), static_cast(offset))); +#undef MMAP_SYSCALL_ARG +#else // Remaining 64-bit aritectures. + static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit"); + return reinterpret_cast( + syscall(SYS_mmap, start, length, prot, flags, fd, offset)); +#endif +} + +inline int DirectMunmap(void* start, size_t length) { + return static_cast(syscall(SYS_munmap, start, length)); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#else // !__linux__ + +// For non-linux platforms where we have mmap, just dispatch directly to the +// actual mmap()/munmap() methods. + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, + off_t offset) { + return mmap(start, length, prot, flags, fd, offset); +} + +inline int DirectMunmap(void* start, size_t length) { + return munmap(start, length); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // __linux__ + +#endif // ABSL_HAVE_MMAP + +#endif // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ diff --git a/base/abseil/absl/base/internal/endian.h b/base/abseil/absl/base/internal/endian.h new file mode 100644 index 0000000..9677530 --- /dev/null +++ b/base/abseil/absl/base/internal/endian.h @@ -0,0 +1,266 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+
+#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
+#define ABSL_BASE_INTERNAL_ENDIAN_H_
+
+// The following guarantees declaration of the byte swap functions
+#ifdef _MSC_VER
+#include <stdlib.h>  // NOLINT(build/include)
+#elif defined(__FreeBSD__)
+#include <sys/endian.h>
+#elif defined(__GLIBC__)
+#include <byteswap.h>  // IWYU pragma: export
+#endif
+
+#include <cstdint>
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// Use compiler byte-swapping intrinsics if they are available. 32-bit
+// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
+// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
+// For simplicity, we enable them all only for GCC 4.8.0 or later.
+#if defined(__clang__) || \
+    (defined(__GNUC__) && \
+     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return __builtin_bswap64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return __builtin_bswap32(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return __builtin_bswap16(host_int);
+}
+
+#elif defined(_MSC_VER)
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return _byteswap_uint64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return _byteswap_ulong(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return _byteswap_ushort(host_int);
+}
+
+#else
+inline uint64_t gbswap_64(uint64_t host_int) {
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
+  // Adapted from /usr/include/byteswap.h. Not available on Mac.
+  if (__builtin_constant_p(host_int)) {
+    return __bswap_constant_64(host_int);
+  } else {
+    uint64_t result;
+    __asm__("bswap %0" : "=r"(result) : "0"(host_int));
+    return result;
+  }
+#elif defined(__GLIBC__)
+  return bswap_64(host_int);
+#else
+  return (((host_int & uint64_t{0xFF}) << 56) |
+          ((host_int & uint64_t{0xFF00}) << 40) |
+          ((host_int & uint64_t{0xFF0000}) << 24) |
+          ((host_int & uint64_t{0xFF000000}) << 8) |
+          ((host_int & uint64_t{0xFF00000000}) >> 8) |
+          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
+          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
+          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+#endif  // bswap_64
+}
+
+inline uint32_t gbswap_32(uint32_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_32(host_int);
+#else
+  return (((host_int & uint32_t{0xFF}) << 24) |
+          ((host_int & uint32_t{0xFF00}) << 8) |
+          ((host_int & uint32_t{0xFF0000}) >> 8) |
+          ((host_int & uint32_t{0xFF000000}) >> 24));
+#endif
+}
+
+inline uint16_t gbswap_16(uint16_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_16(host_int);
+#else
+  return (((host_int & uint16_t{0xFF}) << 8) |
+          ((host_int & uint16_t{0xFF00}) >> 8));
+#endif
+}
+
+#endif  // intrinsics available
+
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+// Definitions for ntohl etc. that don't require us to include
+// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
+// than just #defining them because in debug mode, gcc doesn't
+// correctly handle the (rather involved) definitions of bswap_32.
+// gcc guarantees that inline functions are as fast as macros, so
+// this isn't a performance hit.
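+//
+// For example (a sketch; round-tripping a value through network byte order):
+//
+//   uint32_t host = 0x01234567;
+//   uint32_t wire = ghtonl(host);  // big-endian on the wire
+//   assert(gntohl(wire) == host);  // ghtonl and gntohl are inverses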
+inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
+inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
+inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+// These definitions are simpler on big-endian machines
+// These are functions instead of macros to avoid self-assignment warnings
+// on calls such as "i = ghtonl(i);". This also provides type checking.
+inline uint16_t ghtons(uint16_t x) { return x; }
+inline uint32_t ghtonl(uint32_t x) { return x; }
+inline uint64_t ghtonll(uint64_t x) { return x; }
+
+#else
+#error \
+    "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
+    "ABSL_IS_LITTLE_ENDIAN must be defined"
+#endif  // byte order
+
+inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
+inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
+inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
+
+// Utilities to convert numbers between the current host's native byte
+// order and little-endian byte order
+//
+// Load/Store methods are alignment safe
+namespace little_endian {
+// Conversion functions.
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in little-endian order.
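+//
+// For example, to write a uint32_t in a fixed little-endian wire format
+// independent of the host's byte order (a sketch):
+//
+//   char buf[4];
+//   little_endian::Store32(buf, 0x01234567);  // buf holds 0x67 0x45 0x23 0x01
+//   uint32_t v = little_endian::Load32(buf);  // v == 0x01234567 on any host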
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace little_endian
+
+// Utilities to convert numbers between the current host's native byte
+// order and big-endian byte order (same as network byte order)
+//
+// Load/Store methods are alignment safe
+namespace big_endian {
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in big-endian order.
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace big_endian
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ENDIAN_H_
diff --git a/base/abseil/absl/base/internal/endian_test.cc b/base/abseil/absl/base/internal/endian_test.cc
new file mode 100644
index 0000000..aa6b849
--- /dev/null
+++ b/base/abseil/absl/base/internal/endian_test.cc
@@ -0,0 +1,265 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
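+//
+// The tests below validate the byte-swap helpers against a manual byte
+// reversal: exhaustively for all 65536 uint16_t values, and on a large random
+// sample for the 32- and 64-bit variants.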
+
+#include "absl/base/internal/endian.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <random>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+const uint64_t kInitialNumber{0x0123456789abcdef};
+const uint64_t k64Value{kInitialNumber};
+const uint32_t k32Value{0x01234567};
+const uint16_t k16Value{0x0123};
+const int kNumValuesToTest = 1000000;
+const int kRandomSeed = 12345;
+
+#if defined(ABSL_IS_BIG_ENDIAN)
+const uint64_t kInitialInNetworkOrder{kInitialNumber};
+const uint64_t k64ValueLE{0xefcdab8967452301};
+const uint32_t k32ValueLE{0x67452301};
+const uint16_t k16ValueLE{0x2301};
+
+const uint64_t k64ValueBE{kInitialNumber};
+const uint32_t k32ValueBE{k32Value};
+const uint16_t k16ValueBE{k16Value};
+#elif defined(ABSL_IS_LITTLE_ENDIAN)
+const uint64_t kInitialInNetworkOrder{0xefcdab8967452301};
+const uint64_t k64ValueLE{kInitialNumber};
+const uint32_t k32ValueLE{k32Value};
+const uint16_t k16ValueLE{k16Value};
+
+const uint64_t k64ValueBE{0xefcdab8967452301};
+const uint32_t k32ValueBE{0x67452301};
+const uint16_t k16ValueBE{0x2301};
+#endif
+
+template <typename T>
+std::vector<T> GenerateAllValuesForType() {
+  std::vector<T> result;
+  T next = std::numeric_limits<T>::min();
+  while (true) {
+    result.push_back(next);
+    if (next == std::numeric_limits<T>::max()) {
+      return result;
+    }
+    ++next;
+  }
+}
+
+template <typename T>
+std::vector<T> GenerateRandomIntegers(size_t numValuesToTest) {
+  std::vector<T> result;
+  std::mt19937_64 rng(kRandomSeed);
+  for (size_t i = 0; i < numValuesToTest; ++i) {
+    result.push_back(rng());
+  }
+  return result;
+}
+
+void ManualByteSwap(char* bytes, int length) {
+  if (length == 1)
+    return;
+
+  EXPECT_EQ(0, length % 2);
+  for (int i = 0; i < length / 2; ++i) {
+    int j = (length - 1) - i;
+    using std::swap;
+    swap(bytes[i], bytes[j]);
+  }
+}
+
+template <typename T>
+inline T UnalignedLoad(const char* p) {
+  static_assert(
+      sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
+      "Unexpected type size");
+
+  switch (sizeof(T)) {
+    case 1: return *reinterpret_cast<const T*>(p);
+    case 2:
+      return ABSL_INTERNAL_UNALIGNED_LOAD16(p);
+    case 4:
+      return ABSL_INTERNAL_UNALIGNED_LOAD32(p);
+    case 8:
+      return ABSL_INTERNAL_UNALIGNED_LOAD64(p);
+    default:
+      // Suppresses invalid "not all control paths return a value" on MSVC
+      return {};
+  }
+}
+
+template <typename T, typename ByteSwapper>
+static void GBSwapHelper(const std::vector<T>& host_values_to_test,
+                         const ByteSwapper& byte_swapper) {
+  // Test byte_swapper against a manual byte swap.
+  for (typename std::vector<T>::const_iterator it = host_values_to_test.begin();
+       it != host_values_to_test.end(); ++it) {
+    T host_value = *it;
+
+    char actual_value[sizeof(host_value)];
+    memcpy(actual_value, &host_value, sizeof(host_value));
+    byte_swapper(actual_value);
+
+    char expected_value[sizeof(host_value)];
+    memcpy(expected_value, &host_value, sizeof(host_value));
+    ManualByteSwap(expected_value, sizeof(host_value));
+
+    ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value)))
+        << "Swap output for 0x" << std::hex << host_value << " does not match. "
" + << "Expected: 0x" << UnalignedLoad(expected_value) << "; " + << "actual: 0x" << UnalignedLoad(actual_value); + } +} + +void Swap16(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE16( + bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes))); +} + +void Swap32(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE32( + bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes))); +} + +void Swap64(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE64( + bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes))); +} + +TEST(EndianessTest, Uint16) { + GBSwapHelper(GenerateAllValuesForType(), &Swap16); +} + +TEST(EndianessTest, Uint32) { + GBSwapHelper(GenerateRandomIntegers(kNumValuesToTest), &Swap32); +} + +TEST(EndianessTest, Uint64) { + GBSwapHelper(GenerateRandomIntegers(kNumValuesToTest), &Swap64); +} + +TEST(EndianessTest, ghtonll_gntohll) { + // Test that absl::ghtonl compiles correctly + uint32_t test = 0x01234567; + EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test); + + uint64_t comp = absl::ghtonll(kInitialNumber); + EXPECT_EQ(comp, kInitialInNetworkOrder); + comp = absl::gntohll(kInitialInNetworkOrder); + EXPECT_EQ(comp, kInitialNumber); + + // Test that htonll and ntohll are each others' inverse functions on a + // somewhat assorted batch of numbers. 37 is chosen to not be anything + // particularly nice base 2. + uint64_t value = 1; + for (int i = 0; i < 100; ++i) { + comp = absl::ghtonll(absl::gntohll(value)); + EXPECT_EQ(value, comp); + comp = absl::gntohll(absl::ghtonll(value)); + EXPECT_EQ(value, comp); + value *= 37; + } +} + +TEST(EndianessTest, little_endian) { + // Check little_endian uint16_t. + uint64_t comp = little_endian::FromHost16(k16Value); + EXPECT_EQ(comp, k16ValueLE); + comp = little_endian::ToHost16(k16ValueLE); + EXPECT_EQ(comp, k16Value); + + // Check little_endian uint32_t. + comp = little_endian::FromHost32(k32Value); + EXPECT_EQ(comp, k32ValueLE); + comp = little_endian::ToHost32(k32ValueLE); + EXPECT_EQ(comp, k32Value); + + // Check little_endian uint64_t. + comp = little_endian::FromHost64(k64Value); + EXPECT_EQ(comp, k64ValueLE); + comp = little_endian::ToHost64(k64ValueLE); + EXPECT_EQ(comp, k64Value); + + // Check little-endian Load and store functions. + uint16_t u16Buf; + uint32_t u32Buf; + uint64_t u64Buf; + + little_endian::Store16(&u16Buf, k16Value); + EXPECT_EQ(u16Buf, k16ValueLE); + comp = little_endian::Load16(&u16Buf); + EXPECT_EQ(comp, k16Value); + + little_endian::Store32(&u32Buf, k32Value); + EXPECT_EQ(u32Buf, k32ValueLE); + comp = little_endian::Load32(&u32Buf); + EXPECT_EQ(comp, k32Value); + + little_endian::Store64(&u64Buf, k64Value); + EXPECT_EQ(u64Buf, k64ValueLE); + comp = little_endian::Load64(&u64Buf); + EXPECT_EQ(comp, k64Value); +} + +TEST(EndianessTest, big_endian) { + // Check big-endian Load and store functions. 
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  unsigned char buffer[10];
+  big_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  uint64_t comp = big_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+
+  big_endian::Store16(buffer + 1, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  comp = big_endian::Load16(buffer + 1);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(buffer + 1, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(buffer + 1);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(buffer + 1, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(buffer + 1);
+  EXPECT_EQ(comp, k64Value);
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/exception_safety_testing.cc b/base/abseil/absl/base/internal/exception_safety_testing.cc
new file mode 100644
index 0000000..6ccac41
--- /dev/null
+++ b/base/abseil/absl/base/internal/exception_safety_testing.cc
@@ -0,0 +1,79 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/exception_safety_testing.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace testing {
+
+exceptions_internal::NoThrowTag nothrow_ctor;
+
+exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester() {
+  return {};
+}
+
+namespace exceptions_internal {
+
+int countdown = -1;
+
+ConstructorTracker* ConstructorTracker::current_tracker_instance_ = nullptr;
+
+void MaybeThrow(absl::string_view msg, bool throw_bad_alloc) {
+  if (countdown-- == 0) {
+    if (throw_bad_alloc) throw TestBadAllocException(msg);
+    throw TestException(msg);
+  }
+}
+
+testing::AssertionResult FailureMessage(const TestException& e,
+                                        int countdown) noexcept {
+  return testing::AssertionFailure() << "Exception thrown from " << e.what();
+}
+
+std::string GetSpecString(TypeSpec spec) {
+  std::string out;
+  absl::string_view sep;
+  const auto append = [&](absl::string_view s) {
+    absl::StrAppend(&out, sep, s);
+    sep = " | ";
+  };
+  if (static_cast<bool>(TypeSpec::kNoThrowCopy & spec)) {
+    append("kNoThrowCopy");
+  }
+  if (static_cast<bool>(TypeSpec::kNoThrowMove & spec)) {
+    append("kNoThrowMove");
+  }
+  if (static_cast<bool>(TypeSpec::kNoThrowNew & spec)) {
+    append("kNoThrowNew");
+  }
+  return out;
+}
+
+std::string GetSpecString(AllocSpec spec) {
+  return static_cast<bool>(AllocSpec::kNoThrowAllocate & spec)
"kNoThrowAllocate" + : ""; +} + +} // namespace exceptions_internal + +} // namespace testing + +#endif // ABSL_HAVE_EXCEPTIONS diff --git a/base/abseil/absl/base/internal/exception_safety_testing.h b/base/abseil/absl/base/internal/exception_safety_testing.h new file mode 100644 index 0000000..6ba89d0 --- /dev/null +++ b/base/abseil/absl/base/internal/exception_safety_testing.h @@ -0,0 +1,1101 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Utilities for testing exception-safety + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ + +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/base/internal/pretty_function.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" +#include "absl/strings/substitute.h" +#include "absl/utility/utility.h" + +namespace testing { + +enum class TypeSpec; +enum class AllocSpec; + +constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) | static_cast(b)); +} + +constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) & static_cast(b)); +} + +constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) | static_cast(b)); +} + +constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) & static_cast(b)); +} + +namespace exceptions_internal { + +std::string GetSpecString(TypeSpec); +std::string GetSpecString(AllocSpec); + +struct NoThrowTag {}; +struct StrongGuaranteeTagType {}; + +// A simple exception class. We throw this so that test code can catch +// exceptions specifically thrown by ThrowingValue. +class TestException { + public: + explicit TestException(absl::string_view msg) : msg_(msg) {} + virtual ~TestException() {} + virtual const char* what() const noexcept { return msg_.c_str(); } + + private: + std::string msg_; +}; + +// TestBadAllocException exists because allocation functions must throw an +// exception which can be caught by a handler of std::bad_alloc. We use a child +// class of std::bad_alloc so we can customise the error message, and also +// derive from TestException so we don't accidentally end up catching an actual +// bad_alloc exception in TestExceptionSafety. 
+class TestBadAllocException : public std::bad_alloc, public TestException { + public: + explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {} + using TestException::what; +}; + +extern int countdown; + +// Allows the countdown variable to be set manually (defaulting to the initial +// value of 0) +inline void SetCountdown(int i = 0) { countdown = i; } +// Sets the countdown to the terminal value -1 +inline void UnsetCountdown() { SetCountdown(-1); } + +void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false); + +testing::AssertionResult FailureMessage(const TestException& e, + int countdown) noexcept; + +struct TrackedAddress { + bool is_alive; + std::string description; +}; + +// Inspects the constructions and destructions of anything inheriting from +// TrackedObject. This allows us to safely "leak" TrackedObjects, as +// ConstructorTracker will destroy everything left over in its destructor. +class ConstructorTracker { + public: + explicit ConstructorTracker(int count) : countdown_(count) { + assert(current_tracker_instance_ == nullptr); + current_tracker_instance_ = this; + } + + ~ConstructorTracker() { + assert(current_tracker_instance_ == this); + current_tracker_instance_ = nullptr; + + for (auto& it : address_map_) { + void* address = it.first; + TrackedAddress& tracked_address = it.second; + if (tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, + countdown_, "Object was not destroyed."); + } + } + } + + static void ObjectConstructed(void* address, std::string description) { + if (!CurrentlyTracking()) return; + + TrackedAddress& tracked_address = + current_tracker_instance_->address_map_[address]; + if (tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage( + address, tracked_address.description, + current_tracker_instance_->countdown_, + "Object was re-constructed. Current object was constructed by " + + description); + } + tracked_address = {true, std::move(description)}; + } + + static void ObjectDestructed(void* address) { + if (!CurrentlyTracking()) return; + + auto it = current_tracker_instance_->address_map_.find(address); + // Not tracked. Ignore. 
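+    // (An address can be untracked if the object was constructed while no
+    // ConstructorTracker was active.)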
+    if (it == current_tracker_instance_->address_map_.end()) return;
+
+    TrackedAddress& tracked_address = it->second;
+    if (!tracked_address.is_alive) {
+      ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+                                    current_tracker_instance_->countdown_,
+                                    "Object was re-destroyed.");
+    }
+    tracked_address.is_alive = false;
+  }
+
+ private:
+  static bool CurrentlyTracking() {
+    return current_tracker_instance_ != nullptr;
+  }
+
+  static std::string ErrorMessage(void* address,
+                                  const std::string& address_description,
+                                  int countdown,
+                                  const std::string& error_description) {
+    return absl::Substitute(
+        "With countdown at $0:\n"
+        "  $1\n"
+        "  Object originally constructed by $2\n"
+        "  Object address: $3\n",
+        countdown, error_description, address_description, address);
+  }
+
+  std::unordered_map<void*, TrackedAddress> address_map_;
+  int countdown_;
+
+  static ConstructorTracker* current_tracker_instance_;
+};
+
+class TrackedObject {
+ public:
+  TrackedObject(const TrackedObject&) = delete;
+  TrackedObject(TrackedObject&&) = delete;
+
+ protected:
+  explicit TrackedObject(std::string description) {
+    ConstructorTracker::ObjectConstructed(this, std::move(description));
+  }
+
+  ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); }
+};
+}  // namespace exceptions_internal
+
+extern exceptions_internal::NoThrowTag nothrow_ctor;
+
+extern exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+// A test class which is convertible to bool. The conversion can be
+// instrumented to throw at a controlled time.
+class ThrowingBool {
+ public:
+  ThrowingBool(bool b) noexcept : b_(b) {}  // NOLINT(runtime/explicit)
+  operator bool() const {  // NOLINT
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return b_;
+  }
+
+ private:
+  bool b_;
+};
+
+/*
+ * Configuration enum for the ThrowingValue type that defines behavior for the
+ * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer
+ * constructor from throwing.
+ *
+ * kEverythingThrows: Every operation can throw an exception
+ * kNoThrowCopy: Copy construction and copy assignment will not throw
+ * kNoThrowMove: Move construction and move assignment will not throw
+ * kNoThrowNew: Overloaded operators new and new[] will not throw
+ */
+enum class TypeSpec {
+  kEverythingThrows = 0,
+  kNoThrowCopy = 1,
+  kNoThrowMove = 1 << 1,
+  kNoThrowNew = 1 << 2,
+};
+
+/*
+ * A testing class instrumented to throw an exception at a controlled time.
+ *
+ * ThrowingValue implements a slightly relaxed version of the Regular concept --
+ * that is it's a value type with the expected semantics. It also implements
+ * arithmetic operations. It doesn't implement member and pointer operators
+ * like operator-> or operator[].
+ *
+ * ThrowingValue can be instrumented to have certain operations be noexcept by
+ * using compile-time bitfield template arguments.
That is, to make an + * ThrowingValue which has noexcept move construction/assignment and noexcept + * copy construction/assignment, use the following: + * ThrowingValue my_thrwr{val}; + */ +template +class ThrowingValue : private exceptions_internal::TrackedObject { + static constexpr bool IsSpecified(TypeSpec spec) { + return static_cast(Spec & spec); + } + + static constexpr int kDefaultValue = 0; + static constexpr int kBadValue = 938550620; + + public: + ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = kDefaultValue; + } + + ThrowingValue(const ThrowingValue& other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy)) + : TrackedObject(GetInstanceString(other.dummy_)) { + if (!IsSpecified(TypeSpec::kNoThrowCopy)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + ThrowingValue(ThrowingValue&& other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove)) + : TrackedObject(GetInstanceString(other.dummy_)) { + if (!IsSpecified(TypeSpec::kNoThrowMove)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = i; + } + + ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept + : TrackedObject(GetInstanceString(i)), dummy_(i) {} + + // absl expects nothrow destructors + ~ThrowingValue() noexcept = default; + + ThrowingValue& operator=(const ThrowingValue& other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy)) { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowCopy)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + ThrowingValue& operator=(ThrowingValue&& other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove)) { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowMove)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + // Arithmetic Operators + ThrowingValue operator+(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor); + } + + ThrowingValue operator+() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_, nothrow_ctor); + } + + ThrowingValue operator-(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor); + } + + ThrowingValue operator-() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(-dummy_, nothrow_ctor); + } + + ThrowingValue& operator++() { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + ++dummy_; + return *this; + } + + ThrowingValue operator++(int) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + ++dummy_; + return out; + } + + ThrowingValue& operator--() { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + --dummy_; + return *this; + } + + ThrowingValue operator--(int) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + --dummy_; + return out; + } + + ThrowingValue operator*(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor); + 
} + + ThrowingValue operator/(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor); + } + + ThrowingValue operator%(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor); + } + + ThrowingValue operator<<(int shift) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ << shift, nothrow_ctor); + } + + ThrowingValue operator>>(int shift) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ >> shift, nothrow_ctor); + } + + // Comparison Operators + // NOTE: We use `ThrowingBool` instead of `bool` because most STL + // types/containers requires T to be convertible to bool. + friend ThrowingBool operator==(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ == b.dummy_; + } + friend ThrowingBool operator!=(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ != b.dummy_; + } + friend ThrowingBool operator<(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ < b.dummy_; + } + friend ThrowingBool operator<=(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ <= b.dummy_; + } + friend ThrowingBool operator>(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ > b.dummy_; + } + friend ThrowingBool operator>=(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ >= b.dummy_; + } + + // Logical Operators + ThrowingBool operator!() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return !dummy_; + } + + ThrowingBool operator&&(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ && other.dummy_; + } + + ThrowingBool operator||(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ || other.dummy_; + } + + // Bitwise Logical Operators + ThrowingValue operator~() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(~dummy_, nothrow_ctor); + } + + ThrowingValue operator&(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor); + } + + ThrowingValue operator|(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor); + } + + ThrowingValue operator^(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor); + } + + // Compound Assignment operators + ThrowingValue& operator+=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ += other.dummy_; + return *this; + } + + ThrowingValue& operator-=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ -= other.dummy_; + return *this; + } + + ThrowingValue& operator*=(const ThrowingValue& other) { + 
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ *= other.dummy_; + return *this; + } + + ThrowingValue& operator/=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ /= other.dummy_; + return *this; + } + + ThrowingValue& operator%=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ %= other.dummy_; + return *this; + } + + ThrowingValue& operator&=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ &= other.dummy_; + return *this; + } + + ThrowingValue& operator|=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ |= other.dummy_; + return *this; + } + + ThrowingValue& operator^=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ ^= other.dummy_; + return *this; + } + + ThrowingValue& operator<<=(int shift) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ <<= shift; + return *this; + } + + ThrowingValue& operator>>=(int shift) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ >>= shift; + return *this; + } + + // Pointer operators + void operator&() const = delete; // NOLINT(runtime/operator) + + // Stream operators + friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return os << GetInstanceString(tv.dummy_); + } + + friend std::istream& operator>>(std::istream& is, const ThrowingValue&) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return is; + } + + // Memory management operators + // Args.. allows us to overload regular and placement new in one shot + template + static void* operator new(size_t s, Args&&... args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s, std::forward(args)...); + } + + template + static void* operator new[](size_t s, Args&&... args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s, std::forward(args)...); + } + + // Abseil doesn't support throwing overloaded operator delete. These are + // provided so a throwing operator-new can clean up after itself. + // + // We provide both regular and templated operator delete because if only the + // templated version is provided as we did with operator new, the compiler has + // no way of knowing which overload of operator delete to call. See + // https://en.cppreference.com/w/cpp/memory/new/operator_delete and + // https://en.cppreference.com/w/cpp/language/delete for the gory details. + void operator delete(void* p) noexcept { ::operator delete(p); } + + template + void operator delete(void* p, Args&&... args) noexcept { + ::operator delete(p, std::forward(args)...); + } + + void operator delete[](void* p) noexcept { return ::operator delete[](p); } + + template + void operator delete[](void* p, Args&&... args) noexcept { + return ::operator delete[](p, std::forward(args)...); + } + + // Non-standard access to the actual contained value. No need for this to + // throw. 
+  int& Get() noexcept { return dummy_; }
+  const int& Get() const noexcept { return dummy_; }
+
+ private:
+  static std::string GetInstanceString(int dummy) {
+    return absl::StrCat("ThrowingValue<",
+                        exceptions_internal::GetSpecString(Spec), ">(", dummy,
+                        ")");
+  }
+
+  int dummy_;
+};
+// While not having to do with exceptions, explicitly delete comma operator, to
+// make sure we don't use it on user-supplied types.
+template <TypeSpec Spec, typename T>
+void operator,(const ThrowingValue<Spec>&, T&&) = delete;
+template <TypeSpec Spec, typename T>
+void operator,(T&&, const ThrowingValue<Spec>&) = delete;
+
+/*
+ * Configuration enum for the ThrowingAllocator type that defines behavior for
+ * the lifetime of the instance.
+ *
+ * kEverythingThrows: Calls to the member functions may throw
+ * kNoThrowAllocate: Calls to the member functions will not throw
+ */
+enum class AllocSpec {
+  kEverythingThrows = 0,
+  kNoThrowAllocate = 1,
+};
+
+/*
+ * An allocator type which is instrumented to throw at a controlled time, or not
+ * to throw, using AllocSpec. The supported settings are the default of every
+ * function which is allowed to throw in a conforming allocator possibly
+ * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS
+ * configuration macro.
+ */
+template <typename T, AllocSpec Spec = AllocSpec::kEverythingThrows>
+class ThrowingAllocator : private exceptions_internal::TrackedObject {
+  static constexpr bool IsSpecified(AllocSpec spec) {
+    return static_cast<bool>(Spec & spec);
+  }
+
+ public:
+  using pointer = T*;
+  using const_pointer = const T*;
+  using reference = T&;
+  using const_reference = const T&;
+  using void_pointer = void*;
+  using const_void_pointer = const void*;
+  using value_type = T;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+
+  using is_nothrow =
+      std::integral_constant<bool, Spec == AllocSpec::kNoThrowAllocate>;
+  using propagate_on_container_copy_assignment = std::true_type;
+  using propagate_on_container_move_assignment = std::true_type;
+  using propagate_on_container_swap = std::true_type;
+  using is_always_equal = std::false_type;
+
+  ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = std::make_shared<const int>(next_id_++);
+  }
+
+  template <typename U>
+  ThrowingAllocator(const ThrowingAllocator<U, Spec>& other) noexcept  // NOLINT
+      : TrackedObject(GetInstanceString(*other.State())),
+        dummy_(other.State()) {}
+
+  // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of
+  // allocator shall not exit via an exception, thus they are marked noexcept.
+ ThrowingAllocator(const ThrowingAllocator& other) noexcept + : TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) {} + + template + ThrowingAllocator(ThrowingAllocator&& other) noexcept // NOLINT + : TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) {} + + ThrowingAllocator(ThrowingAllocator&& other) noexcept + : TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) {} + + ~ThrowingAllocator() noexcept = default; + + ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator& operator=( + const ThrowingAllocator& other) noexcept { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator& operator=(ThrowingAllocator&& other) noexcept { + dummy_ = std::move(other.State()); + return *this; + } + + template + struct rebind { + using other = ThrowingAllocator; + }; + + pointer allocate(size_type n) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return static_cast(::operator new(n * sizeof(T))); + } + + pointer allocate(size_type n, const_void_pointer) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + return allocate(n); + } + + void deallocate(pointer ptr, size_type) noexcept { + ReadState(); + ::operator delete(static_cast(ptr)); + } + + template + void construct(U* ptr, Args&&... args) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + ::new (static_cast(ptr)) U(std::forward(args)...); + } + + template + void destroy(U* p) noexcept { + ReadState(); + p->~U(); + } + + size_type max_size() const noexcept { + return (std::numeric_limits::max)() / sizeof(value_type); + } + + ThrowingAllocator select_on_container_copy_construction() noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + auto& out = *this; + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return out; + } + + template + bool operator==(const ThrowingAllocator& other) const noexcept { + return dummy_ == other.dummy_; + } + + template + bool operator!=(const ThrowingAllocator& other) const noexcept { + return dummy_ != other.dummy_; + } + + template + friend class ThrowingAllocator; + + private: + static std::string GetInstanceString(int dummy) { + return absl::StrCat("ThrowingAllocator<", + exceptions_internal::GetSpecString(Spec), ">(", dummy, + ")"); + } + + const std::shared_ptr& State() const { return dummy_; } + std::shared_ptr& State() { return dummy_; } + + void ReadState() { + // we know that this will never be true, but the compiler doesn't, so this + // should safely force a read of the value. + if (*dummy_ < 0) std::abort(); + } + + void ReadStateAndMaybeThrow(absl::string_view msg) const { + if (!IsSpecified(AllocSpec::kNoThrowAllocate)) { + exceptions_internal::MaybeThrow( + absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg)); + } + } + + static int next_id_; + std::shared_ptr dummy_; +}; + +template +int ThrowingAllocator::next_id_ = 0; + +// Tests for resource leaks by attempting to construct a T using args repeatedly +// until successful, using the countdown method. Side effects can then be +// tested for resource leaks. +template +void TestThrowingCtor(Args&&... 
args) { + struct Cleanup { + ~Cleanup() { exceptions_internal::UnsetCountdown(); } + } c; + for (int count = 0;; ++count) { + exceptions_internal::ConstructorTracker ct(count); + exceptions_internal::SetCountdown(count); + try { + T temp(std::forward(args)...); + static_cast(temp); + break; + } catch (const exceptions_internal::TestException&) { + } + } +} + +// Tests the nothrow guarantee of the provided nullary operation. If the an +// exception is thrown, the result will be AssertionFailure(). Otherwise, it +// will be AssertionSuccess(). +template +testing::AssertionResult TestNothrowOp(const Operation& operation) { + struct Cleanup { + Cleanup() { exceptions_internal::SetCountdown(); } + ~Cleanup() { exceptions_internal::UnsetCountdown(); } + } c; + try { + operation(); + return testing::AssertionSuccess(); + } catch (const exceptions_internal::TestException&) { + return testing::AssertionFailure() + << "TestException thrown during call to operation() when nothrow " + "guarantee was expected."; + } catch (...) { + return testing::AssertionFailure() + << "Unknown exception thrown during call to operation() when " + "nothrow guarantee was expected."; + } +} + +namespace exceptions_internal { + +// Dummy struct for ExceptionSafetyTestBuilder<> partial state. +struct UninitializedT {}; + +template +class DefaultFactory { + public: + explicit DefaultFactory(const T& t) : t_(t) {} + std::unique_ptr operator()() const { return absl::make_unique(t_); } + + private: + T t_; +}; + +template +using EnableIfTestable = typename absl::enable_if_t< + LazyContractsCount != 0 && + !std::is_same::value && + !std::is_same::value>; + +template +class ExceptionSafetyTestBuilder; + +} // namespace exceptions_internal + +/* + * Constructs an empty ExceptionSafetyTestBuilder. All + * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation + * methods return new instances of ExceptionSafetyTestBuilder. + * + * In order to test a T for exception safety, a factory for that T, a testable + * operation, and at least one contract callback returning an assertion + * result must be applied using the respective methods. + */ +exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester(); + +namespace exceptions_internal { +template +struct IsUniquePtr : std::false_type {}; + +template +struct IsUniquePtr> : std::true_type {}; + +template +struct FactoryPtrTypeHelper { + using type = decltype(std::declval()()); + + static_assert(IsUniquePtr::value, "Factories must return a unique_ptr"); +}; + +template +using FactoryPtrType = typename FactoryPtrTypeHelper::type; + +template +using FactoryElementType = typename FactoryPtrType::element_type; + +template +class ExceptionSafetyTest { + using Factory = std::function()>; + using Operation = std::function; + using Contract = std::function; + + public: + template + explicit ExceptionSafetyTest(const Factory& f, const Operation& op, + const Contracts&... contracts) + : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {} + + AssertionResult Test() const { + for (int count = 0;; ++count) { + exceptions_internal::ConstructorTracker ct(count); + + for (const auto& contract : contracts_) { + auto t_ptr = factory_(); + try { + SetCountdown(count); + operation_(t_ptr.get()); + // Unset for the case that the operation throws no exceptions, which + // would leave the countdown set and break the *next* exception safety + // test after this one. 
+ UnsetCountdown(); + return AssertionSuccess(); + } catch (const exceptions_internal::TestException& e) { + if (!contract(t_ptr.get())) { + return AssertionFailure() << e.what() << " failed contract check"; + } + } + } + } + } + + private: + template + Contract WrapContract(const ContractFn& contract) { + return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); }; + } + + Contract WrapContract(StrongGuaranteeTagType) { + return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); }; + } + + Factory factory_; + Operation operation_; + std::vector contracts_; +}; + +/* + * Builds a tester object that tests if performing a operation on a T follows + * exception safety guarantees. Verification is done via contract assertion + * callbacks applied to T instances post-throw. + * + * Template parameters for ExceptionSafetyTestBuilder: + * + * - Factory: The factory object (passed in via tester.WithFactory(...) or + * tester.WithInitialValue(...)) must be invocable with the signature + * `std::unique_ptr operator()() const` where T is the type being tested. + * It is used for reliably creating identical T instances to test on. + * + * - Operation: The operation object (passsed in via tester.WithOperation(...) + * or tester.Test(...)) must be invocable with the signature + * `void operator()(T*) const` where T is the type being tested. It is used + * for performing steps on a T instance that may throw and that need to be + * checked for exception safety. Each call to the operation will receive a + * fresh T instance so it's free to modify and destroy the T instances as it + * pleases. + * + * - Contracts...: The contract assertion callback objects (passed in via + * tester.WithContracts(...)) must be invocable with the signature + * `testing::AssertionResult operator()(T*) const` where T is the type being + * tested. Contract assertion callbacks are provided T instances post-throw. + * They must return testing::AssertionSuccess when the type contracts of the + * provided T instance hold. If the type contracts of the T instance do not + * hold, they must return testing::AssertionFailure. Execution order of + * Contracts... is unspecified. They will each individually get a fresh T + * instance so they are free to modify and destroy the T instances as they + * please. + */ +template +class ExceptionSafetyTestBuilder { + public: + /* + * Returns a new ExceptionSafetyTestBuilder with an included T factory based + * on the provided T instance. The existing factory will not be included in + * the newly created tester instance. The created factory returns a new T + * instance by copy-constructing the provided const T& t. + * + * Preconditions for tester.WithInitialValue(const T& t): + * + * - The const T& t object must be copy-constructible where T is the type + * being tested. For non-copy-constructible objects, use the method + * tester.WithFactory(...). + */ + template + ExceptionSafetyTestBuilder, Operation, Contracts...> + WithInitialValue(const T& t) const { + return WithFactory(DefaultFactory(t)); + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided T factory + * included. The existing factory will not be included in the newly-created + * tester instance. This method is intended for use with types lacking a copy + * constructor. Types that can be copy-constructed should instead use the + * method tester.WithInitialValue(...). 
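+   *
+   * A sketch for a non-copyable type (`MyType` is a placeholder):
+   *
+   *   auto tester = testing::MakeExceptionSafetyTester().WithFactory(
+   *       []() { return absl::make_unique<MyType>(); });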
+ */ + template + ExceptionSafetyTestBuilder, Operation, Contracts...> + WithFactory(const NewFactory& new_factory) const { + return {new_factory, operation_, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided testable + * operation included. The existing operation will not be included in the + * newly created tester. + */ + template + ExceptionSafetyTestBuilder, Contracts...> + WithOperation(const NewOperation& new_operation) const { + return {factory_, new_operation, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts... + * combined with the Contracts... that were already included in the instance + * on which the method was called. Contracts... cannot be removed or replaced + * once added to an ExceptionSafetyTestBuilder instance. A fresh object must + * be created in order to get an empty Contracts... list. + * + * In addition to passing in custom contract assertion callbacks, this method + * accepts `testing::strong_guarantee` as an argument which checks T instances + * post-throw against freshly created T instances via operator== to verify + * that any state changes made during the execution of the operation were + * properly rolled back. + */ + template + ExceptionSafetyTestBuilder...> + WithContracts(const MoreContracts&... more_contracts) const { + return { + factory_, operation_, + std::tuple_cat(contracts_, std::tuple...>( + more_contracts...))}; + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * The passed-in testable operation will not be saved in a new tester instance + * nor will it modify/replace the existing tester instance. This is useful + * when each operation being tested is unique and does not need to be reused. + * + * Preconditions for tester.Test(const NewOperation& new_operation): + * + * - May only be called after at least one contract assertion callback and a + * factory or initial value have been provided. + */ + template < + typename NewOperation, + typename = EnableIfTestable> + testing::AssertionResult Test(const NewOperation& new_operation) const { + return TestImpl(new_operation, absl::index_sequence_for()); + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * Preconditions for tester.Test(): + * + * - May only be called after at least one contract assertion callback, a + * factory or initial value and a testable operation have been provided. 
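+   *
+   * A minimal sketch (`MyType` and `Operate` are placeholders for the type
+   * under test and a potentially throwing operation on it):
+   *
+   *   EXPECT_TRUE(testing::MakeExceptionSafetyTester()
+   *                   .WithInitialValue(MyType{})
+   *                   .WithContracts(testing::strong_guarantee)
+   *                   .WithOperation([](MyType* t) { Operate(t); })
+   *                   .Test());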
+ */ + template < + typename LazyOperation = Operation, + typename = EnableIfTestable> + testing::AssertionResult Test() const { + return Test(operation_); + } + + private: + template + friend class ExceptionSafetyTestBuilder; + + friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester(); + + ExceptionSafetyTestBuilder() {} + + ExceptionSafetyTestBuilder(const Factory& f, const Operation& o, + const std::tuple& i) + : factory_(f), operation_(o), contracts_(i) {} + + template + testing::AssertionResult TestImpl(SelectedOperation selected_operation, + absl::index_sequence) const { + return ExceptionSafetyTest>( + factory_, selected_operation, std::get(contracts_)...) + .Test(); + } + + Factory factory_; + Operation operation_; + std::tuple contracts_; +}; + +} // namespace exceptions_internal + +} // namespace testing + +#endif // ABSL_HAVE_EXCEPTIONS + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ diff --git a/base/abseil/absl/base/internal/exception_testing.h b/base/abseil/absl/base/internal/exception_testing.h new file mode 100644 index 0000000..01b5465 --- /dev/null +++ b/base/abseil/absl/base/internal/exception_testing.h @@ -0,0 +1,42 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Testing utilities for ABSL types which throw exceptions. + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ + +#include "gtest/gtest.h" +#include "absl/base/config.h" + +// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception +// if exceptions are enabled, or for death with a specified text in the error +// message +#ifdef ABSL_HAVE_EXCEPTIONS + +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_THROW(expr, exception_t) + +#elif defined(__ANDROID__) +// Android asserts do not log anywhere that gtest can currently inspect. +// So we expect exit, but cannot match the message. +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH(expr, ".*") +#else +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH_IF_SUPPORTED(expr, text) + +#endif + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ diff --git a/base/abseil/absl/base/internal/exponential_biased.cc b/base/abseil/absl/base/internal/exponential_biased.cc new file mode 100644 index 0000000..1b30c06 --- /dev/null +++ b/base/abseil/absl/base/internal/exponential_biased.cc @@ -0,0 +1,93 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/exponential_biased.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cmath>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// The algorithm generates a random number between 0 and 1 and applies the
+// inverse cumulative distribution function for an exponential. Specifically:
+// Let m be the inverse of the sample period, then the probability
+// distribution function is m*exp(-mx) so the CDF is
+// p = 1 - exp(-mx), so
+// q = 1 - p = exp(-mx)
+// log_e(q) = -mx
+// -log_e(q)/m = x
+// log_2(q) * (-log_e(2) * 1/m) = x
+// In the code, q is actually in the range 1 to 2**26, hence the -26 below
+int64_t ExponentialBiased::GetSkipCount(int64_t mean) {
+  if (ABSL_PREDICT_FALSE(!initialized_)) {
+    Initialize();
+  }
+
+  uint64_t rng = NextRandom(rng_);
+  rng_ = rng;
+
+  // Take the top 26 bits as the random number
+  // (This plus the 1<<58 sampling bound give a max possible step of
+  // 5194297183973780480 bytes.)
+  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
+  // under piii debug for some binaries.
+  double q = static_cast<uint32_t>(rng >> (kPrngNumBits - 26)) + 1.0;
+  // Put the computed p-value through the CDF of a geometric.
+  double interval = bias_ + (std::log2(q) - 26) * (-std::log(2.0) * mean);
+  // Very large values of interval overflow int64_t. To avoid that, we will
+  // cheat and clamp any huge values to (int64_t max)/2. This is a potential
+  // source of bias, but the mean would need to be such a large value that it's
+  // not likely to come up. For example, with a mean of 1e18, the probability of
+  // hitting this condition is about 1/1000. For a mean of 1e17, standard
+  // calculators claim that this event won't happen.
+  if (interval > static_cast<double>(std::numeric_limits<int64_t>::max() / 2)) {
+    // Assume huge values are bias neutral, retain bias for next call.
+    return std::numeric_limits<int64_t>::max() / 2;
+  }
+  double value = std::round(interval);
+  bias_ = interval - value;
+  return value;
+}
+
+int64_t ExponentialBiased::GetStride(int64_t mean) {
+  return GetSkipCount(mean - 1) + 1;
+}
+
+void ExponentialBiased::Initialize() {
+  // We don't get well distributed numbers from `this` so we call NextRandom() a
+  // bunch to mush the bits around. We use a global_rand to handle the case
+  // where the same thread (by memory address) gets created and destroyed
+  // repeatedly.
+  ABSL_CONST_INIT static std::atomic<uint64_t> global_rand(0);
+  uint64_t r = reinterpret_cast<uint64_t>(this) +
+               global_rand.fetch_add(1, std::memory_order_relaxed);
+  for (int i = 0; i < 20; ++i) {
+    r = NextRandom(r);
+  }
+  rng_ = r;
+  initialized_ = true;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/exponential_biased.h b/base/abseil/absl/base/internal/exponential_biased.h
new file mode 100644
index 0000000..94f79a3
--- /dev/null
+++ b/base/abseil/absl/base/internal/exponential_biased.h
@@ -0,0 +1,130 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_
+#define ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_
+
+#include <stdint.h>
+
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// ExponentialBiased provides a small and fast random number generator for a
+// rounded exponential distribution. This generator manages very little state,
+// and imposes no synchronization overhead. This makes it useful in specialized
+// scenarios requiring minimum overhead, such as stride based periodic sampling.
+//
+// ExponentialBiased provides two closely related functions, GetSkipCount() and
+// GetStride(), both returning a rounded integer defining a number of events
+// required before some event with a given mean probability occurs.
+//
+// The distribution is useful to generate a random wait time or some periodic
+// event with a given mean probability. For example, if an action is supposed
+// to happen on average once every 'N' events, then we can get a random
+// 'stride' counting down how long before the event is to happen. For example,
+// if we'd want to sample one in every 1000 'Frobber' calls, our code could
+// look like this:
+//
+//   Frobber::Frobber() {
+//     stride_ = exponential_biased_.GetStride(1000);
+//   }
+//
+//   void Frobber::Frob(int arg) {
+//     if (--stride_ == 0) {
+//       SampleFrob(arg);
+//       stride_ = exponential_biased_.GetStride(1000);
+//     }
+//     ...
+//   }
+//
+// The rounding of the return value creates a bias, especially for smaller
+// means where the distribution of the fraction is not evenly distributed. We
+// correct this bias by tracking the fraction we rounded up or down on each
+// iteration, effectively tracking the distance between the cumulative value
+// and the rounded cumulative value. For example, given a mean of 2:
+//
+//   raw = 1.63076, cumulative = 1.63076, rounded = 2, bias = -0.36923
+//   raw = 0.14624, cumulative = 1.77701, rounded = 2, bias =  0.14624
+//   raw = 4.93194, cumulative = 6.70895, rounded = 7, bias = -0.06805
+//   raw = 0.24206, cumulative = 6.95101, rounded = 7, bias =  0.24206
+//   etc...
+//
+// Adjusting with rounding bias is relatively trivial:
+//
+//   double value = bias_ + exponential_distribution(mean)();
+//   double rounded_value = std::round(value);
+//   bias_ = value - rounded_value;
+//   return rounded_value;
+//
+// This class is thread-compatible.
+class ExponentialBiased {
+ public:
+  // The number of bits set by NextRandom.
+  static constexpr int kPrngNumBits = 48;
+
+  // `GetSkipCount()` returns the number of events to skip before some chosen
+  // event happens. For example, randomly tossing a coin, we will on average
+  // throw heads once before we get tails. We can simulate random coin tosses
+  // using GetSkipCount() as:
+  //
+  //   ExponentialBiased eb;
+  //   for (...) {
+  //     int number_of_heads_before_tail = eb.GetSkipCount(1);
+  //     for (int flips = 0; flips < number_of_heads_before_tail; ++flips) {
+  //       printf("head...");
+  //     }
+  //     printf("tail\n");
+  //   }
+  //
+  int64_t GetSkipCount(int64_t mean);
+
+  // GetStride() returns the number of events required for a specific event to
+  // happen. See the class comments for a usage example. `GetStride()` is
+  // equivalent to `GetSkipCount(mean - 1) + 1`. When to use `GetStride()` or
+  // `GetSkipCount()` depends mostly on what best fits the use case.
+  int64_t GetStride(int64_t mean);
+
+  // Computes a random number in the range [0, 1<<(kPrngNumBits+1) - 1]
+  //
+  // This is public to enable testing.
+  static uint64_t NextRandom(uint64_t rnd);
+
+ private:
+  void Initialize();
+
+  uint64_t rng_{0};
+  double bias_{0};
+  bool initialized_{false};
+};
+
+// Returns the next prng value.
+// pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48
+// This is the lrand64 generator.
+inline uint64_t ExponentialBiased::NextRandom(uint64_t rnd) {
+  const uint64_t prng_mult = uint64_t{0x5DEECE66D};
+  const uint64_t prng_add = 0xB;
+  const uint64_t prng_mod_power = 48;
+  const uint64_t prng_mod_mask =
+      ~((~static_cast<uint64_t>(0)) << prng_mod_power);
+  return (prng_mult * rnd + prng_add) & prng_mod_mask;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_
diff --git a/base/abseil/absl/base/internal/exponential_biased_test.cc b/base/abseil/absl/base/internal/exponential_biased_test.cc
new file mode 100644
index 0000000..90a482d
--- /dev/null
+++ b/base/abseil/absl/base/internal/exponential_biased_test.cc
@@ -0,0 +1,199 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/exponential_biased.h"
+
+#include <stddef.h>
+
+#include <cmath>
+#include <cstdint>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/str_cat.h"
+
+using ::testing::Ge;
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+MATCHER_P2(IsBetween, a, b,
+           absl::StrCat(std::string(negation ? "isn't" : "is"), " between ", a,
+                        " and ", b)) {
+  return a <= arg && arg <= b;
+}
+
+// Tests of the quality of the random numbers generated
+// This uses the Anderson Darling test for uniformity.
+// See "Evaluating the Anderson-Darling Distribution" by Marsaglia
+// for details.
+
+// Short cut version of ADinf(z), z>0 (from Marsaglia)
+// This returns the p-value for Anderson Darling statistic in
+// the limit as n-> infinity. For finite n, apply the error fix below.
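+//
+// For reference (derived from the code below, not part of the original
+// source): given sorted uniform samples u_0 <= ... <= u_{n-1}, the statistic
+// computed by AndersonDarlingStatistic() is
+//   A^2 = -n - (1/n) * sum_{i=0}^{n-1} (2i+1) * ln(u_i * (1 - u_{n-1-i}))
+// and AndersonDarlingPValue() maps A^2 to a p-value using the ADinf()
+// approximation plus the finite-n error fix.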
+double AndersonDarlingInf(double z) {
+  if (z < 2) {
+    return exp(-1.2337141 / z) / sqrt(z) *
+           (2.00012 +
+            (0.247105 -
+             (0.0649821 - (0.0347962 - (0.011672 - 0.00168691 * z) * z) * z) *
+                 z) *
+                z);
+  }
+  return exp(
+      -exp(1.0776 -
+           (2.30695 -
+            (0.43424 - (0.082433 - (0.008056 - 0.0003146 * z) * z) * z) * z) *
+               z));
+}
+
+// Corrects the approximation error in AndersonDarlingInf for small values of n
+// Add this to AndersonDarlingInf to get a better approximation
+// (from Marsaglia)
+double AndersonDarlingErrFix(int n, double x) {
+  if (x > 0.8) {
+    return (-130.2137 +
+            (745.2337 -
+             (1705.091 - (1950.646 - (1116.360 - 255.7844 * x) * x) * x) * x) *
+                x) /
+           n;
+  }
+  double cutoff = 0.01265 + 0.1757 / n;
+  if (x < cutoff) {
+    double t = x / cutoff;
+    t = sqrt(t) * (1 - t) * (49 * t - 102);
+    return t * (0.0037 / (n * n) + 0.00078 / n + 0.00006) / n;
+  } else {
+    double t = (x - cutoff) / (0.8 - cutoff);
+    t = -0.00022633 +
+        (6.54034 - (14.6538 - (14.458 - (8.259 - 1.91864 * t) * t) * t) * t) *
+            t;
+    return t * (0.04213 + 0.01365 / n) / n;
+  }
+}
+
+// Returns the AndersonDarling p-value given n and the value of the statistic
+double AndersonDarlingPValue(int n, double z) {
+  double ad = AndersonDarlingInf(z);
+  double errfix = AndersonDarlingErrFix(n, ad);
+  return ad + errfix;
+}
+
+double AndersonDarlingStatistic(const std::vector<double>& random_sample) {
+  int n = random_sample.size();
+  double ad_sum = 0;
+  for (int i = 0; i < n; i++) {
+    ad_sum += (2 * i + 1) *
+              std::log(random_sample[i] * (1 - random_sample[n - 1 - i]));
+  }
+  double ad_statistic = -n - 1 / static_cast<double>(n) * ad_sum;
+  return ad_statistic;
+}
+
+// Tests if the array of doubles is uniformly distributed.
+// Returns the p-value of the Anderson Darling Statistic
+// for the given set of sorted random doubles
+// See "Evaluating the Anderson-Darling Distribution" by
+// Marsaglia and Marsaglia for details.
+double AndersonDarlingTest(const std::vector<double>& random_sample) {
+  double ad_statistic = AndersonDarlingStatistic(random_sample);
+  double p = AndersonDarlingPValue(random_sample.size(), ad_statistic);
+  return p;
+}
+
+TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) {
+  ExponentialBiased eb;
+  for (int runs = 0; runs < 10; ++runs) {
+    for (int flips = eb.GetSkipCount(1); flips > 0; --flips) {
+      printf("head...");
+    }
+    printf("tail\n");
+  }
+  int heads = 0;
+  for (int i = 0; i < 10000000; i += 1 + eb.GetSkipCount(1)) {
+    ++heads;
+  }
+  printf("Heads = %d (%f%%)\n", heads, 100.0 * heads / 10000000);
+}
+
+TEST(ExponentialBiasedTest, SampleDemoWithStride) {
+  ExponentialBiased eb;
+  int stride = eb.GetStride(10);
+  int samples = 0;
+  for (int i = 0; i < 10000000; ++i) {
+    if (--stride == 0) {
+      ++samples;
+      stride = eb.GetStride(10);
+    }
+  }
+  printf("Samples = %d (%f%%)\n", samples, 100.0 * samples / 10000000);
+}
+
+
+// Testing that NextRandom generates uniform random numbers. Applies the
+// Anderson-Darling test for uniformity
+TEST(ExponentialBiasedTest, TestNextRandom) {
+  for (auto n : std::vector<int>({
+           10,  // Check short-range correlation
+           100, 1000,
+           10000  // Make sure there's no systemic error
+       })) {
+    uint64_t x = 1;
+    // This assumes that the prng returns 48 bit numbers
+    uint64_t max_prng_value = static_cast<uint64_t>(1) << 48;
+    // Initialize.
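+    // (The twenty warm-up calls below let the LCG mix away the low-entropy
+    // seed x = 1 before any samples are collected.)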
+    for (int i = 1; i <= 20; i++) {
+      x = ExponentialBiased::NextRandom(x);
+    }
+    std::vector<uint64_t> int_random_sample(n);
+    // Collect samples
+    for (int i = 0; i < n; i++) {
+      int_random_sample[i] = x;
+      x = ExponentialBiased::NextRandom(x);
+    }
+    // First sort them...
+    std::sort(int_random_sample.begin(), int_random_sample.end());
+    std::vector<double> random_sample(n);
+    // Convert them to uniform randoms (in the range [0,1])
+    for (int i = 0; i < n; i++) {
+      random_sample[i] =
+          static_cast<double>(int_random_sample[i]) / max_prng_value;
+    }
+    // Now compute the Anderson-Darling statistic
+    double ad_pvalue = AndersonDarlingTest(random_sample);
+    EXPECT_GT(std::min(ad_pvalue, 1 - ad_pvalue), 0.0001)
+        << "prng is not uniform: n = " << n << " p = " << ad_pvalue;
+  }
+}
+
+// The generator needs to be available as a thread_local and as a static
+// variable.
+TEST(ExponentialBiasedTest, InitializationModes) {
+  ABSL_CONST_INIT static ExponentialBiased eb_static;
+  EXPECT_THAT(eb_static.GetSkipCount(2), Ge(0));
+
+#if ABSL_HAVE_THREAD_LOCAL
+  thread_local ExponentialBiased eb_thread;
+  EXPECT_THAT(eb_thread.GetSkipCount(2), Ge(0));
+#endif
+
+  ExponentialBiased eb_stack;
+  EXPECT_THAT(eb_stack.GetSkipCount(2), Ge(0));
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/hide_ptr.h b/base/abseil/absl/base/internal/hide_ptr.h
new file mode 100644
index 0000000..1dba809
--- /dev/null
+++ b/base/abseil/absl/base/internal/hide_ptr.h
@@ -0,0 +1,51 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
+#define ABSL_BASE_INTERNAL_HIDE_PTR_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Arbitrary value with high bits set. Xor'ing with it is unlikely
+// to map one valid pointer to another valid pointer.
+constexpr uintptr_t HideMask() {
+  return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
+}
+
+// Hide a pointer from the leak checker. For internal use only.
+// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
+// and all objects reachable from ptr to be ignored by the leak checker.
+template <class T>
+inline uintptr_t HidePtr(T* ptr) {
+  return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
+}
+
+// Return a pointer that has been hidden from the leak checker.
+// For internal use only.
+template <class T>
+inline T* UnhidePtr(uintptr_t hidden) {
+  return reinterpret_cast<T*>(hidden ^ HideMask());
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_HIDE_PTR_H_
diff --git a/base/abseil/absl/base/internal/identity.h b/base/abseil/absl/base/internal/identity.h
new file mode 100644
index 0000000..a3154ed
--- /dev/null
+++ b/base/abseil/absl/base/internal/identity.h
@@ -0,0 +1,37 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_IDENTITY_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace internal {
+
+template <typename T>
+struct identity {
+  typedef T type;
+};
+
+template <typename T>
+using identity_t = typename identity<T>::type;
+
+}  // namespace internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_IDENTITY_H_
diff --git a/base/abseil/absl/base/internal/inline_variable.h b/base/abseil/absl/base/internal/inline_variable.h
new file mode 100644
index 0000000..130d8c2
--- /dev/null
+++ b/base/abseil/absl/base/internal/inline_variable.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+
+#include <type_traits>
+
+#include "absl/base/internal/identity.h"
+
+// File:
+//   This file defines a macro that allows the creation, or emulation, of C++17
+//   inline variables based on whether or not the feature is supported.
+
+////////////////////////////////////////////////////////////////////////////////
+// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init)
+//
+// Description:
+//   Expands to the equivalent of an inline constexpr instance of the specified
+//   `type` and `name`, initialized to the value `init`. If the compiler being
+//   used is detected as supporting actual inline variables as a language
+//   feature, then the macro expands to an actual inline variable definition.
+//
+// Requires:
+//   `type` is a type that is usable in an extern variable declaration.
+//
+// Requires: `name` is a valid identifier
+//
+// Requires:
+//   `init` is an expression that can be used in the following definition:
+//     constexpr type name = init;
+//
+// Usage:
+//
+//   // Equivalent to: `inline constexpr size_t variant_npos = -1;`
+//   ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
+//
+// Differences in implementation:
+//   For a direct, language-level inline variable, decltype(name) will be the
+//   type that was specified along with const qualification, whereas for
+//   emulated inline variables, decltype(name) may be different (in practice
+//   it will likely be a reference type).
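+//
+// Illustration (a sketch, not part of the original header): under the
+// emulation path defined below, the `variant_npos` usage above expands to
+// roughly
+//
+//   template <class = void>
+//   struct AbslInternalInlineVariableHoldervariant_npos {
+//     static constexpr size_t kInstance = -1;
+//   };
+//   static constexpr const size_t& variant_npos =
+//       AbslInternalInlineVariableHoldervariant_npos<>::kInstance;
+//
+// so every translation unit references the same templated static, which the
+// linker deduplicates; this is also why decltype(name) is a reference type
+// under emulation.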
+////////////////////////////////////////////////////////////////////////////////
+
+#ifdef __cpp_inline_variables
+
+// Clang's -Wmissing-variable-declarations option erroneously warned that
+// inline constexpr objects need to be pre-declared. This has now been fixed,
+// but we will need to support this workaround for people building with older
+// versions of clang.
+//
+// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
+//
+// Note:
+//   identity_t is used here so that the const and name are in the
+//   appropriate place for pointer types, reference types, function pointer
+//   types, etc..
+#if defined(__clang__)
+#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
+  extern const ::absl::internal::identity_t<type> name;
+#else  // Otherwise, just define the macro to do nothing.
+#define ABSL_INTERNAL_EXTERN_DECL(type, name)
+#endif  // defined(__clang__)
+
+// See above comment at top of file for details.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
+  ABSL_INTERNAL_EXTERN_DECL(type, name)                  \
+  inline constexpr ::absl::internal::identity_t<type> name = init
+
+#else
+
+// See above comment at top of file for details.
+//
+// Note:
+//   identity_t is used here so that the const and name are in the
+//   appropriate place for pointer types, reference types, function pointer
+//   types, etc..
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init)                  \
+  template <class /*AbslInternalDummy*/ = void>                               \
+  struct AbslInternalInlineVariableHolder##name {                             \
+    static constexpr ::absl::internal::identity_t<var_type> kInstance = init; \
+  };                                                                          \
+                                                                              \
+  template <class AbslInternalDummy>                                          \
+  constexpr ::absl::internal::identity_t<var_type>                            \
+      AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance;   \
+                                                                              \
+  static constexpr const ::absl::internal::identity_t<var_type>&              \
+      name = /* NOLINT */                                                     \
+      AbslInternalInlineVariableHolder##name<>::kInstance;                    \
+  static_assert(sizeof(void (*)(decltype(name))) != 0,                        \
+                "Silence unused variable warnings.")
+
+#endif  // __cpp_inline_variables
+
+#endif  // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
diff --git a/base/abseil/absl/base/internal/inline_variable_testing.h b/base/abseil/absl/base/internal/inline_variable_testing.h
new file mode 100644
index 0000000..3856b9f
--- /dev/null
+++ b/base/abseil/absl/base/internal/inline_variable_testing.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+
+#include "absl/base/internal/inline_variable.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inline_variable_testing_internal {
+
+struct Foo {
+  int value = 5;
+};
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(void (*)(), inline_variable_fun_ptr, nullptr);
+
+const Foo& get_foo_a();
+const Foo& get_foo_b();
+
+const int& get_int_a();
+const int& get_int_b();
+
+}  // namespace inline_variable_testing_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INLINE_VARIABLE_TESTING_H_
diff --git a/base/abseil/absl/base/internal/invoke.h b/base/abseil/absl/base/internal/invoke.h
new file mode 100644
index 0000000..c4eceeb
--- /dev/null
+++ b/base/abseil/absl/base/internal/invoke.h
@@ -0,0 +1,187 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// absl::base_internal::Invoke(f, args...) is an implementation of
+// INVOKE(f, args...) from section [func.require] of the C++ standard.
+//
+// [func.require]
+// Define INVOKE (f, t1, t2, ..., tN) as follows:
+// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+//    and t1 is an object of type T or a reference to an object of type T or a
+//    reference to an object of a type derived from T;
+// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+//    class T and t1 is not one of the types described in the previous item;
+// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+//    an object of type T or a reference to an object of type T or a reference
+//    to an object of a type derived from T;
+// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+//    is not one of the types described in the previous item;
+// 5. f(t1, t2, ..., tN) in all other cases.
+//
+// The implementation is SFINAE-friendly: substitution failure within Invoke()
+// isn't an error.
+
+#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
+#define ABSL_BASE_INTERNAL_INVOKE_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+
+#include "absl/meta/type_traits.h"
+
+// The following code is internal implementation detail. See the comment at the
+// top of this file for the API documentation.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// The five classes below each implement one of the clauses from the definition
+// of INVOKE. The inner class template Accept<F, Args...> checks whether the
+// clause is applicable; static function template Invoke(f, args...) does the
+// invocation.
+//
+// By separating the clause selection logic from invocation we make sure that
+// Invoke() does exactly what the standard says.
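+//
+// Example mapping of the clauses above (a sketch; `Foo` is illustrative and
+// not part of this header):
+//
+//   struct Foo {
+//     int v;
+//     int Get() const { return v; }
+//   };
+//   Foo foo{42};
+//   base_internal::Invoke(&Foo::Get, foo);                  // clause 1 -> 42
+//   base_internal::Invoke(&Foo::Get, &foo);                 // clause 2 -> 42
+//   base_internal::Invoke(&Foo::v, foo);                    // clause 3 -> 42
+//   base_internal::Invoke(&Foo::v, &foo);                   // clause 4 -> 42
+//   base_internal::Invoke([](int x) { return x + 1; }, 1);  // clause 5 -> 2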
+
+template <typename Derived>
+struct StrippedAccept {
+  template <typename... Args>
+  struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
+                      typename std::remove_reference<Args>::type>::type...> {};
+};
+
+// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+// and t1 is an object of type T or a reference to an object of type T or a
+// reference to an object of a type derived from T.
+struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename MemFunType, typename C, typename Obj, typename... Args>
+  struct AcceptImpl<MemFunType C::*, Obj, Args...>
+      : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
+                                         absl::is_function<MemFunType>::value> {
+  };
+
+  template <typename MemFun, typename Obj, typename... Args>
+  static decltype((std::declval<Obj>().*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+    return (std::forward<Obj>(obj).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+// class T and t1 is not one of the types described in the previous item.
+struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename MemFunType, typename C, typename Ptr, typename... Args>
+  struct AcceptImpl<MemFunType C::*, Ptr, Args...>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
+                                         absl::is_function<MemFunType>::value> {
+  };
+
+  template <typename MemFun, typename Ptr, typename... Args>
+  static decltype(((*std::declval<Ptr>()).*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
+    return ((*std::forward<Ptr>(ptr)).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+// an object of type T or a reference to an object of type T or a reference
+// to an object of a type derived from T.
+struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Obj>
+  struct AcceptImpl<R C::*, Obj>
+      : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
+                                         !absl::is_function<R>::value> {};
+
+  template <typename DataMem, typename Ref>
+  static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ref&& ref) {
+    return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+// is not one of the types described in the previous item.
+struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Ptr>
+  struct AcceptImpl<R C::*, Ptr>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
+                                         !absl::is_function<R>::value> {};
+
+  template <typename DataMem, typename Ptr>
+  static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ptr&& ptr) {
+    return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// f(t1, t2, ..., tN) in all other cases.
+struct Callable {
+  // Callable doesn't have Accept because it's the last clause that gets picked
+  // when none of the previous clauses are applicable.
+  template <typename F, typename... Args>
+  static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
+      F&& f, Args&&... args) {
+    return std::forward<F>(f)(std::forward<Args>(args)...);
+  }
+};
+
+// Resolves to the first matching clause.
+template <typename... Args>
+struct Invoker {
+  typedef typename std::conditional<
+      MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
+      typename std::conditional<
+          MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
+          typename std::conditional<
+              DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
+              typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
+                                        DataMemAndPtr, Callable>::type>::type>::
+          type>::type type;
+};
+
+// The result type of Invoke.
+template <typename F, typename... Args>
+using InvokeT = decltype(Invoker<F, Args...>::type::Invoke(
+    std::declval<F>(), std::declval<Args>()...));
+
+// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
+// [func.require] of the C++ standard.
+template <typename F, typename... Args>
+InvokeT<F, Args...> Invoke(F&& f, Args&&... args) {
+  return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
+                                           std::forward<Args>(args)...);
+}
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_INVOKE_H_
diff --git a/base/abseil/absl/base/internal/low_level_alloc.cc b/base/abseil/absl/base/internal/low_level_alloc.cc
new file mode 100644
index 0000000..225abc2
--- /dev/null
+++ b/base/abseil/absl/base/internal/low_level_alloc.cc
@@ -0,0 +1,619 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A low-level allocator that can be used by other low-level
+// modules without introducing dependency cycles.
+// This allocator is slow and wasteful of memory;
+// it should not be used when performance is key.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <type_traits>
+
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/direct_mmap.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#else
+#include <windows.h>
+#endif
+
+#include <string.h>
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstddef>
+#include <new>  // for placement-new
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// MAP_ANONYMOUS
+#if defined(__APPLE__)
+// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
+// deprecated. In Darwin, MAP_ANON is all there is.
+#if !defined MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif  // !MAP_ANONYMOUS
+#endif  // __APPLE__
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A first-fit allocator with amortized logarithmic free() time.
+
+// ---------------------------------------------------------------------------
+static const int kMaxLevel = 30;
+
+namespace {
+// This struct describes one allocated block, or one free block.
+struct AllocList {
+  struct Header {
+    // Size of entire region, including this field. Must be
+    // first. Valid in both allocated and unallocated blocks.
+    uintptr_t size;
+
+    // kMagicAllocated or kMagicUnallocated xor this.
+    uintptr_t magic;
+
+    // Pointer to parent arena.
+    LowLevelAlloc::Arena *arena;
+
+    // Aligns regions to 0 mod 2*sizeof(void*).
+    void *dummy_for_alignment;
+  } header;
+
+  // Next two fields: in unallocated blocks: freelist skiplist data
+  //                  in allocated blocks: overlaps with client data
+
+  // Levels in skiplist used.
+  int levels;
+
+  // Actually has levels elements. The AllocList node may not have room
+  // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
+  AllocList *next[kMaxLevel];
+};
+}  // namespace
+
+// ---------------------------------------------------------------------------
+// A trivial skiplist implementation. This is used to keep the freelist
+// in address order while taking only logarithmic time per insert and delete.
+
+// An integer approximation of log2(size/base)
+// Requires size >= base.
+static int IntLog2(size_t size, size_t base) {
+  int result = 0;
+  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
+    result++;
+  }
+  // floor(size / 2**result) <= base < floor(size / 2**(result-1))
+  // => log2(size/(base+1)) <= result < 1+log2(size/base)
+  // => result ~= log2(size/base)
+  return result;
+}
+
+// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
+static int Random(uint32_t *state) {
+  uint32_t r = *state;
+  int result = 1;
+  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+    result++;
+  }
+  *state = r;
+  return result;
+}
+
+// Return a number of skiplist levels for a node of size bytes, where
+// base is the minimum node size. Compute level=log2(size/base)+n
+// where n is 1 if random is false and otherwise a random number generated with
+// the standard distribution for a skiplist: See Random() above.
+// Bigger nodes tend to have more skiplist levels due to the log2(size/base)
+// term, so first-fit searches touch fewer nodes. "level" is clipped so
+// level<kMaxLevel and next[level-1] will fit in the node.
+// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
+static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
+  // max_fit is the maximum number of levels that will fit in a node for the
+  // given size. We can't return more than max_fit, no matter what the
+  // random number generator says.
+  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
+  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
+  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
+  if (level > kMaxLevel-1) level = kMaxLevel - 1;
+  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
+  return level;
+}
+
+// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
+// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
+// points to the last element at level i in the AllocList less than *e, or is
+// head if no such element exists.
+static AllocList *LLA_SkiplistSearch(AllocList *head,
+                                     AllocList *e, AllocList **prev) {
+  AllocList *p = head;
+  for (int level = head->levels - 1; level >= 0; level--) {
+    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
+    }
+    prev[level] = p;
+  }
+  return (head->levels == 0) ? nullptr : prev[0]->next[0];
+}
+
+// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  LLA_SkiplistSearch(head, e, prev);
+  for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
+    prev[head->levels] = head;                        // to all *e's levels
+  }
+  for (int i = 0; i != e->levels; i++) {  // add element to list
+    e->next[i] = prev[i]->next[i];
+    prev[i]->next[i] = e;
+  }
+}
+
+// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  AllocList *found = LLA_SkiplistSearch(head, e, prev);
+  ABSL_RAW_CHECK(e == found, "element not in freelist");
+  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
+    prev[i]->next[i] = e->next[i];
+  }
+  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
+    head->levels--;  // reduce head->levels if level unused
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Arena implementation
+
+// Metadata for an LowLevelAlloc arena instance.
+struct LowLevelAlloc::Arena {
+  // Constructs an arena with the given LowLevelAlloc flags.
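+  // (These are the kCallMallocHook / kAsyncSignalSafe bits declared in
+  // low_level_alloc.h.)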
+  explicit Arena(uint32_t flags_value);
+
+  base_internal::SpinLock mu;
+  // Head of free list, sorted by address
+  AllocList freelist ABSL_GUARDED_BY(mu);
+  // Count of allocated blocks
+  int32_t allocation_count ABSL_GUARDED_BY(mu);
+  // flags passed to NewArena
+  const uint32_t flags;
+  // Result of sysconf(_SC_PAGESIZE)
+  const size_t pagesize;
+  // Lowest power of two >= max(16, sizeof(AllocList))
+  const size_t round_up;
+  // Smallest allocation block size
+  const size_t min_size;
+  // PRNG state
+  uint32_t random ABSL_GUARDED_BY(mu);
+};
+
+namespace {
+using ArenaStorage = std::aligned_storage<sizeof(LowLevelAlloc::Arena),
+                                          alignof(LowLevelAlloc::Arena)>::type;
+
+// Static storage space for the lazily-constructed, default global arena
+// instances. We require this space because the whole point of LowLevelAlloc
+// is to avoid relying on malloc/new.
+ArenaStorage default_arena_storage;
+ArenaStorage unhooked_arena_storage;
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+ArenaStorage unhooked_async_sig_safe_arena_storage;
+#endif
+
+// We must use LowLevelCallOnce here to construct the global arenas, rather
+// than using function-level statics, to avoid recursively invoking the
+// scheduler.
+absl::once_flag create_globals_once;
+
+void CreateGlobalArenas() {
+  new (&default_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
+  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  new (&unhooked_async_sig_safe_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
+#endif
+}
+
+// Returns a global arena that does not call into hooks. Used by NewArena()
+// when kCallMallocHook is not set.
+LowLevelAlloc::Arena* UnhookedArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
+}
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+// Returns a global arena that is async-signal safe. Used by NewArena() when
+// kAsyncSignalSafe is set.
+LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(
+      &unhooked_async_sig_safe_arena_storage);
+}
+#endif
+
+}  // namespace
+
+// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
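+// (It is constructed lazily via LowLevelCallOnce above, which is why the
+// allocator is safe to use before main().)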
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
+}
+
+// magic numbers to identify allocated and unallocated blocks
+static const uintptr_t kMagicAllocated = 0x4c833e95U;
+static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
+
+namespace {
+class ABSL_SCOPED_LOCKABLE ArenaLock {
+ public:
+  explicit ArenaLock(LowLevelAlloc::Arena *arena)
+      ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
+      : arena_(arena) {
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      sigset_t all;
+      sigfillset(&all);
+      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
+    }
+#endif
+    arena_->mu.Lock();
+  }
+  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
+  void Leave() ABSL_UNLOCK_FUNCTION() {
+    arena_->mu.Unlock();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (mask_valid_) {
+      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
+      if (err != 0) {
+        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
+      }
+    }
+#endif
+    left_ = true;
+  }
+
+ private:
+  bool left_ = false;  // whether left region
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  bool mask_valid_ = false;
+  sigset_t mask_;  // old mask of blocked signals
+#endif
+  LowLevelAlloc::Arena *arena_;
+  ArenaLock(const ArenaLock &) = delete;
+  ArenaLock &operator=(const ArenaLock &) = delete;
+};
+}  // namespace
+
+// create an appropriate magic number for an object at "ptr"
+// "magic" should be kMagicAllocated or kMagicUnallocated
+inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
+  return magic ^ reinterpret_cast<uintptr_t>(ptr);
+}
+
+namespace {
+size_t GetPageSize() {
+#ifdef _WIN32
+  SYSTEM_INFO system_info;
+  GetSystemInfo(&system_info);
+  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
+#elif defined(__wasm__) || defined(__asmjs__)
+  return getpagesize();
+#else
+  return sysconf(_SC_PAGESIZE);
+#endif
+}
+
+size_t RoundedUpBlockSize() {
+  // Round up block sizes to a power of two close to the header size.
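+  // (E.g. with a 32-byte Header, as on typical 64-bit targets, this yields
+  // 32; a 40-byte Header would yield 64.)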
+  size_t round_up = 16;
+  while (round_up < sizeof(AllocList::Header)) {
+    round_up += round_up;
+  }
+  return round_up;
+}
+
+}  // namespace
+
+LowLevelAlloc::Arena::Arena(uint32_t flags_value)
+    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
+      allocation_count(0),
+      flags(flags_value),
+      pagesize(GetPageSize()),
+      round_up(RoundedUpBlockSize()),
+      min_size(2 * round_up),
+      random(0) {
+  freelist.header.size = 0;
+  freelist.header.magic =
+      Magic(kMagicUnallocated, &freelist.header);
+  freelist.header.arena = this;
+  freelist.levels = 0;
+  memset(freelist.next, 0, sizeof(freelist.next));
+}
+
+// L < meta_data_arena->mu
+LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
+  Arena *meta_data_arena = DefaultArena();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+    meta_data_arena = UnhookedAsyncSigSafeArena();
+  } else  // NOLINT(readability/braces)
+#endif
+      if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
+    meta_data_arena = UnhookedArena();
+  }
+  Arena *result =
+      new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
+  return result;
+}
+
+// L < arena->mu, L < arena->arena->mu
+bool LowLevelAlloc::DeleteArena(Arena *arena) {
+  ABSL_RAW_CHECK(
+      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
+      "may not delete default arena");
+  ArenaLock section(arena);
+  if (arena->allocation_count != 0) {
+    section.Leave();
+    return false;
+  }
+  while (arena->freelist.next[0] != nullptr) {
+    AllocList *region = arena->freelist.next[0];
+    size_t size = region->header.size;
+    arena->freelist.next[0] = region->next[0];
+    ABSL_RAW_CHECK(
+        region->header.magic == Magic(kMagicUnallocated, &region->header),
+        "bad magic number in DeleteArena()");
+    ABSL_RAW_CHECK(region->header.arena == arena,
+                   "bad arena pointer in DeleteArena()");
+    ABSL_RAW_CHECK(size % arena->pagesize == 0,
+                   "empty arena has non-page-aligned block size");
+    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
+                   "empty arena has non-page-aligned block");
+    int munmap_result;
+#ifdef _WIN32
+    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
+    ABSL_RAW_CHECK(munmap_result != 0,
+                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
+      munmap_result = munmap(region, size);
+    } else {
+      munmap_result = base_internal::DirectMunmap(region, size);
+    }
+#else
+    munmap_result = munmap(region, size);
+#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (munmap_result != 0) {
+      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
+                   errno);
+    }
+#endif  // _WIN32
+  }
+  section.Leave();
+  arena->~Arena();
+  Free(arena);
+  return true;
+}
+
+// ---------------------------------------------------------------------------
+
+// Addition, checking for overflow. The intent is to die if an external client
+// manages to push through a request that would cause arithmetic to fail.
+static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
+  uintptr_t sum = a + b;
+  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
+  return sum;
+}
+
+// Return value rounded up to next multiple of align.
+// align must be a power of two.
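+// For example, RoundUp(17, 16) == 32 and RoundUp(32, 16) == 32.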
+static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
+  return CheckedAdd(addr, align - 1) & ~(align - 1);
+}
+
+// Equivalent to "return prev->next[i]" but with sanity checking
+// that the freelist is in the correct order, that it
+// consists of regions marked "unallocated", and that no two regions
+// are adjacent in memory (they should have been coalesced).
+// L >= arena->mu
+static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
+  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
+  AllocList *next = prev->next[i];
+  if (next != nullptr) {
+    ABSL_RAW_CHECK(
+        next->header.magic == Magic(kMagicUnallocated, &next->header),
+        "bad magic number in Next()");
+    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
+    if (prev != &arena->freelist) {
+      ABSL_RAW_CHECK(prev < next, "unordered freelist");
+      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
+                         reinterpret_cast<char *>(next),
+                     "malformed freelist");
+    }
+  }
+  return next;
+}
+
+// Coalesce list item "a" with its successor if they are adjacent.
+static void Coalesce(AllocList *a) {
+  AllocList *n = a->next[0];
+  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
+                          reinterpret_cast<char *>(n)) {
+    LowLevelAlloc::Arena *arena = a->header.arena;
+    a->header.size += n->header.size;
+    n->header.magic = 0;
+    n->header.arena = nullptr;
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, n, prev);
+    LLA_SkiplistDelete(&arena->freelist, a, prev);
+    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
+                                   &arena->random);
+    LLA_SkiplistInsert(&arena->freelist, a, prev);
+  }
+}
+
+// Adds block at location "v" to the free list
+// L >= arena->mu
+static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
+  AllocList *f = reinterpret_cast<AllocList *>(
+      reinterpret_cast<char *>(v) - sizeof (f->header));
+  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+                 "bad magic number in AddToFreelist()");
+  ABSL_RAW_CHECK(f->header.arena == arena,
+                 "bad arena pointer in AddToFreelist()");
+  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
+                                 &arena->random);
+  AllocList *prev[kMaxLevel];
+  LLA_SkiplistInsert(&arena->freelist, f, prev);
+  f->header.magic = Magic(kMagicUnallocated, &f->header);
+  Coalesce(f);        // maybe coalesce with successor
+  Coalesce(prev[0]);  // maybe coalesce with predecessor
+}
+
+// Frees storage allocated by LowLevelAlloc::Alloc().
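+// The block's Header sits immediately before the user pointer, which is how
+// Free() recovers the owning arena without any global lookup.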
+// L < arena->mu
+void LowLevelAlloc::Free(void *v) {
+  if (v != nullptr) {
+    AllocList *f = reinterpret_cast<AllocList *>(
+        reinterpret_cast<char *>(v) - sizeof (f->header));
+    LowLevelAlloc::Arena *arena = f->header.arena;
+    ArenaLock section(arena);
+    AddToFreelist(v, arena);
+    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
+    arena->allocation_count--;
+    section.Leave();
+  }
+}
+
+// allocates and returns a block of size bytes, to be freed with Free()
+// L < arena->mu
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+  void *result = nullptr;
+  if (request != 0) {
+    AllocList *s;  // will point to region that satisfies request
+    ArenaLock section(arena);
+    // round up with header
+    size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
+                             arena->round_up);
+    for (;;) {  // loop until we find a suitable region
+      // find the minimum levels that a block of this size must have
+      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
+      if (i < arena->freelist.levels) {        // potential blocks exist
+        AllocList *before = &arena->freelist;  // predecessor of s
+        while ((s = Next(i, before, arena)) != nullptr &&
+               s->header.size < req_rnd) {
+          before = s;
+        }
+        if (s != nullptr) {  // we found a region
+          break;
+        }
+      }
+      // we unlock before mmap() both because mmap() may call a callback hook,
+      // and because it may be slow.
+      arena->mu.Unlock();
+      // mmap generous 64K chunks to decrease
+      // the chances/impact of fragmentation:
+      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
+      void *new_pages;
+#ifdef _WIN32
+      new_pages = VirtualAlloc(0, new_pages_size,
+                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      } else {
+        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+      }
+#else
+      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+      if (new_pages == MAP_FAILED) {
+        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
+      }
+
+#endif  // _WIN32
+      arena->mu.Lock();
+      s = reinterpret_cast<AllocList *>(new_pages);
+      s->header.size = new_pages_size;
+      // Pretend the block is allocated; call AddToFreelist() to free it.
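+      // (AddToFreelist() verifies the allocated magic, re-marks the block
+      // unallocated, and coalesces it with adjacent free regions.)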
+      s->header.magic = Magic(kMagicAllocated, &s->header);
+      s->header.arena = arena;
+      AddToFreelist(&s->levels, arena);  // insert new region into free list
+    }
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
+    // s points to the first free region that's big enough
+    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
+      // big enough to split
+      AllocList *n = reinterpret_cast<AllocList *>
+                        (req_rnd + reinterpret_cast<char *>(s));
+      n->header.size = s->header.size - req_rnd;
+      n->header.magic = Magic(kMagicAllocated, &n->header);
+      n->header.arena = arena;
+      s->header.size = req_rnd;
+      AddToFreelist(&n->levels, arena);
+    }
+    s->header.magic = Magic(kMagicAllocated, &s->header);
+    ABSL_RAW_CHECK(s->header.arena == arena, "");
+    arena->allocation_count++;
+    section.Leave();
+    result = &s->levels;
+  }
+  ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+  return result;
+}
+
+void *LowLevelAlloc::Alloc(size_t request) {
+  void *result = DoAllocWithArena(request, DefaultArena());
+  return result;
+}
+
+void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
+  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
+  void *result = DoAllocWithArena(request, arena);
+  return result;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/base/abseil/absl/base/internal/low_level_alloc.h b/base/abseil/absl/base/internal/low_level_alloc.h
new file mode 100644
index 0000000..db91951
--- /dev/null
+++ b/base/abseil/absl/base/internal/low_level_alloc.h
@@ -0,0 +1,126 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+
+// A simple thread-safe memory allocator that does not depend on
+// mutexes or thread-specific data. It is intended to be used
+// sparingly, and only when malloc() would introduce an unwanted
+// dependency, such as inside the heap-checker, or the Mutex
+// implementation.
+
+// IWYU pragma: private, include "base/low_level_alloc.h"
+
+#include <sys/types.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
+#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
+
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or
+// asm.js / WebAssembly.
+// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+// for more information.
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <cstddef>
+
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class LowLevelAlloc {
+ public:
+  struct Arena;  // an arena from which memory may be allocated
+
+  // Returns a pointer to a block of at least "request" bytes
+  // that have been newly allocated from the specific arena.
+  // For Alloc(), the DefaultArena() is used.
+  // Returns 0 if passed request==0.
+  // Does not return 0 under other circumstances; it crashes if memory
+  // is not available.
+  static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+  static void *AllocWithArena(size_t request, Arena *arena)
+      ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // Deallocates a region of memory that was previously allocated with
+  // Alloc(). Does nothing if passed 0. "s" must be either 0,
+  // or must have been returned from a call to Alloc() and not yet passed to
+  // Free() since that call to Alloc(). The space is returned to the arena
+  // from which it was allocated.
+  static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free
+  // are to put all callers of MallocHook::Invoke* in this module
+  // into special section,
+  // so that MallocHook::GetCallerStackTrace can function accurately.
+
+  // Create a new arena.
+  // The root metadata for the new arena is allocated in the
+  // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
+  // These values may be OR'd into flags:
+  enum {
+    // Report calls to Alloc() and Free() via the MallocHook interface.
+    // Set in the DefaultArena.
+    kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    // Make calls to Alloc(), Free() be async-signal-safe. Not set in
+    // DefaultArena(). Not supported on all platforms.
+    kAsyncSignalSafe = 0x0002,
+#endif
+  };
+  // Construct a new arena. The allocation of the underlying metadata honors
+  // the provided flags. For example, the call NewArena(kAsyncSignalSafe)
+  // is itself async-signal-safe, as well as generating an arena that provides
+  // async-signal-safe Alloc/Free.
+  static Arena *NewArena(int32_t flags);
+
+  // Destroys an arena allocated by NewArena and returns true,
+  // provided no allocated blocks remain in the arena.
+  // If allocated blocks remain in the arena, does nothing and
+  // returns false.
+  // It is illegal to attempt to destroy the DefaultArena().
+  static bool DeleteArena(Arena *arena);
+
+  // The default arena that always exists.
+  static Arena *DefaultArena();
+
+ private:
+  LowLevelAlloc();  // no instances
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
diff --git a/base/abseil/absl/base/internal/low_level_alloc_test.cc b/base/abseil/absl/base/internal/low_level_alloc_test.cc
new file mode 100644
index 0000000..7abbbf9
--- /dev/null
+++ b/base/abseil/absl/base/internal/low_level_alloc_test.cc
@@ -0,0 +1,160 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_map>
+#include <utility>
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+// This test doesn't use gtest since it needs to test that everything
+// works before main().
+#define TEST_ASSERT(x)                                           \
+  if (!(x)) {                                                    \
+    printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \
+    abort();                                                     \
+  }
+
+// a block of memory obtained from the allocator
+struct BlockDesc {
+  char *ptr;  // pointer to memory
+  int len;    // number of bytes
+  int fill;   // filled with data starting with this
+};
+
+// Check that the pattern placed in the block d
+// by RandomizeBlockDesc is still there.
+static void CheckBlockDesc(const BlockDesc &d) {
+  for (int i = 0; i != d.len; i++) {
+    TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff));
+  }
+}
+
+// Fill the block "*d" with a pattern
+// starting with a random byte.
+static void RandomizeBlockDesc(BlockDesc *d) {
+  d->fill = rand() & 0xff;
+  for (int i = 0; i != d->len; i++) {
+    d->ptr[i] = (d->fill + i) & 0xff;
+  }
+}
+
+// Use to indicate to the malloc hooks that
+// this call is from LowLevelAlloc.
+static bool using_low_level_alloc = false;
+
+// n times, toss a coin, and based on the outcome
+// either allocate a new block or deallocate an old block.
+// New blocks are placed in a std::unordered_map with a random key
+// and initialized with RandomizeBlockDesc().
+// If keys conflict, the older block is freed.
+// Old blocks are always checked with CheckBlockDesc()
+// before being freed. At the end of the run,
+// all remaining allocated blocks are freed.
+// If use_new_arena is true, use a fresh arena, and then delete it.
+// If call_malloc_hook is true and use_new_arena is true,
+// allocations and deallocations are reported via the MallocHook
+// interface.
+static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
+  typedef std::unordered_map<int, BlockDesc> AllocMap;
+  AllocMap allocated;
+  AllocMap::iterator it;
+  BlockDesc block_desc;
+  int rnd;
+  LowLevelAlloc::Arena *arena = 0;
+  if (use_new_arena) {
+    int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
+    arena = LowLevelAlloc::NewArena(flags);
+  }
+  for (int i = 0; i != n; i++) {
+    if (i != 0 && i % 10000 == 0) {
+      printf(".");
+      fflush(stdout);
+    }
+
+    switch (rand() & 1) {  // toss a coin
+      case 0:  // coin came up heads: add a block
+        using_low_level_alloc = true;
+        block_desc.len = rand() & 0x3fff;
+        block_desc.ptr =
+            reinterpret_cast<char *>(
+                arena == 0
+                    ? LowLevelAlloc::Alloc(block_desc.len)
+                    : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
+        using_low_level_alloc = false;
+        RandomizeBlockDesc(&block_desc);
+        rnd = rand();
+        it = allocated.find(rnd);
+        if (it != allocated.end()) {
+          CheckBlockDesc(it->second);
+          using_low_level_alloc = true;
+          LowLevelAlloc::Free(it->second.ptr);
+          using_low_level_alloc = false;
+          it->second = block_desc;
+        } else {
+          allocated[rnd] = block_desc;
+        }
+        break;
+      case 1:  // coin came up tails: remove a block
+        it = allocated.begin();
+        if (it != allocated.end()) {
+          CheckBlockDesc(it->second);
+          using_low_level_alloc = true;
+          LowLevelAlloc::Free(it->second.ptr);
+          using_low_level_alloc = false;
+          allocated.erase(it);
+        }
+        break;
+    }
+  }
+  // remove all remaining blocks
+  while ((it = allocated.begin()) != allocated.end()) {
+    CheckBlockDesc(it->second);
+    using_low_level_alloc = true;
+    LowLevelAlloc::Free(it->second.ptr);
+    using_low_level_alloc = false;
+    allocated.erase(it);
+  }
+  if (use_new_arena) {
+    TEST_ASSERT(LowLevelAlloc::DeleteArena(arena));
+  }
+}
+
+// LowLevelAlloc is designed to be safe to call before main().
+static struct BeforeMain {
+  BeforeMain() {
+    Test(false, false, 50000);
+    Test(true, false, 50000);
+    Test(true, true, 50000);
+  }
+} before_main;
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+int main(int argc, char *argv[]) {
+  // The actual test runs in the global constructor of `before_main`.
+  printf("PASS\n");
+  return 0;
+}
diff --git a/base/abseil/absl/base/internal/low_level_scheduling.h b/base/abseil/absl/base/internal/low_level_scheduling.h
new file mode 100644
index 0000000..961cc98
--- /dev/null
+++ b/base/abseil/absl/base/internal/low_level_scheduling.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+
+// The following two declarations exist so SchedulingGuard may friend them with
+// the appropriate language linkage. These callbacks allow libc internals, such
+// as function level statics, to schedule cooperatively when locking.
+extern "C" bool __google_disable_rescheduling(void);
+extern "C" void __google_enable_rescheduling(bool disable_result);
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class SchedulingHelper;  // To allow use of SchedulingGuard.
+class SpinLock;          // To allow use of SchedulingGuard.
+
+// SchedulingGuard
+// Provides guard semantics that may be used to disable cooperative
+// rescheduling of the calling thread within specific program blocks. This is
+// used to protect resources (e.g. low-level SpinLocks or Domain code) that
+// cooperative scheduling depends on.
+//
+// Domain implementations capable of rescheduling in reaction to involuntary
+// kernel thread actions (e.g. blocking due to a page fault or syscall) must
+// guarantee that an annotated thread is not allowed to (cooperatively)
+// reschedule until the annotated region is complete.
+//
+// It is an error to attempt to use a cooperatively scheduled resource (e.g.
+// Mutex) within a rescheduling-disabled region.
+//
+// All methods are async-signal safe.
+class SchedulingGuard {
+ public:
+  // Returns true iff the calling thread may be cooperatively rescheduled.
+  static bool ReschedulingIsAllowed();
+
+ private:
+  // Disable cooperative rescheduling of the calling thread.  It may still
+  // initiate scheduling operations (e.g. wake-ups), however, it may not itself
+  // reschedule.  Nestable.  The returned result is opaque, clients should not
+  // attempt to interpret it.
+  // REQUIRES: Result must be passed to a pairing EnableScheduling().
+  static bool DisableRescheduling();
+
+  // Marks the end of a rescheduling disabled region, previously started by
+  // DisableRescheduling().
+  // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
+  static void EnableRescheduling(bool disable_result);
+
+  // A scoped helper for {Disable, Enable}Rescheduling().
+  // REQUIRES: destructor must run in same thread as constructor.
+  struct ScopedDisable {
+    ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
+    ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
+
+    bool disabled;
+  };
+
+  // Access to SchedulingGuard is explicitly white-listed.
+  friend class SchedulingHelper;
+  friend class SpinLock;
+
+  SchedulingGuard(const SchedulingGuard&) = delete;
+  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+//------------------------------------------------------------------------------
+
+inline bool SchedulingGuard::ReschedulingIsAllowed() {
+  return false;
+}
+
+inline bool SchedulingGuard::DisableRescheduling() {
+  return false;
+}
+
+inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
+  return;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
diff --git a/base/abseil/absl/base/internal/per_thread_tls.h b/base/abseil/absl/base/internal/per_thread_tls.h
new file mode 100644
index 0000000..cf5e97a
--- /dev/null
+++ b/base/abseil/absl/base/internal/per_thread_tls.h
@@ -0,0 +1,52 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
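(Editor's illustration; not part of the patch. A minimal sketch of how a
befriended class such as SpinLock is expected to use the SchedulingGuard
defined above; `GuardedWork` is a hypothetical member function:)

    // Within a class that SchedulingGuard names as a friend:
    void GuardedWork() {
      // The constructor saves the current state and disables cooperative
      // rescheduling; the destructor restores the saved state on scope exit.
      absl::base_internal::SchedulingGuard::ScopedDisable no_reschedule;
      // ... touch state that a cooperative reschedule must not interrupt ...
    }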
+
+#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+
+// This header defines two macros:
+//
+// If the platform supports thread-local storage:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
+//   thread-local variable
+// * ABSL_PER_THREAD_TLS is 1
+//
+// Otherwise:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is empty
+// * ABSL_PER_THREAD_TLS is 0
+//
+// Microsoft C supports thread-local storage.
+// GCC supports it if the appropriate version of glibc is available,
+// which the programmer can indicate by defining ABSL_HAVE_TLS
+
+#include "absl/base/port.h"  // For ABSL_HAVE_TLS
+
+#if defined(ABSL_PER_THREAD_TLS)
+#error ABSL_PER_THREAD_TLS cannot be directly set
+#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
+#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
+#elif defined(ABSL_HAVE_TLS)
+#define ABSL_PER_THREAD_TLS_KEYWORD __thread
+#define ABSL_PER_THREAD_TLS 1
+#elif defined(_MSC_VER)
+#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
+#define ABSL_PER_THREAD_TLS 1
+#else
+#define ABSL_PER_THREAD_TLS_KEYWORD
+#define ABSL_PER_THREAD_TLS 0
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
diff --git a/base/abseil/absl/base/internal/periodic_sampler.cc b/base/abseil/absl/base/internal/periodic_sampler.cc
new file mode 100644
index 0000000..520dabb
--- /dev/null
+++ b/base/abseil/absl/base/internal/periodic_sampler.cc
@@ -0,0 +1,53 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/periodic_sampler.h"
+
+#include <atomic>
+
+#include "absl/base/internal/exponential_biased.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+int64_t PeriodicSamplerBase::GetExponentialBiased(int period) noexcept {
+  return rng_.GetStride(period);
+}
+
+bool PeriodicSamplerBase::SubtleConfirmSample() noexcept {
+  int current_period = period();
+
+  // Deal with period case 0 (always off) and 1 (always on)
+  if (ABSL_PREDICT_FALSE(current_period < 2)) {
+    stride_ = 0;
+    return current_period == 1;
+  }
+
+  // Check if this is the first call to Sample()
+  if (ABSL_PREDICT_FALSE(stride_ == 1)) {
+    stride_ = static_cast<uint64_t>(-GetExponentialBiased(current_period));
+    if (static_cast<int64_t>(stride_) < -1) {
+      ++stride_;
+      return false;
+    }
+  }
+
+  stride_ = static_cast<uint64_t>(-GetExponentialBiased(current_period));
+  return true;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/periodic_sampler.h b/base/abseil/absl/base/internal/periodic_sampler.h
new file mode 100644
index 0000000..f8a8679
--- /dev/null
+++ b/base/abseil/absl/base/internal/periodic_sampler.h
@@ -0,0 +1,211 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_
+#define ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "absl/base/internal/exponential_biased.h"
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// PeriodicSamplerBase provides the basic period sampler implementation.
+//
+// This is the base class for the templated PeriodicSampler class, which holds
+// a global std::atomic value identified by a user defined tag, such that
+// each specific PeriodicSampler implementation holds its own global period.
+//
+// PeriodicSamplerBase is thread-compatible except where stated otherwise.
+class PeriodicSamplerBase {
+ public:
+  // PeriodicSamplerBase is trivial / copyable / movable / destructible.
+  PeriodicSamplerBase() = default;
+  PeriodicSamplerBase(PeriodicSamplerBase&&) = default;
+  PeriodicSamplerBase(const PeriodicSamplerBase&) = default;
+
+  // Returns true roughly once every `period` calls. This is established by a
+  // randomly picked `stride` that is counted down on each call to `Sample`.
+  // This stride is picked such that the probability of `Sample()` returning
+  // true is 1 in `period`.
+  inline bool Sample() noexcept;
+
+  // The below methods are intended for optimized use cases where the
+  // size of the inlined fast path code is highly important. Applications
+  // should use the `Sample()` method unless they have proof that their
+  // specific use case requires the optimizations offered by these methods.
+  //
+  // An example of such a use case is SwissTable sampling. All sampling checks
+  // are in inlined SwissTable methods, and the number of call sites is huge.
+  // In this case, the inlined code size added to each translation unit calling
+  // SwissTable methods is non-trivial.
+  //
+  // The `SubtleMaybeSample()` function spuriously returns true even if the
+  // function should not be sampled; applications MUST match each call to
+  // `SubtleMaybeSample()` returning true with a `SubtleConfirmSample()` call,
+  // and use the result of the latter as the sampling decision.
+  // In other words: the code should logically be equivalent to:
+  //
+  //    if (SubtleMaybeSample() && SubtleConfirmSample()) {
+  //      // Sample this call
+  //    }
+  //
+  // In the 'inline-size' optimized case, the `SubtleConfirmSample()` call can
+  // be placed out of line, for example, the typical use case looks as follows:
+  //
+  //   // --- frobber.h -----------
+  //   void FrobberSampled();
+  //
+  //   inline void FrobberImpl() {
+  //     // ...
+  //   }
+  //
+  //   inline void Frobber() {
+  //     if (ABSL_PREDICT_FALSE(sampler.SubtleMaybeSample())) {
+  //       FrobberSampled();
+  //     } else {
+  //       FrobberImpl();
+  //     }
+  //   }
+  //
+  //   // --- frobber.cc -----------
+  //   void FrobberSampled() {
+  //     if (!sampler.SubtleConfirmSample()) {
+  //       // Spurious false positive
+  //       FrobberImpl();
+  //       return;
+  //     }
+  //
+  //     // Sampled execution
+  //     // ...
+  //   }
+  inline bool SubtleMaybeSample() noexcept;
+  bool SubtleConfirmSample() noexcept;
+
+ protected:
+  // We explicitly don't use a virtual destructor as this class is never
+  // virtually destroyed, and it keeps the class trivial, which avoids TLS
+  // prologue and epilogue code for our TLS instances.
+  ~PeriodicSamplerBase() = default;
+
+  // Returns the next stride for our sampler.
+  // This function is virtual for testing purposes only.
+  virtual int64_t GetExponentialBiased(int period) noexcept;
+
+ private:
+  // Returns the current period of this sampler. Thread-safe.
+  virtual int period() const noexcept = 0;
+
+  // Keep and decrement stride_ as an unsigned integer, but compare the value
+  // to zero cast as a signed int. clang and msvc do not create optimum code
+  // if we use signed for the combined decrement and sign comparison.
+  //
+  // Of the 3 alternative options below, all compilers generate the best code
+  // using the unsigned increment <---> signed int comparison option.
+  //
+  // Option 1:
+  //   int64_t stride_;
+  //   if (ABSL_PREDICT_TRUE(++stride_ < 0)) { ... }
+  //
+  //   GCC   x64 (OK) : https://gcc.godbolt.org/z/R5MzzA
+  //   GCC   ppc (OK) : https://gcc.godbolt.org/z/z7NZAt
+  //   Clang x64 (BAD): https://gcc.godbolt.org/z/t4gPsd
+  //   ICC   x64 (OK) : https://gcc.godbolt.org/z/rE6s8W
+  //   MSVC  x64 (OK) : https://gcc.godbolt.org/z/ARMXqS
+  //
+  // Option 2:
+  //   int64_t stride_ = 0;
+  //   if (ABSL_PREDICT_TRUE(--stride_ >= 0)) { ... }
+  //
+  //   GCC   x64 (OK) : https://gcc.godbolt.org/z/jSQxYK
+  //   GCC   ppc (OK) : https://gcc.godbolt.org/z/VJdYaA
+  //   Clang x64 (BAD): https://gcc.godbolt.org/z/Xm4NjX
+  //   ICC   x64 (OK) : https://gcc.godbolt.org/z/4snaFd
+  //   MSVC  x64 (BAD): https://gcc.godbolt.org/z/BgnEKE
+  //
+  // Option 3:
+  //   uint64_t stride_;
+  //   if (ABSL_PREDICT_TRUE(static_cast<int64_t>(++stride_) < 0)) { ... }
+  //
+  //   GCC   x64 (OK) : https://gcc.godbolt.org/z/bFbfPy
+  //   GCC   ppc (OK) : https://gcc.godbolt.org/z/S9KkUE
+  //   Clang x64 (OK) : https://gcc.godbolt.org/z/UYzRb4
+  //   ICC   x64 (OK) : https://gcc.godbolt.org/z/ptTNfD
+  //   MSVC  x64 (OK) : https://gcc.godbolt.org/z/76j4-5
+  uint64_t stride_ = 0;
+  ExponentialBiased rng_;
+};
+
+inline bool PeriodicSamplerBase::SubtleMaybeSample() noexcept {
+  // See comments on `stride_` for the unsigned increment / signed compare.
+  if (ABSL_PREDICT_TRUE(static_cast<int64_t>(++stride_) < 0)) {
+    return false;
+  }
+  return true;
+}
+
+inline bool PeriodicSamplerBase::Sample() noexcept {
+  return ABSL_PREDICT_FALSE(SubtleMaybeSample()) ? SubtleConfirmSample()
+                                                 : false;
+}
+
+// PeriodicSampler is a concrete periodic sampler implementation.
+// The user-provided Tag identifies the implementation, and is required to
+// isolate the global state of this instance from other instances.
+//
+// Typical use case:
+//
+//   struct HashTablezTag {};
+//   thread_local PeriodicSampler<HashTablezTag, 100> sampler;
+//
+//   void HashTableSamplingLogic(...) {
+//     if (sampler.Sample()) {
+//       HashTableSlowSamplePath(...);
+//     }
+//   }
+//
+template <typename Tag, int default_period = 0>
+class PeriodicSampler final : public PeriodicSamplerBase {
+ public:
+  ~PeriodicSampler() = default;
+
+  int period() const noexcept final {
+    return period_.load(std::memory_order_relaxed);
+  }
+
+  // Sets the global period for this sampler. Thread-safe.
+  // Setting a period of 0 disables the sampler, i.e., every call to Sample()
+  // will return false. Setting a period of 1 puts the sampler in 'always on'
+  // mode, i.e., every call to Sample() returns true.
+  static void SetGlobalPeriod(int period) {
+    period_.store(period, std::memory_order_relaxed);
+  }
+
+ private:
+  static std::atomic<int> period_;
+};
+
+template <typename Tag, int default_period>
+std::atomic<int> PeriodicSampler<Tag, default_period>::period_(default_period);
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_
diff --git a/base/abseil/absl/base/internal/periodic_sampler_benchmark.cc b/base/abseil/absl/base/internal/periodic_sampler_benchmark.cc
new file mode 100644
index 0000000..5ad469c
--- /dev/null
+++ b/base/abseil/absl/base/internal/periodic_sampler_benchmark.cc
@@ -0,0 +1,79 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "absl/base/internal/periodic_sampler.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+template <typename Sampler>
+void BM_Sample(Sampler* sampler, benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(sampler);
+    benchmark::DoNotOptimize(sampler->Sample());
+  }
+}
+
+template <typename Sampler>
+void BM_SampleMinimumInlined(Sampler* sampler, benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(sampler);
+    if (ABSL_PREDICT_FALSE(sampler->SubtleMaybeSample())) {
+      benchmark::DoNotOptimize(sampler->SubtleConfirmSample());
+    }
+  }
+}
+
+void BM_PeriodicSampler_TinySample(benchmark::State& state) {
+  struct Tag {};
+  PeriodicSampler<Tag, 10> sampler;
+  BM_Sample(&sampler, state);
+}
+BENCHMARK(BM_PeriodicSampler_TinySample);
+
+void BM_PeriodicSampler_ShortSample(benchmark::State& state) {
+  struct Tag {};
+  PeriodicSampler<Tag, 1024> sampler;
+  BM_Sample(&sampler, state);
+}
+BENCHMARK(BM_PeriodicSampler_ShortSample);
+
+void BM_PeriodicSampler_LongSample(benchmark::State& state) {
+  struct Tag {};
+  PeriodicSampler<Tag, 1024 * 1024> sampler;
+  BM_Sample(&sampler, state);
+}
+BENCHMARK(BM_PeriodicSampler_LongSample);
+
+void BM_PeriodicSampler_LongSampleMinimumInlined(benchmark::State& state) {
+  struct Tag {};
+  PeriodicSampler<Tag, 1024 * 1024> sampler;
+  BM_SampleMinimumInlined(&sampler, state);
+}
+BENCHMARK(BM_PeriodicSampler_LongSampleMinimumInlined);
+
+void BM_PeriodicSampler_Disabled(benchmark::State& state) {
+  struct Tag {};
+  PeriodicSampler<Tag, 0> sampler;
+  BM_Sample(&sampler, state);
+}
+BENCHMARK(BM_PeriodicSampler_Disabled);
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/periodic_sampler_test.cc b/base/abseil/absl/base/internal/periodic_sampler_test.cc
new file mode 100644
index 0000000..3b301e3
--- /dev/null
+++ b/base/abseil/absl/base/internal/periodic_sampler_test.cc
@@ -0,0 +1,177 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/periodic_sampler.h"
+
+#include <thread>  // NOLINT(build/c++11)
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/macros.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+using testing::Eq;
+using testing::Return;
+using testing::StrictMock;
+
+class MockPeriodicSampler : public PeriodicSamplerBase {
+ public:
+  virtual ~MockPeriodicSampler() = default;
+
+  MOCK_METHOD(int, period, (), (const, noexcept));
+  MOCK_METHOD(int64_t, GetExponentialBiased, (int), (noexcept));
+};
+
+TEST(PeriodicSamplerBaseTest, Sample) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(16));
+  EXPECT_CALL(sampler, GetExponentialBiased(16))
+      .WillOnce(Return(2))
+      .WillOnce(Return(3))
+      .WillOnce(Return(4));
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerBaseTest, ImmediatelySample) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(16));
+  EXPECT_CALL(sampler, GetExponentialBiased(16))
+      .WillOnce(Return(1))
+      .WillOnce(Return(2))
+      .WillOnce(Return(3));
+
+  EXPECT_TRUE(sampler.Sample());
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerBaseTest, Disabled) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(0));
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerBaseTest, AlwaysOn) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(1));
+
+  EXPECT_TRUE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerBaseTest, Disable) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).WillOnce(Return(16));
+  EXPECT_CALL(sampler, GetExponentialBiased(16)).WillOnce(Return(3));
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+
+  EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(0));
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerBaseTest, Enable) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).WillOnce(Return(0));
+  EXPECT_FALSE(sampler.Sample());
+
+  EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(16));
+  EXPECT_CALL(sampler, GetExponentialBiased(16))
+      .Times(2)
+      .WillRepeatedly(Return(3));
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerTest, ConstructConstInit) {
+  struct Tag {};
+  ABSL_CONST_INIT static PeriodicSampler<Tag> sampler;
+  (void)sampler;
+}
+
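(Editor's illustration; not part of the patch. Ahead of the period tests that
follow, a minimal sketch of standalone sampler usage; `MyTag` and the period
of 64 are hypothetical choices:)

    struct MyTag {};
    // One global period per tag type; roughly 1 in 64 calls samples.
    thread_local absl::base_internal::PeriodicSampler<MyTag, 64> sampler;

    void MaybeRecord() {
      if (sampler.Sample()) {
        // ~1/64 of calls reach this slow path.
      }
    }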
+TEST(PeriodicSamplerTest, DefaultPeriod0) {
+  struct Tag {};
+  PeriodicSampler<Tag> sampler;
+  EXPECT_THAT(sampler.period(), Eq(0));
+}
+
+TEST(PeriodicSamplerTest, DefaultPeriod) {
+  struct Tag {};
+  PeriodicSampler<Tag, 100> sampler;
+  EXPECT_THAT(sampler.period(), Eq(100));
+}
+
+TEST(PeriodicSamplerTest, SetGlobalPeriod) {
+  struct Tag1 {};
+  struct Tag2 {};
+  PeriodicSampler<Tag1, 25> sampler1;
+  PeriodicSampler<Tag2, 50> sampler2;
+
+  EXPECT_THAT(sampler1.period(), Eq(25));
+  EXPECT_THAT(sampler2.period(), Eq(50));
+
+  std::thread thread([] {
+    PeriodicSampler<Tag1, 25> sampler1;
+    PeriodicSampler<Tag2, 50> sampler2;
+    EXPECT_THAT(sampler1.period(), Eq(25));
+    EXPECT_THAT(sampler2.period(), Eq(50));
+    sampler1.SetGlobalPeriod(10);
+    sampler2.SetGlobalPeriod(20);
+  });
+  thread.join();
+
+  EXPECT_THAT(sampler1.period(), Eq(10));
+  EXPECT_THAT(sampler2.period(), Eq(20));
+}
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/pretty_function.h b/base/abseil/absl/base/internal/pretty_function.h
new file mode 100644
index 0000000..35d5167
--- /dev/null
+++ b/base/abseil/absl/base/internal/pretty_function.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function.  That
+// is, "main", not "int main()".  Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets.  ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
+#if defined(_MSC_VER)
+#define ABSL_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__)
+#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#error "Unsupported compiler"
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
diff --git a/base/abseil/absl/base/internal/raw_logging.cc b/base/abseil/absl/base/internal/raw_logging.cc
new file mode 100644
index 0000000..d79c548
--- /dev/null
+++ b/base/abseil/absl/base/internal/raw_logging.cc
@@ -0,0 +1,237 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
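(Editor's illustration; not part of the patch. What the ABSL_PRETTY_FUNCTION
macro defined in pretty_function.h above expands to, sketched for the two
supported compiler families:)

    #include <cstdio>

    void Frob(int) {
      // GCC/Clang expand to __PRETTY_FUNCTION__, e.g. "void Frob(int)";
      // MSVC expands to __FUNCSIG__, e.g. "void __cdecl Frob(int)".
      std::printf("%s\n", ABSL_PRETTY_FUNCTION);
    }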
+
+#include "absl/base/internal/raw_logging.h"
+
+#include <stddef.h>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/log_severity.h"
+
+// We know how to perform low-level writes to stderr in POSIX and Windows.  For
+// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
+// Much of raw_logging.cc becomes a no-op when we can't output messages,
+// although a FATAL ABSL_RAW_LOG message will still abort the process.
+
+// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
+// (as from unistd.h)
+//
+// This preprocessor token is also defined in raw_io.cc.  If you need to copy
+// this, consider moving both to config.h instead.
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+    defined(__Fuchsia__) || defined(__native_client__) ||               \
+    defined(__EMSCRIPTEN__) || defined(__ASYLO__)
+
+#include <unistd.h>
+
+#define ABSL_HAVE_POSIX_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_POSIX_WRITE
+#endif
+
+// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
+//   syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
+// for low level operations that want to avoid libc.
+#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
+#include <sys/syscall.h>
+#define ABSL_HAVE_SYSCALL_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_SYSCALL_WRITE
+#endif
+
+#ifdef _WIN32
+#include <io.h>
+
+#define ABSL_HAVE_RAW_IO 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_RAW_IO
+#endif
+
+// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
+// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
+// whitelisted set of platforms for which we expect not to be able to raw log.
+
+ABSL_CONST_INIT static absl::base_internal::AtomicHook<
+    absl::raw_logging_internal::LogPrefixHook>
+    log_prefix_hook;
+ABSL_CONST_INIT static absl::base_internal::AtomicHook<
+    absl::raw_logging_internal::AbortHook>
+    abort_hook;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+static const char kTruncated[] = " ... (message truncated)\n";
+
+// sprintf the format to the buffer, adjusting *buf and *size to reflect the
+// consumed bytes, and return whether the message fit without truncation.  If
+// truncation occurred, if possible leave room in the buffer for the message
+// kTruncated[].
+inline static bool VADoRawLog(char** buf, int* size, const char* format,
+                              va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0);
+inline static bool VADoRawLog(char** buf, int* size,
+                              const char* format, va_list ap) {
+  int n = vsnprintf(*buf, *size, format, ap);
+  bool result = true;
+  if (n < 0 || n > *size) {
+    result = false;
+    if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
+      n = *size - sizeof(kTruncated);  // room for truncation message
+    } else {
+      n = 0;  // no room for truncation message
+    }
+  }
+  *size -= n;
+  *buf += n;
+  return result;
+}
+#endif  // ABSL_LOW_LEVEL_WRITE_SUPPORTED
+
+static constexpr int kLogBufSize = 3000;
+
+namespace {
+
+// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
+// that invoke malloc() and getenv() that might acquire some locks.
+
+// Helper for RawLog below.
+// *DoRawLog writes to *buf of *size and moves them past the written portion.
+// It returns true iff there was no overflow or error.
+bool DoRawLog(char** buf, int* size, const char* format, ...)
+    ABSL_PRINTF_ATTRIBUTE(3, 4);
+bool DoRawLog(char** buf, int* size, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  int n = vsnprintf(*buf, *size, format, ap);
+  va_end(ap);
+  if (n < 0 || n > *size) return false;
+  *size -= n;
+  *buf += n;
+  return true;
+}
+
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) {
+  char buffer[kLogBufSize];
+  char* buf = buffer;
+  int size = sizeof(buffer);
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  bool enabled = true;
+#else
+  bool enabled = false;
+#endif
+
+#ifdef ABSL_MIN_LOG_LEVEL
+  if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
+      severity < absl::LogSeverity::kFatal) {
+    enabled = false;
+  }
+#endif
+
+  auto log_prefix_hook_ptr = log_prefix_hook.Load();
+  if (log_prefix_hook_ptr) {
+    enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
+  } else {
+    if (enabled) {
+      DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
+    }
+  }
+  const char* const prefix_end = buf;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  if (enabled) {
+    bool no_chop = VADoRawLog(&buf, &size, format, ap);
+    if (no_chop) {
+      DoRawLog(&buf, &size, "\n");
+    } else {
+      DoRawLog(&buf, &size, "%s", kTruncated);
+    }
+    absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer));
+  }
+#else
+  static_cast<void>(format);
+  static_cast<void>(ap);
+#endif
+
+  // Abort the process after logging a FATAL message, even if the output itself
+  // was suppressed.
+  if (severity == absl::LogSeverity::kFatal) {
+    abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
+    abort();
+  }
+}
+
+}  // namespace
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_logging_internal {
+void SafeWriteToStderr(const char *s, size_t len) {
+#if defined(ABSL_HAVE_SYSCALL_WRITE)
+  syscall(SYS_write, STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_POSIX_WRITE)
+  write(STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_RAW_IO)
+  _write(/* stderr */ 2, s, len);
+#else
+  // stderr logging unsupported on this platform
+  (void) s;
+  (void) len;
+#endif
+}
+
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  RawLogVA(severity, file, line, format, ap);
+  va_end(ap);
+}
+
+// Non-formatting version of RawLog().
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+static void DefaultInternalLog(absl::LogSeverity severity, const char* file,
+                               int line, const std::string& message) {
+  RawLog(severity, file, line, "%s", message.c_str());
+}
+
+bool RawLoggingFullySupported() {
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return true;
+#else   // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return false;
+#endif  // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+}
+
+ABSL_CONST_INIT absl::base_internal::AtomicHook<InternalLogFunction>
+    internal_log_function(DefaultInternalLog);
+
+void RegisterInternalLogFunction(InternalLogFunction func) {
+  internal_log_function.Store(func);
+}
+
+}  // namespace raw_logging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/raw_logging.h b/base/abseil/absl/base/internal/raw_logging.h
new file mode 100644
index 0000000..cff4505
--- /dev/null
+++ b/base/abseil/absl/base/internal/raw_logging.h
@@ -0,0 +1,179 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation, synchronization, and signal-handling code.
+
+#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/log_severity.h"
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+
+// This is similar to LOG(severity) << format..., but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+//   memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit printf-format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+//   ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// This will print an almost standard log line like this to stderr only:
+//   E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+
+#define ABSL_RAW_LOG(severity, ...)                                            \
+  do {                                                                         \
+    constexpr const char* absl_raw_logging_internal_basename =                 \
+        ::absl::raw_logging_internal::Basename(__FILE__,                       \
+                                               sizeof(__FILE__) - 1);          \
+    ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
+                                         absl_raw_logging_internal_basename,  \
+                                         __LINE__, __VA_ARGS__);               \
+  } while (0)
+
+// Similar to CHECK(condition) << message, but for low-level modules:
+// we use only ABSL_RAW_LOG that does not allocate memory.
+// We do not want to provide args list here to encourage this usage:
+//   if (!cond)  ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+// so that the args are not computed when not needed.
+#define ABSL_RAW_CHECK(condition, message)                             \
+  do {                                                                 \
+    if (ABSL_PREDICT_FALSE(!(condition))) {                            \
+      ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+    }                                                                  \
+  } while (0)
+
+// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above,
+// except that if the richer log library is linked into the binary, we dispatch
+// to that instead.  This is potentially useful for internal logging and
+// assertions, where we are using RAW_LOG neither for its async-signal-safety
+// nor for its non-allocating nature, but rather because raw logging has very
+// few other dependencies.
+//
+// The API is a subset of the above: each macro only takes two arguments.  Use
+// StrCat if you need to build a richer message.
+#define ABSL_INTERNAL_LOG(severity, message)                                \
+  do {                                                                      \
+    ::absl::raw_logging_internal::internal_log_function(                    \
+        ABSL_RAW_LOGGING_INTERNAL_##severity, __FILE__, __LINE__, message); \
+  } while (0)
+
+#define ABSL_INTERNAL_CHECK(condition, message)                    \
+  do {                                                             \
+    if (ABSL_PREDICT_FALSE(!(condition))) {                        \
+      std::string death_message = "Check " #condition " failed: "; \
+      death_message += std::string(message);                       \
+      ABSL_INTERNAL_LOG(FATAL, death_message);                     \
+    }                                                              \
+  } while (0)
+
+#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo
+#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning
+#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError
+#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal
+#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \
+  ::absl::NormalizeLogSeverity(severity)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_logging_internal {
+
+// Helper function to implement ABSL_RAW_LOG
+// Logs format... at "severity" level, reporting it
+// as called from file:line.
+// This does not allocate memory or acquire locks.
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
+
+// Writes the provided buffer directly to stderr, in a safe, low-level manner.
+//
+// In POSIX this means calling write(), which is async-signal safe and does
+// not malloc.  If the platform supports the SYS_write syscall, we invoke that
+// directly to side-step any libc interception.
+void SafeWriteToStderr(const char *s, size_t len);
+
+// compile-time function to get the "base" filename, that is, the part of
+// a filename after the last "/" or "\" path separator.  The search starts at
+// the end of the string; the second parameter is the length of the string.
+constexpr const char* Basename(const char* fname, int offset) {
+  return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\'
+             ? fname + offset
+             : Basename(fname, offset - 1);
+}
+
+// For testing only.
+// Returns true if raw logging is fully supported. When it is not
+// fully supported, no messages will be emitted, but a log at FATAL
+// severity will cause an abort.
+//
+// TODO(gfalcon): Come up with a better name for this method.
+bool RawLoggingFullySupported();
+
+// Function type for a raw_logging customization hook for suppressing messages
+// by severity, and for writing custom prefixes on non-suppressed messages.
+//
+// The installed hook is called for every raw log invocation.  The message will
+// be logged to stderr only if the hook returns true.  FATAL errors will cause
+// the process to abort, even if writing to stderr is suppressed.  The hook is
+// also provided with an output buffer, where it can write a custom log message
+// prefix.
+//
+// The raw_logging system does not allocate memory or grab locks.  User-provided
+// hooks must avoid these operations, and must not throw exceptions.
+//
+// 'severity' is the severity level of the message being written.
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// 'buffer' and 'buf_size' are pointers to the buffer and buffer size.  If the
+// hook writes a prefix, it must increment *buffer and decrement *buf_size
+// accordingly.
+using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file,
+                               int line, char** buffer, int* buf_size);
+
+// Function type for a raw_logging customization hook called to abort a process
+// when a FATAL message is logged.  If the provided AbortHook() returns, the
+// logging system will call abort().
+//
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// The NUL-terminated logged message lives in the buffer between 'buf_start'
+// and 'buf_end'.  'prefix_end' points to the first non-prefix character of the
+// buffer (as written by the LogPrefixHook.)
+using AbortHook = void (*)(const char* file, int line, const char* buf_start,
+                           const char* prefix_end, const char* buf_end);
+
+// Internal logging function for ABSL_INTERNAL_LOG to dispatch to.
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+using InternalLogFunction = void (*)(absl::LogSeverity severity,
+                                     const char* file, int line,
+                                     const std::string& message);
+
+extern base_internal::AtomicHook<InternalLogFunction> internal_log_function;
+
+void RegisterInternalLogFunction(InternalLogFunction func);
+
+}  // namespace raw_logging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
diff --git a/base/abseil/absl/base/internal/scheduling_mode.h b/base/abseil/absl/base/internal/scheduling_mode.h
new file mode 100644
index 0000000..8be5ab6
--- /dev/null
+++ b/base/abseil/absl/base/internal/scheduling_mode.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled.  Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics); allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled.  This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type.  Host operating system
+// semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
+// by default.  SCHEDULE_KERNEL_ONLY should only be used for resources on which
+// base::scheduling (e.g. the implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+  SCHEDULE_KERNEL_ONLY = 0,         // Allow scheduling only the host OS.
+  SCHEDULE_COOPERATIVE_AND_KERNEL,  // Also allow cooperative scheduling.
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
diff --git a/base/abseil/absl/base/internal/scoped_set_env.cc b/base/abseil/absl/base/internal/scoped_set_env.cc
new file mode 100644
index 0000000..8a934cb
--- /dev/null
+++ b/base/abseil/absl/base/internal/scoped_set_env.cc
@@ -0,0 +1,81 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/scoped_set_env.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include <cstdlib>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+
+#ifdef _WIN32
+const int kMaxEnvVarValueSize = 1024;
+#endif
+
+void SetEnvVar(const char* name, const char* value) {
+#ifdef _WIN32
+  SetEnvironmentVariableA(name, value);
+#else
+  if (value == nullptr) {
+    ::unsetenv(name);
+  } else {
+    ::setenv(name, value, 1);
+  }
+#endif
+}
+
+}  // namespace
+
+ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value)
+    : var_name_(var_name), was_unset_(false) {
+#ifdef _WIN32
+  char buf[kMaxEnvVarValueSize];
+  auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf));
+  ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size");
+
+  if (get_res == 0) {
+    was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND);
+  } else {
+    old_value_.assign(buf, get_res);
+  }
+#else
+  const char* val = ::getenv(var_name_.c_str());
+  if (val == nullptr) {
+    was_unset_ = true;
+  } else {
+    old_value_ = val;
+  }
+#endif
+
+  SetEnvVar(var_name_.c_str(), new_value);
+}
+
+ScopedSetEnv::~ScopedSetEnv() {
+  SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str());
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/scoped_set_env.h b/base/abseil/absl/base/internal/scoped_set_env.h
new file mode 100644
index 0000000..19ec7b5
--- /dev/null
+++ b/base/abseil/absl/base/internal/scoped_set_env.h
@@ -0,0 +1,45 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ScopedSetEnv {
+ public:
+  ScopedSetEnv(const char* var_name, const char* new_value);
+  ~ScopedSetEnv();
+
+ private:
+  std::string var_name_;
+  std::string old_value_;
+
+  // True if the environment variable was initially not set.
+  bool was_unset_;
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
diff --git a/base/abseil/absl/base/internal/scoped_set_env_test.cc b/base/abseil/absl/base/internal/scoped_set_env_test.cc
new file mode 100644
index 0000000..5cbad24
--- /dev/null
+++ b/base/abseil/absl/base/internal/scoped_set_env_test.cc
@@ -0,0 +1,99 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
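(Editor's illustration; not part of the patch. The RAII behavior that the
tests below verify, with `MY_VAR` as a placeholder variable name:)

    {
      absl::base_internal::ScopedSetEnv env("MY_VAR", "value");
      // getenv("MY_VAR") now yields "value".
    }
    // Scope exit: MY_VAR is restored to its prior value, or unset again if it
    // was not set before.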
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/scoped_set_env.h"
+
+namespace {
+
+using absl::base_internal::ScopedSetEnv;
+
+std::string GetEnvVar(const char* name) {
+#ifdef _WIN32
+  char buf[1024];
+  auto get_res = GetEnvironmentVariableA(name, buf, sizeof(buf));
+  if (get_res >= sizeof(buf)) {
+    return "TOO_BIG";
+  }
+
+  if (get_res == 0) {
+    return "UNSET";
+  }
+
+  return std::string(buf, get_res);
+#else
+  const char* val = ::getenv(name);
+  if (val == nullptr) {
+    return "UNSET";
+  }
+
+  return val;
+#endif
+}
+
+TEST(ScopedSetEnvTest, SetNonExistingVarToString) {
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+}
+
+TEST(ScopedSetEnvTest, SetNonExistingVarToNull) {
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+}
+
+TEST(ScopedSetEnvTest, SetExistingVarToString) {
+  ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "new_value");
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "new_value");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+}
+
+TEST(ScopedSetEnvTest, SetExistingVarToNull) {
+  ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+}
+
+}  // namespace
diff --git a/base/abseil/absl/base/internal/spinlock.cc b/base/abseil/absl/base/internal/spinlock.cc
new file mode 100644
index 0000000..8dd8dab
--- /dev/null
+++ b/base/abseil/absl/base/internal/spinlock.cc
@@ -0,0 +1,233 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/spinlock.h"
+
+#include <algorithm>
+#include <atomic>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
+#include "absl/base/call_once.h"
+
+// Description of lock-word:
+//  31..00: [............................3][2][1][0]
+//
+//     [0]: kSpinLockHeld
+//     [1]: kSpinLockCooperative
+//     [2]: kSpinLockDisabledScheduling
+// [31..3]: ONLY kSpinLockSleeper OR
+//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
+//
+// Detailed descriptions:
+//
+// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
+//
+// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
+//          contended iff kSpinLockCooperative is set.
+//
+// Bit [2]: This bit is exclusive from bit [1].  It is used only by a
+//          non-cooperative lock.  When set, indicates that scheduling was
+//          successfully disabled when the lock was acquired.  May be unset,
+//          even if non-cooperative, if a ThreadIdentity did not yet exist at
+//          time of acquisition.
+//
+// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
+//          acquired without contention, however, at least one waiter exists.
+//
+// Otherwise, bits [31..3] represent the time spent by the current lock
+// holder to acquire the lock.  There may be outstanding waiter(s).
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+ABSL_CONST_INIT static base_internal::AtomicHook<void (*)(
+    const void *lock, int64_t wait_cycles)>
+    submit_profile_data;
+
+void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
+                                         int64_t wait_cycles)) {
+  submit_profile_data.Store(fn);
+}
+
+// Uncommon constructors.
+SpinLock::SpinLock(base_internal::SchedulingMode mode)
+    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
+  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+}
+
+SpinLock::SpinLock(base_internal::LinkerInitialized,
+                   base_internal::SchedulingMode mode) {
+  ABSL_TSAN_MUTEX_CREATE(this, 0);
+  if (IsCooperative(mode)) {
+    InitLinkerInitializedAndCooperative();
+  }
+  // Otherwise, lockword_ is already initialized.
+}
+
+// Static (linker initialized) spinlocks always start life as functional
+// non-cooperative locks.  When their static constructor does run, it will call
+// this initializer to augment the lockword with the cooperative bit.  By
+// actually taking the lock when we do this we avoid the need for an atomic
+// operation in the regular unlock path.
+//
+// SlowLock() must be careful to re-test for this bit so that any outstanding
+// waiters may be upgraded to cooperative status.
+void SpinLock::InitLinkerInitializedAndCooperative() {
+  Lock();
+  lockword_.fetch_or(kSpinLockCooperative, std::memory_order_relaxed);
+  Unlock();
+}
+
+// Monitor the lock to see if its value changes within some time period
+// (adaptive_spin_count loop iterations).  The last value read from the lock
+// is returned from the method.
+uint32_t SpinLock::SpinLoop() {
+  // We are already in the slow path of SpinLock, initialize the
+  // adaptive_spin_count here.
+  ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
+  ABSL_CONST_INIT static int adaptive_spin_count = 0;
+  base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
+    adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
+  });
+
+  int c = adaptive_spin_count;
+  uint32_t lock_value;
+  do {
+    lock_value = lockword_.load(std::memory_order_relaxed);
+  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
+  return lock_value;
+}
+
+void SpinLock::SlowLock() {
+  uint32_t lock_value = SpinLoop();
+  lock_value = TryLockInternal(lock_value, 0);
+  if ((lock_value & kSpinLockHeld) == 0) {
+    return;
+  }
+  // The lock was not obtained initially, so this thread needs to wait for
+  // it.  Record the current timestamp in the local variable wait_start_time
+  // so the total wait time can be stored in the lockword once this thread
+  // obtains the lock.
+  int64_t wait_start_time = CycleClock::Now();
+  uint32_t wait_cycles = 0;
+  int lock_wait_call_count = 0;
+  while ((lock_value & kSpinLockHeld) != 0) {
+    // If the lock is currently held, but not marked as having a sleeper, mark
+    // it as having a sleeper.
+    if ((lock_value & kWaitTimeMask) == 0) {
+      // Here, just "mark" that the thread is going to sleep.  Don't store the
+      // lock wait time in the lock as that will cause the current lock
+      // owner to think it experienced contention.
+      if (lockword_.compare_exchange_strong(
+              lock_value, lock_value | kSpinLockSleeper,
+              std::memory_order_relaxed, std::memory_order_relaxed)) {
+        // Successfully transitioned to kSpinLockSleeper.  Pass
+        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
+        // the last lock_value observed.
+        lock_value |= kSpinLockSleeper;
+      } else if ((lock_value & kSpinLockHeld) == 0) {
+        // Lock is free again, so try and acquire it before sleeping.  The
+        // new lock state will be the number of cycles this thread waited if
+        // this thread obtains the lock.
+        lock_value = TryLockInternal(lock_value, wait_cycles);
+        continue;  // Skip the delay at the end of the loop.
+      }
+    }
+
+    base_internal::SchedulingMode scheduling_mode;
+    if ((lock_value & kSpinLockCooperative) != 0) {
+      scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+    } else {
+      scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+    }
+    // SpinLockDelay() calls into fiber scheduler, we need to see
+    // synchronization there to avoid false positives.
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    // Wait for an OS specific delay.
+    base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
+                                 scheduling_mode);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+    // Spin again after returning from the wait routine to give this thread
+    // some chance of obtaining the lock.
+    lock_value = SpinLoop();
+    wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
+    lock_value = TryLockInternal(lock_value, wait_cycles);
+  }
+}
+
+void SpinLock::SlowUnlock(uint32_t lock_value) {
+  base_internal::SpinLockWake(&lockword_,
+                              false);  // wake waiter if necessary
+
+  // If our acquisition was contended, collect contentionz profile info.  We
+  // reserve a unitary wait time to represent that a waiter exists without our
+  // own acquisition having been contended.
+  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
+    const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    submit_profile_data(this, wait_cycles);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+  }
+}
+
+// We use the upper 29 bits of the lock word to store the time spent waiting to
+// acquire this lock.  This is reported by contentionz profiling.  Since the
+// lower bits of the cycle counter wrap very quickly on high-frequency
+// processors we divide to reduce the granularity to 2^PROFILE_TIMESTAMP_SHIFT
+// sized units.  On a 4 GHz machine this will lose track of wait times greater
+// than (2^29/4 GHz)*128 =~ 17.2 seconds.  Such waits should be extremely rare.
+enum { PROFILE_TIMESTAMP_SHIFT = 7 };
+enum { LOCKWORD_RESERVED_SHIFT = 3 };  // We currently reserve the lower 3 bits.
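(Editor's illustration; not part of the patch. A worked example of the
encode/decode round-trip implemented by the two functions just below, using
the shifts defined above:)

    // For a wait of N = 1'000'000 cycles:
    //   encode: scaled = N >> 7 = 7812;  stored = scaled << 3 = 62496
    //           (the stored value occupies lock-word bits [31:3])
    //   decode: (stored & kWaitTimeMask) << (7 - 3) = 7812 << 7 = 999936
    // The round-trip preserves N to within 2^PROFILE_TIMESTAMP_SHIFT = 128
    // cycles, subject to clamping at kMaxWaitTime.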
+
+uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
+                                    int64_t wait_end_time) {
+  static const int64_t kMaxWaitTime =
+      std::numeric_limits<uint32_t>::max() >> LOCKWORD_RESERVED_SHIFT;
+  int64_t scaled_wait_time =
+      (wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT;
+
+  // Return a representation of the time spent waiting that can be stored in
+  // the lock word's upper bits.
+  uint32_t clamped = static_cast<uint32_t>(
+      std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT);
+
+  if (clamped == 0) {
+    return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
+  }
+  // Bump up value if necessary to avoid returning kSpinLockSleeper.
+  const uint32_t kMinWaitTime =
+      kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT);
+  if (clamped == kSpinLockSleeper) {
+    return kMinWaitTime;
+  }
+  return clamped;
+}
+
+uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+  // Cast to uint32_t first to ensure bits [63:32] are cleared.
+  const uint64_t scaled_wait_time =
+      static_cast<uint32_t>(lock_value & kWaitTimeMask);
+  return scaled_wait_time << (PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/spinlock.h b/base/abseil/absl/base/internal/spinlock.h
new file mode 100644
index 0000000..24e2e9a
--- /dev/null
+++ b/base/abseil/absl/base/internal/spinlock.h
@@ -0,0 +1,243 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Most users requiring mutual exclusion should use Mutex.
+// SpinLock is provided for use in three situations:
+//   - for use in code that Mutex itself depends on
+//   - to get a faster fast-path release under low contention (without an
+//     atomic read-modify-write) In return, SpinLock has worse behaviour under
+//     contention, which is why Mutex is preferred in most situations.
+//   - for async signal safety (see below)
+
+// SpinLock is async signal safe.  If a spinlock is used within a signal
+// handler, all code that acquires the lock must ensure that the signal cannot
+// arrive while they are holding the lock.  Typically, this is done by blocking
+// the signal.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ABSL_LOCKABLE SpinLock {
+ public:
+  SpinLock() : lockword_(kSpinLockCooperative) {
+    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+  }
+
+  // Special constructor for use with static SpinLock objects.  E.g.,
+  //
+  //    static SpinLock lock(base_internal::kLinkerInitialized);
+  //
+  // When initialized using this constructor, we depend on the fact
+  // that the linker has already initialized the memory appropriately. The lock
+  // is initialized in non-cooperative mode.
+  //
+  // A SpinLock constructed like this can be freely used from global
+  // initializers without worrying about the order in which global
+  // initializers run.
+  explicit SpinLock(base_internal::LinkerInitialized) {
+    // Does nothing; lockword_ is already initialized
+    ABSL_TSAN_MUTEX_CREATE(this, 0);
+  }
+
+  // Constructors that allow non-cooperative spinlocks to be created for use
+  // inside thread schedulers.  Normal clients should not use these.
+  explicit SpinLock(base_internal::SchedulingMode mode);
+  SpinLock(base_internal::LinkerInitialized,
+           base_internal::SchedulingMode mode);
+
+  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
+
+  // Acquire this SpinLock.
+  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+    if (!TryLockImpl()) {
+      SlowLock();
+    }
+    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+  }
+
+  // Try to acquire this SpinLock without blocking and return true if the
+  // acquisition was successful.  If the lock was not acquired, false is
+  // returned.  If this SpinLock is free at the time of the call, TryLock
+  // will return true with high probability.
+  inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+    bool res = TryLockImpl();
+    ABSL_TSAN_MUTEX_POST_LOCK(
+        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
+        0);
+    return res;
+  }
+
+  // Release this SpinLock, which must be held by the calling thread.
+  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
+                                    std::memory_order_release);
+
+    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
+      base_internal::SchedulingGuard::EnableRescheduling(true);
+    }
+    if ((lock_value & kWaitTimeMask) != 0) {
+      // Collect contentionz profile info, and speed the wakeup of any waiter.
+      // The wait_cycles value indicates how long this thread spent waiting
+      // for the lock.
+      SlowUnlock(lock_value);
+    }
+    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+  }
+
+  // Determine if the lock is held.  When the lock is held by the invoking
+  // thread, true will always be returned.  Intended to be used as
+  // CHECK(lock.IsHeld()).
+  inline bool IsHeld() const {
+    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
+  }
+
+ protected:
+  // These should not be exported except for testing.
+
+  // Store number of cycles between wait_start_time and wait_end_time in a
+  // lock value.
+  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+                                   int64_t wait_end_time);
+
+  // Extract number of wait cycles in a lock value.
+  static uint64_t DecodeWaitCycles(uint32_t lock_value);
+
+  // Provide access to protected method above.  Use for testing only.
+  friend struct SpinLockTest;
+
+ private:
+  // lockword_ is used to store the following:
+  //
+  // bit[0] encodes whether a lock is being held.
+  // bit[1] encodes whether a lock uses cooperative scheduling.
+  // bit[2] encodes whether a lock disables scheduling.
+  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
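+  //
+  // For example (illustrative): a held, cooperative lock whose owner
+  // observed 31250 scaled wait-cycle units stores
+  // (31250 << 3) | kSpinLockCooperative | kSpinLockHeld == 0x3D093.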
+  enum { kSpinLockHeld = 1 };
+  enum { kSpinLockCooperative = 2 };
+  enum { kSpinLockDisabledScheduling = 4 };
+  enum { kSpinLockSleeper = 8 };
+  enum { kWaitTimeMask =  // Includes kSpinLockSleeper.
+         ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling) };
+
+  // Returns true if the provided scheduling mode is cooperative.
+  static constexpr bool IsCooperative(
+      base_internal::SchedulingMode scheduling_mode) {
+    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  }
+
+  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
+  void InitLinkerInitializedAndCooperative();
+  void SlowLock() ABSL_ATTRIBUTE_COLD;
+  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
+  uint32_t SpinLoop();
+
+  inline bool TryLockImpl() {
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
+  }
+
+  std::atomic<uint32_t> lockword_;
+
+  SpinLock(const SpinLock&) = delete;
+  SpinLock& operator=(const SpinLock&) = delete;
+};
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
+class ABSL_SCOPED_LOCKABLE SpinLockHolder {
+ public:
+  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
+      : lock_(l) {
+    l->Lock();
+  }
+  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
+
+  SpinLockHolder(const SpinLockHolder&) = delete;
+  SpinLockHolder& operator=(const SpinLockHolder&) = delete;
+
+ private:
+  SpinLock* lock_;
+};
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a spinlock is
+// contended.  The callback is given an opaque handle to the contended spinlock
+// and the number of wait cycles.  This is thread-safe, but only a single
+// profiler can be registered.  It is an error to call this function multiple
+// times with different arguments.
+void RegisterSpinLockProfiler(void (*fn)(const void* lock,
+                                         int64_t wait_cycles));
+
+//------------------------------------------------------------------------------
+// Public interface ends here.
+//------------------------------------------------------------------------------
+
+// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
+// Otherwise, returns last observed value for lockword_.
+inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
+                                          uint32_t wait_cycles) {
+  if ((lock_value & kSpinLockHeld) != 0) {
+    return lock_value;
+  }
+
+  uint32_t sched_disabled_bit = 0;
+  if ((lock_value & kSpinLockCooperative) == 0) {
+    // For non-cooperative locks we must make sure we mark ourselves as
+    // non-reschedulable before we attempt to CompareAndSwap.
+    if (base_internal::SchedulingGuard::DisableRescheduling()) {
+      sched_disabled_bit = kSpinLockDisabledScheduling;
+    }
+  }
+
+  if (!lockword_.compare_exchange_strong(
+          lock_value,
+          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+          std::memory_order_acquire, std::memory_order_relaxed)) {
+    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
+  }
+
+  return lock_value;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_
diff --git a/base/abseil/absl/base/internal/spinlock_akaros.inc b/base/abseil/absl/base/internal/spinlock_akaros.inc
new file mode 100644
index 0000000..bc46894
--- /dev/null
+++ b/base/abseil/absl/base/internal/spinlock_akaros.inc
@@ -0,0 +1,35 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+    int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+  // In Akaros, one must take care not to call anything that could cause a
+  // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock.  Our callers assume this function will not call into libraries
+  // or other arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/base/abseil/absl/base/internal/spinlock_benchmark.cc b/base/abseil/absl/base/internal/spinlock_benchmark.cc
new file mode 100644
index 0000000..0451c65
--- /dev/null
+++ b/base/abseil/absl/base/internal/spinlock_benchmark.cc
@@ -0,0 +1,52 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// See also //absl/synchronization:mutex_benchmark for a comparison of SpinLock
+// and Mutex performance under varying levels of contention.
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+template <absl::base_internal::SchedulingMode scheduling_mode>
+static void BM_SpinLock(benchmark::State& state) {
+  // Ensure a ThreadIdentity is installed.
+  ABSL_INTERNAL_CHECK(
+      absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() !=
+          nullptr,
+      "GetOrCreateCurrentThreadIdentity() failed");
+
+  static auto* spinlock = new absl::base_internal::SpinLock(scheduling_mode);
+  for (auto _ : state) {
+    absl::base_internal::SpinLockHolder holder(spinlock);
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_SpinLock,
+                   absl::base_internal::SCHEDULE_KERNEL_ONLY)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+BENCHMARK_TEMPLATE(BM_SpinLock,
+                   absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+}  // namespace
diff --git a/base/abseil/absl/base/internal/spinlock_linux.inc b/base/abseil/absl/base/internal/spinlock_linux.inc
new file mode 100644
index 0000000..28e29d1
--- /dev/null
+++ b/base/abseil/absl/base/internal/spinlock_linux.inc
@@ -0,0 +1,67 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Linux-specific part of spinlock_wait.cc
+
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <cerrno>
+#include <climits>
+#include <cstdint>
+#include <ctime>
+
+#include "absl/base/attributes.h"
+
+// The SpinLock lockword is `std::atomic<uint32_t>`.  Here we assert that
+// `std::atomic<uint32_t>` is bitwise equivalent of the `int` expected
+// by SYS_futex.  We also assume that reads/writes done to the lockword
+// by SYS_futex have rational semantics with regard to the
+// std::atomic<> API.  C++ provides no guarantees of these assumptions,
+// but they are believed to hold in practice.
+static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
+              "SpinLock lockword has the wrong size for a futex");
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#endif
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode) {
+  int save_errno = errno;
+  struct timespec tm;
+  tm.tv_sec = 0;
+  tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
+  errno = save_errno;
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic<uint32_t> *w,
+                                                  bool all) {
+  syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
+}
+
+}  // extern "C"
diff --git a/base/abseil/absl/base/internal/spinlock_posix.inc b/base/abseil/absl/base/internal/spinlock_posix.inc
new file mode 100644
index 0000000..f025b5f
--- /dev/null
+++ b/base/abseil/absl/base/internal/spinlock_posix.inc
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Posix-specific part of spinlock_wait.cc
+
+#include <sched.h>
+
+#include <atomic>
+#include <cerrno>
+#include <ctime>
+
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/port.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  int save_errno = errno;
+  if (loop == 0) {
+  } else if (loop == 1) {
+    sched_yield();
+  } else {
+    struct timespec tm;
+    tm.tv_sec = 0;
+    tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+    nanosleep(&tm, nullptr);
+  }
+  errno = save_errno;
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/base/abseil/absl/base/internal/spinlock_wait.cc b/base/abseil/absl/base/internal/spinlock_wait.cc
new file mode 100644
index 0000000..fa824be
--- /dev/null
+++ b/base/abseil/absl/base/internal/spinlock_wait.cc
@@ -0,0 +1,81 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The OS-specific header included below must provide two calls:
+// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake().
+// See spinlock_wait.h for the specs.
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/spinlock_wait.h"
+
+#if defined(_WIN32)
+#include "absl/base/internal/spinlock_win32.inc"
+#elif defined(__linux__)
+#include "absl/base/internal/spinlock_linux.inc"
+#elif defined(__akaros__)
+#include "absl/base/internal/spinlock_akaros.inc"
+#else
+#include "absl/base/internal/spinlock_posix.inc"
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// See spinlock_wait.h for spec.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      base_internal::SchedulingMode scheduling_mode) {
+  int loop = 0;
+  for (;;) {
+    uint32_t v = w->load(std::memory_order_acquire);
+    int i;
+    for (i = 0; i != n && v != trans[i].from; i++) {
+    }
+    if (i == n) {
+      SpinLockDelay(w, v, ++loop, scheduling_mode);  // no matching transition
+    } else if (trans[i].to == v ||  // null transition
+               w->compare_exchange_strong(v, trans[i].to,
+                                          std::memory_order_acquire,
+                                          std::memory_order_relaxed)) {
+      if (trans[i].done) return v;
+    }
+  }
+}
+
+static std::atomic<uint64_t> delay_rand;
+
+// Return a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop) {
+  // Weak pseudo-random number generator to get some spread between threads
+  // when many are spinning.
+  uint64_t r = delay_rand.load(std::memory_order_relaxed);
+  r = 0x5deece66dLL * r + 0xb;  // numbers from nrand48()
+  delay_rand.store(r, std::memory_order_relaxed);
+
+  if (loop < 0 || loop > 32) {  // limit loop to 0..32
+    loop = 32;
+  }
+  const int kMinDelay = 128 << 10;  // 128us
+  // Double delay every 8 iterations, up to 16x (2ms).
+  int delay = kMinDelay << (loop / 8);
+  // Randomize in delay..2*delay range, for resulting 128us..4ms range.
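+  // For example (illustrative): at loop == 16 the shift is 16 / 8 == 2, so
+  // delay == (128 << 10) << 2 == 2^19 ns (~524us); OR-ing in the low bits of
+  // the pseudo-random value yields a delay in the ~524us..~1048us range, so
+  // spinning threads do not all wake in lockstep.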
+  return delay | ((delay - 1) & static_cast<int>(r));
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/spinlock_wait.h b/base/abseil/absl/base/internal/spinlock_wait.h
new file mode 100644
index 0000000..169bc74
--- /dev/null
+++ b/base/abseil/absl/base/internal/spinlock_wait.h
@@ -0,0 +1,93 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+
+// Operations to make atomic transitions on a word, and to allow
+// waiting for those transitions to become possible.
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// SpinLockWait() waits until it can perform one of several transitions from
+// "from" to "to".  It returns when it performs a transition where done==true.
+struct SpinLockWaitTransition {
+  uint32_t from;
+  uint32_t to;
+  bool done;
+};
+
+// Wait until *w can transition from trans[i].from to trans[i].to for some i
+// satisfying 0<=i<n && trans[i].done, atomically make the transition, then
+// return the old value of *w.  Make any other atomic transitions where
+// !trans[i].done, but continue waiting.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      SchedulingMode scheduling_mode);
+
+// If possible, wake some thread that has called SpinLockDelay(w, ...).  If
+// "all" is true, wake all such threads.  This call is a hint, and on some
+// systems it may be a no-op; threads calling SpinLockDelay() will always wake
+// eventually even if SpinLockWake() is never called.
+void SpinLockWake(std::atomic<uint32_t> *w, bool all);
+
+// Wait for an appropriate spin delay on iteration "loop" of a
+// spin loop on location *w, whose previously observed value was "value".
+// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
+// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
+// In all cases, it must return in bounded time even if SpinLockWake() is not
+// called.
+void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
+                   base_internal::SchedulingMode scheduling_mode);
+
+// Helper used by AbslInternalSpinLockDelay.
+// Returns a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop);
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker.  This causes it to flag weak symbol overrides as ODR
+// violations.  Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" { +void AbslInternalSpinLockWake(std::atomic *w, bool all); +void AbslInternalSpinLockDelay( + std::atomic *w, uint32_t value, int loop, + absl::base_internal::SchedulingMode scheduling_mode); +} + +inline void absl::base_internal::SpinLockWake(std::atomic *w, + bool all) { + AbslInternalSpinLockWake(w, all); +} + +inline void absl::base_internal::SpinLockDelay( + std::atomic *w, uint32_t value, int loop, + absl::base_internal::SchedulingMode scheduling_mode) { + AbslInternalSpinLockDelay(w, value, loop, scheduling_mode); +} + +#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ diff --git a/base/abseil/absl/base/internal/spinlock_win32.inc b/base/abseil/absl/base/internal/spinlock_win32.inc new file mode 100644 index 0000000..78654b5 --- /dev/null +++ b/base/abseil/absl/base/internal/spinlock_win32.inc @@ -0,0 +1,37 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is a Win32-specific part of spinlock_wait.cc + +#include +#include +#include "absl/base/internal/scheduling_mode.h" + +extern "C" { + +void AbslInternalSpinLockDelay(std::atomic* /* lock_word */, + uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { + if (loop == 0) { + } else if (loop == 1) { + Sleep(0); + } else { + Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000); + } +} + +void AbslInternalSpinLockWake(std::atomic* /* lock_word */, + bool /* all */) {} + +} // extern "C" diff --git a/base/abseil/absl/base/internal/sysinfo.cc b/base/abseil/absl/base/internal/sysinfo.cc new file mode 100644 index 0000000..7945322 --- /dev/null +++ b/base/abseil/absl/base/internal/sysinfo.cc @@ -0,0 +1,414 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#include "absl/base/internal/sysinfo.h"
+
+#include "absl/base/attributes.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <sys/syscall.h>
+#endif
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#endif
+
+#if defined(__myriad2__)
+#include <rtems.h>
+#endif
+
+#include <string.h>
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <limits>
+#include <thread>  // NOLINT(build/c++11)
+#include <utility>
+#include <vector>
+
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+static once_flag init_system_info_once;
+static int num_cpus = 0;
+static double nominal_cpu_frequency = 1.0;  // 0.0 might be dangerous.
+
+static int GetNumCPUs() {
+#if defined(__myriad2__)
+  return 1;
+#else
+  // Other possibilities:
+  //  - Read /sys/devices/system/cpu/online and use cpumask_parse()
+  //  - sysconf(_SC_NPROCESSORS_ONLN)
+  return std::thread::hardware_concurrency();
+#endif
+}
+
+#if defined(_WIN32)
+
+static double GetNominalCPUFrequency() {
+#pragma comment(lib, "advapi32.lib")  // For Reg* functions.
+  HKEY key;
+  // Use the Reg* functions rather than the SH functions because shlwapi.dll
+  // pulls in gdi32.dll which makes process destruction much more costly.
+  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
+                    "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0,
+                    KEY_READ, &key) == ERROR_SUCCESS) {
+    DWORD type = 0;
+    DWORD data = 0;
+    DWORD data_size = sizeof(data);
+    auto result = RegQueryValueExA(key, "~MHz", 0, &type,
+                                   reinterpret_cast<LPBYTE>(&data), &data_size);
+    RegCloseKey(key);
+    if (result == ERROR_SUCCESS && type == REG_DWORD &&
+        data_size == sizeof(data)) {
+      return data * 1e6;  // Value is MHz.
+    }
+  }
+  return 1.0;
+}
+
+#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
+
+static double GetNominalCPUFrequency() {
+  unsigned freq;
+  size_t size = sizeof(freq);
+  int mib[2] = {CTL_HW, HW_CPU_FREQ};
+  if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
+    return static_cast<double>(freq);
+  }
+  return 1.0;
+}
+
+#else
+
+// Helper function for reading a long from a file. Returns true if successful
+// and the memory location pointed to by value is set to the value read.
+static bool ReadLongFromFile(const char *file, long *value) {
+  bool ret = false;
+  int fd = open(file, O_RDONLY);
+  if (fd != -1) {
+    char line[1024];
+    char *err;
+    memset(line, '\0', sizeof(line));
+    int len = read(fd, line, sizeof(line) - 1);
+    if (len <= 0) {
+      ret = false;
+    } else {
+      const long temp_value = strtol(line, &err, 10);
+      if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
+        *value = temp_value;
+        ret = true;
+      }
+    }
+    close(fd);
+  }
+  return ret;
+}
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+
+// Reads a monotonic time source and returns a value in
+// nanoseconds. The returned value uses an arbitrary epoch, not the
+// Unix epoch.
+static int64_t ReadMonotonicClockNanos() {
+  struct timespec t;
+#ifdef CLOCK_MONOTONIC_RAW
+  int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
+#else
+  int rc = clock_gettime(CLOCK_MONOTONIC, &t);
+#endif
+  if (rc != 0) {
+    perror("clock_gettime() failed");
+    abort();
+  }
+  return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
+}
+
+class UnscaledCycleClockWrapperForInitializeFrequency {
+ public:
+  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+};
+
+struct TimeTscPair {
+  int64_t time;  // From ReadMonotonicClockNanos().
+  int64_t tsc;   // From UnscaledCycleClock::Now().
+};
+
+// Returns a pair of values (monotonic kernel time, TSC ticks) that
+// approximately correspond to each other.  This is accomplished by
+// doing several reads and picking the reading with the lowest
+// latency.  This approach is used to minimize the probability that
+// our thread was preempted between clock reads.
+static TimeTscPair GetTimeTscPair() {
+  int64_t best_latency = std::numeric_limits<int64_t>::max();
+  TimeTscPair best;
+  for (int i = 0; i < 10; ++i) {
+    int64_t t0 = ReadMonotonicClockNanos();
+    int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
+    int64_t t1 = ReadMonotonicClockNanos();
+    int64_t latency = t1 - t0;
+    if (latency < best_latency) {
+      best_latency = latency;
+      best.time = t0;
+      best.tsc = tsc;
+    }
+  }
+  return best;
+}
+
+// Measures and returns the TSC frequency by taking a pair of
+// measurements approximately `sleep_nanoseconds` apart.
+static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
+  auto t0 = GetTimeTscPair();
+  struct timespec ts;
+  ts.tv_sec = 0;
+  ts.tv_nsec = sleep_nanoseconds;
+  while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
+  auto t1 = GetTimeTscPair();
+  double elapsed_ticks = t1.tsc - t0.tsc;
+  double elapsed_time = (t1.time - t0.time) * 1e-9;
+  return elapsed_ticks / elapsed_time;
+}
+
+// Measures and returns the TSC frequency by calling
+// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the
+// frequency measurement stabilizes.
+static double MeasureTscFrequency() {
+  double last_measurement = -1.0;
+  int sleep_nanoseconds = 1000000;  // 1 millisecond.
+  for (int i = 0; i < 8; ++i) {
+    double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
+    if (measurement * 0.99 < last_measurement &&
+        last_measurement < measurement * 1.01) {
+      // Use the current measurement if it is within 1% of the
+      // previous measurement.
+      return measurement;
+    }
+    last_measurement = measurement;
+    sleep_nanoseconds *= 2;
+  }
+  return last_measurement;
+}
+
+#endif  // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+
+static double GetNominalCPUFrequency() {
+  long freq = 0;
+
+  // Google's production kernel has a patch to export the TSC
+  // frequency through sysfs.  If the kernel is exporting the TSC
+  // frequency use that.  There are issues where cpuinfo_max_freq
+  // cannot be relied on because the BIOS may be exporting an invalid
+  // p-state (on x86) or p-states may be used to put the processor in
+  // a new mode (turbo mode).  Essentially, those frequencies cannot
+  // always be relied upon.  The same reasons apply to /proc/cpuinfo as
+  // well.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+  // On these platforms, the TSC frequency is the nominal CPU
+  // frequency.
+  // But without having the kernel export it directly
+  // through /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no
+  // other way to reliably get the TSC frequency, so we have to
+  // measure it ourselves.  Some CPUs abuse cpuinfo_max_freq by
+  // exporting "fake" frequencies for implementing new features.  For
+  // example, Intel's turbo mode is enabled by exposing a p-state
+  // value with a higher frequency than that of the real TSC
+  // rate.  Because of this, we prefer to measure the TSC rate
+  // ourselves on i386 and x86-64.
+  return MeasureTscFrequency();
+#else
+
+  // If CPU scaling is in effect, we want to use the *maximum*
+  // frequency, not whatever CPU speed some random processor happens
+  // to be using now.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+                       &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+  return 1.0;
+#endif  // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+}
+
+#endif
+
+// InitializeSystemInfo() may be called before main() and before
+// malloc is properly initialized, therefore this must not allocate
+// memory.
+static void InitializeSystemInfo() {
+  num_cpus = GetNumCPUs();
+  nominal_cpu_frequency = GetNominalCPUFrequency();
+}
+
+int NumCPUs() {
+  base_internal::LowLevelCallOnce(&init_system_info_once, InitializeSystemInfo);
+  return num_cpus;
+}
+
+double NominalCPUFrequency() {
+  base_internal::LowLevelCallOnce(&init_system_info_once, InitializeSystemInfo);
+  return nominal_cpu_frequency;
+}
+
+#if defined(_WIN32)
+
+pid_t GetTID() {
+  return pid_t{GetCurrentThreadId()};
+}
+
+#elif defined(__linux__)
+
+#ifndef SYS_gettid
+#define SYS_gettid __NR_gettid
+#endif
+
+pid_t GetTID() {
+  return syscall(SYS_gettid);
+}
+
+#elif defined(__akaros__)
+
+pid_t GetTID() {
+  // Akaros has a concept of "vcore context", which is the state the program
+  // is forced into when we need to make a user-level scheduling decision, or
+  // run a signal handler.  This is analogous to the interrupt context that a
+  // CPU might enter if it encounters some kind of exception.
+  //
+  // There is no current thread context in vcore context, but we need to give
+  // a reasonable answer if asked for a thread ID (e.g., in a signal handler).
+  // Thread 0 always exists, so if we are in vcore context, we return that.
+  //
+  // Otherwise, we know (since we are using pthreads) that the uthread struct
+  // current_uthread is pointing to is the first element of a
+  // struct pthread_tcb, so we extract and return the thread ID from that.
+  //
+  // TODO(dcross): Akaros anticipates moving the thread ID to the uthread
+  // structure at some point.  We should modify this code to remove the cast
+  // when that happens.
+  if (in_vcore_context())
+    return 0;
+  return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
+}
+
+#elif defined(__myriad2__)
+
+pid_t GetTID() {
+  uint32_t tid;
+  rtems_task_ident(RTEMS_SELF, 0, &tid);
+  return tid;
+}
+
+#else
+
+// Fallback implementation of GetTID using pthread_getspecific.
+static once_flag tid_once;
+static pthread_key_t tid_key;
+static absl::base_internal::SpinLock tid_lock(
+    absl::base_internal::kLinkerInitialized);
+
+// We set a bit per thread in this array to indicate that an ID is in
+// use. ID 0 is unused because it is the default value returned by
+// pthread_getspecific().
+static std::vector<uint32_t>* tid_array GUARDED_BY(tid_lock) = nullptr;
+static constexpr int kBitsPerWord = 32;  // tid_array is uint32_t.
+
+// Returns the TID to tid_array.
+static void FreeTID(void *v) {
+  intptr_t tid = reinterpret_cast<intptr_t>(v);
+  int word = tid / kBitsPerWord;
+  uint32_t mask = ~(1u << (tid % kBitsPerWord));
+  absl::base_internal::SpinLockHolder lock(&tid_lock);
+  assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
+  (*tid_array)[word] &= mask;
+}
+
+static void InitGetTID() {
+  if (pthread_key_create(&tid_key, FreeTID) != 0) {
+    // The logging system calls GetTID() so it can't be used here.
+    perror("pthread_key_create failed");
+    abort();
+  }
+
+  // Initialize tid_array.
+  absl::base_internal::SpinLockHolder lock(&tid_lock);
+  tid_array = new std::vector<uint32_t>(1);
+  (*tid_array)[0] = 1;  // ID 0 is never-allocated.
+}
+
+// Return a per-thread small integer ID from pthread's thread-specific data.
+pid_t GetTID() {
+  absl::call_once(tid_once, InitGetTID);
+
+  intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+  if (tid != 0) {
+    return tid;
+  }
+
+  int bit;  // tid_array[word] = 1u << bit;
+  size_t word;
+  {
+    // Search for the first unused ID.
+    absl::base_internal::SpinLockHolder lock(&tid_lock);
+    // First search for a word in the array that is not all ones.
+    word = 0;
+    while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
+      ++word;
+    }
+    if (word == tid_array->size()) {
+      tid_array->push_back(0);  // No space left, add kBitsPerWord more IDs.
+    }
+    // Search for a zero bit in the word.
+    bit = 0;
+    while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
+      ++bit;
+    }
+    tid = (word * kBitsPerWord) + bit;
+    (*tid_array)[word] |= 1u << bit;  // Mark the TID as allocated.
+  }
+
+  if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
+    perror("pthread_setspecific failed");
+    abort();
+  }
+
+  return static_cast<pid_t>(tid);
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/sysinfo.h b/base/abseil/absl/base/internal/sysinfo.h
new file mode 100644
index 0000000..7246d5d
--- /dev/null
+++ b/base/abseil/absl/base/internal/sysinfo.h
@@ -0,0 +1,66 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file includes routines to find out characteristics
+// of the machine a program is running on.  It is undoubtedly
+// system-dependent.
+
+// Functions listed here that accept a pid_t as an argument act on the
+// current process if the pid_t argument is 0.
+// All functions here are thread-hostile due to file caching unless
+// commented otherwise.
+
+#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
+#define ABSL_BASE_INTERNAL_SYSINFO_H_
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <cstdint>
+
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Nominal core processor cycles per second of each processor.  This is _not_
+// necessarily the frequency of the CycleClock counter (see cycleclock.h)
+// Thread-safe.
+double NominalCPUFrequency();
+
+// Number of logical processors (hyperthreads) in system. Thread-safe.
+int NumCPUs();
+
+// Return the thread id of the current thread, as told by the system.
+// No two currently-live threads implemented by the OS shall have the same ID.
+// Thread ids of exited threads may be reused.  Multiple user-level threads
+// may have the same thread ID if multiplexed on the same OS thread.
+//
+// On Linux, you may send a signal to the resulting ID with kill().  However,
+// it is recommended for portability that you use pthread_kill() instead.
+#ifdef _WIN32
+// On Windows, process ids and thread ids are of the same type: the return
+// types of GetProcessId() and GetThreadId() are both DWORD, an unsigned
+// 32-bit type.
+using pid_t = uint32_t;
+#endif
+pid_t GetTID();
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SYSINFO_H_
diff --git a/base/abseil/absl/base/internal/sysinfo_test.cc b/base/abseil/absl/base/internal/sysinfo_test.cc
new file mode 100644
index 0000000..cdec9b6
--- /dev/null
+++ b/base/abseil/absl/base/internal/sysinfo_test.cc
@@ -0,0 +1,100 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#ifndef _WIN32
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_set>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/synchronization/barrier.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+TEST(SysinfoTest, NumCPUs) {
+  EXPECT_NE(NumCPUs(), 0)
+      << "NumCPUs() should not have the default value of 0";
+}
+
+TEST(SysinfoTest, NominalCPUFrequency) {
+#if !(defined(__aarch64__) && defined(__linux__)) && !defined(__EMSCRIPTEN__)
+  EXPECT_GE(NominalCPUFrequency(), 1000.0)
+      << "NominalCPUFrequency() did not return a reasonable value";
+#else
+  // Aarch64 cannot read the CPU frequency from sysfs, so we get back 1.0.
+  // Emscripten does not have a sysfs to read from at all.
+  EXPECT_EQ(NominalCPUFrequency(), 1.0)
+      << "CPU frequency detection was fixed! Please update unittest.";
+#endif
+}
+
+TEST(SysinfoTest, GetTID) {
+  EXPECT_EQ(GetTID(), GetTID());  // Basic compile and equality test.
+#ifdef __native_client__
+  // Native Client has a race condition bug that leads to memory
+  // exhaustion when repeatedly creating and joining threads.
+  // https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027
+  return;
+#endif
+  // Test that TIDs are unique to each thread.
+  // Uses a few loops to exercise implementations that reallocate IDs.
+  for (int i = 0; i < 32; ++i) {
+    constexpr int kNumThreads = 64;
+    Barrier all_threads_done(kNumThreads);
+    std::vector<std::thread> threads;
+
+    Mutex mutex;
+    std::unordered_set<pid_t> tids;
+
+    for (int j = 0; j < kNumThreads; ++j) {
+      threads.push_back(std::thread([&]() {
+        pid_t id = GetTID();
+        {
+          MutexLock lock(&mutex);
+          ASSERT_TRUE(tids.find(id) == tids.end());
+          tids.insert(id);
+        }
+        // We can't simply join the threads here.  The threads need to
+        // be alive otherwise the TID might have been reallocated to
+        // another live thread.
+        all_threads_done.Block();
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+#ifdef __linux__
+TEST(SysinfoTest, LinuxGetTID) {
+  // On Linux, for the main thread, GetTID()==getpid() is guaranteed by the API.
+  EXPECT_EQ(GetTID(), getpid());
+}
+#endif
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/thread_annotations.h b/base/abseil/absl/base/internal/thread_annotations.h
new file mode 100644
index 0000000..4dab6a9
--- /dev/null
+++ b/base/abseil/absl/base/internal/thread_annotations.h
@@ -0,0 +1,271 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: thread_annotations.h
+// -----------------------------------------------------------------------------
+//
+// WARNING: This is a backwards compatible header and it will be removed after
+// the migration to prefixed thread annotations is finished; please include
+// "absl/base/thread_annotations.h".
+//
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code.  The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// These annotations are implemented using compiler attributes.  Using the
+// macros defined here instead of raw attributes allows for portability and
+// future compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible.  If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
+#endif
+
+// GUARDED_BY()
+//
+// Documents if a shared field or global variable needs to be protected by a
+// mutex. GUARDED_BY() allows the user to specify a particular mutex that
+// should be held when accessing the annotated variable.
+//
+// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to
+// local variables, a local variable and its associated mutex can often be
+// combined into a small class or struct, thereby allowing the annotation.
+//
+// Example:
+//
+//   class Foo {
+//     Mutex mu_;
+//     int p1_ GUARDED_BY(mu_);
+//     ...
+//   };
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+// PT_GUARDED_BY()
+//
+// Documents if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer.
+//
+// Example:
+//   class Foo {
+//     Mutex mu_;
+//     int *p1_ PT_GUARDED_BY(mu_);
+//     ...
+//   };
+//
+// Note that a pointer variable to a shared memory location could itself be a
+// shared variable.
+//
+// Example:
+//
+//   // `q_`, guarded by `mu1_`, points to a shared memory location that is
+//   // guarded by `mu2_`:
+//   int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_);
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
+//
+// Documents the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+//
+// As with GUARDED_BY, this is only applicable to mutexes that are shared
+// fields or global variables.
+//
+// Example:
+//
+//   Mutex m1_;
+//   Mutex m2_ ACQUIRED_AFTER(m1_);
+#define ACQUIRED_AFTER(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define ACQUIRED_BEFORE(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
+//
+// Documents a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to, and exit from, the
+// function.
+//
+// An exclusive lock allows read-write access to the guarded data member(s), and
+// only one thread can acquire a lock exclusively at any one time. A shared
+// lock allows read-only access, and any number of threads can acquire a shared
+// lock concurrently.
+//
+// Generally, non-const methods should be annotated with
+// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with
+// SHARED_LOCKS_REQUIRED.
+//
+// Example:
+//
+//   Mutex mu1, mu2;
+//   int a GUARDED_BY(mu1);
+//   int b GUARDED_BY(mu2);
+//
+//   void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }
+//   void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define SHARED_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// LOCKS_EXCLUDED()
+//
+// Documents the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// non-reentrant).
+#define LOCKS_EXCLUDED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it.  For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with LOCK_RETURNED.
+#define LOCK_RETURNED(x) \
+  THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#define LOCKABLE \
+  THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is
+// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
+// arguments; the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
+#define SCOPED_LOCKABLE \
+  THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+// SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#define SHARED_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#define UNLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
+// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define SHARED_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define ASSERT_SHARED_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
+
+// NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct, but
+// the locking behavior is more complicated than the analyzer can handle.
+#define NO_THREAD_SAFETY_ANALYSIS \
+  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes.  These
+// annotations will be ignored by the analysis.
+#define TS_UNCHECKED(x) ""
+
+// TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to TS_UNCHECKED.
+#define TS_FIXME(x) ""
+
+// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
+// a particular function.  However, this attribute is used to mark functions
+// that are incorrect and need to be fixed.  It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
+// annotation that needs to be fixed, because it is producing a thread safety
+// warning.  It disables the GUARDED_BY.
+#define GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation. This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
+
+
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+}  // namespace thread_safety_analysis
+
+#endif  // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
diff --git a/base/abseil/absl/base/internal/thread_identity.cc b/base/abseil/absl/base/internal/thread_identity.cc
new file mode 100644
index 0000000..6a28f24
--- /dev/null
+++ b/base/abseil/absl/base/internal/thread_identity.cc
@@ -0,0 +1,140 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+namespace {
+// Used to co-ordinate one-time creation of our pthread_key
+absl::once_flag init_thread_identity_key_once;
+pthread_key_t thread_identity_pthread_key;
+std::atomic<bool> pthread_key_initialized(false);
+
+void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
+  pthread_key_create(&thread_identity_pthread_key, reclaimer);
+  pthread_key_initialized.store(true, std::memory_order_release);
+}
+}  // namespace
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+// The actual TLS storage for a thread's currently associated ThreadIdentity.
+// This is referenced by inline accessors in the header.
+// "protected" visibility ensures that if multiple instances of Abseil code
+// exist within a process (via dlopen() or similar), references to
+// thread_identity_ptr from each instance of the code will refer to
+// *different* instances of this ptr.
+#ifdef __GNUC__
+__attribute__((visibility("protected")))
+#endif  // __GNUC__
+#if ABSL_PER_THREAD_TLS
+// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
+ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
+#elif defined(ABSL_HAVE_THREAD_LOCAL)
+thread_local ThreadIdentity* thread_identity_ptr = nullptr;
+#endif  // ABSL_PER_THREAD_TLS
+#endif  // TLS or CPP11
+
+void SetCurrentThreadIdentity(
+    ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+  // Associate our destructor.
+  // NOTE: This call to pthread_setspecific is currently the only immovable
+  // barrier to CurrentThreadIdentity() always being async signal safe.
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // NOTE: Not async-safe.  But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+
+#if defined(__EMSCRIPTEN__) || defined(__MINGW32__)
+  // Emscripten and MinGW pthread implementations do not support signals.
+  // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+  // for more information.
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+#else
+  // We must mask signals around the call to setspecific as with current glibc,
+  // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent())
+  // may zero our value.
+  //
+  // While not officially async-signal safe, getspecific within a signal handler
+  // is otherwise OK.
+  sigset_t all_signals;
+  sigset_t curr_signals;
+  sigfillset(&all_signals);
+  pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr);
+#endif  // !__EMSCRIPTEN__ && !__MINGW32__
+
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS
+  // NOTE: Not async-safe.  But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  thread_identity_ptr = identity;
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction>
+      holder(identity, reclaimer);
+  thread_identity_ptr = identity;
+#else
+#error Unimplemented ABSL_THREAD_IDENTITY_MODE
+#endif
+}
+
+void ClearCurrentThreadIdentity() {
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_identity_ptr = nullptr;
+#elif ABSL_THREAD_IDENTITY_MODE == \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // pthread_setspecific expected to clear value on destruction
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  bool initialized = pthread_key_initialized.load(std::memory_order_acquire);
+  if (!initialized) {
+    return nullptr;
+  }
+  return reinterpret_cast<ThreadIdentity*>(
+      pthread_getspecific(thread_identity_pthread_key));
+}
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/internal/thread_identity.h b/base/abseil/absl/base/internal/thread_identity.h
new file mode 100644
index 0000000..5dfd071
--- /dev/null
+++ b/base/abseil/absl/base/internal/thread_identity.h
@@ -0,0 +1,250 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread in
+// interfaces at various levels.  ThreadIdentity objects are never deallocated.
+// When a thread terminates, its ThreadIdentity object may be reused for a
+// thread created later.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/per_thread_tls.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of absl::Mutex and absl::CondVar.
+struct PerThreadSynch {
+  // The internal representation of absl::Mutex and absl::CondVar rely
+  // on the alignment of PerThreadSynch. Both store the address of the
+  // PerThreadSynch in the high-order bits of their internal state,
+  // which means the low kLowZeroBits of the address of PerThreadSynch
+  // must be zero.
+  static constexpr int kLowZeroBits = 8;
+  static constexpr int kAlignment = 1 << kLowZeroBits;
+
+  // Returns the associated ThreadIdentity.
+  // This can be implemented as a cast because we guarantee
+  // PerThreadSynch is the first element of ThreadIdentity.
+  ThreadIdentity* thread_identity() {
+    return reinterpret_cast<ThreadIdentity*>(this);
+  }
+
+  PerThreadSynch *next;  // Circular waiter queue; initialized to 0.
+  PerThreadSynch *skip;  // If non-zero, all entries in Mutex queue
+                         // up to and including "skip" have same
+                         // condition as this, and will be woken later
+  bool may_skip;         // if false while on mutex queue, a mutex unlocker
+                         // is using this PerThreadSynch as a terminator.  Its
+                         // skip field must not be filled in because the loop
+                         // might then skip over the terminator.
+
+  // The wait parameters of the current wait.  waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()). This field may be
+  // changed only by the thread that describes this PerThreadSynch.  A
+  // special case is Fer(), which calls Enqueue() on another thread,
+  // but with an identical SynchWaitParams pointer, thus leaving the
+  // pointer unchanged.
+  SynchWaitParams *waitp;
+
+  bool suppress_fatal_errors;  // If true, try to proceed even in the face of
+                               // broken invariants.  This is used within fatal
+                               // signal handlers to improve the chances of
+                               // debug logging information being output
+                               // successfully.
+
+  intptr_t readers;  // Number of readers in mutex.
+  int priority;      // Priority of thread (updated every so often).
+
+  // When priority will next be read (cycles).
+  int64_t next_priority_read_cycles;
+
+  // State values:
+  //   kAvailable: This PerThreadSynch is available.
+  //   kQueued: This PerThreadSynch is unavailable, it's currently queued on a
+  //            Mutex or CondVar waiter list.
+  //
+  // Transitions from kQueued to kAvailable require a release
+  // barrier. This is needed as a waiter may use "state" to
+  // independently observe that it's no longer queued.
+  //
+  // Transitions from kAvailable to kQueued require no barrier, they
+  // are externally ordered by the Mutex.
+  enum State {
+    kAvailable,
+    kQueued
+  };
+  std::atomic<State> state;
+
+  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
+                         // true if UnlockSlow could be searching
+                         // for a waiter to wake.  Used for an optimization
+                         // in Enqueue().  true is always a valid value.
+                         // Can be reset to false when the unlocker or any
+                         // writer releases the lock, or a reader fully releases
+                         // the lock.  It may not be set to false by a reader
+                         // that decrements the count to non-zero.
+                         // protected by mutex spinlock
+
+  bool wake;  // This thread is to be woken from a Mutex.
+
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;
+
+  // Locks held; used during deadlock detection.
+  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
+  SynchLocksHeld *all_locks;
+};
+
+struct ThreadIdentity {
+  // Must be the first member.  The Mutex implementation requires that
+  // the PerThreadSynch object associated with each thread is
+  // PerThreadSynch::kAlignment aligned.  We provide this alignment on
+  // ThreadIdentity itself.
+  PerThreadSynch per_thread_synch;
+
+  // Private: Reserved for absl::synchronization_internal::Waiter.
+  struct WaiterState {
+    char data[128];
+  } waiter_state;
+
+  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
+  std::atomic<int>* blocked_count_ptr;
+
+  // The following variables are mostly read/written just by the
+  // thread itself.  The only exception is that these are read by
+  // a ticker thread as a hint.
+  std::atomic<int> ticker;      // Tick counter, incremented once per second.
+  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
+  std::atomic<bool> is_idle;    // Has thread become idle yet?
+
+  ThreadIdentity* next;
+};
+
+// Returns the ThreadIdentity object representing the calling thread; guaranteed
+// to be unique for its lifetime.  The returned object will remain valid for the
+// program's lifetime, although it may be re-assigned to a subsequent thread.
+// If one does not exist, return nullptr instead.
+//
+// Does not malloc(*), and is async-signal safe.
+// [*] Technically pthread_setspecific() does malloc on first use; however this
+// is handled internally within tcmalloc's initialization already.
+//
+// New ThreadIdentity objects can be constructed and associated with a thread
+// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
+ThreadIdentity* CurrentThreadIdentityIfPresent();
+
+using ThreadIdentityReclaimerFunction = void (*)(void*);
+
+// Sets the current thread identity to the given value.  'reclaimer' is a
+// pointer to the global function for cleaning up instances on thread
+// destruction.
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer);
+
+// Removes the currently associated ThreadIdentity from the running thread.
+// This must be called from inside the ThreadIdentityReclaimerFunction, and only
+// from that function.
+void ClearCurrentThreadIdentity();
+
+// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode index>
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE
+#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
+#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
+#elif defined(_WIN32) && !defined(__MINGW32__)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+    (__GOOGLE_GRTE_VERSION__ >= 20140228L)
+// Support for async-safe TLS was specifically added in GRTEv4.  It's not
+// present in the upstream eglibc.
+// Note:  Current default for production systems.
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#else
+#define ABSL_THREAD_IDENTITY_MODE \
+  ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+#if ABSL_PER_THREAD_TLS
+ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
+    thread_identity_ptr;
+#elif defined(ABSL_HAVE_THREAD_LOCAL)
+ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
+#else
+#error Thread-local storage not detected on this platform
+#endif
+
+inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  return thread_identity_ptr;
+}
+
+#elif ABSL_THREAD_IDENTITY_MODE != \
+      ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error Unknown ABSL_THREAD_IDENTITY_MODE
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
diff --git a/base/abseil/absl/base/internal/thread_identity_benchmark.cc b/base/abseil/absl/base/internal/thread_identity_benchmark.cc
new file mode 100644
index 0000000..0ae10f2
--- /dev/null
+++ b/base/abseil/absl/base/internal/thread_identity_benchmark.cc
@@ -0,0 +1,38 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
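The three ABSL_THREAD_IDENTITY_MODE settings above reduce to two lookup strategies: USE_TLS and USE_CPP11 read an inline thread-local pointer, while USE_POSIX_SETSPECIFIC goes through pthread_getspecific(). The following self-contained sketch shows both strategies side by side, assuming a POSIX platform; Identity, tls_identity, and GetIdentityViaKey are invented names, not Abseil API:

  // Illustrative sketch only; mirrors the two lookup strategies above.
  #include <pthread.h>

  #include <cstdio>

  struct Identity {
    int id;
  };

  // Strategy 1 (USE_TLS / USE_CPP11): a plain thread-local pointer.
  // The accessor is a single TLS load, which is why the header can define
  // CurrentThreadIdentityIfPresent() as a trivial inline read.
  thread_local Identity* tls_identity = nullptr;

  // Strategy 2 (USE_POSIX_SETSPECIFIC): a pthread key, created once.
  pthread_key_t key;
  pthread_once_t key_once = PTHREAD_ONCE_INIT;
  void MakeKey() { pthread_key_create(&key, nullptr); }

  Identity* GetIdentityViaKey() {
    return static_cast<Identity*>(pthread_getspecific(key));
  }

  int main() {
    static Identity main_identity{1};
    tls_identity = &main_identity;             // strategy 1: direct store
    pthread_once(&key_once, MakeKey);          // strategy 2: one-time key setup
    pthread_setspecific(key, &main_identity);  // strategy 2: per-thread store
    std::printf("tls=%d key=%d\n", tls_identity->id, GetIdentityViaKey()->id);
  }

The benchmark file that follows measures exactly this difference: the safe GetOrCreateCurrentThreadIdentity() path against the raw CurrentThreadIdentityIfPresent() read.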
+ +#include "benchmark/benchmark.h" +#include "absl/base/internal/thread_identity.h" +#include "absl/synchronization/internal/create_thread_identity.h" +#include "absl/synchronization/internal/per_thread_sem.h" + +namespace { + +void BM_SafeCurrentThreadIdentity(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize( + absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()); + } +} +BENCHMARK(BM_SafeCurrentThreadIdentity); + +void BM_UnsafeCurrentThreadIdentity(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize( + absl::base_internal::CurrentThreadIdentityIfPresent()); + } +} +BENCHMARK(BM_UnsafeCurrentThreadIdentity); + +} // namespace diff --git a/base/abseil/absl/base/internal/thread_identity_test.cc b/base/abseil/absl/base/internal/thread_identity_test.cc new file mode 100644 index 0000000..3685779 --- /dev/null +++ b/base/abseil/absl/base/internal/thread_identity_test.cc @@ -0,0 +1,128 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/thread_identity.h" + +#include // NOLINT(build/c++11) +#include + +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/spinlock.h" +#include "absl/base/macros.h" +#include "absl/synchronization/internal/per_thread_sem.h" +#include "absl/synchronization/mutex.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +namespace { + +// protects num_identities_reused +static absl::base_internal::SpinLock map_lock( + absl::base_internal::kLinkerInitialized); +static int num_identities_reused; + +static const void* const kCheckNoIdentity = reinterpret_cast(1); + +static void TestThreadIdentityCurrent(const void* assert_no_identity) { + ThreadIdentity* identity; + + // We have to test this conditionally, because if the test framework relies + // on Abseil, then some previous action may have already allocated an + // identity. + if (assert_no_identity == kCheckNoIdentity) { + identity = CurrentThreadIdentityIfPresent(); + EXPECT_TRUE(identity == nullptr); + } + + identity = synchronization_internal::GetOrCreateCurrentThreadIdentity(); + EXPECT_TRUE(identity != nullptr); + ThreadIdentity* identity_no_init; + identity_no_init = CurrentThreadIdentityIfPresent(); + EXPECT_TRUE(identity == identity_no_init); + + // Check that per_thread_synch is correctly aligned. + EXPECT_EQ(0, reinterpret_cast(&identity->per_thread_synch) % + PerThreadSynch::kAlignment); + EXPECT_EQ(identity, identity->per_thread_synch.thread_identity()); + + absl::base_internal::SpinLockHolder l(&map_lock); + num_identities_reused++; +} + +TEST(ThreadIdentityTest, BasicIdentityWorks) { + // This tests for the main() thread. + TestThreadIdentityCurrent(nullptr); +} + +TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) { + // Now try the same basic test with multiple threads being created and + // destroyed. This makes sure that: + // - New threads are created without a ThreadIdentity. 
+ // - We re-allocate ThreadIdentity objects from the free-list. + // - If a thread implementation chooses to recycle threads, that + // correct re-initialization occurs. + static const int kNumLoops = 3; + static const int kNumThreads = 400; + for (int iter = 0; iter < kNumLoops; iter++) { + std::vector threads; + for (int i = 0; i < kNumThreads; ++i) { + threads.push_back( + std::thread(TestThreadIdentityCurrent, kCheckNoIdentity)); + } + for (auto& thread : threads) { + thread.join(); + } + } + + // We should have recycled ThreadIdentity objects above; while (external) + // library threads allocating their own identities may preclude some + // reuse, we should have sufficient repetitions to exclude this. + EXPECT_LT(kNumThreads, num_identities_reused); +} + +TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) { + // This test repeatly creates and joins a series of threads, each of + // which acquires and releases shared Mutex locks. This verifies + // Mutex operations work correctly under a reused + // ThreadIdentity. Note that the most likely failure mode of this + // test is a crash or deadlock. + static const int kNumLoops = 10; + static const int kNumThreads = 12; + static const int kNumMutexes = 3; + static const int kNumLockLoops = 5; + + Mutex mutexes[kNumMutexes]; + for (int iter = 0; iter < kNumLoops; ++iter) { + std::vector threads; + for (int thread = 0; thread < kNumThreads; ++thread) { + threads.push_back(std::thread([&]() { + for (int l = 0; l < kNumLockLoops; ++l) { + for (int m = 0; m < kNumMutexes; ++m) { + MutexLock lock(&mutexes[m]); + } + } + })); + } + for (auto& thread : threads) { + thread.join(); + } + } +} + +} // namespace +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/base/internal/throw_delegate.cc b/base/abseil/absl/base/internal/throw_delegate.cc new file mode 100644 index 0000000..c055f75 --- /dev/null +++ b/base/abseil/absl/base/internal/throw_delegate.cc @@ -0,0 +1,108 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
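The file that opens here, throw_delegate.cc, keeps every actual `throw` in one translation unit: header-only code calls an out-of-line ThrowStd*() helper instead of throwing directly, so whether the program really throws or logs fatally is decided once, by the flags used to compile throw_delegate.cc. A hypothetical caller might look like the sketch below (FixedVec is an invented example; the delegate itself is Abseil-internal API):

  // Sketch: a header-only container using the throw delegate for checked
  // access, so it still "fails correctly" when built with -fno-exceptions.
  #include <cstddef>
  #include <cstdio>

  #include "absl/base/internal/throw_delegate.h"

  template <typename T, size_t N>
  class FixedVec {
   public:
    T& at(size_t i) {
      // The throw itself happens inside throw_delegate.cc, not in this header.
      if (i >= N) absl::base_internal::ThrowStdOutOfRange("FixedVec::at");
      return data_[i];
    }

   private:
    T data_[N] = {};
  };

  int main() {
    FixedVec<int, 4> v;
    v.at(1) = 7;
    std::printf("%d\n", v.at(1));  // OK; v.at(9) would raise std::out_of_range
  }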
+ +#include "absl/base/internal/throw_delegate.h" + +#include +#include +#include +#include +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +namespace { +template +[[noreturn]] void Throw(const T& error) { +#ifdef ABSL_HAVE_EXCEPTIONS + throw error; +#else + ABSL_RAW_LOG(FATAL, "%s", error.what()); + std::abort(); +#endif +} +} // namespace + +void ThrowStdLogicError(const std::string& what_arg) { + Throw(std::logic_error(what_arg)); +} +void ThrowStdLogicError(const char* what_arg) { + Throw(std::logic_error(what_arg)); +} +void ThrowStdInvalidArgument(const std::string& what_arg) { + Throw(std::invalid_argument(what_arg)); +} +void ThrowStdInvalidArgument(const char* what_arg) { + Throw(std::invalid_argument(what_arg)); +} + +void ThrowStdDomainError(const std::string& what_arg) { + Throw(std::domain_error(what_arg)); +} +void ThrowStdDomainError(const char* what_arg) { + Throw(std::domain_error(what_arg)); +} + +void ThrowStdLengthError(const std::string& what_arg) { + Throw(std::length_error(what_arg)); +} +void ThrowStdLengthError(const char* what_arg) { + Throw(std::length_error(what_arg)); +} + +void ThrowStdOutOfRange(const std::string& what_arg) { + Throw(std::out_of_range(what_arg)); +} +void ThrowStdOutOfRange(const char* what_arg) { + Throw(std::out_of_range(what_arg)); +} + +void ThrowStdRuntimeError(const std::string& what_arg) { + Throw(std::runtime_error(what_arg)); +} +void ThrowStdRuntimeError(const char* what_arg) { + Throw(std::runtime_error(what_arg)); +} + +void ThrowStdRangeError(const std::string& what_arg) { + Throw(std::range_error(what_arg)); +} +void ThrowStdRangeError(const char* what_arg) { + Throw(std::range_error(what_arg)); +} + +void ThrowStdOverflowError(const std::string& what_arg) { + Throw(std::overflow_error(what_arg)); +} +void ThrowStdOverflowError(const char* what_arg) { + Throw(std::overflow_error(what_arg)); +} + +void ThrowStdUnderflowError(const std::string& what_arg) { + Throw(std::underflow_error(what_arg)); +} +void ThrowStdUnderflowError(const char* what_arg) { + Throw(std::underflow_error(what_arg)); +} + +void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); } + +void ThrowStdBadAlloc() { Throw(std::bad_alloc()); } + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/base/internal/throw_delegate.h b/base/abseil/absl/base/internal/throw_delegate.h new file mode 100644 index 0000000..075f527 --- /dev/null +++ b/base/abseil/absl/base/internal/throw_delegate.h @@ -0,0 +1,75 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ +#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ + +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Helper functions that allow throwing exceptions consistently from anywhere. 
+// The main use case is for header-based libraries (e.g. templates), as they
+// will be built by many different targets with their own compiler options.
+// In particular, this will allow a safe way to throw exceptions even if the
+// caller is compiled with -fno-exceptions. This is intended for implementing
+// things like map<>::at(), which the standard documents as throwing an
+// exception on error.
+//
+// Using other techniques like #if tricks could lead to ODR violations.
+//
+// You shouldn't use it unless you're writing code that you know will be built
+// both with and without exceptions and you need to conform to an interface
+// that uses exceptions.
+
+[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLogicError(const char* what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
+[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
+[[noreturn]] void ThrowStdDomainError(const char* what_arg);
+[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLengthError(const char* what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
+[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRangeError(const char* what_arg);
+[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
+
+[[noreturn]] void ThrowStdBadFunctionCall();
+[[noreturn]] void ThrowStdBadAlloc();
+
+// ThrowStdBadArrayNewLength() cannot be consistently supported because
+// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
+// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
+// libcxx (as of 3.2) and msvc (as of 2015) both have it.
+// [[noreturn]] void ThrowStdBadArrayNewLength();
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
diff --git a/base/abseil/absl/base/internal/tsan_mutex_interface.h b/base/abseil/absl/base/internal/tsan_mutex_interface.h
new file mode 100644
index 0000000..2a51060
--- /dev/null
+++ b/base/abseil/absl/base/internal/tsan_mutex_interface.h
@@ -0,0 +1,66 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is intended solely for spinlock.h.
+// It provides ThreadSanitizer annotations for custom mutexes.
+// See <sanitizer/tsan_interface.h> for meaning of these annotations.
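The annotation macros defined below are meant to bracket the acquire and release points of a custom lock so ThreadSanitizer can model it like a standard mutex. A rough sketch of the intended call pattern (TinySpinLock is an invented example; the flags arguments are left at 0 for simplicity):

  #include <atomic>

  #include "absl/base/internal/tsan_mutex_interface.h"

  class TinySpinLock {
   public:
    TinySpinLock() { ABSL_TSAN_MUTEX_CREATE(this, 0); }
    ~TinySpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); }

    void Lock() {
      ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);  // a lock attempt is starting
      while (locked_.exchange(true, std::memory_order_acquire)) {
      }
      ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);  // lock acquired, recursion 0
    }

    void Unlock() {
      ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
      locked_.store(false, std::memory_order_release);
      ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
    }

   private:
    std::atomic<bool> locked_{false};
  };

  int main() {
    TinySpinLock mu;
    mu.Lock();
    mu.Unlock();
  }

When the TSan interface is unavailable, every macro expands to nothing, so the annotations cost nothing in ordinary builds.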
+
+#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+
+// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+// Macro intended only for internal use.
+//
+// Checks whether LLVM Thread Sanitizer interfaces are available.
+// First made available in LLVM 5.0 (Sep 2017).
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
+#endif
+
+#if defined(THREAD_SANITIZER) && defined(__has_include)
+#if __has_include(<sanitizer/tsan_interface.h>)
+#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
+#endif
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#include <sanitizer/tsan_interface.h>
+
+#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
+#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
+#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
+#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
+#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
+#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
+#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
+#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
+
+#else
+
+#define ABSL_TSAN_MUTEX_CREATE(...)
+#define ABSL_TSAN_MUTEX_DESTROY(...)
+#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
+#define ABSL_TSAN_MUTEX_POST_LOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
+#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
diff --git a/base/abseil/absl/base/internal/unaligned_access.h b/base/abseil/absl/base/internal/unaligned_access.h
new file mode 100644
index 0000000..6be56c8
--- /dev/null
+++ b/base/abseil/absl/base/internal/unaligned_access.h
@@ -0,0 +1,158 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+
+#include <string.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+// unaligned APIs
+
+// Portable handling of unaligned loads, stores, and copies.
+
+// The unaligned API is C++ only.  The declarations use C++ features
+// (namespaces, inline) which are absent or incompatible in C.
+#if defined(__cplusplus)
+
+#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||\
+    defined(MEMORY_SANITIZER)
+// Consider we have an unaligned load/store of 4 bytes from address 0x...05.
+// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
+// will miss a bug if 08 is the first unaddressable byte.
+// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
+// miss a race between this access and some other accesses to 08.
+// MemorySanitizer will correctly propagate the shadow on unaligned stores +// and correctly report bugs on unaligned loads, but it may not properly +// update and report the origin of the uninitialized memory. +// For all three tools, replacing an unaligned access with a tool-specific +// callback solves the problem. + +// Make sure uint16_t/uint32_t/uint64_t are defined. +#include + +extern "C" { +uint16_t __sanitizer_unaligned_load16(const void *p); +uint32_t __sanitizer_unaligned_load32(const void *p); +uint64_t __sanitizer_unaligned_load64(const void *p); +void __sanitizer_unaligned_store16(void *p, uint16_t v); +void __sanitizer_unaligned_store32(void *p, uint32_t v); +void __sanitizer_unaligned_store64(void *p, uint64_t v); +} // extern "C" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +inline uint16_t UnalignedLoad16(const void *p) { + return __sanitizer_unaligned_load16(p); +} + +inline uint32_t UnalignedLoad32(const void *p) { + return __sanitizer_unaligned_load32(p); +} + +inline uint64_t UnalignedLoad64(const void *p) { + return __sanitizer_unaligned_load64(p); +} + +inline void UnalignedStore16(void *p, uint16_t v) { + __sanitizer_unaligned_store16(p, v); +} + +inline void UnalignedStore32(void *p, uint32_t v) { + __sanitizer_unaligned_store32(p, v); +} + +inline void UnalignedStore64(void *p, uint64_t v) { + __sanitizer_unaligned_store64(p, v); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (absl::base_internal::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (absl::base_internal::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::base_internal::UnalignedStore16(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::base_internal::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::base_internal::UnalignedStore64(_p, _val)) + +#else + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +inline uint16_t UnalignedLoad16(const void *p) { + uint16_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint32_t UnalignedLoad32(const void *p) { + uint32_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint64_t UnalignedLoad64(const void *p) { + uint64_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); } + +inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); } + +inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (absl::base_internal::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (absl::base_internal::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::base_internal::UnalignedStore16(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::base_internal::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::base_internal::UnalignedStore64(_p, _val)) + +#endif + +#endif // defined(__cplusplus), end of unaligned API + +#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ diff --git 
a/base/abseil/absl/base/internal/unscaledcycleclock.cc b/base/abseil/absl/base/internal/unscaledcycleclock.cc new file mode 100644 index 0000000..a32936a --- /dev/null +++ b/base/abseil/absl/base/internal/unscaledcycleclock.cc @@ -0,0 +1,103 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/unscaledcycleclock.h" + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +#if defined(_WIN32) +#include +#endif + +#if defined(__powerpc__) || defined(__ppc__) +#include +#endif + +#include "absl/base/internal/sysinfo.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +#if defined(__i386__) + +int64_t UnscaledCycleClock::Now() { + int64_t ret; + __asm__ volatile("rdtsc" : "=A"(ret)); + return ret; +} + +double UnscaledCycleClock::Frequency() { + return base_internal::NominalCPUFrequency(); +} + +#elif defined(__x86_64__) + +int64_t UnscaledCycleClock::Now() { + uint64_t low, high; + __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); + return (high << 32) | low; +} + +double UnscaledCycleClock::Frequency() { + return base_internal::NominalCPUFrequency(); +} + +#elif defined(__powerpc__) || defined(__ppc__) + +int64_t UnscaledCycleClock::Now() { + return __ppc_get_timebase(); +} + +double UnscaledCycleClock::Frequency() { + return __ppc_get_timebase_freq(); +} + +#elif defined(__aarch64__) + +// System timer of ARMv8 runs at a different frequency than the CPU's. +// The frequency is fixed, typically in the range 1-50MHz. It can be +// read at CNTFRQ special register. We assume the OS has set up +// the virtual timer properly. +int64_t UnscaledCycleClock::Now() { + int64_t virtual_timer_value; + asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); + return virtual_timer_value; +} + +double UnscaledCycleClock::Frequency() { + uint64_t aarch64_timer_frequency; + asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency)); + return aarch64_timer_frequency; +} + +#elif defined(_M_IX86) || defined(_M_X64) + +#pragma intrinsic(__rdtsc) + +int64_t UnscaledCycleClock::Now() { + return __rdtsc(); +} + +double UnscaledCycleClock::Frequency() { + return base_internal::NominalCPUFrequency(); +} + +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK diff --git a/base/abseil/absl/base/internal/unscaledcycleclock.h b/base/abseil/absl/base/internal/unscaledcycleclock.h new file mode 100644 index 0000000..cdce9bf --- /dev/null +++ b/base/abseil/absl/base/internal/unscaledcycleclock.h @@ -0,0 +1,124 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// UnscaledCycleClock
+//    An UnscaledCycleClock yields the value and frequency of a cycle counter
+//    that increments at a rate that is approximately constant.
+//    This class is for internal / whitelisted use only; you should consider
+//    using CycleClock instead.
+//
+// Notes:
+// The cycle counter frequency is not necessarily the core clock frequency.
+// That is, CycleCounter cycles are not necessarily "CPU cycles".
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor.  Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately.  If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+
+#include <cstdint>
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/port.h"
+
+// The following platforms have an implementation of a hardware counter.
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+    defined(__powerpc__) || defined(__ppc__) || \
+    defined(_M_IX86) || defined(_M_X64)
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
+#else
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
+#endif
+
+// The following platforms often disable access to the hardware
+// counter (through a sandbox) even if the underlying hardware has a
+// usable counter.  The CycleTimer interface also requires a *scaled*
+// CycleClock that runs at at least 1 MHz.  We've found some Android
+// ARM64 devices where this is not the case, so we disable it by
+// default on Android ARM64.
+#if defined(__native_client__) || \
+    (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \
+    (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK               \
+  (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+   ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
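Given the Now()/Frequency() pair described above, converting a counter delta to wall time is a single division. The sketch below is illustrative only: it stands in for the internal API with GCC/Clang's __builtin_ia32_rdtsc() and an assumed nominal 3 GHz frequency, where real code would use CycleClock or a measured frequency instead:

  #include <cstdint>
  #include <cstdio>

  // Stand-ins for the Now()/Frequency() pair (x86, GCC/Clang only).
  int64_t CycleNow() { return static_cast<int64_t>(__builtin_ia32_rdtsc()); }
  double CycleFrequency() { return 3.0e9; }  // assumption: nominal 3 GHz TSC

  int main() {
    int64_t start = CycleNow();
    volatile double sink = 0;
    for (int i = 0; i < 1000000; ++i) sink += i;  // work being timed
    int64_t elapsed = CycleNow() - start;
    // The counter is unscaled: divide by its frequency to get seconds.
    std::printf("%.6f s\n", elapsed / CycleFrequency());
  }

The #if that follows defines ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY on x86, where Frequency() really is NominalCPUFrequency().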
+#if (defined(__i386__) || defined(__x86_64__) || \
+     defined(_M_IX86) || defined(_M_X64))
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+class UnscaledCycleClockWrapperForGetCurrentTime;
+}  // namespace time_internal
+
+namespace base_internal {
+class CycleClock;
+class UnscaledCycleClockWrapperForInitializeFrequency;
+
+class UnscaledCycleClock {
+ private:
+  UnscaledCycleClock() = delete;
+
+  // Return the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // Return how much UnscaledCycleClock::Now() increases per second.
+  // This is not necessarily the core CPU clock frequency.
+  // It may be the nominal value reported by the kernel, rather than a measured
+  // value.
+  static double Frequency();
+
+  // Whitelisted friends.
+  friend class base_internal::CycleClock;
+  friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
+  friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
diff --git a/base/abseil/absl/base/invoke_test.cc b/base/abseil/absl/base/invoke_test.cc
new file mode 100644
index 0000000..6aa613c
--- /dev/null
+++ b/base/abseil/absl/base/invoke_test.cc
@@ -0,0 +1,223 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/invoke.h"
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/str_cat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+int Function(int a, int b) { return a - b; }
+
+int Sink(std::unique_ptr<int> p) {
+  return *p;
+}
+
+std::unique_ptr<int> Factory(int n) {
+  return make_unique<int>(n);
+}
+
+void NoOp() {}
+
+struct ConstFunctor {
+  int operator()(int a, int b) const { return a - b; }
+};
+
+struct MutableFunctor {
+  int operator()(int a, int b) { return a - b; }
+};
+
+struct EphemeralFunctor {
+  int operator()(int a, int b) && { return a - b; }
+};
+
+struct OverloadedFunctor {
+  template <typename... Args>
+  std::string operator()(const Args&... args) & {
+    return StrCat("&", args...);
+  }
+  template <typename... Args>
+  std::string operator()(const Args&... args) const& {
+    return StrCat("const&", args...);
+  }
+  template <typename... Args>
+  std::string operator()(const Args&...
args) && { + return StrCat("&&", args...); + } +}; + +struct Class { + int Method(int a, int b) { return a - b; } + int ConstMethod(int a, int b) const { return a - b; } + int RefMethod(int a, int b) & { return a - b; } + int RefRefMethod(int a, int b) && { return a - b; } + int NoExceptMethod(int a, int b) noexcept { return a - b; } + int VolatileMethod(int a, int b) volatile { return a - b; } + + int member; +}; + +struct FlipFlop { + int ConstMethod() const { return member; } + FlipFlop operator*() const { return {-member}; } + + int member; +}; + +// CallMaybeWithArg(f) resolves either to Invoke(f) or Invoke(f, 42), depending +// on which one is valid. +template +decltype(Invoke(std::declval())) CallMaybeWithArg(const F& f) { + return Invoke(f); +} + +template +decltype(Invoke(std::declval(), 42)) CallMaybeWithArg(const F& f) { + return Invoke(f, 42); +} + +TEST(InvokeTest, Function) { + EXPECT_EQ(1, Invoke(Function, 3, 2)); + EXPECT_EQ(1, Invoke(&Function, 3, 2)); +} + +TEST(InvokeTest, NonCopyableArgument) { + EXPECT_EQ(42, Invoke(Sink, make_unique(42))); +} + +TEST(InvokeTest, NonCopyableResult) { + EXPECT_THAT(Invoke(Factory, 42), ::testing::Pointee(42)); +} + +TEST(InvokeTest, VoidResult) { + Invoke(NoOp); +} + +TEST(InvokeTest, ConstFunctor) { + EXPECT_EQ(1, Invoke(ConstFunctor(), 3, 2)); +} + +TEST(InvokeTest, MutableFunctor) { + MutableFunctor f; + EXPECT_EQ(1, Invoke(f, 3, 2)); + EXPECT_EQ(1, Invoke(MutableFunctor(), 3, 2)); +} + +TEST(InvokeTest, EphemeralFunctor) { + EphemeralFunctor f; + EXPECT_EQ(1, Invoke(std::move(f), 3, 2)); + EXPECT_EQ(1, Invoke(EphemeralFunctor(), 3, 2)); +} + +TEST(InvokeTest, OverloadedFunctor) { + OverloadedFunctor f; + const OverloadedFunctor& cf = f; + + EXPECT_EQ("&", Invoke(f)); + EXPECT_EQ("& 42", Invoke(f, " 42")); + + EXPECT_EQ("const&", Invoke(cf)); + EXPECT_EQ("const& 42", Invoke(cf, " 42")); + + EXPECT_EQ("&&", Invoke(std::move(f))); + EXPECT_EQ("&& 42", Invoke(std::move(f), " 42")); +} + +TEST(InvokeTest, ReferenceWrapper) { + ConstFunctor cf; + MutableFunctor mf; + EXPECT_EQ(1, Invoke(std::cref(cf), 3, 2)); + EXPECT_EQ(1, Invoke(std::ref(cf), 3, 2)); + EXPECT_EQ(1, Invoke(std::ref(mf), 3, 2)); +} + +TEST(InvokeTest, MemberFunction) { + std::unique_ptr p(new Class); + std::unique_ptr cp(new Class); + std::unique_ptr vp(new Class); + + EXPECT_EQ(1, Invoke(&Class::Method, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::Method, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::Method, *p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::RefMethod, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::RefMethod, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::RefMethod, *p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::RefRefMethod, std::move(*p), 3, 2)); // NOLINT + EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, *p, 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::ConstMethod, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, *p, 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, *cp, 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::VolatileMethod, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::VolatileMethod, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::VolatileMethod, *p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::VolatileMethod, vp, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::VolatileMethod, vp.get(), 3, 2)); + 
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, *vp, 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::Method, make_unique(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique(), 3, 2)); +} + +TEST(InvokeTest, DataMember) { + std::unique_ptr p(new Class{42}); + std::unique_ptr cp(new Class{42}); + EXPECT_EQ(42, Invoke(&Class::member, p)); + EXPECT_EQ(42, Invoke(&Class::member, *p)); + EXPECT_EQ(42, Invoke(&Class::member, p.get())); + + Invoke(&Class::member, p) = 42; + Invoke(&Class::member, p.get()) = 42; + + EXPECT_EQ(42, Invoke(&Class::member, cp)); + EXPECT_EQ(42, Invoke(&Class::member, *cp)); + EXPECT_EQ(42, Invoke(&Class::member, cp.get())); +} + +TEST(InvokeTest, FlipFlop) { + FlipFlop obj = {42}; + // This call could resolve to (obj.*&FlipFlop::ConstMethod)() or + // ((*obj).*&FlipFlop::ConstMethod)(). We verify that it's the former. + EXPECT_EQ(42, Invoke(&FlipFlop::ConstMethod, obj)); + EXPECT_EQ(42, Invoke(&FlipFlop::member, obj)); +} + +TEST(InvokeTest, SfinaeFriendly) { + CallMaybeWithArg(NoOp); + EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42)); +} + +} // namespace +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/base/log_severity.cc b/base/abseil/absl/base/log_severity.cc new file mode 100644 index 0000000..72312af --- /dev/null +++ b/base/abseil/absl/base/log_severity.cc @@ -0,0 +1,27 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/log_severity.h" + +#include + +namespace absl { +ABSL_NAMESPACE_BEGIN + +std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) { + if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s); + return os << "absl::LogSeverity(" << static_cast(s) << ")"; +} +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/base/log_severity.h b/base/abseil/absl/base/log_severity.h new file mode 100644 index 0000000..65a3b16 --- /dev/null +++ b/base/abseil/absl/base/log_severity.h @@ -0,0 +1,121 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ +#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::LogSeverity +// +// Four severity levels are defined. 
Logging APIs should terminate the program
+// when a message is logged at severity `kFatal`; the other levels have no
+// special semantics.
+//
+// Values other than the four defined levels (e.g. produced by `static_cast`)
+// are valid, but their semantics when passed to a function, macro, or flag
+// depend on the function, macro, or flag. The usual behavior is to normalize
+// such values to a defined severity level; however, in some cases values other
+// than the defined levels are useful for comparison.
+//
+// Example:
+//
+//   // Effectively disables all logging:
+//   SetMinLogLevel(static_cast<absl::LogSeverity>(100));
+//
+// Abseil flags may be defined with type `LogSeverity`. Dependency layering
+// constraints require that the `AbslParseFlag()` overload be declared and
+// defined in the flags library itself rather than here. The `AbslUnparseFlag()`
+// overload is defined there as well for consistency.
+//
+// absl::LogSeverity Flag String Representation
+//
+// An `absl::LogSeverity` has a string representation used for parsing
+// command-line flags based on the enumerator name (e.g. `kFatal`) or
+// its unprefixed name (without the `k`) in any case-insensitive form. (E.g.
+// "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an
+// unprefixed string representation in all caps (e.g. "FATAL") or an integer.
+//
+// Additionally, the parser accepts arbitrary integers (as if the type were
+// `int`).
+//
+// Examples:
+//
+//   --my_log_level=kInfo
+//   --my_log_level=INFO
+//   --my_log_level=info
+//   --my_log_level=0
+//
+// Unparsing a flag produces the same result as `absl::LogSeverityName()` for
+// the standard levels and a base-ten integer otherwise.
+enum class LogSeverity : int {
+  kInfo = 0,
+  kWarning = 1,
+  kError = 2,
+  kFatal = 3,
+};
+
+// LogSeverities()
+//
+// Returns an iterable of all standard `absl::LogSeverity` values, ordered from
+// least to most severe.
+constexpr std::array<absl::LogSeverity, 4> LogSeverities() {
+  return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning,
+           absl::LogSeverity::kError, absl::LogSeverity::kFatal}};
+}
+
+// LogSeverityName()
+//
+// Returns the all-caps string representation (e.g. "INFO") of the specified
+// severity level if it is one of the standard levels and "UNKNOWN" otherwise.
+constexpr const char* LogSeverityName(absl::LogSeverity s) {
+  return s == absl::LogSeverity::kInfo
+             ? "INFO"
+             : s == absl::LogSeverity::kWarning
+                   ? "WARNING"
+                   : s == absl::LogSeverity::kError
+                         ? "ERROR"
+                         : s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN";
+}
+
+// NormalizeLogSeverity()
+//
+// Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal`
+// normalize to `kError` (**NOT** `kFatal`).
+constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) {
+  return s < absl::LogSeverity::kInfo
+             ? absl::LogSeverity::kInfo
+             : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s;
+}
+constexpr absl::LogSeverity NormalizeLogSeverity(int s) {
+  return absl::NormalizeLogSeverity(static_cast<absl::LogSeverity>(s));
+}
+
+// operator<<
+//
+// The exact representation of a streamed `absl::LogSeverity` is deliberately
+// unspecified; do not rely on it.
+std::ostream& operator<<(std::ostream& os, absl::LogSeverity s); + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ diff --git a/base/abseil/absl/base/log_severity_test.cc b/base/abseil/absl/base/log_severity_test.cc new file mode 100644 index 0000000..1e3aafa --- /dev/null +++ b/base/abseil/absl/base/log_severity_test.cc @@ -0,0 +1,199 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/log_severity.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/flags/marshalling.h" +#include "absl/strings/str_cat.h" + +namespace { +using ::testing::Eq; +using ::testing::IsFalse; +using ::testing::IsTrue; +using ::testing::TestWithParam; +using ::testing::Values; + +std::string StreamHelper(absl::LogSeverity value) { + std::ostringstream stream; + stream << value; + return stream.str(); +} + +TEST(StreamTest, Works) { + EXPECT_THAT(StreamHelper(static_cast(-100)), + Eq("absl::LogSeverity(-100)")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kInfo), Eq("INFO")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kWarning), Eq("WARNING")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kError), Eq("ERROR")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kFatal), Eq("FATAL")); + EXPECT_THAT(StreamHelper(static_cast(4)), + Eq("absl::LogSeverity(4)")); +} + +using ParseFlagFromOutOfRangeIntegerTest = TestWithParam; +INSTANTIATE_TEST_SUITE_P( + Instantiation, ParseFlagFromOutOfRangeIntegerTest, + Values(static_cast(std::numeric_limits::min()) - 1, + static_cast(std::numeric_limits::max()) + 1)); +TEST_P(ParseFlagFromOutOfRangeIntegerTest, ReturnsError) { + const std::string to_parse = absl::StrCat(GetParam()); + absl::LogSeverity value; + std::string error; + EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value; +} + +using ParseFlagFromAlmostOutOfRangeIntegerTest = TestWithParam; +INSTANTIATE_TEST_SUITE_P(Instantiation, + ParseFlagFromAlmostOutOfRangeIntegerTest, + Values(std::numeric_limits::min(), + std::numeric_limits::max())); +TEST_P(ParseFlagFromAlmostOutOfRangeIntegerTest, YieldsExpectedValue) { + const auto expected = static_cast(GetParam()); + const std::string to_parse = absl::StrCat(GetParam()); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromIntegerMatchingEnumeratorTest = + TestWithParam>; +INSTANTIATE_TEST_SUITE_P( + Instantiation, ParseFlagFromIntegerMatchingEnumeratorTest, + Values(std::make_tuple("0", absl::LogSeverity::kInfo), + std::make_tuple(" 0", absl::LogSeverity::kInfo), + std::make_tuple("-0", absl::LogSeverity::kInfo), + std::make_tuple("+0", absl::LogSeverity::kInfo), + std::make_tuple("00", absl::LogSeverity::kInfo), + std::make_tuple("0 ", absl::LogSeverity::kInfo), + std::make_tuple("0x0", absl::LogSeverity::kInfo), + 
std::make_tuple("1", absl::LogSeverity::kWarning), + std::make_tuple("+1", absl::LogSeverity::kWarning), + std::make_tuple("2", absl::LogSeverity::kError), + std::make_tuple("3", absl::LogSeverity::kFatal))); +TEST_P(ParseFlagFromIntegerMatchingEnumeratorTest, YieldsExpectedValue) { + const absl::string_view to_parse = std::get<0>(GetParam()); + const absl::LogSeverity expected = std::get<1>(GetParam()); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromOtherIntegerTest = + TestWithParam>; +INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromOtherIntegerTest, + Values(std::make_tuple("-1", -1), + std::make_tuple("4", 4), + std::make_tuple("010", 10), + std::make_tuple("0x10", 16))); +TEST_P(ParseFlagFromOtherIntegerTest, YieldsExpectedValue) { + const absl::string_view to_parse = std::get<0>(GetParam()); + const auto expected = static_cast(std::get<1>(GetParam())); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromEnumeratorTest = + TestWithParam>; +INSTANTIATE_TEST_SUITE_P( + Instantiation, ParseFlagFromEnumeratorTest, + Values(std::make_tuple("INFO", absl::LogSeverity::kInfo), + std::make_tuple("info", absl::LogSeverity::kInfo), + std::make_tuple("kInfo", absl::LogSeverity::kInfo), + std::make_tuple("iNfO", absl::LogSeverity::kInfo), + std::make_tuple("kInFo", absl::LogSeverity::kInfo), + std::make_tuple("WARNING", absl::LogSeverity::kWarning), + std::make_tuple("warning", absl::LogSeverity::kWarning), + std::make_tuple("kWarning", absl::LogSeverity::kWarning), + std::make_tuple("WaRnInG", absl::LogSeverity::kWarning), + std::make_tuple("KwArNiNg", absl::LogSeverity::kWarning), + std::make_tuple("ERROR", absl::LogSeverity::kError), + std::make_tuple("error", absl::LogSeverity::kError), + std::make_tuple("kError", absl::LogSeverity::kError), + std::make_tuple("eRrOr", absl::LogSeverity::kError), + std::make_tuple("kErRoR", absl::LogSeverity::kError), + std::make_tuple("FATAL", absl::LogSeverity::kFatal), + std::make_tuple("fatal", absl::LogSeverity::kFatal), + std::make_tuple("kFatal", absl::LogSeverity::kFatal), + std::make_tuple("FaTaL", absl::LogSeverity::kFatal), + std::make_tuple("KfAtAl", absl::LogSeverity::kFatal))); +TEST_P(ParseFlagFromEnumeratorTest, YieldsExpectedValue) { + const absl::string_view to_parse = std::get<0>(GetParam()); + const absl::LogSeverity expected = std::get<1>(GetParam()); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromGarbageTest = TestWithParam; +INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromGarbageTest, + Values("", "\0", " ", "garbage", "kkinfo", "I")); +TEST_P(ParseFlagFromGarbageTest, ReturnsError) { + const absl::string_view to_parse = GetParam(); + absl::LogSeverity value; + std::string error; + EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value; +} + +using UnparseFlagToEnumeratorTest = + TestWithParam>; +INSTANTIATE_TEST_SUITE_P( + Instantiation, UnparseFlagToEnumeratorTest, + Values(std::make_tuple(absl::LogSeverity::kInfo, "INFO"), + std::make_tuple(absl::LogSeverity::kWarning, "WARNING"), + std::make_tuple(absl::LogSeverity::kError, "ERROR"), + std::make_tuple(absl::LogSeverity::kFatal, "FATAL"))); 
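These instantiations pin down a round-trip property that the TEST_P below verifies: unparsing any standard level yields its all-caps name, and parsing that string returns the original value. Assuming the absl flags marshalling library is linked in, the same API the tests use can be exercised directly:

  #include <cstdio>
  #include <string>

  #include "absl/base/log_severity.h"
  #include "absl/flags/marshalling.h"

  int main() {
    absl::LogSeverity s;
    std::string err;
    if (absl::ParseFlag("warning", &s, &err)) {  // case-insensitive parse
      std::printf("parsed: %s\n", absl::LogSeverityName(s));  // WARNING
    }
    // Unparsing a standard level produces its all-caps name, which parses
    // back to the same value; out-of-range values unparse as integers.
    std::printf("unparsed: %s\n", absl::UnparseFlag(s).c_str());
  }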
+TEST_P(UnparseFlagToEnumeratorTest, ReturnsExpectedValueAndRoundTrips) {
+  const absl::LogSeverity to_unparse = std::get<0>(GetParam());
+  const absl::string_view expected = std::get<1>(GetParam());
+  const std::string stringified_value = absl::UnparseFlag(to_unparse);
+  EXPECT_THAT(stringified_value, Eq(expected));
+  absl::LogSeverity reparsed_value;
+  std::string error;
+  EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error),
+              IsTrue());
+  EXPECT_THAT(reparsed_value, Eq(to_unparse));
+}
+
+using UnparseFlagToOtherIntegerTest = TestWithParam<int>;
+INSTANTIATE_TEST_SUITE_P(Instantiation, UnparseFlagToOtherIntegerTest,
+                         Values(std::numeric_limits<int>::min(), -1, 4,
+                                std::numeric_limits<int>::max()));
+TEST_P(UnparseFlagToOtherIntegerTest, ReturnsExpectedValueAndRoundTrips) {
+  const absl::LogSeverity to_unparse =
+      static_cast<absl::LogSeverity>(GetParam());
+  const std::string expected = absl::StrCat(GetParam());
+  const std::string stringified_value = absl::UnparseFlag(to_unparse);
+  EXPECT_THAT(stringified_value, Eq(expected));
+  absl::LogSeverity reparsed_value;
+  std::string error;
+  EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error),
+              IsTrue());
+  EXPECT_THAT(reparsed_value, Eq(to_unparse));
+}
+}  // namespace
diff --git a/base/abseil/absl/base/macros.h b/base/abseil/absl/base/macros.h
new file mode 100644
index 0000000..7565f02
--- /dev/null
+++ b/base/abseil/absl/base/macros.h
@@ -0,0 +1,220 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: macros.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the set of language macros used within Abseil code.
+// For the set of macros used to determine supported compilers and platforms,
+// see absl/base/config.h instead.
+//
+// This code is compiled directly on many platforms, including client
+// platforms like Windows, Mac, and embedded systems. Before making
+// any changes here, make sure that you're not breaking any platforms.
+
+#ifndef ABSL_BASE_MACROS_H_
+#define ABSL_BASE_MACROS_H_
+
+#include <cassert>
+#include <cstddef>
+
+#include "absl/base/attributes.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+// ABSL_ARRAYSIZE()
+//
+// Returns the number of elements in an array as a compile-time constant, which
+// can be used in defining new arrays. If you use this macro on a pointer by
+// mistake, you will get a compile-time error.
+#define ABSL_ARRAYSIZE(array) \
+  (sizeof(::absl::macros_internal::ArraySizeHelper(array)))
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace macros_internal {
+// Note: this internal template function declaration is used by ABSL_ARRAYSIZE.
+// The function doesn't need a definition, as we only use its type.
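+// (Editor's sketch, not upstream text.) Because the declaration below returns
+// a reference to a char array of N elements, sizeof() on the call evaluates
+// to N at compile time, and a pointer argument fails to bind:
+//
+//   int buf[16];
+//   static_assert(ABSL_ARRAYSIZE(buf) == 16, "");
+//   int* p = buf;
+//   // ABSL_ARRAYSIZE(p)  // would not compile: no array reference to bind to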
+template <typename T, size_t N>
+auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N];
+}  // namespace macros_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// kLinkerInitialized
+//
+// An enum used only as a constructor argument to indicate that a variable has
+// static storage duration, and that the constructor should do nothing to its
+// state. Use of this macro indicates to the reader that it is legal to
+// declare a static instance of the class, provided the constructor is given
+// the absl::base_internal::kLinkerInitialized argument.
+//
+// Normally, it is unsafe to declare a static variable that has a constructor or
+// a destructor because invocation order is undefined. However, if the type can
+// be zero-initialized (which the loader does for static variables) into a valid
+// state and the type's destructor does not affect storage, then a constructor
+// for static initialization can be declared.
+//
+// Example:
+//       // Declaration
+//       explicit MyClass(absl::base_internal::LinkerInitialized x) {}
+//
+//       // Invocation
+//       static MyClass my_global(absl::base_internal::kLinkerInitialized);
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+enum LinkerInitialized {
+  kLinkerInitialized = 0,
+};
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// ABSL_FALLTHROUGH_INTENDED
+//
+// Annotates implicit fall-through between switch labels, allowing a case to
+// indicate intentional fallthrough and turn off warnings about any lack of a
+// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by
+// a semicolon and can be used in most places where `break` can, provided that
+// no statements exist between it and the next switch label.
+//
+// Example:
+//
+//  switch (x) {
+//    case 40:
+//    case 41:
+//      if (truth_is_out_there) {
+//        ++x;
+//        ABSL_FALLTHROUGH_INTENDED;  // Use instead of/along with annotations
+//                                    // in comments
+//      } else {
+//        return x;
+//      }
+//    case 42:
+//      ...
+//
+// Notes: when compiled with clang in C++11 mode, the ABSL_FALLTHROUGH_INTENDED
+// macro is expanded to the [[clang::fallthrough]] attribute, which is analysed
+// when performing switch labels fall-through diagnostic
+// (`-Wimplicit-fallthrough`). See clang documentation on language extensions
+// for details:
+// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
+//
+// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro
+// has no effect on diagnostics. In any case this macro has no effect on runtime
+// behavior and performance of code.
+#ifdef ABSL_FALLTHROUGH_INTENDED
+#error "ABSL_FALLTHROUGH_INTENDED should not be defined."
+#endif
+
+// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported.
+#if defined(__clang__) && defined(__has_warning)
+#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
+#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
+#endif
+#elif defined(__GNUC__) && __GNUC__ >= 7
+#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
+#endif
+
+#ifndef ABSL_FALLTHROUGH_INTENDED
+#define ABSL_FALLTHROUGH_INTENDED \
+  do {                            \
+  } while (0)
+#endif
+
+// ABSL_DEPRECATED()
+//
+// Marks deprecated class, struct, enum, function, method and variable
+// declarations. The macro argument is used as a custom diagnostic message (e.g.
+// suggestion of a better alternative).
+//
+// Examples:
+//
+//   class ABSL_DEPRECATED("Use Bar instead") Foo {...};
+//
+//   ABSL_DEPRECATED("Use Baz() instead") void Bar() {...}
+//
+//   template <typename T>
+//   ABSL_DEPRECATED("Use DoThat<T>() instead")
+//   void DoThis();
+//
+// Every usage of a deprecated entity will trigger a warning when compiled with
+// clang's `-Wdeprecated-declarations` option. This option is turned off by
+// default, but the warnings will be reported by clang-tidy.
+#if defined(__clang__) && __cplusplus >= 201103L
+#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
+#endif
+
+#ifndef ABSL_DEPRECATED
+#define ABSL_DEPRECATED(message)
+#endif
+
+// ABSL_BAD_CALL_IF()
+//
+// Used on a function overload to trap bad calls: any call that matches the
+// overload will cause a compile-time error. This macro uses a clang-specific
+// "enable_if" attribute, as described at
+// http://clang.llvm.org/docs/AttributeReference.html#enable-if
+//
+// Overloads which use this macro should be bracketed by
+// `#ifdef ABSL_BAD_CALL_IF`.
+//
+// Example:
+//
+//   int isdigit(int c);
+//   #ifdef ABSL_BAD_CALL_IF
+//   int isdigit(int c)
+//     ABSL_BAD_CALL_IF(c <= -1 || c > 255,
+//                      "'c' must have the value of an unsigned char or EOF");
+//   #endif // ABSL_BAD_CALL_IF
+#if ABSL_HAVE_ATTRIBUTE(enable_if)
+#define ABSL_BAD_CALL_IF(expr, msg) \
+  __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg)))
+#endif
+
+// ABSL_ASSERT()
+//
+// In C++11, `assert` can't be used portably within constexpr functions.
+// ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr
+// functions. Example:
+//
+// constexpr double Divide(double a, double b) {
+//   return ABSL_ASSERT(b != 0), a / b;
+// }
+//
+// This macro is inspired by
+// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/
+#if defined(NDEBUG)
+#define ABSL_ASSERT(expr) \
+  (false ? static_cast<void>(expr) : static_cast<void>(0))
+#else
+#define ABSL_ASSERT(expr)                           \
+  (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
+                             : [] { assert(false && #expr); }())  // NOLINT
+#endif
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+#define ABSL_INTERNAL_TRY try
+#define ABSL_INTERNAL_CATCH_ANY catch (...)
+#define ABSL_INTERNAL_RETHROW do { throw; } while (false)
+#else  // ABSL_HAVE_EXCEPTIONS
+#define ABSL_INTERNAL_TRY if (true)
+#define ABSL_INTERNAL_CATCH_ANY else if (false)
+#define ABSL_INTERNAL_RETHROW do {} while (false)
+#endif  // ABSL_HAVE_EXCEPTIONS
+
+#endif  // ABSL_BASE_MACROS_H_
diff --git a/base/abseil/absl/base/optimization.h b/base/abseil/absl/base/optimization.h
new file mode 100644
index 0000000..646523b
--- /dev/null
+++ b/base/abseil/absl/base/optimization.h
@@ -0,0 +1,181 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: optimization.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines portable macros for performance optimization.
+
+#ifndef ABSL_BASE_OPTIMIZATION_H_
+#define ABSL_BASE_OPTIMIZATION_H_
+
+#include "absl/base/config.h"
+
+// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
+//
+// Instructs the compiler to avoid optimizing tail-call recursion. Use of this
+// macro is useful when you wish to preserve the existing function order within
+// a stack trace for logging, debugging, or profiling purposes.
+//
+// Example:
+//
+//   int f() {
+//     int result = g();
+//     ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+//     return result;
+//   }
+#if defined(__pnacl__)
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
+#elif defined(__clang__)
+// Clang will not tail call given inline volatile assembly.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
+#elif defined(__GNUC__)
+// GCC will not tail call given inline volatile assembly.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
+#elif defined(_MSC_VER)
+#include <intrin.h>
+// The __nop() intrinsic blocks the optimisation.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop()
+#else
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
+#endif
+
+// ABSL_CACHELINE_SIZE
+//
+// Explicitly defines the size of the L1 cache for purposes of alignment.
+// Setting the cacheline size allows you to specify that certain objects be
+// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations.
+// (See below.)
+//
+// NOTE: this macro should be replaced with the following C++17 features, when
+// those are generally available:
+//
+//   * `std::hardware_constructive_interference_size`
+//   * `std::hardware_destructive_interference_size`
+//
+// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
+// for more information.
+#if defined(__GNUC__)
+// Cache line alignment
+#if defined(__i386__) || defined(__x86_64__)
+#define ABSL_CACHELINE_SIZE 64
+#elif defined(__powerpc64__)
+#define ABSL_CACHELINE_SIZE 128
+#elif defined(__aarch64__)
+// We would need to read special register ctr_el0 to find out L1 dcache size.
+// This value is a good estimate based on a real aarch64 machine.
+#define ABSL_CACHELINE_SIZE 64
+#elif defined(__arm__)
+// Cache line sizes for ARM: These values are not strictly correct since
+// cache line sizes depend on implementations, not architectures. There
+// are even implementations with cache line sizes configurable at boot
+// time.
+#if defined(__ARM_ARCH_5T__)
+#define ABSL_CACHELINE_SIZE 32
+#elif defined(__ARM_ARCH_7A__)
+#define ABSL_CACHELINE_SIZE 64
+#endif
+#endif
+
+#ifndef ABSL_CACHELINE_SIZE
+// A reasonable default guess. Note that overestimates tend to waste more
+// space, while underestimates tend to waste more time.
+#define ABSL_CACHELINE_SIZE 64
+#endif
+
+// ABSL_CACHELINE_ALIGNED
+//
+// Indicates that the declared object should be cache-aligned using
+// `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to
+// load a set of related objects in the L1 cache for performance improvements.
+// Cacheline aligning objects properly allows constructive memory sharing and
+// prevents destructive (or "false") memory sharing.
+//
+// NOTE: this macro should be replaced with usage of `alignas()` using
+// `std::hardware_constructive_interference_size` and/or
+// `std::hardware_destructive_interference_size` when available within C++17.
+//
+// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
+// for more information.
+//
+// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__`
+// or `__declspec` attribute. For compilers where this is not known to work,
+// the macro expands to nothing.
+//
+// No further guarantees are made here. The result of applying the macro
+// to variables and types is always implementation-defined.
+//
+// WARNING: It is easy to use this attribute incorrectly, even to the point
+// of causing bugs that are difficult to diagnose, crash, etc. It does not
+// by itself guarantee that objects are aligned to a cache line.
+//
+// NOTE: Some compilers are picky about the locations of annotations such as
+// this attribute, so prefer to put it at the beginning of your declaration.
+// For example,
+//
+//   ABSL_CACHELINE_ALIGNED static Foo* foo = ...
+//
+//   class ABSL_CACHELINE_ALIGNED Bar { ...
+//
+// Recommendations:
+//
+// 1) Consult compiler documentation; this comment is not kept in sync as
+//    toolchains evolve.
+// 2) Verify your use has the intended effect. This often requires inspecting
+//    the generated machine code.
+// 3) Prefer applying this attribute to individual variables. Avoid
+//    applying it to types. This tends to localize the effect.
+#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE)))
+#elif defined(_MSC_VER)
+#define ABSL_CACHELINE_SIZE 64
+#define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE))
+#else
+#define ABSL_CACHELINE_SIZE 64
+#define ABSL_CACHELINE_ALIGNED
+#endif
+
+// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE
+//
+// Enables the compiler to prioritize compilation using static analysis for
+// likely paths within a boolean branch.
+//
+// Example:
+//
+//   if (ABSL_PREDICT_TRUE(expression)) {
+//     return result;  // Faster if more likely
+//   } else {
+//     return 0;
+//   }
+//
+// Compilers can use the information that a certain branch is not likely to be
+// taken (for instance, a CHECK failure) to optimize for the common case in
+// the absence of better information (i.e. compiling gcc with `-fprofile-arcs`).
+//
+// Recommendation: Modern CPUs dynamically predict branch execution paths,
+// typically with accuracy greater than 97%. As a result, annotating every
+// branch in a codebase is likely counterproductive; however, annotating
+// specific branches that are both hot and consistently mispredicted is likely
+// to yield performance improvements.
+#if ABSL_HAVE_BUILTIN(__builtin_expect) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true))
+#else
+#define ABSL_PREDICT_FALSE(x) (x)
+#define ABSL_PREDICT_TRUE(x) (x)
+#endif
+
+#endif  // ABSL_BASE_OPTIMIZATION_H_
diff --git a/base/abseil/absl/base/options.h b/base/abseil/absl/base/options.h
new file mode 100644
index 0000000..3961e63
--- /dev/null
+++ b/base/abseil/absl/base/options.h
@@ -0,0 +1,188 @@
+#ifndef ABSL_BASE_OPTIONS_H_
+#define ABSL_BASE_OPTIONS_H_
+
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: options.h
+// -----------------------------------------------------------------------------
+//
+// This file contains Abseil configuration options for setting specific
+// implementations instead of letting Abseil determine which implementation to
+// use at compile-time. Setting these options may be useful for package or build
+// managers who wish to guarantee ABI stability within binary builds (which is
+// otherwise difficult to enforce).
+//
+// *** IMPORTANT NOTICE FOR PACKAGE MANAGERS: It is important that
+// maintainers of package managers who wish to package Abseil read and
+// understand this file! ***
+//
+// Abseil contains a number of possible configuration endpoints, based on
+// parameters such as the detected platform, language version, or command-line
+// flags used to invoke the underlying binary. As is the case with all
+// libraries, binaries which contain Abseil code must ensure that separate
+// packages use the same compiled copy of Abseil to avoid a diamond dependency
+// problem, which can occur if two packages built with different Abseil
+// configuration settings are linked together. Diamond dependency problems in
+// C++ may manifest as violations of the One Definition Rule (ODR) (resulting in
+// linker errors), or undefined behavior (resulting in crashes).
+//
+// Diamond dependency problems can be avoided if all packages utilize the same
+// exact version of Abseil. Building from source code with the same compilation
+// parameters is the easiest way to avoid such dependency problems. However, for
+// package managers who cannot control such compilation parameters, we are
+// providing the file to allow you to inject ABI (Application Binary Interface)
+// stability across builds. Setting options in this file will neither change
+// API nor ABI, providing a stable copy of Abseil between packages.
+//
+// Care must be taken to keep options within these configurations isolated
+// from any other dynamic settings, such as command-line flags which could alter
+// these options. This file is provided specifically to help build and package
+// managers provide a stable copy of Abseil within their libraries and binaries;
+// other developers should not need to alter the contents of this file.
+//
+// -----------------------------------------------------------------------------
+// Usage
+// -----------------------------------------------------------------------------
+//
+// For any particular package release, set the appropriate definitions within
+// this file to whatever value makes the most sense for your package(s). Note
+// that, by default, most of these options currently affect the implementation
+// of types; future options may affect other implementation details.
+//
+// NOTE: the defaults within this file all assume that Abseil can select the
+// proper Abseil implementation at compile-time, which will not be sufficient
+// to guarantee ABI stability to package managers.
+//
+// -----------------------------------------------------------------------------
+// Type Compatibility Options
+// -----------------------------------------------------------------------------
+//
+// ABSL_OPTION_USE_STD_ANY
+//
+// This option controls whether absl::any is implemented as an alias to
+// std::any, or as an independent implementation.
+//
+// A value of 0 means to use Abseil's implementation. This requires only C++11
+// support, and is expected to work on every toolchain we support.
+//
+// A value of 1 means to use an alias to std::any. This requires that all code
+// using Abseil is built in C++17 mode or later.
+//
+// A value of 2 means to detect the C++ version being used to compile Abseil,
+// and use an alias only if a working std::any is available. This option is
+// useful when you are building your entire program, including all of its
+// dependencies, from source. It should not be used otherwise -- for example,
+// if you are distributing Abseil in a binary package manager -- since in
+// mode 2, absl::any will name a different type, with a different mangled name
+// and binary layout, depending on the compiler flags passed by the end user.
+// For more info, see https://abseil.io/about/design/dropin-types.
+//
+// User code should not inspect this macro. To check in the preprocessor if
+// absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY.
+
+#define ABSL_OPTION_USE_STD_ANY 2
+
+
+// ABSL_OPTION_USE_STD_OPTIONAL
+//
+// This option controls whether absl::optional is implemented as an alias to
+// std::optional, or as an independent implementation.
+//
+// A value of 0 means to use Abseil's implementation. This requires only C++11
+// support, and is expected to work on every toolchain we support.
+//
+// A value of 1 means to use an alias to std::optional. This requires that all
+// code using Abseil is built in C++17 mode or later.
+//
+// A value of 2 means to detect the C++ version being used to compile Abseil,
+// and use an alias only if a working std::optional is available. This option
+// is useful when you are building your program from source. It should not be
+// used otherwise -- for example, if you are distributing Abseil in a binary
+// package manager -- since in mode 2, absl::optional will name a different
+// type, with a different mangled name and binary layout, depending on the
+// compiler flags passed by the end user. For more info, see
+// https://abseil.io/about/design/dropin-types.
+//
+// User code should not inspect this macro. To check in the preprocessor if
+// absl::optional is a typedef of std::optional, use the feature macro
+// ABSL_USES_STD_OPTIONAL.
+
+#define ABSL_OPTION_USE_STD_OPTIONAL 2
+
+
+// ABSL_OPTION_USE_STD_STRING_VIEW
+//
+// This option controls whether absl::string_view is implemented as an alias to
+// std::string_view, or as an independent implementation.
+//
+// A value of 0 means to use Abseil's implementation. This requires only C++11
+// support, and is expected to work on every toolchain we support.
+//
+// A value of 1 means to use an alias to std::string_view. This requires that
+// all code using Abseil is built in C++17 mode or later.
+//
+// A value of 2 means to detect the C++ version being used to compile Abseil,
+// and use an alias only if a working std::string_view is available. This
+// option is useful when you are building your program from source. It should
+// not be used otherwise -- for example, if you are distributing Abseil in a
+// binary package manager -- since in mode 2, absl::string_view will name a
+// different type, with a different mangled name and binary layout, depending on
+// the compiler flags passed by the end user. For more info, see
+// https://abseil.io/about/design/dropin-types.
+//
+// User code should not inspect this macro. To check in the preprocessor if
+// absl::string_view is a typedef of std::string_view, use the feature macro
+// ABSL_USES_STD_STRING_VIEW.
+
+#define ABSL_OPTION_USE_STD_STRING_VIEW 2
+
+
+// ABSL_OPTION_USE_STD_VARIANT
+//
+// This option controls whether absl::variant is implemented as an alias to
+// std::variant, or as an independent implementation.
+//
+// A value of 0 means to use Abseil's implementation. This requires only C++11
+// support, and is expected to work on every toolchain we support.
+//
+// A value of 1 means to use an alias to std::variant. This requires that all
+// code using Abseil is built in C++17 mode or later.
+//
+// A value of 2 means to detect the C++ version being used to compile Abseil,
+// and use an alias only if a working std::variant is available. This option
+// is useful when you are building your program from source. It should not be
+// used otherwise -- for example, if you are distributing Abseil in a binary
+// package manager -- since in mode 2, absl::variant will name a different
+// type, with a different mangled name and binary layout, depending on the
+// compiler flags passed by the end user. For more info, see
+// https://abseil.io/about/design/dropin-types.
+//
+// User code should not inspect this macro. To check in the preprocessor if
+// absl::variant is a typedef of std::variant, use the feature macro
+// ABSL_USES_STD_VARIANT.
+
+#define ABSL_OPTION_USE_STD_VARIANT 2
+
+#endif  // ABSL_BASE_OPTIONS_H_
diff --git a/base/abseil/absl/base/policy_checks.h b/base/abseil/absl/base/policy_checks.h
new file mode 100644
index 0000000..4dfa49e
--- /dev/null
+++ b/base/abseil/absl/base/policy_checks.h
@@ -0,0 +1,111 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: policy_checks.h
+// -----------------------------------------------------------------------------
+//
+// This header enforces a minimum set of policies at build time, such as the
+// supported compiler and library versions. Unsupported configurations are
+// reported with `#error`. This enforcement is best effort, so successfully
+// compiling this header does not guarantee a supported configuration.
+
+#ifndef ABSL_BASE_POLICY_CHECKS_H_
+#define ABSL_BASE_POLICY_CHECKS_H_
+
+// Included for the __GLIBC_PREREQ macro used below.
+#include <limits.h>
+
+// Included for the _STLPORT_VERSION macro used below.
+#if defined(__cplusplus)
+#include <cstddef>
+#endif
+
+// -----------------------------------------------------------------------------
+// Operating System Check
+// -----------------------------------------------------------------------------
+
+#if defined(__CYGWIN__)
+#error "Cygwin is not supported."
+#endif
+
+// -----------------------------------------------------------------------------
+// Compiler Check
+// -----------------------------------------------------------------------------
+
+// We support MSVC++ 14.0 update 2 and later.
+// This minimum will go up.
+#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918 && !defined(__clang__)
+#error "This package requires Visual Studio 2015 Update 2 or higher."
+#endif
+
+// We support gcc 4.7 and later.
+// This minimum will go up.
+#if defined(__GNUC__) && !defined(__clang__)
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)
+#error "This package requires gcc 4.7 or higher."
+#endif
+#endif
+
+// We support Apple Xcode clang 4.2.1 (version 421.11.65) and later.
+// This corresponds to Apple Xcode version 4.5.
+// This minimum will go up.
+#if defined(__apple_build_version__) && __apple_build_version__ < 4211165
+#error "This package requires __apple_build_version__ of 4211165 or higher."
+#endif
+
+// -----------------------------------------------------------------------------
+// C++ Version Check
+// -----------------------------------------------------------------------------
+
+// Enforce C++11 as the minimum. Note that Visual Studio has not
+// advanced __cplusplus despite being good enough for our purposes, so
+// we exempt it from the check.
+#if defined(__cplusplus) && !defined(_MSC_VER)
+#if __cplusplus < 201103L
+#error "C++ versions less than C++11 are not supported."
+#endif
+#endif
+
+// -----------------------------------------------------------------------------
+// Standard Library Check
+// -----------------------------------------------------------------------------
+
+#if defined(_STLPORT_VERSION)
+#error "STLPort is not supported."
+#endif
+
+// -----------------------------------------------------------------------------
+// `char` Size Check
+// -----------------------------------------------------------------------------
+
+// Abseil currently assumes CHAR_BIT == 8. If you would like to use Abseil on a
+// platform where this is not the case, please provide us with the details about
+// your platform so we can consider relaxing this requirement.
+#if CHAR_BIT != 8
+#error "Abseil assumes CHAR_BIT == 8."
+#endif
+
+// -----------------------------------------------------------------------------
+// `int` Size Check
+// -----------------------------------------------------------------------------
+
+// Abseil currently assumes that an int is 4 bytes. If you would like to use
+// Abseil on a platform where this is not the case, please provide us with the
+// details about your platform so we can consider relaxing this requirement.
+#if INT_MAX < 2147483647
+#error "Abseil assumes that int is at least 4 bytes."
+#endif
+
+#endif  // ABSL_BASE_POLICY_CHECKS_H_
diff --git a/base/abseil/absl/base/port.h b/base/abseil/absl/base/port.h
new file mode 100644
index 0000000..6c28068
--- /dev/null
+++ b/base/abseil/absl/base/port.h
@@ -0,0 +1,26 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a forwarding header for other headers containing various
+// portability macros and functions.
+// This file is used for both C and C++!
+
+#ifndef ABSL_BASE_PORT_H_
+#define ABSL_BASE_PORT_H_
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+
+#endif  // ABSL_BASE_PORT_H_
diff --git a/base/abseil/absl/base/raw_logging_test.cc b/base/abseil/absl/base/raw_logging_test.cc
new file mode 100644
index 0000000..3d30bd3
--- /dev/null
+++ b/base/abseil/absl/base/raw_logging_test.cc
@@ -0,0 +1,79 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This test serves primarily as a compilation test for base/raw_logging.h.
+// Raw logging testing is covered by logging_unittest.cc, which is not as
+// portable as this test.
+
+#include "absl/base/internal/raw_logging.h"
+
+#include <tuple>
+
+#include "gtest/gtest.h"
+#include "absl/strings/str_cat.h"
+
+namespace {
+
+TEST(RawLoggingCompilationTest, Log) {
+  ABSL_RAW_LOG(INFO, "RAW INFO: %d", 1);
+  ABSL_RAW_LOG(INFO, "RAW INFO: %d %d", 1, 2);
+  ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d", 1, 2, 3);
+  ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d", 1, 2, 3, 4);
+  ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d %d", 1, 2, 3, 4, 5);
+  ABSL_RAW_LOG(WARNING, "RAW WARNING: %d", 1);
+  ABSL_RAW_LOG(ERROR, "RAW ERROR: %d", 1);
+}
+
+TEST(RawLoggingCompilationTest, PassingCheck) {
+  ABSL_RAW_CHECK(true, "RAW CHECK");
+}
+
+// Not all platforms support output from raw log, so we don't verify any
+// particular output for RAW check failures (expecting the empty string
+// accomplishes this). This test is primarily a compilation test, but we
+// are verifying process death when EXPECT_DEATH works for a platform.
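+// (Editor's note, an assumption rather than upstream text.) Passing an empty
+// string as the expected output makes EXPECT_DEATH_IF_SUPPORTED assert only
+// that the statement kills the process, since every output matches "", e.g.:
+//
+//   EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(false, "boom"), "");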
+const char kExpectedDeathOutput[] = "";
+
+TEST(RawLoggingDeathTest, FailingCheck) {
+  EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(1 == 0, "explanation"),
+                            kExpectedDeathOutput);
+}
+
+TEST(RawLoggingDeathTest, LogFatal) {
+  EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_LOG(FATAL, "my dog has fleas"),
+                            kExpectedDeathOutput);
+}
+
+TEST(InternalLog, CompilationTest) {
+  ABSL_INTERNAL_LOG(INFO, "Internal Log");
+  std::string log_msg = "Internal Log";
+  ABSL_INTERNAL_LOG(INFO, log_msg);
+
+  ABSL_INTERNAL_LOG(INFO, log_msg + " 2");
+
+  float d = 1.1f;
+  ABSL_INTERNAL_LOG(INFO, absl::StrCat("Internal log ", 3, " + ", d));
}
+
+TEST(InternalLogDeathTest, FailingCheck) {
+  EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_CHECK(1 == 0, "explanation"),
+                            kExpectedDeathOutput);
+}
+
+TEST(InternalLogDeathTest, LogFatal) {
+  EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_LOG(FATAL, "my dog has fleas"),
+                            kExpectedDeathOutput);
+}
+
+}  // namespace
diff --git a/base/abseil/absl/base/spinlock_test_common.cc b/base/abseil/absl/base/spinlock_test_common.cc
new file mode 100644
index 0000000..08f61ba
--- /dev/null
+++ b/base/abseil/absl/base/spinlock_test_common.cc
@@ -0,0 +1,271 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A bunch of threads repeatedly hash an array of ints protected by a
+// spinlock. If the spinlock is working properly, all elements of the
+// array should be equal at the end of the test.
+
+#include <cstdint>
+#include <limits>
+#include <random>
+#include <thread>  // NOLINT(build/c++11)
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/base/macros.h"
+#include "absl/synchronization/blocking_counter.h"
+#include "absl/synchronization/notification.h"
+
+constexpr int32_t kNumThreads = 10;
+constexpr int32_t kIters = 1000;
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// This is defined outside of anonymous namespace so that it can be
+// a friend of SpinLock to access protected methods for testing.
+struct SpinLockTest {
+  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+                                   int64_t wait_end_time) {
+    return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
+  }
+  static uint64_t DecodeWaitCycles(uint32_t lock_value) {
+    return SpinLock::DecodeWaitCycles(lock_value);
+  }
+};
+
+namespace {
+
+static constexpr int kArrayLength = 10;
+static uint32_t values[kArrayLength];
+
+static SpinLock static_spinlock(base_internal::kLinkerInitialized);
+static SpinLock static_cooperative_spinlock(
+    base_internal::kLinkerInitialized,
+    base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+static SpinLock static_noncooperative_spinlock(
+    base_internal::kLinkerInitialized, base_internal::SCHEDULE_KERNEL_ONLY);
+
+// Simple integer hash function based on the public domain lookup2 hash.
+// http://burtleburtle.net/bob/c/lookup2.c
+static uint32_t Hash32(uint32_t a, uint32_t c) {
+  uint32_t b = 0x9e3779b9UL;  // The golden ratio; an arbitrary value.
+  a -= b; a -= c; a ^= (c >> 13);
+  b -= c; b -= a; b ^= (a << 8);
+  c -= a; c -= b; c ^= (b >> 13);
+  a -= b; a -= c; a ^= (c >> 12);
+  b -= c; b -= a; b ^= (a << 16);
+  c -= a; c -= b; c ^= (b >> 5);
+  a -= b; a -= c; a ^= (c >> 3);
+  b -= c; b -= a; b ^= (a << 10);
+  c -= a; c -= b; c ^= (b >> 15);
+  return c;
+}
+
+static void TestFunction(int thread_salt, SpinLock* spinlock) {
+  for (int i = 0; i < kIters; i++) {
+    SpinLockHolder h(spinlock);
+    for (int j = 0; j < kArrayLength; j++) {
+      const int index = (j + thread_salt) % kArrayLength;
+      values[index] = Hash32(values[index], thread_salt);
+      std::this_thread::yield();
+    }
+  }
+}
+
+static void ThreadedTest(SpinLock* spinlock) {
+  std::vector<std::thread> threads;
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads.push_back(std::thread(TestFunction, i, spinlock));
+  }
+  for (auto& thread : threads) {
+    thread.join();
+  }
+
+  SpinLockHolder h(spinlock);
+  for (int i = 1; i < kArrayLength; i++) {
+    EXPECT_EQ(values[0], values[i]);
+  }
+}
+
+TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
+  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
+  spinlock.Lock();
+  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
+  spinlock.Unlock();
+}
+
+TEST(SpinLock, StaticNonCooperativeDisablesScheduling) {
+  static_noncooperative_spinlock.Lock();
+  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
+  static_noncooperative_spinlock.Unlock();
+}
+
+TEST(SpinLock, WaitCyclesEncoding) {
+  // These are implementation details not exported by SpinLock.
+  const int kProfileTimestampShift = 7;
+  const int kLockwordReservedShift = 3;
+  const uint32_t kSpinLockSleeper = 8;
+
+  // We should be able to encode up to (2^kMaxCyclesShift - 1) without clamping
+  // but the lower kProfileTimestampShift will be dropped.
+  const int kMaxCyclesShift =
+      32 - kLockwordReservedShift + kProfileTimestampShift;
+  const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
+
+  // These bits should be zero after encoding.
+  const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;
+
+  // These bits are dropped when wait cycles are encoded.
+  const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
+
+  // Test a bunch of random values
+  std::default_random_engine generator;
+  // Shift to avoid overflow below.
+  std::uniform_int_distribution<int64_t> time_distribution(
+      0, std::numeric_limits<int64_t>::max() >> 4);
+  std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);
+
+  for (int i = 0; i < 100; i++) {
+    int64_t start_time = time_distribution(generator);
+    int64_t cycles = cycle_distribution(generator);
+    int64_t end_time = start_time + cycles;
+    uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
+    EXPECT_EQ(0, lock_value & kLockwordReservedMask);
+    uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
+    EXPECT_EQ(0, decoded & kProfileTimestampMask);
+    EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
+  }
+
+  // Test corner cases
+  int64_t start_time = time_distribution(generator);
+  EXPECT_EQ(kSpinLockSleeper,
+            SpinLockTest::EncodeWaitCycles(start_time, start_time));
+  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0));
+  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask));
+  EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask,
+            SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));
+
+  // Check that we cannot produce kSpinLockSleeper during encoding.
+  int64_t sleeper_cycles =
+      kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
+  uint32_t sleeper_value =
+      SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
+  EXPECT_NE(sleeper_value, kSpinLockSleeper);
+
+  // Test clamping
+  uint32_t max_value =
+      SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
+  uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
+  uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
+  EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
+
+  const int64_t step = (1 << kProfileTimestampShift);
+  uint32_t after_max_value =
+      SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
+  uint64_t after_max_value_decoded =
+      SpinLockTest::DecodeWaitCycles(after_max_value);
+  EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);
+
+  uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
+      start_time, start_time + kMaxCycles - step);
+  uint64_t before_max_value_decoded =
+      SpinLockTest::DecodeWaitCycles(before_max_value);
+  EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
+}
+
+TEST(SpinLockWithThreads, StaticSpinLock) {
+  ThreadedTest(&static_spinlock);
+}
+
+TEST(SpinLockWithThreads, StackSpinLock) {
+  SpinLock spinlock;
+  ThreadedTest(&spinlock);
+}
+
+TEST(SpinLockWithThreads, StackCooperativeSpinLock) {
+  SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+  ThreadedTest(&spinlock);
+}
+
+TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) {
+  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
+  ThreadedTest(&spinlock);
+}
+
+TEST(SpinLockWithThreads, StaticCooperativeSpinLock) {
+  ThreadedTest(&static_cooperative_spinlock);
+}
+
+TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) {
+  ThreadedTest(&static_noncooperative_spinlock);
+}
+
+TEST(SpinLockWithThreads, DoesNotDeadlock) {
+  struct Helper {
+    static void NotifyThenLock(Notification* locked, SpinLock* spinlock,
+                               BlockingCounter* b) {
+      locked->WaitForNotification();  // Wait for LockThenWait() to hold "spinlock".
+      b->DecrementCount();
+      SpinLockHolder l(spinlock);
+    }
+
+    static void LockThenWait(Notification* locked, SpinLock* spinlock,
+                             BlockingCounter* b) {
+      SpinLockHolder l(spinlock);
+      locked->Notify();
+      b->Wait();
+    }
+
+    static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
+      Notification locked;
+      BlockingCounter counter(num_spinners);
+      std::vector<std::thread> threads;
+
+      threads.push_back(
+          std::thread(Helper::LockThenWait, &locked, spinlock, &counter));
+      for (int i = 0; i < num_spinners; ++i) {
+        threads.push_back(
+            std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter));
+      }
+
+      for (auto& thread : threads) {
+        thread.join();
+      }
+    }
+  };
+
+  SpinLock stack_cooperative_spinlock(
+      base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+  SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
+  Helper::DeadlockTest(&stack_cooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&stack_noncooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&static_cooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&static_noncooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+}
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/base/thread_annotations.h b/base/abseil/absl/base/thread_annotations.h
new file mode 100644
index 0000000..5f51c0c
--- /dev/null
+++ b/base/abseil/absl/base/thread_annotations.h
@@ -0,0 +1,280 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: thread_annotations.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// These annotations are implemented using compiler attributes. Using the macros
+// defined here instead of raw attributes allows for portability and future
+// compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+
+#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_
+#define ABSL_BASE_THREAD_ANNOTATIONS_H_
+
+#include "absl/base/config.h"
+// TODO(mbonadei): Remove after the backward compatibility period.
+#include "absl/base/internal/thread_annotations.h" // IWYU pragma: export + +#if defined(__clang__) +#define ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x)) +#else +#define ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) // no-op +#endif + +// ABSL_GUARDED_BY() +// +// Documents if a shared field or global variable needs to be protected by a +// mutex. ABSL_GUARDED_BY() allows the user to specify a particular mutex that +// should be held when accessing the annotated variable. +// +// Although this annotation (and ABSL_PT_GUARDED_BY, below) cannot be applied to +// local variables, a local variable and its associated mutex can often be +// combined into a small class or struct, thereby allowing the annotation. +// +// Example: +// +// class Foo { +// Mutex mu_; +// int p1_ ABSL_GUARDED_BY(mu_); +// ... +// }; +#define ABSL_GUARDED_BY(x) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x)) + +// ABSL_PT_GUARDED_BY() +// +// Documents if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. +// +// Example: +// class Foo { +// Mutex mu_; +// int *p1_ ABSL_PT_GUARDED_BY(mu_); +// ... +// }; +// +// Note that a pointer variable to a shared memory location could itself be a +// shared variable. +// +// Example: +// +// // `q_`, guarded by `mu1_`, points to a shared memory location that is +// // guarded by `mu2_`: +// int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_); +#define ABSL_PT_GUARDED_BY(x) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x)) + +// ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE() +// +// Documents the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both ABSL_ACQUIRED_AFTER +// and ABSL_ACQUIRED_BEFORE.) +// +// As with ABSL_GUARDED_BY, this is only applicable to mutexes that are shared +// fields or global variables. +// +// Example: +// +// Mutex m1_; +// Mutex m2_ ABSL_ACQUIRED_AFTER(m1_); +#define ABSL_ACQUIRED_AFTER(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__)) + +#define ABSL_ACQUIRED_BEFORE(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__)) + +// ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED() +// +// Documents a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to, and exit from, the +// function. +// +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// ABSL_EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// ABSL_SHARED_LOCKS_REQUIRED. +// +// Example: +// +// Mutex mu1, mu2; +// int a ABSL_GUARDED_BY(mu1); +// int b ABSL_GUARDED_BY(mu2); +// +// void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ + exclusive_locks_required(__VA_ARGS__)) + +#define ABSL_SHARED_LOCKS_REQUIRED(...) 
\ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_locks_required(__VA_ARGS__)) + +// ABSL_LOCKS_EXCLUDED() +// +// Documents the locks acquired in the body of the function. These locks +// cannot be held when calling this function (as Abseil's `Mutex` locks are +// non-reentrant). +#define ABSL_LOCKS_EXCLUDED(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(locks_excluded(__VA_ARGS__)) + +// ABSL_LOCK_RETURNED() +// +// Documents a function that returns a mutex without acquiring it. For example, +// a public getter method that returns a pointer to a private mutex should +// be annotated with ABSL_LOCK_RETURNED. +#define ABSL_LOCK_RETURNED(x) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lock_returned(x)) + +// ABSL_LOCKABLE +// +// Documents if a class/type is a lockable type (such as the `Mutex` class). +#define ABSL_LOCKABLE ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lockable) + +// ABSL_SCOPED_LOCKABLE +// +// Documents if a class does RAII locking (such as the `MutexLock` class). +// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is +// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no +// arguments; the analysis will assume that the destructor unlocks whatever the +// constructor locked. +#define ABSL_SCOPED_LOCKABLE \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(scoped_lockable) + +// ABSL_EXCLUSIVE_LOCK_FUNCTION() +// +// Documents functions that acquire a lock in the body of a function, and do +// not release it. +#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ + exclusive_lock_function(__VA_ARGS__)) + +// ABSL_SHARED_LOCK_FUNCTION() +// +// Documents functions that acquire a shared (reader) lock in the body of a +// function, and do not release it. +#define ABSL_SHARED_LOCK_FUNCTION(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_lock_function(__VA_ARGS__)) + +// ABSL_UNLOCK_FUNCTION() +// +// Documents functions that expect a lock to be held on entry to the function, +// and release it in the body of the function. +#define ABSL_UNLOCK_FUNCTION(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(unlock_function(__VA_ARGS__)) + +// ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION() +// +// Documents functions that try to acquire a lock, and return success or failure +// (or a non-boolean value that can be interpreted as a boolean). +// The first argument should be `true` for functions that return `true` on +// success, or `false` for functions that return `false` on success. The second +// argument specifies the mutex that is locked on success. If unspecified, this +// mutex is assumed to be `this`. +#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ + exclusive_trylock_function(__VA_ARGS__)) + +#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ + shared_trylock_function(__VA_ARGS__)) + +// ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK() +// +// Documents functions that dynamically check to see if a lock is held, and fail +// if it is not held. +#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_exclusive_lock(__VA_ARGS__)) + +#define ABSL_ASSERT_SHARED_LOCK(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_shared_lock(__VA_ARGS__)) + +// ABSL_NO_THREAD_SAFETY_ANALYSIS +// +// Turns off thread safety checking within the body of a particular function. 
+// This annotation is used to mark functions that are known to be correct, but
+// the locking behavior is more complicated than the analyzer can handle.
+#define ABSL_NO_THREAD_SAFETY_ANALYSIS \
+  ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// ABSL_TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes. These
+// annotations will be ignored by the analysis.
+#define ABSL_TS_UNCHECKED(x) ""
+
+// ABSL_TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to ABSL_TS_UNCHECKED.
+#define ABSL_TS_FIXME(x) ""
+
+// Like ABSL_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body
+// of a particular function. However, this attribute is used to mark functions
+// that are incorrect and need to be fixed. It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME ABSL_NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks an
+// ABSL_GUARDED_BY annotation that needs to be fixed, because it is producing
+// a thread safety warning. It disables the ABSL_GUARDED_BY annotation.
+#define ABSL_GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation. This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+// Do not use this function directly; use ABSL_TS_UNCHECKED_READ instead.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_THREAD_ANNOTATIONS_H_
diff --git a/base/abseil/absl/base/throw_delegate_test.cc b/base/abseil/absl/base/throw_delegate_test.cc
new file mode 100644
index 0000000..5ba4ce5
--- /dev/null
+++ b/base/abseil/absl/base/throw_delegate_test.cc
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
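+
+// (Editor's sketch, not upstream text.) The delegates under test centralize
+// exception construction so call sites stay small; with exceptions disabled
+// they report the message and abort instead of throwing. Assumed caller-side
+// usage, with a hypothetical bounds check:
+//
+//   if (pos >= size) absl::base_internal::ThrowStdOutOfRange("pos");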
+
+#include "absl/base/internal/throw_delegate.h"
+
+#include <functional>
+#include <new>
+#include <stdexcept>
+
+#include "absl/base/config.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+using absl::base_internal::ThrowStdLogicError;
+using absl::base_internal::ThrowStdInvalidArgument;
+using absl::base_internal::ThrowStdDomainError;
+using absl::base_internal::ThrowStdLengthError;
+using absl::base_internal::ThrowStdOutOfRange;
+using absl::base_internal::ThrowStdRuntimeError;
+using absl::base_internal::ThrowStdRangeError;
+using absl::base_internal::ThrowStdOverflowError;
+using absl::base_internal::ThrowStdUnderflowError;
+using absl::base_internal::ThrowStdBadFunctionCall;
+using absl::base_internal::ThrowStdBadAlloc;
+
+constexpr const char* what_arg = "The quick brown fox jumps over the lazy dog";
+
+template <typename E>
+void ExpectThrowChar(void (*f)(const char*)) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  try {
+    f(what_arg);
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+    EXPECT_STREQ(e.what(), what_arg);
+  }
+#else
+  EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg);
+#endif
+}
+
+template <typename E>
+void ExpectThrowString(void (*f)(const std::string&)) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  try {
+    f(what_arg);
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+    EXPECT_STREQ(e.what(), what_arg);
+  }
+#else
+  EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg);
+#endif
+}
+
+template <typename E>
+void ExpectThrowNoWhat(void (*f)()) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  try {
+    f();
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+  }
+#else
+  EXPECT_DEATH_IF_SUPPORTED(f(), "");
+#endif
+}
+
+TEST(ThrowHelper, Test) {
+  // Not using EXPECT_THROW because we want to check the .what() message too.
+  ExpectThrowChar<std::logic_error>(ThrowStdLogicError);
+  ExpectThrowChar<std::invalid_argument>(ThrowStdInvalidArgument);
+  ExpectThrowChar<std::domain_error>(ThrowStdDomainError);
+  ExpectThrowChar<std::length_error>(ThrowStdLengthError);
+  ExpectThrowChar<std::out_of_range>(ThrowStdOutOfRange);
+  ExpectThrowChar<std::runtime_error>(ThrowStdRuntimeError);
+  ExpectThrowChar<std::range_error>(ThrowStdRangeError);
+  ExpectThrowChar<std::overflow_error>(ThrowStdOverflowError);
+  ExpectThrowChar<std::underflow_error>(ThrowStdUnderflowError);
+
+  ExpectThrowString<std::logic_error>(ThrowStdLogicError);
+  ExpectThrowString<std::invalid_argument>(ThrowStdInvalidArgument);
+  ExpectThrowString<std::domain_error>(ThrowStdDomainError);
+  ExpectThrowString<std::length_error>(ThrowStdLengthError);
+  ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
+  ExpectThrowString<std::runtime_error>(ThrowStdRuntimeError);
+  ExpectThrowString<std::range_error>(ThrowStdRangeError);
+  ExpectThrowString<std::overflow_error>(ThrowStdOverflowError);
+  ExpectThrowString<std::underflow_error>(ThrowStdUnderflowError);
+
+  ExpectThrowNoWhat<std::bad_function_call>(ThrowStdBadFunctionCall);
+  ExpectThrowNoWhat<std::bad_alloc>(ThrowStdBadAlloc);
+}
+
+}  // namespace
diff --git a/base/abseil/absl/compiler_config_setting.bzl b/base/abseil/absl/compiler_config_setting.bzl
new file mode 100644
index 0000000..6696229
--- /dev/null
+++ b/base/abseil/absl/compiler_config_setting.bzl
@@ -0,0 +1,38 @@
+#
+# Copyright 2018 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Creates config_setting that allows selecting based on 'compiler' value.""" + +def create_llvm_config(name, visibility): + # The "do_not_use_tools_cpp_compiler_present" attribute exists to + # distinguish between older versions of Bazel that do not support + # "@bazel_tools//tools/cpp:compiler" flag_value, and newer ones that do. + # In the future, the only way to select on the compiler will be through + # flag_values{"@bazel_tools//tools/cpp:compiler"} and the else branch can + # be removed. + if hasattr(cc_common, "do_not_use_tools_cpp_compiler_present"): + native.config_setting( + name = name, + flag_values = { + "@bazel_tools//tools/cpp:compiler": "llvm", + }, + visibility = visibility, + ) + else: + native.config_setting( + name = name, + values = {"compiler": "llvm"}, + visibility = visibility, + ) diff --git a/base/abseil/absl/container/BUILD.bazel b/base/abseil/absl/container/BUILD.bazel new file mode 100644 index 0000000..1f7abe0 --- /dev/null +++ b/base/abseil/absl/container/BUILD.bazel @@ -0,0 +1,876 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") +load( + "//absl:copts/configure_copts.bzl", + "ABSL_DEFAULT_COPTS", + "ABSL_DEFAULT_LINKOPTS", + "ABSL_TEST_COPTS", +) + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +cc_library( + name = "compressed_tuple", + hdrs = ["internal/compressed_tuple.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/utility", + ], +) + +cc_test( + name = "compressed_tuple_test", + srcs = ["internal/compressed_tuple_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":compressed_tuple", + ":test_instance_tracker", + "//absl/memory", + "//absl/types:any", + "//absl/types:optional", + "//absl/utility", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "fixed_array", + hdrs = ["fixed_array.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":compressed_tuple", + "//absl/algorithm", + "//absl/base:core_headers", + "//absl/base:dynamic_annotations", + "//absl/base:throw_delegate", + "//absl/memory", + ], +) + +cc_test( + name = "fixed_array_test", + srcs = ["fixed_array_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":fixed_array", + "//absl/base:exception_testing", + "//absl/hash:hash_testing", + "//absl/memory", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "fixed_array_exception_safety_test", + srcs = ["fixed_array_exception_safety_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":fixed_array", + "//absl/base:config", + "//absl/base:exception_safety_testing", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "fixed_array_benchmark", + srcs = ["fixed_array_benchmark.cc"], + copts = ABSL_TEST_COPTS + ["$(STACK_FRAME_UNLIMITED)"], + linkopts = 
ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + deps = [ + ":fixed_array", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_library( + name = "inlined_vector_internal", + hdrs = ["internal/inlined_vector.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":compressed_tuple", + "//absl/base:core_headers", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/types:span", + ], +) + +cc_library( + name = "inlined_vector", + hdrs = ["inlined_vector.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":inlined_vector_internal", + "//absl/algorithm", + "//absl/base:core_headers", + "//absl/base:throw_delegate", + "//absl/memory", + ], +) + +cc_library( + name = "counting_allocator", + testonly = 1, + hdrs = ["internal/counting_allocator.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = ["//absl/base:config"], +) + +cc_test( + name = "inlined_vector_test", + srcs = ["inlined_vector_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":counting_allocator", + ":inlined_vector", + ":test_instance_tracker", + "//absl/base:core_headers", + "//absl/base:exception_testing", + "//absl/base:raw_logging_internal", + "//absl/hash:hash_testing", + "//absl/memory", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "inlined_vector_benchmark", + srcs = ["inlined_vector_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + deps = [ + ":inlined_vector", + "//absl/base:core_headers", + "//absl/base:raw_logging_internal", + "//absl/strings", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_test( + name = "inlined_vector_exception_safety_test", + srcs = ["inlined_vector_exception_safety_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":inlined_vector", + "//absl/base:config", + "//absl/base:exception_safety_testing", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "test_instance_tracker", + testonly = 1, + srcs = ["internal/test_instance_tracker.cc"], + hdrs = ["internal/test_instance_tracker.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = ["//absl/types:compare"], +) + +cc_test( + name = "test_instance_tracker_test", + srcs = ["internal/test_instance_tracker_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":test_instance_tracker", + "@com_google_googletest//:gtest_main", + ], +) + +NOTEST_TAGS_NONMOBILE = [ + "no_test_darwin_x86_64", + "no_test_loonix", +] + +NOTEST_TAGS_MOBILE = [ + "no_test_android_arm", + "no_test_android_arm64", + "no_test_android_x86", + "no_test_ios_x86_64", +] + +NOTEST_TAGS = NOTEST_TAGS_MOBILE + NOTEST_TAGS_NONMOBILE + +cc_library( + name = "flat_hash_map", + hdrs = ["flat_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":raw_hash_map", + "//absl/algorithm:container", + "//absl/memory", + ], +) + +cc_test( + name = "flat_hash_map_test", + srcs = ["flat_hash_map_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":flat_hash_map", + ":hash_generator_testing", + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_members_test", + 
":unordered_map_modifiers_test", + "//absl/types:any", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "flat_hash_set", + hdrs = ["flat_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":raw_hash_set", + "//absl/algorithm:container", + "//absl/base:core_headers", + "//absl/memory", + ], +) + +cc_test( + name = "flat_hash_set_test", + srcs = ["flat_hash_set_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":flat_hash_set", + ":hash_generator_testing", + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_members_test", + ":unordered_set_modifiers_test", + "//absl/memory", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "node_hash_map", + hdrs = ["node_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":node_hash_policy", + ":raw_hash_map", + "//absl/algorithm:container", + "//absl/memory", + ], +) + +cc_test( + name = "node_hash_map_test", + srcs = ["node_hash_map_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":hash_generator_testing", + ":node_hash_map", + ":tracked", + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_members_test", + ":unordered_map_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "node_hash_set", + hdrs = ["node_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_function_defaults", + ":node_hash_policy", + ":raw_hash_set", + "//absl/algorithm:container", + "//absl/memory", + ], +) + +cc_test( + name = "node_hash_set_test", + srcs = ["node_hash_set_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":node_hash_set", + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_members_test", + ":unordered_set_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "container_memory", + hdrs = ["internal/container_memory.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/memory", + "//absl/utility", + ], +) + +cc_test( + name = "container_memory_test", + srcs = ["internal/container_memory_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":container_memory", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_function_defaults", + hdrs = ["internal/hash_function_defaults.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + "//absl/hash", + "//absl/strings", + ], +) + +cc_test( + name = "hash_function_defaults_test", + srcs = ["internal/hash_function_defaults_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS, + deps = [ + ":hash_function_defaults", + "//absl/hash", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_generator_testing", + testonly = 1, + srcs = ["internal/hash_generator_testing.cc"], + hdrs = 
["internal/hash_generator_testing.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_policy_testing", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + ], +) + +cc_library( + name = "hash_policy_testing", + testonly = 1, + hdrs = ["internal/hash_policy_testing.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/hash", + "//absl/strings", + ], +) + +cc_test( + name = "hash_policy_testing_test", + srcs = ["internal/hash_policy_testing_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_policy_testing", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_policy_traits", + hdrs = ["internal/hash_policy_traits.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = ["//absl/meta:type_traits"], +) + +cc_test( + name = "hash_policy_traits_test", + srcs = ["internal/hash_policy_traits_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_policy_traits", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hashtable_debug", + hdrs = ["internal/hashtable_debug.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hashtable_debug_hooks", + ], +) + +cc_library( + name = "hashtable_debug_hooks", + hdrs = ["internal/hashtable_debug_hooks.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + ], +) + +cc_library( + name = "hashtablez_sampler", + srcs = [ + "internal/hashtablez_sampler.cc", + "internal/hashtablez_sampler_force_weak_definition.cc", + ], + hdrs = ["internal/hashtablez_sampler.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":have_sse", + "//absl/base", + "//absl/base:core_headers", + "//absl/base:exponential_biased", + "//absl/debugging:stacktrace", + "//absl/memory", + "//absl/synchronization", + "//absl/utility", + ], +) + +cc_test( + name = "hashtablez_sampler_test", + srcs = ["internal/hashtablez_sampler_test.cc"], + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hashtablez_sampler", + ":have_sse", + "//absl/base:core_headers", + "//absl/synchronization", + "//absl/synchronization:thread_pool", + "//absl/time", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "node_hash_policy", + hdrs = ["internal/node_hash_policy.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = ["//absl/base:config"], +) + +cc_test( + name = "node_hash_policy_test", + srcs = ["internal/node_hash_policy_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_policy_traits", + ":node_hash_policy", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "raw_hash_map", + hdrs = ["internal/raw_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":container_memory", + ":raw_hash_set", + "//absl/base:throw_delegate", + ], +) + +cc_library( + name = "have_sse", + hdrs = ["internal/have_sse.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], +) + +cc_library( + name = "common", + hdrs = ["internal/common.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/meta:type_traits", + "//absl/types:optional", + ], +) + +cc_library( + name = "raw_hash_set", + srcs = ["internal/raw_hash_set.cc"], + hdrs = 
["internal/raw_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":common", + ":compressed_tuple", + ":container_memory", + ":hash_policy_traits", + ":hashtable_debug_hooks", + ":hashtablez_sampler", + ":have_sse", + ":layout", + "//absl/base:bits", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:endian", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/utility", + ], +) + +cc_test( + name = "raw_hash_set_test", + srcs = ["internal/raw_hash_set_test.cc"], + copts = ABSL_TEST_COPTS, + linkstatic = 1, + tags = NOTEST_TAGS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":hash_policy_testing", + ":hashtable_debug", + ":raw_hash_set", + "//absl/base", + "//absl/base:core_headers", + "//absl/base:raw_logging_internal", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "raw_hash_set_allocator_test", + size = "small", + srcs = ["internal/raw_hash_set_allocator_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":raw_hash_set", + ":tracked", + "//absl/base:core_headers", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "layout", + hdrs = ["internal/layout.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:core_headers", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/types:span", + "//absl/utility", + ], +) + +cc_test( + name = "layout_test", + size = "small", + srcs = ["internal/layout_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS, + visibility = ["//visibility:private"], + deps = [ + ":layout", + "//absl/base:core_headers", + "//absl/base:raw_logging_internal", + "//absl/types:span", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "tracked", + testonly = 1, + hdrs = ["internal/tracked.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + ], +) + +cc_library( + name = "unordered_map_constructor_test", + testonly = 1, + hdrs = ["internal/unordered_map_constructor_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_map_lookup_test", + testonly = 1, + hdrs = ["internal/unordered_map_lookup_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_map_modifiers_test", + testonly = 1, + hdrs = ["internal/unordered_map_modifiers_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_constructor_test", + testonly = 1, + hdrs = ["internal/unordered_set_constructor_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "//absl/meta:type_traits", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_members_test", + testonly = 1, + hdrs = ["internal/unordered_set_members_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/meta:type_traits", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + 
name = "unordered_map_members_test", + testonly = 1, + hdrs = ["internal/unordered_map_members_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/meta:type_traits", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_lookup_test", + testonly = 1, + hdrs = ["internal/unordered_set_lookup_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_modifiers_test", + testonly = 1, + hdrs = ["internal/unordered_set_modifiers_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_test( + name = "unordered_set_test", + srcs = ["internal/unordered_set_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_members_test", + ":unordered_set_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "unordered_map_test", + srcs = ["internal/unordered_map_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS_NONMOBILE, + deps = [ + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_members_test", + ":unordered_map_modifiers_test", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "btree", + srcs = [ + "internal/btree.h", + "internal/btree_container.h", + ], + hdrs = [ + "btree_map.h", + "btree_set.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:public"], + deps = [ + ":common", + ":compressed_tuple", + ":container_memory", + ":layout", + "//absl/base:core_headers", + "//absl/base:throw_delegate", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/types:compare", + "//absl/utility", + ], +) + +cc_library( + name = "btree_test_common", + testonly = 1, + hdrs = ["btree_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + ":btree", + ":flat_hash_set", + "//absl/strings", + "//absl/time", + ], +) + +cc_test( + name = "btree_test", + size = "large", + srcs = [ + "btree_test.cc", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + shard_count = 10, + visibility = ["//visibility:private"], + deps = [ + ":btree", + ":btree_test_common", + ":counting_allocator", + ":test_instance_tracker", + "//absl/base:core_headers", + "//absl/base:raw_logging_internal", + "//absl/flags:flag", + "//absl/hash:hash_testing", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/types:compare", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/base/abseil/absl/container/CMakeLists.txt b/base/abseil/absl/container/CMakeLists.txt new file mode 100644 index 0000000..a931f33 --- /dev/null +++ b/base/abseil/absl/container/CMakeLists.txt @@ -0,0 +1,887 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is deprecated and will be removed in the future. It also doesn't do +# anything anyway. Prefer to use the library associated with the API you are +# using. +absl_cc_library( + NAME + container + PUBLIC +) + +absl_cc_library( + NAME + btree + HDRS + "btree_map.h" + "btree_set.h" + "internal/btree.h" + "internal/btree_container.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::container_common + absl::compare + absl::compressed_tuple + absl::container_memory + absl::core_headers + absl::layout + absl::memory + absl::strings + absl::throw_delegate + absl::type_traits + absl::utility +) + +absl_cc_library( + NAME + btree_test_common + HDRS + "btree_test.h" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::btree + absl::flat_hash_set + absl::strings + absl::time + TESTONLY +) + +absl_cc_test( + NAME + btree_test + SRCS + "btree_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::btree + absl::btree_test_common + absl::compare + absl::core_headers + absl::counting_allocator + absl::flags + absl::hash_testing + absl::raw_logging_internal + absl::strings + absl::test_instance_tracker + absl::type_traits + gmock_main +) + +absl_cc_library( + NAME + compressed_tuple + HDRS + "internal/compressed_tuple.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::utility + PUBLIC +) + +absl_cc_test( + NAME + compressed_tuple_test + SRCS + "internal/compressed_tuple_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::any + absl::compressed_tuple + absl::memory + absl::optional + absl::test_instance_tracker + absl::utility + gmock_main +) + +absl_cc_library( + NAME + fixed_array + HDRS + "fixed_array.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compressed_tuple + absl::algorithm + absl::core_headers + absl::dynamic_annotations + absl::throw_delegate + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + fixed_array_test + SRCS + "fixed_array_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::fixed_array + absl::exception_testing + absl::hash_testing + absl::memory + gmock_main +) + +absl_cc_test( + NAME + fixed_array_exception_safety_test + SRCS + "fixed_array_exception_safety_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::fixed_array + absl::config + absl::exception_safety_testing + gmock_main +) + +absl_cc_library( + NAME + inlined_vector_internal + HDRS + "internal/inlined_vector.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compressed_tuple + absl::core_headers + absl::memory + absl::span + absl::type_traits + PUBLIC +) + +absl_cc_library( + NAME + inlined_vector + HDRS + "inlined_vector.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::inlined_vector_internal + absl::throw_delegate + absl::memory + PUBLIC +) + +absl_cc_library( + NAME + counting_allocator + HDRS + "internal/counting_allocator.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config +) + +absl_cc_test( + NAME + inlined_vector_test + SRCS + "inlined_vector_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::counting_allocator + absl::inlined_vector +
absl::test_instance_tracker + absl::core_headers + absl::exception_testing + absl::hash_testing + absl::memory + absl::raw_logging_internal + absl::strings + gmock_main +) + +absl_cc_test( + NAME + inlined_vector_exception_safety_test + SRCS + "inlined_vector_exception_safety_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::inlined_vector + absl::config + absl::exception_safety_testing + gmock_main +) + +absl_cc_library( + NAME + test_instance_tracker + HDRS + "internal/test_instance_tracker.h" + SRCS + "internal/test_instance_tracker.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compare + TESTONLY +) + +absl_cc_test( + NAME + test_instance_tracker_test + SRCS + "internal/test_instance_tracker_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::test_instance_tracker + gmock_main +) + +absl_cc_library( + NAME + flat_hash_map + HDRS + "flat_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::hash_function_defaults + absl::raw_hash_map + absl::algorithm_container + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + flat_hash_map_test + SRCS + "flat_hash_map_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::flat_hash_map + absl::hash_generator_testing + absl::unordered_map_constructor_test + absl::unordered_map_lookup_test + absl::unordered_map_members_test + absl::unordered_map_modifiers_test + absl::any + gmock_main +) + +absl_cc_library( + NAME + flat_hash_set + HDRS + "flat_hash_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::hash_function_defaults + absl::raw_hash_set + absl::algorithm_container + absl::core_headers + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + flat_hash_set_test + SRCS + "flat_hash_set_test.cc" + COPTS + ${ABSL_TEST_COPTS} + "-DUNORDERED_SET_CXX17" + DEPS + absl::flat_hash_set + absl::hash_generator_testing + absl::unordered_set_constructor_test + absl::unordered_set_lookup_test + absl::unordered_set_members_test + absl::unordered_set_modifiers_test + absl::memory + absl::strings + gmock_main +) + +absl_cc_library( + NAME + node_hash_map + HDRS + "node_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::hash_function_defaults + absl::node_hash_policy + absl::raw_hash_map + absl::algorithm_container + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + node_hash_map_test + SRCS + "node_hash_map_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::node_hash_map + absl::tracked + absl::unordered_map_constructor_test + absl::unordered_map_lookup_test + absl::unordered_map_members_test + absl::unordered_map_modifiers_test + gmock_main +) + +absl_cc_library( + NAME + node_hash_set + HDRS + "node_hash_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::hash_function_defaults + absl::node_hash_policy + absl::raw_hash_set + absl::algorithm_container + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + node_hash_set_test + SRCS + "node_hash_set_test.cc" + COPTS + ${ABSL_TEST_COPTS} + "-DUNORDERED_SET_CXX17" + DEPS + absl::hash_generator_testing + absl::node_hash_set + absl::unordered_set_constructor_test + absl::unordered_set_lookup_test + absl::unordered_set_members_test + absl::unordered_set_modifiers_test + gmock_main +) + +absl_cc_library( + NAME + container_memory + HDRS + "internal/container_memory.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::memory + absl::utility + PUBLIC +) + +absl_cc_test( + NAME + container_memory_test + SRCS + "internal/container_memory_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + 
absl::container_memory + absl::strings + gmock_main +) + +absl_cc_library( + NAME + hash_function_defaults + HDRS + "internal/hash_function_defaults.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::hash + absl::strings + PUBLIC +) + +absl_cc_test( + NAME + hash_function_defaults_test + SRCS + "internal/hash_function_defaults_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_function_defaults + absl::hash + absl::strings + gmock_main +) + +absl_cc_library( + NAME + hash_generator_testing + HDRS + "internal/hash_generator_testing.h" + SRCS + "internal/hash_generator_testing.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_policy_testing + absl::memory + absl::meta + absl::strings + TESTONLY +) + +absl_cc_library( + NAME + hash_policy_testing + HDRS + "internal/hash_policy_testing.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash + absl::strings + TESTONLY +) + +absl_cc_test( + NAME + hash_policy_testing_test + SRCS + "internal/hash_policy_testing_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_policy_testing + gmock_main +) + +absl_cc_library( + NAME + hash_policy_traits + HDRS + "internal/hash_policy_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::meta + PUBLIC +) + +absl_cc_test( + NAME + hash_policy_traits_test + SRCS + "internal/hash_policy_traits_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_policy_traits + gmock_main +) + +absl_cc_library( + NAME + hashtablez_sampler + HDRS + "internal/hashtablez_sampler.h" + SRCS + "internal/hashtablez_sampler.cc" + "internal/hashtablez_sampler_force_weak_definition.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::exponential_biased + absl::have_sse + absl::synchronization +) + +absl_cc_test( + NAME + hashtablez_sampler_test + SRCS + "internal/hashtablez_sampler_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hashtablez_sampler + absl::have_sse + gmock_main +) + +absl_cc_library( + NAME + hashtable_debug + HDRS + "internal/hashtable_debug.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::hashtable_debug_hooks +) + +absl_cc_library( + NAME + hashtable_debug_hooks + HDRS + "internal/hashtable_debug_hooks.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_library( + NAME + have_sse + HDRS + "internal/have_sse.h" + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + node_hash_policy + HDRS + "internal/node_hash_policy.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_test( + NAME + node_hash_policy_test + SRCS + "internal/node_hash_policy_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_policy_traits + absl::node_hash_policy + gmock_main +) + +absl_cc_library( + NAME + raw_hash_map + HDRS + "internal/raw_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::raw_hash_set + absl::throw_delegate + PUBLIC +) + +absl_cc_library( + NAME + container_common + HDRS + "internal/common.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::type_traits +) + +absl_cc_library( + NAME + raw_hash_set + HDRS + "internal/raw_hash_set.h" + SRCS + "internal/raw_hash_set.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bits + absl::compressed_tuple + absl::config + absl::container_common + absl::container_memory + absl::core_headers + absl::endian + absl::hash_policy_traits + absl::hashtable_debug_hooks + absl::have_sse + absl::layout + absl::memory + absl::meta + absl::optional + absl::utility + absl::hashtablez_sampler + PUBLIC +) + +absl_cc_test( + NAME + raw_hash_set_test + SRCS +
"internal/raw_hash_set_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::container_memory + absl::hash_function_defaults + absl::hash_policy_testing + absl::hashtable_debug + absl::raw_hash_set + absl::base + absl::core_headers + absl::raw_logging_internal + absl::strings + gmock_main +) + +absl_cc_test( + NAME + raw_hash_set_allocator_test + SRCS + "internal/raw_hash_set_allocator_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::raw_hash_set + absl::tracked + absl::core_headers + gmock_main +) + +absl_cc_library( + NAME + layout + HDRS + "internal/layout.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::meta + absl::strings + absl::span + absl::utility + PUBLIC +) + +absl_cc_test( + NAME + layout_test + SRCS + "internal/layout_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::layout + absl::core_headers + absl::raw_logging_internal + absl::span + gmock_main +) + +absl_cc_library( + NAME + tracked + HDRS + "internal/tracked.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + TESTONLY +) + +absl_cc_library( + NAME + unordered_map_constructor_test + HDRS + "internal/unordered_map_constructor_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_map_lookup_test + HDRS + "internal/unordered_map_lookup_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_map_members_test + HDRS + "internal/unordered_map_members_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::type_traits + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_map_modifiers_test + HDRS + "internal/unordered_map_modifiers_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_set_constructor_test + HDRS + "internal/unordered_set_constructor_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_set_lookup_test + HDRS + "internal/unordered_set_lookup_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_set_members_test + HDRS + "internal/unordered_set_members_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::type_traits + gmock + TESTONLY +) + +absl_cc_library( + NAME + unordered_set_modifiers_test + HDRS + "internal/unordered_set_modifiers_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + gmock + TESTONLY +) + +absl_cc_test( + NAME + unordered_set_test + SRCS + "internal/unordered_set_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::unordered_set_constructor_test + absl::unordered_set_lookup_test + absl::unordered_set_members_test + absl::unordered_set_modifiers_test + gmock_main +) + +absl_cc_test( + NAME + unordered_map_test + SRCS + "internal/unordered_map_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::unordered_map_constructor_test + absl::unordered_map_lookup_test + absl::unordered_map_members_test + absl::unordered_map_modifiers_test + gmock_main +) diff --git a/base/abseil/absl/container/btree_map.h b/base/abseil/absl/container/btree_map.h new file mode 100644 index 0000000..cbfcb58 --- /dev/null +++ b/base/abseil/absl/container/btree_map.h @@ -0,0 +1,735 @@ +// Copyright 
2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: btree_map.h +// ----------------------------------------------------------------------------- +// +// This header file defines B-tree maps: sorted associative containers mapping +// keys to values. +// +// * `absl::btree_map<>` +// * `absl::btree_multimap<>` +// +// These B-tree types are similar to the corresponding types in the STL +// (`std::map` and `std::multimap`) and generally conform to the STL interfaces +// of those types. However, because they are implemented using B-trees, they +// are more efficient in most situations. +// +// Unlike `std::map` and `std::multimap`, which are commonly implemented using +// red-black tree nodes, B-tree maps use more generic B-tree nodes able to hold +// multiple values per node. Holding multiple values per node often makes +// B-tree maps perform better than their `std::map` counterparts, because +// multiple entries can be checked within the same cache hit. +// +// However, these types should not be considered drop-in replacements for +// `std::map` and `std::multimap` as there are some API differences, which are +// noted in this header file. +// +// Importantly, insertions and deletions may invalidate outstanding iterators, +// pointers, and references to elements. Such invalidations are typically only +// an issue if insertion and deletion operations are interleaved with the use of +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()` and `erase()` return a valid iterator at the current +// position. + +#ifndef ABSL_CONTAINER_BTREE_MAP_H_ +#define ABSL_CONTAINER_BTREE_MAP_H_ + +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree_container.h" // IWYU pragma: export + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::btree_map<> +// +// An `absl::btree_map<K, V>` is an ordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::map` (in most cases). +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less<K>`. +// +// An `absl::btree_map<K, V>` uses a default allocator of +// `std::allocator<std::pair<const K, V>>` to allocate (and deallocate) +// nodes, and construct and destruct values within those nodes. You may +// instead specify a custom allocator `A` (which in turn requires specifying a +// custom comparator `C`) as in `absl::btree_map<K, V, C, A>`.
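Before the class itself, a quick hypothetical usage sketch (not part of the patch; `Example` is an invented name) showing that the ordered-map interface mirrors std::map:

#include <string>
#include "absl/container/btree_map.h"

void Example() {
  absl::btree_map<int, std::string> m = {{3, "c"}, {1, "a"}};
  m[2] = "b";                  // operator[] inserts, as with std::map.
  auto it = m.lower_bound(2);  // Ordered lookup: points at {2, "b"}.
  it = m.erase(it);            // erase() returns the next valid iterator.
}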
+// +template <typename Key, typename Value, typename Compare = std::less<Key>, + typename Alloc = std::allocator<std::pair<const Key, Value>>> +class btree_map + : public container_internal::btree_map_container< + container_internal::btree<container_internal::map_params< + Key, Value, Compare, Alloc, /*TargetNodeSize=*/256, + /*Multi=*/false>>> { + using Base = typename btree_map::btree_map_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_map` supports the same overload set as `std::map` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_map<int, std::string> map1; + // + // * Initializer List constructor + // + // absl::btree_map<int, std::string> map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_map<int, std::string> map3(map2); + // + // * Copy assignment operator + // + // absl::btree_map<int, std::string> map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_map<int, std::string> map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_map<int, std::string> map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}}; + // absl::btree_map<int, std::string> map7(v.begin(), v.end()); + btree_map() {} + using Base::Base; + + // btree_map::begin() + // + // Returns an iterator to the beginning of the `btree_map`. + using Base::begin; + + // btree_map::cbegin() + // + // Returns a const iterator to the beginning of the `btree_map`. + using Base::cbegin; + + // btree_map::end() + // + // Returns an iterator to the end of the `btree_map`. + using Base::end; + + // btree_map::cend() + // + // Returns a const iterator to the end of the `btree_map`. + using Base::cend; + + // btree_map::empty() + // + // Returns whether or not the `btree_map` is empty. + using Base::empty; + + // btree_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_map` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `btree_map`. + using Base::max_size; + + // btree_map::size() + // + // Returns the number of elements currently within the `btree_map`. + using Base::size; + + // btree_map::clear() + // + // Removes all elements from the `btree_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_map::erase() + // + // Erases elements within the `btree_map`. If an erase occurs, any references, + // pointers, or iterators are invalidated. + // Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_map`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template <typename K> size_type erase(const K& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased. + using Base::erase; + + // btree_map::insert() + // + // Inserts an element of the specified value into the `btree_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If an insertion + // occurs, any references, pointers, or iterators are invalidated.
+ // Overloads are listed below. + // + // std::pair<iterator,bool> insert(const value_type& value): + // + // Inserts a value into the `btree_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair<iterator,bool> insert(value_type&& value): + // + // Inserts a moveable value into the `btree_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list<value_type> ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace_hint; + + // btree_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + // + // Overloads are listed below. + // + // std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args): + // std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
+ // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::try_emplace; + + // btree_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template <typename K> node_type extract(const K& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_map` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_map::merge() + // + // Extracts elements from a given `source` btree_map into this + // `btree_map`. If the destination `btree_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // btree_map::swap(btree_map& other) + // + // Exchanges the contents of this `btree_map` with those of the `other` + // btree_map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `btree_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // btree_map::contains() + // + // template <typename K> bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_map`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. + using Base::contains; + + // btree_map::count() + // + // template <typename K> size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_map`. Note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `btree_map`. + // + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. + using Base::count; + + // btree_map::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_map`. + using Base::equal_range; + + // btree_map::find() + // + // template <typename K> iterator find(const K& key): + // template <typename K> const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_map`. + // + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator.
+ using Base::find; + + // btree_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `btree_map`, performing an `insert()` if the key does not already + // exist. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. Otherwise iterators are not affected and references are not + // invalidated. Overloads are listed below. + // + // T& operator[](key_type&& key): + // T& operator[](const key_type& key): + // + // Inserts a value_type object constructed in-place if the element with the + // given key does not exist. + using Base::operator[]; + + // btree_map::get_allocator() + // + // Returns the allocator function associated with this `btree_map`. + using Base::get_allocator; + + // btree_map::key_comp(); + // + // Returns the key comparator associated with this `btree_map`. + using Base::key_comp; + + // btree_map::value_comp(); + // + // Returns the value comparator associated with this `btree_map`. + using Base::value_comp; +}; + +// absl::swap(absl::btree_map<>, absl::btree_map<>) +// +// Swaps the contents of two `absl::btree_map` containers. +template <typename K, typename V, typename C, typename A> +void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) { + return x.swap(y); +} + +// absl::erase_if(absl::btree_map<>, Pred) +// +// Erases all elements that satisfy the predicate pred from the container. +template <typename K, typename V, typename C, typename A, typename Pred> +void erase_if(btree_map<K, V, C, A> &map, Pred pred) { + for (auto it = map.begin(); it != map.end();) { + if (pred(*it)) { + it = map.erase(it); + } else { + ++it; + } + } +} + +// absl::btree_multimap +// +// An `absl::btree_multimap<K, V>` is an ordered associative container of +// keys and associated values designed to be a more efficient replacement for +// `std::multimap` (in most cases). Unlike `absl::btree_map`, a B-tree multimap +// allows multiple elements with equivalent keys. +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less<K>`. +// +// An `absl::btree_multimap<K, V>` uses a default allocator of +// `std::allocator<std::pair<const K, V>>` to allocate (and deallocate) +// nodes, and construct and destruct values within those nodes. You may +// instead specify a custom allocator `A` (which in turn requires specifying a +// custom comparator `C`) as in `absl::btree_multimap<K, V, C, A>`.
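A brief hypothetical sketch of the `absl::erase_if` helper defined above (`DropEmptyValues` is an invented name); it removes matching entries without the manual iterator bookkeeping that an erase-in-a-loop otherwise requires:

#include <string>
#include "absl/container/btree_map.h"

void DropEmptyValues(absl::btree_map<int, std::string>& m) {
  absl::erase_if(m, [](const auto& kv) { return kv.second.empty(); });
}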
+// +template , + typename Alloc = std::allocator>> +class btree_multimap + : public container_internal::btree_multimap_container< + container_internal::btree>> { + using Base = typename btree_multimap::btree_multimap_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_multimap` supports the same overload set as `std::multimap` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_multimap map1; + // + // * Initializer List constructor + // + // absl::btree_multimap map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_multimap map3(map2); + // + // * Copy assignment operator + // + // absl::btree_multimap map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_multimap map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_multimap map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::btree_multimap map7(v.begin(), v.end()); + btree_multimap() {} + using Base::Base; + + // btree_multimap::begin() + // + // Returns an iterator to the beginning of the `btree_multimap`. + using Base::begin; + + // btree_multimap::cbegin() + // + // Returns a const iterator to the beginning of the `btree_multimap`. + using Base::cbegin; + + // btree_multimap::end() + // + // Returns an iterator to the end of the `btree_multimap`. + using Base::end; + + // btree_multimap::cend() + // + // Returns a const iterator to the end of the `btree_multimap`. + using Base::cend; + + // btree_multimap::empty() + // + // Returns whether or not the `btree_multimap` is empty. + using Base::empty; + + // btree_multimap::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_multimap` under current memory constraints. This value can be + // thought of as the largest value of `std::distance(begin(), end())` for a + // `btree_multimap`. + using Base::max_size; + + // btree_multimap::size() + // + // Returns the number of elements currently within the `btree_multimap`. + using Base::size; + + // btree_multimap::clear() + // + // Removes all elements from the `btree_multimap`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_multimap::erase() + // + // Erases elements within the `btree_multimap`. If an erase occurs, any + // references, pointers, or iterators are invalidated. + // Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_multimap`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the elements matching the key, if any exist, returning the + // number of elements erased. + using Base::erase; + + // btree_multimap::insert() + // + // Inserts an element of the specified value into the `btree_multimap`, + // returning an iterator pointing to the newly inserted element. 
+ // Any references, pointers, or iterators are invalidated. Overloads are + // listed below. + // + // iterator insert(const value_type& value): + // + // Inserts a value into the `btree_multimap`, returning an iterator to the + // inserted element. + // + // iterator insert(value_type&& value): + // + // Inserts a moveable value into the `btree_multimap`, returning an iterator + // to the inserted element. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_multimap::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multimap`. Any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_multimap::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multimap`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search. + // + // Any references, pointers, or iterators are invalidated. + using Base::emplace_hint; + + // btree_multimap::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_multimap` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_multimap::merge() + // + // Extracts elements from a given `source` btree_multimap into this + // `btree_multimap`. If the destination `btree_multimap` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // btree_multimap::swap(btree_multimap& other) + // + // Exchanges the contents of this `btree_multimap` with those of the `other` + // btree_multimap, avoiding invocation of any move, copy, or swap operations + // on individual elements. + // + // All iterators and references on the `btree_multimap` remain valid, + // excepting for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_multimap::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_multimap`, returning `true` if so or `false` otherwise. 
+  //
+  // Supports heterogeneous lookup, provided that the map is provided a
+  // compatible heterogeneous comparator.
+  using Base::contains;
+
+  // btree_multimap::count()
+  //
+  // template <typename K> size_type count(const K& key) const:
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `btree_multimap`.
+  //
+  // Supports heterogeneous lookup, provided that the map is provided a
+  // compatible heterogeneous comparator.
+  using Base::count;
+
+  // btree_multimap::equal_range()
+  //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `btree_multimap`.
+  using Base::equal_range;
+
+  // btree_multimap::find()
+  //
+  // template <typename K> iterator find(const K& key):
+  // template <typename K> const_iterator find(const K& key) const:
+  //
+  // Finds an element with the passed `key` within the `btree_multimap`.
+  //
+  // Supports heterogeneous lookup, provided that the map is provided a
+  // compatible heterogeneous comparator.
+  using Base::find;
+
+  // btree_multimap::get_allocator()
+  //
+  // Returns the allocator function associated with this `btree_multimap`.
+  using Base::get_allocator;
+
+  // btree_multimap::key_comp();
+  //
+  // Returns the key comparator associated with this `btree_multimap`.
+  using Base::key_comp;
+
+  // btree_multimap::value_comp();
+  //
+  // Returns the value comparator associated with this `btree_multimap`.
+  using Base::value_comp;
+};
+
+// absl::swap(absl::btree_multimap<>, absl::btree_multimap<>)
+//
+// Swaps the contents of two `absl::btree_multimap` containers.
+template <typename K, typename V, typename C, typename A>
+void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
+  return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_multimap<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+template <typename K, typename V, typename C, typename A, typename Pred>
+void erase_if(btree_multimap<K, V, C, A> &map, Pred pred) {
+  for (auto it = map.begin(); it != map.end();) {
+    if (pred(*it)) {
+      it = map.erase(it);
+    } else {
+      ++it;
+    }
+  }
+}
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_BTREE_MAP_H_
diff --git a/base/abseil/absl/container/btree_set.h b/base/abseil/absl/container/btree_set.h
new file mode 100644
index 0000000..127fb94
--- /dev/null
+++ b/base/abseil/absl/container/btree_set.h
@@ -0,0 +1,683 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: btree_set.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines B-tree sets: sorted associative containers of
+// values.
+//
+//     * `absl::btree_set<>`
+//     * `absl::btree_multiset<>`
+//
+// These B-tree types are similar to the corresponding types in the STL
+// (`std::set` and `std::multiset`) and generally conform to the STL interfaces
+// of those types. However, because they are implemented using B-trees, they
+// are more efficient in most situations.
+// +// Unlike `std::set` and `std::multiset`, which are commonly implemented using +// red-black tree nodes, B-tree sets use more generic B-tree nodes able to hold +// multiple values per node. Holding multiple values per node often makes +// B-tree sets perform better than their `std::set` counterparts, because +// multiple entries can be checked within the same cache hit. +// +// However, these types should not be considered drop-in replacements for +// `std::set` and `std::multiset` as there are some API differences, which are +// noted in this header file. +// +// Importantly, insertions and deletions may invalidate outstanding iterators, +// pointers, and references to elements. Such invalidations are typically only +// an issue if insertion and deletion operations are interleaved with the use of +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()` and `erase()` return a valid iterator at the current +// position. + +#ifndef ABSL_CONTAINER_BTREE_SET_H_ +#define ABSL_CONTAINER_BTREE_SET_H_ + +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree_container.h" // IWYU pragma: export + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::btree_set<> +// +// An `absl::btree_set` is an ordered associative container of unique key +// values designed to be a more efficient replacement for `std::set` (in most +// cases). +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less`. +// +// An `absl::btree_set` uses a default allocator of `std::allocator` to +// allocate (and deallocate) nodes, and construct and destruct values within +// those nodes. You may instead specify a custom allocator `A` (which in turn +// requires specifying a custom comparator `C`) as in +// `absl::btree_set`. +// +template , + typename Alloc = std::allocator> +class btree_set + : public container_internal::btree_set_container< + container_internal::btree>> { + using Base = typename btree_set::btree_set_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_set` supports the same overload set as `std::set` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_set set1; + // + // * Initializer List constructor + // + // absl::btree_set set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::btree_set set3(set2); + // + // * Copy assignment operator + // + // absl::btree_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::btree_set set7(v.begin(), v.end()); + btree_set() {} + using Base::Base; + + // btree_set::begin() + // + // Returns an iterator to the beginning of the `btree_set`. + using Base::begin; + + // btree_set::cbegin() + // + // Returns a const iterator to the beginning of the `btree_set`. + using Base::cbegin; + + // btree_set::end() + // + // Returns an iterator to the end of the `btree_set`. + using Base::end; + + // btree_set::cend() + // + // Returns a const iterator to the end of the `btree_set`. + using Base::cend; + + // btree_set::empty() + // + // Returns whether or not the `btree_set` is empty. 
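The constructor and assignment overloads listed in the comments above, written out as a compilable sketch; the element values are arbitrary:

#include <string>
#include <utility>
#include <vector>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<std::string> set1;                       // default
  absl::btree_set<std::string> set2 = {"huey", "dewey", "louie"};
  absl::btree_set<std::string> set3(set2);                 // copy
  absl::btree_set<std::string> set4;
  set4 = set3;                                             // copy assignment
  absl::btree_set<std::string> set5(std::move(set4));      // move
  absl::btree_set<std::string> set6;
  set6 = std::move(set5);                                  // move assignment
  std::vector<std::string> v = {"a", "b"};
  absl::btree_set<std::string> set7(v.begin(), v.end());   // range
  return 0;
}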
+ using Base::empty; + + // btree_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_set` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `btree_set`. + using Base::max_size; + + // btree_set::size() + // + // Returns the number of elements currently within the `btree_set`. + using Base::size; + + // btree_set::clear() + // + // Removes all elements from the `btree_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_set::erase() + // + // Erases elements within the `btree_set`. Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_set`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased. + using Base::erase; + + // btree_set::insert() + // + // Inserts an element of the specified value into the `btree_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If an insertion + // occurs, any references, pointers, or iterators are invalidated. + // Overloads are listed below. + // + // std::pair insert(const value_type& value): + // + // Inserts a value into the `btree_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(value_type&& value): + // + // Inserts a moveable value into the `btree_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. 
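Before the declaration itself, a short sketch of the unique-key insertion semantics described above; the concrete values are made up:

#include <iostream>
#include <string>
#include <tuple>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<std::string> s;
  auto [it, inserted] = s.insert("a");
  std::cout << inserted << "\n";  // 1: first insertion succeeds
  std::tie(it, inserted) = s.insert("a");
  std::cout << inserted << "\n";  // 0: duplicate key is rejected
  // emplace() may construct its argument before detecting the duplicate, in
  // which case the temporary is destroyed immediately (see the note above).
  std::cout << s.emplace("a").second << "\n";  // 0
  return 0;
}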
+ using Base::emplace; + + // btree_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace_hint; + + // btree_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_set` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_set::merge() + // + // Extracts elements from a given `source` btree_set into this + // `btree_set`. If the destination `btree_set` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // btree_set::swap(btree_set& other) + // + // Exchanges the contents of this `btree_set` with those of the `other` + // btree_set, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `btree_set` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_set::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_set`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. + using Base::contains; + + // btree_set::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_set`. Note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `btree_set`. + // + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. + using Base::count; + + // btree_set::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_set`. + using Base::equal_range; + + // btree_set::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_set`. 
+ // + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. + using Base::find; + + // btree_set::get_allocator() + // + // Returns the allocator function associated with this `btree_set`. + using Base::get_allocator; + + // btree_set::key_comp(); + // + // Returns the key comparator associated with this `btree_set`. + using Base::key_comp; + + // btree_set::value_comp(); + // + // Returns the value comparator associated with this `btree_set`. The keys to + // sort the elements are the values themselves, therefore `value_comp` and its + // sibling member function `key_comp` are equivalent. + using Base::value_comp; +}; + +// absl::swap(absl::btree_set<>, absl::btree_set<>) +// +// Swaps the contents of two `absl::btree_set` containers. +template +void swap(btree_set &x, btree_set &y) { + return x.swap(y); +} + +// absl::erase_if(absl::btree_set<>, Pred) +// +// Erases all elements that satisfy the predicate pred from the container. +template +void erase_if(btree_set &set, Pred pred) { + for (auto it = set.begin(); it != set.end();) { + if (pred(*it)) { + it = set.erase(it); + } else { + ++it; + } + } +} + +// absl::btree_multiset<> +// +// An `absl::btree_multiset` is an ordered associative container of +// keys and associated values designed to be a more efficient replacement +// for `std::multiset` (in most cases). Unlike `absl::btree_set`, a B-tree +// multiset allows equivalent elements. +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less`. +// +// An `absl::btree_multiset` uses a default allocator of `std::allocator` +// to allocate (and deallocate) nodes, and construct and destruct values within +// those nodes. You may instead specify a custom allocator `A` (which in turn +// requires specifying a custom comparator `C`) as in +// `absl::btree_multiset`. +// +template , + typename Alloc = std::allocator> +class btree_multiset + : public container_internal::btree_multiset_container< + container_internal::btree>> { + using Base = typename btree_multiset::btree_multiset_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_multiset` supports the same overload set as `std::set` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_multiset set1; + // + // * Initializer List constructor + // + // absl::btree_multiset set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::btree_multiset set3(set2); + // + // * Copy assignment operator + // + // absl::btree_multiset set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_multiset set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_multiset set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::btree_multiset set7(v.begin(), v.end()); + btree_multiset() {} + using Base::Base; + + // btree_multiset::begin() + // + // Returns an iterator to the beginning of the `btree_multiset`. + using Base::begin; + + // btree_multiset::cbegin() + // + // Returns a const iterator to the beginning of the `btree_multiset`. + using Base::cbegin; + + // btree_multiset::end() + // + // Returns an iterator to the end of the `btree_multiset`. + using Base::end; + + // btree_multiset::cend() + // + // Returns a const iterator to the end of the `btree_multiset`. 
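Looping back to the `erase_if()` helper defined earlier in this header, a minimal usage sketch; the filter predicate is arbitrary:

#include <iostream>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> s = {1, 2, 3, 4, 5, 6};
  // One linear pass; erase() returns the iterator past the erased element,
  // so the loop inside erase_if never reuses an invalidated iterator.
  absl::erase_if(s, [](int v) { return v % 2 == 0; });
  for (int v : s) std::cout << v << ' ';  // 1 3 5
  return 0;
}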
+ using Base::cend; + + // btree_multiset::empty() + // + // Returns whether or not the `btree_multiset` is empty. + using Base::empty; + + // btree_multiset::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_multiset` under current memory constraints. This value can be + // thought of as the largest value of `std::distance(begin(), end())` for a + // `btree_multiset`. + using Base::max_size; + + // btree_multiset::size() + // + // Returns the number of elements currently within the `btree_multiset`. + using Base::size; + + // btree_multiset::clear() + // + // Removes all elements from the `btree_multiset`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_multiset::erase() + // + // Erases elements within the `btree_multiset`. Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_multiset`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the elements matching the key, if any exist, returning the + // number of elements erased. + using Base::erase; + + // btree_multiset::insert() + // + // Inserts an element of the specified value into the `btree_multiset`, + // returning an iterator pointing to the newly inserted element. + // Any references, pointers, or iterators are invalidated. Overloads are + // listed below. + // + // iterator insert(const value_type& value): + // + // Inserts a value into the `btree_multiset`, returning an iterator to the + // inserted element. + // + // iterator insert(value_type&& value): + // + // Inserts a moveable value into the `btree_multiset`, returning an iterator + // to the inserted element. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_multiset::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multiset`. Any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_multiset::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multiset`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search. + // + // Any references, pointers, or iterators are invalidated. + using Base::emplace_hint; + + // btree_multiset::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. 
+ // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_multiset` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_multiset::merge() + // + // Extracts elements from a given `source` btree_multiset into this + // `btree_multiset`. If the destination `btree_multiset` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // btree_multiset::swap(btree_multiset& other) + // + // Exchanges the contents of this `btree_multiset` with those of the `other` + // btree_multiset, avoiding invocation of any move, copy, or swap operations + // on individual elements. + // + // All iterators and references on the `btree_multiset` remain valid, + // excepting for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_multiset::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_multiset`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. + using Base::contains; + + // btree_multiset::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. + using Base::count; + + // btree_multiset::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_multiset`. + using Base::equal_range; + + // btree_multiset::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set is provided a + // compatible heterogeneous comparator. + using Base::find; + + // btree_multiset::get_allocator() + // + // Returns the allocator function associated with this `btree_multiset`. + using Base::get_allocator; + + // btree_multiset::key_comp(); + // + // Returns the key comparator associated with this `btree_multiset`. + using Base::key_comp; + + // btree_multiset::value_comp(); + // + // Returns the value comparator associated with this `btree_multiset`. The + // keys to sort the elements are the values themselves, therefore `value_comp` + // and its sibling member function `key_comp` are equivalent. + using Base::value_comp; +}; + +// absl::swap(absl::btree_multiset<>, absl::btree_multiset<>) +// +// Swaps the contents of two `absl::btree_multiset` containers. 
+template <typename K, typename C, typename A>
+void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
+  return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_multiset<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+template <typename K, typename C, typename A, typename Pred>
+void erase_if(btree_multiset<K, C, A> &set, Pred pred) {
+  for (auto it = set.begin(); it != set.end();) {
+    if (pred(*it)) {
+      it = set.erase(it);
+    } else {
+      ++it;
+    }
+  }
+}
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_BTREE_SET_H_
diff --git a/base/abseil/absl/container/btree_test.cc b/base/abseil/absl/container/btree_test.cc
new file mode 100644
index 0000000..8692b9c
--- /dev/null
+++ b/base/abseil/absl/container/btree_test.cc
@@ -0,0 +1,2351 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/btree_test.h"
+
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/internal/counting_allocator.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/flags/flag.h"
+#include "absl/hash/hash_testing.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_split.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
+
+ABSL_FLAG(int, test_values, 10000, "The number of values to use for tests");
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::absl::test_internal::CopyableMovableInstance;
+using ::absl::test_internal::InstanceTracker;
+using ::absl::test_internal::MovableOnlyInstance;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::Pair;
+
+template <typename T, typename U>
+void CheckPairEquals(const T &x, const U &y) {
+  ABSL_INTERNAL_CHECK(x == y, "Values are unequal.");
+}
+
+template <typename T, typename U, typename V, typename W>
+void CheckPairEquals(const std::pair<T, V> &x, const std::pair<U, W> &y) {
+  CheckPairEquals(x.first, y.first);
+  CheckPairEquals(x.second, y.second);
+}
+} // namespace
+
+// The base class for a sorted associative container checker. TreeType is the
+// container type to check and CheckerType is the container type to check
+// against. TreeType is expected to be btree_{set,map,multiset,multimap} and
+// CheckerType is expected to be {set,map,multiset,multimap}.
+template +class base_checker { + public: + using key_type = typename TreeType::key_type; + using value_type = typename TreeType::value_type; + using key_compare = typename TreeType::key_compare; + using pointer = typename TreeType::pointer; + using const_pointer = typename TreeType::const_pointer; + using reference = typename TreeType::reference; + using const_reference = typename TreeType::const_reference; + using size_type = typename TreeType::size_type; + using difference_type = typename TreeType::difference_type; + using iterator = typename TreeType::iterator; + using const_iterator = typename TreeType::const_iterator; + using reverse_iterator = typename TreeType::reverse_iterator; + using const_reverse_iterator = typename TreeType::const_reverse_iterator; + + public: + base_checker() : const_tree_(tree_) {} + base_checker(const base_checker &x) + : tree_(x.tree_), const_tree_(tree_), checker_(x.checker_) {} + template + base_checker(InputIterator b, InputIterator e) + : tree_(b, e), const_tree_(tree_), checker_(b, e) {} + + iterator begin() { return tree_.begin(); } + const_iterator begin() const { return tree_.begin(); } + iterator end() { return tree_.end(); } + const_iterator end() const { return tree_.end(); } + reverse_iterator rbegin() { return tree_.rbegin(); } + const_reverse_iterator rbegin() const { return tree_.rbegin(); } + reverse_iterator rend() { return tree_.rend(); } + const_reverse_iterator rend() const { return tree_.rend(); } + + template + IterType iter_check(IterType tree_iter, CheckerIterType checker_iter) const { + if (tree_iter == tree_.end()) { + ABSL_INTERNAL_CHECK(checker_iter == checker_.end(), + "Checker iterator not at end."); + } else { + CheckPairEquals(*tree_iter, *checker_iter); + } + return tree_iter; + } + template + IterType riter_check(IterType tree_iter, CheckerIterType checker_iter) const { + if (tree_iter == tree_.rend()) { + ABSL_INTERNAL_CHECK(checker_iter == checker_.rend(), + "Checker iterator not at rend."); + } else { + CheckPairEquals(*tree_iter, *checker_iter); + } + return tree_iter; + } + void value_check(const value_type &x) { + typename KeyOfValue::type key_of_value; + const key_type &key = key_of_value(x); + CheckPairEquals(*find(key), x); + lower_bound(key); + upper_bound(key); + equal_range(key); + contains(key); + count(key); + } + void erase_check(const key_type &key) { + EXPECT_FALSE(tree_.contains(key)); + EXPECT_EQ(tree_.find(key), const_tree_.end()); + EXPECT_FALSE(const_tree_.contains(key)); + EXPECT_EQ(const_tree_.find(key), tree_.end()); + EXPECT_EQ(tree_.equal_range(key).first, + const_tree_.equal_range(key).second); + } + + iterator lower_bound(const key_type &key) { + return iter_check(tree_.lower_bound(key), checker_.lower_bound(key)); + } + const_iterator lower_bound(const key_type &key) const { + return iter_check(tree_.lower_bound(key), checker_.lower_bound(key)); + } + iterator upper_bound(const key_type &key) { + return iter_check(tree_.upper_bound(key), checker_.upper_bound(key)); + } + const_iterator upper_bound(const key_type &key) const { + return iter_check(tree_.upper_bound(key), checker_.upper_bound(key)); + } + std::pair equal_range(const key_type &key) { + std::pair + checker_res = checker_.equal_range(key); + std::pair tree_res = tree_.equal_range(key); + iter_check(tree_res.first, checker_res.first); + iter_check(tree_res.second, checker_res.second); + return tree_res; + } + std::pair equal_range( + const key_type &key) const { + std::pair + checker_res = checker_.equal_range(key); + std::pair 
tree_res = tree_.equal_range(key); + iter_check(tree_res.first, checker_res.first); + iter_check(tree_res.second, checker_res.second); + return tree_res; + } + iterator find(const key_type &key) { + return iter_check(tree_.find(key), checker_.find(key)); + } + const_iterator find(const key_type &key) const { + return iter_check(tree_.find(key), checker_.find(key)); + } + bool contains(const key_type &key) const { + return find(key) != end(); + } + size_type count(const key_type &key) const { + size_type res = checker_.count(key); + EXPECT_EQ(res, tree_.count(key)); + return res; + } + + base_checker &operator=(const base_checker &x) { + tree_ = x.tree_; + checker_ = x.checker_; + return *this; + } + + int erase(const key_type &key) { + int size = tree_.size(); + int res = checker_.erase(key); + EXPECT_EQ(res, tree_.count(key)); + EXPECT_EQ(res, tree_.erase(key)); + EXPECT_EQ(tree_.count(key), 0); + EXPECT_EQ(tree_.size(), size - res); + erase_check(key); + return res; + } + iterator erase(iterator iter) { + key_type key = iter.key(); + int size = tree_.size(); + int count = tree_.count(key); + auto checker_iter = checker_.lower_bound(key); + for (iterator tmp(tree_.lower_bound(key)); tmp != iter; ++tmp) { + ++checker_iter; + } + auto checker_next = checker_iter; + ++checker_next; + checker_.erase(checker_iter); + iter = tree_.erase(iter); + EXPECT_EQ(tree_.size(), checker_.size()); + EXPECT_EQ(tree_.size(), size - 1); + EXPECT_EQ(tree_.count(key), count - 1); + if (count == 1) { + erase_check(key); + } + return iter_check(iter, checker_next); + } + + void erase(iterator begin, iterator end) { + int size = tree_.size(); + int count = std::distance(begin, end); + auto checker_begin = checker_.lower_bound(begin.key()); + for (iterator tmp(tree_.lower_bound(begin.key())); tmp != begin; ++tmp) { + ++checker_begin; + } + auto checker_end = + end == tree_.end() ? checker_.end() : checker_.lower_bound(end.key()); + if (end != tree_.end()) { + for (iterator tmp(tree_.lower_bound(end.key())); tmp != end; ++tmp) { + ++checker_end; + } + } + checker_.erase(checker_begin, checker_end); + tree_.erase(begin, end); + EXPECT_EQ(tree_.size(), checker_.size()); + EXPECT_EQ(tree_.size(), size - count); + } + + void clear() { + tree_.clear(); + checker_.clear(); + } + void swap(base_checker &x) { + tree_.swap(x.tree_); + checker_.swap(x.checker_); + } + + void verify() const { + tree_.verify(); + EXPECT_EQ(tree_.size(), checker_.size()); + + // Move through the forward iterators using increment. + auto checker_iter = checker_.begin(); + const_iterator tree_iter(tree_.begin()); + for (; tree_iter != tree_.end(); ++tree_iter, ++checker_iter) { + CheckPairEquals(*tree_iter, *checker_iter); + } + + // Move through the forward iterators using decrement. + for (int n = tree_.size() - 1; n >= 0; --n) { + iter_check(tree_iter, checker_iter); + --tree_iter; + --checker_iter; + } + EXPECT_EQ(tree_iter, tree_.begin()); + EXPECT_EQ(checker_iter, checker_.begin()); + + // Move through the reverse iterators using increment. + auto checker_riter = checker_.rbegin(); + const_reverse_iterator tree_riter(tree_.rbegin()); + for (; tree_riter != tree_.rend(); ++tree_riter, ++checker_riter) { + CheckPairEquals(*tree_riter, *checker_riter); + } + + // Move through the reverse iterators using decrement. 
+ for (int n = tree_.size() - 1; n >= 0; --n) { + riter_check(tree_riter, checker_riter); + --tree_riter; + --checker_riter; + } + EXPECT_EQ(tree_riter, tree_.rbegin()); + EXPECT_EQ(checker_riter, checker_.rbegin()); + } + + const TreeType &tree() const { return tree_; } + + size_type size() const { + EXPECT_EQ(tree_.size(), checker_.size()); + return tree_.size(); + } + size_type max_size() const { return tree_.max_size(); } + bool empty() const { + EXPECT_EQ(tree_.empty(), checker_.empty()); + return tree_.empty(); + } + + protected: + TreeType tree_; + const TreeType &const_tree_; + CheckerType checker_; +}; + +namespace { +// A checker for unique sorted associative containers. TreeType is expected to +// be btree_{set,map} and CheckerType is expected to be {set,map}. +template +class unique_checker : public base_checker { + using super_type = base_checker; + + public: + using iterator = typename super_type::iterator; + using value_type = typename super_type::value_type; + + public: + unique_checker() : super_type() {} + unique_checker(const unique_checker &x) : super_type(x) {} + template + unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {} + unique_checker& operator=(const unique_checker&) = default; + + // Insertion routines. + std::pair insert(const value_type &x) { + int size = this->tree_.size(); + std::pair checker_res = + this->checker_.insert(x); + std::pair tree_res = this->tree_.insert(x); + CheckPairEquals(*tree_res.first, *checker_res.first); + EXPECT_EQ(tree_res.second, checker_res.second); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + tree_res.second); + return tree_res; + } + iterator insert(iterator position, const value_type &x) { + int size = this->tree_.size(); + std::pair checker_res = + this->checker_.insert(x); + iterator tree_res = this->tree_.insert(position, x); + CheckPairEquals(*tree_res, *checker_res.first); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + checker_res.second); + return tree_res; + } + template + void insert(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert(*b); + } + } +}; + +// A checker for multiple sorted associative containers. TreeType is expected +// to be btree_{multiset,multimap} and CheckerType is expected to be +// {multiset,multimap}. +template +class multi_checker : public base_checker { + using super_type = base_checker; + + public: + using iterator = typename super_type::iterator; + using value_type = typename super_type::value_type; + + public: + multi_checker() : super_type() {} + multi_checker(const multi_checker &x) : super_type(x) {} + template + multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {} + multi_checker& operator=(const multi_checker&) = default; + + // Insertion routines. 
+ iterator insert(const value_type &x) { + int size = this->tree_.size(); + auto checker_res = this->checker_.insert(x); + iterator tree_res = this->tree_.insert(x); + CheckPairEquals(*tree_res, *checker_res); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + 1); + return tree_res; + } + iterator insert(iterator position, const value_type &x) { + int size = this->tree_.size(); + auto checker_res = this->checker_.insert(x); + iterator tree_res = this->tree_.insert(position, x); + CheckPairEquals(*tree_res, *checker_res); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + 1); + return tree_res; + } + template + void insert(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert(*b); + } + } +}; + +template +void DoTest(const char *name, T *b, const std::vector &values) { + typename KeyOfValue::type key_of_value; + + T &mutable_b = *b; + const T &const_b = *b; + + // Test insert. + for (int i = 0; i < values.size(); ++i) { + mutable_b.insert(values[i]); + mutable_b.value_check(values[i]); + } + ASSERT_EQ(mutable_b.size(), values.size()); + + const_b.verify(); + + // Test copy constructor. + T b_copy(const_b); + EXPECT_EQ(b_copy.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_copy.find(key_of_value(values[i])), values[i]); + } + + // Test range constructor. + T b_range(const_b.begin(), const_b.end()); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + + // Test range insertion for values that already exist. + b_range.insert(b_copy.begin(), b_copy.end()); + b_range.verify(); + + // Test range insertion for new values. + b_range.clear(); + b_range.insert(b_copy.begin(), b_copy.end()); + EXPECT_EQ(b_range.size(), b_copy.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + + // Test assignment to self. Nothing should change. + b_range.operator=(b_range); + EXPECT_EQ(b_range.size(), b_copy.size()); + + // Test assignment of new values. + b_range.clear(); + b_range = b_copy; + EXPECT_EQ(b_range.size(), b_copy.size()); + + // Test swap. + b_range.clear(); + b_range.swap(b_copy); + EXPECT_EQ(b_copy.size(), 0); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + b_range.swap(b_copy); + + // Test non-member function swap. + swap(b_range, b_copy); + EXPECT_EQ(b_copy.size(), 0); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + swap(b_range, b_copy); + + // Test erase via values. + for (int i = 0; i < values.size(); ++i) { + mutable_b.erase(key_of_value(values[i])); + // Erasing a non-existent key should have no effect. + ASSERT_EQ(mutable_b.erase(key_of_value(values[i])), 0); + } + + const_b.verify(); + EXPECT_EQ(const_b.size(), 0); + + // Test erase via iterators. + mutable_b = b_copy; + for (int i = 0; i < values.size(); ++i) { + mutable_b.erase(mutable_b.find(key_of_value(values[i]))); + } + + const_b.verify(); + EXPECT_EQ(const_b.size(), 0); + + // Test insert with hint. + for (int i = 0; i < values.size(); i++) { + mutable_b.insert(mutable_b.upper_bound(key_of_value(values[i])), values[i]); + } + + const_b.verify(); + + // Test range erase. 
+ mutable_b.erase(mutable_b.begin(), mutable_b.end()); + EXPECT_EQ(mutable_b.size(), 0); + const_b.verify(); + + // First half. + mutable_b = b_copy; + typename T::iterator mutable_iter_end = mutable_b.begin(); + for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_end; + mutable_b.erase(mutable_b.begin(), mutable_iter_end); + EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 2); + const_b.verify(); + + // Second half. + mutable_b = b_copy; + typename T::iterator mutable_iter_begin = mutable_b.begin(); + for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_begin; + mutable_b.erase(mutable_iter_begin, mutable_b.end()); + EXPECT_EQ(mutable_b.size(), values.size() / 2); + const_b.verify(); + + // Second quarter. + mutable_b = b_copy; + mutable_iter_begin = mutable_b.begin(); + for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_begin; + mutable_iter_end = mutable_iter_begin; + for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_end; + mutable_b.erase(mutable_iter_begin, mutable_iter_end); + EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 4); + const_b.verify(); + + mutable_b.clear(); +} + +template +void ConstTest() { + using value_type = typename T::value_type; + typename KeyOfValue::type key_of_value; + + T mutable_b; + const T &const_b = mutable_b; + + // Insert a single value into the container and test looking it up. + value_type value = Generator(2)(2); + mutable_b.insert(value); + EXPECT_TRUE(mutable_b.contains(key_of_value(value))); + EXPECT_NE(mutable_b.find(key_of_value(value)), const_b.end()); + EXPECT_TRUE(const_b.contains(key_of_value(value))); + EXPECT_NE(const_b.find(key_of_value(value)), mutable_b.end()); + EXPECT_EQ(*const_b.lower_bound(key_of_value(value)), value); + EXPECT_EQ(const_b.upper_bound(key_of_value(value)), const_b.end()); + EXPECT_EQ(*const_b.equal_range(key_of_value(value)).first, value); + + // We can only create a non-const iterator from a non-const container. + typename T::iterator mutable_iter(mutable_b.begin()); + EXPECT_EQ(mutable_iter, const_b.begin()); + EXPECT_NE(mutable_iter, const_b.end()); + EXPECT_EQ(const_b.begin(), mutable_iter); + EXPECT_NE(const_b.end(), mutable_iter); + typename T::reverse_iterator mutable_riter(mutable_b.rbegin()); + EXPECT_EQ(mutable_riter, const_b.rbegin()); + EXPECT_NE(mutable_riter, const_b.rend()); + EXPECT_EQ(const_b.rbegin(), mutable_riter); + EXPECT_NE(const_b.rend(), mutable_riter); + + // We can create a const iterator from a non-const iterator. + typename T::const_iterator const_iter(mutable_iter); + EXPECT_EQ(const_iter, mutable_b.begin()); + EXPECT_NE(const_iter, mutable_b.end()); + EXPECT_EQ(mutable_b.begin(), const_iter); + EXPECT_NE(mutable_b.end(), const_iter); + typename T::const_reverse_iterator const_riter(mutable_riter); + EXPECT_EQ(const_riter, mutable_b.rbegin()); + EXPECT_NE(const_riter, mutable_b.rend()); + EXPECT_EQ(mutable_b.rbegin(), const_riter); + EXPECT_NE(mutable_b.rend(), const_riter); + + // Make sure various methods can be invoked on a const container. 
+ const_b.verify(); + ASSERT_TRUE(!const_b.empty()); + EXPECT_EQ(const_b.size(), 1); + EXPECT_GT(const_b.max_size(), 0); + EXPECT_TRUE(const_b.contains(key_of_value(value))); + EXPECT_EQ(const_b.count(key_of_value(value)), 1); +} + +template +void BtreeTest() { + ConstTest(); + + using V = typename remove_pair_const::type; + const std::vector random_values = GenerateValuesWithSeed( + absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), + testing::GTEST_FLAG(random_seed)); + + unique_checker container; + + // Test key insertion/deletion in sorted order. + std::vector sorted_values(random_values); + std::sort(sorted_values.begin(), sorted_values.end()); + DoTest("sorted: ", &container, sorted_values); + + // Test key insertion/deletion in reverse sorted order. + std::reverse(sorted_values.begin(), sorted_values.end()); + DoTest("rsorted: ", &container, sorted_values); + + // Test key insertion/deletion in random order. + DoTest("random: ", &container, random_values); +} + +template +void BtreeMultiTest() { + ConstTest(); + + using V = typename remove_pair_const::type; + const std::vector random_values = GenerateValuesWithSeed( + absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), + testing::GTEST_FLAG(random_seed)); + + multi_checker container; + + // Test keys in sorted order. + std::vector sorted_values(random_values); + std::sort(sorted_values.begin(), sorted_values.end()); + DoTest("sorted: ", &container, sorted_values); + + // Test keys in reverse sorted order. + std::reverse(sorted_values.begin(), sorted_values.end()); + DoTest("rsorted: ", &container, sorted_values); + + // Test keys in random order. + DoTest("random: ", &container, random_values); + + // Test keys in random order w/ duplicates. + std::vector duplicate_values(random_values); + duplicate_values.insert(duplicate_values.end(), random_values.begin(), + random_values.end()); + DoTest("duplicates:", &container, duplicate_values); + + // Test all identical keys. + std::vector identical_values(100); + std::fill(identical_values.begin(), identical_values.end(), + Generator(2)(2)); + DoTest("identical: ", &container, identical_values); +} + +template +struct PropagatingCountingAlloc : public CountingAllocator { + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + + using Base = CountingAllocator; + using Base::Base; + + template + explicit PropagatingCountingAlloc(const PropagatingCountingAlloc &other) + : Base(other.bytes_used_) {} + + template + struct rebind { + using other = PropagatingCountingAlloc; + }; +}; + +template +void BtreeAllocatorTest() { + using value_type = typename T::value_type; + + int64_t bytes1 = 0, bytes2 = 0; + PropagatingCountingAlloc allocator1(&bytes1); + PropagatingCountingAlloc allocator2(&bytes2); + Generator generator(1000); + + // Test that we allocate properly aligned memory. If we don't, then Layout + // will assert fail. + auto unused1 = allocator1.allocate(1); + auto unused2 = allocator2.allocate(1); + + // Test copy assignment + { + T b1(typename T::key_compare(), allocator1); + T b2(typename T::key_compare(), allocator2); + + int64_t original_bytes1 = bytes1; + b1.insert(generator(0)); + EXPECT_GT(bytes1, original_bytes1); + + // This should propagate the allocator. 
+ b1 = b2; + EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(b2.size(), 0); + EXPECT_EQ(bytes1, original_bytes1); + + for (int i = 1; i < 1000; i++) { + b1.insert(generator(i)); + } + + // We should have allocated out of allocator2. + EXPECT_GT(bytes2, bytes1); + } + + // Test move assignment + { + T b1(typename T::key_compare(), allocator1); + T b2(typename T::key_compare(), allocator2); + + int64_t original_bytes1 = bytes1; + b1.insert(generator(0)); + EXPECT_GT(bytes1, original_bytes1); + + // This should propagate the allocator. + b1 = std::move(b2); + EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(bytes1, original_bytes1); + + for (int i = 1; i < 1000; i++) { + b1.insert(generator(i)); + } + + // We should have allocated out of allocator2. + EXPECT_GT(bytes2, bytes1); + } + + // Test swap + { + T b1(typename T::key_compare(), allocator1); + T b2(typename T::key_compare(), allocator2); + + int64_t original_bytes1 = bytes1; + b1.insert(generator(0)); + EXPECT_GT(bytes1, original_bytes1); + + // This should swap the allocators. + swap(b1, b2); + EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(b2.size(), 1); + EXPECT_GT(bytes1, original_bytes1); + + for (int i = 1; i < 1000; i++) { + b1.insert(generator(i)); + } + + // We should have allocated out of allocator2. + EXPECT_GT(bytes2, bytes1); + } + + allocator1.deallocate(unused1, 1); + allocator2.deallocate(unused2, 1); +} + +template +void BtreeMapTest() { + using value_type = typename T::value_type; + using mapped_type = typename T::mapped_type; + + mapped_type m = Generator(0)(0); + (void)m; + + T b; + + // Verify we can insert using operator[]. + for (int i = 0; i < 1000; i++) { + value_type v = Generator(1000)(i); + b[v.first] = v.second; + } + EXPECT_EQ(b.size(), 1000); + + // Test whether we can use the "->" operator on iterators and + // reverse_iterators. This stresses the btree_map_params::pair_pointer + // mechanism. 
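In standalone form, the arrow-operator access being stressed here; a minimal sketch with made-up contents:

#include <iostream>
#include <string>
#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, std::string> m = {{1, "one"}, {2, "two"}};
  // operator-> on btree_map iterators goes through a proxy pair pointer, but
  // reads exactly like std::map's it->first / it->second.
  auto it = m.begin();
  std::cout << it->first << ' ' << it->second << '\n';  // 1 one
  std::cout << m.rbegin()->first << '\n';               // 2
  return 0;
}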
+ EXPECT_EQ(b.begin()->first, Generator(1000)(0).first); + EXPECT_EQ(b.begin()->second, Generator(1000)(0).second); + EXPECT_EQ(b.rbegin()->first, Generator(1000)(999).first); + EXPECT_EQ(b.rbegin()->second, Generator(1000)(999).second); +} + +template +void BtreeMultiMapTest() { + using mapped_type = typename T::mapped_type; + mapped_type m = Generator(0)(0); + (void)m; +} + +template +void SetTest() { + EXPECT_EQ( + sizeof(absl::btree_set), + 2 * sizeof(void *) + sizeof(typename absl::btree_set::size_type)); + using BtreeSet = absl::btree_set; + using CountingBtreeSet = + absl::btree_set, PropagatingCountingAlloc>; + BtreeTest>(); + BtreeAllocatorTest(); +} + +template +void MapTest() { + EXPECT_EQ( + sizeof(absl::btree_map), + 2 * sizeof(void *) + sizeof(typename absl::btree_map::size_type)); + using BtreeMap = absl::btree_map; + using CountingBtreeMap = + absl::btree_map, + PropagatingCountingAlloc>>; + BtreeTest>(); + BtreeAllocatorTest(); + BtreeMapTest(); +} + +TEST(Btree, set_int32) { SetTest(); } +TEST(Btree, set_int64) { SetTest(); } +TEST(Btree, set_string) { SetTest(); } +TEST(Btree, set_pair) { SetTest>(); } +TEST(Btree, map_int32) { MapTest(); } +TEST(Btree, map_int64) { MapTest(); } +TEST(Btree, map_string) { MapTest(); } +TEST(Btree, map_pair) { MapTest>(); } + +template +void MultiSetTest() { + EXPECT_EQ( + sizeof(absl::btree_multiset), + 2 * sizeof(void *) + sizeof(typename absl::btree_multiset::size_type)); + using BtreeMSet = absl::btree_multiset; + using CountingBtreeMSet = + absl::btree_multiset, PropagatingCountingAlloc>; + BtreeMultiTest>(); + BtreeAllocatorTest(); +} + +template +void MultiMapTest() { + EXPECT_EQ(sizeof(absl::btree_multimap), + 2 * sizeof(void *) + + sizeof(typename absl::btree_multimap::size_type)); + using BtreeMMap = absl::btree_multimap; + using CountingBtreeMMap = + absl::btree_multimap, + PropagatingCountingAlloc>>; + BtreeMultiTest>(); + BtreeMultiMapTest(); + BtreeAllocatorTest(); +} + +TEST(Btree, multiset_int32) { MultiSetTest(); } +TEST(Btree, multiset_int64) { MultiSetTest(); } +TEST(Btree, multiset_string) { MultiSetTest(); } +TEST(Btree, multiset_pair) { MultiSetTest>(); } +TEST(Btree, multimap_int32) { MultiMapTest(); } +TEST(Btree, multimap_int64) { MultiMapTest(); } +TEST(Btree, multimap_string) { MultiMapTest(); } +TEST(Btree, multimap_pair) { MultiMapTest>(); } + +struct CompareIntToString { + bool operator()(const std::string &a, const std::string &b) const { + return a < b; + } + bool operator()(const std::string &a, int b) const { + return a < absl::StrCat(b); + } + bool operator()(int a, const std::string &b) const { + return absl::StrCat(a) < b; + } + using is_transparent = void; +}; + +struct NonTransparentCompare { + template + bool operator()(const T& t, const U& u) const { + // Treating all comparators as transparent can cause inefficiencies (see + // N3657 C++ proposal). Test that for comparators without 'is_transparent' + // alias (like this one), we do not attempt heterogeneous lookup. + EXPECT_TRUE((std::is_same())); + return t < u; + } +}; + +template +bool CanEraseWithEmptyBrace(T t, decltype(t.erase({})) *) { + return true; +} + +template +bool CanEraseWithEmptyBrace(T, ...) 
{ + return false; +} + +template +void TestHeterogeneous(T table) { + auto lb = table.lower_bound("3"); + EXPECT_EQ(lb, table.lower_bound(3)); + EXPECT_NE(lb, table.lower_bound(4)); + EXPECT_EQ(lb, table.lower_bound({"3"})); + EXPECT_NE(lb, table.lower_bound({})); + + auto ub = table.upper_bound("3"); + EXPECT_EQ(ub, table.upper_bound(3)); + EXPECT_NE(ub, table.upper_bound(5)); + EXPECT_EQ(ub, table.upper_bound({"3"})); + EXPECT_NE(ub, table.upper_bound({})); + + auto er = table.equal_range("3"); + EXPECT_EQ(er, table.equal_range(3)); + EXPECT_NE(er, table.equal_range(4)); + EXPECT_EQ(er, table.equal_range({"3"})); + EXPECT_NE(er, table.equal_range({})); + + auto it = table.find("3"); + EXPECT_EQ(it, table.find(3)); + EXPECT_NE(it, table.find(4)); + EXPECT_EQ(it, table.find({"3"})); + EXPECT_NE(it, table.find({})); + + EXPECT_TRUE(table.contains(3)); + EXPECT_FALSE(table.contains(4)); + EXPECT_TRUE(table.count({"3"})); + EXPECT_FALSE(table.contains({})); + + EXPECT_EQ(1, table.count(3)); + EXPECT_EQ(0, table.count(4)); + EXPECT_EQ(1, table.count({"3"})); + EXPECT_EQ(0, table.count({})); + + auto copy = table; + copy.erase(3); + EXPECT_EQ(table.size() - 1, copy.size()); + copy.erase(4); + EXPECT_EQ(table.size() - 1, copy.size()); + copy.erase({"5"}); + EXPECT_EQ(table.size() - 2, copy.size()); + EXPECT_FALSE(CanEraseWithEmptyBrace(table, nullptr)); + + // Also run it with const T&. + if (std::is_class()) TestHeterogeneous(table); +} + +TEST(Btree, HeterogeneousLookup) { + TestHeterogeneous(btree_set{"1", "3", "5"}); + TestHeterogeneous(btree_map{ + {"1", 1}, {"3", 3}, {"5", 5}}); + TestHeterogeneous( + btree_multiset{"1", "3", "5"}); + TestHeterogeneous(btree_multimap{ + {"1", 1}, {"3", 3}, {"5", 5}}); + + // Only maps have .at() + btree_map map{ + {"", -1}, {"1", 1}, {"3", 3}, {"5", 5}}; + EXPECT_EQ(1, map.at(1)); + EXPECT_EQ(3, map.at({"3"})); + EXPECT_EQ(-1, map.at({})); + const auto &cmap = map; + EXPECT_EQ(1, cmap.at(1)); + EXPECT_EQ(3, cmap.at({"3"})); + EXPECT_EQ(-1, cmap.at({})); +} + +TEST(Btree, NoHeterogeneousLookupWithoutAlias) { + using StringSet = absl::btree_set; + StringSet s; + ASSERT_TRUE(s.insert("hello").second); + ASSERT_TRUE(s.insert("world").second); + EXPECT_TRUE(s.end() == s.find("blah")); + EXPECT_TRUE(s.begin() == s.lower_bound("hello")); + EXPECT_EQ(1, s.count("world")); + EXPECT_TRUE(s.contains("hello")); + EXPECT_TRUE(s.contains("world")); + EXPECT_FALSE(s.contains("blah")); + + using StringMultiSet = + absl::btree_multiset; + StringMultiSet ms; + ms.insert("hello"); + ms.insert("world"); + ms.insert("world"); + EXPECT_TRUE(ms.end() == ms.find("blah")); + EXPECT_TRUE(ms.begin() == ms.lower_bound("hello")); + EXPECT_EQ(2, ms.count("world")); + EXPECT_TRUE(ms.contains("hello")); + EXPECT_TRUE(ms.contains("world")); + EXPECT_FALSE(ms.contains("blah")); +} + +TEST(Btree, DefaultTransparent) { + { + // `int` does not have a default transparent comparator. + // The input value is converted to key_type. + btree_set s = {1}; + double d = 1.1; + EXPECT_EQ(s.begin(), s.find(d)); + EXPECT_TRUE(s.contains(d)); + } + + { + // `std::string` has heterogeneous support. 
+ btree_set s = {"A"}; + EXPECT_EQ(s.begin(), s.find(absl::string_view("A"))); + EXPECT_TRUE(s.contains(absl::string_view("A"))); + } +} + +class StringLike { + public: + StringLike() = default; + + StringLike(const char* s) : s_(s) { // NOLINT + ++constructor_calls_; + } + + bool operator<(const StringLike& a) const { + return s_ < a.s_; + } + + static void clear_constructor_call_count() { + constructor_calls_ = 0; + } + + static int constructor_calls() { + return constructor_calls_; + } + + private: + static int constructor_calls_; + std::string s_; +}; + +int StringLike::constructor_calls_ = 0; + +TEST(Btree, HeterogeneousLookupDoesntDegradePerformance) { + using StringSet = absl::btree_set; + StringSet s; + for (int i = 0; i < 100; ++i) { + ASSERT_TRUE(s.insert(absl::StrCat(i).c_str()).second); + } + StringLike::clear_constructor_call_count(); + s.find("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.contains("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.count("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.lower_bound("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.upper_bound("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.equal_range("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.erase("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); +} + +// Verify that swapping btrees swaps the key comparison functors and that we can +// use non-default constructible comparators. +struct SubstringLess { + SubstringLess() = delete; + explicit SubstringLess(int length) : n(length) {} + bool operator()(const std::string &a, const std::string &b) const { + return absl::string_view(a).substr(0, n) < + absl::string_view(b).substr(0, n); + } + int n; +}; + +TEST(Btree, SwapKeyCompare) { + using SubstringSet = absl::btree_set; + SubstringSet s1(SubstringLess(1), SubstringSet::allocator_type()); + SubstringSet s2(SubstringLess(2), SubstringSet::allocator_type()); + + ASSERT_TRUE(s1.insert("a").second); + ASSERT_FALSE(s1.insert("aa").second); + + ASSERT_TRUE(s2.insert("a").second); + ASSERT_TRUE(s2.insert("aa").second); + ASSERT_FALSE(s2.insert("aaa").second); + + swap(s1, s2); + + ASSERT_TRUE(s1.insert("b").second); + ASSERT_TRUE(s1.insert("bb").second); + ASSERT_FALSE(s1.insert("bbb").second); + + ASSERT_TRUE(s2.insert("b").second); + ASSERT_FALSE(s2.insert("bb").second); +} + +TEST(Btree, UpperBoundRegression) { + // Regress a bug where upper_bound would default-construct a new key_compare + // instead of copying the existing one. + using SubstringSet = absl::btree_set; + SubstringSet my_set(SubstringLess(3)); + my_set.insert("aab"); + my_set.insert("abb"); + // We call upper_bound("aaa"). If this correctly uses the length 3 + // comparator, aaa < aab < abb, so we should get aab as the result. + // If it instead uses the default-constructed length 2 comparator, + // aa == aa < ab, so we'll get abb as our result. 
+ SubstringSet::iterator it = my_set.upper_bound("aaa"); + ASSERT_TRUE(it != my_set.end()); + EXPECT_EQ("aab", *it); +} + +TEST(Btree, Comparison) { + const int kSetSize = 1201; + absl::btree_set my_set; + for (int i = 0; i < kSetSize; ++i) { + my_set.insert(i); + } + absl::btree_set my_set_copy(my_set); + EXPECT_TRUE(my_set_copy == my_set); + EXPECT_TRUE(my_set == my_set_copy); + EXPECT_FALSE(my_set_copy != my_set); + EXPECT_FALSE(my_set != my_set_copy); + + my_set.insert(kSetSize); + EXPECT_FALSE(my_set_copy == my_set); + EXPECT_FALSE(my_set == my_set_copy); + EXPECT_TRUE(my_set_copy != my_set); + EXPECT_TRUE(my_set != my_set_copy); + + my_set.erase(kSetSize - 1); + EXPECT_FALSE(my_set_copy == my_set); + EXPECT_FALSE(my_set == my_set_copy); + EXPECT_TRUE(my_set_copy != my_set); + EXPECT_TRUE(my_set != my_set_copy); + + absl::btree_map my_map; + for (int i = 0; i < kSetSize; ++i) { + my_map[std::string(i, 'a')] = i; + } + absl::btree_map my_map_copy(my_map); + EXPECT_TRUE(my_map_copy == my_map); + EXPECT_TRUE(my_map == my_map_copy); + EXPECT_FALSE(my_map_copy != my_map); + EXPECT_FALSE(my_map != my_map_copy); + + ++my_map_copy[std::string(7, 'a')]; + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); + + my_map_copy = my_map; + my_map["hello"] = kSetSize; + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); + + my_map.erase(std::string(kSetSize - 1, 'a')); + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); +} + +TEST(Btree, RangeCtorSanity) { + std::vector ivec; + ivec.push_back(1); + std::map imap; + imap.insert(std::make_pair(1, 2)); + absl::btree_multiset tmset(ivec.begin(), ivec.end()); + absl::btree_multimap tmmap(imap.begin(), imap.end()); + absl::btree_set tset(ivec.begin(), ivec.end()); + absl::btree_map tmap(imap.begin(), imap.end()); + EXPECT_EQ(1, tmset.size()); + EXPECT_EQ(1, tmmap.size()); + EXPECT_EQ(1, tset.size()); + EXPECT_EQ(1, tmap.size()); +} + +TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) { + absl::btree_map> m; + + std::unique_ptr &v = m["A"]; + EXPECT_TRUE(v == nullptr); + v.reset(new std::string("X")); + + auto iter = m.find("A"); + EXPECT_EQ("X", *iter->second); +} + +TEST(Btree, InitializerListConstructor) { + absl::btree_set set({"a", "b"}); + EXPECT_EQ(set.count("a"), 1); + EXPECT_EQ(set.count("b"), 1); + + absl::btree_multiset mset({1, 1, 4}); + EXPECT_EQ(mset.count(1), 2); + EXPECT_EQ(mset.count(4), 1); + + absl::btree_map map({{1, 5}, {2, 10}}); + EXPECT_EQ(map[1], 5); + EXPECT_EQ(map[2], 10); + + absl::btree_multimap mmap({{1, 5}, {1, 10}}); + auto range = mmap.equal_range(1); + auto it = range.first; + ASSERT_NE(it, range.second); + EXPECT_EQ(it->second, 5); + ASSERT_NE(++it, range.second); + EXPECT_EQ(it->second, 10); + EXPECT_EQ(++it, range.second); +} + +TEST(Btree, InitializerListInsert) { + absl::btree_set set; + set.insert({"a", "b"}); + EXPECT_EQ(set.count("a"), 1); + EXPECT_EQ(set.count("b"), 1); + + absl::btree_multiset mset; + mset.insert({1, 1, 4}); + EXPECT_EQ(mset.count(1), 2); + EXPECT_EQ(mset.count(4), 1); + + absl::btree_map map; + map.insert({{1, 5}, {2, 10}}); + // Test that inserting one element using an initializer list also works. 
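+  // Note: a single braced pair is deduced as one value_type
+  // (std::pair<const int, int>) rather than as a one-element initializer
+  // list; the observable effect is the same either way.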
+ map.insert({3, 15}); + EXPECT_EQ(map[1], 5); + EXPECT_EQ(map[2], 10); + EXPECT_EQ(map[3], 15); + + absl::btree_multimap mmap; + mmap.insert({{1, 5}, {1, 10}}); + auto range = mmap.equal_range(1); + auto it = range.first; + ASSERT_NE(it, range.second); + EXPECT_EQ(it->second, 5); + ASSERT_NE(++it, range.second); + EXPECT_EQ(it->second, 10); + EXPECT_EQ(++it, range.second); +} + +template +void AssertKeyCompareToAdapted() { + using Adapted = typename key_compare_to_adapter::type; + static_assert(!std::is_same::value, + "key_compare_to_adapter should have adapted this comparator."); + static_assert( + std::is_same>::value, + "Adapted comparator should be a key-compare-to comparator."); +} +template +void AssertKeyCompareToNotAdapted() { + using Unadapted = typename key_compare_to_adapter::type; + static_assert( + std::is_same::value, + "key_compare_to_adapter shouldn't have adapted this comparator."); + static_assert( + std::is_same>::value, + "Un-adapted comparator should return bool."); +} + +TEST(Btree, KeyCompareToAdapter) { + AssertKeyCompareToAdapted, std::string>(); + AssertKeyCompareToAdapted, std::string>(); + AssertKeyCompareToAdapted, absl::string_view>(); + AssertKeyCompareToAdapted, + absl::string_view>(); + AssertKeyCompareToNotAdapted, int>(); + AssertKeyCompareToNotAdapted, int>(); +} + +TEST(Btree, RValueInsert) { + InstanceTracker tracker; + + absl::btree_set set; + set.insert(MovableOnlyInstance(1)); + set.insert(MovableOnlyInstance(3)); + MovableOnlyInstance two(2); + set.insert(set.find(MovableOnlyInstance(3)), std::move(two)); + auto it = set.find(MovableOnlyInstance(2)); + ASSERT_NE(it, set.end()); + ASSERT_NE(++it, set.end()); + EXPECT_EQ(it->value(), 3); + + absl::btree_multiset mset; + MovableOnlyInstance zero(0); + MovableOnlyInstance zero2(0); + mset.insert(std::move(zero)); + mset.insert(mset.find(MovableOnlyInstance(0)), std::move(zero2)); + EXPECT_EQ(mset.count(MovableOnlyInstance(0)), 2); + + absl::btree_map map; + std::pair p1 = {1, MovableOnlyInstance(5)}; + std::pair p2 = {2, MovableOnlyInstance(10)}; + std::pair p3 = {3, MovableOnlyInstance(15)}; + map.insert(std::move(p1)); + map.insert(std::move(p3)); + map.insert(map.find(3), std::move(p2)); + ASSERT_NE(map.find(2), map.end()); + EXPECT_EQ(map.find(2)->second.value(), 10); + + absl::btree_multimap mmap; + std::pair p4 = {1, MovableOnlyInstance(5)}; + std::pair p5 = {1, MovableOnlyInstance(10)}; + mmap.insert(std::move(p4)); + mmap.insert(mmap.find(1), std::move(p5)); + auto range = mmap.equal_range(1); + auto it1 = range.first; + ASSERT_NE(it1, range.second); + EXPECT_EQ(it1->second.value(), 10); + ASSERT_NE(++it1, range.second); + EXPECT_EQ(it1->second.value(), 5); + EXPECT_EQ(++it1, range.second); + + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.swaps(), 0); +} + +} // namespace + +class BtreeNodePeer { + public: + // Yields the size of a leaf node with a specific number of values. + template + constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) { + return btree_node< + set_params, std::allocator, + /*TargetNodeSize=*/256, // This parameter isn't used here. + /*Multi=*/false>>::SizeWithNValues(target_values_per_node); + } + + // Yields the number of values in a (non-root) leaf node for this set. + template + constexpr static size_t GetNumValuesPerNode() { + return btree_node::kNodeValues; + } +}; + +namespace { + +// A btree set with a specific number of values per node. 
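+// The node size is derived in two steps: BtreeNodePeer::GetTargetNodeSize(K)
+// computes how many bytes a leaf node needs to hold exactly K values, and
+// that byte count is then passed back in as the TargetNodeSize parameter.
+// E.g. (sketch) SizedBtreeSet<int32_t, 3> produces nodes sized for just three
+// values, the minimum, giving the tallest trees the tests below exercise.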
+template > +class SizedBtreeSet + : public btree_set_container, + BtreeNodePeer::GetTargetNodeSize(TargetValuesPerNode), + /*Multi=*/false>>> { + using Base = typename SizedBtreeSet::btree_set_container; + + public: + SizedBtreeSet() {} + using Base::Base; +}; + +template +void ExpectOperationCounts(const int expected_moves, + const int expected_comparisons, + const std::vector &values, + InstanceTracker *tracker, Set *set) { + for (const int v : values) set->insert(MovableOnlyInstance(v)); + set->clear(); + EXPECT_EQ(tracker->moves(), expected_moves); + EXPECT_EQ(tracker->comparisons(), expected_comparisons); + EXPECT_EQ(tracker->copies(), 0); + EXPECT_EQ(tracker->swaps(), 0); + tracker->ResetCopiesMovesSwaps(); +} + +// Note: when the values in this test change, it is expected to have an impact +// on performance. +TEST(Btree, MovesComparisonsCopiesSwapsTracking) { + InstanceTracker tracker; + // Note: this is minimum number of values per node. + SizedBtreeSet set3; + // Note: this is the default number of values per node for a set of int32s + // (with 64-bit pointers). + SizedBtreeSet set61; + SizedBtreeSet set100; + + // Don't depend on flags for random values because then the expectations will + // fail if the flags change. + std::vector values = + GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); + + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 3); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 100); + if (sizeof(void *) == 8) { + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode>(), + BtreeNodePeer::GetNumValuesPerNode()); + } + + // Test key insertion/deletion in random order. + ExpectOperationCounts(45281, 132551, values, &tracker, &set3); + ExpectOperationCounts(386718, 129807, values, &tracker, &set61); + ExpectOperationCounts(586761, 130310, values, &tracker, &set100); + + // Test key insertion/deletion in sorted order. + std::sort(values.begin(), values.end()); + ExpectOperationCounts(26638, 92134, values, &tracker, &set3); + ExpectOperationCounts(20208, 87757, values, &tracker, &set61); + ExpectOperationCounts(20124, 96583, values, &tracker, &set100); + + // Test key insertion/deletion in reverse sorted order. + std::reverse(values.begin(), values.end()); + ExpectOperationCounts(49951, 119325, values, &tracker, &set3); + ExpectOperationCounts(338813, 118266, values, &tracker, &set61); + ExpectOperationCounts(534529, 125279, values, &tracker, &set100); +} + +struct MovableOnlyInstanceThreeWayCompare { + absl::weak_ordering operator()(const MovableOnlyInstance &a, + const MovableOnlyInstance &b) const { + return a.compare(b); + } +}; + +// Note: when the values in this test change, it is expected to have an impact +// on performance. +TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) { + InstanceTracker tracker; + // Note: this is minimum number of values per node. + SizedBtreeSet + set3; + // Note: this is the default number of values per node for a set of int32s + // (with 64-bit pointers). + SizedBtreeSet + set61; + SizedBtreeSet + set100; + + // Don't depend on flags for random values because then the expectations will + // fail if the flags change. 
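+  // GenerateValuesWithSeed (btree_test.h) draws from std::minstd_rand0 with
+  // this fixed seed and de-duplicates, so the exact same 10000 values appear
+  // on every run and the hard-coded operation counts below stay stable.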
+ std::vector values = + GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); + + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 3); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 100); + if (sizeof(void *) == 8) { + EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode>(), + BtreeNodePeer::GetNumValuesPerNode()); + } + + // Test key insertion/deletion in random order. + ExpectOperationCounts(45281, 122560, values, &tracker, &set3); + ExpectOperationCounts(386718, 119816, values, &tracker, &set61); + ExpectOperationCounts(586761, 120319, values, &tracker, &set100); + + // Test key insertion/deletion in sorted order. + std::sort(values.begin(), values.end()); + ExpectOperationCounts(26638, 92134, values, &tracker, &set3); + ExpectOperationCounts(20208, 87757, values, &tracker, &set61); + ExpectOperationCounts(20124, 96583, values, &tracker, &set100); + + // Test key insertion/deletion in reverse sorted order. + std::reverse(values.begin(), values.end()); + ExpectOperationCounts(49951, 109326, values, &tracker, &set3); + ExpectOperationCounts(338813, 108267, values, &tracker, &set61); + ExpectOperationCounts(534529, 115280, values, &tracker, &set100); +} + +struct NoDefaultCtor { + int num; + explicit NoDefaultCtor(int i) : num(i) {} + + friend bool operator<(const NoDefaultCtor& a, const NoDefaultCtor& b) { + return a.num < b.num; + } +}; + +TEST(Btree, BtreeMapCanHoldNoDefaultCtorTypes) { + absl::btree_map m; + + for (int i = 1; i <= 99; ++i) { + SCOPED_TRACE(i); + EXPECT_TRUE(m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)).second); + } + EXPECT_FALSE(m.emplace(NoDefaultCtor(78), NoDefaultCtor(0)).second); + + auto iter99 = m.find(NoDefaultCtor(99)); + ASSERT_NE(iter99, m.end()); + EXPECT_EQ(iter99->second.num, 1); + + auto iter1 = m.find(NoDefaultCtor(1)); + ASSERT_NE(iter1, m.end()); + EXPECT_EQ(iter1->second.num, 99); + + auto iter50 = m.find(NoDefaultCtor(50)); + ASSERT_NE(iter50, m.end()); + EXPECT_EQ(iter50->second.num, 50); + + auto iter25 = m.find(NoDefaultCtor(25)); + ASSERT_NE(iter25, m.end()); + EXPECT_EQ(iter25->second.num, 75); +} + +TEST(Btree, BtreeMultimapCanHoldNoDefaultCtorTypes) { + absl::btree_multimap m; + + for (int i = 1; i <= 99; ++i) { + SCOPED_TRACE(i); + m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)); + } + + auto iter99 = m.find(NoDefaultCtor(99)); + ASSERT_NE(iter99, m.end()); + EXPECT_EQ(iter99->second.num, 1); + + auto iter1 = m.find(NoDefaultCtor(1)); + ASSERT_NE(iter1, m.end()); + EXPECT_EQ(iter1->second.num, 99); + + auto iter50 = m.find(NoDefaultCtor(50)); + ASSERT_NE(iter50, m.end()); + EXPECT_EQ(iter50->second.num, 50); + + auto iter25 = m.find(NoDefaultCtor(25)); + ASSERT_NE(iter25, m.end()); + EXPECT_EQ(iter25->second.num, 75); +} + +TEST(Btree, MapAt) { + absl::btree_map map = {{1, 2}, {2, 4}}; + EXPECT_EQ(map.at(1), 2); + EXPECT_EQ(map.at(2), 4); + map.at(2) = 8; + const absl::btree_map &const_map = map; + EXPECT_EQ(const_map.at(1), 2); + EXPECT_EQ(const_map.at(2), 8); +#ifdef ABSL_HAVE_EXCEPTIONS + EXPECT_THROW(map.at(3), std::out_of_range); +#else + EXPECT_DEATH(map.at(3), "absl::btree_map::at"); +#endif +} + +TEST(Btree, BtreeMultisetEmplace) { + const int value_to_insert = 123456; + absl::btree_multiset s; + auto iter = s.emplace(value_to_insert); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(*iter, value_to_insert); + auto iter2 = s.emplace(value_to_insert); + EXPECT_NE(iter2, iter); + ASSERT_NE(iter2, s.end()); + EXPECT_EQ(*iter2, value_to_insert); + auto result = 
s.equal_range(value_to_insert);
+  EXPECT_EQ(std::distance(result.first, result.second), 2);
+}
+
+TEST(Btree, BtreeMultisetEmplaceHint) {
+  const int value_to_insert = 123456;
+  absl::btree_multiset<int> s;
+  auto iter = s.emplace(value_to_insert);
+  ASSERT_NE(iter, s.end());
+  EXPECT_EQ(*iter, value_to_insert);
+  auto emplace_iter = s.emplace_hint(iter, value_to_insert);
+  EXPECT_NE(emplace_iter, iter);
+  ASSERT_NE(emplace_iter, s.end());
+  EXPECT_EQ(*emplace_iter, value_to_insert);
+}
+
+TEST(Btree, BtreeMultimapEmplace) {
+  const int key_to_insert = 123456;
+  const char value0[] = "a";
+  absl::btree_multimap<int, std::string> s;
+  auto iter = s.emplace(key_to_insert, value0);
+  ASSERT_NE(iter, s.end());
+  EXPECT_EQ(iter->first, key_to_insert);
+  EXPECT_EQ(iter->second, value0);
+  const char value1[] = "b";
+  auto iter2 = s.emplace(key_to_insert, value1);
+  EXPECT_NE(iter2, iter);
+  ASSERT_NE(iter2, s.end());
+  EXPECT_EQ(iter2->first, key_to_insert);
+  EXPECT_EQ(iter2->second, value1);
+  auto result = s.equal_range(key_to_insert);
+  EXPECT_EQ(std::distance(result.first, result.second), 2);
+}
+
+TEST(Btree, BtreeMultimapEmplaceHint) {
+  const int key_to_insert = 123456;
+  const char value0[] = "a";
+  absl::btree_multimap<int, std::string> s;
+  auto iter = s.emplace(key_to_insert, value0);
+  ASSERT_NE(iter, s.end());
+  EXPECT_EQ(iter->first, key_to_insert);
+  EXPECT_EQ(iter->second, value0);
+  const char value1[] = "b";
+  auto emplace_iter = s.emplace_hint(iter, key_to_insert, value1);
+  EXPECT_NE(emplace_iter, iter);
+  ASSERT_NE(emplace_iter, s.end());
+  EXPECT_EQ(emplace_iter->first, key_to_insert);
+  EXPECT_EQ(emplace_iter->second, value1);
+}
+
+TEST(Btree, ConstIteratorAccessors) {
+  absl::btree_set<int> set;
+  for (int i = 0; i < 100; ++i) {
+    set.insert(i);
+  }
+
+  auto it = set.cbegin();
+  auto r_it = set.crbegin();
+  for (int i = 0; i < 100; ++i, ++it, ++r_it) {
+    ASSERT_EQ(*it, i);
+    ASSERT_EQ(*r_it, 99 - i);
+  }
+  EXPECT_EQ(it, set.cend());
+  EXPECT_EQ(r_it, set.crend());
+}
+
+TEST(Btree, StrSplitCompatible) {
+  const absl::btree_set<std::string> split_set = absl::StrSplit("a,b,c", ',');
+  const absl::btree_set<std::string> expected_set = {"a", "b", "c"};
+
+  EXPECT_EQ(split_set, expected_set);
+}
+
+// We can't use EXPECT_EQ/etc. to compare absl::weak_ordering because they
+// convert literal 0 to int and absl::weak_ordering can only be compared with
+// literal 0. Defining this function allows for avoiding ClangTidy warnings.
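+// Usage sketch: write EXPECT_TRUE(Identity(cmp(a, b) < 0)) instead of
+// EXPECT_LT(cmp(a, b), 0), so the literal 0 is still a literal at the point
+// where it meets absl::weak_ordering.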
+bool Identity(const bool b) { return b; } + +TEST(Btree, ValueComp) { + absl::btree_set s; + EXPECT_TRUE(s.value_comp()(1, 2)); + EXPECT_FALSE(s.value_comp()(2, 2)); + EXPECT_FALSE(s.value_comp()(2, 1)); + + absl::btree_map m1; + EXPECT_TRUE(m1.value_comp()(std::make_pair(1, 0), std::make_pair(2, 0))); + EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0))); + EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0))); + + absl::btree_map m2; + EXPECT_TRUE(Identity( + m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)) < 0)); + EXPECT_TRUE(Identity( + m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)) == 0)); + EXPECT_TRUE(Identity( + m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)) > 0)); +} + +TEST(Btree, DefaultConstruction) { + absl::btree_set s; + absl::btree_map m; + absl::btree_multiset ms; + absl::btree_multimap mm; + + EXPECT_TRUE(s.empty()); + EXPECT_TRUE(m.empty()); + EXPECT_TRUE(ms.empty()); + EXPECT_TRUE(mm.empty()); +} + +TEST(Btree, SwissTableHashable) { + static constexpr int kValues = 10000; + std::vector values(kValues); + std::iota(values.begin(), values.end(), 0); + std::vector> map_values; + for (int v : values) map_values.emplace_back(v, -v); + + using set = absl::btree_set; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + set{}, + set{1}, + set{2}, + set{1, 2}, + set{2, 1}, + set(values.begin(), values.end()), + set(values.rbegin(), values.rend()), + })); + + using mset = absl::btree_multiset; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + mset{}, + mset{1}, + mset{1, 1}, + mset{2}, + mset{2, 2}, + mset{1, 2}, + mset{1, 1, 2}, + mset{1, 2, 2}, + mset{1, 1, 2, 2}, + mset(values.begin(), values.end()), + mset(values.rbegin(), values.rend()), + })); + + using map = absl::btree_map; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + map{}, + map{{1, 0}}, + map{{1, 1}}, + map{{2, 0}}, + map{{2, 2}}, + map{{1, 0}, {2, 1}}, + map(map_values.begin(), map_values.end()), + map(map_values.rbegin(), map_values.rend()), + })); + + using mmap = absl::btree_multimap; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + mmap{}, + mmap{{1, 0}}, + mmap{{1, 1}}, + mmap{{1, 0}, {1, 1}}, + mmap{{1, 1}, {1, 0}}, + mmap{{2, 0}}, + mmap{{2, 2}}, + mmap{{1, 0}, {2, 1}}, + mmap(map_values.begin(), map_values.end()), + mmap(map_values.rbegin(), map_values.rend()), + })); +} + +TEST(Btree, ComparableSet) { + absl::btree_set s1 = {1, 2}; + absl::btree_set s2 = {2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableSetsDifferentLength) { + absl::btree_set s1 = {1, 2}; + absl::btree_set s2 = {1, 2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); +} + +TEST(Btree, ComparableMultiset) { + absl::btree_multiset s1 = {1, 2}; + absl::btree_multiset s2 = {2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableMap) { + absl::btree_map s1 = {{1, 2}}; + absl::btree_map s2 = {{2, 3}}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableMultimap) { + absl::btree_multimap s1 = {{1, 2}}; + absl::btree_multimap s2 = {{2, 3}}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + 
EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableSetWithCustomComparator) { + // As specified by + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3337.pdf section + // [container.requirements.general].12, ordering associative containers always + // uses default '<' operator + // - even if otherwise the container uses custom functor. + absl::btree_set> s1 = {1, 2}; + absl::btree_set> s2 = {2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, EraseReturnsIterator) { + absl::btree_set set = {1, 2, 3, 4, 5}; + auto result_it = set.erase(set.begin(), set.find(3)); + EXPECT_EQ(result_it, set.find(3)); + result_it = set.erase(set.find(5)); + EXPECT_EQ(result_it, set.end()); +} + +TEST(Btree, ExtractAndInsertNodeHandleSet) { + absl::btree_set src1 = {1, 2, 3, 4, 5}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(1, 2, 4, 5)); + absl::btree_set other; + absl::btree_set::insert_return_type res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_TRUE(res.inserted); + EXPECT_TRUE(res.node.empty()); + + absl::btree_set src2 = {3, 4}; + nh = src2.extract(src2.find(3)); + EXPECT_THAT(src2, ElementsAre(4)); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_FALSE(res.inserted); + ASSERT_FALSE(res.node.empty()); + EXPECT_EQ(res.node.value(), 3); +} + +template +void TestExtractWithTrackingForSet() { + InstanceTracker tracker; + { + Set s; + // Add enough elements to make sure we test internal nodes too. + const size_t kSize = 1000; + while (s.size() < kSize) { + s.insert(MovableOnlyInstance(s.size())); + } + for (int i = 0; i < kSize; ++i) { + // Extract with key + auto nh = s.extract(MovableOnlyInstance(i)); + EXPECT_EQ(s.size(), kSize - 1); + EXPECT_EQ(nh.value().value(), i); + // Insert with node + s.insert(std::move(nh)); + EXPECT_EQ(s.size(), kSize); + + // Extract with iterator + auto it = s.find(MovableOnlyInstance(i)); + nh = s.extract(it); + EXPECT_EQ(s.size(), kSize - 1); + EXPECT_EQ(nh.value().value(), i); + // Insert with node and hint + s.insert(s.begin(), std::move(nh)); + EXPECT_EQ(s.size(), kSize); + } + } + EXPECT_EQ(0, tracker.instances()); +} + +template +void TestExtractWithTrackingForMap() { + InstanceTracker tracker; + { + Map m; + // Add enough elements to make sure we test internal nodes too. 
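+    // 1000 elements is well past a single node's capacity, so the extract and
+    // reinsert cycles below exercise rebalancing through internal nodes, not
+    // just a root leaf.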
+ const size_t kSize = 1000; + while (m.size() < kSize) { + m.insert( + {CopyableMovableInstance(m.size()), MovableOnlyInstance(m.size())}); + } + for (int i = 0; i < kSize; ++i) { + // Extract with key + auto nh = m.extract(CopyableMovableInstance(i)); + EXPECT_EQ(m.size(), kSize - 1); + EXPECT_EQ(nh.key().value(), i); + EXPECT_EQ(nh.mapped().value(), i); + // Insert with node + m.insert(std::move(nh)); + EXPECT_EQ(m.size(), kSize); + + // Extract with iterator + auto it = m.find(CopyableMovableInstance(i)); + nh = m.extract(it); + EXPECT_EQ(m.size(), kSize - 1); + EXPECT_EQ(nh.key().value(), i); + EXPECT_EQ(nh.mapped().value(), i); + // Insert with node and hint + m.insert(m.begin(), std::move(nh)); + EXPECT_EQ(m.size(), kSize); + } + } + EXPECT_EQ(0, tracker.instances()); +} + +TEST(Btree, ExtractTracking) { + TestExtractWithTrackingForSet>(); + TestExtractWithTrackingForSet>(); + TestExtractWithTrackingForMap< + absl::btree_map>(); + TestExtractWithTrackingForMap< + absl::btree_multimap>(); +} + +TEST(Btree, ExtractAndInsertNodeHandleMultiSet) { + absl::btree_multiset src1 = {1, 2, 3, 3, 4, 5}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(1, 2, 3, 4, 5)); + absl::btree_multiset other; + auto res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res, other.find(3)); + + absl::btree_multiset src2 = {3, 4}; + nh = src2.extract(src2.find(3)); + EXPECT_THAT(src2, ElementsAre(4)); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3, 3)); + EXPECT_EQ(res, ++other.find(3)); +} + +TEST(Btree, ExtractAndInsertNodeHandleMap) { + absl::btree_map src1 = {{1, 2}, {3, 4}, {5, 6}}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6))); + absl::btree_map other; + absl::btree_map::insert_return_type res = + other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_TRUE(res.inserted); + EXPECT_TRUE(res.node.empty()); + + absl::btree_map src2 = {{3, 6}}; + nh = src2.extract(src2.find(3)); + EXPECT_TRUE(src2.empty()); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_FALSE(res.inserted); + ASSERT_FALSE(res.node.empty()); + EXPECT_EQ(res.node.key(), 3); + EXPECT_EQ(res.node.mapped(), 6); +} + +TEST(Btree, ExtractAndInsertNodeHandleMultiMap) { + absl::btree_multimap src1 = {{1, 2}, {3, 4}, {5, 6}}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6))); + absl::btree_multimap other; + auto res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res, other.find(3)); + + absl::btree_multimap src2 = {{3, 6}}; + nh = src2.extract(src2.find(3)); + EXPECT_TRUE(src2.empty()); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4), Pair(3, 6))); + EXPECT_EQ(res, ++other.begin()); +} + +// For multisets, insert with hint also affects correctness because we need to +// insert immediately before the hint if possible. 
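+// E.g. for a multiset holding equivalent keys with distinct payloads
+// {3a, 3b}, insert(iterator_to_3b, 3c) should yield the order {3a, 3c, 3b}.
+// InsertMultiHintData below makes the payload observable while comparing
+// only on `key`.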
+struct InsertMultiHintData { + int key; + int not_key; + bool operator==(const InsertMultiHintData other) const { + return key == other.key && not_key == other.not_key; + } +}; + +struct InsertMultiHintDataKeyCompare { + using is_transparent = void; + bool operator()(const InsertMultiHintData a, + const InsertMultiHintData b) const { + return a.key < b.key; + } + bool operator()(const int a, const InsertMultiHintData b) const { + return a < b.key; + } + bool operator()(const InsertMultiHintData a, const int b) const { + return a.key < b; + } +}; + +TEST(Btree, InsertHintNodeHandle) { + // For unique sets, insert with hint is just a performance optimization. + // Test that insert works correctly when the hint is right or wrong. + { + absl::btree_set src = {1, 2, 3, 4, 5}; + auto nh = src.extract(src.find(3)); + EXPECT_THAT(src, ElementsAre(1, 2, 4, 5)); + absl::btree_set other = {0, 100}; + // Test a correct hint. + auto it = other.insert(other.lower_bound(3), std::move(nh)); + EXPECT_THAT(other, ElementsAre(0, 3, 100)); + EXPECT_EQ(it, other.find(3)); + + nh = src.extract(src.find(5)); + // Test an incorrect hint. + it = other.insert(other.end(), std::move(nh)); + EXPECT_THAT(other, ElementsAre(0, 3, 5, 100)); + EXPECT_EQ(it, other.find(5)); + } + + absl::btree_multiset src = + {{1, 2}, {3, 4}, {3, 5}}; + auto nh = src.extract(src.lower_bound(3)); + EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 4})); + absl::btree_multiset + other = {{3, 1}, {3, 2}, {3, 3}}; + auto it = other.insert(--other.end(), std::move(nh)); + EXPECT_THAT( + other, ElementsAre(InsertMultiHintData{3, 1}, InsertMultiHintData{3, 2}, + InsertMultiHintData{3, 4}, InsertMultiHintData{3, 3})); + EXPECT_EQ(it, --(--other.end())); + + nh = src.extract(src.find(3)); + EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 5})); + it = other.insert(other.begin(), std::move(nh)); + EXPECT_THAT(other, + ElementsAre(InsertMultiHintData{3, 5}, InsertMultiHintData{3, 1}, + InsertMultiHintData{3, 2}, InsertMultiHintData{3, 4}, + InsertMultiHintData{3, 3})); + EXPECT_EQ(it, other.begin()); +} + +struct IntCompareToCmp { + absl::weak_ordering operator()(int a, int b) const { + if (a < b) return absl::weak_ordering::less; + if (a > b) return absl::weak_ordering::greater; + return absl::weak_ordering::equivalent; + } +}; + +TEST(Btree, MergeIntoUniqueContainers) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_set dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_THAT(src2, ElementsAre(3, 4)); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(Btree, MergeIntoUniqueContainersWithCompareTo) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_set dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_THAT(src2, ElementsAre(3, 4)); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(Btree, MergeIntoMultiContainers) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_multiset dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5)); +} + +TEST(Btree, MergeIntoMultiContainersWithCompareTo) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_multiset dst; + + dst.merge(src1); + 
EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5)); +} + +TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) { + absl::btree_map src1 = {{1, 1}, {2, 2}, {3, 3}}; + absl::btree_multimap> src2 = { + {5, 5}, {4, 1}, {4, 4}, {3, 2}}; + absl::btree_multimap dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3))); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(3, 2), + Pair(4, 1), Pair(4, 4), Pair(5, 5))); +} + +struct KeyCompareToWeakOrdering { + template + absl::weak_ordering operator()(const T &a, const T &b) const { + return a < b ? absl::weak_ordering::less + : a == b ? absl::weak_ordering::equivalent + : absl::weak_ordering::greater; + } +}; + +struct KeyCompareToStrongOrdering { + template + absl::strong_ordering operator()(const T &a, const T &b) const { + return a < b ? absl::strong_ordering::less + : a == b ? absl::strong_ordering::equal + : absl::strong_ordering::greater; + } +}; + +TEST(Btree, UserProvidedKeyCompareToComparators) { + absl::btree_set weak_set = {1, 2, 3}; + EXPECT_TRUE(weak_set.contains(2)); + EXPECT_FALSE(weak_set.contains(4)); + + absl::btree_set strong_set = {1, 2, 3}; + EXPECT_TRUE(strong_set.contains(2)); + EXPECT_FALSE(strong_set.contains(4)); +} + +TEST(Btree, TryEmplaceBasicTest) { + absl::btree_map m; + + // Should construct a std::string from the literal. + m.try_emplace(1, "one"); + EXPECT_EQ(1, m.size()); + + // Try other std::string constructors and const lvalue key. + const int key(42); + m.try_emplace(key, 3, 'a'); + m.try_emplace(2, std::string("two")); + + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); + EXPECT_THAT(m, ElementsAreArray(std::vector>{ + {1, "one"}, {2, "two"}, {42, "aaa"}})); +} + +TEST(Btree, TryEmplaceWithHintWorks) { + // Use a counting comparator here to verify that hint is used. + int calls = 0; + auto cmp = [&calls](int x, int y) { + ++calls; + return x < y; + }; + using Cmp = decltype(cmp); + + absl::btree_map m(cmp); + for (int i = 0; i < 128; ++i) { + m.emplace(i, i); + } + + // Sanity check for the comparator + calls = 0; + m.emplace(127, 127); + EXPECT_GE(calls, 4); + + // Try with begin hint: + calls = 0; + auto it = m.try_emplace(m.begin(), -1, -1); + EXPECT_EQ(129, m.size()); + EXPECT_EQ(it, m.begin()); + EXPECT_LE(calls, 2); + + // Try with end hint: + calls = 0; + std::pair pair1024 = {1024, 1024}; + it = m.try_emplace(m.end(), pair1024.first, pair1024.second); + EXPECT_EQ(130, m.size()); + EXPECT_EQ(it, --m.end()); + EXPECT_LE(calls, 2); + + // Try value already present, bad hint; ensure no duplicate added: + calls = 0; + it = m.try_emplace(m.end(), 16, 17); + EXPECT_EQ(130, m.size()); + EXPECT_GE(calls, 4); + EXPECT_EQ(it, m.find(16)); + + // Try value already present, hint points directly to it: + calls = 0; + it = m.try_emplace(it, 16, 17); + EXPECT_EQ(130, m.size()); + EXPECT_LE(calls, 2); + EXPECT_EQ(it, m.find(16)); + + m.erase(2); + EXPECT_EQ(129, m.size()); + auto hint = m.find(3); + // Try emplace in the middle of two other elements. 
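+  // With a correct hint, only the hint and its neighbor need to be checked,
+  // hence the `calls <= 2` bounds in this test, versus the O(log n)
+  // comparisons of a hintless lookup (`calls >= 4` above).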
+ calls = 0; + m.try_emplace(hint, 2, 2); + EXPECT_EQ(130, m.size()); + EXPECT_LE(calls, 2); + + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); +} + +TEST(Btree, TryEmplaceWithBadHint) { + absl::btree_map m = {{1, 1}, {9, 9}}; + + // Bad hint (too small), should still emplace: + auto it = m.try_emplace(m.begin(), 2, 2); + EXPECT_EQ(it, ++m.begin()); + EXPECT_THAT(m, ElementsAreArray( + std::vector>{{1, 1}, {2, 2}, {9, 9}})); + + // Bad hint, too large this time: + it = m.try_emplace(++(++m.begin()), 0, 0); + EXPECT_EQ(it, m.begin()); + EXPECT_THAT(m, ElementsAreArray(std::vector>{ + {0, 0}, {1, 1}, {2, 2}, {9, 9}})); +} + +TEST(Btree, TryEmplaceMaintainsSortedOrder) { + absl::btree_map m; + std::pair pair5 = {5, "five"}; + + // Test both lvalue & rvalue emplace. + m.try_emplace(10, "ten"); + m.try_emplace(pair5.first, pair5.second); + EXPECT_EQ(2, m.size()); + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); + + int int100{100}; + m.try_emplace(int100, "hundred"); + m.try_emplace(1, "one"); + EXPECT_EQ(4, m.size()); + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); +} + +TEST(Btree, TryEmplaceWithHintAndNoValueArgsWorks) { + absl::btree_map m; + m.try_emplace(m.end(), 1); + EXPECT_EQ(0, m[1]); +} + +TEST(Btree, TryEmplaceWithHintAndMultipleValueArgsWorks) { + absl::btree_map m; + m.try_emplace(m.end(), 1, 10, 'a'); + EXPECT_EQ(std::string(10, 'a'), m[1]); +} + +TEST(Btree, MoveAssignmentAllocatorPropagation) { + InstanceTracker tracker; + + int64_t bytes1 = 0, bytes2 = 0; + PropagatingCountingAlloc allocator1(&bytes1); + PropagatingCountingAlloc allocator2(&bytes2); + std::less cmp; + + // Test propagating allocator_type. + { + absl::btree_set, + PropagatingCountingAlloc> + set1(cmp, allocator1), set2(cmp, allocator2); + + for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i)); + + tracker.ResetCopiesMovesSwaps(); + set2 = std::move(set1); + EXPECT_EQ(tracker.moves(), 0); + } + // Test non-propagating allocator_type with equal allocators. + { + absl::btree_set, + CountingAllocator> + set1(cmp, allocator1), set2(cmp, allocator1); + + for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i)); + + tracker.ResetCopiesMovesSwaps(); + set2 = std::move(set1); + EXPECT_EQ(tracker.moves(), 0); + } + // Test non-propagating allocator_type with different allocators. + { + absl::btree_set, + CountingAllocator> + set1(cmp, allocator1), set2(cmp, allocator2); + + for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i)); + + tracker.ResetCopiesMovesSwaps(); + set2 = std::move(set1); + EXPECT_GE(tracker.moves(), 100); + } +} + +TEST(Btree, EmptyTree) { + absl::btree_set s; + EXPECT_TRUE(s.empty()); + EXPECT_EQ(s.size(), 0); + EXPECT_GT(s.max_size(), 0); +} + +bool IsEven(int k) { return k % 2 == 0; } + +TEST(Btree, EraseIf) { + // Test that erase_if works with all the container types and supports lambdas. 
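+  // erase_if(c, pred) removes every element for which pred returns true;
+  // roughly equivalent to (sketch):
+  //   for (auto it = c.begin(); it != c.end();) {
+  //     if (pred(*it)) it = c.erase(it); else ++it;
+  //   }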
+  {
+    absl::btree_set<int> s = {1, 3, 5, 6, 100};
+    erase_if(s, [](int k) { return k > 3; });
+    EXPECT_THAT(s, ElementsAre(1, 3));
+  }
+  {
+    absl::btree_multiset<int> s = {1, 3, 3, 5, 6, 6, 100};
+    erase_if(s, [](int k) { return k <= 3; });
+    EXPECT_THAT(s, ElementsAre(5, 6, 6, 100));
+  }
+  {
+    absl::btree_map<int, int> m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}};
+    erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; });
+    EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3)));
+  }
+  {
+    absl::btree_multimap<int, int> m = {{1, 1}, {3, 3}, {3, 6},
+                                        {6, 6}, {6, 7}, {100, 6}};
+    erase_if(m, [](std::pair<const int, int> kv) { return kv.second == 6; });
+    EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7)));
+  }
+  // Test that erasing all elements from a large set works and test support
+  // for function pointers.
+  {
+    absl::btree_set<int> s;
+    for (int i = 0; i < 1000; ++i) s.insert(2 * i);
+    erase_if(s, IsEven);
+    EXPECT_THAT(s, IsEmpty());
+  }
+  // Test that erase_if also accepts an explicit function pointer (&IsEven).
+  {
+    absl::btree_set<int> s = {1, 3, 5, 6, 100};
+    erase_if(s, &IsEven);
+    EXPECT_THAT(s, ElementsAre(1, 3, 5));
+  }
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/container/btree_test.h b/base/abseil/absl/container/btree_test.h
new file mode 100644
index 0000000..218ba41
--- /dev/null
+++ b/base/abseil/absl/container/btree_test.h
@@ -0,0 +1,155 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_BTREE_TEST_H_
+#define ABSL_CONTAINER_BTREE_TEST_H_
+
+#include <algorithm>
+#include <cassert>
+#include <random>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Like remove_const but propagates the removal through std::pair.
+template <typename T>
+struct remove_pair_const {
+  using type = typename std::remove_const<T>::type;
+};
+template <typename T, typename U>
+struct remove_pair_const<std::pair<T, U> > {
+  using type = std::pair<typename remove_pair_const<T>::type,
+                         typename remove_pair_const<U>::type>;
+};
+
+// Utility class to provide an accessor for a key given a value. The default
+// behavior is to treat the value as a pair and return the first element.
+template <typename K, typename V>
+struct KeyOfValue {
+  struct type {
+    const K& operator()(const V& p) const { return p.first; }
+  };
+};
+
+// Partial specialization of KeyOfValue class for when the key and value are
+// the same type such as in set<> and btree_set<>.
+template <typename K>
+struct KeyOfValue<K, K> {
+  struct type {
+    const K& operator()(const K& k) const { return k; }
+  };
+};
+
+inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) {
+  assert(val <= maxval);
+  constexpr unsigned kBase = 64;  // avoid integer division.
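+  // The digit loop below runs once per base-64 digit of maxval, independent
+  // of val, so every generated string has the same length and lexicographic
+  // order of the strings matches numeric order of the inputs.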
+ unsigned p = 15; + buf[p--] = 0; + while (maxval > 0) { + buf[p--] = ' ' + (val % kBase); + val /= kBase; + maxval /= kBase; + } + return buf + p + 1; +} + +template +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + K operator()(int i) const { + assert(i <= maxval); + return K(i); + } +}; + +template <> +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + absl::Time operator()(int i) const { return absl::FromUnixMillis(i); } +}; + +template <> +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + std::string operator()(int i) const { + char buf[16]; + return GenerateDigits(buf, i, maxval); + } +}; + +template +struct Generator > { + Generator::type> tgen; + Generator::type> ugen; + + explicit Generator(int m) : tgen(m), ugen(m) {} + std::pair operator()(int i) const { + return std::make_pair(tgen(i), ugen(i)); + } +}; + +// Generate n values for our tests and benchmarks. Value range is [0, maxval]. +inline std::vector GenerateNumbersWithSeed(int n, int maxval, int seed) { + // NOTE: Some tests rely on generated numbers not changing between test runs. + // We use std::minstd_rand0 because it is well-defined, but don't use + // std::uniform_int_distribution because platforms use different algorithms. + std::minstd_rand0 rng(seed); + + std::vector values; + absl::flat_hash_set unique_values; + if (values.size() < n) { + for (int i = values.size(); i < n; i++) { + int value; + do { + value = static_cast(rng()) % (maxval + 1); + } while (!unique_values.insert(value).second); + + values.push_back(value); + } + } + return values; +} + +// Generates n values in the range [0, maxval]. +template +std::vector GenerateValuesWithSeed(int n, int maxval, int seed) { + const std::vector nums = GenerateNumbersWithSeed(n, maxval, seed); + Generator gen(maxval); + std::vector vec; + + vec.reserve(n); + for (int i = 0; i < n; i++) { + vec.push_back(gen(nums[i])); + } + + return vec; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_BTREE_TEST_H_ diff --git a/base/abseil/absl/container/fixed_array.h b/base/abseil/absl/container/fixed_array.h new file mode 100644 index 0000000..a9ce99b --- /dev/null +++ b/base/abseil/absl/container/fixed_array.h @@ -0,0 +1,515 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: fixed_array.h +// ----------------------------------------------------------------------------- +// +// A `FixedArray` represents a non-resizable array of `T` where the length of +// the array can be determined at run-time. It is a good replacement for +// non-standard and deprecated uses of `alloca()` and variable length arrays +// within the GCC extension. (See +// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html). 
+// +// `FixedArray` allocates small arrays inline, keeping performance fast by +// avoiding heap operations. It also helps reduce the chances of +// accidentally overflowing your stack if large input is passed to +// your function. + +#ifndef ABSL_CONTAINER_FIXED_ARRAY_H_ +#define ABSL_CONTAINER_FIXED_ARRAY_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/algorithm.h" +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/memory/memory.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +constexpr static auto kFixedArrayUseDefault = static_cast(-1); + +// ----------------------------------------------------------------------------- +// FixedArray +// ----------------------------------------------------------------------------- +// +// A `FixedArray` provides a run-time fixed-size array, allocating a small array +// inline for efficiency. +// +// Most users should not specify an `inline_elements` argument and let +// `FixedArray` automatically determine the number of elements +// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the +// `FixedArray` implementation will use inline storage for arrays with a +// length <= `inline_elements`. +// +// Note that a `FixedArray` constructed with a `size_type` argument will +// default-initialize its values by leaving trivially constructible types +// uninitialized (e.g. int, int[4], double), and others default-constructed. +// This matches the behavior of c-style arrays and `std::array`, but not +// `std::vector`. +// +// Note that `FixedArray` does not provide a public allocator; if it requires a +// heap allocation, it will do so with global `::operator new[]()` and +// `::operator delete[]()`, even if T provides class-scope overrides for these +// operators. +template > +class FixedArray { + static_assert(!std::is_array::value || std::extent::value > 0, + "Arrays with unknown bounds cannot be used with FixedArray."); + + static constexpr size_t kInlineBytesDefault = 256; + + using AllocatorTraits = std::allocator_traits; + // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17, + // but this seems to be mostly pedantic. 
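+  // EnableIfForwardIterator removes the (first, last) constructor from
+  // overload resolution when the arguments are not iterators, so e.g.
+  // FixedArray<int>(10, 5) still selects the size-and-value constructor.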
+ template + using EnableIfForwardIterator = absl::enable_if_t::iterator_category, + std::forward_iterator_tag>::value>; + static constexpr bool NoexceptCopyable() { + return std::is_nothrow_copy_constructible::value && + absl::allocator_is_nothrow::value; + } + static constexpr bool NoexceptMovable() { + return std::is_nothrow_move_constructible::value && + absl::allocator_is_nothrow::value; + } + static constexpr bool DefaultConstructorIsNonTrivial() { + return !absl::is_trivially_default_constructible::value; + } + + public: + using allocator_type = typename AllocatorTraits::allocator_type; + using value_type = typename allocator_type::value_type; + using pointer = typename allocator_type::pointer; + using const_pointer = typename allocator_type::const_pointer; + using reference = typename allocator_type::reference; + using const_reference = typename allocator_type::const_reference; + using size_type = typename allocator_type::size_type; + using difference_type = typename allocator_type::difference_type; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + static constexpr size_type inline_elements = + (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type) + : static_cast(N)); + + FixedArray( + const FixedArray& other, + const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable()) + : FixedArray(other.begin(), other.end(), a) {} + + FixedArray( + FixedArray&& other, + const allocator_type& a = allocator_type()) noexcept(NoexceptMovable()) + : FixedArray(std::make_move_iterator(other.begin()), + std::make_move_iterator(other.end()), a) {} + + // Creates an array object that can store `n` elements. + // Note that trivially constructible elements will be uninitialized. + explicit FixedArray(size_type n, const allocator_type& a = allocator_type()) + : storage_(n, a) { + if (DefaultConstructorIsNonTrivial()) { + memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), + storage_.end()); + } + } + + // Creates an array initialized with `n` copies of `val`. + FixedArray(size_type n, const value_type& val, + const allocator_type& a = allocator_type()) + : storage_(n, a) { + memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), + storage_.end(), val); + } + + // Creates an array initialized with the size and contents of `init_list`. + FixedArray(std::initializer_list init_list, + const allocator_type& a = allocator_type()) + : FixedArray(init_list.begin(), init_list.end(), a) {} + + // Creates an array initialized with the elements from the input + // range. The array's size will always be `std::distance(first, last)`. + // REQUIRES: Iterator must be a forward_iterator or better. + template * = nullptr> + FixedArray(Iterator first, Iterator last, + const allocator_type& a = allocator_type()) + : storage_(std::distance(first, last), a) { + memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last); + } + + ~FixedArray() noexcept { + for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) { + AllocatorTraits::destroy(storage_.alloc(), cur); + } + } + + // Assignments are deleted because they break the invariant that the size of a + // `FixedArray` never changes. + void operator=(FixedArray&&) = delete; + void operator=(const FixedArray&) = delete; + + // FixedArray::size() + // + // Returns the length of the fixed array. 
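+  // For a FixedArray<T> constructed with size n, this is always that n: the
+  // array never resizes, so size() is constant over the object's lifetime.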
+  size_type size() const { return storage_.size(); }
+
+  // FixedArray::max_size()
+  //
+  // Returns the largest possible value of `std::distance(begin(), end())` for
+  // a `FixedArray<T>`, i.e. the maximum number of addressable bytes divided
+  // by the number of bytes taken by T.
+  constexpr size_type max_size() const {
+    return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
+  }
+
+  // FixedArray::empty()
+  //
+  // Returns whether or not the fixed array is empty.
+  bool empty() const { return size() == 0; }
+
+  // FixedArray::memsize()
+  //
+  // Returns the memory size of the fixed array in bytes.
+  size_t memsize() const { return size() * sizeof(value_type); }
+
+  // FixedArray::data()
+  //
+  // Returns a const T* pointer to elements of the `FixedArray`. This pointer
+  // can be used to access (but not modify) the contained elements.
+  const_pointer data() const { return AsValueType(storage_.begin()); }
+
+  // Overload of FixedArray::data() to return a T* pointer to elements of the
+  // fixed array. This pointer can be used to access and modify the contained
+  // elements.
+  pointer data() { return AsValueType(storage_.begin()); }
+
+  // FixedArray::operator[]
+  //
+  // Returns a reference to the ith element of the fixed array.
+  // REQUIRES: 0 <= i < size()
+  reference operator[](size_type i) {
+    assert(i < size());
+    return data()[i];
+  }
+
+  // Overload of FixedArray::operator[] to return a const reference to the
+  // ith element of the fixed array.
+  // REQUIRES: 0 <= i < size()
+  const_reference operator[](size_type i) const {
+    assert(i < size());
+    return data()[i];
+  }
+
+  // FixedArray::at
+  //
+  // Bounds-checked access. Returns a reference to the ith element of the
+  // fixed array, or throws std::out_of_range if i is out of bounds.
+  reference at(size_type i) {
+    if (ABSL_PREDICT_FALSE(i >= size())) {
+      base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+    }
+    return data()[i];
+  }
+
+  // Overload of FixedArray::at() to return a const reference to the ith
+  // element of the fixed array.
+  const_reference at(size_type i) const {
+    if (ABSL_PREDICT_FALSE(i >= size())) {
+      base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+    }
+    return data()[i];
+  }
+
+  // FixedArray::front()
+  //
+  // Returns a reference to the first element of the fixed array.
+  reference front() { return *begin(); }
+
+  // Overload of FixedArray::front() to return a reference to the first
+  // element of a fixed array of const values.
+  const_reference front() const { return *begin(); }
+
+  // FixedArray::back()
+  //
+  // Returns a reference to the last element of the fixed array.
+  reference back() { return *(end() - 1); }
+
+  // Overload of FixedArray::back() to return a reference to the last element
+  // of a fixed array of const values.
+  const_reference back() const { return *(end() - 1); }
+
+  // FixedArray::begin()
+  //
+  // Returns an iterator to the beginning of the fixed array.
+  iterator begin() { return data(); }
+
+  // Overload of FixedArray::begin() to return a const iterator to the
+  // beginning of the fixed array.
+  const_iterator begin() const { return data(); }
+
+  // FixedArray::cbegin()
+  //
+  // Returns a const iterator to the beginning of the fixed array.
+  const_iterator cbegin() const { return begin(); }
+
+  // FixedArray::end()
+  //
+  // Returns an iterator to the end of the fixed array.
+  iterator end() { return data() + size(); }
+
+  // Overload of FixedArray::end() to return a const iterator to the end of
+  // the fixed array.
+ const_iterator end() const { return data() + size(); } + + // FixedArray::cend() + // + // Returns a const iterator to the end of the fixed array. + const_iterator cend() const { return end(); } + + // FixedArray::rbegin() + // + // Returns a reverse iterator from the end of the fixed array. + reverse_iterator rbegin() { return reverse_iterator(end()); } + + // Overload of FixedArray::rbegin() to return a const reverse iterator from + // the end of the fixed array. + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + + // FixedArray::crbegin() + // + // Returns a const reverse iterator from the end of the fixed array. + const_reverse_iterator crbegin() const { return rbegin(); } + + // FixedArray::rend() + // + // Returns a reverse iterator from the beginning of the fixed array. + reverse_iterator rend() { return reverse_iterator(begin()); } + + // Overload of FixedArray::rend() for returning a const reverse iterator + // from the beginning of the fixed array. + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + // FixedArray::crend() + // + // Returns a reverse iterator from the beginning of the fixed array. + const_reverse_iterator crend() const { return rend(); } + + // FixedArray::fill() + // + // Assigns the given `value` to all elements in the fixed array. + void fill(const value_type& val) { std::fill(begin(), end(), val); } + + // Relational operators. Equality operators are elementwise using + // `operator==`, while order operators order FixedArrays lexicographically. + friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) { + return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); + } + + friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) { + return !(lhs == rhs); + } + + friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) { + return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), + rhs.end()); + } + + friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) { + return rhs < lhs; + } + + friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) { + return !(rhs < lhs); + } + + friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) { + return !(lhs < rhs); + } + + template + friend H AbslHashValue(H h, const FixedArray& v) { + return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), + v.size()); + } + + private: + // StorageElement + // + // For FixedArrays with a C-style-array value_type, StorageElement is a POD + // wrapper struct called StorageElementWrapper that holds the value_type + // instance inside. This is needed for construction and destruction of the + // entire array regardless of how many dimensions it has. For all other cases, + // StorageElement is just an alias of value_type. + // + // Maintainer's Note: The simpler solution would be to simply wrap value_type + // in a struct whether it's an array or not. That causes some paranoid + // diagnostics to misfire, believing that 'data()' returns a pointer to a + // single element, rather than the packed array that it really is. + // e.g.: + // + // FixedArray buf(1); + // sprintf(buf.data(), "foo"); + // + // error: call to int __builtin___sprintf_chk(etc...) 
+ // will always overflow destination buffer [-Werror] + // + template , + size_t InnerN = std::extent::value> + struct StorageElementWrapper { + InnerT array[InnerN]; + }; + + using StorageElement = + absl::conditional_t::value, + StorageElementWrapper, value_type>; + + static pointer AsValueType(pointer ptr) { return ptr; } + static pointer AsValueType(StorageElementWrapper* ptr) { + return std::addressof(ptr->array); + } + + static_assert(sizeof(StorageElement) == sizeof(value_type), ""); + static_assert(alignof(StorageElement) == alignof(value_type), ""); + + class NonEmptyInlinedStorage { + public: + StorageElement* data() { return reinterpret_cast(buff_); } + void AnnotateConstruct(size_type n); + void AnnotateDestruct(size_type n); + +#ifdef ADDRESS_SANITIZER + void* RedzoneBegin() { return &redzone_begin_; } + void* RedzoneEnd() { return &redzone_end_ + 1; } +#endif // ADDRESS_SANITIZER + + private: + ADDRESS_SANITIZER_REDZONE(redzone_begin_); + alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])]; + ADDRESS_SANITIZER_REDZONE(redzone_end_); + }; + + class EmptyInlinedStorage { + public: + StorageElement* data() { return nullptr; } + void AnnotateConstruct(size_type) {} + void AnnotateDestruct(size_type) {} + }; + + using InlinedStorage = + absl::conditional_t; + + // Storage + // + // An instance of Storage manages the inline and out-of-line memory for + // instances of FixedArray. This guarantees that even when construction of + // individual elements fails in the FixedArray constructor body, the + // destructor for Storage will still be called and out-of-line memory will be + // properly deallocated. + // + class Storage : public InlinedStorage { + public: + Storage(size_type n, const allocator_type& a) + : size_alloc_(n, a), data_(InitializeData()) {} + + ~Storage() noexcept { + if (UsingInlinedStorage(size())) { + InlinedStorage::AnnotateDestruct(size()); + } else { + AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size()); + } + } + + size_type size() const { return size_alloc_.template get<0>(); } + StorageElement* begin() const { return data_; } + StorageElement* end() const { return begin() + size(); } + allocator_type& alloc() { return size_alloc_.template get<1>(); } + + private: + static bool UsingInlinedStorage(size_type n) { + return n <= inline_elements; + } + + StorageElement* InitializeData() { + if (UsingInlinedStorage(size())) { + InlinedStorage::AnnotateConstruct(size()); + return InlinedStorage::data(); + } else { + return reinterpret_cast( + AllocatorTraits::allocate(alloc(), size())); + } + } + + // `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s + container_internal::CompressedTuple size_alloc_; + StorageElement* data_; + }; + + Storage storage_; +}; + +template +constexpr size_t FixedArray::kInlineBytesDefault; + +template +constexpr typename FixedArray::size_type + FixedArray::inline_elements; + +template +void FixedArray::NonEmptyInlinedStorage::AnnotateConstruct( + typename FixedArray::size_type n) { +#ifdef ADDRESS_SANITIZER + if (!n) return; + ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n); + ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin()); +#endif // ADDRESS_SANITIZER + static_cast(n); // Mark used when not in asan mode +} + +template +void FixedArray::NonEmptyInlinedStorage::AnnotateDestruct( + typename FixedArray::size_type n) { +#ifdef ADDRESS_SANITIZER + if (!n) return; + ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() 
diff --git a/base/abseil/absl/container/fixed_array_benchmark.cc b/base/abseil/absl/container/fixed_array_benchmark.cc
new file mode 100644
index 0000000..3c7a5a7
--- /dev/null
+++ b/base/abseil/absl/container/fixed_array_benchmark.cc
@@ -0,0 +1,67 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "benchmark/benchmark.h"
+#include "absl/container/fixed_array.h"
+
+namespace {
+
+// For benchmarking -- simple class with constructor and destructor that
+// set an int to a constant.
+class SimpleClass {
+ public:
+  SimpleClass() : i(3) {}
+  ~SimpleClass() { i = 0; }
+
+ private:
+  int i;
+};
+
+template <typename T, size_t inline_size>
+void BM_FixedArray(benchmark::State& state) {
+  const int size = state.range(0);
+  for (auto _ : state) {
+    absl::FixedArray<T, inline_size> fa(size);
+    benchmark::DoNotOptimize(fa.data());
+  }
+}
+BENCHMARK_TEMPLATE(BM_FixedArray, char, absl::kFixedArrayUseDefault)
+    ->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 0)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 1)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 16)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 256)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 65536)->Range(0, 1 << 16);
+
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, absl::kFixedArrayUseDefault)
+    ->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 0)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 1)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 16)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 256)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 65536)->Range(0, 1 << 16);
+
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, absl::kFixedArrayUseDefault)
+    ->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 0)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 1)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 16)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 256)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 65536)->Range(0, 1 << 16);
+
+}  // namespace
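One detail worth spelling out for readers of these benchmarks: when the size parameter is left at absl::kFixedArrayUseDefault, the inline capacity is a byte budget, not an element count. Assuming this snapshot keeps Abseil's 256-byte kInlineBytesDefault (the constant defined in fixed_array.h above), the relationship can be checked directly. A sketch, not part of the patch:

#include "absl/container/fixed_array.h"

// inline_elements == kInlineBytesDefault / sizeof(value_type) when the size
// template parameter is kFixedArrayUseDefault (a 256-byte budget is assumed).
static_assert(absl::FixedArray<char>::inline_elements == 256, "");
static_assert(absl::FixedArray<int>::inline_elements == 256 / sizeof(int), "");
static_assert(absl::FixedArray<char, 16>::inline_elements == 16,
              "an explicit N is taken verbatim");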
diff --git a/base/abseil/absl/container/fixed_array_exception_safety_test.cc b/base/abseil/absl/container/fixed_array_exception_safety_test.cc
new file mode 100644
index 0000000..a5bb009
--- /dev/null
+++ b/base/abseil/absl/container/fixed_array_exception_safety_test.cc
@@ -0,0 +1,202 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/config.h"
+#include "absl/container/fixed_array.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include <initializer_list>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/exception_safety_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace {
+
+constexpr size_t kInlined = 25;
+constexpr size_t kSmallSize = kInlined / 2;
+constexpr size_t kLargeSize = kInlined * 2;
+
+constexpr int kInitialValue = 5;
+constexpr int kUpdatedValue = 10;
+
+using ::testing::TestThrowingCtor;
+
+using Thrower = testing::ThrowingValue<testing::TypeSpec::kEverythingThrows>;
+using ThrowAlloc =
+    testing::ThrowingAllocator<Thrower, testing::AllocSpec::kEverythingThrows>;
+using MoveThrower = testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>;
+using MoveThrowAlloc =
+    testing::ThrowingAllocator<MoveThrower,
+                               testing::AllocSpec::kEverythingThrows>;
+
+using FixedArr = absl::FixedArray<Thrower, kInlined>;
+using FixedArrWithAlloc = absl::FixedArray<Thrower, kInlined, ThrowAlloc>;
+
+using MoveFixedArr = absl::FixedArray<MoveThrower, kInlined>;
+using MoveFixedArrWithAlloc =
+    absl::FixedArray<MoveThrower, kInlined, MoveThrowAlloc>;
+
+TEST(FixedArrayExceptionSafety, CopyConstructor) {
+  auto small = FixedArr(kSmallSize);
+  TestThrowingCtor<FixedArr>(small);
+
+  auto large = FixedArr(kLargeSize);
+  TestThrowingCtor<FixedArr>(large);
+}
+
+TEST(FixedArrayExceptionSafety, CopyConstructorWithAlloc) {
+  auto small = FixedArrWithAlloc(kSmallSize);
+  TestThrowingCtor<FixedArrWithAlloc>(small);
+
+  auto large = FixedArrWithAlloc(kLargeSize);
+  TestThrowingCtor<FixedArrWithAlloc>(large);
+}
+
+TEST(FixedArrayExceptionSafety, MoveConstructor) {
+  TestThrowingCtor<FixedArr>(FixedArr(kSmallSize));
+  TestThrowingCtor<FixedArr>(FixedArr(kLargeSize));
+
+  // TypeSpec::kNoThrowMove
+  TestThrowingCtor<MoveFixedArr>(MoveFixedArr(kSmallSize));
+  TestThrowingCtor<MoveFixedArr>(MoveFixedArr(kLargeSize));
+}
+
+TEST(FixedArrayExceptionSafety, MoveConstructorWithAlloc) {
+  TestThrowingCtor<FixedArrWithAlloc>(FixedArrWithAlloc(kSmallSize));
+  TestThrowingCtor<FixedArrWithAlloc>(FixedArrWithAlloc(kLargeSize));
+
+  // TypeSpec::kNoThrowMove
+  TestThrowingCtor<MoveFixedArrWithAlloc>(MoveFixedArrWithAlloc(kSmallSize));
+  TestThrowingCtor<MoveFixedArrWithAlloc>(MoveFixedArrWithAlloc(kLargeSize));
+}
+
+TEST(FixedArrayExceptionSafety, SizeConstructor) {
+  TestThrowingCtor<FixedArr>(kSmallSize);
+  TestThrowingCtor<FixedArr>(kLargeSize);
+}
+
+TEST(FixedArrayExceptionSafety, SizeConstructorWithAlloc) {
+  TestThrowingCtor<FixedArrWithAlloc>(kSmallSize);
+  TestThrowingCtor<FixedArrWithAlloc>(kLargeSize);
+}
+
+TEST(FixedArrayExceptionSafety, SizeValueConstructor) {
+  TestThrowingCtor<FixedArr>(kSmallSize, Thrower());
+  TestThrowingCtor<FixedArr>(kLargeSize, Thrower());
+}
+
+TEST(FixedArrayExceptionSafety, SizeValueConstructorWithAlloc) {
+  TestThrowingCtor<FixedArrWithAlloc>(kSmallSize, Thrower());
+  TestThrowingCtor<FixedArrWithAlloc>(kLargeSize, Thrower());
+}
+
+TEST(FixedArrayExceptionSafety, IteratorConstructor) {
+  auto small = FixedArr(kSmallSize);
+  TestThrowingCtor<FixedArr>(small.begin(), small.end());
+
+  auto large = FixedArr(kLargeSize);
+  TestThrowingCtor<FixedArr>(large.begin(), large.end());
+}
+
+TEST(FixedArrayExceptionSafety, IteratorConstructorWithAlloc) {
+  auto small = FixedArrWithAlloc(kSmallSize);
+  TestThrowingCtor<FixedArrWithAlloc>(small.begin(), small.end());
+
+  auto large = FixedArrWithAlloc(kLargeSize);
+  TestThrowingCtor<FixedArrWithAlloc>(large.begin(), large.end());
+}
+
+TEST(FixedArrayExceptionSafety, InitListConstructor) {
+  constexpr int small_inlined = 3;
+  using SmallFixedArr = absl::FixedArray<Thrower, small_inlined>;
+
+  TestThrowingCtor<SmallFixedArr>(std::initializer_list<Thrower>{});
+  // Test inlined allocation
+  TestThrowingCtor<SmallFixedArr>(
+      std::initializer_list<Thrower>{Thrower{}, Thrower{}});
+  // Test out of line allocation
+  TestThrowingCtor<SmallFixedArr>(std::initializer_list<Thrower>{
+      Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}});
+}
+
+TEST(FixedArrayExceptionSafety, InitListConstructorWithAlloc) {
+  constexpr int small_inlined = 3;
+  using SmallFixedArrWithAlloc =
+      absl::FixedArray<Thrower, small_inlined, ThrowAlloc>;
+
+  TestThrowingCtor<SmallFixedArrWithAlloc>(std::initializer_list<Thrower>{});
+  // Test inlined allocation
+  TestThrowingCtor<SmallFixedArrWithAlloc>(
+      std::initializer_list<Thrower>{Thrower{}, Thrower{}});
+  // Test out of line allocation
+  TestThrowingCtor<SmallFixedArrWithAlloc>(std::initializer_list<Thrower>{
+      Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}});
+}
+
+template <typename FixedArrT>
+testing::AssertionResult ReadMemory(FixedArrT* fixed_arr) {
+  // Marked volatile to prevent optimization. Used for running asan tests.
+  volatile int sum = 0;
+  for (const auto& thrower : *fixed_arr) {
+    sum += thrower.Get();
+  }
+  return testing::AssertionSuccess() << "Values sum to [" << sum << "]";
+}
+
+TEST(FixedArrayExceptionSafety, Fill) {
+  auto test_fill = testing::MakeExceptionSafetyTester()
+                       .WithContracts(ReadMemory<FixedArr>)
+                       .WithOperation([&](FixedArr* fixed_arr_ptr) {
+                         auto thrower =
+                             Thrower(kUpdatedValue, testing::nothrow_ctor);
+                         fixed_arr_ptr->fill(thrower);
+                       });
+
+  EXPECT_TRUE(
+      test_fill.WithInitialValue(FixedArr(kSmallSize, Thrower(kInitialValue)))
+          .Test());
+  EXPECT_TRUE(
+      test_fill.WithInitialValue(FixedArr(kLargeSize, Thrower(kInitialValue)))
+          .Test());
+}
+
+TEST(FixedArrayExceptionSafety, FillWithAlloc) {
+  auto test_fill = testing::MakeExceptionSafetyTester()
+                       .WithContracts(ReadMemory<FixedArrWithAlloc>)
+                       .WithOperation([&](FixedArrWithAlloc* fixed_arr_ptr) {
+                         auto thrower =
+                             Thrower(kUpdatedValue, testing::nothrow_ctor);
+                         fixed_arr_ptr->fill(thrower);
+                       });
+
+  EXPECT_TRUE(test_fill
+                  .WithInitialValue(
+                      FixedArrWithAlloc(kSmallSize, Thrower(kInitialValue)))
+                  .Test());
+  EXPECT_TRUE(test_fill
+                  .WithInitialValue(
+                      FixedArrWithAlloc(kLargeSize, Thrower(kInitialValue)))
+                  .Test());
+}
+
+}  // namespace
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_HAVE_EXCEPTIONS
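The harness in this file comes from Abseil's internal exception_safety_testing target, so the signatures are not documented public API. The general shape of such a check, sketched using only calls already present above:

// Construct a source array of values that throw on copy, then let the tester
// run the copy constructor repeatedly, failing at each possible throw point
// and checking that nothing leaks. Type names here are illustrative.
using ThrowerT = testing::ThrowingValue<testing::TypeSpec::kEverythingThrows>;
using SmallArr = absl::FixedArray<ThrowerT, 4>;

void CopyCtorExceptionSafetySketch() {
  auto source = SmallArr(2);                   // plain construction
  testing::TestThrowingCtor<SmallArr>(source); // exercises FixedArray(const&)
}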
diff --git a/base/abseil/absl/container/fixed_array_test.cc b/base/abseil/absl/container/fixed_array_test.cc
new file mode 100644
index 0000000..2b1cf47
--- /dev/null
+++ b/base/abseil/absl/container/fixed_array_test.cc
@@ -0,0 +1,883 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/fixed_array.h"
+
+#include <stdio.h>
+
+#include <cstring>
+#include <list>
+#include <memory>
+#include <numeric>
+#include <scoped_allocator>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/exception_testing.h"
+#include "absl/hash/hash_testing.h"
+#include "absl/memory/memory.h"
+
+using ::testing::ElementsAreArray;
+
+namespace {
+
+// Helper routine to determine if an absl::FixedArray used stack allocation.
+template <typename ArrayType>
+static bool IsOnStack(const ArrayType& a) {
+  return a.size() <= ArrayType::inline_elements;
+}
+
+class ConstructionTester {
+ public:
+  ConstructionTester() : self_ptr_(this), value_(0) { constructions++; }
+  ~ConstructionTester() {
+    assert(self_ptr_ == this);
+    self_ptr_ = nullptr;
+    destructions++;
+  }
+
+  // These are incremented as elements are constructed and destructed so we can
+  // be sure all elements are properly cleaned up.
+  static int constructions;
+  static int destructions;
+
+  void CheckConstructed() { assert(self_ptr_ == this); }
+
+  void set(int value) { value_ = value; }
+  int get() { return value_; }
+
+ private:
+  // self_ptr_ should always point to 'this' -- that's how we can be sure the
+  // constructor has been called.
+  ConstructionTester* self_ptr_;
+  int value_;
+};
+
+int ConstructionTester::constructions = 0;
+int ConstructionTester::destructions = 0;
+
+// ThreeInts will initialize its three ints to the value stored in
+// ThreeInts::counter. The constructor increments counter so that each object
+// in an array of ThreeInts will have different values.
+class ThreeInts {
+ public:
+  ThreeInts() {
+    x_ = counter;
+    y_ = counter;
+    z_ = counter;
+    ++counter;
+  }
+
+  static int counter;
+
+  int x_, y_, z_;
+};
+
+int ThreeInts::counter = 0;
+
+TEST(FixedArrayTest, CopyCtor) {
+  absl::FixedArray<int, 10> on_stack(5);
+  std::iota(on_stack.begin(), on_stack.end(), 0);
+  absl::FixedArray<int, 10> stack_copy = on_stack;
+  EXPECT_THAT(stack_copy, ElementsAreArray(on_stack));
+  EXPECT_TRUE(IsOnStack(stack_copy));
+
+  absl::FixedArray<int, 10> allocated(15);
+  std::iota(allocated.begin(), allocated.end(), 0);
+  absl::FixedArray<int, 10> alloced_copy = allocated;
+  EXPECT_THAT(alloced_copy, ElementsAreArray(allocated));
+  EXPECT_FALSE(IsOnStack(alloced_copy));
+}
+
+TEST(FixedArrayTest, MoveCtor) {
+  absl::FixedArray<std::unique_ptr<int>, 10> on_stack(5);
+  for (int i = 0; i < 5; ++i) {
+    on_stack[i] = absl::make_unique<int>(i);
+  }
+
+  absl::FixedArray<std::unique_ptr<int>, 10> stack_copy = std::move(on_stack);
+  for (int i = 0; i < 5; ++i) EXPECT_EQ(*(stack_copy[i]), i);
+  EXPECT_EQ(stack_copy.size(), on_stack.size());
+
+  absl::FixedArray<std::unique_ptr<int>, 10> allocated(15);
+  for (int i = 0; i < 15; ++i) {
+    allocated[i] = absl::make_unique<int>(i);
+  }
+
+  absl::FixedArray<std::unique_ptr<int>, 10> alloced_copy =
+      std::move(allocated);
+  for (int i = 0; i < 15; ++i) EXPECT_EQ(*(alloced_copy[i]), i);
+  EXPECT_EQ(allocated.size(), alloced_copy.size());
+}
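Note what MoveCtor checks at the end: FixedArray moves element-wise rather than stealing storage (an inline buffer cannot be stolen), so a moved-from array keeps its size while its elements are left in their moved-from state. A small illustration of that behavior, as a sketch rather than part of the patch:

#include <cassert>
#include <memory>

#include "absl/container/fixed_array.h"
#include "absl/memory/memory.h"

void MovedFromKeepsSize() {
  absl::FixedArray<std::unique_ptr<int>, 4> src(3);
  src[0] = absl::make_unique<int>(7);
  auto dst = std::move(src);  // element-wise move of the unique_ptrs
  assert(src.size() == 3);    // size is unchanged...
  assert(src[0] == nullptr);  // ...but the pointers were moved out
  assert(*dst[0] == 7);
}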
+
+TEST(FixedArrayTest, SmallObjects) {
+  // Small object arrays
+  {
+    // Short arrays should be on the stack
+    absl::FixedArray<int> array(4);
+    EXPECT_TRUE(IsOnStack(array));
+  }
+
+  {
+    // Large arrays should be on the heap
+    absl::FixedArray<int> array(1048576);
+    EXPECT_FALSE(IsOnStack(array));
+  }
+
+  {
+    // Arrays of <= default size should be on the stack
+    absl::FixedArray<char, 100> array(100);
+    EXPECT_TRUE(IsOnStack(array));
+  }
+
+  {
+    // Arrays of > default size should be on the heap
+    absl::FixedArray<char, 100> array(101);
+    EXPECT_FALSE(IsOnStack(array));
+  }
+
+  {
+    // Arrays with different size elements should use approximately
+    // same amount of stack space
+    absl::FixedArray<int> array1(0);
+    absl::FixedArray<char> array2(0);
+    EXPECT_LE(sizeof(array1), sizeof(array2) + 100);
+    EXPECT_LE(sizeof(array2), sizeof(array1) + 100);
+  }
+
+  {
+    // Ensure that vectors are properly constructed inside a fixed array.
+    absl::FixedArray<std::vector<int>> array(2);
+    EXPECT_EQ(0, array[0].size());
+    EXPECT_EQ(0, array[1].size());
+  }
+
+  {
+    // Regardless of absl::FixedArray implementation, check that a type with a
+    // low alignment requirement and a non power-of-two size is initialized
+    // correctly.
+    ThreeInts::counter = 1;
+    absl::FixedArray<ThreeInts> array(2);
+    EXPECT_EQ(1, array[0].x_);
+    EXPECT_EQ(1, array[0].y_);
+    EXPECT_EQ(1, array[0].z_);
+    EXPECT_EQ(2, array[1].x_);
+    EXPECT_EQ(2, array[1].y_);
+    EXPECT_EQ(2, array[1].z_);
+  }
+}
+
+TEST(FixedArrayTest, AtThrows) {
+  absl::FixedArray<int> a = {1, 2, 3};
+  EXPECT_EQ(a.at(2), 3);
+  ABSL_BASE_INTERNAL_EXPECT_FAIL(a.at(3), std::out_of_range,
+                                 "failed bounds check");
+}
+
+TEST(FixedArrayRelationalsTest, EqualArrays) {
+  for (int i = 0; i < 10; ++i) {
+    absl::FixedArray<int, 20> a1(i);
+    std::iota(a1.begin(), a1.end(), 0);
+    absl::FixedArray<int, 20> a2(a1.begin(), a1.end());
+
+    EXPECT_TRUE(a1 == a2);
+    EXPECT_FALSE(a1 != a2);
+    EXPECT_TRUE(a2 == a1);
+    EXPECT_FALSE(a2 != a1);
+    EXPECT_FALSE(a1 < a2);
+    EXPECT_FALSE(a1 > a2);
+    EXPECT_FALSE(a2 < a1);
+    EXPECT_FALSE(a2 > a1);
+    EXPECT_TRUE(a1 <= a2);
+    EXPECT_TRUE(a1 >= a2);
+    EXPECT_TRUE(a2 <= a1);
+    EXPECT_TRUE(a2 >= a1);
+  }
+}
+
+TEST(FixedArrayRelationalsTest, UnequalArrays) {
+  for (int i = 1; i < 10; ++i) {
+    absl::FixedArray<int, 20> a1(i);
+    std::iota(a1.begin(), a1.end(), 0);
+    absl::FixedArray<int, 20> a2(a1.begin(), a1.end());
+    --a2[i / 2];
+
+    EXPECT_FALSE(a1 == a2);
+    EXPECT_TRUE(a1 != a2);
+    EXPECT_FALSE(a2 == a1);
+    EXPECT_TRUE(a2 != a1);
+    EXPECT_FALSE(a1 < a2);
+    EXPECT_TRUE(a1 > a2);
+    EXPECT_TRUE(a2 < a1);
+    EXPECT_FALSE(a2 > a1);
+    EXPECT_FALSE(a1 <= a2);
+    EXPECT_TRUE(a1 >= a2);
+    EXPECT_TRUE(a2 <= a1);
+    EXPECT_FALSE(a2 >= a1);
+  }
+}
+
+template <int stack_elements>
+static void TestArray(int n) {
+  SCOPED_TRACE(n);
+  SCOPED_TRACE(stack_elements);
+  ConstructionTester::constructions = 0;
+  ConstructionTester::destructions = 0;
+  {
+    absl::FixedArray<ConstructionTester, stack_elements> array(n);
+
+    EXPECT_THAT(array.size(), n);
+    EXPECT_THAT(array.memsize(), sizeof(ConstructionTester) * n);
+    EXPECT_THAT(array.begin() + n, array.end());
+
+    // Check that all elements were constructed
+    for (int i = 0; i < n; i++) {
+      array[i].CheckConstructed();
+    }
+    // Check that no other elements were constructed
+    EXPECT_THAT(ConstructionTester::constructions, n);
+
+    // Test operator[]
+    for (int i = 0; i < n; i++) {
+      array[i].set(i);
+    }
+    for (int i = 0; i < n; i++) {
+      EXPECT_THAT(array[i].get(), i);
+      EXPECT_THAT(array.data()[i].get(), i);
+    }
+
+    // Test data()
+    for (int i = 0; i < n; i++) {
+      array.data()[i].set(i + 1);
+    }
+    for (int i = 0; i < n; i++) {
+      EXPECT_THAT(array[i].get(), i + 1);
+      EXPECT_THAT(array.data()[i].get(), i + 1);
+    }
+  }  // Close scope containing 'array'.
+
+  // Check that all constructed elements were destructed.
+  EXPECT_EQ(ConstructionTester::constructions,
+            ConstructionTester::destructions);
+}
+
+template <int elements_per_inner_array, int inline_elements>
+static void TestArrayOfArrays(int n) {
+  SCOPED_TRACE(n);
+  SCOPED_TRACE(inline_elements);
+  SCOPED_TRACE(elements_per_inner_array);
+  ConstructionTester::constructions = 0;
+  ConstructionTester::destructions = 0;
+  {
+    using InnerArray = ConstructionTester[elements_per_inner_array];
+    // Heap-allocate the FixedArray to avoid blowing the stack frame.
+    auto array_ptr =
+        absl::make_unique<absl::FixedArray<InnerArray, inline_elements>>(n);
+    auto& array = *array_ptr;
+
+    ASSERT_EQ(array.size(), n);
+    ASSERT_EQ(array.memsize(),
+              sizeof(ConstructionTester) * elements_per_inner_array * n);
+    ASSERT_EQ(array.begin() + n, array.end());
+
+    // Check that all elements were constructed
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < elements_per_inner_array; j++) {
+        (array[i])[j].CheckConstructed();
+      }
+    }
+    // Check that no other elements were constructed
+    ASSERT_EQ(ConstructionTester::constructions, n * elements_per_inner_array);
+
+    // Test operator[]
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < elements_per_inner_array; j++) {
+        (array[i])[j].set(i * elements_per_inner_array + j);
+      }
+    }
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < elements_per_inner_array; j++) {
+        ASSERT_EQ((array[i])[j].get(), i * elements_per_inner_array + j);
+        ASSERT_EQ((array.data()[i])[j].get(), i * elements_per_inner_array + j);
+      }
+    }
+
+    // Test data()
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < elements_per_inner_array; j++) {
+        (array.data()[i])[j].set((i + 1) * elements_per_inner_array + j);
+      }
+    }
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < elements_per_inner_array; j++) {
+        ASSERT_EQ((array[i])[j].get(), (i + 1) * elements_per_inner_array + j);
+        ASSERT_EQ((array.data()[i])[j].get(),
+                  (i + 1) * elements_per_inner_array + j);
+      }
+    }
+  }  // Close scope containing 'array'.
+
+  // Check that all constructed elements were destructed.
+  EXPECT_EQ(ConstructionTester::constructions,
+            ConstructionTester::destructions);
+}
+
+TEST(IteratorConstructorTest, NonInline) {
+  int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
+  absl::FixedArray<int, ABSL_ARRAYSIZE(kInput) - 1> const fixed(
+      kInput, kInput + ABSL_ARRAYSIZE(kInput));
+  ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+  for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+    ASSERT_EQ(kInput[i], fixed[i]);
+  }
+}
+
+TEST(IteratorConstructorTest, Inline) {
+  int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
+  absl::FixedArray<int, ABSL_ARRAYSIZE(kInput)> const fixed(
+      kInput, kInput + ABSL_ARRAYSIZE(kInput));
+  ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+  for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+    ASSERT_EQ(kInput[i], fixed[i]);
+  }
+}
+
+TEST(IteratorConstructorTest, NonPod) {
+  char const* kInput[] = {"red", "orange", "yellow", "green",
+                          "blue", "indigo", "violet"};
+  absl::FixedArray<std::string> const fixed(kInput,
+                                            kInput + ABSL_ARRAYSIZE(kInput));
+  ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+  for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+    ASSERT_EQ(kInput[i], fixed[i]);
+  }
+}
+
+TEST(IteratorConstructorTest, FromEmptyVector) {
+  std::vector<int> const empty;
+  absl::FixedArray<int> const fixed(empty.begin(), empty.end());
+  EXPECT_EQ(0, fixed.size());
+  EXPECT_EQ(empty.size(), fixed.size());
+}
+
+TEST(IteratorConstructorTest, FromNonEmptyVector) {
+  int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
+  std::vector<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
+  absl::FixedArray<int> const fixed(items.begin(), items.end());
+  ASSERT_EQ(items.size(), fixed.size());
+  for (size_t i = 0; i < items.size(); ++i) {
+    ASSERT_EQ(items[i], fixed[i]);
+  }
+}
+
+TEST(IteratorConstructorTest, FromBidirectionalIteratorRange) {
+  int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
+  std::list<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
+  absl::FixedArray<int> const fixed(items.begin(), items.end());
+  EXPECT_THAT(fixed, testing::ElementsAreArray(kInput));
+}
+
+TEST(InitListConstructorTest, InitListConstruction) {
+  absl::FixedArray<int> fixed = {1, 2, 3};
+  EXPECT_THAT(fixed, testing::ElementsAreArray({1, 2, 3}));
+}
+
+TEST(FillConstructorTest, NonEmptyArrays) {
+  absl::FixedArray<int, 10> stack_array(4, 1);
+  EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1}));
+
+  absl::FixedArray<int, 0> heap_array(4, 1);
+  EXPECT_THAT(heap_array, testing::ElementsAreArray({1, 1, 1, 1}));
+}
+
+TEST(FillConstructorTest, EmptyArray) {
+  absl::FixedArray<int> empty_fill(0, 1);
+  absl::FixedArray<int> empty_size(0);
+  EXPECT_EQ(empty_fill, empty_size);
+}
+
+TEST(FillConstructorTest, NotTriviallyCopyable) {
+  std::string str = "abcd";
+  absl::FixedArray<std::string> strings = {str, str, str, str};
+
+  absl::FixedArray<std::string> array(4, str);
+  EXPECT_EQ(array, strings);
+}
+
+TEST(FillConstructorTest, Disambiguation) {
+  absl::FixedArray<size_t> a(1, 2);
+  EXPECT_THAT(a, testing::ElementsAre(2));
+}
+
+TEST(FixedArrayTest, ManySizedArrays) {
+  std::vector<int> sizes;
+  for (int i = 1; i < 100; i++) sizes.push_back(i);
+  for (int i = 100; i <= 1000; i += 100) sizes.push_back(i);
+  for (int n : sizes) {
+    TestArray<0>(n);
+    TestArray<1>(n);
+    TestArray<64>(n);
+    TestArray<1000>(n);
+  }
+}
+
+TEST(FixedArrayTest, ManySizedArraysOfArraysOf1) {
+  for (int n = 1; n < 1000; n++) {
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 0>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 64>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1000>(n)));
+  }
+}
+
+TEST(FixedArrayTest, ManySizedArraysOfArraysOf2) {
+  for (int n = 1; n < 1000; n++) {
+    TestArrayOfArrays<2, 0>(n);
+    TestArrayOfArrays<2, 1>(n);
+    TestArrayOfArrays<2, 64>(n);
+    TestArrayOfArrays<2, 1000>(n);
+  }
+}
+
+// If value_type is put inside of a struct container,
+// we might evoke this error in a hardened build unless data() is carefully
+// written, so check on that.
+//     error: call to int __builtin___sprintf_chk(etc...)
+//     will always overflow destination buffer [-Werror]
+TEST(FixedArrayTest, AvoidParanoidDiagnostics) {
+  absl::FixedArray<char, 32> buf(32);
+  sprintf(buf.data(), "foo");  // NOLINT(runtime/printf)
+}
+
+TEST(FixedArrayTest, TooBigInlinedSpace) {
+  struct TooBig {
+    char c[1 << 20];
+  };  // too big for even one on the stack
+
+  // Simulate the data members of absl::FixedArray, a pointer and a size_t.
+  struct Data {
+    TooBig* p;
+    size_t size;
+  };
+
+  // Make sure TooBig objects are not inlined for 0 or default size.
+  static_assert(sizeof(absl::FixedArray<TooBig, 0>) == sizeof(Data),
+                "0-sized absl::FixedArray should have same size as Data.");
+  static_assert(alignof(absl::FixedArray<TooBig, 0>) == alignof(Data),
+                "0-sized absl::FixedArray should have same alignment as Data.");
+  static_assert(sizeof(absl::FixedArray<TooBig>) == sizeof(Data),
+                "default-sized absl::FixedArray should have same size as Data");
+  static_assert(
+      alignof(absl::FixedArray<TooBig>) == alignof(Data),
+      "default-sized absl::FixedArray should have same alignment as Data.");
+}
+
+// PickyDelete EXPECTs its class-scope deallocation funcs are unused.
+struct PickyDelete {
+  PickyDelete() {}
+  ~PickyDelete() {}
+  void operator delete(void* p) {
+    EXPECT_TRUE(false) << __FUNCTION__;
+    ::operator delete(p);
+  }
+  void operator delete[](void* p) {
+    EXPECT_TRUE(false) << __FUNCTION__;
+    ::operator delete[](p);
+  }
+};
+
+TEST(FixedArrayTest, UsesGlobalAlloc) { absl::FixedArray<PickyDelete, 0> a(5); }
+
+TEST(FixedArrayTest, Data) {
+  static const int kInput[] = {2, 3, 5, 7, 11, 13, 17};
+  absl::FixedArray<int> fa(std::begin(kInput), std::end(kInput));
+  EXPECT_EQ(fa.data(), &*fa.begin());
+  EXPECT_EQ(fa.data(), &fa[0]);
+
+  const absl::FixedArray<int>& cfa = fa;
+  EXPECT_EQ(cfa.data(), &*cfa.begin());
+  EXPECT_EQ(cfa.data(), &cfa[0]);
+}
+
+TEST(FixedArrayTest, Empty) {
+  absl::FixedArray<int> empty(0);
+  absl::FixedArray<int, 1> inline_filled(1);
+  absl::FixedArray<int, 0> heap_filled(1);
+  EXPECT_TRUE(empty.empty());
+  EXPECT_FALSE(inline_filled.empty());
+  EXPECT_FALSE(heap_filled.empty());
+}
+
+TEST(FixedArrayTest, FrontAndBack) {
+  absl::FixedArray<int, 3> inlined = {1, 2, 3};
+  EXPECT_EQ(inlined.front(), 1);
+  EXPECT_EQ(inlined.back(), 3);
+
+  absl::FixedArray<int, 0> allocated = {1, 2, 3};
+  EXPECT_EQ(allocated.front(), 1);
+  EXPECT_EQ(allocated.back(), 3);
+
+  absl::FixedArray<int> one_element = {1};
+  EXPECT_EQ(one_element.front(), one_element.back());
+}
+
+TEST(FixedArrayTest, ReverseIteratorInlined) {
+  absl::FixedArray<int, 5> a = {0, 1, 2, 3, 4};
+
+  int counter = 5;
+  for (absl::FixedArray<int>::reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (absl::FixedArray<int>::const_reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+}
+
+TEST(FixedArrayTest, ReverseIteratorAllocated) {
+  absl::FixedArray<int, 0> a = {0, 1, 2, 3, 4};
+
+  int counter = 5;
+  for (absl::FixedArray<int>::reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (absl::FixedArray<int>::const_reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+}
+
+TEST(FixedArrayTest, Fill) {
+  absl::FixedArray<int, 5> inlined(5);
+  int fill_val = 42;
+  inlined.fill(fill_val);
+  for (int i : inlined) EXPECT_EQ(i, fill_val);
+
+  absl::FixedArray<int, 0> allocated(5);
+  allocated.fill(fill_val);
+  for (int i : allocated) EXPECT_EQ(i, fill_val);
+
+  // It doesn't do anything, just make sure this compiles.
+  absl::FixedArray<int> empty(0);
+  empty.fill(fill_val);
+}
+
+// TODO(johnsoncj): Investigate InlinedStorage default initialization in GCC 4.x
+#ifndef __GNUC__
+TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) {
+  using T = char;
+  constexpr auto capacity = 10;
+  using FixedArrType = absl::FixedArray<T, capacity>;
+  using FixedArrBuffType =
+      absl::aligned_storage_t<sizeof(FixedArrType), alignof(FixedArrType)>;
+  constexpr auto scrubbed_bits = 0x95;
+  constexpr auto length = capacity / 2;
+
+  FixedArrBuffType buff;
+  std::memset(std::addressof(buff), scrubbed_bits, sizeof(FixedArrBuffType));
+
+  FixedArrType* arr =
+      ::new (static_cast<void*>(std::addressof(buff))) FixedArrType(length);
+  EXPECT_THAT(*arr, testing::Each(scrubbed_bits));
+  arr->~FixedArrType();
+}
+#endif  // __GNUC__
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator : public std::allocator<T> {
+ public:
+  using Alloc = std::allocator<T>;
+  using pointer = typename Alloc::pointer;
+  using size_type = typename Alloc::size_type;
+
+  CountingAllocator() : bytes_used_(nullptr), instance_count_(nullptr) {}
+  explicit CountingAllocator(int64_t* b)
+      : bytes_used_(b), instance_count_(nullptr) {}
+  CountingAllocator(int64_t* b, int64_t* a)
+      : bytes_used_(b), instance_count_(a) {}
+
+  template <typename U>
+  explicit CountingAllocator(const CountingAllocator<U>& x)
+      : Alloc(x),
+        bytes_used_(x.bytes_used_),
+        instance_count_(x.instance_count_) {}
+
+  pointer allocate(size_type n, const void* const hint = nullptr) {
+    assert(bytes_used_ != nullptr);
+    *bytes_used_ += n * sizeof(T);
+    return Alloc::allocate(n, hint);
+  }
+
+  void deallocate(pointer p, size_type n) {
+    Alloc::deallocate(p, n);
+    assert(bytes_used_ != nullptr);
+    *bytes_used_ -= n * sizeof(T);
+  }
+
+  template <typename... Args>
+  void construct(pointer p, Args&&... args) {
+    Alloc::construct(p, absl::forward<Args>(args)...);
+    if (instance_count_) {
+      *instance_count_ += 1;
+    }
+  }
+
+  void destroy(pointer p) {
+    Alloc::destroy(p);
+    if (instance_count_) {
+      *instance_count_ -= 1;
+    }
+  }
+
+  template <typename U>
+  class rebind {
+   public:
+    using other = CountingAllocator<U>;
+  };
+
+  int64_t* bytes_used_;
+  int64_t* instance_count_;
+};
+
+TEST(AllocatorSupportTest, CountInlineAllocations) {
+  constexpr size_t inlined_size = 4;
+  using Alloc = CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  int64_t allocated = 0;
+  int64_t active_instances = 0;
+
+  {
+    const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+    Alloc alloc(&allocated, &active_instances);
+
+    AllocFxdArr arr(ia, ia + inlined_size, alloc);
+    static_cast<void>(arr);
+  }
+
+  EXPECT_EQ(allocated, 0);
+  EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, CountOutoflineAllocations) {
+  constexpr size_t inlined_size = 4;
+  using Alloc = CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  int64_t allocated = 0;
+  int64_t active_instances = 0;
+
+  {
+    const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+    Alloc alloc(&allocated, &active_instances);
+
+    AllocFxdArr arr(ia, ia + ABSL_ARRAYSIZE(ia), alloc);
+
+    EXPECT_EQ(allocated, arr.size() * sizeof(int));
+    static_cast<void>(arr);
+  }
+
+  EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, CountCopyInlineAllocations) {
+  constexpr size_t inlined_size = 4;
+  using Alloc = CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  int64_t allocated1 = 0;
+  int64_t allocated2 = 0;
+  int64_t active_instances = 0;
+  Alloc alloc(&allocated1, &active_instances);
+  Alloc alloc2(&allocated2, &active_instances);
+
+  {
+    int initial_value = 1;
+
+    AllocFxdArr arr1(inlined_size / 2, initial_value, alloc);
+
+    EXPECT_EQ(allocated1, 0);
+
+    AllocFxdArr arr2(arr1, alloc2);
+
+    EXPECT_EQ(allocated2, 0);
+    static_cast<void>(arr1);
+    static_cast<void>(arr2);
+  }
+
+  EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, CountCopyOutoflineAllocations) {
+  constexpr size_t inlined_size = 4;
+  using Alloc = CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  int64_t allocated1 = 0;
+  int64_t allocated2 = 0;
+  int64_t active_instances = 0;
+  Alloc alloc(&allocated1, &active_instances);
+  Alloc alloc2(&allocated2, &active_instances);
+
+  {
+    int initial_value = 1;
+
+    AllocFxdArr arr1(inlined_size * 2, initial_value, alloc);
+
+    EXPECT_EQ(allocated1, arr1.size() * sizeof(int));
+
+    AllocFxdArr arr2(arr1, alloc2);
+
+    EXPECT_EQ(allocated2, inlined_size * 2 * sizeof(int));
+    static_cast<void>(arr1);
+    static_cast<void>(arr2);
+  }
+
+  EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, SizeValAllocConstructor) {
+  using testing::AllOf;
+  using testing::Each;
+  using testing::SizeIs;
+
+  constexpr size_t inlined_size = 4;
+  using Alloc = CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  {
+    auto len = inlined_size / 2;
+    auto val = 0;
+    int64_t allocated = 0;
+    AllocFxdArr arr(len, val, Alloc(&allocated));
+
+    EXPECT_EQ(allocated, 0);
+    EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0)));
+  }
+
+  {
+    auto len = inlined_size * 2;
+    auto val = 0;
+    int64_t allocated = 0;
+    AllocFxdArr arr(len, val, Alloc(&allocated));
+
+    EXPECT_EQ(allocated, len * sizeof(int));
+    EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0)));
+  }
+}
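Condensed, the allocator-counting technique these tests rely on looks like the sketch below. It reuses the CountingAllocator defined above; the inline capacity of 4 and the function name are illustrative choices, and <cassert>/<cstdint> are assumed included.

void CountingAllocatorSketch() {
  int64_t bytes = 0;
  CountingAllocator<int> alloc(&bytes);

  {
    // Fits the inline buffer: allocate() is never called.
    absl::FixedArray<int, 4, CountingAllocator<int>> small(2, 0, alloc);
    assert(bytes == 0);

    // Exceeds it: allocate() records the out-of-line block.
    absl::FixedArray<int, 4, CountingAllocator<int>> big(8, 0, alloc);
    assert(bytes == static_cast<int64_t>(8 * sizeof(int)));
  }
  assert(bytes == 0);  // deallocate() balanced the books at scope exit
}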
"container-overflow"); + EXPECT_DEATH(raw[10] = 0, "container-overflow"); + EXPECT_DEATH(raw[31] = 0, "container-overflow"); +} + +TEST(FixedArrayTest, AddressSanitizerAnnotations2) { + absl::FixedArray a(12); + char* raw = a.data(); + raw[0] = 0; + raw[11] = 0; + EXPECT_DEATH(raw[-7] = 0, "container-overflow"); + EXPECT_DEATH(raw[-1] = 0, "container-overflow"); + EXPECT_DEATH(raw[12] = 0, "container-overflow"); + EXPECT_DEATH(raw[17] = 0, "container-overflow"); +} + +TEST(FixedArrayTest, AddressSanitizerAnnotations3) { + absl::FixedArray a(20); + uint64_t* raw = a.data(); + raw[0] = 0; + raw[19] = 0; + EXPECT_DEATH(raw[-1] = 0, "container-overflow"); + EXPECT_DEATH(raw[20] = 0, "container-overflow"); +} + +TEST(FixedArrayTest, AddressSanitizerAnnotations4) { + absl::FixedArray a(10); + ThreeInts* raw = a.data(); + raw[0] = ThreeInts(); + raw[9] = ThreeInts(); + // Note: raw[-1] is pointing to 12 bytes before the container range. However, + // there is only a 8-byte red zone before the container range, so we only + // access the last 4 bytes of the struct to make sure it stays within the red + // zone. + EXPECT_DEATH(raw[-1].z_ = 0, "container-overflow"); + EXPECT_DEATH(raw[10] = ThreeInts(), "container-overflow"); + // The actual size of storage is kDefaultBytes=256, 21*12 = 252, + // so reading raw[21] should still trigger the correct warning. + EXPECT_DEATH(raw[21] = ThreeInts(), "container-overflow"); +} +#endif // ADDRESS_SANITIZER + +TEST(FixedArrayTest, AbslHashValueWorks) { + using V = absl::FixedArray; + std::vector cases; + + // Generate a variety of vectors some of these are small enough for the inline + // space but are stored out of line. + for (int i = 0; i < 10; ++i) { + V v(i); + for (int j = 0; j < i; ++j) { + v[j] = j; + } + cases.push_back(v); + } + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); +} + +} // namespace diff --git a/base/abseil/absl/container/flat_hash_map.h b/base/abseil/absl/container/flat_hash_map.h new file mode 100644 index 0000000..fb570cd --- /dev/null +++ b/base/abseil/absl/container/flat_hash_map.h @@ -0,0 +1,591 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flat_hash_map.h +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_map` is an unordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::unordered_map`. Like `unordered_map`, search, insertion, and +// deletion of map elements can be done as an `O(1)` operation. However, +// `flat_hash_map` (and other unordered associative containers known as the +// collection of Abseil "Swiss tables") contain other optimizations that result +// in both memory and computation advantages. +// +// In most cases, your default choice for a hash map should be a map of type +// `flat_hash_map`. 
diff --git a/base/abseil/absl/container/flat_hash_map.h b/base/abseil/absl/container/flat_hash_map.h
new file mode 100644
index 0000000..fb570cd
--- /dev/null
+++ b/base/abseil/absl/container/flat_hash_map.h
@@ -0,0 +1,591 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: flat_hash_map.h
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_map<K, V>` is an unordered associative container of
+// unique keys and associated values designed to be a more efficient
+// replacement for `std::unordered_map`. Like `unordered_map`, search,
+// insertion, and deletion of map elements can be done as an `O(1)` operation.
+// However, `flat_hash_map` (and other unordered associative containers known
+// as the collection of Abseil "Swiss tables") contain other optimizations that
+// result in both memory and computation advantages.
+//
+// In most cases, your default choice for a hash map should be a map of type
+// `flat_hash_map`.
+
+#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_
+#define ABSL_CONTAINER_FLAT_HASH_MAP_H_
+
+#include <cstddef>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
+#include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <class K, class V>
+struct FlatHashMapPolicy;
+}  // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::flat_hash_map
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_map<K, V>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
+// the following notable differences:
+//
+// * Requires keys that are CopyConstructible
+// * Requires values that are MoveConstructible
+// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
+//   `insert()`, provided that the map is provided a compatible heterogeneous
+//   hashing function and equality operator.
+// * Invalidates any references and pointers to elements within the table after
+//   `rehash()`.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash map.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `flat_hash_map` uses the `absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `absl::Hash` framework
+// have a compatible equality operator for comparing insertions into
+// `flat_hash_map`. If your type is not yet supported by the `absl::Hash`
+// framework, see absl/hash/hash.h for information on extending Abseil hashing
+// to user-defined types.
+//
+// NOTE: A `flat_hash_map` stores its value types directly inside its
+// implementation array to avoid memory indirection. Because a `flat_hash_map`
+// is designed to move data when rehashed, map values will not retain pointer
+// stability. If you require pointer stability, or if your values are large,
+// consider using `absl::flat_hash_map<Key, std::unique_ptr<Value>>` instead.
+// If your types are not moveable or you require pointer stability for keys,
+// consider `absl::node_hash_map`.
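A concrete illustration of the pointer-stability caveat in the note above (a sketch, not part of the patch; the function name is illustrative):

#include <string>

#include "absl/container/flat_hash_map.h"

void PointerStabilityCaveat() {
  absl::flat_hash_map<int, std::string> m = {{1, "one"}};
  std::string* v = &m[1];              // points into the table's slot array
  m.rehash(m.bucket_count() * 2 + 1);  // force a rehash
  // `v` may now dangle: rehashing moves elements. Re-find after mutating the
  // table, or store std::unique_ptr<Value> when addresses must stay stable.
  (void)v;
}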
+//
+// Example:
+//
+//   // Create a flat hash map of three strings (that map to strings)
+//   absl::flat_hash_map<std::string, std::string> ducks =
+//     {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+//   // Insert a new element into the flat hash map
+//   ducks.insert({"d", "donald"});
+//
+//   // Force a rehash of the flat hash map
+//   ducks.rehash(0);
+//
+//   // Find the element with the key "b"
+//   std::string search_key = "b";
+//   auto result = ducks.find(search_key);
+//   if (result != ducks.end()) {
+//     std::cout << "Result: " << result->second << std::endl;
+//   }
+template <class K, class V,
+          class Hash = absl::container_internal::hash_default_hash<K>,
+          class Eq = absl::container_internal::hash_default_eq<K>,
+          class Allocator = std::allocator<std::pair<const K, V>>>
+class flat_hash_map : public absl::container_internal::raw_hash_map<
+                          absl::container_internal::FlatHashMapPolicy<K, V>,
+                          Hash, Eq, Allocator> {
+  using Base = typename flat_hash_map::raw_hash_map;
+
+ public:
+  // Constructors and Assignment Operators
+  //
+  // A flat_hash_map supports the same overload set as `std::unordered_map`
+  // for construction and assignment:
+  //
+  // * Default constructor
+  //
+  //   // No allocation for the table's elements is made.
+  //   absl::flat_hash_map<int, std::string> map1;
+  //
+  // * Initializer List constructor
+  //
+  //   absl::flat_hash_map<int, std::string> map2 =
+  //       {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+  //
+  // * Copy constructor
+  //
+  //   absl::flat_hash_map<int, std::string> map3(map2);
+  //
+  // * Copy assignment operator
+  //
+  //   // Hash functor and Comparator are copied as well
+  //   absl::flat_hash_map<int, std::string> map4;
+  //   map4 = map3;
+  //
+  // * Move constructor
+  //
+  //   // Move is guaranteed efficient
+  //   absl::flat_hash_map<int, std::string> map5(std::move(map4));
+  //
+  // * Move assignment operator
+  //
+  //   // May be efficient if allocators are compatible
+  //   absl::flat_hash_map<int, std::string> map6;
+  //   map6 = std::move(map5);
+  //
+  // * Range constructor
+  //
+  //   std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}};
+  //   absl::flat_hash_map<int, std::string> map7(v.begin(), v.end());
+  flat_hash_map() {}
+  using Base::Base;
+
+  // flat_hash_map::begin()
+  //
+  // Returns an iterator to the beginning of the `flat_hash_map`.
+  using Base::begin;
+
+  // flat_hash_map::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `flat_hash_map`.
+  using Base::cbegin;
+
+  // flat_hash_map::cend()
+  //
+  // Returns a const iterator to the end of the `flat_hash_map`.
+  using Base::cend;
+
+  // flat_hash_map::end()
+  //
+  // Returns an iterator to the end of the `flat_hash_map`.
+  using Base::end;
+
+  // flat_hash_map::capacity()
+  //
+  // Returns the number of element slots (assigned, deleted, and empty)
+  // available within the `flat_hash_map`.
+  //
+  // NOTE: this member function is particular to `absl::flat_hash_map` and is
+  // not provided in the `std::unordered_map` API.
+  using Base::capacity;
+
+  // flat_hash_map::empty()
+  //
+  // Returns whether or not the `flat_hash_map` is empty.
+  using Base::empty;
+
+  // flat_hash_map::max_size()
+  //
+  // Returns the largest theoretical possible number of elements within a
+  // `flat_hash_map` under current memory constraints. This value can be
+  // thought of as the largest value of `std::distance(begin(), end())` for a
+  // `flat_hash_map<K, V>`.
+  using Base::max_size;
+
+  // flat_hash_map::size()
+  //
+  // Returns the number of elements currently within the `flat_hash_map`.
+  using Base::size;
+
+  // flat_hash_map::clear()
+  //
+  // Removes all elements from the `flat_hash_map`. Invalidates any references,
+  // pointers, or iterators referring to contained elements.
+  //
+  // NOTE: this operation may shrink the underlying buffer.
To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_map::erase() + // + // Erases elements within the `flat_hash_map`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_map`, returning + // `void`. + // + // NOTE: returning `void` in this case is different than that of STL + // containers in general and `std::unordered_map` in particular (which + // return an iterator to the element following the erased element). If that + // iterator is needed, simply post increment the iterator: + // + // map.erase(it++); + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists. + using Base::erase; + + // flat_hash_map::insert() + // + // Inserts an element of the specified value into the `flat_hash_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const init_type& value): + // + // Inserts a value into the `flat_hash_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // std::pair insert(init_type&& value): + // + // Inserts a moveable value into the `flat_hash_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const init_type& value): + // iterator insert(const_iterator hint, T&& value): + // iterator insert(const_iterator hint, init_type&& value); + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_map` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_map` we guarantee the first match is inserted. + using Base::insert; + + // flat_hash_map::insert_or_assign() + // + // Inserts an element of the specified value into the `flat_hash_map` provided + // that a value with the given key does not already exist, or replaces it with + // the element value if a key for that value already exists, returning an + // iterator pointing to the newly inserted element. If rehashing occurs due + // to the insertion, all existing iterators are invalidated. Overloads are + // listed below. 
+ // + // pair insert_or_assign(const init_type& k, T&& obj): + // pair insert_or_assign(init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map`. + // + // iterator insert_or_assign(const_iterator hint, + // const init_type& k, T&& obj): + // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // flat_hash_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // flat_hash_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // flat_hash_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + // Overloads are listed below. + // + // pair try_emplace(const key_type& k, Args&&... args): + // pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map`. + // + // iterator try_emplace(const_iterator hint, + // const init_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + // + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + using Base::try_emplace; + + // flat_hash_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. 
+ // + // node_type extract(const_iterator position): + // + // Extracts the key,value pair of the element at the indicated position and + // returns a node handle owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the key,value pair of the element with a key matching the passed + // key value and returns a node handle owning that extracted data. If the + // `flat_hash_map` does not contain an element with a matching key, this + // function returns an empty node handle. + using Base::extract; + + // flat_hash_map::merge() + // + // Extracts elements from a given `source` flat hash map into this + // `flat_hash_map`. If the destination `flat_hash_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // flat_hash_map::swap(flat_hash_map& other) + // + // Exchanges the contents of this `flat_hash_map` with those of the `other` + // flat hash map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `flat_hash_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the flat hash map's hashing and key equivalence + // functions be Swappable, and are exchanged using unqualified calls to + // non-member `swap()`. If the map's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // flat_hash_map::rehash(count) + // + // Rehashes the `flat_hash_map`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + // + // NOTE: unlike behavior in `std::unordered_map`, references are also + // invalidated upon a `rehash()`. + using Base::rehash; + + // flat_hash_map::reserve(count) + // + // Sets the number of slots in the `flat_hash_map` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // flat_hash_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // flat_hash_map::contains() + // + // Determines whether an element with a key comparing equal to the given `key` + // exists within the `flat_hash_map`, returning `true` if so or `false` + // otherwise. + using Base::contains; + + // flat_hash_map::count(const Key& key) const + // + // Returns the number of elements with a key comparing equal to the given + // `key` within the `flat_hash_map`. note that this function will return + // either `1` or `0` since duplicate keys are not allowed within a + // `flat_hash_map`. + using Base::count; + + // flat_hash_map::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `flat_hash_map`. + using Base::equal_range; + + // flat_hash_map::find() + // + // Finds an element with the passed `key` within the `flat_hash_map`. 
+  using Base::find;
+
+  // flat_hash_map::operator[]()
+  //
+  // Returns a reference to the value mapped to the passed key within the
+  // `flat_hash_map`, performing an `insert()` if the key does not already
+  // exist.
+  //
+  // If an insertion occurs and results in a rehashing of the container, all
+  // iterators are invalidated. Otherwise iterators are not affected and
+  // references are not invalidated. Overloads are listed below.
+  //
+  //   T& operator[](const Key& key):
+  //
+  //     Inserts an init_type object constructed in-place if the element with
+  //     the given key does not exist.
+  //
+  //   T& operator[](Key&& key):
+  //
+  //     Inserts an init_type object constructed in-place provided that an
+  //     element with the given key does not exist.
+  using Base::operator[];
+
+  // flat_hash_map::bucket_count()
+  //
+  // Returns the number of "buckets" within the `flat_hash_map`. Note that
+  // because a flat hash map contains all elements within its internal storage,
+  // this value simply equals the current capacity of the `flat_hash_map`.
+  using Base::bucket_count;
+
+  // flat_hash_map::load_factor()
+  //
+  // Returns the current load factor of the `flat_hash_map` (the average number
+  // of slots occupied with a value within the hash map).
+  using Base::load_factor;
+
+  // flat_hash_map::max_load_factor()
+  //
+  // Manages the maximum load factor of the `flat_hash_map`. Overloads are
+  // listed below.
+  //
+  //   float flat_hash_map::max_load_factor()
+  //
+  //     Returns the current maximum load factor of the `flat_hash_map`.
+  //
+  //   void flat_hash_map::max_load_factor(float ml)
+  //
+  //     Sets the maximum load factor of the `flat_hash_map` to the passed
+  //     value.
+  //
+  //     NOTE: This overload is provided only for API compatibility with the
+  //     STL; `flat_hash_map` will ignore any set load factor and manage its
+  //     rehashing internally as an implementation detail.
+  using Base::max_load_factor;
+
+  // flat_hash_map::get_allocator()
+  //
+  // Returns the allocator function associated with this `flat_hash_map`.
+  using Base::get_allocator;
+
+  // flat_hash_map::hash_function()
+  //
+  // Returns the hashing function used to hash the keys within this
+  // `flat_hash_map`.
+  using Base::hash_function;
+
+  // flat_hash_map::key_eq()
+  //
+  // Returns the function used for comparing key equality.
+  using Base::key_eq;
+};
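Because the default hash and equality functors exported from hash_function_defaults.h are transparent for string keys, the `find()` documented above accepts `absl::string_view` probes without materializing a `std::string`. A sketch assuming those defaults (the function name is illustrative):

#include <string>

#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"

bool HasKey(const absl::flat_hash_map<std::string, int>& m,
            absl::string_view needle) {
  // Heterogeneous lookup: hashes and compares the string_view directly.
  return m.find(needle) != m.end();
}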
+
+namespace container_internal {
+
+template <class K, class V>
+struct FlatHashMapPolicy {
+  using slot_policy = container_internal::map_slot_policy<K, V>;
+  using slot_type = typename slot_policy::slot_type;
+  using key_type = K;
+  using mapped_type = V;
+  using init_type = std::pair</*non const*/ key_type, mapped_type>;
+
+  template <class Allocator, class... Args>
+  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+    slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
+  }
+
+  template <class Allocator>
+  static void destroy(Allocator* alloc, slot_type* slot) {
+    slot_policy::destroy(alloc, slot);
+  }
+
+  template <class Allocator>
+  static void transfer(Allocator* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    slot_policy::transfer(alloc, new_slot, old_slot);
+  }
+
+  template <class F, class... Args>
+  static decltype(absl::container_internal::DecomposePair(
+      std::declval<F>(), std::declval<Args>()...))
+  apply(F&& f, Args&&... args) {
+    return absl::container_internal::DecomposePair(std::forward<F>(f),
+                                                   std::forward<Args>(args)...);
+  }
+
+  static size_t space_used(const slot_type*) { return 0; }
+
+  static std::pair<const K, V>& element(slot_type* slot) {
+    return slot->value;
+  }
+
+  static V& value(std::pair<const K, V>* kv) { return kv->second; }
+  static const V& value(const std::pair<const K, V>* kv) { return kv->second; }
+};
+
+}  // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in absl/algorithm/container.h
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+    absl::flat_hash_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+}  // namespace container_algorithm_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_FLAT_HASH_MAP_H_
diff --git a/base/abseil/absl/container/flat_hash_map_test.cc b/base/abseil/absl/container/flat_hash_map_test.cc
new file mode 100644
index 0000000..dae8e00
--- /dev/null
+++ b/base/abseil/absl/container/flat_hash_map_test.cc
@@ -0,0 +1,256 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/flat_hash_map.h"
+
+#include <memory>
+
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/unordered_map_constructor_test.h"
+#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_members_test.h"
+#include "absl/container/internal/unordered_map_modifiers_test.h"
+#include "absl/types/any.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+using ::absl::container_internal::hash_internal::Enum;
+using ::absl::container_internal::hash_internal::EnumClass;
+using ::testing::_;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+template <class K, class V>
+using Map = flat_hash_map<K, V, StatefulTestingHash, StatefulTestingEqual,
+                          Alloc<std::pair<const K, V>>>;
+
+static_assert(!std::is_standard_layout<NonStandardLayout>(), "");
+
+using MapTypes =
+    ::testing::Types<Map<int, int>, Map<std::string, int>,
+                     Map<Enum, std::string>, Map<EnumClass, int>,
+                     Map<int, NonStandardLayout>, Map<NonStandardLayout, int>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, MembersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ModifiersTest, MapTypes);
+
+using UniquePtrMapTypes = ::testing::Types<Map<int, std::unique_ptr<int>>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, UniquePtrModifiersTest,
+                               UniquePtrMapTypes);
+
+TEST(FlatHashMap, StandardLayout) {
+  struct Int {
+    explicit Int(size_t value) : value(value) {}
+    Int() : value(0) { ADD_FAILURE(); }
+    Int(const Int& other) : value(other.value) { ADD_FAILURE(); }
+    Int(Int&&) = default;
+    bool operator==(const Int& other) const { return value == other.value; }
+    size_t value;
+  };
+  static_assert(std::is_standard_layout<Int>(), "");
+
+  struct Hash {
+    size_t operator()(const Int& obj) const { return obj.value; }
+  };
+
+  // Verify that neither the key nor the value get default-constructed or
+  // copy-constructed.
+ { + flat_hash_map m; + m.try_emplace(Int(1), Int(2)); + m.try_emplace(Int(3), Int(4)); + m.erase(Int(1)); + m.rehash(2 * m.bucket_count()); + } + { + flat_hash_map m; + m.try_emplace(Int(1), Int(2)); + m.try_emplace(Int(3), Int(4)); + m.erase(Int(1)); + m.clear(); + } +} + +// gcc becomes unhappy if this is inside the method, so pull it out here. +struct balast {}; + +TEST(FlatHashMap, IteratesMsan) { + // Because SwissTable randomizes on pointer addresses, we keep old tables + // around to ensure we don't reuse old memory. + std::vector> garbage; + for (int i = 0; i < 100; ++i) { + absl::flat_hash_map t; + for (int j = 0; j < 100; ++j) { + t[j]; + for (const auto& p : t) EXPECT_THAT(p, Pair(_, _)); + } + garbage.push_back(std::move(t)); + } +} + +// Demonstration of the "Lazy Key" pattern. This uses heterogeneous insert to +// avoid creating expensive key elements when the item is already present in the +// map. +struct LazyInt { + explicit LazyInt(size_t value, int* tracker) + : value(value), tracker(tracker) {} + + explicit operator size_t() const { + ++*tracker; + return value; + } + + size_t value; + int* tracker; +}; + +struct Hash { + using is_transparent = void; + int* tracker; + size_t operator()(size_t obj) const { + ++*tracker; + return obj; + } + size_t operator()(const LazyInt& obj) const { + ++*tracker; + return obj.value; + } +}; + +struct Eq { + using is_transparent = void; + bool operator()(size_t lhs, size_t rhs) const { + return lhs == rhs; + } + bool operator()(size_t lhs, const LazyInt& rhs) const { + return lhs == rhs.value; + } +}; + +TEST(FlatHashMap, LazyKeyPattern) { + // hashes are only guaranteed in opt mode, we use assertions to track internal + // state that can cause extra calls to hash. + int conversions = 0; + int hashes = 0; + flat_hash_map m(0, Hash{&hashes}); + m.reserve(3); + + m[LazyInt(1, &conversions)] = 1; + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 1))); + EXPECT_EQ(conversions, 1); +#ifdef NDEBUG + EXPECT_EQ(hashes, 1); +#endif + + m[LazyInt(1, &conversions)] = 2; + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2))); + EXPECT_EQ(conversions, 1); +#ifdef NDEBUG + EXPECT_EQ(hashes, 2); +#endif + + m.try_emplace(LazyInt(2, &conversions), 3); + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3))); + EXPECT_EQ(conversions, 2); +#ifdef NDEBUG + EXPECT_EQ(hashes, 3); +#endif + + m.try_emplace(LazyInt(2, &conversions), 4); + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3))); + EXPECT_EQ(conversions, 2); +#ifdef NDEBUG + EXPECT_EQ(hashes, 4); +#endif +} + +TEST(FlatHashMap, BitfieldArgument) { + union { + int n : 1; + }; + n = 0; + flat_hash_map m; + m.erase(n); + m.count(n); + m.prefetch(n); + m.find(n); + m.contains(n); + m.equal_range(n); + m.insert_or_assign(n, n); + m.insert_or_assign(m.end(), n, n); + m.try_emplace(n); + m.try_emplace(m.end(), n); + m.at(n); + m[n]; +} + +TEST(FlatHashMap, MergeExtractInsert) { + // We can't test mutable keys, or non-copyable keys with flat_hash_map. + // Test that the nodes have the proper API. 
+ absl::flat_hash_map m = {{1, 7}, {2, 9}}; + auto node = m.extract(1); + EXPECT_TRUE(node); + EXPECT_EQ(node.key(), 1); + EXPECT_EQ(node.mapped(), 7); + EXPECT_THAT(m, UnorderedElementsAre(Pair(2, 9))); + + node.mapped() = 17; + m.insert(std::move(node)); + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 17), Pair(2, 9))); +} + +#if (defined(ABSL_USES_STD_ANY) || !defined(_LIBCPP_VERSION)) && \ + !defined(__EMSCRIPTEN__) +TEST(FlatHashMap, Any) { + absl::flat_hash_map m; + m.emplace(1, 7); + auto it = m.find(1); + ASSERT_NE(it, m.end()); + EXPECT_EQ(7, absl::any_cast(it->second)); + + m.emplace(std::piecewise_construct, std::make_tuple(2), std::make_tuple(8)); + it = m.find(2); + ASSERT_NE(it, m.end()); + EXPECT_EQ(8, absl::any_cast(it->second)); + + m.emplace(std::piecewise_construct, std::make_tuple(3), + std::make_tuple(absl::any(9))); + it = m.find(3); + ASSERT_NE(it, m.end()); + EXPECT_EQ(9, absl::any_cast(it->second)); + + struct H { + size_t operator()(const absl::any&) const { return 0; } + }; + struct E { + bool operator()(const absl::any&, const absl::any&) const { return true; } + }; + absl::flat_hash_map m2; + m2.emplace(1, 7); + auto it2 = m2.find(1); + ASSERT_NE(it2, m2.end()); + EXPECT_EQ(7, it2->second); +} +#endif // (defined(ABSL_USES_STD_ANY) || !defined(_LIBCPP_VERSION)) && + // !defined(__EMSCRIPTEN__) + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/flat_hash_set.h b/base/abseil/absl/container/flat_hash_set.h new file mode 100644 index 0000000..930107e --- /dev/null +++ b/base/abseil/absl/container/flat_hash_set.h @@ -0,0 +1,495 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flat_hash_set.h +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_set` is an unordered associative container designed to +// be a more efficient replacement for `std::unordered_set`. Like +// `unordered_set`, search, insertion, and deletion of set elements can be done +// as an `O(1)` operation. However, `flat_hash_set` (and other unordered +// associative containers known as the collection of Abseil "Swiss tables") +// contain other optimizations that result in both memory and computation +// advantages. +// +// In most cases, your default choice for a hash set should be a set of type +// `flat_hash_set`. 
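+//
+// A minimal illustrative sketch (editor's addition, not part of the original
+// docs): string keys support heterogeneous lookup via the default hash and
+// equality functors, so probing with an `absl::string_view` avoids
+// constructing a temporary `std::string`:
+//
+//   absl::flat_hash_set<std::string> names = {"alice", "bob"};
+//   absl::string_view query = "alice";
+//   bool found = names.contains(query);  // No std::string temporary created.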
+#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_ +#define ABSL_CONTAINER_FLAT_HASH_SET_H_ + +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +template +struct FlatHashSetPolicy; +} // namespace container_internal + +// ----------------------------------------------------------------------------- +// absl::flat_hash_set +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_set` is an unordered associative container which has +// been optimized for both speed and memory footprint in most common use cases. +// Its interface is similar to that of `std::unordered_set` with the +// following notable differences: +// +// * Requires keys that are CopyConstructible +// * Supports heterogeneous lookup, through `find()` and `insert()`, provided +// that the set is provided a compatible heterogeneous hashing function and +// equality operator. +// * Invalidates any references and pointers to elements within the table after +// `rehash()`. +// * Contains a `capacity()` member function indicating the number of element +// slots (open, deleted, and empty) within the hash set. +// * Returns `void` from the `erase(iterator)` overload. +// +// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All +// fundamental and Abseil types that support the `absl::Hash` framework have a +// compatible equality operator for comparing insertions into `flat_hash_map`. +// If your type is not yet supported by the `absl::Hash` framework, see +// absl/hash/hash.h for information on extending Abseil hashing to user-defined +// types. +// +// NOTE: A `flat_hash_set` stores its keys directly inside its implementation +// array to avoid memory indirection. Because a `flat_hash_set` is designed to +// move data when rehashed, set keys will not retain pointer stability. If you +// require pointer stability, consider using +// `absl::flat_hash_set>`. If your type is not moveable and +// you require pointer stability, consider `absl::node_hash_set` instead. +// +// Example: +// +// // Create a flat hash set of three strings +// absl::flat_hash_set ducks = +// {"huey", "dewey", "louie"}; +// +// // Insert a new element into the flat hash set +// ducks.insert("donald"); +// +// // Force a rehash of the flat hash set +// ducks.rehash(0); +// +// // See if "dewey" is present +// if (ducks.contains("dewey")) { +// std::cout << "We found dewey!" << std::endl; +// } +template , + class Eq = absl::container_internal::hash_default_eq, + class Allocator = std::allocator> +class flat_hash_set + : public absl::container_internal::raw_hash_set< + absl::container_internal::FlatHashSetPolicy, Hash, Eq, Allocator> { + using Base = typename flat_hash_set::raw_hash_set; + + public: + // Constructors and Assignment Operators + // + // A flat_hash_set supports the same overload set as `std::unordered_map` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. 
+ // absl::flat_hash_set set1; + // + // * Initializer List constructor + // + // absl::flat_hash_set set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::flat_hash_set set3(set2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::flat_hash_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::flat_hash_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::flat_hash_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::flat_hash_set set7(v.begin(), v.end()); + flat_hash_set() {} + using Base::Base; + + // flat_hash_set::begin() + // + // Returns an iterator to the beginning of the `flat_hash_set`. + using Base::begin; + + // flat_hash_set::cbegin() + // + // Returns a const iterator to the beginning of the `flat_hash_set`. + using Base::cbegin; + + // flat_hash_set::cend() + // + // Returns a const iterator to the end of the `flat_hash_set`. + using Base::cend; + + // flat_hash_set::end() + // + // Returns an iterator to the end of the `flat_hash_set`. + using Base::end; + + // flat_hash_set::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `flat_hash_set`. + // + // NOTE: this member function is particular to `absl::flat_hash_set` and is + // not provided in the `std::unordered_map` API. + using Base::capacity; + + // flat_hash_set::empty() + // + // Returns whether or not the `flat_hash_set` is empty. + using Base::empty; + + // flat_hash_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `flat_hash_set` under current memory constraints. This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `flat_hash_set`. + using Base::max_size; + + // flat_hash_set::size() + // + // Returns the number of elements currently within the `flat_hash_set`. + using Base::size; + + // flat_hash_set::clear() + // + // Removes all elements from the `flat_hash_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_set::erase() + // + // Erases elements within the `flat_hash_set`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_set`, returning + // `void`. + // + // NOTE: returning `void` in this case is different than that of STL + // containers in general and `std::unordered_set` in particular (which + // return an iterator to the element following the erased element). If that + // iterator is needed, simply post increment the iterator: + // + // set.erase(it++); + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists. 
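+  //
+  //   // Illustrative sketch (editor's example): erasing while iterating with
+  //   // the post-increment idiom described above. `ShouldErase` stands in for
+  //   // any caller-supplied predicate.
+  //   for (auto it = set.begin(); it != set.end();) {
+  //     if (ShouldErase(*it)) {
+  //       set.erase(it++);
+  //     } else {
+  //       ++it;
+  //     }
+  //   }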
+ using Base::erase; + + // flat_hash_set::insert() + // + // Inserts an element of the specified value into the `flat_hash_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const T& value): + // + // Inserts a value into the `flat_hash_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // + // Inserts a moveable value into the `flat_hash_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const T& value): + // iterator insert(const_iterator hint, T&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_set` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_set` we guarantee the first match is inserted. + using Base::insert; + + // flat_hash_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // flat_hash_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // flat_hash_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. 
+  //
+  // node_type extract(const key_type& x):
+  //
+  //   Extracts the element with the key matching the passed key value and
+  //   returns a node handle owning that extracted data. If the `flat_hash_set`
+  //   does not contain an element with a matching key, this function returns an
+  //   empty node handle.
+  using Base::extract;
+
+  // flat_hash_set::merge()
+  //
+  // Extracts elements from a given `source` flat hash set into this
+  // `flat_hash_set`. If the destination `flat_hash_set` already contains an
+  // element with an equivalent key, that element is not extracted.
+  using Base::merge;
+
+  // flat_hash_set::swap(flat_hash_set& other)
+  //
+  // Exchanges the contents of this `flat_hash_set` with those of the `other`
+  // flat hash set, avoiding invocation of any move, copy, or swap operations on
+  // individual elements.
+  //
+  // All iterators and references on the `flat_hash_set` remain valid, excepting
+  // for the past-the-end iterator, which is invalidated.
+  //
+  // `swap()` requires that the flat hash set's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+  // non-member `swap()`. If the set's allocator has
+  // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+  // set to `true`, the allocators are also exchanged using an unqualified call
+  // to non-member `swap()`; otherwise, the allocators are not swapped.
+  using Base::swap;
+
+  // flat_hash_set::rehash(count)
+  //
+  // Rehashes the `flat_hash_set`, setting the number of slots to be at least
+  // the passed value. If the new number of slots increases the load factor more
+  // than the current maximum load factor
+  // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+  // will be at least `size()` / `max_load_factor()`.
+  //
+  // To force a rehash, pass rehash(0).
+  //
+  // NOTE: unlike behavior in `std::unordered_set`, references are also
+  // invalidated upon a `rehash()`.
+  using Base::rehash;
+
+  // flat_hash_set::reserve(count)
+  //
+  // Sets the number of slots in the `flat_hash_set` to the number needed to
+  // accommodate at least `count` total elements without exceeding the current
+  // maximum load factor, and may rehash the container if needed.
+  using Base::reserve;
+
+  // flat_hash_set::contains()
+  //
+  // Determines whether an element comparing equal to the given `key` exists
+  // within the `flat_hash_set`, returning `true` if so or `false` otherwise.
+  using Base::contains;
+
+  // flat_hash_set::count(const Key& key) const
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `flat_hash_set`. Note that this function will return either `1` or `0`
+  // since duplicate elements are not allowed within a `flat_hash_set`.
+  using Base::count;
+
+  // flat_hash_set::equal_range()
+  //
+  // Returns a closed range [first, last], defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `flat_hash_set`.
+  using Base::equal_range;
+
+  // flat_hash_set::find()
+  //
+  // Finds an element with the passed `key` within the `flat_hash_set`.
+  using Base::find;
+
+  // flat_hash_set::bucket_count()
+  //
+  // Returns the number of "buckets" within the `flat_hash_set`. Note that
+  // because a flat hash set contains all elements within its internal storage,
+  // this value simply equals the current capacity of the `flat_hash_set`.
+ using Base::bucket_count; + + // flat_hash_set::load_factor() + // + // Returns the current load factor of the `flat_hash_set` (the average number + // of slots occupied with a value within the hash map). + using Base::load_factor; + + // flat_hash_set::max_load_factor() + // + // Manages the maximum load factor of the `flat_hash_set`. Overloads are + // listed below. + // + // float flat_hash_set::max_load_factor() + // + // Returns the current maximum load factor of the `flat_hash_set`. + // + // void flat_hash_set::max_load_factor(float ml) + // + // Sets the maximum load factor of the `flat_hash_set` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `flat_hash_set` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // flat_hash_set::get_allocator() + // + // Returns the allocator function associated with this `flat_hash_set`. + using Base::get_allocator; + + // flat_hash_set::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `flat_hash_set`. + using Base::hash_function; + + // flat_hash_set::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; +}; + +namespace container_internal { + +template +struct FlatHashSetPolicy { + using slot_type = T; + using key_type = T; + using init_type = T; + using constant_iterators = std::true_type; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + absl::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + absl::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + static T& element(slot_type* slot) { return *slot; } + + template + static decltype(absl::container_internal::DecomposeValue( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... args) { + return absl::container_internal::DecomposeValue( + std::forward(f), std::forward(args)...); + } + + static size_t space_used(const T*) { return 0; } +}; +} // namespace container_internal + +namespace container_algorithm_internal { + +// Specialization of trait in absl/algorithm/container.h +template +struct IsUnorderedContainer> + : std::true_type {}; + +} // namespace container_algorithm_internal + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_FLAT_HASH_SET_H_ diff --git a/base/abseil/absl/container/flat_hash_set_test.cc b/base/abseil/absl/container/flat_hash_set_test.cc new file mode 100644 index 0000000..6eacb1b --- /dev/null +++ b/base/abseil/absl/container/flat_hash_set_test.cc @@ -0,0 +1,130 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/container/flat_hash_set.h" + +#include + +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/unordered_set_constructor_test.h" +#include "absl/container/internal/unordered_set_lookup_test.h" +#include "absl/container/internal/unordered_set_members_test.h" +#include "absl/container/internal/unordered_set_modifiers_test.h" +#include "absl/memory/memory.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::absl::container_internal::hash_internal::Enum; +using ::absl::container_internal::hash_internal::EnumClass; +using ::testing::Pointee; +using ::testing::UnorderedElementsAre; +using ::testing::UnorderedElementsAreArray; + +template +using Set = + absl::flat_hash_set>; + +using SetTypes = + ::testing::Types, Set, Set, Set>; + +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ConstructorTest, SetTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, LookupTest, SetTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, MembersTest, SetTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ModifiersTest, SetTypes); + +TEST(FlatHashSet, EmplaceString) { + std::vector v = {"a", "b"}; + absl::flat_hash_set hs(v.begin(), v.end()); + EXPECT_THAT(hs, UnorderedElementsAreArray(v)); +} + +TEST(FlatHashSet, BitfieldArgument) { + union { + int n : 1; + }; + n = 0; + absl::flat_hash_set s = {n}; + s.insert(n); + s.insert(s.end(), n); + s.insert({n}); + s.erase(n); + s.count(n); + s.prefetch(n); + s.find(n); + s.contains(n); + s.equal_range(n); +} + +TEST(FlatHashSet, MergeExtractInsert) { + struct Hash { + size_t operator()(const std::unique_ptr& p) const { return *p; } + }; + struct Eq { + bool operator()(const std::unique_ptr& a, + const std::unique_ptr& b) const { + return *a == *b; + } + }; + absl::flat_hash_set, Hash, Eq> set1, set2; + set1.insert(absl::make_unique(7)); + set1.insert(absl::make_unique(17)); + + set2.insert(absl::make_unique(7)); + set2.insert(absl::make_unique(19)); + + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17))); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19))); + + set1.merge(set2); + + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19))); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7))); + + auto node = set1.extract(absl::make_unique(7)); + EXPECT_TRUE(node); + EXPECT_THAT(node.value(), Pointee(7)); + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19))); + + auto insert_result = set2.insert(std::move(node)); + EXPECT_FALSE(node); + EXPECT_FALSE(insert_result.inserted); + EXPECT_TRUE(insert_result.node); + EXPECT_THAT(insert_result.node.value(), Pointee(7)); + EXPECT_EQ(**insert_result.position, 7); + EXPECT_NE(insert_result.position->get(), insert_result.node.value().get()); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7))); + + node = set1.extract(absl::make_unique(17)); + EXPECT_TRUE(node); + EXPECT_THAT(node.value(), Pointee(17)); + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19))); + + node.value() = absl::make_unique(23); + + insert_result = set2.insert(std::move(node)); + EXPECT_FALSE(node); + EXPECT_TRUE(insert_result.inserted); + EXPECT_FALSE(insert_result.node); + EXPECT_EQ(**insert_result.position, 23); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23))); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/inlined_vector.h 
b/base/abseil/absl/container/inlined_vector.h
new file mode 100644
index 0000000..2388d47
--- /dev/null
+++ b/base/abseil/absl/container/inlined_vector.h
@@ -0,0 +1,848 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: inlined_vector.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains the declaration and definition of an "inlined
+// vector" which behaves in an equivalent fashion to a `std::vector`, except
+// that storage for small sequences of the vector is provided inline without
+// requiring any heap allocation.
+//
+// An `absl::InlinedVector<T, N>` specifies the default capacity `N` as one of
+// its template parameters. Instances where `size() <= N` hold contained
+// elements in inline space. Typically `N` is very small so that sequences that
+// are expected to be short do not require allocations.
+//
+// An `absl::InlinedVector` does not usually require a specific allocator. If
+// the inlined vector grows beyond its initial constraints, it will need to
+// allocate (as any normal `std::vector` would). This is usually performed with
+// the default allocator (defined as `std::allocator<T>`). Optionally, a custom
+// allocator type may be specified as `A` in `absl::InlinedVector<T, N, A>`.
+
+#ifndef ABSL_CONTAINER_INLINED_VECTOR_H_
+#define ABSL_CONTAINER_INLINED_VECTOR_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+#include "absl/container/internal/inlined_vector.h"
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+// -----------------------------------------------------------------------------
+// InlinedVector
+// -----------------------------------------------------------------------------
+//
+// An `absl::InlinedVector` is designed to be a drop-in replacement for
+// `std::vector` for use cases where the vector's size is sufficiently small
+// that it can be inlined. If the inlined vector does grow beyond its estimated
+// capacity, it will trigger an initial allocation on the heap, and will behave
+// as a `std::vector`. The API of the `absl::InlinedVector` within this file is
+// designed to cover the same API footprint as covered by `std::vector`.
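+//
+// Example (editor's illustrative sketch; the inline capacity `4` below is an
+// arbitrary choice for demonstration):
+//
+//   // The first four elements live in the inline storage; pushing a fifth
+//   // element exceeds the inlined capacity and triggers a heap allocation.
+//   absl::InlinedVector<int, 4> v = {1, 2, 3, 4};
+//   v.push_back(5);  // Grows beyond `N`; storage moves to the heap.
+//   assert(v.size() == 5);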
+template > +class InlinedVector { + static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity."); + + using Storage = inlined_vector_internal::Storage; + + using AllocatorTraits = typename Storage::AllocatorTraits; + using RValueReference = typename Storage::RValueReference; + using MoveIterator = typename Storage::MoveIterator; + using IsMemcpyOk = typename Storage::IsMemcpyOk; + + template + using IteratorValueAdapter = + typename Storage::template IteratorValueAdapter; + using CopyValueAdapter = typename Storage::CopyValueAdapter; + using DefaultValueAdapter = typename Storage::DefaultValueAdapter; + + template + using EnableIfAtLeastForwardIterator = absl::enable_if_t< + inlined_vector_internal::IsAtLeastForwardIterator::value>; + template + using DisableIfAtLeastForwardIterator = absl::enable_if_t< + !inlined_vector_internal::IsAtLeastForwardIterator::value>; + + public: + using allocator_type = typename Storage::allocator_type; + using value_type = typename Storage::value_type; + using pointer = typename Storage::pointer; + using const_pointer = typename Storage::const_pointer; + using size_type = typename Storage::size_type; + using difference_type = typename Storage::difference_type; + using reference = typename Storage::reference; + using const_reference = typename Storage::const_reference; + using iterator = typename Storage::iterator; + using const_iterator = typename Storage::const_iterator; + using reverse_iterator = typename Storage::reverse_iterator; + using const_reverse_iterator = typename Storage::const_reverse_iterator; + + // --------------------------------------------------------------------------- + // InlinedVector Constructors and Destructor + // --------------------------------------------------------------------------- + + // Creates an empty inlined vector with a value-initialized allocator. + InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {} + + // Creates an empty inlined vector with a copy of `alloc`. + explicit InlinedVector(const allocator_type& alloc) noexcept + : storage_(alloc) {} + + // Creates an inlined vector with `n` copies of `value_type()`. + explicit InlinedVector(size_type n, + const allocator_type& alloc = allocator_type()) + : storage_(alloc) { + storage_.Initialize(DefaultValueAdapter(), n); + } + + // Creates an inlined vector with `n` copies of `v`. + InlinedVector(size_type n, const_reference v, + const allocator_type& alloc = allocator_type()) + : storage_(alloc) { + storage_.Initialize(CopyValueAdapter(v), n); + } + + // Creates an inlined vector with copies of the elements of `list`. + InlinedVector(std::initializer_list list, + const allocator_type& alloc = allocator_type()) + : InlinedVector(list.begin(), list.end(), alloc) {} + + // Creates an inlined vector with elements constructed from the provided + // forward iterator range [`first`, `last`). + // + // NOTE: the `enable_if` prevents ambiguous interpretation between a call to + // this constructor with two integral arguments and a call to the above + // `InlinedVector(size_type, const_reference)` constructor. + template * = nullptr> + InlinedVector(ForwardIterator first, ForwardIterator last, + const allocator_type& alloc = allocator_type()) + : storage_(alloc) { + storage_.Initialize(IteratorValueAdapter(first), + std::distance(first, last)); + } + + // Creates an inlined vector with elements constructed from the provided input + // iterator range [`first`, `last`). 
+  template <typename InputIterator,
+            DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+  InlinedVector(InputIterator first, InputIterator last,
+                const allocator_type& alloc = allocator_type())
+      : storage_(alloc) {
+    std::copy(first, last, std::back_inserter(*this));
+  }
+
+  // Creates an inlined vector by copying the contents of `other` using
+  // `other`'s allocator.
+  InlinedVector(const InlinedVector& other)
+      : InlinedVector(other, *other.storage_.GetAllocPtr()) {}
+
+  // Creates an inlined vector by copying the contents of `other` using `alloc`.
+  InlinedVector(const InlinedVector& other, const allocator_type& alloc)
+      : storage_(alloc) {
+    if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
+      storage_.MemcpyFrom(other.storage_);
+    } else {
+      storage_.Initialize(IteratorValueAdapter<const_pointer>(other.data()),
+                          other.size());
+    }
+  }
+
+  // Creates an inlined vector by moving in the contents of `other` without
+  // allocating. If `other` contains allocated memory, the newly-created inlined
+  // vector will take ownership of that memory. However, if `other` does not
+  // contain allocated memory, the newly-created inlined vector will perform
+  // element-wise move construction of the contents of `other`.
+  //
+  // NOTE: since no allocation is performed for the inlined vector in either
+  // case, the `noexcept(...)` specification depends on whether moving the
+  // underlying objects can throw. It is assumed that...
+  //  a) move constructors should only throw due to allocation failure.
+  //  b) if `value_type`'s move constructor allocates, it uses the same
+  //     allocation function as the inlined vector's allocator.
+  // Thus, the move constructor is non-throwing if the allocator is non-throwing
+  // or `value_type`'s move constructor is specified as `noexcept`.
+  InlinedVector(InlinedVector&& other) noexcept(
+      absl::allocator_is_nothrow<allocator_type>::value ||
+      std::is_nothrow_move_constructible<value_type>::value)
+      : storage_(*other.storage_.GetAllocPtr()) {
+    if (IsMemcpyOk::value) {
+      storage_.MemcpyFrom(other.storage_);
+
+      other.storage_.SetInlinedSize(0);
+    } else if (other.storage_.GetIsAllocated()) {
+      storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
+                                other.storage_.GetAllocatedCapacity());
+      storage_.SetAllocatedSize(other.storage_.GetSize());
+
+      other.storage_.SetInlinedSize(0);
+    } else {
+      IteratorValueAdapter<MoveIterator> other_values(
+          MoveIterator(other.storage_.GetInlinedData()));
+
+      inlined_vector_internal::ConstructElements(
+          storage_.GetAllocPtr(), storage_.GetInlinedData(), &other_values,
+          other.storage_.GetSize());
+
+      storage_.SetInlinedSize(other.storage_.GetSize());
+    }
+  }
+
+  // Creates an inlined vector by moving in the contents of `other` with a copy
+  // of `alloc`.
+  //
+  // NOTE: if `other`'s allocator is not equal to `alloc`, even if `other`
+  // contains allocated memory, this move constructor will still allocate. Since
+  // allocation is performed, this constructor can only be `noexcept` if the
+  // specified allocator is also `noexcept`.
+  InlinedVector(InlinedVector&& other, const allocator_type& alloc) noexcept(
+      absl::allocator_is_nothrow<allocator_type>::value)
+      : storage_(alloc) {
+    if (IsMemcpyOk::value) {
+      storage_.MemcpyFrom(other.storage_);
+
+      other.storage_.SetInlinedSize(0);
+    } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) &&
+               other.storage_.GetIsAllocated()) {
+      storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
+                                other.storage_.GetAllocatedCapacity());
+      storage_.SetAllocatedSize(other.storage_.GetSize());
+
+      other.storage_.SetInlinedSize(0);
+    } else {
+      storage_.Initialize(
+          IteratorValueAdapter<MoveIterator>(MoveIterator(other.data())),
+          other.size());
+    }
+  }
+
+  ~InlinedVector() {}
+
+  // ---------------------------------------------------------------------------
+  // InlinedVector Member Accessors
+  // ---------------------------------------------------------------------------
+
+  // `InlinedVector::empty()`
+  //
+  // Returns whether the inlined vector contains no elements.
+  bool empty() const noexcept { return !size(); }
+
+  // `InlinedVector::size()`
+  //
+  // Returns the number of elements in the inlined vector.
+  size_type size() const noexcept { return storage_.GetSize(); }
+
+  // `InlinedVector::max_size()`
+  //
+  // Returns the maximum number of elements the inlined vector can hold.
+  size_type max_size() const noexcept {
+    // One bit of the size storage is used to indicate whether the inlined
+    // vector contains allocated memory. As a result, the maximum size that the
+    // inlined vector can express is half of the max for `size_type`.
+    return (std::numeric_limits<size_type>::max)() / 2;
+  }
+
+  // `InlinedVector::capacity()`
+  //
+  // Returns the number of elements that could be stored in the inlined vector
+  // without requiring a reallocation.
+  //
+  // NOTE: for most inlined vectors, `capacity()` should be equal to the
+  // template parameter `N`. Inlined vectors which exceed this capacity are no
+  // longer inlined, and `capacity()` will equal the capacity of the allocated
+  // memory.
+  size_type capacity() const noexcept {
+    return storage_.GetIsAllocated() ? storage_.GetAllocatedCapacity()
+                                     : storage_.GetInlinedCapacity();
+  }
+
+  // `InlinedVector::data()`
+  //
+  // Returns a `pointer` to the elements of the inlined vector. This pointer
+  // can be used to access and modify the contained elements.
+  //
+  // NOTE: only elements within [`data()`, `data() + size()`) are valid.
+  pointer data() noexcept {
+    return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
+                                     : storage_.GetInlinedData();
+  }
+
+  // Overload of `InlinedVector::data()` that returns a `const_pointer` to the
+  // elements of the inlined vector. This pointer can be used to access but not
+  // modify the contained elements.
+  //
+  // NOTE: only elements within [`data()`, `data() + size()`) are valid.
+  const_pointer data() const noexcept {
+    return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
+                                     : storage_.GetInlinedData();
+  }
+
+  // `InlinedVector::operator[](...)`
+  //
+  // Returns a `reference` to the `i`th element of the inlined vector.
+  reference operator[](size_type i) {
+    assert(i < size());
+
+    return data()[i];
+  }
+
+  // Overload of `InlinedVector::operator[](...)` that returns a
+  // `const_reference` to the `i`th element of the inlined vector.
+  const_reference operator[](size_type i) const {
+    assert(i < size());
+
+    return data()[i];
+  }
+
+  // `InlinedVector::at(...)`
+  //
+  // Returns a `reference` to the `i`th element of the inlined vector.
+ // + // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, + // in both debug and non-debug builds, `std::out_of_range` will be thrown. + reference at(size_type i) { + if (ABSL_PREDICT_FALSE(i >= size())) { + base_internal::ThrowStdOutOfRange( + "`InlinedVector::at(size_type)` failed bounds check"); + } + + return data()[i]; + } + + // Overload of `InlinedVector::at(...)` that returns a `const_reference` to + // the `i`th element of the inlined vector. + // + // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, + // in both debug and non-debug builds, `std::out_of_range` will be thrown. + const_reference at(size_type i) const { + if (ABSL_PREDICT_FALSE(i >= size())) { + base_internal::ThrowStdOutOfRange( + "`InlinedVector::at(size_type) const` failed bounds check"); + } + + return data()[i]; + } + + // `InlinedVector::front()` + // + // Returns a `reference` to the first element of the inlined vector. + reference front() { + assert(!empty()); + + return at(0); + } + + // Overload of `InlinedVector::front()` that returns a `const_reference` to + // the first element of the inlined vector. + const_reference front() const { + assert(!empty()); + + return at(0); + } + + // `InlinedVector::back()` + // + // Returns a `reference` to the last element of the inlined vector. + reference back() { + assert(!empty()); + + return at(size() - 1); + } + + // Overload of `InlinedVector::back()` that returns a `const_reference` to the + // last element of the inlined vector. + const_reference back() const { + assert(!empty()); + + return at(size() - 1); + } + + // `InlinedVector::begin()` + // + // Returns an `iterator` to the beginning of the inlined vector. + iterator begin() noexcept { return data(); } + + // Overload of `InlinedVector::begin()` that returns a `const_iterator` to + // the beginning of the inlined vector. + const_iterator begin() const noexcept { return data(); } + + // `InlinedVector::end()` + // + // Returns an `iterator` to the end of the inlined vector. + iterator end() noexcept { return data() + size(); } + + // Overload of `InlinedVector::end()` that returns a `const_iterator` to the + // end of the inlined vector. + const_iterator end() const noexcept { return data() + size(); } + + // `InlinedVector::cbegin()` + // + // Returns a `const_iterator` to the beginning of the inlined vector. + const_iterator cbegin() const noexcept { return begin(); } + + // `InlinedVector::cend()` + // + // Returns a `const_iterator` to the end of the inlined vector. + const_iterator cend() const noexcept { return end(); } + + // `InlinedVector::rbegin()` + // + // Returns a `reverse_iterator` from the end of the inlined vector. + reverse_iterator rbegin() noexcept { return reverse_iterator(end()); } + + // Overload of `InlinedVector::rbegin()` that returns a + // `const_reverse_iterator` from the end of the inlined vector. + const_reverse_iterator rbegin() const noexcept { + return const_reverse_iterator(end()); + } + + // `InlinedVector::rend()` + // + // Returns a `reverse_iterator` from the beginning of the inlined vector. + reverse_iterator rend() noexcept { return reverse_iterator(begin()); } + + // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator` + // from the beginning of the inlined vector. + const_reverse_iterator rend() const noexcept { + return const_reverse_iterator(begin()); + } + + // `InlinedVector::crbegin()` + // + // Returns a `const_reverse_iterator` from the end of the inlined vector. 
+ const_reverse_iterator crbegin() const noexcept { return rbegin(); } + + // `InlinedVector::crend()` + // + // Returns a `const_reverse_iterator` from the beginning of the inlined + // vector. + const_reverse_iterator crend() const noexcept { return rend(); } + + // `InlinedVector::get_allocator()` + // + // Returns a copy of the inlined vector's allocator. + allocator_type get_allocator() const { return *storage_.GetAllocPtr(); } + + // --------------------------------------------------------------------------- + // InlinedVector Member Mutators + // --------------------------------------------------------------------------- + + // `InlinedVector::operator=(...)` + // + // Replaces the elements of the inlined vector with copies of the elements of + // `list`. + InlinedVector& operator=(std::initializer_list list) { + assign(list.begin(), list.end()); + + return *this; + } + + // Overload of `InlinedVector::operator=(...)` that replaces the elements of + // the inlined vector with copies of the elements of `other`. + InlinedVector& operator=(const InlinedVector& other) { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { + const_pointer other_data = other.data(); + assign(other_data, other_data + other.size()); + } + + return *this; + } + + // Overload of `InlinedVector::operator=(...)` that moves the elements of + // `other` into the inlined vector. + // + // NOTE: as a result of calling this overload, `other` is left in a valid but + // unspecified state. + InlinedVector& operator=(InlinedVector&& other) { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { + if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) { + inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(), + size()); + storage_.DeallocateIfAllocated(); + storage_.MemcpyFrom(other.storage_); + + other.storage_.SetInlinedSize(0); + } else { + storage_.Assign(IteratorValueAdapter( + MoveIterator(other.storage_.GetInlinedData())), + other.size()); + } + } + + return *this; + } + + // `InlinedVector::assign(...)` + // + // Replaces the contents of the inlined vector with `n` copies of `v`. + void assign(size_type n, const_reference v) { + storage_.Assign(CopyValueAdapter(v), n); + } + + // Overload of `InlinedVector::assign(...)` that replaces the contents of the + // inlined vector with copies of the elements of `list`. + void assign(std::initializer_list list) { + assign(list.begin(), list.end()); + } + + // Overload of `InlinedVector::assign(...)` to replace the contents of the + // inlined vector with the range [`first`, `last`). + // + // NOTE: this overload is for iterators that are "forward" category or better. + template * = nullptr> + void assign(ForwardIterator first, ForwardIterator last) { + storage_.Assign(IteratorValueAdapter(first), + std::distance(first, last)); + } + + // Overload of `InlinedVector::assign(...)` to replace the contents of the + // inlined vector with the range [`first`, `last`). + // + // NOTE: this overload is for iterators that are "input" category. + template * = nullptr> + void assign(InputIterator first, InputIterator last) { + size_type i = 0; + for (; i < size() && first != last; ++i, static_cast(++first)) { + at(i) = *first; + } + + erase(data() + i, data() + size()); + std::copy(first, last, std::back_inserter(*this)); + } + + // `InlinedVector::resize(...)` + // + // Resizes the inlined vector to contain `n` elements. + // + // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. 
If `n` + // is larger than `size()`, new elements are value-initialized. + void resize(size_type n) { storage_.Resize(DefaultValueAdapter(), n); } + + // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to + // contain `n` elements. + // + // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n` + // is larger than `size()`, new elements are copied-constructed from `v`. + void resize(size_type n, const_reference v) { + storage_.Resize(CopyValueAdapter(v), n); + } + + // `InlinedVector::insert(...)` + // + // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly + // inserted element. + iterator insert(const_iterator pos, const_reference v) { + return emplace(pos, v); + } + + // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using + // move semantics, returning an `iterator` to the newly inserted element. + iterator insert(const_iterator pos, RValueReference v) { + return emplace(pos, std::move(v)); + } + + // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies + // of `v` starting at `pos`, returning an `iterator` pointing to the first of + // the newly inserted elements. + iterator insert(const_iterator pos, size_type n, const_reference v) { + assert(pos >= begin()); + assert(pos <= end()); + + if (ABSL_PREDICT_TRUE(n != 0)) { + value_type dealias = v; + return storage_.Insert(pos, CopyValueAdapter(dealias), n); + } else { + return const_cast(pos); + } + } + + // Overload of `InlinedVector::insert(...)` that inserts copies of the + // elements of `list` starting at `pos`, returning an `iterator` pointing to + // the first of the newly inserted elements. + iterator insert(const_iterator pos, std::initializer_list list) { + return insert(pos, list.begin(), list.end()); + } + + // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, + // `last`) starting at `pos`, returning an `iterator` pointing to the first + // of the newly inserted elements. + // + // NOTE: this overload is for iterators that are "forward" category or better. + template * = nullptr> + iterator insert(const_iterator pos, ForwardIterator first, + ForwardIterator last) { + assert(pos >= begin()); + assert(pos <= end()); + + if (ABSL_PREDICT_TRUE(first != last)) { + return storage_.Insert(pos, IteratorValueAdapter(first), + std::distance(first, last)); + } else { + return const_cast(pos); + } + } + + // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, + // `last`) starting at `pos`, returning an `iterator` pointing to the first + // of the newly inserted elements. + // + // NOTE: this overload is for iterators that are "input" category. + template * = nullptr> + iterator insert(const_iterator pos, InputIterator first, InputIterator last) { + assert(pos >= begin()); + assert(pos <= end()); + + size_type index = std::distance(cbegin(), pos); + for (size_type i = index; first != last; ++i, static_cast(++first)) { + insert(data() + i, *first); + } + + return iterator(data() + index); + } + + // `InlinedVector::emplace(...)` + // + // Constructs and inserts an element using `args...` in the inlined vector at + // `pos`, returning an `iterator` pointing to the newly emplaced element. + template + iterator emplace(const_iterator pos, Args&&... 
args) { + assert(pos >= begin()); + assert(pos <= end()); + + value_type dealias(std::forward(args)...); + return storage_.Insert(pos, + IteratorValueAdapter( + MoveIterator(std::addressof(dealias))), + 1); + } + + // `InlinedVector::emplace_back(...)` + // + // Constructs and inserts an element using `args...` in the inlined vector at + // `end()`, returning a `reference` to the newly emplaced element. + template + reference emplace_back(Args&&... args) { + return storage_.EmplaceBack(std::forward(args)...); + } + + // `InlinedVector::push_back(...)` + // + // Inserts a copy of `v` in the inlined vector at `end()`. + void push_back(const_reference v) { static_cast(emplace_back(v)); } + + // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()` + // using move semantics. + void push_back(RValueReference v) { + static_cast(emplace_back(std::move(v))); + } + + // `InlinedVector::pop_back()` + // + // Destroys the element at `back()`, reducing the size by `1`. + void pop_back() noexcept { + assert(!empty()); + + AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1)); + storage_.SubtractSize(1); + } + + // `InlinedVector::erase(...)` + // + // Erases the element at `pos`, returning an `iterator` pointing to where the + // erased element was located. + // + // NOTE: may return `end()`, which is not dereferencable. + iterator erase(const_iterator pos) { + assert(pos >= begin()); + assert(pos < end()); + + return storage_.Erase(pos, pos + 1); + } + + // Overload of `InlinedVector::erase(...)` that erases every element in the + // range [`from`, `to`), returning an `iterator` pointing to where the first + // erased element was located. + // + // NOTE: may return `end()`, which is not dereferencable. + iterator erase(const_iterator from, const_iterator to) { + assert(from >= begin()); + assert(from <= to); + assert(to <= end()); + + if (ABSL_PREDICT_TRUE(from != to)) { + return storage_.Erase(from, to); + } else { + return const_cast(from); + } + } + + // `InlinedVector::clear()` + // + // Destroys all elements in the inlined vector, setting the size to `0` and + // deallocating any held memory. + void clear() noexcept { + inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(), + size()); + storage_.DeallocateIfAllocated(); + + storage_.SetInlinedSize(0); + } + + // `InlinedVector::reserve(...)` + // + // Ensures that there is enough room for at least `n` elements. + void reserve(size_type n) { storage_.Reserve(n); } + + // `InlinedVector::shrink_to_fit()` + // + // Reduces memory usage by freeing unused memory. After being called, calls to + // `capacity()` will be equal to `max(N, size())`. + // + // If `size() <= N` and the inlined vector contains allocated memory, the + // elements will all be moved to the inlined space and the allocated memory + // will be deallocated. + // + // If `size() > N` and `size() < capacity()`, the elements will be moved to a + // smaller allocation. + void shrink_to_fit() { + if (storage_.GetIsAllocated()) { + storage_.ShrinkToFit(); + } + } + + // `InlinedVector::swap(...)` + // + // Swaps the contents of the inlined vector with `other`. 
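+  //
+  //   // Illustrative sketch (editor's example): swapping exchanges the
+  //   // contents of the two vectors.
+  //   absl::InlinedVector<int, 4> a = {1, 2};
+  //   absl::InlinedVector<int, 4> b = {3, 4, 5};
+  //   a.swap(b);  // `a` now holds {3, 4, 5}; `b` holds {1, 2}.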
+ void swap(InlinedVector& other) { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { + storage_.Swap(std::addressof(other.storage_)); + } + } + + private: + template + friend H AbslHashValue(H h, const absl::InlinedVector& a); + + Storage storage_; +}; + +// ----------------------------------------------------------------------------- +// InlinedVector Non-Member Functions +// ----------------------------------------------------------------------------- + +// `swap(...)` +// +// Swaps the contents of two inlined vectors. +template +void swap(absl::InlinedVector& a, + absl::InlinedVector& b) noexcept(noexcept(a.swap(b))) { + a.swap(b); +} + +// `operator==(...)` +// +// Tests for value-equality of two inlined vectors. +template +bool operator==(const absl::InlinedVector& a, + const absl::InlinedVector& b) { + auto a_data = a.data(); + auto b_data = b.data(); + return absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size()); +} + +// `operator!=(...)` +// +// Tests for value-inequality of two inlined vectors. +template +bool operator!=(const absl::InlinedVector& a, + const absl::InlinedVector& b) { + return !(a == b); +} + +// `operator<(...)` +// +// Tests whether the value of an inlined vector is less than the value of +// another inlined vector using a lexicographical comparison algorithm. +template +bool operator<(const absl::InlinedVector& a, + const absl::InlinedVector& b) { + auto a_data = a.data(); + auto b_data = b.data(); + return std::lexicographical_compare(a_data, a_data + a.size(), b_data, + b_data + b.size()); +} + +// `operator>(...)` +// +// Tests whether the value of an inlined vector is greater than the value of +// another inlined vector using a lexicographical comparison algorithm. +template +bool operator>(const absl::InlinedVector& a, + const absl::InlinedVector& b) { + return b < a; +} + +// `operator<=(...)` +// +// Tests whether the value of an inlined vector is less than or equal to the +// value of another inlined vector using a lexicographical comparison algorithm. +template +bool operator<=(const absl::InlinedVector& a, + const absl::InlinedVector& b) { + return !(b < a); +} + +// `operator>=(...)` +// +// Tests whether the value of an inlined vector is greater than or equal to the +// value of another inlined vector using a lexicographical comparison algorithm. +template +bool operator>=(const absl::InlinedVector& a, + const absl::InlinedVector& b) { + return !(a < b); +} + +// `AbslHashValue(...)` +// +// Provides `absl::Hash` support for `absl::InlinedVector`. It is uncommon to +// call this directly. +template +H AbslHashValue(H h, const absl::InlinedVector& a) { + auto size = a.size(); + return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size); +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INLINED_VECTOR_H_ diff --git a/base/abseil/absl/container/inlined_vector_benchmark.cc b/base/abseil/absl/container/inlined_vector_benchmark.cc new file mode 100644 index 0000000..3f2b4ed --- /dev/null +++ b/base/abseil/absl/container/inlined_vector_benchmark.cc @@ -0,0 +1,807 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/str_cat.h"
+
+namespace {
+
+void BM_InlinedVectorFill(benchmark::State& state) {
+  const int len = state.range(0);
+  absl::InlinedVector<int, 8> v;
+  v.reserve(len);
+  for (auto _ : state) {
+    v.resize(0);  // Use resize(0) as InlinedVector releases storage on clear().
+    for (int i = 0; i < len; ++i) {
+      v.push_back(i);
+    }
+    benchmark::DoNotOptimize(v);
+  }
+}
+BENCHMARK(BM_InlinedVectorFill)->Range(1, 256);
+
+void BM_InlinedVectorFillRange(benchmark::State& state) {
+  const int len = state.range(0);
+  const std::vector<int> src(len, len);
+  absl::InlinedVector<int, 8> v;
+  v.reserve(len);
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(src);
+    v.assign(src.begin(), src.end());
+    benchmark::DoNotOptimize(v);
+  }
+}
+BENCHMARK(BM_InlinedVectorFillRange)->Range(1, 256);
+
+void BM_StdVectorFill(benchmark::State& state) {
+  const int len = state.range(0);
+  std::vector<int> v;
+  v.reserve(len);
+  for (auto _ : state) {
+    v.clear();
+    for (int i = 0; i < len; ++i) {
+      v.push_back(i);
+    }
+    benchmark::DoNotOptimize(v);
+  }
+}
+BENCHMARK(BM_StdVectorFill)->Range(1, 256);
+
+// The purpose of the next two benchmarks is to verify that
+// absl::InlinedVector is efficient when moving is more efficient than
+// copying. To do so, we use strings that are larger than the short
+// string optimization.
+bool StringRepresentedInline(std::string s) {
+  const char* chars = s.data();
+  std::string s1 = std::move(s);
+  return s1.data() != chars;
+}
+
+int GetNonShortStringOptimizationSize() {
+  for (int i = 24; i <= 192; i *= 2) {
+    if (!StringRepresentedInline(std::string(i, 'A'))) {
+      return i;
+    }
+  }
+  ABSL_RAW_LOG(
+      FATAL,
+      "Failed to find a std::string larger than the short std::string optimization");
+  return -1;
+}
+
+void BM_InlinedVectorFillString(benchmark::State& state) {
+  const int len = state.range(0);
+  const int no_sso = GetNonShortStringOptimizationSize();
+  std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
+                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};
+
+  for (auto _ : state) {
+    absl::InlinedVector<std::string, 8> v;
+    for (int i = 0; i < len; i++) {
+      v.push_back(strings[i & 3]);
+    }
+  }
+  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
+}
+BENCHMARK(BM_InlinedVectorFillString)->Range(0, 1024);
+
+void BM_StdVectorFillString(benchmark::State& state) {
+  const int len = state.range(0);
+  const int no_sso = GetNonShortStringOptimizationSize();
+  std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
+                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};
+
+  for (auto _ : state) {
+    std::vector<std::string> v;
+    for (int i = 0; i < len; i++) {
+      v.push_back(strings[i & 3]);
+    }
+  }
+  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
+}
+BENCHMARK(BM_StdVectorFillString)->Range(0, 1024);
+
+struct Buffer {  // some arbitrary structure for benchmarking.
+ char* base; + int length; + int capacity; + void* user_data; +}; + +void BM_InlinedVectorAssignments(benchmark::State& state) { + const int len = state.range(0); + using BufferVec = absl::InlinedVector; + + BufferVec src; + src.resize(len); + + BufferVec dst; + for (auto _ : state) { + benchmark::DoNotOptimize(dst); + benchmark::DoNotOptimize(src); + dst = src; + } +} +BENCHMARK(BM_InlinedVectorAssignments) + ->Arg(0) + ->Arg(1) + ->Arg(2) + ->Arg(3) + ->Arg(4) + ->Arg(20); + +void BM_CreateFromContainer(benchmark::State& state) { + for (auto _ : state) { + absl::InlinedVector src{1, 2, 3}; + benchmark::DoNotOptimize(src); + absl::InlinedVector dst(std::move(src)); + benchmark::DoNotOptimize(dst); + } +} +BENCHMARK(BM_CreateFromContainer); + +struct LargeCopyableOnly { + LargeCopyableOnly() : d(1024, 17) {} + LargeCopyableOnly(const LargeCopyableOnly& o) = default; + LargeCopyableOnly& operator=(const LargeCopyableOnly& o) = default; + + std::vector d; +}; + +struct LargeCopyableSwappable { + LargeCopyableSwappable() : d(1024, 17) {} + + LargeCopyableSwappable(const LargeCopyableSwappable& o) = default; + + LargeCopyableSwappable& operator=(LargeCopyableSwappable o) { + using std::swap; + swap(*this, o); + return *this; + } + + friend void swap(LargeCopyableSwappable& a, LargeCopyableSwappable& b) { + using std::swap; + swap(a.d, b.d); + } + + std::vector d; +}; + +struct LargeCopyableMovable { + LargeCopyableMovable() : d(1024, 17) {} + // Use implicitly defined copy and move. + + std::vector d; +}; + +struct LargeCopyableMovableSwappable { + LargeCopyableMovableSwappable() : d(1024, 17) {} + LargeCopyableMovableSwappable(const LargeCopyableMovableSwappable& o) = + default; + LargeCopyableMovableSwappable(LargeCopyableMovableSwappable&& o) = default; + + LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable o) { + using std::swap; + swap(*this, o); + return *this; + } + LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable&& o) = + default; + + friend void swap(LargeCopyableMovableSwappable& a, + LargeCopyableMovableSwappable& b) { + using std::swap; + swap(a.d, b.d); + } + + std::vector d; +}; + +template +void BM_SwapElements(benchmark::State& state) { + const int len = state.range(0); + using Vec = absl::InlinedVector; + Vec a(len); + Vec b; + for (auto _ : state) { + using std::swap; + benchmark::DoNotOptimize(a); + benchmark::DoNotOptimize(b); + swap(a, b); + } +} +BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableOnly)->Range(0, 1024); +BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableSwappable)->Range(0, 1024); +BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovable)->Range(0, 1024); +BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovableSwappable) + ->Range(0, 1024); + +// The following benchmark is meant to track the efficiency of the vector size +// as a function of stored type via the benchmark label. It is not meant to +// output useful sizeof operator performance. The loop is a dummy operation +// to fulfill the requirement of running the benchmark. 
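+//
+// Editor's note (illustrative, not part of the upstream source): on a
+// typical 64-bit build the footprint is the inline storage -- at least
+// max(N * sizeof(T), sizeof(pointer) + sizeof(size_t)), since the union
+// must also be able to hold the heap representation -- plus one size/tag
+// word. The exact values are implementation details, which is why the
+// benchmark reports them via a label instead of asserting on them, e.g.:
+//
+//   std::printf("%zu\n", sizeof(absl::InlinedVector<int, 1>));  // e.g. 24
+//   std::printf("%zu\n", sizeof(absl::InlinedVector<int, 8>));  // e.g. 40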
+template +void BM_Sizeof(benchmark::State& state) { + int size = 0; + for (auto _ : state) { + VecType vec; + size = sizeof(vec); + } + state.SetLabel(absl::StrCat("sz=", size)); +} +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); + +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); + +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); + +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); +BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); + +void BM_InlinedVectorIndexInlined(benchmark::State& state) { + absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v[4]); + } +} +BENCHMARK(BM_InlinedVectorIndexInlined); + +void BM_InlinedVectorIndexExternal(benchmark::State& state) { + absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v[4]); + } +} +BENCHMARK(BM_InlinedVectorIndexExternal); + +void BM_StdVectorIndex(benchmark::State& state) { + std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v[4]); + } +} +BENCHMARK(BM_StdVectorIndex); + +void BM_InlinedVectorDataInlined(benchmark::State& state) { + absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v.data()); + } +} +BENCHMARK(BM_InlinedVectorDataInlined); + +void BM_InlinedVectorDataExternal(benchmark::State& state) { + absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v.data()); + } + state.SetItemsProcessed(16 * static_cast(state.iterations())); +} +BENCHMARK(BM_InlinedVectorDataExternal); + +void BM_StdVectorData(benchmark::State& state) { + std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v.data()); + } + state.SetItemsProcessed(16 * static_cast(state.iterations())); +} +BENCHMARK(BM_StdVectorData); + +void BM_InlinedVectorSizeInlined(benchmark::State& state) { + absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v.size()); + } +} +BENCHMARK(BM_InlinedVectorSizeInlined); + +void BM_InlinedVectorSizeExternal(benchmark::State& state) { + absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v.size()); + } +} +BENCHMARK(BM_InlinedVectorSizeExternal); + +void BM_StdVectorSize(benchmark::State& state) { + std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v.size()); + } +} +BENCHMARK(BM_StdVectorSize); + +void BM_InlinedVectorEmptyInlined(benchmark::State& state) { + absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7}; + for (auto _ : 
state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v.empty()); + } +} +BENCHMARK(BM_InlinedVectorEmptyInlined); + +void BM_InlinedVectorEmptyExternal(benchmark::State& state) { + absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v.empty()); + } +} +BENCHMARK(BM_InlinedVectorEmptyExternal); + +void BM_StdVectorEmpty(benchmark::State& state) { + std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + for (auto _ : state) { + benchmark::DoNotOptimize(v); + benchmark::DoNotOptimize(v.empty()); + } +} +BENCHMARK(BM_StdVectorEmpty); + +constexpr size_t kInlinedCapacity = 4; +constexpr size_t kLargeSize = kInlinedCapacity * 2; +constexpr size_t kSmallSize = kInlinedCapacity / 2; +constexpr size_t kBatchSize = 100; + +#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \ + BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize); \ + BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize) + +#define ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_FunctionTemplate, T) \ + BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kLargeSize); \ + BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kSmallSize); \ + BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kLargeSize); \ + BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kSmallSize) + +template +using InlVec = absl::InlinedVector; + +struct TrivialType { + size_t val; +}; + +class NontrivialType { + public: + ABSL_ATTRIBUTE_NOINLINE NontrivialType() : val_() { + benchmark::DoNotOptimize(*this); + } + + ABSL_ATTRIBUTE_NOINLINE NontrivialType(const NontrivialType& other) + : val_(other.val_) { + benchmark::DoNotOptimize(*this); + } + + ABSL_ATTRIBUTE_NOINLINE NontrivialType& operator=( + const NontrivialType& other) { + val_ = other.val_; + benchmark::DoNotOptimize(*this); + return *this; + } + + ABSL_ATTRIBUTE_NOINLINE ~NontrivialType() noexcept { + benchmark::DoNotOptimize(*this); + } + + private: + size_t val_; +}; + +template +void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec, + TestVecFn test_vec) { + std::array, kBatchSize> vector_batch{}; + + while (state.KeepRunningBatch(kBatchSize)) { + // Prepare batch + state.PauseTiming(); + for (size_t i = 0; i < kBatchSize; ++i) { + prepare_vec(vector_batch.data() + i, i); + } + benchmark::DoNotOptimize(vector_batch); + state.ResumeTiming(); + + // Test batch + for (size_t i = 0; i < kBatchSize; ++i) { + test_vec(vector_batch.data() + i, i); + } + } +} + +template +void BM_ConstructFromSize(benchmark::State& state) { + using VecT = InlVec; + auto size = ToSize; + BatchedBenchmark( + state, + /* prepare_vec = */ [](InlVec* vec, size_t) { vec->~VecT(); }, + /* test_vec = */ + [&](void* ptr, size_t) { + benchmark::DoNotOptimize(size); + ::new (ptr) VecT(size); + }); +} +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType); +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType); + +template +void BM_ConstructFromSizeRef(benchmark::State& state) { + using VecT = InlVec; + auto size = ToSize; + auto ref = T(); + BatchedBenchmark( + state, + /* prepare_vec = */ [](InlVec* vec, size_t) { vec->~VecT(); }, + /* test_vec = */ + [&](void* ptr, size_t) { + benchmark::DoNotOptimize(size); + benchmark::DoNotOptimize(ref); + ::new (ptr) VecT(size, ref); + }); +} +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType); +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType); + +template +void 
BM_ConstructFromRange(benchmark::State& state) { + using VecT = InlVec; + std::array arr{}; + BatchedBenchmark( + state, + /* prepare_vec = */ [](InlVec* vec, size_t) { vec->~VecT(); }, + /* test_vec = */ + [&](void* ptr, size_t) { + benchmark::DoNotOptimize(arr); + ::new (ptr) VecT(arr.begin(), arr.end()); + }); +} +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType); +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType); + +template +void BM_ConstructFromCopy(benchmark::State& state) { + using VecT = InlVec; + VecT other_vec(ToSize); + BatchedBenchmark( + state, + /* prepare_vec = */ + [](InlVec* vec, size_t) { vec->~VecT(); }, + /* test_vec = */ + [&](void* ptr, size_t) { + benchmark::DoNotOptimize(other_vec); + ::new (ptr) VecT(other_vec); + }); +} +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType); +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType); + +template +void BM_ConstructFromMove(benchmark::State& state) { + using VecT = InlVec; + std::array vector_batch{}; + BatchedBenchmark( + state, + /* prepare_vec = */ + [&](InlVec* vec, size_t i) { + vector_batch[i].clear(); + vector_batch[i].resize(ToSize); + vec->~VecT(); + }, + /* test_vec = */ + [&](void* ptr, size_t i) { + benchmark::DoNotOptimize(vector_batch[i]); + ::new (ptr) VecT(std::move(vector_batch[i])); + }); +} +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType); +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType); + +template +void BM_AssignSizeRef(benchmark::State& state) { + auto size = ToSize; + auto ref = T(); + BatchedBenchmark( + state, + /* prepare_vec = */ [](InlVec* vec, size_t) { vec->resize(FromSize); }, + /* test_vec = */ + [&](InlVec* vec, size_t) { + benchmark::DoNotOptimize(size); + benchmark::DoNotOptimize(ref); + vec->assign(size, ref); + }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, NontrivialType); + +template +void BM_AssignRange(benchmark::State& state) { + std::array arr{}; + BatchedBenchmark( + state, + /* prepare_vec = */ [](InlVec* vec, size_t) { vec->resize(FromSize); }, + /* test_vec = */ + [&](InlVec* vec, size_t) { + benchmark::DoNotOptimize(arr); + vec->assign(arr.begin(), arr.end()); + }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, NontrivialType); + +template +void BM_AssignFromCopy(benchmark::State& state) { + InlVec other_vec(ToSize); + BatchedBenchmark( + state, + /* prepare_vec = */ [](InlVec* vec, size_t) { vec->resize(FromSize); }, + /* test_vec = */ + [&](InlVec* vec, size_t) { + benchmark::DoNotOptimize(other_vec); + *vec = other_vec; + }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, NontrivialType); + +template +void BM_AssignFromMove(benchmark::State& state) { + using VecT = InlVec; + std::array vector_batch{}; + BatchedBenchmark( + state, + /* prepare_vec = */ + [&](InlVec* vec, size_t i) { + vector_batch[i].clear(); + vector_batch[i].resize(ToSize); + vec->resize(FromSize); + }, + /* test_vec = */ + [&](InlVec* vec, size_t i) { + benchmark::DoNotOptimize(vector_batch[i]); + *vec = std::move(vector_batch[i]); + }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType); + +template +void BM_ResizeSize(benchmark::State& state) { + BatchedBenchmark( + state, + 
/* prepare_vec = */ + [](InlVec* vec, size_t) { + vec->clear(); + vec->resize(FromSize); + }, + /* test_vec = */ + [](InlVec* vec, size_t) { vec->resize(ToSize); }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType); + +template +void BM_ResizeSizeRef(benchmark::State& state) { + auto t = T(); + BatchedBenchmark( + state, + /* prepare_vec = */ + [](InlVec* vec, size_t) { + vec->clear(); + vec->resize(FromSize); + }, + /* test_vec = */ + [&](InlVec* vec, size_t) { + benchmark::DoNotOptimize(t); + vec->resize(ToSize, t); + }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType); + +template +void BM_InsertSizeRef(benchmark::State& state) { + auto t = T(); + BatchedBenchmark( + state, + /* prepare_vec = */ + [](InlVec* vec, size_t) { + vec->clear(); + vec->resize(FromSize); + }, + /* test_vec = */ + [&](InlVec* vec, size_t) { + benchmark::DoNotOptimize(t); + auto* pos = vec->data() + (vec->size() / 2); + vec->insert(pos, t); + }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType); + +template +void BM_InsertRange(benchmark::State& state) { + InlVec other_vec(ToSize); + BatchedBenchmark( + state, + /* prepare_vec = */ + [](InlVec* vec, size_t) { + vec->clear(); + vec->resize(FromSize); + }, + /* test_vec = */ + [&](InlVec* vec, size_t) { + benchmark::DoNotOptimize(other_vec); + auto* pos = vec->data() + (vec->size() / 2); + vec->insert(pos, other_vec.begin(), other_vec.end()); + }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType); + +template +void BM_EmplaceBack(benchmark::State& state) { + BatchedBenchmark( + state, + /* prepare_vec = */ + [](InlVec* vec, size_t) { + vec->clear(); + vec->resize(FromSize); + }, + /* test_vec = */ + [](InlVec* vec, size_t) { vec->emplace_back(); }); +} +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType); +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType); + +template +void BM_PopBack(benchmark::State& state) { + BatchedBenchmark( + state, + /* prepare_vec = */ + [](InlVec* vec, size_t) { + vec->clear(); + vec->resize(FromSize); + }, + /* test_vec = */ + [](InlVec* vec, size_t) { vec->pop_back(); }); +} +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType); +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType); + +template +void BM_EraseOne(benchmark::State& state) { + BatchedBenchmark( + state, + /* prepare_vec = */ + [](InlVec* vec, size_t) { + vec->clear(); + vec->resize(FromSize); + }, + /* test_vec = */ + [](InlVec* vec, size_t) { + auto* pos = vec->data() + (vec->size() / 2); + vec->erase(pos); + }); +} +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType); +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType); + +template +void BM_EraseRange(benchmark::State& state) { + BatchedBenchmark( + state, + /* prepare_vec = */ + [](InlVec* vec, size_t) { + vec->clear(); + vec->resize(FromSize); + }, + /* test_vec = */ + [](InlVec* vec, size_t) { + auto* pos = vec->data() + (vec->size() / 2); + vec->erase(pos, pos + 1); + }); +} +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType); +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType); + +template +void BM_Clear(benchmark::State& state) { + BatchedBenchmark( + state, + /* prepare_vec = */ [](InlVec* vec, 
size_t) { vec->resize(FromSize); }, + /* test_vec = */ [](InlVec* vec, size_t) { vec->clear(); }); +} +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType); +ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType); + +template +void BM_Reserve(benchmark::State& state) { + BatchedBenchmark( + state, + /* prepare_vec = */ + [](InlVec* vec, size_t) { + vec->clear(); + vec->resize(FromSize); + }, + /* test_vec = */ + [](InlVec* vec, size_t) { vec->reserve(ToCapacity); }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType); + +template +void BM_ShrinkToFit(benchmark::State& state) { + BatchedBenchmark( + state, + /* prepare_vec = */ + [](InlVec* vec, size_t) { + vec->clear(); + vec->resize(ToCapacity); + vec->reserve(FromCapacity); + }, + /* test_vec = */ [](InlVec* vec, size_t) { vec->shrink_to_fit(); }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType); + +template +void BM_Swap(benchmark::State& state) { + using VecT = InlVec; + std::array vector_batch{}; + BatchedBenchmark( + state, + /* prepare_vec = */ + [&](InlVec* vec, size_t i) { + vector_batch[i].clear(); + vector_batch[i].resize(ToSize); + vec->resize(FromSize); + }, + /* test_vec = */ + [&](InlVec* vec, size_t i) { + using std::swap; + benchmark::DoNotOptimize(vector_batch[i]); + swap(*vec, vector_batch[i]); + }); +} +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType); +ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType); + +} // namespace diff --git a/base/abseil/absl/container/inlined_vector_exception_safety_test.cc b/base/abseil/absl/container/inlined_vector_exception_safety_test.cc new file mode 100644 index 0000000..0e6a05b --- /dev/null +++ b/base/abseil/absl/container/inlined_vector_exception_safety_test.cc @@ -0,0 +1,508 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/inlined_vector.h" + +#include "absl/base/config.h" + +#if defined(ABSL_HAVE_EXCEPTIONS) + +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/base/internal/exception_safety_testing.h" + +namespace { + +constexpr size_t kInlinedCapacity = 4; +constexpr size_t kLargeSize = kInlinedCapacity * 2; +constexpr size_t kSmallSize = kInlinedCapacity / 2; + +using Thrower = testing::ThrowingValue<>; +using MovableThrower = testing::ThrowingValue; +using ThrowAlloc = testing::ThrowingAllocator; + +using ThrowerVec = absl::InlinedVector; +using MovableThrowerVec = absl::InlinedVector; + +using ThrowAllocThrowerVec = + absl::InlinedVector; +using ThrowAllocMovableThrowerVec = + absl::InlinedVector; + +// In GCC, if an element of a `std::initializer_list` throws during construction +// the elements that were constructed before it are not destroyed. This causes +// incorrect exception safety test failures. Thus, `testing::nothrow_ctor` is +// required. 
See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66139 +#define ABSL_INTERNAL_MAKE_INIT_LIST(T, N) \ + (N > kInlinedCapacity \ + ? std::initializer_list{T(0, testing::nothrow_ctor), \ + T(1, testing::nothrow_ctor), \ + T(2, testing::nothrow_ctor), \ + T(3, testing::nothrow_ctor), \ + T(4, testing::nothrow_ctor), \ + T(5, testing::nothrow_ctor), \ + T(6, testing::nothrow_ctor), \ + T(7, testing::nothrow_ctor)} \ + \ + : std::initializer_list{T(0, testing::nothrow_ctor), \ + T(1, testing::nothrow_ctor)}) +static_assert(kLargeSize == 8, "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)"); +static_assert(kSmallSize == 2, "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)"); + +template +class TestParams { + public: + using VecT = TheVecT; + constexpr static size_t GetSizeAt(size_t i) { return kSizes[1 + i]; } + + private: + constexpr static size_t kSizes[1 + sizeof...(TheSizes)] = {1, TheSizes...}; +}; + +using NoSizeTestParams = + ::testing::Types, TestParams, + TestParams, + TestParams>; + +using OneSizeTestParams = + ::testing::Types, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams>; + +using TwoSizeTestParams = ::testing::Types< + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams, + TestParams>; + +template +struct NoSizeTest : ::testing::Test {}; +TYPED_TEST_SUITE(NoSizeTest, NoSizeTestParams); + +template +struct OneSizeTest : ::testing::Test {}; +TYPED_TEST_SUITE(OneSizeTest, OneSizeTestParams); + +template +struct TwoSizeTest : ::testing::Test {}; +TYPED_TEST_SUITE(TwoSizeTest, TwoSizeTestParams); + +template +bool InlinedVectorInvariants(VecT* vec) { + if (*vec != *vec) return false; + if (vec->size() > vec->capacity()) return false; + if (vec->size() > vec->max_size()) return false; + if (vec->capacity() > vec->max_size()) return false; + if (vec->data() != std::addressof(vec->at(0))) return false; + if (vec->data() != vec->begin()) return false; + if (*vec->data() != *vec->begin()) return false; + if (vec->begin() > vec->end()) return false; + if ((vec->end() - vec->begin()) != vec->size()) return false; + if (std::distance(vec->begin(), vec->end()) != vec->size()) return false; + return true; +} + +// Function that always returns false is correct, but refactoring is required +// for clarity. It's needed to express that, as a contract, certain operations +// should not throw at all. Execution of this function means an exception was +// thrown and thus the test should fail. 
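+//
+// Editor's note (illustrative sketch, not upstream code): a contract here is
+// simply a predicate over the object under test; the tester re-checks every
+// supplied contract after each injected throw, along the lines of the
+// pattern used throughout this file:
+//
+//   auto tester = testing::MakeExceptionSafetyTester()
+//                     .WithInitialValue(ThrowerVec{kSmallSize})
+//                     .WithContracts(InlinedVectorInvariants<ThrowerVec>);
+//   EXPECT_TRUE(tester.Test([](ThrowerVec* vec) { vec->emplace_back(); }));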
+// TODO(johnsoncj): Add `testing::NoThrowGuarantee` to the framework +template +bool NoThrowGuarantee(VecT* /* vec */) { + return false; +} + +TYPED_TEST(NoSizeTest, DefaultConstructor) { + using VecT = typename TypeParam::VecT; + using allocator_type = typename VecT::allocator_type; + + testing::TestThrowingCtor(); + + testing::TestThrowingCtor(allocator_type{}); +} + +TYPED_TEST(OneSizeTest, SizeConstructor) { + using VecT = typename TypeParam::VecT; + using allocator_type = typename VecT::allocator_type; + constexpr static auto size = TypeParam::GetSizeAt(0); + + testing::TestThrowingCtor(size); + + testing::TestThrowingCtor(size, allocator_type{}); +} + +TYPED_TEST(OneSizeTest, SizeRefConstructor) { + using VecT = typename TypeParam::VecT; + using value_type = typename VecT::value_type; + using allocator_type = typename VecT::allocator_type; + constexpr static auto size = TypeParam::GetSizeAt(0); + + testing::TestThrowingCtor(size, value_type{}); + + testing::TestThrowingCtor(size, value_type{}, allocator_type{}); +} + +TYPED_TEST(OneSizeTest, InitializerListConstructor) { + using VecT = typename TypeParam::VecT; + using value_type = typename VecT::value_type; + using allocator_type = typename VecT::allocator_type; + constexpr static auto size = TypeParam::GetSizeAt(0); + + testing::TestThrowingCtor( + ABSL_INTERNAL_MAKE_INIT_LIST(value_type, size)); + + testing::TestThrowingCtor( + ABSL_INTERNAL_MAKE_INIT_LIST(value_type, size), allocator_type{}); +} + +TYPED_TEST(OneSizeTest, RangeConstructor) { + using VecT = typename TypeParam::VecT; + using value_type = typename VecT::value_type; + using allocator_type = typename VecT::allocator_type; + constexpr static auto size = TypeParam::GetSizeAt(0); + + std::array arr{}; + + testing::TestThrowingCtor(arr.begin(), arr.end()); + + testing::TestThrowingCtor(arr.begin(), arr.end(), allocator_type{}); +} + +TYPED_TEST(OneSizeTest, CopyConstructor) { + using VecT = typename TypeParam::VecT; + using allocator_type = typename VecT::allocator_type; + constexpr static auto size = TypeParam::GetSizeAt(0); + + VecT other_vec{size}; + + testing::TestThrowingCtor(other_vec); + + testing::TestThrowingCtor(other_vec, allocator_type{}); +} + +TYPED_TEST(OneSizeTest, MoveConstructor) { + using VecT = typename TypeParam::VecT; + using allocator_type = typename VecT::allocator_type; + constexpr static auto size = TypeParam::GetSizeAt(0); + + if (!absl::allocator_is_nothrow::value) { + testing::TestThrowingCtor(VecT{size}); + + testing::TestThrowingCtor(VecT{size}, allocator_type{}); + } +} + +TYPED_TEST(TwoSizeTest, Assign) { + using VecT = typename TypeParam::VecT; + using value_type = typename VecT::value_type; + constexpr static auto from_size = TypeParam::GetSizeAt(0); + constexpr static auto to_size = TypeParam::GetSizeAt(1); + + auto tester = testing::MakeExceptionSafetyTester() + .WithInitialValue(VecT{from_size}) + .WithContracts(InlinedVectorInvariants); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + *vec = ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size); + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + VecT other_vec{to_size}; + *vec = other_vec; + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + VecT other_vec{to_size}; + *vec = std::move(other_vec); + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + value_type val{}; + vec->assign(to_size, val); + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + vec->assign(ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size)); + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + std::array arr{}; + 
vec->assign(arr.begin(), arr.end()); + })); +} + +TYPED_TEST(TwoSizeTest, Resize) { + using VecT = typename TypeParam::VecT; + using value_type = typename VecT::value_type; + constexpr static auto from_size = TypeParam::GetSizeAt(0); + constexpr static auto to_size = TypeParam::GetSizeAt(1); + + auto tester = testing::MakeExceptionSafetyTester() + .WithInitialValue(VecT{from_size}) + .WithContracts(InlinedVectorInvariants, + testing::strong_guarantee); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + vec->resize(to_size); // + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + vec->resize(to_size, value_type{}); // + })); +} + +TYPED_TEST(OneSizeTest, Insert) { + using VecT = typename TypeParam::VecT; + using value_type = typename VecT::value_type; + constexpr static auto from_size = TypeParam::GetSizeAt(0); + + auto tester = testing::MakeExceptionSafetyTester() + .WithInitialValue(VecT{from_size}) + .WithContracts(InlinedVectorInvariants); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin(); + vec->insert(it, value_type{}); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() / 2); + vec->insert(it, value_type{}); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->end(); + vec->insert(it, value_type{}); + })); +} + +TYPED_TEST(TwoSizeTest, Insert) { + using VecT = typename TypeParam::VecT; + using value_type = typename VecT::value_type; + constexpr static auto from_size = TypeParam::GetSizeAt(0); + constexpr static auto count = TypeParam::GetSizeAt(1); + + auto tester = testing::MakeExceptionSafetyTester() + .WithInitialValue(VecT{from_size}) + .WithContracts(InlinedVectorInvariants); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin(); + vec->insert(it, count, value_type{}); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() / 2); + vec->insert(it, count, value_type{}); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->end(); + vec->insert(it, count, value_type{}); + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin(); + vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count)); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() / 2); + vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count)); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->end(); + vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count)); + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin(); + std::array arr{}; + vec->insert(it, arr.begin(), arr.end()); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() / 2); + std::array arr{}; + vec->insert(it, arr.begin(), arr.end()); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->end(); + std::array arr{}; + vec->insert(it, arr.begin(), arr.end()); + })); +} + +TYPED_TEST(OneSizeTest, EmplaceBack) { + using VecT = typename TypeParam::VecT; + constexpr static auto size = TypeParam::GetSizeAt(0); + + // For testing calls to `emplace_back(...)` that reallocate. + VecT full_vec{size}; + full_vec.resize(full_vec.capacity()); + + // For testing calls to `emplace_back(...)` that don't reallocate. 
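+  // (Editor's note: resizing `full_vec` up to its exact capacity above
+  // guarantees the next `emplace_back(...)` must reallocate, while the
+  // `reserve(size + 1)` below guarantees `nonfull_vec` has spare capacity,
+  // so both the reallocating and non-reallocating code paths are exercised
+  // under exception injection.)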
+ VecT nonfull_vec{size}; + nonfull_vec.reserve(size + 1); + + auto tester = testing::MakeExceptionSafetyTester().WithContracts( + InlinedVectorInvariants); + + EXPECT_TRUE(tester.WithInitialValue(nonfull_vec).Test([](VecT* vec) { + vec->emplace_back(); + })); + + EXPECT_TRUE(tester.WithInitialValue(full_vec).Test( + [](VecT* vec) { vec->emplace_back(); })); +} + +TYPED_TEST(OneSizeTest, PopBack) { + using VecT = typename TypeParam::VecT; + constexpr static auto size = TypeParam::GetSizeAt(0); + + auto tester = testing::MakeExceptionSafetyTester() + .WithInitialValue(VecT{size}) + .WithContracts(NoThrowGuarantee); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + vec->pop_back(); // + })); +} + +TYPED_TEST(OneSizeTest, Erase) { + using VecT = typename TypeParam::VecT; + constexpr static auto size = TypeParam::GetSizeAt(0); + + auto tester = testing::MakeExceptionSafetyTester() + .WithInitialValue(VecT{size}) + .WithContracts(InlinedVectorInvariants); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin(); + vec->erase(it); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() / 2); + vec->erase(it); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() - 1); + vec->erase(it); + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin(); + vec->erase(it, it); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() / 2); + vec->erase(it, it); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() - 1); + vec->erase(it, it); + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin(); + vec->erase(it, it + 1); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() / 2); + vec->erase(it, it + 1); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() - 1); + vec->erase(it, it + 1); + })); +} + +TYPED_TEST(OneSizeTest, Clear) { + using VecT = typename TypeParam::VecT; + constexpr static auto size = TypeParam::GetSizeAt(0); + + auto tester = testing::MakeExceptionSafetyTester() + .WithInitialValue(VecT{size}) + .WithContracts(NoThrowGuarantee); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + vec->clear(); // + })); +} + +TYPED_TEST(TwoSizeTest, Reserve) { + using VecT = typename TypeParam::VecT; + constexpr static auto from_size = TypeParam::GetSizeAt(0); + constexpr static auto to_capacity = TypeParam::GetSizeAt(1); + + auto tester = testing::MakeExceptionSafetyTester() + .WithInitialValue(VecT{from_size}) + .WithContracts(InlinedVectorInvariants); + + EXPECT_TRUE(tester.Test([](VecT* vec) { vec->reserve(to_capacity); })); +} + +TYPED_TEST(OneSizeTest, ShrinkToFit) { + using VecT = typename TypeParam::VecT; + constexpr static auto size = TypeParam::GetSizeAt(0); + + auto tester = testing::MakeExceptionSafetyTester() + .WithInitialValue(VecT{size}) + .WithContracts(InlinedVectorInvariants); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + vec->shrink_to_fit(); // + })); +} + +TYPED_TEST(TwoSizeTest, Swap) { + using VecT = typename TypeParam::VecT; + constexpr static auto from_size = TypeParam::GetSizeAt(0); + constexpr static auto to_size = TypeParam::GetSizeAt(1); + + auto tester = testing::MakeExceptionSafetyTester() + .WithInitialValue(VecT{from_size}) + .WithContracts(InlinedVectorInvariants); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + VecT other_vec{to_size}; + vec->swap(other_vec); + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + using std::swap; 
+ VecT other_vec{to_size}; + swap(*vec, other_vec); + })); +} + +} // namespace + +#endif // defined(ABSL_HAVE_EXCEPTIONS) diff --git a/base/abseil/absl/container/inlined_vector_test.cc b/base/abseil/absl/container/inlined_vector_test.cc new file mode 100644 index 0000000..2c9b0d0 --- /dev/null +++ b/base/abseil/absl/container/inlined_vector_test.cc @@ -0,0 +1,1800 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/inlined_vector.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/exception_testing.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/macros.h" +#include "absl/container/internal/counting_allocator.h" +#include "absl/container/internal/test_instance_tracker.h" +#include "absl/hash/hash_testing.h" +#include "absl/memory/memory.h" +#include "absl/strings/str_cat.h" + +namespace { + +using absl::container_internal::CountingAllocator; +using absl::test_internal::CopyableMovableInstance; +using absl::test_internal::CopyableOnlyInstance; +using absl::test_internal::InstanceTracker; +using testing::AllOf; +using testing::Each; +using testing::ElementsAre; +using testing::ElementsAreArray; +using testing::Eq; +using testing::Gt; +using testing::PrintToString; + +using IntVec = absl::InlinedVector; + +MATCHER_P(SizeIs, n, "") { + return testing::ExplainMatchResult(n, arg.size(), result_listener); +} + +MATCHER_P(CapacityIs, n, "") { + return testing::ExplainMatchResult(n, arg.capacity(), result_listener); +} + +MATCHER_P(ValueIs, e, "") { + return testing::ExplainMatchResult(e, arg.value(), result_listener); +} + +// TODO(bsamwel): Add support for movable-only types. + +// Test fixture for typed tests on BaseCountedInstance derived classes, see +// test_instance_tracker.h. +template +class InstanceTest : public ::testing::Test {}; +TYPED_TEST_SUITE_P(InstanceTest); + +// A simple reference counted class to make sure that the proper elements are +// destroyed in the erase(begin, end) test. 
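+//
+// Editor's note (illustrative, not upstream code): the technique is an
+// external live-instance counter -- construction and copy-construction
+// increment *count_, destruction decrements it -- so a test can observe
+// exactly which elements were destroyed:
+//
+//   int count = 0;
+//   {
+//     RefCounted a(1, &count);   // count == 1
+//     RefCounted b(a);           // count == 2
+//   }                            // both destroyed: count == 0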
+class RefCounted { + public: + RefCounted(int value, int* count) : value_(value), count_(count) { Ref(); } + + RefCounted(const RefCounted& v) : value_(v.value_), count_(v.count_) { + Ref(); + } + + ~RefCounted() { + Unref(); + count_ = nullptr; + } + + friend void swap(RefCounted& a, RefCounted& b) { + using std::swap; + swap(a.value_, b.value_); + swap(a.count_, b.count_); + } + + RefCounted& operator=(RefCounted v) { + using std::swap; + swap(*this, v); + return *this; + } + + void Ref() const { + ABSL_RAW_CHECK(count_ != nullptr, ""); + ++(*count_); + } + + void Unref() const { + --(*count_); + ABSL_RAW_CHECK(*count_ >= 0, ""); + } + + int value_; + int* count_; +}; + +using RefCountedVec = absl::InlinedVector; + +// A class with a vtable pointer +class Dynamic { + public: + virtual ~Dynamic() {} +}; + +using DynamicVec = absl::InlinedVector; + +// Append 0..len-1 to *v +template +static void Fill(Container* v, int len, int offset = 0) { + for (int i = 0; i < len; i++) { + v->push_back(i + offset); + } +} + +static IntVec Fill(int len, int offset = 0) { + IntVec v; + Fill(&v, len, offset); + return v; +} + +TEST(IntVec, SimpleOps) { + for (int len = 0; len < 20; len++) { + IntVec v; + const IntVec& cv = v; // const alias + + Fill(&v, len); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + + for (int i = 0; i < len; i++) { + EXPECT_EQ(i, v[i]); + EXPECT_EQ(i, v.at(i)); + } + EXPECT_EQ(v.begin(), v.data()); + EXPECT_EQ(cv.begin(), cv.data()); + + int counter = 0; + for (IntVec::iterator iter = v.begin(); iter != v.end(); ++iter) { + EXPECT_EQ(counter, *iter); + counter++; + } + EXPECT_EQ(counter, len); + + counter = 0; + for (IntVec::const_iterator iter = v.begin(); iter != v.end(); ++iter) { + EXPECT_EQ(counter, *iter); + counter++; + } + EXPECT_EQ(counter, len); + + counter = 0; + for (IntVec::const_iterator iter = v.cbegin(); iter != v.cend(); ++iter) { + EXPECT_EQ(counter, *iter); + counter++; + } + EXPECT_EQ(counter, len); + + if (len > 0) { + EXPECT_EQ(0, v.front()); + EXPECT_EQ(len - 1, v.back()); + v.pop_back(); + EXPECT_EQ(len - 1, v.size()); + for (int i = 0; i < v.size(); ++i) { + EXPECT_EQ(i, v[i]); + EXPECT_EQ(i, v.at(i)); + } + } + } +} + +TEST(IntVec, PopBackNoOverflow) { + IntVec v = {1}; + v.pop_back(); + EXPECT_EQ(v.size(), 0); +} + +TEST(IntVec, AtThrows) { + IntVec v = {1, 2, 3}; + EXPECT_EQ(v.at(2), 3); + ABSL_BASE_INTERNAL_EXPECT_FAIL(v.at(3), std::out_of_range, + "failed bounds check"); +} + +TEST(IntVec, ReverseIterator) { + for (int len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + + int counter = len; + for (IntVec::reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); + + counter = len; + for (IntVec::const_reverse_iterator iter = v.rbegin(); iter != v.rend(); + ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); + + counter = len; + for (IntVec::const_reverse_iterator iter = v.crbegin(); iter != v.crend(); + ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); + } +} + +TEST(IntVec, Erase) { + for (int len = 1; len < 20; len++) { + for (int i = 0; i < len; ++i) { + IntVec v; + Fill(&v, len); + v.erase(v.begin() + i); + EXPECT_EQ(len - 1, v.size()); + for (int j = 0; j < i; ++j) { + EXPECT_EQ(j, v[j]); + } + for (int j = i; j < len - 1; ++j) { + EXPECT_EQ(j + 1, v[j]); + } + } + } +} + +// At the end of this test loop, the elements between [erase_begin, erase_end) +// should have reference counts == 0, 
and all others elements should have +// reference counts == 1. +TEST(RefCountedVec, EraseBeginEnd) { + for (int len = 1; len < 20; ++len) { + for (int erase_begin = 0; erase_begin < len; ++erase_begin) { + for (int erase_end = erase_begin; erase_end <= len; ++erase_end) { + std::vector counts(len, 0); + RefCountedVec v; + for (int i = 0; i < len; ++i) { + v.push_back(RefCounted(i, &counts[i])); + } + + int erase_len = erase_end - erase_begin; + + v.erase(v.begin() + erase_begin, v.begin() + erase_end); + + EXPECT_EQ(len - erase_len, v.size()); + + // Check the elements before the first element erased. + for (int i = 0; i < erase_begin; ++i) { + EXPECT_EQ(i, v[i].value_); + } + + // Check the elements after the first element erased. + for (int i = erase_begin; i < v.size(); ++i) { + EXPECT_EQ(i + erase_len, v[i].value_); + } + + // Check that the elements at the beginning are preserved. + for (int i = 0; i < erase_begin; ++i) { + EXPECT_EQ(1, counts[i]); + } + + // Check that the erased elements are destroyed + for (int i = erase_begin; i < erase_end; ++i) { + EXPECT_EQ(0, counts[i]); + } + + // Check that the elements at the end are preserved. + for (int i = erase_end; i < len; ++i) { + EXPECT_EQ(1, counts[i]); + } + } + } + } +} + +struct NoDefaultCtor { + explicit NoDefaultCtor(int) {} +}; +struct NoCopy { + NoCopy() {} + NoCopy(const NoCopy&) = delete; +}; +struct NoAssign { + NoAssign() {} + NoAssign& operator=(const NoAssign&) = delete; +}; +struct MoveOnly { + MoveOnly() {} + MoveOnly(MoveOnly&&) = default; + MoveOnly& operator=(MoveOnly&&) = default; +}; +TEST(InlinedVectorTest, NoDefaultCtor) { + absl::InlinedVector v(10, NoDefaultCtor(2)); + (void)v; +} +TEST(InlinedVectorTest, NoCopy) { + absl::InlinedVector v(10); + (void)v; +} +TEST(InlinedVectorTest, NoAssign) { + absl::InlinedVector v(10); + (void)v; +} +TEST(InlinedVectorTest, MoveOnly) { + absl::InlinedVector v; + v.push_back(MoveOnly{}); + v.push_back(MoveOnly{}); + v.push_back(MoveOnly{}); + v.erase(v.begin()); + v.push_back(MoveOnly{}); + v.erase(v.begin(), v.begin() + 1); + v.insert(v.begin(), MoveOnly{}); + v.emplace(v.begin()); + v.emplace(v.begin(), MoveOnly{}); +} +TEST(InlinedVectorTest, Noexcept) { + EXPECT_TRUE(std::is_nothrow_move_constructible::value); + EXPECT_TRUE((std::is_nothrow_move_constructible< + absl::InlinedVector>::value)); + + struct MoveCanThrow { + MoveCanThrow(MoveCanThrow&&) {} + }; + EXPECT_EQ(absl::default_allocator_is_nothrow::value, + (std::is_nothrow_move_constructible< + absl::InlinedVector>::value)); +} + +TEST(InlinedVectorTest, EmplaceBack) { + absl::InlinedVector, 1> v; + + auto& inlined_element = v.emplace_back("answer", 42); + EXPECT_EQ(&inlined_element, &v[0]); + EXPECT_EQ(inlined_element.first, "answer"); + EXPECT_EQ(inlined_element.second, 42); + + auto& allocated_element = v.emplace_back("taxicab", 1729); + EXPECT_EQ(&allocated_element, &v[1]); + EXPECT_EQ(allocated_element.first, "taxicab"); + EXPECT_EQ(allocated_element.second, 1729); +} + +TEST(InlinedVectorTest, ShrinkToFitGrowingVector) { + absl::InlinedVector, 1> v; + + v.shrink_to_fit(); + EXPECT_EQ(v.capacity(), 1); + + v.emplace_back("answer", 42); + v.shrink_to_fit(); + EXPECT_EQ(v.capacity(), 1); + + v.emplace_back("taxicab", 1729); + EXPECT_GE(v.capacity(), 2); + v.shrink_to_fit(); + EXPECT_EQ(v.capacity(), 2); + + v.reserve(100); + EXPECT_GE(v.capacity(), 100); + v.shrink_to_fit(); + EXPECT_EQ(v.capacity(), 2); +} + +TEST(InlinedVectorTest, ShrinkToFitEdgeCases) { + { + absl::InlinedVector, 1> v; + 
v.emplace_back("answer", 42); + v.emplace_back("taxicab", 1729); + EXPECT_GE(v.capacity(), 2); + v.pop_back(); + v.shrink_to_fit(); + EXPECT_EQ(v.capacity(), 1); + EXPECT_EQ(v[0].first, "answer"); + EXPECT_EQ(v[0].second, 42); + } + + { + absl::InlinedVector v(100); + v.resize(0); + v.shrink_to_fit(); + EXPECT_EQ(v.capacity(), 2); // inlined capacity + } + + { + absl::InlinedVector v(100); + v.resize(1); + v.shrink_to_fit(); + EXPECT_EQ(v.capacity(), 2); // inlined capacity + } + + { + absl::InlinedVector v(100); + v.resize(2); + v.shrink_to_fit(); + EXPECT_EQ(v.capacity(), 2); + } + + { + absl::InlinedVector v(100); + v.resize(3); + v.shrink_to_fit(); + EXPECT_EQ(v.capacity(), 3); + } +} + +TEST(IntVec, Insert) { + for (int len = 0; len < 20; len++) { + for (int pos = 0; pos <= len; pos++) { + { + // Single element + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + std_v.insert(std_v.begin() + pos, 9999); + IntVec::iterator it = v.insert(v.cbegin() + pos, 9999); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // n elements + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + IntVec::size_type n = 5; + std_v.insert(std_v.begin() + pos, n, 9999); + IntVec::iterator it = v.insert(v.cbegin() + pos, n, 9999); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Iterator range (random access iterator) + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + const std::vector input = {9999, 8888, 7777}; + std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend()); + IntVec::iterator it = + v.insert(v.cbegin() + pos, input.cbegin(), input.cend()); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Iterator range (forward iterator) + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + const std::forward_list input = {9999, 8888, 7777}; + std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend()); + IntVec::iterator it = + v.insert(v.cbegin() + pos, input.cbegin(), input.cend()); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Iterator range (input iterator) + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + std_v.insert(std_v.begin() + pos, {9999, 8888, 7777}); + std::istringstream input("9999 8888 7777"); + IntVec::iterator it = + v.insert(v.cbegin() + pos, std::istream_iterator(input), + std::istream_iterator()); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Initializer list + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + std_v.insert(std_v.begin() + pos, {9999, 8888}); + IntVec::iterator it = v.insert(v.cbegin() + pos, {9999, 8888}); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + } + } +} + +TEST(RefCountedVec, InsertConstructorDestructor) { + // Make sure the proper construction/destruction happen during insert + // operations. 
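+  // (Editor's note: every original element should stay at refcount 1
+  // throughout -- insert may copy elements while shifting them, but each
+  // temporary it creates must also be destroyed -- and the inserted value
+  // accounts for exactly two live counts: the local `insert_element` plus
+  // the copy stored in the vector.)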
+ for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + for (int pos = 0; pos <= len; pos++) { + SCOPED_TRACE(pos); + std::vector counts(len, 0); + int inserted_count = 0; + RefCountedVec v; + for (int i = 0; i < len; ++i) { + SCOPED_TRACE(i); + v.push_back(RefCounted(i, &counts[i])); + } + + EXPECT_THAT(counts, Each(Eq(1))); + + RefCounted insert_element(9999, &inserted_count); + EXPECT_EQ(1, inserted_count); + v.insert(v.begin() + pos, insert_element); + EXPECT_EQ(2, inserted_count); + // Check that the elements at the end are preserved. + EXPECT_THAT(counts, Each(Eq(1))); + EXPECT_EQ(2, inserted_count); + } + } +} + +TEST(IntVec, Resize) { + for (int len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + + // Try resizing up and down by k elements + static const int kResizeElem = 1000000; + for (int k = 0; k < 10; k++) { + // Enlarging resize + v.resize(len + k, kResizeElem); + EXPECT_EQ(len + k, v.size()); + EXPECT_LE(len + k, v.capacity()); + for (int i = 0; i < len + k; i++) { + if (i < len) { + EXPECT_EQ(i, v[i]); + } else { + EXPECT_EQ(kResizeElem, v[i]); + } + } + + // Shrinking resize + v.resize(len, kResizeElem); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + for (int i = 0; i < len; i++) { + EXPECT_EQ(i, v[i]); + } + } + } +} + +TEST(IntVec, InitWithLength) { + for (int len = 0; len < 20; len++) { + IntVec v(len, 7); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + for (int i = 0; i < len; i++) { + EXPECT_EQ(7, v[i]); + } + } +} + +TEST(IntVec, CopyConstructorAndAssignment) { + for (int len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + + IntVec v2(v); + EXPECT_TRUE(v == v2) << PrintToString(v) << PrintToString(v2); + + for (int start_len = 0; start_len < 20; start_len++) { + IntVec v3; + Fill(&v3, start_len, 99); // Add dummy elements that should go away + v3 = v; + EXPECT_TRUE(v == v3) << PrintToString(v) << PrintToString(v3); + } + } +} + +TEST(IntVec, AliasingCopyAssignment) { + for (int len = 0; len < 20; ++len) { + IntVec original; + Fill(&original, len); + IntVec dup = original; + dup = *&dup; + EXPECT_EQ(dup, original); + } +} + +TEST(IntVec, MoveConstructorAndAssignment) { + for (int len = 0; len < 20; len++) { + IntVec v_in; + const int inlined_capacity = v_in.capacity(); + Fill(&v_in, len); + EXPECT_EQ(len, v_in.size()); + EXPECT_LE(len, v_in.capacity()); + + { + IntVec v_temp(v_in); + auto* old_data = v_temp.data(); + IntVec v_out(std::move(v_temp)); + EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out); + if (v_in.size() > inlined_capacity) { + // Allocation is moved as a whole, data stays in place. + EXPECT_TRUE(v_out.data() == old_data); + } else { + EXPECT_FALSE(v_out.data() == old_data); + } + } + for (int start_len = 0; start_len < 20; start_len++) { + IntVec v_out; + Fill(&v_out, start_len, 99); // Add dummy elements that should go away + IntVec v_temp(v_in); + auto* old_data = v_temp.data(); + v_out = std::move(v_temp); + EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out); + if (v_in.size() > inlined_capacity) { + // Allocation is moved as a whole, data stays in place. 
+ EXPECT_TRUE(v_out.data() == old_data); + } else { + EXPECT_FALSE(v_out.data() == old_data); + } + } + } +} + +class NotTriviallyDestructible { + public: + NotTriviallyDestructible() : p_(new int(1)) {} + explicit NotTriviallyDestructible(int i) : p_(new int(i)) {} + + NotTriviallyDestructible(const NotTriviallyDestructible& other) + : p_(new int(*other.p_)) {} + + NotTriviallyDestructible& operator=(const NotTriviallyDestructible& other) { + p_ = absl::make_unique(*other.p_); + return *this; + } + + bool operator==(const NotTriviallyDestructible& other) const { + return *p_ == *other.p_; + } + + private: + std::unique_ptr p_; +}; + +TEST(AliasingTest, Emplace) { + for (int i = 2; i < 20; ++i) { + absl::InlinedVector vec; + for (int j = 0; j < i; ++j) { + vec.push_back(NotTriviallyDestructible(j)); + } + vec.emplace(vec.begin(), vec[0]); + EXPECT_EQ(vec[0], vec[1]); + vec.emplace(vec.begin() + i / 2, vec[i / 2]); + EXPECT_EQ(vec[i / 2], vec[i / 2 + 1]); + vec.emplace(vec.end() - 1, vec.back()); + EXPECT_EQ(vec[vec.size() - 2], vec.back()); + } +} + +TEST(AliasingTest, InsertWithCount) { + for (int i = 1; i < 20; ++i) { + absl::InlinedVector vec; + for (int j = 0; j < i; ++j) { + vec.push_back(NotTriviallyDestructible(j)); + } + for (int n = 0; n < 5; ++n) { + // We use back where we can because it's guaranteed to become invalidated + vec.insert(vec.begin(), n, vec.back()); + auto b = vec.begin(); + EXPECT_TRUE( + std::all_of(b, b + n, [&vec](const NotTriviallyDestructible& x) { + return x == vec.back(); + })); + + auto m_idx = vec.size() / 2; + vec.insert(vec.begin() + m_idx, n, vec.back()); + auto m = vec.begin() + m_idx; + EXPECT_TRUE( + std::all_of(m, m + n, [&vec](const NotTriviallyDestructible& x) { + return x == vec.back(); + })); + + // We want distinct values so the equality test is meaningful, + // vec[vec.size() - 1] is also almost always invalidated. + auto old_e = vec.size() - 1; + auto val = vec[old_e]; + vec.insert(vec.end(), n, vec[old_e]); + auto e = vec.begin() + old_e; + EXPECT_TRUE(std::all_of( + e, e + n, + [&val](const NotTriviallyDestructible& x) { return x == val; })); + } + } +} + +TEST(OverheadTest, Storage) { + // Check for size overhead. + // In particular, ensure that std::allocator doesn't cost anything to store. + // The union should be absorbing some of the allocation bookkeeping overhead + // in the larger vectors, leaving only the size_ field as overhead. 
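+  //
+  // Editor's note (worked example, assuming 8-byte pointers): the inline
+  // union must be at least as big as the heap representation (data pointer
+  // + capacity, i.e. 2 words), so InlinedVector<int*, 1> occupies 3 words
+  // in total -- 2 words of overhead beyond its single inline element. For
+  // N >= 2 the N inline elements fill the union themselves, leaving only
+  // the 1-word size_ field, which is what the subtractions below assert.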
+ EXPECT_EQ(2 * sizeof(int*), + sizeof(absl::InlinedVector) - 1 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 2 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 3 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 4 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 5 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 6 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 7 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 8 * sizeof(int*)); +} + +TEST(IntVec, Clear) { + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + IntVec v; + Fill(&v, len); + v.clear(); + EXPECT_EQ(0, v.size()); + EXPECT_EQ(v.begin(), v.end()); + } +} + +TEST(IntVec, Reserve) { + for (int len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + + for (int newlen = 0; newlen < 100; newlen++) { + const int* start_rep = v.data(); + v.reserve(newlen); + const int* final_rep = v.data(); + if (newlen <= len) { + EXPECT_EQ(start_rep, final_rep); + } + EXPECT_LE(newlen, v.capacity()); + + // Filling up to newlen should not change rep + while (v.size() < newlen) { + v.push_back(0); + } + EXPECT_EQ(final_rep, v.data()); + } + } +} + +TEST(StringVec, SelfRefPushBack) { + std::vector std_v; + absl::InlinedVector v; + const std::string s = "A quite long std::string to ensure heap."; + std_v.push_back(s); + v.push_back(s); + for (int i = 0; i < 20; ++i) { + EXPECT_THAT(v, ElementsAreArray(std_v)); + + v.push_back(v.back()); + std_v.push_back(std_v.back()); + } + EXPECT_THAT(v, ElementsAreArray(std_v)); +} + +TEST(StringVec, SelfRefPushBackWithMove) { + std::vector std_v; + absl::InlinedVector v; + const std::string s = "A quite long std::string to ensure heap."; + std_v.push_back(s); + v.push_back(s); + for (int i = 0; i < 20; ++i) { + EXPECT_EQ(v.back(), std_v.back()); + + v.push_back(std::move(v.back())); + std_v.push_back(std::move(std_v.back())); + } + EXPECT_EQ(v.back(), std_v.back()); +} + +TEST(StringVec, SelfMove) { + const std::string s = "A quite long std::string to ensure heap."; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + absl::InlinedVector v; + for (int i = 0; i < len; ++i) { + SCOPED_TRACE(i); + v.push_back(s); + } + // Indirection necessary to avoid compiler warning. + v = std::move(*(&v)); + // Ensure that the inlined vector is still in a valid state by copying it. + // We don't expect specific contents since a self-move results in an + // unspecified valid state. 
+ std::vector copy(v.begin(), v.end()); + } +} + +TEST(IntVec, Swap) { + for (int l1 = 0; l1 < 20; l1++) { + SCOPED_TRACE(l1); + for (int l2 = 0; l2 < 20; l2++) { + SCOPED_TRACE(l2); + IntVec a = Fill(l1, 0); + IntVec b = Fill(l2, 100); + { + using std::swap; + swap(a, b); + } + EXPECT_EQ(l1, b.size()); + EXPECT_EQ(l2, a.size()); + for (int i = 0; i < l1; i++) { + SCOPED_TRACE(i); + EXPECT_EQ(i, b[i]); + } + for (int i = 0; i < l2; i++) { + SCOPED_TRACE(i); + EXPECT_EQ(100 + i, a[i]); + } + } + } +} + +TYPED_TEST_P(InstanceTest, Swap) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + for (int l1 = 0; l1 < 20; l1++) { + SCOPED_TRACE(l1); + for (int l2 = 0; l2 < 20; l2++) { + SCOPED_TRACE(l2); + InstanceTracker tracker; + InstanceVec a, b; + const size_t inlined_capacity = a.capacity(); + auto min_len = std::min(l1, l2); + auto max_len = std::max(l1, l2); + for (int i = 0; i < l1; i++) a.push_back(Instance(i)); + for (int i = 0; i < l2; i++) b.push_back(Instance(100 + i)); + EXPECT_EQ(tracker.instances(), l1 + l2); + tracker.ResetCopiesMovesSwaps(); + { + using std::swap; + swap(a, b); + } + EXPECT_EQ(tracker.instances(), l1 + l2); + if (a.size() > inlined_capacity && b.size() > inlined_capacity) { + EXPECT_EQ(tracker.swaps(), 0); // Allocations are swapped. + EXPECT_EQ(tracker.moves(), 0); + } else if (a.size() <= inlined_capacity && b.size() <= inlined_capacity) { + EXPECT_EQ(tracker.swaps(), min_len); + EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()), + max_len - min_len); + } else { + // One is allocated and the other isn't. The allocation is transferred + // without copying elements, and the inlined instances are copied/moved. + EXPECT_EQ(tracker.swaps(), 0); + EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()), + min_len); + } + + EXPECT_EQ(l1, b.size()); + EXPECT_EQ(l2, a.size()); + for (int i = 0; i < l1; i++) { + EXPECT_EQ(i, b[i].value()); + } + for (int i = 0; i < l2; i++) { + EXPECT_EQ(100 + i, a[i].value()); + } + } + } +} + +TEST(IntVec, EqualAndNotEqual) { + IntVec a, b; + EXPECT_TRUE(a == b); + EXPECT_FALSE(a != b); + + a.push_back(3); + EXPECT_FALSE(a == b); + EXPECT_TRUE(a != b); + + b.push_back(3); + EXPECT_TRUE(a == b); + EXPECT_FALSE(a != b); + + b.push_back(7); + EXPECT_FALSE(a == b); + EXPECT_TRUE(a != b); + + a.push_back(6); + EXPECT_FALSE(a == b); + EXPECT_TRUE(a != b); + + a.clear(); + b.clear(); + for (int i = 0; i < 100; i++) { + a.push_back(i); + b.push_back(i); + EXPECT_TRUE(a == b); + EXPECT_FALSE(a != b); + + b[i] = b[i] + 1; + EXPECT_FALSE(a == b); + EXPECT_TRUE(a != b); + + b[i] = b[i] - 1; // Back to before + EXPECT_TRUE(a == b); + EXPECT_FALSE(a != b); + } +} + +TEST(IntVec, RelationalOps) { + IntVec a, b; + EXPECT_FALSE(a < b); + EXPECT_FALSE(b < a); + EXPECT_FALSE(a > b); + EXPECT_FALSE(b > a); + EXPECT_TRUE(a <= b); + EXPECT_TRUE(b <= a); + EXPECT_TRUE(a >= b); + EXPECT_TRUE(b >= a); + b.push_back(3); + EXPECT_TRUE(a < b); + EXPECT_FALSE(b < a); + EXPECT_FALSE(a > b); + EXPECT_TRUE(b > a); + EXPECT_TRUE(a <= b); + EXPECT_FALSE(b <= a); + EXPECT_FALSE(a >= b); + EXPECT_TRUE(b >= a); +} + +TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + InstanceTracker tracker; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + tracker.ResetCopiesMovesSwaps(); + + InstanceVec v; + const size_t inlined_capacity = v.capacity(); + for (int i = 0; i < len; i++) { + v.push_back(Instance(i)); + } + 
EXPECT_EQ(tracker.instances(), len); + EXPECT_GE(tracker.copies() + tracker.moves(), + len); // More due to reallocation. + tracker.ResetCopiesMovesSwaps(); + + // Enlarging resize() must construct some objects + tracker.ResetCopiesMovesSwaps(); + v.resize(len + 10, Instance(100)); + EXPECT_EQ(tracker.instances(), len + 10); + if (len <= inlined_capacity && len + 10 > inlined_capacity) { + EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + len); + } else { + // Only specify a minimum number of copies + moves. We don't want to + // depend on the reallocation policy here. + EXPECT_GE(tracker.copies() + tracker.moves(), + 10); // More due to reallocation. + } + + // Shrinking resize() must destroy some objects + tracker.ResetCopiesMovesSwaps(); + v.resize(len, Instance(100)); + EXPECT_EQ(tracker.instances(), len); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 0); + + // reserve() must not increase the number of initialized objects + SCOPED_TRACE("reserve"); + v.reserve(len + 1000); + EXPECT_EQ(tracker.instances(), len); + EXPECT_EQ(tracker.copies() + tracker.moves(), len); + + // pop_back() and erase() must destroy one object + if (len > 0) { + tracker.ResetCopiesMovesSwaps(); + v.pop_back(); + EXPECT_EQ(tracker.instances(), len - 1); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 0); + + if (!v.empty()) { + tracker.ResetCopiesMovesSwaps(); + v.erase(v.begin()); + EXPECT_EQ(tracker.instances(), len - 2); + EXPECT_EQ(tracker.copies() + tracker.moves(), len - 2); + } + } + + tracker.ResetCopiesMovesSwaps(); + int instances_before_empty_erase = tracker.instances(); + v.erase(v.begin(), v.begin()); + EXPECT_EQ(tracker.instances(), instances_before_empty_erase); + EXPECT_EQ(tracker.copies() + tracker.moves(), 0); + } +} + +TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnCopyConstruction) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + InstanceTracker tracker; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + tracker.ResetCopiesMovesSwaps(); + + InstanceVec v; + for (int i = 0; i < len; i++) { + v.push_back(Instance(i)); + } + EXPECT_EQ(tracker.instances(), len); + EXPECT_GE(tracker.copies() + tracker.moves(), + len); // More due to reallocation. + tracker.ResetCopiesMovesSwaps(); + { // Copy constructor should create 'len' more instances. + InstanceVec v_copy(v); + EXPECT_EQ(tracker.instances(), len + len); + EXPECT_EQ(tracker.copies(), len); + EXPECT_EQ(tracker.moves(), 0); + } + EXPECT_EQ(tracker.instances(), len); + } +} + +TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveConstruction) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + InstanceTracker tracker; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + tracker.ResetCopiesMovesSwaps(); + + InstanceVec v; + const size_t inlined_capacity = v.capacity(); + for (int i = 0; i < len; i++) { + v.push_back(Instance(i)); + } + EXPECT_EQ(tracker.instances(), len); + EXPECT_GE(tracker.copies() + tracker.moves(), + len); // More due to reallocation. + tracker.ResetCopiesMovesSwaps(); + { + InstanceVec v_copy(std::move(v)); + if (len > inlined_capacity) { + // Allocation is moved as a whole. + EXPECT_EQ(tracker.instances(), len); + EXPECT_EQ(tracker.live_instances(), len); + // Tests an implementation detail, don't rely on this in your code. 
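+        // (It is observably empty here because the whole heap buffer was
+        // transferred; with inline storage the elements are moved or copied
+        // individually instead -- see the else branch below.)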
+ EXPECT_EQ(v.size(), 0); // NOLINT misc-use-after-move + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 0); + } else { + EXPECT_EQ(tracker.instances(), len + len); + if (Instance::supports_move()) { + EXPECT_EQ(tracker.live_instances(), len); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), len); + } else { + EXPECT_EQ(tracker.live_instances(), len + len); + EXPECT_EQ(tracker.copies(), len); + EXPECT_EQ(tracker.moves(), 0); + } + } + EXPECT_EQ(tracker.swaps(), 0); + } + } +} + +TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnAssignment) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + InstanceTracker tracker; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + for (int longorshort = 0; longorshort <= 1; ++longorshort) { + SCOPED_TRACE(longorshort); + tracker.ResetCopiesMovesSwaps(); + + InstanceVec longer, shorter; + for (int i = 0; i < len; i++) { + longer.push_back(Instance(i)); + shorter.push_back(Instance(i)); + } + longer.push_back(Instance(len)); + EXPECT_EQ(tracker.instances(), len + len + 1); + EXPECT_GE(tracker.copies() + tracker.moves(), + len + len + 1); // More due to reallocation. + + tracker.ResetCopiesMovesSwaps(); + if (longorshort) { + shorter = longer; + EXPECT_EQ(tracker.instances(), (len + 1) + (len + 1)); + EXPECT_GE(tracker.copies() + tracker.moves(), + len + 1); // More due to reallocation. + } else { + longer = shorter; + EXPECT_EQ(tracker.instances(), len + len); + EXPECT_EQ(tracker.copies() + tracker.moves(), len); + } + } + } +} + +TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + InstanceTracker tracker; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + for (int longorshort = 0; longorshort <= 1; ++longorshort) { + SCOPED_TRACE(longorshort); + tracker.ResetCopiesMovesSwaps(); + + InstanceVec longer, shorter; + const int inlined_capacity = longer.capacity(); + for (int i = 0; i < len; i++) { + longer.push_back(Instance(i)); + shorter.push_back(Instance(i)); + } + longer.push_back(Instance(len)); + EXPECT_EQ(tracker.instances(), len + len + 1); + EXPECT_GE(tracker.copies() + tracker.moves(), + len + len + 1); // More due to reallocation. + + tracker.ResetCopiesMovesSwaps(); + int src_len; + if (longorshort) { + src_len = len + 1; + shorter = std::move(longer); + } else { + src_len = len; + longer = std::move(shorter); + } + if (src_len > inlined_capacity) { + // Allocation moved as a whole. + EXPECT_EQ(tracker.instances(), src_len); + EXPECT_EQ(tracker.live_instances(), src_len); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 0); + } else { + // Elements are all copied. + EXPECT_EQ(tracker.instances(), src_len + src_len); + if (Instance::supports_move()) { + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), src_len); + EXPECT_EQ(tracker.live_instances(), src_len); + } else { + EXPECT_EQ(tracker.copies(), src_len); + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), src_len + src_len); + } + } + EXPECT_EQ(tracker.swaps(), 0); + } + } +} + +TEST(CountElemAssign, SimpleTypeWithInlineBacking) { + for (size_t original_size = 0; original_size <= 5; ++original_size) { + SCOPED_TRACE(original_size); + // Original contents are [12345, 12345, ...] 
+    std::vector<int> original_contents(original_size, 12345);
+
+    absl::InlinedVector<int, 2> v(original_contents.begin(),
+                                  original_contents.end());
+    v.assign(2, 123);
+    EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(123, 123)));
+    if (original_size <= 2) {
+      // If the original had inline backing, it should stay inline.
+      EXPECT_EQ(2, v.capacity());
+    }
+  }
+}
+
+TEST(CountElemAssign, SimpleTypeWithAllocation) {
+  for (size_t original_size = 0; original_size <= 5; ++original_size) {
+    SCOPED_TRACE(original_size);
+    // Original contents are [12345, 12345, ...]
+    std::vector<int> original_contents(original_size, 12345);
+
+    absl::InlinedVector<int, 2> v(original_contents.begin(),
+                                  original_contents.end());
+    v.assign(3, 123);
+    EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(123, 123, 123)));
+    EXPECT_LE(v.size(), v.capacity());
+  }
+}
+
+TYPED_TEST_P(InstanceTest, CountElemAssignInlineBacking) {
+  using Instance = TypeParam;
+  for (size_t original_size = 0; original_size <= 5; ++original_size) {
+    SCOPED_TRACE(original_size);
+    // Original contents are [12345, 12345, ...]
+    std::vector<Instance> original_contents(original_size, Instance(12345));
+
+    absl::InlinedVector<Instance, 2> v(original_contents.begin(),
+                                       original_contents.end());
+    v.assign(2, Instance(123));
+    EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(ValueIs(123), ValueIs(123))));
+    if (original_size <= 2) {
+      // If the original had inline backing, it should stay inline.
+      EXPECT_EQ(2, v.capacity());
+    }
+  }
+}
+
+template <typename Instance>
+void InstanceCountElemAssignWithAllocationTest() {
+  for (size_t original_size = 0; original_size <= 5; ++original_size) {
+    SCOPED_TRACE(original_size);
+    // Original contents are [12345, 12345, ...]
+    std::vector<Instance> original_contents(original_size, Instance(12345));
+
+    absl::InlinedVector<Instance, 2> v(original_contents.begin(),
+                                       original_contents.end());
+    v.assign(3, Instance(123));
+    EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(ValueIs(123), ValueIs(123),
+                                                ValueIs(123))));
+    EXPECT_LE(v.size(), v.capacity());
+  }
+}
+TEST(CountElemAssign, WithAllocationCopyableInstance) {
+  InstanceCountElemAssignWithAllocationTest<CopyableOnlyInstance>();
+}
+TEST(CountElemAssign, WithAllocationCopyableMovableInstance) {
+  InstanceCountElemAssignWithAllocationTest<CopyableMovableInstance>();
+}
+
+TEST(RangedConstructor, SimpleType) {
+  std::vector<int> source_v = {4, 5, 6};
+  // First try to fit in inline backing
+  absl::InlinedVector<int, 4> v(source_v.begin(), source_v.end());
+  EXPECT_EQ(3, v.size());
+  EXPECT_EQ(4, v.capacity());  // Indication that we're still on inlined storage
+  EXPECT_EQ(4, v[0]);
+  EXPECT_EQ(5, v[1]);
+  EXPECT_EQ(6, v[2]);
+
+  // Now, force a re-allocate
+  absl::InlinedVector<int, 2> realloc_v(source_v.begin(), source_v.end());
+  EXPECT_EQ(3, realloc_v.size());
+  EXPECT_LT(2, realloc_v.capacity());
+  EXPECT_EQ(4, realloc_v[0]);
+  EXPECT_EQ(5, realloc_v[1]);
+  EXPECT_EQ(6, realloc_v[2]);
+}
+
+// Test for ranged constructors using Instance as the element type and
+// SourceContainer as the source container type.
+template <typename Instance, typename SourceContainer, size_t inlined_capacity>
+void InstanceRangedConstructorTestForContainer() {
+  InstanceTracker tracker;
+  SourceContainer source_v = {Instance(0), Instance(1)};
+  tracker.ResetCopiesMovesSwaps();
+  absl::InlinedVector<Instance, inlined_capacity> v(source_v.begin(),
+                                                    source_v.end());
+  EXPECT_EQ(2, v.size());
+  EXPECT_LT(1, v.capacity());
+  EXPECT_EQ(0, v[0].value());
+  EXPECT_EQ(1, v[1].value());
+  EXPECT_EQ(tracker.copies(), 2);
+  EXPECT_EQ(tracker.moves(), 0);
+}
+
+template <typename Instance, size_t inlined_capacity>
+void InstanceRangedConstructorTestWithCapacity() {
+  // Test with const and non-const, random access and non-random-access sources.
+ // TODO(bsamwel): Test with an input iterator source. + { + SCOPED_TRACE("std::list"); + InstanceRangedConstructorTestForContainer, + inlined_capacity>(); + { + SCOPED_TRACE("const std::list"); + InstanceRangedConstructorTestForContainer< + Instance, const std::list, inlined_capacity>(); + } + { + SCOPED_TRACE("std::vector"); + InstanceRangedConstructorTestForContainer, + inlined_capacity>(); + } + { + SCOPED_TRACE("const std::vector"); + InstanceRangedConstructorTestForContainer< + Instance, const std::vector, inlined_capacity>(); + } + } +} + +TYPED_TEST_P(InstanceTest, RangedConstructor) { + using Instance = TypeParam; + SCOPED_TRACE("capacity=1"); + InstanceRangedConstructorTestWithCapacity(); + SCOPED_TRACE("capacity=2"); + InstanceRangedConstructorTestWithCapacity(); +} + +TEST(RangedConstructor, ElementsAreConstructed) { + std::vector source_v = {"cat", "dog"}; + + // Force expansion and re-allocation of v. Ensures that when the vector is + // expanded that new elements are constructed. + absl::InlinedVector v(source_v.begin(), source_v.end()); + EXPECT_EQ("cat", v[0]); + EXPECT_EQ("dog", v[1]); +} + +TEST(RangedAssign, SimpleType) { + // Test for all combinations of original sizes (empty and non-empty inline, + // and out of line) and target sizes. + for (size_t original_size = 0; original_size <= 5; ++original_size) { + SCOPED_TRACE(original_size); + // Original contents are [12345, 12345, ...] + std::vector original_contents(original_size, 12345); + + for (size_t target_size = 0; target_size <= 5; ++target_size) { + SCOPED_TRACE(target_size); + + // New contents are [3, 4, ...] + std::vector new_contents; + for (size_t i = 0; i < target_size; ++i) { + new_contents.push_back(i + 3); + } + + absl::InlinedVector v(original_contents.begin(), + original_contents.end()); + v.assign(new_contents.begin(), new_contents.end()); + + EXPECT_EQ(new_contents.size(), v.size()); + EXPECT_LE(new_contents.size(), v.capacity()); + if (target_size <= 3 && original_size <= 3) { + // Storage should stay inline when target size is small. + EXPECT_EQ(3, v.capacity()); + } + EXPECT_THAT(v, ElementsAreArray(new_contents)); + } + } +} + +// Returns true if lhs and rhs have the same value. +template +static bool InstanceValuesEqual(const Instance& lhs, const Instance& rhs) { + return lhs.value() == rhs.value(); +} + +// Test for ranged assign() using Instance as the element type and +// SourceContainer as the source container type. +template +void InstanceRangedAssignTestForContainer() { + // Test for all combinations of original sizes (empty and non-empty inline, + // and out of line) and target sizes. + for (size_t original_size = 0; original_size <= 5; ++original_size) { + SCOPED_TRACE(original_size); + // Original contents are [12345, 12345, ...] + std::vector original_contents(original_size, Instance(12345)); + + for (size_t target_size = 0; target_size <= 5; ++target_size) { + SCOPED_TRACE(target_size); + + // New contents are [3, 4, ...] + // Generate data using a non-const container, because SourceContainer + // itself may be const. + // TODO(bsamwel): Test with an input iterator. 
+      std::vector<Instance> new_contents_in;
+      for (size_t i = 0; i < target_size; ++i) {
+        new_contents_in.push_back(Instance(i + 3));
+      }
+      SourceContainer new_contents(new_contents_in.begin(),
+                                   new_contents_in.end());
+
+      absl::InlinedVector<Instance, 3> v(original_contents.begin(),
+                                         original_contents.end());
+      v.assign(new_contents.begin(), new_contents.end());
+
+      EXPECT_EQ(new_contents.size(), v.size());
+      EXPECT_LE(new_contents.size(), v.capacity());
+      if (target_size <= 3 && original_size <= 3) {
+        // Storage should stay inline when target size is small.
+        EXPECT_EQ(3, v.capacity());
+      }
+      EXPECT_TRUE(std::equal(v.begin(), v.end(), new_contents.begin(),
+                             InstanceValuesEqual<Instance>));
+    }
+  }
+}
+
+TYPED_TEST_P(InstanceTest, RangedAssign) {
+  using Instance = TypeParam;
+  // Test with const and non-const, random access and non-random-access sources.
+  // TODO(bsamwel): Test with an input iterator source.
+  SCOPED_TRACE("std::list");
+  InstanceRangedAssignTestForContainer<Instance, std::list<Instance>>();
+  SCOPED_TRACE("const std::list");
+  InstanceRangedAssignTestForContainer<Instance, const std::list<Instance>>();
+  SCOPED_TRACE("std::vector");
+  InstanceRangedAssignTestForContainer<Instance, std::vector<Instance>>();
+  SCOPED_TRACE("const std::vector");
+  InstanceRangedAssignTestForContainer<Instance, const std::vector<Instance>>();
+}
+
+TEST(InitializerListConstructor, SimpleTypeWithInlineBacking) {
+  EXPECT_THAT((absl::InlinedVector<int, 4>{4, 5, 6}),
+              AllOf(SizeIs(3), CapacityIs(4), ElementsAre(4, 5, 6)));
+}
+
+TEST(InitializerListConstructor, SimpleTypeWithReallocationRequired) {
+  EXPECT_THAT((absl::InlinedVector<int, 2>{4, 5, 6}),
+              AllOf(SizeIs(3), CapacityIs(Gt(2)), ElementsAre(4, 5, 6)));
+}
+
+TEST(InitializerListConstructor, DisparateTypesInList) {
+  EXPECT_THAT((absl::InlinedVector<int, 2>{-7, 8ULL}), ElementsAre(-7, 8));
+
+  EXPECT_THAT((absl::InlinedVector<std::string, 2>{"foo", std::string("bar")}),
+              ElementsAre("foo", "bar"));
+}
+
+TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) {
+  EXPECT_THAT((absl::InlinedVector<CopyableMovableInstance, 1>{
+                  CopyableMovableInstance(0)}),
+              AllOf(SizeIs(1), CapacityIs(1), ElementsAre(ValueIs(0))));
+}
+
+TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) {
+  EXPECT_THAT(
+      (absl::InlinedVector<CopyableMovableInstance, 1>{
+          CopyableMovableInstance(0), CopyableMovableInstance(1)}),
+      AllOf(SizeIs(2), CapacityIs(Gt(1)), ElementsAre(ValueIs(0), ValueIs(1))));
+}
+
+TEST(InitializerListAssign, SimpleTypeFitsInlineBacking) {
+  for (size_t original_size = 0; original_size <= 4; ++original_size) {
+    SCOPED_TRACE(original_size);
+
+    absl::InlinedVector<int, 2> v1(original_size, 12345);
+    const size_t original_capacity_v1 = v1.capacity();
+    v1.assign({3});
+    EXPECT_THAT(
+        v1, AllOf(SizeIs(1), CapacityIs(original_capacity_v1), ElementsAre(3)));
+
+    absl::InlinedVector<int, 2> v2(original_size, 12345);
+    const size_t original_capacity_v2 = v2.capacity();
+    v2 = {3};
+    EXPECT_THAT(
+        v2, AllOf(SizeIs(1), CapacityIs(original_capacity_v2), ElementsAre(3)));
+  }
+}
+
+TEST(InitializerListAssign, SimpleTypeDoesNotFitInlineBacking) {
+  for (size_t original_size = 0; original_size <= 4; ++original_size) {
+    SCOPED_TRACE(original_size);
+    absl::InlinedVector<int, 2> v1(original_size, 12345);
+    v1.assign({3, 4, 5});
+    EXPECT_THAT(v1, AllOf(SizeIs(3), ElementsAre(3, 4, 5)));
+    EXPECT_LE(3, v1.capacity());
+
+    absl::InlinedVector<int, 2> v2(original_size, 12345);
+    v2 = {3, 4, 5};
+    EXPECT_THAT(v2, AllOf(SizeIs(3), ElementsAre(3, 4, 5)));
+    EXPECT_LE(3, v2.capacity());
+  }
+}
+
+TEST(InitializerListAssign, DisparateTypesInList) {
+  absl::InlinedVector<int, 2> v_int1;
+  v_int1.assign({-7, 8ULL});
+  EXPECT_THAT(v_int1, ElementsAre(-7, 8));
+
+  absl::InlinedVector<int, 2> v_int2;
+  v_int2 = {-7, 8ULL};
+
EXPECT_THAT(v_int2, ElementsAre(-7, 8)); + + absl::InlinedVector v_string1; + v_string1.assign({"foo", std::string("bar")}); + EXPECT_THAT(v_string1, ElementsAre("foo", "bar")); + + absl::InlinedVector v_string2; + v_string2 = {"foo", std::string("bar")}; + EXPECT_THAT(v_string2, ElementsAre("foo", "bar")); +} + +TYPED_TEST_P(InstanceTest, InitializerListAssign) { + using Instance = TypeParam; + for (size_t original_size = 0; original_size <= 4; ++original_size) { + SCOPED_TRACE(original_size); + absl::InlinedVector v(original_size, Instance(12345)); + const size_t original_capacity = v.capacity(); + v.assign({Instance(3)}); + EXPECT_THAT(v, AllOf(SizeIs(1), CapacityIs(original_capacity), + ElementsAre(ValueIs(3)))); + } + for (size_t original_size = 0; original_size <= 4; ++original_size) { + SCOPED_TRACE(original_size); + absl::InlinedVector v(original_size, Instance(12345)); + v.assign({Instance(3), Instance(4), Instance(5)}); + EXPECT_THAT( + v, AllOf(SizeIs(3), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5)))); + EXPECT_LE(3, v.capacity()); + } +} + +REGISTER_TYPED_TEST_CASE_P(InstanceTest, Swap, CountConstructorsDestructors, + CountConstructorsDestructorsOnCopyConstruction, + CountConstructorsDestructorsOnMoveConstruction, + CountConstructorsDestructorsOnAssignment, + CountConstructorsDestructorsOnMoveAssignment, + CountElemAssignInlineBacking, RangedConstructor, + RangedAssign, InitializerListAssign); + +using InstanceTypes = + ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(InstanceTestOnTypes, InstanceTest, InstanceTypes); + +TEST(DynamicVec, DynamicVecCompiles) { + DynamicVec v; + (void)v; +} + +TEST(AllocatorSupportTest, Constructors) { + using MyAlloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; + int64_t allocated = 0; + MyAlloc alloc(&allocated); + { AllocVec ABSL_ATTRIBUTE_UNUSED v; } + { AllocVec ABSL_ATTRIBUTE_UNUSED v(alloc); } + { AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); } + { AllocVec ABSL_ATTRIBUTE_UNUSED v({1, 2, 3}, alloc); } + + AllocVec v2; + { AllocVec ABSL_ATTRIBUTE_UNUSED v(v2, alloc); } + { AllocVec ABSL_ATTRIBUTE_UNUSED v(std::move(v2), alloc); } +} + +TEST(AllocatorSupportTest, CountAllocations) { + using MyAlloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; + int64_t allocated = 0; + MyAlloc alloc(&allocated); + { + AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + 4, alloc); + EXPECT_THAT(allocated, 0); + } + EXPECT_THAT(allocated, 0); + { + AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); + EXPECT_THAT(allocated, v.size() * sizeof(int)); + } + EXPECT_THAT(allocated, 0); + { + AllocVec v(4, 1, alloc); + EXPECT_THAT(allocated, 0); + + int64_t allocated2 = 0; + MyAlloc alloc2(&allocated2); + AllocVec v2(v, alloc2); + EXPECT_THAT(allocated2, 0); + + int64_t allocated3 = 0; + MyAlloc alloc3(&allocated3); + AllocVec v3(std::move(v), alloc3); + EXPECT_THAT(allocated3, 0); + } + EXPECT_THAT(allocated, 0); + { + AllocVec v(8, 2, alloc); + EXPECT_THAT(allocated, v.size() * sizeof(int)); + + int64_t allocated2 = 0; + MyAlloc alloc2(&allocated2); + AllocVec v2(v, alloc2); + EXPECT_THAT(allocated2, v2.size() * sizeof(int)); + + int64_t allocated3 = 0; + MyAlloc alloc3(&allocated3); + AllocVec v3(std::move(v), alloc3); + EXPECT_THAT(allocated3, v3.size() * sizeof(int)); + } + EXPECT_EQ(allocated, 0); + { + // Test shrink_to_fit deallocations. 
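+    // (With this vector's inlined capacity of 4 -- as exercised by the
+    // constructions above -- eight elements must live on the heap.
+    // shrink_to_fit() first trims the heap block to the exact size, then,
+    // once the elements fit inline again, releases it entirely.)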
+ AllocVec v(8, 2, alloc); + EXPECT_EQ(allocated, 8 * sizeof(int)); + v.resize(5); + EXPECT_EQ(allocated, 8 * sizeof(int)); + v.shrink_to_fit(); + EXPECT_EQ(allocated, 5 * sizeof(int)); + v.resize(4); + EXPECT_EQ(allocated, 5 * sizeof(int)); + v.shrink_to_fit(); + EXPECT_EQ(allocated, 0); + } +} + +TEST(AllocatorSupportTest, SwapBothAllocated) { + using MyAlloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + int64_t allocated1 = 0; + int64_t allocated2 = 0; + { + const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7}; + const int ia2[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + MyAlloc a1(&allocated1); + MyAlloc a2(&allocated2); + AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1); + AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2); + EXPECT_LT(v1.capacity(), v2.capacity()); + EXPECT_THAT(allocated1, v1.capacity() * sizeof(int)); + EXPECT_THAT(allocated2, v2.capacity() * sizeof(int)); + v1.swap(v2); + EXPECT_THAT(v1, ElementsAreArray(ia2)); + EXPECT_THAT(v2, ElementsAreArray(ia1)); + EXPECT_THAT(allocated1, v2.capacity() * sizeof(int)); + EXPECT_THAT(allocated2, v1.capacity() * sizeof(int)); + } + EXPECT_THAT(allocated1, 0); + EXPECT_THAT(allocated2, 0); +} + +TEST(AllocatorSupportTest, SwapOneAllocated) { + using MyAlloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + int64_t allocated1 = 0; + int64_t allocated2 = 0; + { + const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7}; + const int ia2[] = {0, 1, 2, 3}; + MyAlloc a1(&allocated1); + MyAlloc a2(&allocated2); + AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1); + AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2); + EXPECT_THAT(allocated1, v1.capacity() * sizeof(int)); + EXPECT_THAT(allocated2, 0); + v1.swap(v2); + EXPECT_THAT(v1, ElementsAreArray(ia2)); + EXPECT_THAT(v2, ElementsAreArray(ia1)); + EXPECT_THAT(allocated1, v2.capacity() * sizeof(int)); + EXPECT_THAT(allocated2, 0); + EXPECT_TRUE(v2.get_allocator() == a1); + EXPECT_TRUE(v1.get_allocator() == a2); + } + EXPECT_THAT(allocated1, 0); + EXPECT_THAT(allocated2, 0); +} + +TEST(AllocatorSupportTest, ScopedAllocatorWorksInlined) { + using StdVector = std::vector>; + using Alloc = CountingAllocator; + using ScopedAlloc = std::scoped_allocator_adaptor; + using AllocVec = absl::InlinedVector; + + int64_t total_allocated_byte_count = 0; + + AllocVec inlined_case(ScopedAlloc(Alloc(+&total_allocated_byte_count))); + + // Called only once to remain inlined + inlined_case.emplace_back(); + + int64_t absl_responsible_for_count = total_allocated_byte_count; + + // MSVC's allocator preemptively allocates in debug mode +#if !defined(_MSC_VER) + EXPECT_EQ(absl_responsible_for_count, 0); +#endif // !defined(_MSC_VER) + + inlined_case[0].emplace_back(); + EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count); + + inlined_case.clear(); + inlined_case.shrink_to_fit(); + EXPECT_EQ(total_allocated_byte_count, 0); +} + +TEST(AllocatorSupportTest, ScopedAllocatorWorksAllocated) { + using StdVector = std::vector>; + using Alloc = CountingAllocator; + using ScopedAlloc = std::scoped_allocator_adaptor; + using AllocVec = absl::InlinedVector; + + int64_t total_allocated_byte_count = 0; + + AllocVec allocated_case(ScopedAlloc(Alloc(+&total_allocated_byte_count))); + + // Called twice to force into being allocated + allocated_case.emplace_back(); + allocated_case.emplace_back(); + + int64_t absl_responsible_for_count = total_allocated_byte_count; + EXPECT_GT(absl_responsible_for_count, 0); + + allocated_case[1].emplace_back(); + EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count); + + 
allocated_case.clear(); + allocated_case.shrink_to_fit(); + EXPECT_EQ(total_allocated_byte_count, 0); +} + +TEST(AllocatorSupportTest, SizeAllocConstructor) { + constexpr int inlined_size = 4; + using Alloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + + { + auto len = inlined_size / 2; + int64_t allocated = 0; + auto v = AllocVec(len, Alloc(&allocated)); + + // Inline storage used; allocator should not be invoked + EXPECT_THAT(allocated, 0); + EXPECT_THAT(v, AllOf(SizeIs(len), Each(0))); + } + + { + auto len = inlined_size * 2; + int64_t allocated = 0; + auto v = AllocVec(len, Alloc(&allocated)); + + // Out of line storage used; allocation of 8 elements expected + EXPECT_THAT(allocated, len * sizeof(int)); + EXPECT_THAT(v, AllOf(SizeIs(len), Each(0))); + } +} + +TEST(InlinedVectorTest, MinimumAllocatorCompilesUsingTraits) { + using T = int; + using A = std::allocator; + using ATraits = absl::allocator_traits; + + struct MinimumAllocator { + using value_type = T; + + value_type* allocate(size_t n) { + A a; + return ATraits::allocate(a, n); + } + + void deallocate(value_type* p, size_t n) { + A a; + ATraits::deallocate(a, p, n); + } + }; + + absl::InlinedVector vec; + vec.emplace_back(); + vec.resize(0); +} + +TEST(InlinedVectorTest, AbslHashValueWorks) { + using V = absl::InlinedVector; + std::vector cases; + + // Generate a variety of vectors some of these are small enough for the inline + // space but are stored out of line. + for (int i = 0; i < 10; ++i) { + V v; + for (int j = 0; j < i; ++j) { + v.push_back(j); + } + cases.push_back(v); + v.resize(i % 4); + cases.push_back(v); + } + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); +} + +} // anonymous namespace diff --git a/base/abseil/absl/container/internal/btree.h b/base/abseil/absl/container/internal/btree.h new file mode 100644 index 0000000..aef861d --- /dev/null +++ b/base/abseil/absl/container/internal/btree.h @@ -0,0 +1,2613 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A btree implementation of the STL set and map interfaces. A btree is smaller +// and generally also faster than STL set/map (refer to the benchmarks below). +// The red-black tree implementation of STL set/map has an overhead of 3 +// pointers (left, right and parent) plus the node color information for each +// stored value. So a set consumes 40 bytes for each value stored in +// 64-bit mode. This btree implementation stores multiple values on fixed +// size nodes (usually 256 bytes) and doesn't store child pointers for leaf +// nodes. The result is that a btree_set may use much less memory per +// stored value. For the random insertion benchmark in btree_bench.cc, a +// btree_set with node-size of 256 uses 5.1 bytes per stored value. +// +// The packing of multiple values on to each node of a btree has another effect +// besides better space utilization: better cache locality due to fewer cache +// lines being accessed. 
Better cache locality translates into faster
+// operations.
+//
+// CAVEATS
+//
+// Insertions and deletions on a btree can cause splitting, merging or
+// rebalancing of btree nodes. And even without these operations, insertions
+// and deletions on a btree will move values around within a node. In both
+// cases, the result is that insertions and deletions can invalidate iterators
+// pointing to values other than the one being inserted/deleted. Therefore,
+// this container does not provide pointer stability. This is notably
+// different from STL set/map which takes care to not invalidate iterators on
+// insert/erase except, of course, for iterators pointing to the value being
+// erased. A partial workaround when erasing is available: erase() returns an
+// iterator pointing to the item just after the one that was erased (or end()
+// if none exists).
+
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <new>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/macros.h"
+#include "absl/container/internal/common.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/layout.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A helper class that indicates if the Compare parameter is a key-compare-to
+// comparator.
+template <typename Compare, typename Key>
+using btree_is_key_compare_to =
+    std::is_convertible<absl::result_of_t<Compare(const Key &, const Key &)>,
+                        absl::weak_ordering>;
+
+struct StringBtreeDefaultLess {
+  using is_transparent = void;
+
+  StringBtreeDefaultLess() = default;
+
+  // Compatibility constructor.
+  StringBtreeDefaultLess(std::less<std::string>) {}        // NOLINT
+  StringBtreeDefaultLess(std::less<absl::string_view>) {}  // NOLINT
+
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
+  }
+};
+
+struct StringBtreeDefaultGreater {
+  using is_transparent = void;
+
+  StringBtreeDefaultGreater() = default;
+
+  StringBtreeDefaultGreater(std::greater<std::string>) {}        // NOLINT
+  StringBtreeDefaultGreater(std::greater<absl::string_view>) {}  // NOLINT
+
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
+  }
+};
+
+// A helper class to convert a boolean comparison into a three-way "compare-to"
+// comparison that returns a negative value to indicate less-than, zero to
+// indicate equality and a positive value to indicate greater-than. This helper
+// class is specialized for less<std::string>, greater<std::string>,
+// less<absl::string_view>, and greater<absl::string_view>.
+//
+// key_compare_to_adapter is provided so that btree users
+// automatically get the more efficient compare-to code when using common
+// google string types with common comparison functors.
+// These string-like specializations also turn on heterogeneous lookup by
+// default.
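+//
+// For example (illustrative usage, not part of this header): with an
+// absl::btree_set<std::string>, the default std::less<std::string> comparator
+// is swapped for StringBtreeDefaultLess above, so a lookup such as
+//
+//   absl::btree_set<std::string> words = {"aec", "btree"};
+//   words.find(absl::string_view("btree"));  // no temporary std::string
+//
+// performs one three-way compare() per probed key instead of up to two
+// less-than calls.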
+template <typename Compare>
+struct key_compare_to_adapter {
+  using type = Compare;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<std::string>> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<std::string>> {
+  using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<absl::string_view>> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<absl::string_view>> {
+  using type = StringBtreeDefaultGreater;
+};
+
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+          bool Multi, typename SlotPolicy>
+struct common_params {
+  // If Compare is a common comparator for a std::string-like type, then we
+  // adapt it to use heterogeneous lookup and to be a key-compare-to
+  // comparator.
+  using key_compare = typename key_compare_to_adapter<Compare>::type;
+  // A type which indicates if we have a key-compare-to functor or a plain old
+  // key-compare functor.
+  using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
+
+  using allocator_type = Alloc;
+  using key_type = Key;
+  using size_type = std::make_signed<size_t>::type;
+  using difference_type = ptrdiff_t;
+
+  // True if this is a multiset or multimap.
+  using is_multi_container = std::integral_constant<bool, Multi>;
+
+  using slot_policy = SlotPolicy;
+  using slot_type = typename slot_policy::slot_type;
+  using value_type = typename slot_policy::value_type;
+  using init_type = typename slot_policy::mutable_value_type;
+  using pointer = value_type *;
+  using const_pointer = const value_type *;
+  using reference = value_type &;
+  using const_reference = const value_type &;
+
+  enum {
+    kTargetNodeSize = TargetNodeSize,
+
+    // Upper bound for the available space for values. This is largest for leaf
+    // nodes, which have overhead of at least a pointer + 4 bytes (for storing
+    // 3 field_types and an enum).
+    kNodeValueSpace =
+        TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
+  };
+
+  // This is an integral type large enough to hold as many
+  // ValueSize-values as will fit a node of TargetNodeSize bytes.
+  using node_count_type =
+      absl::conditional_t<(kNodeValueSpace / sizeof(value_type) >
+                           (std::numeric_limits<uint8_t>::max)()),
+                          uint16_t, uint8_t>;  // NOLINT
+
+  // The following methods are necessary for passing this struct as PolicyTraits
+  // for node_handle and/or are used within btree.
+  static value_type &element(slot_type *slot) {
+    return slot_policy::element(slot);
+  }
+  static const value_type &element(const slot_type *slot) {
+    return slot_policy::element(slot);
+  }
+  template <typename... Args>
+  static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
+    slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
+  }
+  static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+    slot_policy::construct(alloc, slot, other);
+  }
+  static void destroy(Alloc *alloc, slot_type *slot) {
+    slot_policy::destroy(alloc, slot);
+  }
+  static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
+    construct(alloc, new_slot, old_slot);
+    destroy(alloc, old_slot);
+  }
+  static void swap(Alloc *alloc, slot_type *a, slot_type *b) {
+    slot_policy::swap(alloc, a, b);
+  }
+  static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
+    slot_policy::move(alloc, src, dest);
+  }
+  static void move(Alloc *alloc, slot_type *first, slot_type *last,
+                   slot_type *result) {
+    slot_policy::move(alloc, first, last, result);
+  }
+};
+
+// A parameters structure for holding the type parameters for a btree_map.
+// Compare and Alloc should be nothrow copy-constructible.
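+//
+// For example (illustrative): absl::btree_map<std::string, int> instantiates
+// these parameters with the default 256-byte target node size mentioned in
+// the header comment, and its key_compare resolves to StringBtreeDefaultLess
+// via key_compare_to_adapter above.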
+template +struct map_params : common_params> { + using super_type = typename map_params::common_params; + using mapped_type = Data; + // This type allows us to move keys when it is safe to do so. It is safe + // for maps in which value_type and mutable_value_type are layout compatible. + using slot_policy = typename super_type::slot_policy; + using slot_type = typename super_type::slot_type; + using value_type = typename super_type::value_type; + using init_type = typename super_type::init_type; + + using key_compare = typename super_type::key_compare; + // Inherit from key_compare for empty base class optimization. + struct value_compare : private key_compare { + value_compare() = default; + explicit value_compare(const key_compare &cmp) : key_compare(cmp) {} + + template + auto operator()(const T &left, const U &right) const + -> decltype(std::declval()(left.first, right.first)) { + return key_compare::operator()(left.first, right.first); + } + }; + using is_map_container = std::true_type; + + static const Key &key(const value_type &x) { return x.first; } + static const Key &key(const init_type &x) { return x.first; } + static const Key &key(const slot_type *x) { return slot_policy::key(x); } + static mapped_type &value(value_type *value) { return value->second; } +}; + +// This type implements the necessary functions from the +// absl::container_internal::slot_type interface. +template +struct set_slot_policy { + using slot_type = Key; + using value_type = Key; + using mutable_value_type = Key; + + static value_type &element(slot_type *slot) { return *slot; } + static const value_type &element(const slot_type *slot) { return *slot; } + + template + static void construct(Alloc *alloc, slot_type *slot, Args &&... args) { + absl::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { + absl::allocator_traits::construct(*alloc, slot, std::move(*other)); + } + + template + static void destroy(Alloc *alloc, slot_type *slot) { + absl::allocator_traits::destroy(*alloc, slot); + } + + template + static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) { + using std::swap; + swap(*a, *b); + } + + template + static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) { + *dest = std::move(*src); + } + + template + static void move(Alloc *alloc, slot_type *first, slot_type *last, + slot_type *result) { + for (slot_type *src = first, *dest = result; src != last; ++src, ++dest) + move(alloc, src, dest); + } +}; + +// A parameters structure for holding the type parameters for a btree_set. +// Compare and Alloc should be nothrow copy-constructible. +template +struct set_params : common_params> { + using value_type = Key; + using slot_type = typename set_params::common_params::slot_type; + using value_compare = typename set_params::common_params::key_compare; + using is_map_container = std::false_type; + + static const Key &key(const value_type &x) { return x; } + static const Key &key(const slot_type *x) { return *x; } +}; + +// An adapter class that converts a lower-bound compare into an upper-bound +// compare. Note: there is no need to make a version of this adapter specialized +// for key-compare-to functors because the upper-bound (the first value greater +// than the input) is never an exact match. 
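+//
+// For example (illustrative): with comp = std::less<int>, operator()(a, b)
+// below evaluates !comp(b, a), i.e. a <= b. A lower-bound-style search with
+// this predicate stops at the first key for which `key <= k` is false, i.e.
+// the first key strictly greater than k -- exactly upper_bound semantics.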
+template +struct upper_bound_adapter { + explicit upper_bound_adapter(const Compare &c) : comp(c) {} + template + bool operator()(const K &a, const LK &b) const { + // Returns true when a is not greater than b. + return !compare_internal::compare_result_as_less_than(comp(b, a)); + } + + private: + Compare comp; +}; + +enum class MatchKind : uint8_t { kEq, kNe }; + +template +struct SearchResult { + V value; + MatchKind match; + + static constexpr bool HasMatch() { return true; } + bool IsEq() const { return match == MatchKind::kEq; } +}; + +// When we don't use CompareTo, `match` is not present. +// This ensures that callers can't use it accidentally when it provides no +// useful information. +template +struct SearchResult { + V value; + + static constexpr bool HasMatch() { return false; } + static constexpr bool IsEq() { return false; } +}; + +// A node in the btree holding. The same node type is used for both internal +// and leaf nodes in the btree, though the nodes are allocated in such a way +// that the children array is only valid in internal nodes. +template +class btree_node { + using is_key_compare_to = typename Params::is_key_compare_to; + using is_multi_container = typename Params::is_multi_container; + using field_type = typename Params::node_count_type; + using allocator_type = typename Params::allocator_type; + using slot_type = typename Params::slot_type; + + public: + using params_type = Params; + using key_type = typename Params::key_type; + using value_type = typename Params::value_type; + using pointer = typename Params::pointer; + using const_pointer = typename Params::const_pointer; + using reference = typename Params::reference; + using const_reference = typename Params::const_reference; + using key_compare = typename Params::key_compare; + using size_type = typename Params::size_type; + using difference_type = typename Params::difference_type; + + // Btree decides whether to use linear node search as follows: + // - If the key is arithmetic and the comparator is std::less or + // std::greater, choose linear. + // - Otherwise, choose binary. + // TODO(ezb): Might make sense to add condition(s) based on node-size. + using use_linear_search = std::integral_constant< + bool, + std::is_arithmetic::value && + (std::is_same, key_compare>::value || + std::is_same, key_compare>::value)>; + + // This class is organized by gtl::Layout as if it had the following + // structure: + // // A pointer to the node's parent. + // btree_node *parent; + // + // // The position of the node in the node's parent. + // field_type position; + // // The index of the first populated value in `values`. + // // TODO(ezb): right now, `start` is always 0. Update insertion/merge + // // logic to allow for floating storage within nodes. + // field_type start; + // // The count of the number of populated values in the node. + // field_type count; + // // The maximum number of values the node can hold. This is an integer in + // // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf + // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal + // // nodes (even though there are still kNodeValues values in the node). + // // TODO(ezb): make max_count use only 4 bits and record log2(capacity) + // // to free extra bits for is_root, etc. + // field_type max_count; + // + // // The array of values. The capacity is `max_count` for leaf nodes and + // // kNodeValues for internal nodes. Only the values in + // // [start, start + count) have been initialized and are valid. 
+ // slot_type values[max_count]; + // + // // The array of child pointers. The keys in children[i] are all less + // // than key(i). The keys in children[i + 1] are all greater than key(i). + // // There are 0 children for leaf nodes and kNodeValues + 1 children for + // // internal nodes. + // btree_node *children[kNodeValues + 1]; + // + // This class is only constructed by EmptyNodeType. Normally, pointers to the + // layout above are allocated, cast to btree_node*, and de-allocated within + // the btree implementation. + ~btree_node() = default; + btree_node(btree_node const &) = delete; + btree_node &operator=(btree_node const &) = delete; + + // Public for EmptyNodeType. + constexpr static size_type Alignment() { + static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(), + "Alignment of all nodes must be equal."); + return InternalLayout().Alignment(); + } + + protected: + btree_node() = default; + + private: + using layout_type = absl::container_internal::Layout; + constexpr static size_type SizeWithNValues(size_type n) { + return layout_type(/*parent*/ 1, + /*position, start, count, max_count*/ 4, + /*values*/ n, + /*children*/ 0) + .AllocSize(); + } + // A lower bound for the overhead of fields other than values in a leaf node. + constexpr static size_type MinimumOverhead() { + return SizeWithNValues(1) - sizeof(value_type); + } + + // Compute how many values we can fit onto a leaf node taking into account + // padding. + constexpr static size_type NodeTargetValues(const int begin, const int end) { + return begin == end ? begin + : SizeWithNValues((begin + end) / 2 + 1) > + params_type::kTargetNodeSize + ? NodeTargetValues(begin, (begin + end) / 2) + : NodeTargetValues((begin + end) / 2 + 1, end); + } + + enum { + kTargetNodeSize = params_type::kTargetNodeSize, + kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize), + + // We need a minimum of 3 values per internal node in order to perform + // splitting (1 value for the two nodes involved in the split and 1 value + // propagated to the parent as the delimiter for the split). + kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3, + + // The node is internal (i.e. is not a leaf node) if and only if `max_count` + // has this value. + kInternalNodeMaxCount = 0, + }; + + // Leaves can have less than kNodeValues values. + constexpr static layout_type LeafLayout(const int max_values = kNodeValues) { + return layout_type(/*parent*/ 1, + /*position, start, count, max_count*/ 4, + /*values*/ max_values, + /*children*/ 0); + } + constexpr static layout_type InternalLayout() { + return layout_type(/*parent*/ 1, + /*position, start, count, max_count*/ 4, + /*values*/ kNodeValues, + /*children*/ kNodeValues + 1); + } + constexpr static size_type LeafSize(const int max_values = kNodeValues) { + return LeafLayout(max_values).AllocSize(); + } + constexpr static size_type InternalSize() { + return InternalLayout().AllocSize(); + } + + // N is the index of the type in the Layout definition. + // ElementType is the Nth type in the Layout definition. + template + inline typename layout_type::template ElementType *GetField() { + // We assert that we don't read from values that aren't there. 
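+    // Layout field indices: 0 = parent pointer, 1 = the four field_type
+    // metadata fields, 2 = values array, 3 = children array. Field 3 is only
+    // allocated for internal nodes, hence the !leaf() requirement below.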
+ assert(N < 3 || !leaf()); + return InternalLayout().template Pointer(reinterpret_cast(this)); + } + template + inline const typename layout_type::template ElementType *GetField() const { + assert(N < 3 || !leaf()); + return InternalLayout().template Pointer( + reinterpret_cast(this)); + } + void set_parent(btree_node *p) { *GetField<0>() = p; } + field_type &mutable_count() { return GetField<1>()[2]; } + slot_type *slot(int i) { return &GetField<2>()[i]; } + const slot_type *slot(int i) const { return &GetField<2>()[i]; } + void set_position(field_type v) { GetField<1>()[0] = v; } + void set_start(field_type v) { GetField<1>()[1] = v; } + void set_count(field_type v) { GetField<1>()[2] = v; } + // This method is only called by the node init methods. + void set_max_count(field_type v) { GetField<1>()[3] = v; } + + public: + // Whether this is a leaf node or not. This value doesn't change after the + // node is created. + bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; } + + // Getter for the position of this node in its parent. + field_type position() const { return GetField<1>()[0]; } + + // Getter for the offset of the first value in the `values` array. + field_type start() const { return GetField<1>()[1]; } + + // Getters for the number of values stored in this node. + field_type count() const { return GetField<1>()[2]; } + field_type max_count() const { + // Internal nodes have max_count==kInternalNodeMaxCount. + // Leaf nodes have max_count in [1, kNodeValues]. + const field_type max_count = GetField<1>()[3]; + return max_count == field_type{kInternalNodeMaxCount} + ? field_type{kNodeValues} + : max_count; + } + + // Getter for the parent of this node. + btree_node *parent() const { return *GetField<0>(); } + // Getter for whether the node is the root of the tree. The parent of the + // root of the tree is the leftmost node in the tree which is guaranteed to + // be a leaf. + bool is_root() const { return parent()->leaf(); } + void make_root() { + assert(parent()->is_root()); + set_parent(parent()->parent()); + } + + // Getters for the key/value at position i in the node. + const key_type &key(int i) const { return params_type::key(slot(i)); } + reference value(int i) { return params_type::element(slot(i)); } + const_reference value(int i) const { return params_type::element(slot(i)); } + + // Getters/setter for the child at position i in the node. + btree_node *child(int i) const { return GetField<3>()[i]; } + btree_node *&mutable_child(int i) { return GetField<3>()[i]; } + void clear_child(int i) { + absl::container_internal::SanitizerPoisonObject(&mutable_child(i)); + } + void set_child(int i, btree_node *c) { + absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i)); + mutable_child(i) = c; + c->set_position(i); + } + void init_child(int i, btree_node *c) { + set_child(i, c); + c->set_parent(this); + } + + // Returns the position of the first value whose key is not less than k. + template + SearchResult lower_bound( + const K &k, const key_compare &comp) const { + return use_linear_search::value ? linear_search(k, comp) + : binary_search(k, comp); + } + // Returns the position of the first value whose key is greater than k. + template + int upper_bound(const K &k, const key_compare &comp) const { + auto upper_compare = upper_bound_adapter(comp); + return use_linear_search::value ? 
linear_search(k, upper_compare).value + : binary_search(k, upper_compare).value; + } + + template + SearchResult::value> + linear_search(const K &k, const Compare &comp) const { + return linear_search_impl(k, 0, count(), comp, + btree_is_key_compare_to()); + } + + template + SearchResult::value> + binary_search(const K &k, const Compare &comp) const { + return binary_search_impl(k, 0, count(), comp, + btree_is_key_compare_to()); + } + + // Returns the position of the first value whose key is not less than k using + // linear search performed using plain compare. + template + SearchResult linear_search_impl( + const K &k, int s, const int e, const Compare &comp, + std::false_type /* IsCompareTo */) const { + while (s < e) { + if (!comp(key(s), k)) { + break; + } + ++s; + } + return {s}; + } + + // Returns the position of the first value whose key is not less than k using + // linear search performed using compare-to. + template + SearchResult linear_search_impl( + const K &k, int s, const int e, const Compare &comp, + std::true_type /* IsCompareTo */) const { + while (s < e) { + const absl::weak_ordering c = comp(key(s), k); + if (c == 0) { + return {s, MatchKind::kEq}; + } else if (c > 0) { + break; + } + ++s; + } + return {s, MatchKind::kNe}; + } + + // Returns the position of the first value whose key is not less than k using + // binary search performed using plain compare. + template + SearchResult binary_search_impl( + const K &k, int s, int e, const Compare &comp, + std::false_type /* IsCompareTo */) const { + while (s != e) { + const int mid = (s + e) >> 1; + if (comp(key(mid), k)) { + s = mid + 1; + } else { + e = mid; + } + } + return {s}; + } + + // Returns the position of the first value whose key is not less than k using + // binary search performed using compare-to. + template + SearchResult binary_search_impl( + const K &k, int s, int e, const CompareTo &comp, + std::true_type /* IsCompareTo */) const { + if (is_multi_container::value) { + MatchKind exact_match = MatchKind::kNe; + while (s != e) { + const int mid = (s + e) >> 1; + const absl::weak_ordering c = comp(key(mid), k); + if (c < 0) { + s = mid + 1; + } else { + e = mid; + if (c == 0) { + // Need to return the first value whose key is not less than k, + // which requires continuing the binary search if this is a + // multi-container. + exact_match = MatchKind::kEq; + } + } + } + return {s, exact_match}; + } else { // Not a multi-container. + while (s != e) { + const int mid = (s + e) >> 1; + const absl::weak_ordering c = comp(key(mid), k); + if (c < 0) { + s = mid + 1; + } else if (c > 0) { + e = mid; + } else { + return {mid, MatchKind::kEq}; + } + } + return {s, MatchKind::kNe}; + } + } + + // Emplaces a value at position i, shifting all existing values and + // children at positions >= i to the right by 1. + template + void emplace_value(size_type i, allocator_type *alloc, Args &&... args); + + // Removes the value at position i, shifting all existing values and children + // at positions > i to the left by 1. + void remove_value(int i, allocator_type *alloc); + + // Removes the values at positions [i, i + to_erase), shifting all values + // after that range to the left by to_erase. Does not change children at all. + void remove_values_ignore_children(int i, int to_erase, + allocator_type *alloc); + + // Rebalances a node with its right sibling. 
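+  // Each variant moves `to_move` values between this node and `right`,
+  // rotating them through the delimiting value held in the parent node.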
+ void rebalance_right_to_left(int to_move, btree_node *right, + allocator_type *alloc); + void rebalance_left_to_right(int to_move, btree_node *right, + allocator_type *alloc); + + // Splits a node, moving a portion of the node's values to its right sibling. + void split(int insert_position, btree_node *dest, allocator_type *alloc); + + // Merges a node with its right sibling, moving all of the values and the + // delimiting key in the parent node onto itself. + void merge(btree_node *sibling, allocator_type *alloc); + + // Swap the contents of "this" and "src". + void swap(btree_node *src, allocator_type *alloc); + + // Node allocation/deletion routines. + static btree_node *init_leaf(btree_node *n, btree_node *parent, + int max_count) { + n->set_parent(parent); + n->set_position(0); + n->set_start(0); + n->set_count(0); + n->set_max_count(max_count); + absl::container_internal::SanitizerPoisonMemoryRegion( + n->slot(0), max_count * sizeof(slot_type)); + return n; + } + static btree_node *init_internal(btree_node *n, btree_node *parent) { + init_leaf(n, parent, kNodeValues); + // Set `max_count` to a sentinel value to indicate that this node is + // internal. + n->set_max_count(kInternalNodeMaxCount); + absl::container_internal::SanitizerPoisonMemoryRegion( + &n->mutable_child(0), (kNodeValues + 1) * sizeof(btree_node *)); + return n; + } + void destroy(allocator_type *alloc) { + for (int i = 0; i < count(); ++i) { + value_destroy(i, alloc); + } + } + + public: + // Exposed only for tests. + static bool testonly_uses_linear_node_search() { + return use_linear_search::value; + } + + private: + template + void value_init(const size_type i, allocator_type *alloc, Args &&... args) { + absl::container_internal::SanitizerUnpoisonObject(slot(i)); + params_type::construct(alloc, slot(i), std::forward(args)...); + } + void value_destroy(const size_type i, allocator_type *alloc) { + params_type::destroy(alloc, slot(i)); + absl::container_internal::SanitizerPoisonObject(slot(i)); + } + + // Move n values starting at value i in this node into the values starting at + // value j in node x. + void uninitialized_move_n(const size_type n, const size_type i, + const size_type j, btree_node *x, + allocator_type *alloc) { + absl::container_internal::SanitizerUnpoisonMemoryRegion( + x->slot(j), n * sizeof(slot_type)); + for (slot_type *src = slot(i), *end = src + n, *dest = x->slot(j); + src != end; ++src, ++dest) { + params_type::construct(alloc, dest, src); + } + } + + // Destroys a range of n values, starting at index i. 
+ void value_destroy_n(const size_type i, const size_type n, + allocator_type *alloc) { + for (int j = 0; j < n; ++j) { + value_destroy(i + j, alloc); + } + } + + template + friend class btree; + template + friend struct btree_iterator; + friend class BtreeNodePeer; +}; + +template +struct btree_iterator { + private: + using key_type = typename Node::key_type; + using size_type = typename Node::size_type; + using params_type = typename Node::params_type; + + using node_type = Node; + using normal_node = typename std::remove_const::type; + using const_node = const Node; + using normal_pointer = typename params_type::pointer; + using normal_reference = typename params_type::reference; + using const_pointer = typename params_type::const_pointer; + using const_reference = typename params_type::const_reference; + using slot_type = typename params_type::slot_type; + + using iterator = + btree_iterator; + using const_iterator = + btree_iterator; + + public: + // These aliases are public for std::iterator_traits. + using difference_type = typename Node::difference_type; + using value_type = typename params_type::value_type; + using pointer = Pointer; + using reference = Reference; + using iterator_category = std::bidirectional_iterator_tag; + + btree_iterator() : node(nullptr), position(-1) {} + btree_iterator(Node *n, int p) : node(n), position(p) {} + + // NOTE: this SFINAE allows for implicit conversions from iterator to + // const_iterator, but it specifically avoids defining copy constructors so + // that btree_iterator can be trivially copyable. This is for performance and + // binary size reasons. + template , iterator>::value && + std::is_same::value, + int> = 0> + btree_iterator(const btree_iterator &x) // NOLINT + : node(x.node), position(x.position) {} + + private: + // This SFINAE allows explicit conversions from const_iterator to + // iterator, but also avoids defining a copy constructor. + // NOTE: the const_cast is safe because this constructor is only called by + // non-const methods and the container owns the nodes. + template , const_iterator>::value && + std::is_same::value, + int> = 0> + explicit btree_iterator(const btree_iterator &x) + : node(const_cast(x.node)), position(x.position) {} + + // Increment/decrement the iterator. + void increment() { + if (node->leaf() && ++position < node->count()) { + return; + } + increment_slow(); + } + void increment_slow(); + + void decrement() { + if (node->leaf() && --position >= 0) { + return; + } + decrement_slow(); + } + void decrement_slow(); + + public: + bool operator==(const const_iterator &x) const { + return node == x.node && position == x.position; + } + bool operator!=(const const_iterator &x) const { + return node != x.node || position != x.position; + } + + // Accessors for the key/value the iterator is pointing at. 
+ reference operator*() const { + return node->value(position); + } + pointer operator->() const { + return &node->value(position); + } + + btree_iterator& operator++() { + increment(); + return *this; + } + btree_iterator& operator--() { + decrement(); + return *this; + } + btree_iterator operator++(int) { + btree_iterator tmp = *this; + ++*this; + return tmp; + } + btree_iterator operator--(int) { + btree_iterator tmp = *this; + --*this; + return tmp; + } + + private: + template + friend class btree; + template + friend class btree_container; + template + friend class btree_set_container; + template + friend class btree_map_container; + template + friend class btree_multiset_container; + template + friend struct btree_iterator; + template + friend class base_checker; + + const key_type &key() const { return node->key(position); } + slot_type *slot() { return node->slot(position); } + + // The node in the tree the iterator is pointing at. + Node *node; + // The position within the node of the tree the iterator is pointing at. + // TODO(ezb): make this a field_type + int position; +}; + +template +class btree { + using node_type = btree_node; + using is_key_compare_to = typename Params::is_key_compare_to; + + // We use a static empty node for the root/leftmost/rightmost of empty btrees + // in order to avoid branching in begin()/end(). + struct alignas(node_type::Alignment()) EmptyNodeType : node_type { + using field_type = typename node_type::field_type; + node_type *parent; + field_type position = 0; + field_type start = 0; + field_type count = 0; + // max_count must be != kInternalNodeMaxCount (so that this node is regarded + // as a leaf node). max_count() is never called when the tree is empty. + field_type max_count = node_type::kInternalNodeMaxCount + 1; + +#ifdef _MSC_VER + // MSVC has constexpr code generations bugs here. + EmptyNodeType() : parent(this) {} +#else + constexpr EmptyNodeType(node_type *p) : parent(p) {} +#endif + }; + + static node_type *EmptyNode() { +#ifdef _MSC_VER + static EmptyNodeType* empty_node = new EmptyNodeType; + // This assert fails on some other construction methods. 
+ assert(empty_node->parent == empty_node); + return empty_node; +#else + static constexpr EmptyNodeType empty_node( + const_cast(&empty_node)); + return const_cast(&empty_node); +#endif + } + + enum { + kNodeValues = node_type::kNodeValues, + kMinNodeValues = kNodeValues / 2, + }; + + struct node_stats { + using size_type = typename Params::size_type; + + node_stats(size_type l, size_type i) + : leaf_nodes(l), + internal_nodes(i) { + } + + node_stats& operator+=(const node_stats &x) { + leaf_nodes += x.leaf_nodes; + internal_nodes += x.internal_nodes; + return *this; + } + + size_type leaf_nodes; + size_type internal_nodes; + }; + + public: + using key_type = typename Params::key_type; + using value_type = typename Params::value_type; + using size_type = typename Params::size_type; + using difference_type = typename Params::difference_type; + using key_compare = typename Params::key_compare; + using value_compare = typename Params::value_compare; + using allocator_type = typename Params::allocator_type; + using reference = typename Params::reference; + using const_reference = typename Params::const_reference; + using pointer = typename Params::pointer; + using const_pointer = typename Params::const_pointer; + using iterator = btree_iterator; + using const_iterator = typename iterator::const_iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + using node_handle_type = node_handle; + + // Internal types made public for use by btree_container types. + using params_type = Params; + using slot_type = typename Params::slot_type; + + private: + // For use in copy_or_move_values_in_order. + const value_type &maybe_move_from_iterator(const_iterator x) { return *x; } + value_type &&maybe_move_from_iterator(iterator x) { return std::move(*x); } + + // Copies or moves (depending on the template parameter) the values in + // x into this btree in their order in x. This btree must be empty before this + // method is called. This method is used in copy construction, copy + // assignment, and move assignment. + template + void copy_or_move_values_in_order(Btree *x); + + // Validates that various assumptions/requirements are true at compile time. + constexpr static bool static_assert_validation(); + + public: + btree(const key_compare &comp, const allocator_type &alloc); + + btree(const btree &x); + btree(btree &&x) noexcept + : root_(std::move(x.root_)), + rightmost_(absl::exchange(x.rightmost_, EmptyNode())), + size_(absl::exchange(x.size_, 0)) { + x.mutable_root() = EmptyNode(); + } + + ~btree() { + // Put static_asserts in destructor to avoid triggering them before the type + // is complete. + static_assert(static_assert_validation(), "This call must be elided."); + clear(); + } + + // Assign the contents of x to *this. 
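+  //
+  // [Editor's sketch; not part of the original source.] Copy assignment
+  // clears *this and re-inserts in order; move assignment swaps the whole
+  // tree when the allocator propagates or compares equal:
+  //
+  //   absl::btree_set<int> a = {1, 2}, b;
+  //   b = a;             // deep copy; a is unchanged
+  //   b = std::move(a);  // typically just pointer/size swaps
+  //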
+ btree &operator=(const btree &x); + btree &operator=(btree &&x) noexcept; + + iterator begin() { + return iterator(leftmost(), 0); + } + const_iterator begin() const { + return const_iterator(leftmost(), 0); + } + iterator end() { return iterator(rightmost_, rightmost_->count()); } + const_iterator end() const { + return const_iterator(rightmost_, rightmost_->count()); + } + reverse_iterator rbegin() { + return reverse_iterator(end()); + } + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + reverse_iterator rend() { + return reverse_iterator(begin()); + } + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + // Finds the first element whose key is not less than key. + template + iterator lower_bound(const K &key) { + return internal_end(internal_lower_bound(key)); + } + template + const_iterator lower_bound(const K &key) const { + return internal_end(internal_lower_bound(key)); + } + + // Finds the first element whose key is greater than key. + template + iterator upper_bound(const K &key) { + return internal_end(internal_upper_bound(key)); + } + template + const_iterator upper_bound(const K &key) const { + return internal_end(internal_upper_bound(key)); + } + + // Finds the range of values which compare equal to key. The first member of + // the returned pair is equal to lower_bound(key). The second member pair of + // the pair is equal to upper_bound(key). + template + std::pair equal_range(const K &key) { + return {lower_bound(key), upper_bound(key)}; + } + template + std::pair equal_range(const K &key) const { + return {lower_bound(key), upper_bound(key)}; + } + + // Inserts a value into the btree only if it does not already exist. The + // boolean return value indicates whether insertion succeeded or failed. + // Requirement: if `key` already exists in the btree, does not consume `args`. + // Requirement: `key` is never referenced after consuming `args`. + template + std::pair insert_unique(const key_type &key, Args &&... args); + + // Inserts with hint. Checks to see if the value should be placed immediately + // before `position` in the tree. If so, then the insertion will take + // amortized constant time. If not, the insertion will take amortized + // logarithmic time as if a call to insert_unique() were made. + // Requirement: if `key` already exists in the btree, does not consume `args`. + // Requirement: `key` is never referenced after consuming `args`. + template + std::pair insert_hint_unique(iterator position, + const key_type &key, + Args &&... args); + + // Insert a range of values into the btree. + template + void insert_iterator_unique(InputIterator b, InputIterator e); + + // Inserts a value into the btree. + template + iterator insert_multi(const key_type &key, ValueType &&v); + + // Inserts a value into the btree. + template + iterator insert_multi(ValueType &&v) { + return insert_multi(params_type::key(v), std::forward(v)); + } + + // Insert with hint. Check to see if the value should be placed immediately + // before position in the tree. If it does, then the insertion will take + // amortized constant time. If not, the insertion will take amortized + // logarithmic time as if a call to insert_multi(v) were made. + template + iterator insert_hint_multi(iterator position, ValueType &&v); + + // Insert a range of values into the btree. + template + void insert_iterator_multi(InputIterator b, InputIterator e); + + // Erase the specified iterator from the btree. 
The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). + // Requirement: does not read the value at `*iter`. + iterator erase(iterator iter); + + // Erases range. Returns the number of keys erased and an iterator pointing + // to the element after the last erased element. + std::pair erase(iterator begin, iterator end); + + // Erases the specified key from the btree. Returns 1 if an element was + // erased and 0 otherwise. + template + size_type erase_unique(const K &key); + + // Erases all of the entries matching the specified key from the + // btree. Returns the number of elements erased. + template + size_type erase_multi(const K &key); + + // Finds the iterator corresponding to a key or returns end() if the key is + // not present. + template + iterator find(const K &key) { + return internal_end(internal_find(key)); + } + template + const_iterator find(const K &key) const { + return internal_end(internal_find(key)); + } + + // Returns a count of the number of times the key appears in the btree. + template + size_type count_unique(const K &key) const { + const iterator begin = internal_find(key); + if (begin.node == nullptr) { + // The key doesn't exist in the tree. + return 0; + } + return 1; + } + // Returns a count of the number of times the key appears in the btree. + template + size_type count_multi(const K &key) const { + const auto range = equal_range(key); + return std::distance(range.first, range.second); + } + + // Clear the btree, deleting all of the values it contains. + void clear(); + + // Swap the contents of *this and x. + void swap(btree &x); + + const key_compare &key_comp() const noexcept { + return root_.template get<0>(); + } + template + bool compare_keys(const K &x, const LK &y) const { + return compare_internal::compare_result_as_less_than(key_comp()(x, y)); + } + + value_compare value_comp() const { return value_compare(key_comp()); } + + // Verifies the structure of the btree. + void verify() const; + + // Size routines. + size_type size() const { return size_; } + size_type max_size() const { return (std::numeric_limits::max)(); } + bool empty() const { return size_ == 0; } + + // The height of the btree. An empty tree will have height 0. + size_type height() const { + size_type h = 0; + if (!empty()) { + // Count the length of the chain from the leftmost node up to the + // root. We actually count from the root back around to the level below + // the root, but the calculation is the same because of the circularity + // of that traversal. + const node_type *n = root(); + do { + ++h; + n = n->parent(); + } while (n != root()); + } + return h; + } + + // The number of internal, leaf and total nodes used by the btree. + size_type leaf_nodes() const { + return internal_stats(root()).leaf_nodes; + } + size_type internal_nodes() const { + return internal_stats(root()).internal_nodes; + } + size_type nodes() const { + node_stats stats = internal_stats(root()); + return stats.leaf_nodes + stats.internal_nodes; + } + + // The total number of bytes used by the btree. 
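+  //
+  // [Editor's note; not part of the original source.] The figure below is the
+  // container header plus every node allocation. With hypothetical numbers:
+  // LeafSize() == 256, InternalSize() == 320, 10 leaf nodes and 1 internal
+  // node, bytes_used() reports sizeof(*this) + 10 * 256 + 1 * 320 bytes.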
+ size_type bytes_used() const { + node_stats stats = internal_stats(root()); + if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) { + return sizeof(*this) + + node_type::LeafSize(root()->max_count()); + } else { + return sizeof(*this) + + stats.leaf_nodes * node_type::LeafSize() + + stats.internal_nodes * node_type::InternalSize(); + } + } + + // The average number of bytes used per value stored in the btree. + static double average_bytes_per_value() { + // Returns the number of bytes per value on a leaf node that is 75% + // full. Experimentally, this matches up nicely with the computed number of + // bytes per value in trees that had their values inserted in random order. + return node_type::LeafSize() / (kNodeValues * 0.75); + } + + // The fullness of the btree. Computed as the number of elements in the btree + // divided by the maximum number of elements a tree with the current number + // of nodes could hold. A value of 1 indicates perfect space + // utilization. Smaller values indicate space wastage. + // Returns 0 for empty trees. + double fullness() const { + if (empty()) return 0.0; + return static_cast(size()) / (nodes() * kNodeValues); + } + // The overhead of the btree structure in bytes per node. Computed as the + // total number of bytes used by the btree minus the number of bytes used for + // storing elements divided by the number of elements. + // Returns 0 for empty trees. + double overhead() const { + if (empty()) return 0.0; + return (bytes_used() - size() * sizeof(value_type)) / + static_cast(size()); + } + + // The allocator used by the btree. + allocator_type get_allocator() const { + return allocator(); + } + + private: + // Internal accessor routines. + node_type *root() { return root_.template get<2>(); } + const node_type *root() const { return root_.template get<2>(); } + node_type *&mutable_root() noexcept { return root_.template get<2>(); } + key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); } + + // The leftmost node is stored as the parent of the root node. + node_type *leftmost() { return root()->parent(); } + const node_type *leftmost() const { return root()->parent(); } + + // Allocator routines. + allocator_type *mutable_allocator() noexcept { + return &root_.template get<1>(); + } + const allocator_type &allocator() const noexcept { + return root_.template get<1>(); + } + + // Allocates a correctly aligned node of at least size bytes using the + // allocator. + node_type *allocate(const size_type size) { + return reinterpret_cast( + absl::container_internal::Allocate( + mutable_allocator(), size)); + } + + // Node creation/deletion routines. + node_type* new_internal_node(node_type *parent) { + node_type *p = allocate(node_type::InternalSize()); + return node_type::init_internal(p, parent); + } + node_type* new_leaf_node(node_type *parent) { + node_type *p = allocate(node_type::LeafSize()); + return node_type::init_leaf(p, parent, kNodeValues); + } + node_type *new_leaf_root_node(const int max_count) { + node_type *p = allocate(node_type::LeafSize(max_count)); + return node_type::init_leaf(p, p, max_count); + } + + // Deletion helper routines. + void erase_same_node(iterator begin, iterator end); + iterator erase_from_leaf_node(iterator begin, size_type to_erase); + iterator rebalance_after_delete(iterator iter); + + // Deallocates a node of a certain size in bytes using the allocator. 
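+  //
+  // [Editor's note; not part of the original source.] Leaf and internal nodes
+  // are sized differently, so the matching LeafSize()/InternalSize() value
+  // must be passed back here; delete_leaf_node()/delete_internal_node() below
+  // pair each destroy with the correct allocation size.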
+ void deallocate(const size_type size, node_type *node) { + absl::container_internal::Deallocate( + mutable_allocator(), node, size); + } + + void delete_internal_node(node_type *node) { + node->destroy(mutable_allocator()); + deallocate(node_type::InternalSize(), node); + } + void delete_leaf_node(node_type *node) { + node->destroy(mutable_allocator()); + deallocate(node_type::LeafSize(node->max_count()), node); + } + + // Rebalances or splits the node iter points to. + void rebalance_or_split(iterator *iter); + + // Merges the values of left, right and the delimiting key on their parent + // onto left, removing the delimiting key and deleting right. + void merge_nodes(node_type *left, node_type *right); + + // Tries to merge node with its left or right sibling, and failing that, + // rebalance with its left or right sibling. Returns true if a merge + // occurred, at which point it is no longer valid to access node. Returns + // false if no merging took place. + bool try_merge_or_rebalance(iterator *iter); + + // Tries to shrink the height of the tree by 1. + void try_shrink(); + + iterator internal_end(iterator iter) { + return iter.node != nullptr ? iter : end(); + } + const_iterator internal_end(const_iterator iter) const { + return iter.node != nullptr ? iter : end(); + } + + // Emplaces a value into the btree immediately before iter. Requires that + // key(v) <= iter.key() and (--iter).key() <= key(v). + template + iterator internal_emplace(iterator iter, Args &&... args); + + // Returns an iterator pointing to the first value >= the value "iter" is + // pointing at. Note that "iter" might be pointing to an invalid location as + // iter.position == iter.node->count(). This routine simply moves iter up in + // the tree to a valid location. + // Requires: iter.node is non-null. + template + static IterType internal_last(IterType iter); + + // Returns an iterator pointing to the leaf position at which key would + // reside in the tree. We provide 2 versions of internal_locate. The first + // version uses a less-than comparator and is incapable of distinguishing when + // there is an exact match. The second version is for the key-compare-to + // specialization and distinguishes exact matches. The key-compare-to + // specialization allows the caller to avoid a subsequent comparison to + // determine if an exact match was made, which is important for keys with + // expensive comparison, such as strings. + template + SearchResult internal_locate( + const K &key) const; + + template + SearchResult internal_locate_impl( + const K &key, std::false_type /* IsCompareTo */) const; + + template + SearchResult internal_locate_impl( + const K &key, std::true_type /* IsCompareTo */) const; + + // Internal routine which implements lower_bound(). + template + iterator internal_lower_bound(const K &key) const; + + // Internal routine which implements upper_bound(). + template + iterator internal_upper_bound(const K &key) const; + + // Internal routine which implements find(). + template + iterator internal_find(const K &key) const; + + // Deletes a node and all of its children. + void internal_clear(node_type *node); + + // Verifies the tree structure of node. + int internal_verify(const node_type *node, + const key_type *lo, const key_type *hi) const; + + node_stats internal_stats(const node_type *node) const { + // The root can be a static empty node. 
+ if (node == nullptr || (node == root() && empty())) { + return node_stats(0, 0); + } + if (node->leaf()) { + return node_stats(1, 0); + } + node_stats res(0, 1); + for (int i = 0; i <= node->count(); ++i) { + res += internal_stats(node->child(i)); + } + return res; + } + + public: + // Exposed only for tests. + static bool testonly_uses_linear_node_search() { + return node_type::testonly_uses_linear_node_search(); + } + + private: + // We use compressed tuple in order to save space because key_compare and + // allocator_type are usually empty. + absl::container_internal::CompressedTuple + root_; + + // A pointer to the rightmost node. Note that the leftmost node is stored as + // the root's parent. + node_type *rightmost_; + + // Number of values. + size_type size_; +}; + +//// +// btree_node methods +template +template +inline void btree_node
<P>
::emplace_value(const size_type i, + allocator_type *alloc, + Args &&... args) { + assert(i <= count()); + // Shift old values to create space for new value and then construct it in + // place. + if (i < count()) { + value_init(count(), alloc, slot(count() - 1)); + for (size_type j = count() - 1; j > i; --j) + params_type::move(alloc, slot(j - 1), slot(j)); + value_destroy(i, alloc); + } + value_init(i, alloc, std::forward(args)...); + set_count(count() + 1); + + if (!leaf() && count() > i + 1) { + for (int j = count(); j > i + 1; --j) { + set_child(j, child(j - 1)); + } + clear_child(i + 1); + } +} + +template +inline void btree_node
<P>
::remove_value(const int i, allocator_type *alloc) { + if (!leaf() && count() > i + 1) { + assert(child(i + 1)->count() == 0); + for (size_type j = i + 1; j < count(); ++j) { + set_child(j, child(j + 1)); + } + clear_child(count()); + } + + remove_values_ignore_children(i, /*to_erase=*/1, alloc); +} + +template +inline void btree_node
<P>
::remove_values_ignore_children( + const int i, const int to_erase, allocator_type *alloc) { + params_type::move(alloc, slot(i + to_erase), slot(count()), slot(i)); + value_destroy_n(count() - to_erase, to_erase, alloc); + set_count(count() - to_erase); +} + +template +void btree_node
<P>
::rebalance_right_to_left(const int to_move, + btree_node *right, + allocator_type *alloc) { + assert(parent() == right->parent()); + assert(position() + 1 == right->position()); + assert(right->count() >= count()); + assert(to_move >= 1); + assert(to_move <= right->count()); + + // 1) Move the delimiting value in the parent to the left node. + value_init(count(), alloc, parent()->slot(position())); + + // 2) Move the (to_move - 1) values from the right node to the left node. + right->uninitialized_move_n(to_move - 1, 0, count() + 1, this, alloc); + + // 3) Move the new delimiting value to the parent from the right node. + params_type::move(alloc, right->slot(to_move - 1), + parent()->slot(position())); + + // 4) Shift the values in the right node to their correct position. + params_type::move(alloc, right->slot(to_move), right->slot(right->count()), + right->slot(0)); + + // 5) Destroy the now-empty to_move entries in the right node. + right->value_destroy_n(right->count() - to_move, to_move, alloc); + + if (!leaf()) { + // Move the child pointers from the right to the left node. + for (int i = 0; i < to_move; ++i) { + init_child(count() + i + 1, right->child(i)); + } + for (int i = 0; i <= right->count() - to_move; ++i) { + assert(i + to_move <= right->max_count()); + right->init_child(i, right->child(i + to_move)); + right->clear_child(i + to_move); + } + } + + // Fixup the counts on the left and right nodes. + set_count(count() + to_move); + right->set_count(right->count() - to_move); +} + +template +void btree_node
<P>
::rebalance_left_to_right(const int to_move, + btree_node *right, + allocator_type *alloc) { + assert(parent() == right->parent()); + assert(position() + 1 == right->position()); + assert(count() >= right->count()); + assert(to_move >= 1); + assert(to_move <= count()); + + // Values in the right node are shifted to the right to make room for the + // new to_move values. Then, the delimiting value in the parent and the + // other (to_move - 1) values in the left node are moved into the right node. + // Lastly, a new delimiting value is moved from the left node into the + // parent, and the remaining empty left node entries are destroyed. + + if (right->count() >= to_move) { + // The original location of the right->count() values are sufficient to hold + // the new to_move entries from the parent and left node. + + // 1) Shift existing values in the right node to their correct positions. + right->uninitialized_move_n(to_move, right->count() - to_move, + right->count(), right, alloc); + for (slot_type *src = right->slot(right->count() - to_move - 1), + *dest = right->slot(right->count() - 1), + *end = right->slot(0); + src >= end; --src, --dest) { + params_type::move(alloc, src, dest); + } + + // 2) Move the delimiting value in the parent to the right node. + params_type::move(alloc, parent()->slot(position()), + right->slot(to_move - 1)); + + // 3) Move the (to_move - 1) values from the left node to the right node. + params_type::move(alloc, slot(count() - (to_move - 1)), slot(count()), + right->slot(0)); + } else { + // The right node does not have enough initialized space to hold the new + // to_move entries, so part of them will move to uninitialized space. + + // 1) Shift existing values in the right node to their correct positions. + right->uninitialized_move_n(right->count(), 0, to_move, right, alloc); + + // 2) Move the delimiting value in the parent to the right node. + right->value_init(to_move - 1, alloc, parent()->slot(position())); + + // 3) Move the (to_move - 1) values from the left node to the right node. + const size_type uninitialized_remaining = to_move - right->count() - 1; + uninitialized_move_n(uninitialized_remaining, + count() - uninitialized_remaining, right->count(), + right, alloc); + params_type::move(alloc, slot(count() - (to_move - 1)), + slot(count() - uninitialized_remaining), right->slot(0)); + } + + // 4) Move the new delimiting value to the parent from the left node. + params_type::move(alloc, slot(count() - to_move), parent()->slot(position())); + + // 5) Destroy the now-empty to_move entries in the left node. + value_destroy_n(count() - to_move, to_move, alloc); + + if (!leaf()) { + // Move the child pointers from the left to the right node. + for (int i = right->count(); i >= 0; --i) { + right->init_child(i + to_move, right->child(i)); + right->clear_child(i); + } + for (int i = 1; i <= to_move; ++i) { + right->init_child(i - 1, child(count() - to_move + i)); + clear_child(count() - to_move + i); + } + } + + // Fixup the counts on the left and right nodes. + set_count(count() - to_move); + right->set_count(right->count() + to_move); +} + +template +void btree_node
<P>
::split(const int insert_position, btree_node *dest, + allocator_type *alloc) { + assert(dest->count() == 0); + assert(max_count() == kNodeValues); + + // We bias the split based on the position being inserted. If we're + // inserting at the beginning of the left node then bias the split to put + // more values on the right node. If we're inserting at the end of the + // right node then bias the split to put more values on the left node. + if (insert_position == 0) { + dest->set_count(count() - 1); + } else if (insert_position == kNodeValues) { + dest->set_count(0); + } else { + dest->set_count(count() / 2); + } + set_count(count() - dest->count()); + assert(count() >= 1); + + // Move values from the left sibling to the right sibling. + uninitialized_move_n(dest->count(), count(), 0, dest, alloc); + + // Destroy the now-empty entries in the left node. + value_destroy_n(count(), dest->count(), alloc); + + // The split key is the largest value in the left sibling. + set_count(count() - 1); + parent()->emplace_value(position(), alloc, slot(count())); + value_destroy(count(), alloc); + parent()->init_child(position() + 1, dest); + + if (!leaf()) { + for (int i = 0; i <= dest->count(); ++i) { + assert(child(count() + i + 1) != nullptr); + dest->init_child(i, child(count() + i + 1)); + clear_child(count() + i + 1); + } + } +} + +template +void btree_node
<P>
::merge(btree_node *src, allocator_type *alloc) { + assert(parent() == src->parent()); + assert(position() + 1 == src->position()); + + // Move the delimiting value to the left node. + value_init(count(), alloc, parent()->slot(position())); + + // Move the values from the right to the left node. + src->uninitialized_move_n(src->count(), 0, count() + 1, this, alloc); + + // Destroy the now-empty entries in the right node. + src->value_destroy_n(0, src->count(), alloc); + + if (!leaf()) { + // Move the child pointers from the right to the left node. + for (int i = 0; i <= src->count(); ++i) { + init_child(count() + i + 1, src->child(i)); + src->clear_child(i); + } + } + + // Fixup the counts on the src and dest nodes. + set_count(1 + count() + src->count()); + src->set_count(0); + + // Remove the value on the parent node. + parent()->remove_value(position(), alloc); +} + +template +void btree_node
<P>
::swap(btree_node *x, allocator_type *alloc) { + using std::swap; + assert(leaf() == x->leaf()); + + // Determine which is the smaller/larger node. + btree_node *smaller = this, *larger = x; + if (smaller->count() > larger->count()) { + swap(smaller, larger); + } + + // Swap the values. + for (slot_type *a = smaller->slot(0), *b = larger->slot(0), + *end = a + smaller->count(); + a != end; ++a, ++b) { + params_type::swap(alloc, a, b); + } + + // Move values that can't be swapped. + const size_type to_move = larger->count() - smaller->count(); + larger->uninitialized_move_n(to_move, smaller->count(), smaller->count(), + smaller, alloc); + larger->value_destroy_n(smaller->count(), to_move, alloc); + + if (!leaf()) { + // Swap the child pointers. + std::swap_ranges(&smaller->mutable_child(0), + &smaller->mutable_child(smaller->count() + 1), + &larger->mutable_child(0)); + // Update swapped children's parent pointers. + int i = 0; + for (; i <= smaller->count(); ++i) { + smaller->child(i)->set_parent(smaller); + larger->child(i)->set_parent(larger); + } + // Move the child pointers that couldn't be swapped. + for (; i <= larger->count(); ++i) { + smaller->init_child(i, larger->child(i)); + larger->clear_child(i); + } + } + + // Swap the counts. + swap(mutable_count(), x->mutable_count()); +} + +//// +// btree_iterator methods +template +void btree_iterator::increment_slow() { + if (node->leaf()) { + assert(position >= node->count()); + btree_iterator save(*this); + while (position == node->count() && !node->is_root()) { + assert(node->parent()->child(node->position()) == node); + position = node->position(); + node = node->parent(); + } + if (position == node->count()) { + *this = save; + } + } else { + assert(position < node->count()); + node = node->child(position + 1); + while (!node->leaf()) { + node = node->child(0); + } + position = 0; + } +} + +template +void btree_iterator::decrement_slow() { + if (node->leaf()) { + assert(position <= -1); + btree_iterator save(*this); + while (position < 0 && !node->is_root()) { + assert(node->parent()->child(node->position()) == node); + position = node->position() - 1; + node = node->parent(); + } + if (position < 0) { + *this = save; + } + } else { + assert(position >= 0); + node = node->child(position); + while (!node->leaf()) { + node = node->child(node->count()); + } + position = node->count() - 1; + } +} + +//// +// btree methods +template +template +void btree
<P>
::copy_or_move_values_in_order(Btree *x) { + static_assert(std::is_same::value || + std::is_same::value, + "Btree type must be same or const."); + assert(empty()); + + // We can avoid key comparisons because we know the order of the + // values is the same order we'll store them in. + auto iter = x->begin(); + if (iter == x->end()) return; + insert_multi(maybe_move_from_iterator(iter)); + ++iter; + for (; iter != x->end(); ++iter) { + // If the btree is not empty, we can just insert the new value at the end + // of the tree. + internal_emplace(end(), maybe_move_from_iterator(iter)); + } +} + +template +constexpr bool btree
<P>
::static_assert_validation() { + static_assert(std::is_nothrow_copy_constructible::value, + "Key comparison must be nothrow copy constructible"); + static_assert(std::is_nothrow_copy_constructible::value, + "Allocator must be nothrow copy constructible"); + static_assert(type_traits_internal::is_trivially_copyable::value, + "iterator not trivially copyable."); + + // Note: We assert that kTargetValues, which is computed from + // Params::kTargetNodeSize, must fit the node_type::field_type. + static_assert( + kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))), + "target node size too large"); + + // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. + using compare_result_type = + absl::result_of_t; + static_assert( + std::is_same::value || + std::is_convertible::value, + "key comparison function must return absl::{weak,strong}_ordering or " + "bool."); + + // Test the assumption made in setting kNodeValueSpace. + static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4, + "node space assumption incorrect"); + + return true; +} + +template +btree
<P>
::btree(const key_compare &comp, const allocator_type &alloc) + : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {} + +template +btree
<P>
::btree(const btree &x) : btree(x.key_comp(), x.allocator()) { + copy_or_move_values_in_order(&x); +} + +template +template +auto btree
<P>
::insert_unique(const key_type &key, Args &&... args) + -> std::pair { + if (empty()) { + mutable_root() = rightmost_ = new_leaf_root_node(1); + } + + auto res = internal_locate(key); + iterator &iter = res.value; + + if (res.HasMatch()) { + if (res.IsEq()) { + // The key already exists in the tree, do nothing. + return {iter, false}; + } + } else { + iterator last = internal_last(iter); + if (last.node && !compare_keys(key, last.key())) { + // The key already exists in the tree, do nothing. + return {last, false}; + } + } + return {internal_emplace(iter, std::forward(args)...), true}; +} + +template +template +inline auto btree
<P>
::insert_hint_unique(iterator position, const key_type &key, + Args &&... args) + -> std::pair { + if (!empty()) { + if (position == end() || compare_keys(key, position.key())) { + iterator prev = position; + if (position == begin() || compare_keys((--prev).key(), key)) { + // prev.key() < key < position.key() + return {internal_emplace(position, std::forward(args)...), true}; + } + } else if (compare_keys(position.key(), key)) { + ++position; + if (position == end() || compare_keys(key, position.key())) { + // {original `position`}.key() < key < {current `position`}.key() + return {internal_emplace(position, std::forward(args)...), true}; + } + } else { + // position.key() == key + return {position, false}; + } + } + return insert_unique(key, std::forward(args)...); +} + +template +template +void btree
<P>
::insert_iterator_unique(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert_hint_unique(end(), params_type::key(*b), *b); + } +} + +template +template +auto btree
<P>
::insert_multi(const key_type &key, ValueType &&v) -> iterator { + if (empty()) { + mutable_root() = rightmost_ = new_leaf_root_node(1); + } + + iterator iter = internal_upper_bound(key); + if (iter.node == nullptr) { + iter = end(); + } + return internal_emplace(iter, std::forward(v)); +} + +template +template +auto btree
<P>
::insert_hint_multi(iterator position, ValueType &&v) -> iterator { + if (!empty()) { + const key_type &key = params_type::key(v); + if (position == end() || !compare_keys(position.key(), key)) { + iterator prev = position; + if (position == begin() || !compare_keys(key, (--prev).key())) { + // prev.key() <= key <= position.key() + return internal_emplace(position, std::forward(v)); + } + } else { + iterator next = position; + ++next; + if (next == end() || !compare_keys(next.key(), key)) { + // position.key() < key <= next.key() + return internal_emplace(next, std::forward(v)); + } + } + } + return insert_multi(std::forward(v)); +} + +template +template +void btree
<P>
::insert_iterator_multi(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert_hint_multi(end(), *b); + } +} + +template +auto btree
<P>
::operator=(const btree &x) -> btree & { + if (this != &x) { + clear(); + + *mutable_key_comp() = x.key_comp(); + if (absl::allocator_traits< + allocator_type>::propagate_on_container_copy_assignment::value) { + *mutable_allocator() = x.allocator(); + } + + copy_or_move_values_in_order(&x); + } + return *this; +} + +template +auto btree
<P>
::operator=(btree &&x) noexcept -> btree & { + if (this != &x) { + clear(); + + using std::swap; + if (absl::allocator_traits< + allocator_type>::propagate_on_container_copy_assignment::value) { + // Note: `root_` also contains the allocator and the key comparator. + swap(root_, x.root_); + swap(rightmost_, x.rightmost_); + swap(size_, x.size_); + } else { + if (allocator() == x.allocator()) { + swap(mutable_root(), x.mutable_root()); + swap(*mutable_key_comp(), *x.mutable_key_comp()); + swap(rightmost_, x.rightmost_); + swap(size_, x.size_); + } else { + // We aren't allowed to propagate the allocator and the allocator is + // different so we can't take over its memory. We must move each element + // individually. We need both `x` and `this` to have `x`s key comparator + // while moving the values so we can't swap the key comparators. + *mutable_key_comp() = x.key_comp(); + copy_or_move_values_in_order(&x); + } + } + } + return *this; +} + +template +auto btree
<P>
::erase(iterator iter) -> iterator { + bool internal_delete = false; + if (!iter.node->leaf()) { + // Deletion of a value on an internal node. First, move the largest value + // from our left child here, then delete that position (in remove_value() + // below). We can get to the largest value from our left child by + // decrementing iter. + iterator internal_iter(iter); + --iter; + assert(iter.node->leaf()); + params_type::move(mutable_allocator(), iter.node->slot(iter.position), + internal_iter.node->slot(internal_iter.position)); + internal_delete = true; + } + + // Delete the key from the leaf. + iter.node->remove_value(iter.position, mutable_allocator()); + --size_; + + // We want to return the next value after the one we just erased. If we + // erased from an internal node (internal_delete == true), then the next + // value is ++(++iter). If we erased from a leaf node (internal_delete == + // false) then the next value is ++iter. Note that ++iter may point to an + // internal node and the value in the internal node may move to a leaf node + // (iter.node) when rebalancing is performed at the leaf level. + + iterator res = rebalance_after_delete(iter); + + // If we erased from an internal node, advance the iterator. + if (internal_delete) { + ++res; + } + return res; +} + +template +auto btree
<P>
::rebalance_after_delete(iterator iter) -> iterator { + // Merge/rebalance as we walk back up the tree. + iterator res(iter); + bool first_iteration = true; + for (;;) { + if (iter.node == root()) { + try_shrink(); + if (empty()) { + return end(); + } + break; + } + if (iter.node->count() >= kMinNodeValues) { + break; + } + bool merged = try_merge_or_rebalance(&iter); + // On the first iteration, we should update `res` with `iter` because `res` + // may have been invalidated. + if (first_iteration) { + res = iter; + first_iteration = false; + } + if (!merged) { + break; + } + iter.position = iter.node->position(); + iter.node = iter.node->parent(); + } + + // Adjust our return value. If we're pointing at the end of a node, advance + // the iterator. + if (res.position == res.node->count()) { + res.position = res.node->count() - 1; + ++res; + } + + return res; +} + +template +auto btree
<P>
::erase(iterator begin, iterator end) + -> std::pair { + difference_type count = std::distance(begin, end); + assert(count >= 0); + + if (count == 0) { + return {0, begin}; + } + + if (count == size_) { + clear(); + return {count, this->end()}; + } + + if (begin.node == end.node) { + erase_same_node(begin, end); + size_ -= count; + return {count, rebalance_after_delete(begin)}; + } + + const size_type target_size = size_ - count; + while (size_ > target_size) { + if (begin.node->leaf()) { + const size_type remaining_to_erase = size_ - target_size; + const size_type remaining_in_node = begin.node->count() - begin.position; + begin = erase_from_leaf_node( + begin, (std::min)(remaining_to_erase, remaining_in_node)); + } else { + begin = erase(begin); + } + } + return {count, begin}; +} + +template +void btree
<P>
::erase_same_node(iterator begin, iterator end) { + assert(begin.node == end.node); + assert(end.position > begin.position); + + node_type *node = begin.node; + size_type to_erase = end.position - begin.position; + if (!node->leaf()) { + // Delete all children between begin and end. + for (size_type i = 0; i < to_erase; ++i) { + internal_clear(node->child(begin.position + i + 1)); + } + // Rotate children after end into new positions. + for (size_type i = begin.position + to_erase + 1; i <= node->count(); ++i) { + node->set_child(i - to_erase, node->child(i)); + node->clear_child(i); + } + } + node->remove_values_ignore_children(begin.position, to_erase, + mutable_allocator()); + + // Do not need to update rightmost_, because + // * either end == this->end(), and therefore node == rightmost_, and still + // exists + // * or end != this->end(), and therefore rightmost_ hasn't been erased, since + // it wasn't covered in [begin, end) +} + +template +auto btree
<P>
::erase_from_leaf_node(iterator begin, size_type to_erase) + -> iterator { + node_type *node = begin.node; + assert(node->leaf()); + assert(node->count() > begin.position); + assert(begin.position + to_erase <= node->count()); + + node->remove_values_ignore_children(begin.position, to_erase, + mutable_allocator()); + + size_ -= to_erase; + + return rebalance_after_delete(begin); +} + +template +template +auto btree
<P>
::erase_unique(const K &key) -> size_type { + const iterator iter = internal_find(key); + if (iter.node == nullptr) { + // The key doesn't exist in the tree, return nothing done. + return 0; + } + erase(iter); + return 1; +} + +template +template +auto btree
<P>
::erase_multi(const K &key) -> size_type { + const iterator begin = internal_lower_bound(key); + if (begin.node == nullptr) { + // The key doesn't exist in the tree, return nothing done. + return 0; + } + // Delete all of the keys between begin and upper_bound(key). + const iterator end = internal_end(internal_upper_bound(key)); + return erase(begin, end).first; +} + +template +void btree
<P>
::clear() { + if (!empty()) { + internal_clear(root()); + } + mutable_root() = EmptyNode(); + rightmost_ = EmptyNode(); + size_ = 0; +} + +template +void btree
<P>
::swap(btree &x) { + using std::swap; + if (absl::allocator_traits< + allocator_type>::propagate_on_container_swap::value) { + // Note: `root_` also contains the allocator and the key comparator. + swap(root_, x.root_); + } else { + // It's undefined behavior if the allocators are unequal here. + assert(allocator() == x.allocator()); + swap(mutable_root(), x.mutable_root()); + swap(*mutable_key_comp(), *x.mutable_key_comp()); + } + swap(rightmost_, x.rightmost_); + swap(size_, x.size_); +} + +template +void btree
<P>
::verify() const { + assert(root() != nullptr); + assert(leftmost() != nullptr); + assert(rightmost_ != nullptr); + assert(empty() || size() == internal_verify(root(), nullptr, nullptr)); + assert(leftmost() == (++const_iterator(root(), -1)).node); + assert(rightmost_ == (--const_iterator(root(), root()->count())).node); + assert(leftmost()->leaf()); + assert(rightmost_->leaf()); +} + +template +void btree
<P>
::rebalance_or_split(iterator *iter) { + node_type *&node = iter->node; + int &insert_position = iter->position; + assert(node->count() == node->max_count()); + assert(kNodeValues == node->max_count()); + + // First try to make room on the node by rebalancing. + node_type *parent = node->parent(); + if (node != root()) { + if (node->position() > 0) { + // Try rebalancing with our left sibling. + node_type *left = parent->child(node->position() - 1); + assert(left->max_count() == kNodeValues); + if (left->count() < kNodeValues) { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the end of the right node then we bias rebalancing to + // fill up the left node. + int to_move = (kNodeValues - left->count()) / + (1 + (insert_position < kNodeValues)); + to_move = (std::max)(1, to_move); + + if (((insert_position - to_move) >= 0) || + ((left->count() + to_move) < kNodeValues)) { + left->rebalance_right_to_left(to_move, node, mutable_allocator()); + + assert(node->max_count() - node->count() == to_move); + insert_position = insert_position - to_move; + if (insert_position < 0) { + insert_position = insert_position + left->count() + 1; + node = left; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + if (node->position() < parent->count()) { + // Try rebalancing with our right sibling. + node_type *right = parent->child(node->position() + 1); + assert(right->max_count() == kNodeValues); + if (right->count() < kNodeValues) { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the beginning of the left node then we bias rebalancing + // to fill up the right node. + int to_move = + (kNodeValues - right->count()) / (1 + (insert_position > 0)); + to_move = (std::max)(1, to_move); + + if ((insert_position <= (node->count() - to_move)) || + ((right->count() + to_move) < kNodeValues)) { + node->rebalance_left_to_right(to_move, right, mutable_allocator()); + + if (insert_position > node->count()) { + insert_position = insert_position - node->count() - 1; + node = right; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + // Rebalancing failed, make sure there is room on the parent node for a new + // value. + assert(parent->max_count() == kNodeValues); + if (parent->count() == kNodeValues) { + iterator parent_iter(node->parent(), node->position()); + rebalance_or_split(&parent_iter); + } + } else { + // Rebalancing not possible because this is the root node. + // Create a new root node and set the current root node as the child of the + // new root. + parent = new_internal_node(parent); + parent->init_child(0, root()); + mutable_root() = parent; + // If the former root was a leaf node, then it's now the rightmost node. + assert(!parent->child(0)->leaf() || parent->child(0) == rightmost_); + } + + // Split the node. + node_type *split_node; + if (node->leaf()) { + split_node = new_leaf_node(parent); + node->split(insert_position, split_node, mutable_allocator()); + if (rightmost_ == node) rightmost_ = split_node; + } else { + split_node = new_internal_node(parent); + node->split(insert_position, split_node, mutable_allocator()); + } + + if (insert_position > node->count()) { + insert_position = insert_position - node->count() - 1; + node = split_node; + } +} + +template +void btree
<P>
::merge_nodes(node_type *left, node_type *right) { + left->merge(right, mutable_allocator()); + if (right->leaf()) { + if (rightmost_ == right) rightmost_ = left; + delete_leaf_node(right); + } else { + delete_internal_node(right); + } +} + +template +bool btree
<P>
::try_merge_or_rebalance(iterator *iter) { + node_type *parent = iter->node->parent(); + if (iter->node->position() > 0) { + // Try merging with our left sibling. + node_type *left = parent->child(iter->node->position() - 1); + assert(left->max_count() == kNodeValues); + if ((1 + left->count() + iter->node->count()) <= kNodeValues) { + iter->position += 1 + left->count(); + merge_nodes(left, iter->node); + iter->node = left; + return true; + } + } + if (iter->node->position() < parent->count()) { + // Try merging with our right sibling. + node_type *right = parent->child(iter->node->position() + 1); + assert(right->max_count() == kNodeValues); + if ((1 + iter->node->count() + right->count()) <= kNodeValues) { + merge_nodes(iter->node, right); + return true; + } + // Try rebalancing with our right sibling. We don't perform rebalancing if + // we deleted the first element from iter->node and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the front of the tree. + if ((right->count() > kMinNodeValues) && + ((iter->node->count() == 0) || + (iter->position > 0))) { + int to_move = (right->count() - iter->node->count()) / 2; + to_move = (std::min)(to_move, right->count() - 1); + iter->node->rebalance_right_to_left(to_move, right, mutable_allocator()); + return false; + } + } + if (iter->node->position() > 0) { + // Try rebalancing with our left sibling. We don't perform rebalancing if + // we deleted the last element from iter->node and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the back of the tree. + node_type *left = parent->child(iter->node->position() - 1); + if ((left->count() > kMinNodeValues) && + ((iter->node->count() == 0) || + (iter->position < iter->node->count()))) { + int to_move = (left->count() - iter->node->count()) / 2; + to_move = (std::min)(to_move, left->count() - 1); + left->rebalance_left_to_right(to_move, iter->node, mutable_allocator()); + iter->position += to_move; + return false; + } + } + return false; +} + +template +void btree
<P>
::try_shrink() { + if (root()->count() > 0) { + return; + } + // Deleted the last item on the root node, shrink the height of the tree. + if (root()->leaf()) { + assert(size() == 0); + delete_leaf_node(root()); + mutable_root() = EmptyNode(); + rightmost_ = EmptyNode(); + } else { + node_type *child = root()->child(0); + child->make_root(); + delete_internal_node(root()); + mutable_root() = child; + } +} + +template +template +inline IterType btree
<P>
::internal_last(IterType iter) { + assert(iter.node != nullptr); + while (iter.position == iter.node->count()) { + iter.position = iter.node->position(); + iter.node = iter.node->parent(); + if (iter.node->leaf()) { + iter.node = nullptr; + break; + } + } + return iter; +} + +template +template +inline auto btree
<P>
::internal_emplace(iterator iter, Args &&... args) + -> iterator { + if (!iter.node->leaf()) { + // We can't insert on an internal node. Instead, we'll insert after the + // previous value which is guaranteed to be on a leaf node. + --iter; + ++iter.position; + } + const int max_count = iter.node->max_count(); + if (iter.node->count() == max_count) { + // Make room in the leaf for the new item. + if (max_count < kNodeValues) { + // Insertion into the root where the root is smaller than the full node + // size. Simply grow the size of the root node. + assert(iter.node == root()); + iter.node = + new_leaf_root_node((std::min)(kNodeValues, 2 * max_count)); + iter.node->swap(root(), mutable_allocator()); + delete_leaf_node(root()); + mutable_root() = iter.node; + rightmost_ = iter.node; + } else { + rebalance_or_split(&iter); + } + } + iter.node->emplace_value(iter.position, mutable_allocator(), + std::forward(args)...); + ++size_; + return iter; +} + +template +template +inline auto btree
<P>
::internal_locate(const K &key) const + -> SearchResult { + return internal_locate_impl(key, is_key_compare_to()); +} + +template +template +inline auto btree
<P>
::internal_locate_impl( + const K &key, std::false_type /* IsCompareTo */) const + -> SearchResult { + iterator iter(const_cast(root()), 0); + for (;;) { + iter.position = iter.node->lower_bound(key, key_comp()).value; + // NOTE: we don't need to walk all the way down the tree if the keys are + // equal, but determining equality would require doing an extra comparison + // on each node on the way down, and we will need to go all the way to the + // leaf node in the expected case. + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return {iter}; +} + +template +template +inline auto btree
<P>
::internal_locate_impl( + const K &key, std::true_type /* IsCompareTo */) const + -> SearchResult { + iterator iter(const_cast(root()), 0); + for (;;) { + SearchResult res = iter.node->lower_bound(key, key_comp()); + iter.position = res.value; + if (res.match == MatchKind::kEq) { + return {iter, MatchKind::kEq}; + } + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return {iter, MatchKind::kNe}; +} + +template +template +auto btree
<P>
::internal_lower_bound(const K &key) const -> iterator { + iterator iter(const_cast(root()), 0); + for (;;) { + iter.position = iter.node->lower_bound(key, key_comp()).value; + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return internal_last(iter); +} + +template +template +auto btree
<P>
::internal_upper_bound(const K &key) const -> iterator { + iterator iter(const_cast(root()), 0); + for (;;) { + iter.position = iter.node->upper_bound(key, key_comp()); + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return internal_last(iter); +} + +template +template +auto btree
<P>
::internal_find(const K &key) const -> iterator { + auto res = internal_locate(key); + if (res.HasMatch()) { + if (res.IsEq()) { + return res.value; + } + } else { + const iterator iter = internal_last(res.value); + if (iter.node != nullptr && !compare_keys(key, iter.key())) { + return iter; + } + } + return {nullptr, 0}; +} + +template +void btree
<P>
::internal_clear(node_type *node) { + if (!node->leaf()) { + for (int i = 0; i <= node->count(); ++i) { + internal_clear(node->child(i)); + } + delete_internal_node(node); + } else { + delete_leaf_node(node); + } +} + +template +int btree
<P>
::internal_verify( + const node_type *node, const key_type *lo, const key_type *hi) const { + assert(node->count() > 0); + assert(node->count() <= node->max_count()); + if (lo) { + assert(!compare_keys(node->key(0), *lo)); + } + if (hi) { + assert(!compare_keys(*hi, node->key(node->count() - 1))); + } + for (int i = 1; i < node->count(); ++i) { + assert(!compare_keys(node->key(i), node->key(i - 1))); + } + int count = node->count(); + if (!node->leaf()) { + for (int i = 0; i <= node->count(); ++i) { + assert(node->child(i) != nullptr); + assert(node->child(i)->parent() == node); + assert(node->child(i)->position() == i); + count += internal_verify( + node->child(i), + (i == 0) ? lo : &node->key(i - 1), + (i == node->count()) ? hi : &node->key(i)); + } + } + return count; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_BTREE_H_ diff --git a/base/abseil/absl/container/internal/btree_container.h b/base/abseil/absl/container/internal/btree_container.h new file mode 100644 index 0000000..04795c2 --- /dev/null +++ b/base/abseil/absl/container/internal/btree_container.h @@ -0,0 +1,609 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ +#define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ + +#include +#include +#include +#include + +#include "absl/base/internal/throw_delegate.h" +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/common.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// A common base class for btree_set, btree_map, btree_multiset, and +// btree_multimap. +template +class btree_container { + using params_type = typename Tree::params_type; + + protected: + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. 
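+  //
+  // [Editor's sketch; not part of the original source.] With a transparent
+  // comparator the lookup routines below deduce K, so no temporary key_type
+  // is materialized:
+  //
+  //   absl::btree_set<std::string, std::less<>> s = {"alpha", "beta"};
+  //   s.find(absl::string_view("alpha"));  // no temporary std::string built
+  //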
+ template + using key_arg = + typename KeyArg::value>:: + template type; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using difference_type = typename Tree::difference_type; + using key_compare = typename Tree::key_compare; + using value_compare = typename Tree::value_compare; + using allocator_type = typename Tree::allocator_type; + using reference = typename Tree::reference; + using const_reference = typename Tree::const_reference; + using pointer = typename Tree::pointer; + using const_pointer = typename Tree::const_pointer; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using reverse_iterator = typename Tree::reverse_iterator; + using const_reverse_iterator = typename Tree::const_reverse_iterator; + using node_type = typename Tree::node_handle_type; + + // Constructors/assignments. + btree_container() : tree_(key_compare(), allocator_type()) {} + explicit btree_container(const key_compare &comp, + const allocator_type &alloc = allocator_type()) + : tree_(comp, alloc) {} + btree_container(const btree_container &x) = default; + btree_container(btree_container &&x) noexcept = default; + btree_container &operator=(const btree_container &x) = default; + btree_container &operator=(btree_container &&x) noexcept( + std::is_nothrow_move_assignable::value) = default; + + // Iterator routines. + iterator begin() { return tree_.begin(); } + const_iterator begin() const { return tree_.begin(); } + const_iterator cbegin() const { return tree_.begin(); } + iterator end() { return tree_.end(); } + const_iterator end() const { return tree_.end(); } + const_iterator cend() const { return tree_.end(); } + reverse_iterator rbegin() { return tree_.rbegin(); } + const_reverse_iterator rbegin() const { return tree_.rbegin(); } + const_reverse_iterator crbegin() const { return tree_.rbegin(); } + reverse_iterator rend() { return tree_.rend(); } + const_reverse_iterator rend() const { return tree_.rend(); } + const_reverse_iterator crend() const { return tree_.rend(); } + + // Lookup routines. + template + iterator find(const key_arg &key) { + return tree_.find(key); + } + template + const_iterator find(const key_arg &key) const { + return tree_.find(key); + } + template + bool contains(const key_arg &key) const { + return find(key) != end(); + } + template + iterator lower_bound(const key_arg &key) { + return tree_.lower_bound(key); + } + template + const_iterator lower_bound(const key_arg &key) const { + return tree_.lower_bound(key); + } + template + iterator upper_bound(const key_arg &key) { + return tree_.upper_bound(key); + } + template + const_iterator upper_bound(const key_arg &key) const { + return tree_.upper_bound(key); + } + template + std::pair equal_range(const key_arg &key) { + return tree_.equal_range(key); + } + template + std::pair equal_range( + const key_arg &key) const { + return tree_.equal_range(key); + } + + // Deletion routines. Note that there is also a deletion routine that is + // specific to btree_set_container/btree_multiset_container. + + // Erase the specified iterator from the btree. The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). 
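+  //
+  // [Editor's sketch; not part of the original source.] Because erase()
+  // returns the iterator following the removed element, the standard
+  // erase-while-iterating pattern works with the btree containers:
+  //
+  //   absl::btree_map<int, int> m = {{1, 10}, {2, 20}, {3, 30}};
+  //   for (auto it = m.begin(); it != m.end();) {
+  //     if (it->first % 2 == 0) it = m.erase(it);
+  //     else ++it;
+  //   }
+  //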
+ iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); } + iterator erase(iterator iter) { return tree_.erase(iter); } + iterator erase(const_iterator first, const_iterator last) { + return tree_.erase(iterator(first), iterator(last)).second; + } + + // Extract routines. + node_type extract(iterator position) { + // Use Move instead of Transfer, because the rebalancing code expects to + // have a valid object to scribble metadata bits on top of. + auto node = CommonAccess::Move(get_allocator(), position.slot()); + erase(position); + return node; + } + node_type extract(const_iterator position) { + return extract(iterator(position)); + } + + public: + // Utility routines. + void clear() { tree_.clear(); } + void swap(btree_container &x) { tree_.swap(x.tree_); } + void verify() const { tree_.verify(); } + + // Size routines. + size_type size() const { return tree_.size(); } + size_type max_size() const { return tree_.max_size(); } + bool empty() const { return tree_.empty(); } + + friend bool operator==(const btree_container &x, const btree_container &y) { + if (x.size() != y.size()) return false; + return std::equal(x.begin(), x.end(), y.begin()); + } + + friend bool operator!=(const btree_container &x, const btree_container &y) { + return !(x == y); + } + + friend bool operator<(const btree_container &x, const btree_container &y) { + return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); + } + + friend bool operator>(const btree_container &x, const btree_container &y) { + return y < x; + } + + friend bool operator<=(const btree_container &x, const btree_container &y) { + return !(y < x); + } + + friend bool operator>=(const btree_container &x, const btree_container &y) { + return !(x < y); + } + + // The allocator used by the btree. + allocator_type get_allocator() const { return tree_.get_allocator(); } + + // The key comparator used by the btree. + key_compare key_comp() const { return tree_.key_comp(); } + value_compare value_comp() const { return tree_.value_comp(); } + + // Support absl::Hash. + template + friend State AbslHashValue(State h, const btree_container &b) { + for (const auto &v : b) { + h = State::combine(std::move(h), v); + } + return State::combine(std::move(h), b.size()); + } + + protected: + Tree tree_; +}; + +// A common base class for btree_set and btree_map. +template +class btree_set_container : public btree_container { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + protected: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + using insert_return_type = InsertReturnType; + + // Inherit constructors. + using super_type::super_type; + btree_set_container() {} + + // Range constructor. 
+ template + btree_set_container(InputIterator b, InputIterator e, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : super_type(comp, alloc) { + insert(b, e); + } + + // Initializer list constructor. + btree_set_container(std::initializer_list init, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : btree_set_container(init.begin(), init.end(), comp, alloc) {} + + // Lookup routines. + template + size_type count(const key_arg &key) const { + return this->tree_.count_unique(key); + } + + // Insertion routines. + std::pair insert(const value_type &x) { + return this->tree_.insert_unique(params_type::key(x), x); + } + std::pair insert(value_type &&x) { + return this->tree_.insert_unique(params_type::key(x), std::move(x)); + } + template + std::pair emplace(Args &&... args) { + init_type v(std::forward(args)...); + return this->tree_.insert_unique(params_type::key(v), std::move(v)); + } + iterator insert(const_iterator position, const value_type &x) { + return this->tree_ + .insert_hint_unique(iterator(position), params_type::key(x), x) + .first; + } + iterator insert(const_iterator position, value_type &&x) { + return this->tree_ + .insert_hint_unique(iterator(position), params_type::key(x), + std::move(x)) + .first; + } + template + iterator emplace_hint(const_iterator position, Args &&... args) { + init_type v(std::forward(args)...); + return this->tree_ + .insert_hint_unique(iterator(position), params_type::key(v), + std::move(v)) + .first; + } + template + void insert(InputIterator b, InputIterator e) { + this->tree_.insert_iterator_unique(b, e); + } + void insert(std::initializer_list init) { + this->tree_.insert_iterator_unique(init.begin(), init.end()); + } + insert_return_type insert(node_type &&node) { + if (!node) return {this->end(), false, node_type()}; + std::pair res = + this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + if (res.second) { + CommonAccess::Destroy(&node); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + iterator insert(const_iterator hint, node_type &&node) { + if (!node) return this->end(); + std::pair res = this->tree_.insert_hint_unique( + iterator(hint), params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + if (res.second) CommonAccess::Destroy(&node); + return res.first; + } + + // Deletion routines. + template + size_type erase(const key_arg &key) { + return this->tree_.erase_unique(key); + } + using super_type::erase; + + // Node extraction routines. + template + node_type extract(const key_arg &key) { + auto it = this->find(key); + return it == this->end() ? node_type() : extract(it); + } + using super_type::extract; + + // Merge routines. + // Moves elements from `src` into `this`. If the element already exists in + // `this`, it is left unmodified in `src`. 
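// Example (illustrative sketch of the semantics documented in the comment
// above): merging two unique-key sets moves only keys absent from the
// destination, while duplicates stay behind in `src`. Assumes the public
// absl::btree_set wrapper built on these containers.
//
//   absl::btree_set<int> dst = {1, 2, 3};
//   absl::btree_set<int> src = {2, 3, 4, 5};
//   dst.merge(src);
//   // dst == {1, 2, 3, 4, 5}; src == {2, 3} (keys that already existed).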
+ template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &src) { // NOLINT + for (auto src_it = src.begin(); src_it != src.end();) { + if (insert(std::move(*src_it)).second) { + src_it = src.erase(src_it); + } else { + ++src_it; + } + } + } + + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &&src) { + merge(src); + } +}; + +// Base class for btree_map. +template +class btree_map_container : public btree_set_container { + using super_type = btree_set_container; + using params_type = typename Tree::params_type; + + protected: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using mapped_type = typename params_type::mapped_type; + using value_type = typename Tree::value_type; + using key_compare = typename Tree::key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + + // Inherit constructors. + using super_type::super_type; + btree_map_container() {} + + // Insertion routines. + template + std::pair try_emplace(const key_type &k, Args &&... args) { + return this->tree_.insert_unique( + k, std::piecewise_construct, std::forward_as_tuple(k), + std::forward_as_tuple(std::forward(args)...)); + } + template + std::pair try_emplace(key_type &&k, Args &&... args) { + // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` + // and then using `k` unsequenced. This is safe because the move is into a + // forwarding reference and insert_unique guarantees that `key` is never + // referenced after consuming `args`. + const key_type& key_ref = k; + return this->tree_.insert_unique( + key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)), + std::forward_as_tuple(std::forward(args)...)); + } + template + iterator try_emplace(const_iterator hint, const key_type &k, + Args &&... args) { + return this->tree_ + .insert_hint_unique(iterator(hint), k, std::piecewise_construct, + std::forward_as_tuple(k), + std::forward_as_tuple(std::forward(args)...)) + .first; + } + template + iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) { + // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` + // and then using `k` unsequenced. This is safe because the move is into a + // forwarding reference and insert_hint_unique guarantees that `key` is + // never referenced after consuming `args`. 
+ const key_type& key_ref = k; + return this->tree_ + .insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct, + std::forward_as_tuple(std::move(k)), + std::forward_as_tuple(std::forward(args)...)) + .first; + } + mapped_type &operator[](const key_type &k) { + return try_emplace(k).first->second; + } + mapped_type &operator[](key_type &&k) { + return try_emplace(std::move(k)).first->second; + } + + template + mapped_type &at(const key_arg &key) { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } + template + const mapped_type &at(const key_arg &key) const { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } +}; + +// A common base class for btree_multiset and btree_multimap. +template +class btree_multiset_container : public btree_container { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + + // Inherit constructors. + using super_type::super_type; + btree_multiset_container() {} + + // Range constructor. + template + btree_multiset_container(InputIterator b, InputIterator e, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : super_type(comp, alloc) { + insert(b, e); + } + + // Initializer list constructor. + btree_multiset_container(std::initializer_list init, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} + + // Lookup routines. + template + size_type count(const key_arg &key) const { + return this->tree_.count_multi(key); + } + + // Insertion routines. + iterator insert(const value_type &x) { return this->tree_.insert_multi(x); } + iterator insert(value_type &&x) { + return this->tree_.insert_multi(std::move(x)); + } + iterator insert(const_iterator position, const value_type &x) { + return this->tree_.insert_hint_multi(iterator(position), x); + } + iterator insert(const_iterator position, value_type &&x) { + return this->tree_.insert_hint_multi(iterator(position), std::move(x)); + } + template + void insert(InputIterator b, InputIterator e) { + this->tree_.insert_iterator_multi(b, e); + } + void insert(std::initializer_list init) { + this->tree_.insert_iterator_multi(init.begin(), init.end()); + } + template + iterator emplace(Args &&... args) { + return this->tree_.insert_multi(init_type(std::forward(args)...)); + } + template + iterator emplace_hint(const_iterator position, Args &&... 
args) { + return this->tree_.insert_hint_multi( + iterator(position), init_type(std::forward(args)...)); + } + iterator insert(node_type &&node) { + if (!node) return this->end(); + iterator res = + this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + CommonAccess::Destroy(&node); + return res; + } + iterator insert(const_iterator hint, node_type &&node) { + if (!node) return this->end(); + iterator res = this->tree_.insert_hint_multi( + iterator(hint), + std::move(params_type::element(CommonAccess::GetSlot(node)))); + CommonAccess::Destroy(&node); + return res; + } + + // Deletion routines. + template + size_type erase(const key_arg &key) { + return this->tree_.erase_multi(key); + } + using super_type::erase; + + // Node extraction routines. + template + node_type extract(const key_arg &key) { + auto it = this->find(key); + return it == this->end() ? node_type() : extract(it); + } + using super_type::extract; + + // Merge routines. + // Moves all elements from `src` into `this`. + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &src) { // NOLINT + insert(std::make_move_iterator(src.begin()), + std::make_move_iterator(src.end())); + src.clear(); + } + + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &&src) { + merge(src); + } +}; + +// A base class for btree_multimap. +template +class btree_multimap_container : public btree_multiset_container { + using super_type = btree_multiset_container; + using params_type = typename Tree::params_type; + + public: + using mapped_type = typename params_type::mapped_type; + + // Inherit constructors. + using super_type::super_type; + btree_multimap_container() {} +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ diff --git a/base/abseil/absl/container/internal/common.h b/base/abseil/absl/container/internal/common.h new file mode 100644 index 0000000..853a5b2 --- /dev/null +++ b/base/abseil/absl/container/internal/common.h @@ -0,0 +1,203 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_ +#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_ + +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/types/optional.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct IsTransparent : std::false_type {}; +template +struct IsTransparent> + : std::true_type {}; + +template +struct KeyArg { + // Transparent. Forward `K`. + template + using type = K; +}; + +template <> +struct KeyArg { + // Not transparent. Always use `key_type`. + template + using type = key_type; +}; + +// The node_handle concept from C++17. 
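// Example (illustrative): the C++17 node-handle workflow modeled below. A
// node can be extracted from one container and inserted into another
// without copying the element; map node handles even allow mutating the
// key in between. The sketch uses std::map, which exposes the same concept:
//
//   std::map<std::string, int> a = {{"one", 1}};
//   auto nh = a.extract("one");  // `a` no longer owns the element
//   nh.key() = "uno";            // keys are mutable through the handle
//   std::map<std::string, int> b;
//   b.insert(std::move(nh));     // no element copy or allocation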
+// We specialize node_handle for sets and maps. node_handle_base holds the +// common API of both. +template +class node_handle_base { + protected: + using slot_type = typename PolicyTraits::slot_type; + + public: + using allocator_type = Alloc; + + constexpr node_handle_base() {} + node_handle_base(node_handle_base&& other) noexcept { + *this = std::move(other); + } + ~node_handle_base() { destroy(); } + node_handle_base& operator=(node_handle_base&& other) noexcept { + destroy(); + if (!other.empty()) { + alloc_ = other.alloc_; + PolicyTraits::transfer(alloc(), slot(), other.slot()); + other.reset(); + } + return *this; + } + + bool empty() const noexcept { return !alloc_; } + explicit operator bool() const noexcept { return !empty(); } + allocator_type get_allocator() const { return *alloc_; } + + protected: + friend struct CommonAccess; + + struct transfer_tag_t {}; + node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s) + : alloc_(a) { + PolicyTraits::transfer(alloc(), slot(), s); + } + + struct move_tag_t {}; + node_handle_base(move_tag_t, const allocator_type& a, slot_type* s) + : alloc_(a) { + PolicyTraits::construct(alloc(), slot(), s); + } + + void destroy() { + if (!empty()) { + PolicyTraits::destroy(alloc(), slot()); + reset(); + } + } + + void reset() { + assert(alloc_.has_value()); + alloc_ = absl::nullopt; + } + + slot_type* slot() const { + assert(!empty()); + return reinterpret_cast(std::addressof(slot_space_)); + } + allocator_type* alloc() { return std::addressof(*alloc_); } + + private: + absl::optional alloc_; + mutable absl::aligned_storage_t + slot_space_; +}; + +// For sets. +template +class node_handle : public node_handle_base { + using Base = node_handle_base; + + public: + using value_type = typename PolicyTraits::value_type; + + constexpr node_handle() {} + + value_type& value() const { return PolicyTraits::element(this->slot()); } + + private: + friend struct CommonAccess; + + using Base::Base; +}; + +// For maps. +template +class node_handle> + : public node_handle_base { + using Base = node_handle_base; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + + constexpr node_handle() {} + + auto key() const -> decltype(PolicyTraits::key(this->slot())) { + return PolicyTraits::key(this->slot()); + } + + mapped_type& mapped() const { + return PolicyTraits::value(&PolicyTraits::element(this->slot())); + } + + private: + friend struct CommonAccess; + + using Base::Base; +}; + +// Provide access to non-public node-handle functions. +struct CommonAccess { + template + static auto GetSlot(const Node& node) -> decltype(node.slot()) { + return node.slot(); + } + + template + static void Destroy(Node* node) { + node->destroy(); + } + + template + static void Reset(Node* node) { + node->reset(); + } + + template + static T Transfer(Args&&... args) { + return T(typename T::transfer_tag_t{}, std::forward(args)...); + } + + template + static T Move(Args&&... args) { + return T(typename T::move_tag_t{}, std::forward(args)...); + } +}; + +// Implement the insert_return_type<> concept of C++17. 
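// Example (illustrative): what the three fields mean when re-inserting a
// node handle into a unique container. std::set returns the same struct
// shape:
//
//   std::set<int> s = {1};
//   auto nh = s.extract(1);
//   auto res = s.insert(std::move(nh));
//   // res.inserted == true, res.position points at the element, and
//   // res.node is empty. On a key collision, inserted would be false and
//   // res.node would still own the unconsumed element.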
+template +struct InsertReturnType { + Iterator position; + bool inserted; + NodeType node; +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_ diff --git a/base/abseil/absl/container/internal/compressed_tuple.h b/base/abseil/absl/container/internal/compressed_tuple.h new file mode 100644 index 0000000..4bfe92f --- /dev/null +++ b/base/abseil/absl/container/internal/compressed_tuple.h @@ -0,0 +1,265 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Helper class to perform the Empty Base Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the optimization. If all types in Ts are empty +// classes, then CompressedTuple is itself an empty class. +// +// To access the members, use member get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... +// +// https://en.cppreference.com/w/cpp/language/ebo + +#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ +#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ + +#include +#include +#include +#include + +#include "absl/utility/utility.h" + +#if defined(_MSC_VER) && !defined(__NVCC__) +// We need to mark these classes with this declspec to ensure that +// CompressedTuple happens. +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases) +#else +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class CompressedTuple; + +namespace internal_compressed_tuple { + +template +struct Elem; +template +struct Elem, I> + : std::tuple_element> {}; +template +using ElemT = typename Elem::type; + +// Use the __is_final intrinsic if available. Where it's not available, classes +// declared with the 'final' specifier cannot be used as CompressedTuple +// elements. +// TODO(sbenza): Replace this with std::is_final in C++14. +template +constexpr bool IsFinal() { +#if defined(__clang__) || defined(__GNUC__) + return __is_final(T); +#else + return false; +#endif +} + +// We can't use EBCO on other CompressedTuples because that would mean that we +// derive from multiple Storage<> instantiations with the same I parameter, +// and potentially from multiple identical Storage<> instantiations. So anytime +// we use type inheritance rather than encapsulation, we mark +// CompressedTupleImpl, to make this easy to detect. +struct uses_inheritance {}; + +template +constexpr bool ShouldUseBase() { + return std::is_class::value && std::is_empty::value && !IsFinal() && + !std::is_base_of::value; +} + +// The storage class provides two specializations: +// - For empty classes, it stores T as a base class. +// - For everything else, it stores T as a member. 
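// Example (illustrative): the size effect the two specializations are
// after. On a typical ABI, holding an empty class as a member costs at
// least one padded byte, while deriving from it costs nothing:
//
//   struct Empty {};
//   struct AsMember { int i; Empty e; };  // sizeof == 2 * sizeof(int)
//   struct AsBase : Empty { int i; };     // sizeof == sizeof(int)
//   static_assert(sizeof(AsBase) < sizeof(AsMember), "EBO saves space");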
+template ::type>()> +#else + bool UseBase = ShouldUseBase()> +#endif +struct Storage { + T value; + constexpr Storage() = default; + template + explicit constexpr Storage(absl::in_place_t, V&& v) + : value(absl::forward(v)) {} + constexpr const T& get() const& { return value; } + T& get() & { return value; } + constexpr const T&& get() const&& { return absl::move(*this).value; } + T&& get() && { return std::move(*this).value; } +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage : T { + constexpr Storage() = default; + + template + explicit constexpr Storage(absl::in_place_t, V&& v) + : T(absl::forward(v)) {} + + constexpr const T& get() const& { return *this; } + T& get() & { return *this; } + constexpr const T&& get() const&& { return absl::move(*this); } + T&& get() && { return std::move(*this); } +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple, absl::index_sequence, ShouldAnyUseBase> + // We use the dummy identity function through std::integral_constant to + // convince MSVC of accepting and expanding I in that context. Without it + // you would get: + // error C3548: 'I': parameter pack cannot be used in this context + : uses_inheritance, + Storage::value>... { + constexpr CompressedTupleImpl() = default; + template + explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) + : Storage(absl::in_place, absl::forward(args))... {} + friend CompressedTuple; +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple, absl::index_sequence, false> + // We use the dummy identity function as above... + : Storage::value, false>... { + constexpr CompressedTupleImpl() = default; + template + explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) + : Storage(absl::in_place, absl::forward(args))... {} + friend CompressedTuple; +}; + +std::false_type Or(std::initializer_list); +std::true_type Or(std::initializer_list); + +// MSVC requires this to be done separately rather than within the declaration +// of CompressedTuple below. +template +constexpr bool ShouldAnyUseBase() { + return decltype( + Or({std::integral_constant()>()...})){}; +} + +template +using TupleMoveConstructible = typename std::conditional< + std::is_reference::value, std::is_convertible, + std::is_constructible>::type; + +} // namespace internal_compressed_tuple + +// Helper class to perform the Empty Base Class Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the CompressedTuple. If all types in Ts are +// empty classes, then CompressedTuple is itself an empty class. (This +// does not apply when one or more of those empty classes is itself an empty +// CompressedTuple.) +// +// To access the members, use member .get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... 
+// +// https://en.cppreference.com/w/cpp/language/ebo +template +class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple + : private internal_compressed_tuple::CompressedTupleImpl< + CompressedTuple, absl::index_sequence_for, + internal_compressed_tuple::ShouldAnyUseBase()> { + private: + template + using ElemT = internal_compressed_tuple::ElemT; + + template + using StorageT = internal_compressed_tuple::Storage, I>; + + public: + // There seems to be a bug in MSVC dealing in which using '=default' here will + // cause the compiler to ignore the body of other constructors. The work- + // around is to explicitly implement the default constructor. +#if defined(_MSC_VER) + constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {} +#else + constexpr CompressedTuple() = default; +#endif + explicit constexpr CompressedTuple(const Ts&... base) + : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {} + + template ...)>>, + internal_compressed_tuple::TupleMoveConstructible< + Ts, Vs&&>...>::value, + bool> = true> + explicit constexpr CompressedTuple(Vs&&... base) + : CompressedTuple::CompressedTupleImpl(absl::in_place, + absl::forward(base)...) {} + + template + ElemT& get() & { + return internal_compressed_tuple::Storage, I>::get(); + } + + template + constexpr const ElemT& get() const& { + return StorageT::get(); + } + + template + ElemT&& get() && { + return std::move(*this).StorageT::get(); + } + + template + constexpr const ElemT&& get() const&& { + return absl::move(*this).StorageT::get(); + } +}; + +// Explicit specialization for a zero-element tuple +// (needed to avoid ambiguous overloads for the default constructor). +template <> +class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC + +#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ diff --git a/base/abseil/absl/container/internal/compressed_tuple_test.cc b/base/abseil/absl/container/internal/compressed_tuple_test.cc new file mode 100644 index 0000000..76bc921 --- /dev/null +++ b/base/abseil/absl/container/internal/compressed_tuple_test.cc @@ -0,0 +1,413 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/compressed_tuple.h" + +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/test_instance_tracker.h" +#include "absl/memory/memory.h" +#include "absl/types/any.h" +#include "absl/types/optional.h" +#include "absl/utility/utility.h" + +// These are declared at global scope purely so that error messages +// are smaller and easier to understand. 
+enum class CallType { kConstRef, kConstMove }; + +template +struct Empty { + constexpr CallType value() const& { return CallType::kConstRef; } + constexpr CallType value() const&& { return CallType::kConstMove; } +}; + +template +struct NotEmpty { + T value; +}; + +template +struct TwoValues { + T value1; + U value2; +}; + + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using absl::test_internal::CopyableMovableInstance; +using absl::test_internal::InstanceTracker; + +TEST(CompressedTupleTest, Sizeof) { + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple)); + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple>)); + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple, Empty<1>>)); + EXPECT_EQ(sizeof(int), + sizeof(CompressedTuple, Empty<1>, Empty<2>>)); + + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple>)); + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple, NotEmpty>)); + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple, NotEmpty, Empty<1>>)); +} + +TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) { + InstanceTracker tracker; + CompressedTuple x1(CopyableMovableInstance(1)); + EXPECT_EQ(tracker.instances(), 1); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_LE(tracker.moves(), 1); + EXPECT_EQ(x1.get<0>().value(), 1); +} + +TEST(CompressedTupleTest, OneMoveOnRValueConstructionMove) { + InstanceTracker tracker; + + CopyableMovableInstance i1(1); + CompressedTuple x1(std::move(i1)); + EXPECT_EQ(tracker.instances(), 2); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_LE(tracker.moves(), 1); + EXPECT_EQ(x1.get<0>().value(), 1); +} + +TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + CopyableMovableInstance i2(2); + Empty<0> empty; + CompressedTuple> + x1(std::move(i1), i2, empty); + EXPECT_EQ(x1.get<0>().value(), 1); + EXPECT_EQ(x1.get<1>().value(), 2); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); +} + +struct IncompleteType; +CompressedTuple> +MakeWithIncomplete(CopyableMovableInstance i1, + IncompleteType& t, // NOLINT + Empty<0> empty) { + return CompressedTuple>{ + std::move(i1), t, empty}; +} + +struct IncompleteType {}; +TEST(CompressedTupleTest, OneMoveOnRValueConstructionWithIncompleteType) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + Empty<0> empty; + struct DerivedType : IncompleteType {int value = 0;}; + DerivedType fd; + fd.value = 7; + + CompressedTuple> x1 = + MakeWithIncomplete(std::move(i1), fd, empty); + + EXPECT_EQ(x1.get<0>().value(), 1); + EXPECT_EQ(static_cast(x1.get<1>()).value, 7); + + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 2); +} + +TEST(CompressedTupleTest, + OneMoveOnRValueConstructionMixedTypes_BraceInitPoisonPillExpected) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + CopyableMovableInstance i2(2); + CompressedTuple> + x1(std::move(i1), i2, {}); // NOLINT + EXPECT_EQ(x1.get<0>().value(), 1); + EXPECT_EQ(x1.get<1>().value(), 2); + EXPECT_EQ(tracker.instances(), 3); + // We are forced into the `const Ts&...` constructor (invoking copies) + // because we need it to deduce the type of `{}`. + // std::tuple also has this behavior. + // Note, this test is proof that this is expected behavior, but it is not + // _desired_ behavior. 
+ EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 0); +} + +TEST(CompressedTupleTest, OneCopyOnLValueConstruction) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + + CompressedTuple x1(i1); + EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 0); + + tracker.ResetCopiesMovesSwaps(); + + CopyableMovableInstance i2(2); + const CopyableMovableInstance& i2_ref = i2; + CompressedTuple x2(i2_ref); + EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 0); +} + +TEST(CompressedTupleTest, OneMoveOnRValueAccess) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + CompressedTuple x(std::move(i1)); + tracker.ResetCopiesMovesSwaps(); + + CopyableMovableInstance i2 = std::move(x).get<0>(); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); +} + +TEST(CompressedTupleTest, OneCopyOnLValueAccess) { + InstanceTracker tracker; + + CompressedTuple x(CopyableMovableInstance(0)); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); + + CopyableMovableInstance t = x.get<0>(); + EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 1); +} + +TEST(CompressedTupleTest, ZeroCopyOnRefAccess) { + InstanceTracker tracker; + + CompressedTuple x(CopyableMovableInstance(0)); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); + + CopyableMovableInstance& t1 = x.get<0>(); + const CopyableMovableInstance& t2 = x.get<0>(); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); + EXPECT_EQ(t1.value(), 0); + EXPECT_EQ(t2.value(), 0); +} + +TEST(CompressedTupleTest, Access) { + struct S { + std::string x; + }; + CompressedTuple, S> x(7, {}, S{"ABC"}); + EXPECT_EQ(sizeof(x), sizeof(TwoValues)); + EXPECT_EQ(7, x.get<0>()); + EXPECT_EQ("ABC", x.get<2>().x); +} + +TEST(CompressedTupleTest, NonClasses) { + CompressedTuple x(7, "ABC"); + EXPECT_EQ(7, x.get<0>()); + EXPECT_STREQ("ABC", x.get<1>()); +} + +TEST(CompressedTupleTest, MixClassAndNonClass) { + CompressedTuple, NotEmpty> x(7, "ABC", {}, + {1.25}); + struct Mock { + int v; + const char* p; + double d; + }; + EXPECT_EQ(sizeof(x), sizeof(Mock)); + EXPECT_EQ(7, x.get<0>()); + EXPECT_STREQ("ABC", x.get<1>()); + EXPECT_EQ(1.25, x.get<3>().value); +} + +TEST(CompressedTupleTest, Nested) { + CompressedTuple, + CompressedTuple>> + x(1, CompressedTuple(2), + CompressedTuple>(3, CompressedTuple(4))); + EXPECT_EQ(1, x.get<0>()); + EXPECT_EQ(2, x.get<1>().get<0>()); + EXPECT_EQ(3, x.get<2>().get<0>()); + EXPECT_EQ(4, x.get<2>().get<1>().get<0>()); + + CompressedTuple, Empty<0>, + CompressedTuple, CompressedTuple>>> + y; + std::set*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(), + &y.get<2>().get<1>().get<0>()}; +#ifdef _MSC_VER + // MSVC has a bug where many instances of the same base class are layed out in + // the same address when using __declspec(empty_bases). + // This will be fixed in a future version of MSVC. + int expected = 1; +#else + int expected = 4; +#endif + EXPECT_EQ(expected, sizeof(y)); + EXPECT_EQ(expected, empties.size()); + EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size()); + + EXPECT_EQ(4 * sizeof(char), + sizeof(CompressedTuple, + CompressedTuple>)); + EXPECT_TRUE((std::is_empty, Empty<1>>>::value)); + + // Make sure everything still works when things are nested. 
+ struct CT_Empty : CompressedTuple> {}; + CompressedTuple, CT_Empty> nested_empty; + auto contained = nested_empty.get<0>(); + auto nested = nested_empty.get<1>().get<0>(); + EXPECT_TRUE((std::is_same::value)); +} + +TEST(CompressedTupleTest, Reference) { + int i = 7; + std::string s = "Very long std::string that goes in the heap"; + CompressedTuple x(i, i, s, s); + + // Sanity check. We should have not moved from `s` + EXPECT_EQ(s, "Very long std::string that goes in the heap"); + + EXPECT_EQ(x.get<0>(), x.get<1>()); + EXPECT_NE(&x.get<0>(), &x.get<1>()); + EXPECT_EQ(&x.get<1>(), &i); + + EXPECT_EQ(x.get<2>(), x.get<3>()); + EXPECT_NE(&x.get<2>(), &x.get<3>()); + EXPECT_EQ(&x.get<3>(), &s); +} + +TEST(CompressedTupleTest, NoElements) { + CompressedTuple<> x; + static_cast(x); // Silence -Wunused-variable. + EXPECT_TRUE(std::is_empty>::value); +} + +TEST(CompressedTupleTest, MoveOnlyElements) { + CompressedTuple> str_tup( + absl::make_unique("str")); + + CompressedTuple>, + std::unique_ptr> + x(std::move(str_tup), absl::make_unique(5)); + + EXPECT_EQ(*x.get<0>().get<0>(), "str"); + EXPECT_EQ(*x.get<1>(), 5); + + std::unique_ptr x0 = std::move(x.get<0>()).get<0>(); + std::unique_ptr x1 = std::move(x).get<1>(); + + EXPECT_EQ(*x0, "str"); + EXPECT_EQ(*x1, 5); +} + +TEST(CompressedTupleTest, MoveConstructionMoveOnlyElements) { + CompressedTuple> base( + absl::make_unique("str")); + EXPECT_EQ(*base.get<0>(), "str"); + + CompressedTuple> copy(std::move(base)); + EXPECT_EQ(*copy.get<0>(), "str"); +} + +TEST(CompressedTupleTest, AnyElements) { + any a(std::string("str")); + CompressedTuple x(any(5), a); + EXPECT_EQ(absl::any_cast(x.get<0>()), 5); + EXPECT_EQ(absl::any_cast(x.get<1>()), "str"); + + a = 0.5f; + EXPECT_EQ(absl::any_cast(x.get<1>()), 0.5); + + // Ensure copy construction work in the face of a type with a universal + // implicit constructor; + CompressedTuple c{}, d(c); // NOLINT +} + +TEST(CompressedTupleTest, Constexpr) { + struct NonTrivialStruct { + constexpr NonTrivialStruct() = default; + constexpr int value() const { return v; } + int v = 5; + }; + struct TrivialStruct { + TrivialStruct() = default; + constexpr int value() const { return v; } + int v; + }; + constexpr CompressedTuple, Empty<0>> x( + 7, 1.25, CompressedTuple(5), {}); + constexpr int x0 = x.get<0>(); + constexpr double x1 = x.get<1>(); + constexpr int x2 = x.get<2>().get<0>(); + constexpr CallType x3 = x.get<3>().value(); + + EXPECT_EQ(x0, 7); + EXPECT_EQ(x1, 1.25); + EXPECT_EQ(x2, 5); + EXPECT_EQ(x3, CallType::kConstRef); + +#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4 + constexpr CompressedTuple, TrivialStruct, int> trivial = {}; + constexpr CallType trivial0 = trivial.get<0>().value(); + constexpr int trivial1 = trivial.get<1>().value(); + constexpr int trivial2 = trivial.get<2>(); + + EXPECT_EQ(trivial0, CallType::kConstRef); + EXPECT_EQ(trivial1, 0); + EXPECT_EQ(trivial2, 0); +#endif + + constexpr CompressedTuple, NonTrivialStruct, absl::optional> + non_trivial = {}; + constexpr CallType non_trivial0 = non_trivial.get<0>().value(); + constexpr int non_trivial1 = non_trivial.get<1>().value(); + constexpr absl::optional non_trivial2 = non_trivial.get<2>(); + + EXPECT_EQ(non_trivial0, CallType::kConstRef); + EXPECT_EQ(non_trivial1, 5); + EXPECT_EQ(non_trivial2, absl::nullopt); + + static constexpr char data[] = "DEF"; + constexpr CompressedTuple z(data); + constexpr const char* z1 = z.get<0>(); + EXPECT_EQ(std::string(z1), std::string(data)); + +#if defined(__clang__) + // An apparent bug in 
earlier versions of gcc claims these are ambiguous. + constexpr int x2m = absl::move(x.get<2>()).get<0>(); + constexpr CallType x3m = absl::move(x).get<3>().value(); + EXPECT_EQ(x2m, 5); + EXPECT_EQ(x3m, CallType::kConstMove); +#endif +} + +#if defined(__clang__) || defined(__GNUC__) +TEST(CompressedTupleTest, EmptyFinalClass) { + struct S final { + int f() const { return 5; } + }; + CompressedTuple x; + EXPECT_EQ(x.get<0>().f(), 5); +} +#endif + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/internal/container_memory.h b/base/abseil/absl/container/internal/container_memory.h new file mode 100644 index 0000000..d24b0f8 --- /dev/null +++ b/base/abseil/absl/container/internal/container_memory.h @@ -0,0 +1,440 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ +#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ + +#ifdef ADDRESS_SANITIZER +#include +#endif + +#ifdef MEMORY_SANITIZER +#include +#endif + +#include +#include +#include +#include +#include +#include + +#include "absl/memory/memory.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Allocates at least n bytes aligned to the specified alignment. +// Alignment must be a power of 2. It must be positive. +// +// Note that many allocators don't honor alignment requirements above certain +// threshold (usually either alignof(std::max_align_t) or alignof(void*)). +// Allocate() doesn't apply alignment corrections. If the underlying allocator +// returns insufficiently alignment pointer, that's what you are going to get. +template +void* Allocate(Alloc* alloc, size_t n) { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + struct alignas(Alignment) M {}; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + A mem_alloc(*alloc); + void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); + assert(reinterpret_cast(p) % Alignment == 0 && + "allocator does not respect alignment"); + return p; +} + +// The pointer must have been previously obtained by calling +// Allocate(alloc, n). +template +void Deallocate(Alloc* alloc, void* p, size_t n) { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + struct alignas(Alignment) M {}; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + A mem_alloc(*alloc); + AT::deallocate(mem_alloc, static_cast(p), + (n + sizeof(M) - 1) / sizeof(M)); +} + +namespace memory_internal { + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. 
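// Example (illustrative, mirroring the container_memory unit tests later
// in this patch): forwarding a tuple of constructor arguments into
// uninitialized storage obtained from an allocator:
//
//   std::allocator<std::string> alloc;
//   std::string* p =
//       std::allocator_traits<std::allocator<std::string>>::allocate(alloc, 1);
//   ConstructFromTuple(&alloc, p, std::forward_as_tuple(5, 'a'));
//   // *p == "aaaaa"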
+template +void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, + absl::index_sequence) { + absl::allocator_traits::construct( + *alloc, ptr, std::get(std::forward(t))...); +} + +template +struct WithConstructedImplF { + template + decltype(std::declval()(std::declval())) operator()( + Args&&... args) const { + return std::forward(f)(T(std::forward(args)...)); + } + F&& f; +}; + +template +decltype(std::declval()(std::declval())) WithConstructedImpl( + Tuple&& t, absl::index_sequence, F&& f) { + return WithConstructedImplF{std::forward(f)}( + std::get(std::forward(t))...); +} + +template +auto TupleRefImpl(T&& t, absl::index_sequence) + -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) { + return std::forward_as_tuple(std::get(std::forward(t))...); +} + +// Returns a tuple of references to the elements of the input tuple. T must be a +// tuple. +template +auto TupleRef(T&& t) -> decltype( + TupleRefImpl(std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>())) { + return TupleRefImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>()); +} + +template +decltype(std::declval()(std::declval(), std::piecewise_construct, + std::declval>(), std::declval())) +DecomposePairImpl(F&& f, std::pair, V> p) { + const auto& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); +} + +} // namespace memory_internal + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. +template +void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) { + memory_internal::ConstructFromTupleImpl( + alloc, ptr, std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>()); +} + +// Constructs T using the args specified in the tuple and calls F with the +// constructed value. +template +decltype(std::declval()(std::declval())) WithConstructed( + Tuple&& t, F&& f) { + return memory_internal::WithConstructedImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>(), + std::forward(f)); +} + +// Given arguments of an std::pair's consructor, PairArgs() returns a pair of +// tuples with references to the passed arguments. The tuples contain +// constructor arguments for the first and the second elements of the pair. +// +// The following two snippets are equivalent. +// +// 1. std::pair p(args...); +// +// 2. auto a = PairArgs(args...); +// std::pair p(std::piecewise_construct, +// std::move(p.first), std::move(p.second)); +inline std::pair, std::tuple<>> PairArgs() { return {}; } +template +std::pair, std::tuple> PairArgs(F&& f, S&& s) { + return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), + std::forward_as_tuple(std::forward(s))}; +} +template +std::pair, std::tuple> PairArgs( + const std::pair& p) { + return PairArgs(p.first, p.second); +} +template +std::pair, std::tuple> PairArgs(std::pair&& p) { + return PairArgs(std::forward(p.first), std::forward(p.second)); +} +template +auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) + -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s)))) { + return std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s))); +} + +// A helper function for implementing apply() in map policies. +template +auto DecomposePair(F&& f, Args&&... 
args) + -> decltype(memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...))) { + return memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...)); +} + +// A helper function for implementing apply() in set policies. +template +decltype(std::declval()(std::declval(), std::declval())) +DecomposeValue(F&& f, Arg&& arg) { + const auto& key = arg; + return std::forward(f)(key, std::forward(arg)); +} + +// Helper functions for asan and msan. +inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) { +#ifdef ADDRESS_SANITIZER + ASAN_POISON_MEMORY_REGION(m, s); +#endif +#ifdef MEMORY_SANITIZER + __msan_poison(m, s); +#endif + (void)m; + (void)s; +} + +inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) { +#ifdef ADDRESS_SANITIZER + ASAN_UNPOISON_MEMORY_REGION(m, s); +#endif +#ifdef MEMORY_SANITIZER + __msan_unpoison(m, s); +#endif + (void)m; + (void)s; +} + +template +inline void SanitizerPoisonObject(const T* object) { + SanitizerPoisonMemoryRegion(object, sizeof(T)); +} + +template +inline void SanitizerUnpoisonObject(const T* object) { + SanitizerUnpoisonMemoryRegion(object, sizeof(T)); +} + +namespace memory_internal { + +// If Pair is a standard-layout type, OffsetOf::kFirst and +// OffsetOf::kSecond are equivalent to offsetof(Pair, first) and +// offsetof(Pair, second) respectively. Otherwise they are -1. +// +// The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout +// type, which is non-portable. +template +struct OffsetOf { + static constexpr size_t kFirst = -1; + static constexpr size_t kSecond = -1; +}; + +template +struct OffsetOf::type> { + static constexpr size_t kFirst = offsetof(Pair, first); + static constexpr size_t kSecond = offsetof(Pair, second); +}; + +template +struct IsLayoutCompatible { + private: + struct Pair { + K first; + V second; + }; + + // Is P layout-compatible with Pair? + template + static constexpr bool LayoutCompatible() { + return std::is_standard_layout
<P>() && sizeof(P) == sizeof(Pair) && + alignof(P) == alignof(Pair) && + memory_internal::OffsetOf<P>::kFirst == + memory_internal::OffsetOf<Pair>::kFirst && + memory_internal::OffsetOf<P>
::kSecond == + memory_internal::OffsetOf::kSecond; + } + + public: + // Whether pair and pair are layout-compatible. If they are, + // then it is safe to store them in a union and read from either. + static constexpr bool value = std::is_standard_layout() && + std::is_standard_layout() && + memory_internal::OffsetOf::kFirst == 0 && + LayoutCompatible>() && + LayoutCompatible>(); +}; + +} // namespace memory_internal + +// The internal storage type for key-value containers like flat_hash_map. +// +// It is convenient for the value_type of a flat_hash_map to be +// pair; the "const K" prevents accidental modification of the key +// when dealing with the reference returned from find() and similar methods. +// However, this creates other problems; we want to be able to emplace(K, V) +// efficiently with move operations, and similarly be able to move a +// pair in insert(). +// +// The solution is this union, which aliases the const and non-const versions +// of the pair. This also allows flat_hash_map to work, even though +// that has the same efficiency issues with move in emplace() and insert() - +// but people do it anyway. +// +// If kMutableKeys is false, only the value member can be accessed. +// +// If kMutableKeys is true, key can be accessed through all slots while value +// and mutable_value must be accessed only via INITIALIZED slots. Slots are +// created and destroyed via mutable_value so that the key can be moved later. +// +// Accessing one of the union fields while the other is active is safe as +// long as they are layout-compatible, which is guaranteed by the definition of +// kMutableKeys. For C++11, the relevant section of the standard is +// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19) +template +union map_slot_type { + map_slot_type() {} + ~map_slot_type() = delete; + using value_type = std::pair; + using mutable_value_type = std::pair; + + value_type value; + mutable_value_type mutable_value; + K key; +}; + +template +struct map_slot_policy { + using slot_type = map_slot_type; + using value_type = std::pair; + using mutable_value_type = std::pair; + + private: + static void emplace(slot_type* slot) { + // The construction of union doesn't do anything at runtime but it allows us + // to access its members without violating aliasing rules. + new (slot) slot_type; + } + // If pair and pair are layout-compatible, we can accept one + // or the other via slot_type. We are also free to access the key via + // slot_type::key in this case. + using kMutableKeys = memory_internal::IsLayoutCompatible; + + public: + static value_type& element(slot_type* slot) { return slot->value; } + static const value_type& element(const slot_type* slot) { + return slot->value; + } + + static const K& key(const slot_type* slot) { + return kMutableKeys::value ? slot->key : slot->value.first; + } + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct(*alloc, &slot->mutable_value, + std::forward(args)...); + } else { + absl::allocator_traits::construct(*alloc, &slot->value, + std::forward(args)...); + } + } + + // Construct this slot by moving from another slot. 
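// (Illustrative aside: the move path matters because value_type is
// std::pair<const K, V>, whose const key cannot be moved from:
//
//   std::pair<const std::string, int> p{"key", 1};
//   auto q = std::move(p);  // p.first is still copied: it is const
//
// When kMutableKeys holds, the constructions below go through
// mutable_value, i.e. std::pair<K, V>, so the key really is moved.)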
+ template + static void construct(Allocator* alloc, slot_type* slot, slot_type* other) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct( + *alloc, &slot->mutable_value, std::move(other->mutable_value)); + } else { + absl::allocator_traits::construct(*alloc, &slot->value, + std::move(other->value)); + } + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + if (kMutableKeys::value) { + absl::allocator_traits::destroy(*alloc, &slot->mutable_value); + } else { + absl::allocator_traits::destroy(*alloc, &slot->value); + } + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + emplace(new_slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct( + *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value)); + } else { + absl::allocator_traits::construct(*alloc, &new_slot->value, + std::move(old_slot->value)); + } + destroy(alloc, old_slot); + } + + template + static void swap(Allocator* alloc, slot_type* a, slot_type* b) { + if (kMutableKeys::value) { + using std::swap; + swap(a->mutable_value, b->mutable_value); + } else { + value_type tmp = std::move(a->value); + absl::allocator_traits::destroy(*alloc, &a->value); + absl::allocator_traits::construct(*alloc, &a->value, + std::move(b->value)); + absl::allocator_traits::destroy(*alloc, &b->value); + absl::allocator_traits::construct(*alloc, &b->value, + std::move(tmp)); + } + } + + template + static void move(Allocator* alloc, slot_type* src, slot_type* dest) { + if (kMutableKeys::value) { + dest->mutable_value = std::move(src->mutable_value); + } else { + absl::allocator_traits::destroy(*alloc, &dest->value); + absl::allocator_traits::construct(*alloc, &dest->value, + std::move(src->value)); + } + } + + template + static void move(Allocator* alloc, slot_type* first, slot_type* last, + slot_type* result) { + for (slot_type *src = first, *dest = result; src != last; ++src, ++dest) + move(alloc, src, dest); + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ diff --git a/base/abseil/absl/container/internal/container_memory_test.cc b/base/abseil/absl/container/internal/container_memory_test.cc new file mode 100644 index 0000000..7942c7b --- /dev/null +++ b/base/abseil/absl/container/internal/container_memory_test.cc @@ -0,0 +1,190 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/container/internal/container_memory.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::Pair; + +TEST(Memory, AlignmentLargerThanBase) { + std::allocator alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +TEST(Memory, AlignmentSmallerThanBase) { + std::allocator alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +class Fixture : public ::testing::Test { + using Alloc = std::allocator; + + public: + Fixture() { ptr_ = std::allocator_traits::allocate(*alloc(), 1); } + ~Fixture() override { + std::allocator_traits::destroy(*alloc(), ptr_); + std::allocator_traits::deallocate(*alloc(), ptr_, 1); + } + std::string* ptr() { return ptr_; } + Alloc* alloc() { return &alloc_; } + + private: + Alloc alloc_; + std::string* ptr_; +}; + +TEST_F(Fixture, ConstructNoArgs) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple()); + EXPECT_EQ(*ptr(), ""); +} + +TEST_F(Fixture, ConstructOneArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde")); + EXPECT_EQ(*ptr(), "abcde"); +} + +TEST_F(Fixture, ConstructTwoArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a')); + EXPECT_EQ(*ptr(), "aaaaa"); +} + +TEST(PairArgs, NoArgs) { + EXPECT_THAT(PairArgs(), + Pair(std::forward_as_tuple(), std::forward_as_tuple())); +} + +TEST(PairArgs, TwoArgs) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(1, 'A')); +} + +TEST(PairArgs, Pair) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::make_pair(1, 'A'))); +} + +TEST(PairArgs, Piecewise) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::piecewise_construct, std::forward_as_tuple(1), + std::forward_as_tuple('A'))); +} + +TEST(WithConstructed, Simple) { + EXPECT_EQ(1, WithConstructed( + std::make_tuple(std::string("a")), + [](absl::string_view str) { return str.size(); })); +} + +template +decltype(DecomposeValue(std::declval(), std::declval())) +DecomposeValueImpl(int, F&& f, Arg&& arg) { + return DecomposeValue(std::forward(f), std::forward(arg)); +} + +template +const char* DecomposeValueImpl(char, F&& f, Arg&& arg) { + return "not decomposable"; +} + +template +decltype(DecomposeValueImpl(0, std::declval(), std::declval())) +TryDecomposeValue(F&& f, Arg&& arg) { + return DecomposeValueImpl(0, std::forward(f), std::forward(arg)); +} + +TEST(DecomposeValue, Decomposable) { + auto f = [](const int& x, int&& y) { + EXPECT_EQ(&x, &y); + EXPECT_EQ(42, x); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposeValue(f, 42)); +} + +TEST(DecomposeValue, NotDecomposable) { + auto f = [](void*) { + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42)); +} + +template +decltype(DecomposePair(std::declval(), std::declval()...)) +DecomposePairImpl(int, F&& f, Args&&... args) { + return DecomposePair(std::forward(f), std::forward(args)...); +} + +template +const char* DecomposePairImpl(char, F&& f, Args&&... 
args) { + return "not decomposable"; +} + +template +decltype(DecomposePairImpl(0, std::declval(), std::declval()...)) +TryDecomposePair(F&& f, Args&&... args) { + return DecomposePairImpl(0, std::forward(f), std::forward(args)...); +} + +TEST(DecomposePair, Decomposable) { + auto f = [](const int& x, std::piecewise_construct_t, std::tuple k, + std::tuple&& v) { + EXPECT_EQ(&x, &std::get<0>(k)); + EXPECT_EQ(42, x); + EXPECT_EQ(0.5, std::get<0>(v)); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5)); + EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5))); + EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct, + std::make_tuple(42), std::make_tuple(0.5))); +} + +TEST(DecomposePair, NotDecomposable) { + auto f = [](...) { + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", + TryDecomposePair(f)); + EXPECT_STREQ("not decomposable", + TryDecomposePair(f, std::piecewise_construct, std::make_tuple(), + std::make_tuple(0.5))); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/internal/counting_allocator.h b/base/abseil/absl/container/internal/counting_allocator.h new file mode 100644 index 0000000..9efdc66 --- /dev/null +++ b/base/abseil/absl/container/internal/counting_allocator.h @@ -0,0 +1,83 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ +#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ + +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// This is a stateful allocator, but the state lives outside of the +// allocator (in whatever test is using the allocator). This is odd +// but helps in tests where the allocator is propagated into nested +// containers - that chain of allocators uses the same state and is +// thus easier to query for aggregate allocation information. 
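// Example (illustrative): how a test wires the external counter through
// the allocator and reads aggregate usage:
//
//   int64_t bytes_used = 0;
//   CountingAllocator<int> alloc(&bytes_used);
//   std::vector<int, CountingAllocator<int>> v(alloc);
//   v.reserve(100);  // bytes_used is now (typically) 100 * sizeof(int)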
+template <typename T>
+class CountingAllocator : public std::allocator<T> {
+ public:
+  using Alloc = std::allocator<T>;
+  using pointer = typename Alloc::pointer;
+  using size_type = typename Alloc::size_type;
+
+  CountingAllocator() : bytes_used_(nullptr) {}
+  explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
+
+  template <typename U>
+  CountingAllocator(const CountingAllocator<U>& x)
+      : Alloc(x), bytes_used_(x.bytes_used_) {}
+
+  pointer allocate(size_type n,
+                   std::allocator<void>::const_pointer hint = nullptr) {
+    assert(bytes_used_ != nullptr);
+    *bytes_used_ += n * sizeof(T);
+    return Alloc::allocate(n, hint);
+  }
+
+  void deallocate(pointer p, size_type n) {
+    Alloc::deallocate(p, n);
+    assert(bytes_used_ != nullptr);
+    *bytes_used_ -= n * sizeof(T);
+  }
+
+  template <typename U>
+  class rebind {
+   public:
+    using other = CountingAllocator<U>;
+  };
+
+  friend bool operator==(const CountingAllocator& a,
+                         const CountingAllocator& b) {
+    return a.bytes_used_ == b.bytes_used_;
+  }
+
+  friend bool operator!=(const CountingAllocator& a,
+                         const CountingAllocator& b) {
+    return !(a == b);
+  }
+
+  int64_t* bytes_used_;
+};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
diff --git a/base/abseil/absl/container/internal/hash_function_defaults.h b/base/abseil/absl/container/internal/hash_function_defaults.h
new file mode 100644
index 0000000..401ddf4
--- /dev/null
+++ b/base/abseil/absl/container/internal/hash_function_defaults.h
@@ -0,0 +1,146 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Define the default Hash and Eq functions for SwissTable containers.
+//
+// std::hash<T> and std::equal_to<T> are not appropriate hash and equal
+// functions for SwissTable containers. There are two reasons for this.
+//
+// SwissTable containers are power of 2 sized containers:
+//
+// This means they use the lower bits of the hash value to find the slot for
+// each entry. The typical hash function for integral types is the identity.
+// This is a very weak hash function for SwissTable and any power of 2 sized
+// hashtable implementation which will lead to excessive collisions. For
+// SwissTable we use murmur3 style mixing to reduce collisions to a minimum.
+//
+// SwissTable containers support heterogeneous lookup:
+//
+// In order to make heterogeneous lookup work, hash and equal functions must be
+// polymorphic. At the same time they have to satisfy the same requirements the
+// C++ standard imposes on hash functions and equality operators. That is:
+//
+//   if hash_default_eq<T>(a, b) returns true for any a and b of type T, then
+//   hash_default_hash<T>(a) must equal hash_default_hash<T>(b)
+//
+// For SwissTable containers this requirement is relaxed to allow a and b of
+// any and possibly different types. Note that like the standard the hash and
+// equal functions are still bound to T. This is important because some type U
+// can be hashed by/tested for equality differently depending on T.
+// A notable example is `const char*`. `const char*` is treated as a c-style
+// string when the hash function is `hash<std::string>` but as a pointer when
+// the hash function is `hash<void*>`.
+//
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+
+#include <stdint.h>
+
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/config.h"
+#include "absl/hash/hash.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// The hash of an object of type T is computed by using absl::Hash.
+template <class T, class E = void>
+struct HashEq {
+  using Hash = absl::Hash<T>;
+  using Eq = std::equal_to<T>;
+};
+
+struct StringHash {
+  using is_transparent = void;
+
+  size_t operator()(absl::string_view v) const {
+    return absl::Hash<absl::string_view>{}(v);
+  }
+};
+
+// Supports heterogeneous lookup for string-like elements.
+struct StringHashEq {
+  using Hash = StringHash;
+  struct Eq {
+    using is_transparent = void;
+    bool operator()(absl::string_view lhs, absl::string_view rhs) const {
+      return lhs == rhs;
+    }
+  };
+};
+
+template <>
+struct HashEq<std::string> : StringHashEq {};
+template <>
+struct HashEq<absl::string_view> : StringHashEq {};
+
+// Supports heterogeneous lookup for pointers and smart pointers.
+template <class T>
+struct HashEq<T*> {
+  struct Hash {
+    using is_transparent = void;
+    template <class U>
+    size_t operator()(const U& ptr) const {
+      return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
+    }
+  };
+  struct Eq {
+    using is_transparent = void;
+    template <class A, class B>
+    bool operator()(const A& a, const B& b) const {
+      return HashEq::ToPtr(a) == HashEq::ToPtr(b);
+    }
+  };
+
+ private:
+  static const T* ToPtr(const T* ptr) { return ptr; }
+  template <class U, class D>
+  static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
+    return ptr.get();
+  }
+  template <class U>
+  static const T* ToPtr(const std::shared_ptr<U>& ptr) {
+    return ptr.get();
+  }
+};
+
+template <class T, class D>
+struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
+template <class T>
+struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
+
+// This header's visibility is restricted. If you need to access the default
+// hasher please use the container's ::hasher alias instead.
+//
+// Example: typename Hash = typename absl::flat_hash_map<K, V, Hash>::hasher
+template <class T>
+using hash_default_hash = typename container_internal::HashEq<T>::Hash;
+
+// This header's visibility is restricted. If you need to access the default
+// key equal please use the container's ::key_equal alias instead.
+//
+// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash, Eq>::key_equal
+template <class T>
+using hash_default_eq = typename container_internal::HashEq<T>::Eq;
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
diff --git a/base/abseil/absl/container/internal/hash_function_defaults_test.cc b/base/abseil/absl/container/internal/hash_function_defaults_test.cc
new file mode 100644
index 0000000..2eefc7e
--- /dev/null
+++ b/base/abseil/absl/container/internal/hash_function_defaults_test.cc
@@ -0,0 +1,299 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
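+
+// A sketch of what the transparent defaults enable in practice (illustrative
+// only; `Contains` and the container choice are assumptions, not part of this
+// patch): a flat_hash_set<std::string> can be probed with an
+// absl::string_view, so no temporary std::string is built for the lookup.
+//
+//   bool Contains(const absl::flat_hash_set<std::string>& set,
+//                 absl::string_view key) {
+//     return set.find(key) != set.end();  // heterogeneous lookup
+//   }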
+ +#include "absl/container/internal/hash_function_defaults.h" + +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::Types; + +TEST(Eq, Int32) { + hash_default_eq eq; + EXPECT_TRUE(eq(1, 1u)); + EXPECT_TRUE(eq(1, char{1})); + EXPECT_TRUE(eq(1, true)); + EXPECT_TRUE(eq(1, double{1.1})); + EXPECT_FALSE(eq(1, char{2})); + EXPECT_FALSE(eq(1, 2u)); + EXPECT_FALSE(eq(1, false)); + EXPECT_FALSE(eq(1, 2.)); +} + +TEST(Hash, Int32) { + hash_default_hash hash; + auto h = hash(1); + EXPECT_EQ(h, hash(1u)); + EXPECT_EQ(h, hash(char{1})); + EXPECT_EQ(h, hash(true)); + EXPECT_EQ(h, hash(double{1.1})); + EXPECT_NE(h, hash(2u)); + EXPECT_NE(h, hash(char{2})); + EXPECT_NE(h, hash(false)); + EXPECT_NE(h, hash(2.)); +} + +enum class MyEnum { A, B, C, D }; + +TEST(Eq, Enum) { + hash_default_eq eq; + EXPECT_TRUE(eq(MyEnum::A, MyEnum::A)); + EXPECT_FALSE(eq(MyEnum::A, MyEnum::B)); +} + +TEST(Hash, Enum) { + hash_default_hash hash; + + for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) { + auto h = hash(e); + EXPECT_EQ(h, hash_default_hash{}(static_cast(e))); + EXPECT_NE(h, hash(MyEnum::D)); + } +} + +using StringTypes = ::testing::Types; + +template +struct EqString : ::testing::Test { + hash_default_eq key_eq; +}; + +TYPED_TEST_SUITE(EqString, StringTypes); + +template +struct HashString : ::testing::Test { + hash_default_hash hasher; +}; + +TYPED_TEST_SUITE(HashString, StringTypes); + +TYPED_TEST(EqString, Works) { + auto eq = this->key_eq; + EXPECT_TRUE(eq("a", "a")); + EXPECT_TRUE(eq("a", absl::string_view("a"))); + EXPECT_TRUE(eq("a", std::string("a"))); + EXPECT_FALSE(eq("a", "b")); + EXPECT_FALSE(eq("a", absl::string_view("b"))); + EXPECT_FALSE(eq("a", std::string("b"))); +} + +TYPED_TEST(HashString, Works) { + auto hash = this->hasher; + auto h = hash("a"); + EXPECT_EQ(h, hash(absl::string_view("a"))); + EXPECT_EQ(h, hash(std::string("a"))); + EXPECT_NE(h, hash(absl::string_view("b"))); + EXPECT_NE(h, hash(std::string("b"))); +} + +struct NoDeleter { + template + void operator()(const T* ptr) const {} +}; + +using PointerTypes = + ::testing::Types, + std::unique_ptr, + std::unique_ptr, std::unique_ptr, + std::shared_ptr, std::shared_ptr>; + +template +struct EqPointer : ::testing::Test { + hash_default_eq key_eq; +}; + +TYPED_TEST_SUITE(EqPointer, PointerTypes); + +template +struct HashPointer : ::testing::Test { + hash_default_hash hasher; +}; + +TYPED_TEST_SUITE(HashPointer, PointerTypes); + +TYPED_TEST(EqPointer, Works) { + int dummy; + auto eq = this->key_eq; + auto sptr = std::make_shared(); + std::shared_ptr csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr uptr(ptr); + std::unique_ptr cuptr(ptr); + + EXPECT_TRUE(eq(ptr, cptr)); + EXPECT_TRUE(eq(ptr, sptr)); + EXPECT_TRUE(eq(ptr, uptr)); + EXPECT_TRUE(eq(ptr, csptr)); + EXPECT_TRUE(eq(ptr, cuptr)); + EXPECT_FALSE(eq(&dummy, cptr)); + EXPECT_FALSE(eq(&dummy, sptr)); + EXPECT_FALSE(eq(&dummy, uptr)); + EXPECT_FALSE(eq(&dummy, csptr)); + EXPECT_FALSE(eq(&dummy, cuptr)); +} + +TEST(Hash, DerivedAndBase) { + struct Base {}; + struct Derived : Base {}; + + hash_default_hash hasher; + + Base base; + Derived derived; + EXPECT_NE(hasher(&base), hasher(&derived)); + EXPECT_EQ(hasher(static_cast(&derived)), hasher(&derived)); + + auto dp = std::make_shared(); + EXPECT_EQ(hasher(static_cast(dp.get())), hasher(dp)); +} + +TEST(Hash, FunctionPointer) { + using Func = int (*)(); + 
hash_default_hash hasher; + hash_default_eq eq; + + Func p1 = [] { return 1; }, p2 = [] { return 2; }; + EXPECT_EQ(hasher(p1), hasher(p1)); + EXPECT_TRUE(eq(p1, p1)); + + EXPECT_NE(hasher(p1), hasher(p2)); + EXPECT_FALSE(eq(p1, p2)); +} + +TYPED_TEST(HashPointer, Works) { + int dummy; + auto hash = this->hasher; + auto sptr = std::make_shared(); + std::shared_ptr csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr uptr(ptr); + std::unique_ptr cuptr(ptr); + + EXPECT_EQ(hash(ptr), hash(cptr)); + EXPECT_EQ(hash(ptr), hash(sptr)); + EXPECT_EQ(hash(ptr), hash(uptr)); + EXPECT_EQ(hash(ptr), hash(csptr)); + EXPECT_EQ(hash(ptr), hash(cuptr)); + EXPECT_NE(hash(&dummy), hash(cptr)); + EXPECT_NE(hash(&dummy), hash(sptr)); + EXPECT_NE(hash(&dummy), hash(uptr)); + EXPECT_NE(hash(&dummy), hash(csptr)); + EXPECT_NE(hash(&dummy), hash(cuptr)); +} + +// Cartesian product of (std::string, absl::string_view) +// with (std::string, absl::string_view, const char*). +using StringTypesCartesianProduct = Types< + // clang-format off + + std::pair, + std::pair, + std::pair>; +// clang-format on + +constexpr char kFirstString[] = "abc123"; +constexpr char kSecondString[] = "ijk456"; + +template +struct StringLikeTest : public ::testing::Test { + typename T::first_type a1{kFirstString}; + typename T::second_type b1{kFirstString}; + typename T::first_type a2{kSecondString}; + typename T::second_type b2{kSecondString}; + hash_default_eq eq; + hash_default_hash hash; +}; + +TYPED_TEST_CASE_P(StringLikeTest); + +TYPED_TEST_P(StringLikeTest, Eq) { + EXPECT_TRUE(this->eq(this->a1, this->b1)); + EXPECT_TRUE(this->eq(this->b1, this->a1)); +} + +TYPED_TEST_P(StringLikeTest, NotEq) { + EXPECT_FALSE(this->eq(this->a1, this->b2)); + EXPECT_FALSE(this->eq(this->b2, this->a1)); +} + +TYPED_TEST_P(StringLikeTest, HashEq) { + EXPECT_EQ(this->hash(this->a1), this->hash(this->b1)); + EXPECT_EQ(this->hash(this->a2), this->hash(this->b2)); + // It would be a poor hash function which collides on these strings. + EXPECT_NE(this->hash(this->a1), this->hash(this->b2)); +} + +TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct); + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +enum Hash : size_t { + kStd = 0x2, // std::hash +#ifdef _MSC_VER + kExtension = kStd, // In MSVC, std::hash == ::hash +#else // _MSC_VER + kExtension = 0x4, // ::hash (GCC extension) +#endif // _MSC_VER +}; + +// H is a bitmask of Hash enumerations. +// Hashable is hashable via all means specified in H. +template +struct Hashable { + static constexpr bool HashableBy(Hash h) { return h & H; } +}; + +namespace std { +template +struct hash> { + template , + class = typename std::enable_if::type> + size_t operator()(E) const { + return kStd; + } +}; +} // namespace std + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +template +size_t Hash(const T& v) { + return hash_default_hash()(v); +} + +TEST(Delegate, HashDispatch) { + EXPECT_EQ(Hash(kStd), Hash(Hashable())); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/internal/hash_generator_testing.cc b/base/abseil/absl/container/internal/hash_generator_testing.cc new file mode 100644 index 0000000..75c4db6 --- /dev/null +++ b/base/abseil/absl/container/internal/hash_generator_testing.cc @@ -0,0 +1,74 @@ +// Copyright 2018 The Abseil Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_generator_testing.h"
+
+#include <deque>
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hash_internal {
+namespace {
+
+class RandomDeviceSeedSeq {
+ public:
+  using result_type = typename std::random_device::result_type;
+
+  template <class Iterator>
+  void generate(Iterator start, Iterator end) {
+    while (start != end) {
+      *start = gen_();
+      ++start;
+    }
+  }
+
+ private:
+  std::random_device gen_;
+};
+
+}  // namespace
+
+std::mt19937_64* GetSharedRng() {
+  RandomDeviceSeedSeq seed_seq;
+  static auto* rng = new std::mt19937_64(seed_seq);
+  return rng;
+}
+
+std::string Generator<std::string>::operator()() const {
+  // NOLINTNEXTLINE(runtime/int)
+  std::uniform_int_distribution<short> chars(0x20, 0x7E);
+  std::string res;
+  res.resize(32);
+  std::generate(res.begin(), res.end(),
+                [&]() { return chars(*GetSharedRng()); });
+  return res;
+}
+
+absl::string_view Generator<absl::string_view>::operator()() const {
+  static auto* arena = new std::deque<std::string>();
+  // NOLINTNEXTLINE(runtime/int)
+  std::uniform_int_distribution<short> chars(0x20, 0x7E);
+  arena->emplace_back();
+  auto& res = arena->back();
+  res.resize(32);
+  std::generate(res.begin(), res.end(),
+                [&]() { return chars(*GetSharedRng()); });
+  return res;
+}
+
+}  // namespace hash_internal
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/container/internal/hash_generator_testing.h b/base/abseil/absl/container/internal/hash_generator_testing.h
new file mode 100644
index 0000000..6869fe4
--- /dev/null
+++ b/base/abseil/absl/container/internal/hash_generator_testing.h
@@ -0,0 +1,161 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Generates random values for testing. Specialized only for the few types we
+// care about.
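+//
+// Example (hypothetical usage):
+//
+//   Generator<std::pair<int, std::string>> gen;
+//   std::pair<int, std::string> p = gen();  // random int + random 32-char string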
+ +#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ + +#include + +#include +#include +#include +#include +#include +#include + +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace hash_internal { +namespace generator_internal { + +template +struct IsMap : std::false_type {}; + +template +struct IsMap> : std::true_type {}; + +} // namespace generator_internal + +std::mt19937_64* GetSharedRng(); + +enum Enum { + kEnumEmpty, + kEnumDeleted, +}; + +enum class EnumClass : uint64_t { + kEmpty, + kDeleted, +}; + +inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) { + return o << static_cast(ec); +} + +template +struct Generator; + +template +struct Generator::value>::type> { + T operator()() const { + std::uniform_int_distribution dist; + return dist(*GetSharedRng()); + } +}; + +template <> +struct Generator { + Enum operator()() const { + std::uniform_int_distribution::type> + dist; + while (true) { + auto variate = dist(*GetSharedRng()); + if (variate != kEnumEmpty && variate != kEnumDeleted) + return static_cast(variate); + } + } +}; + +template <> +struct Generator { + EnumClass operator()() const { + std::uniform_int_distribution< + typename std::underlying_type::type> + dist; + while (true) { + EnumClass variate = static_cast(dist(*GetSharedRng())); + if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted) + return static_cast(variate); + } + } +}; + +template <> +struct Generator { + std::string operator()() const; +}; + +template <> +struct Generator { + absl::string_view operator()() const; +}; + +template <> +struct Generator { + NonStandardLayout operator()() const { + return NonStandardLayout(Generator()()); + } +}; + +template +struct Generator> { + std::pair operator()() const { + return std::pair(Generator::type>()(), + Generator::type>()()); + } +}; + +template +struct Generator> { + std::tuple operator()() const { + return std::tuple(Generator::type>()()...); + } +}; + +template +struct Generator> { + std::unique_ptr operator()() const { + return absl::make_unique(Generator()()); + } +}; + +template +struct Generator().key()), + decltype(std::declval().value())>> + : Generator().key())>::type, + typename std::decay().value())>::type>> {}; + +template +using GeneratedType = decltype( + std::declval::value, + typename Container::value_type, + typename Container::key_type>::type>&>()()); + +} // namespace hash_internal +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ diff --git a/base/abseil/absl/container/internal/hash_policy_testing.h b/base/abseil/absl/container/internal/hash_policy_testing.h new file mode 100644 index 0000000..01c40d2 --- /dev/null +++ b/base/abseil/absl/container/internal/hash_policy_testing.h @@ -0,0 +1,184 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Utilities to help tests verify that hash tables properly handle stateful +// allocators and hash functions. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/hash/hash.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace hash_testing_internal { + +template +struct WithId { + WithId() : id_(next_id()) {} + WithId(const WithId& that) : id_(that.id_) {} + WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; } + WithId& operator=(const WithId& that) { + id_ = that.id_; + return *this; + } + WithId& operator=(WithId&& that) { + id_ = that.id_; + that.id_ = 0; + return *this; + } + + size_t id() const { return id_; } + + friend bool operator==(const WithId& a, const WithId& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); } + + protected: + explicit WithId(size_t id) : id_(id) {} + + private: + size_t id_; + + template + static size_t next_id() { + // 0 is reserved for moved from state. + static size_t gId = 1; + return gId++; + } +}; + +} // namespace hash_testing_internal + +struct NonStandardLayout { + NonStandardLayout() {} + explicit NonStandardLayout(std::string s) : value(std::move(s)) {} + virtual ~NonStandardLayout() {} + + friend bool operator==(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value == b.value; + } + friend bool operator!=(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value != b.value; + } + + template + friend H AbslHashValue(H h, const NonStandardLayout& v) { + return H::combine(std::move(h), v.value); + } + + std::string value; +}; + +struct StatefulTestingHash + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingHash> { + template + size_t operator()(const T& t) const { + return absl::Hash{}(t); + } +}; + +struct StatefulTestingEqual + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingEqual> { + template + bool operator()(const T& t, const U& u) const { + return t == u; + } +}; + +// It is expected that Alloc() == Alloc() for all allocators so we cannot use +// WithId base. We need to explicitly assign ids. +template +struct Alloc : std::allocator { + using propagate_on_container_swap = std::true_type; + + // Using old paradigm for this to ensure compatibility. 
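+  // ("Old paradigm" here: inherit from std::allocator and spell out rebind
+  // explicitly rather than relying on std::allocator_traits to synthesize it,
+  // which keeps the type usable with older standard-library implementations.)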
+ explicit Alloc(size_t id = 0) : id_(id) {} + + Alloc(const Alloc&) = default; + Alloc& operator=(const Alloc&) = default; + + template + Alloc(const Alloc& that) : std::allocator(that), id_(that.id()) {} + + template + struct rebind { + using other = Alloc; + }; + + size_t id() const { return id_; } + + friend bool operator==(const Alloc& a, const Alloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); } + + private: + size_t id_ = (std::numeric_limits::max)(); +}; + +template +auto items(const Map& m) -> std::vector< + std::pair> { + using std::get; + std::vector> res; + res.reserve(m.size()); + for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v)); + return res; +} + +template +auto keys(const Set& s) + -> std::vector::type> { + std::vector::type> res; + res.reserve(s.size()); + for (const auto& v : s) res.emplace_back(v); + return res; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions +// where the unordered containers are missing certain constructors that +// take allocator arguments. This test is defined ad-hoc for the platforms +// we care about (notably Crosstool 17) because libstdcxx's useless +// versioning scheme precludes a more principled solution. +// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html) +// "the unordered associative containers in and +// meet the allocator-aware container requirements;" +#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \ +( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 )) +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0 +#else +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1 +#endif + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ diff --git a/base/abseil/absl/container/internal/hash_policy_testing_test.cc b/base/abseil/absl/container/internal/hash_policy_testing_test.cc new file mode 100644 index 0000000..f0b20fe --- /dev/null +++ b/base/abseil/absl/container/internal/hash_policy_testing_test.cc @@ -0,0 +1,45 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
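+
+// The fixtures under test come from hash_policy_testing.h. A sketch of the id
+// semantics exercised there (illustrative, with assumed ids):
+//
+//   Alloc<int> a1(1), a2(2);
+//   Alloc<int> a1_copy(a1);
+//   assert(a1 == a1_copy);  // equality compares ids, not object addresses
+//   assert(a1 != a2);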
+ +#include "absl/container/internal/hash_policy_testing.h" + +#include "gtest/gtest.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +TEST(_, Hash) { + StatefulTestingHash h1; + EXPECT_EQ(1, h1.id()); + StatefulTestingHash h2; + EXPECT_EQ(2, h2.id()); + StatefulTestingHash h1c(h1); + EXPECT_EQ(1, h1c.id()); + StatefulTestingHash h2m(std::move(h2)); + EXPECT_EQ(2, h2m.id()); + EXPECT_EQ(0, h2.id()); + StatefulTestingHash h3; + EXPECT_EQ(3, h3.id()); + h3 = StatefulTestingHash(); + EXPECT_EQ(4, h3.id()); + h3 = std::move(h1); + EXPECT_EQ(1, h3.id()); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/internal/hash_policy_traits.h b/base/abseil/absl/container/internal/hash_policy_traits.h new file mode 100644 index 0000000..3e1209c --- /dev/null +++ b/base/abseil/absl/container/internal/hash_policy_traits.h @@ -0,0 +1,191 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ + +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Defines how slots are initialized/destroyed/moved. +template +struct hash_policy_traits { + private: + struct ReturnKey { + // We return `Key` here. + // When Key=T&, we forward the lvalue reference. + // When Key=T, we return by value to avoid a dangling reference. + // eg, for string_hash_map. + template + Key operator()(Key&& k, const Args&...) const { + return std::forward(k); + } + }; + + template + struct ConstantIteratorsImpl : std::false_type {}; + + template + struct ConstantIteratorsImpl> + : P::constant_iterators {}; + + public: + // The actual object stored in the hash table. + using slot_type = typename Policy::slot_type; + + // The type of the keys stored in the hashtable. + using key_type = typename Policy::key_type; + + // The argument type for insertions into the hashtable. This is different + // from value_type for increased performance. See initializer_list constructor + // and insert() member functions for more details. + using init_type = typename Policy::init_type; + + using reference = decltype(Policy::element(std::declval())); + using pointer = typename std::remove_reference::type*; + using value_type = typename std::remove_reference::type; + + // Policies can set this variable to tell raw_hash_set that all iterators + // should be constant, even `iterator`. This is useful for set-like + // containers. + // Defaults to false if not provided by the policy. + using constant_iterators = ConstantIteratorsImpl<>; + + // PRECONDITION: `slot` is UNINITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... 
args) { + Policy::construct(alloc, slot, std::forward(args)...); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is UNINITIALIZED + template + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::destroy(alloc, slot); + } + + // Transfers the `old_slot` to `new_slot`. Any memory allocated by the + // allocator inside `old_slot` to `new_slot` can be transferred. + // + // OPTIONAL: defaults to: + // + // clone(new_slot, std::move(*old_slot)); + // destroy(old_slot); + // + // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED + // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is + // UNINITIALIZED + template + static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) { + transfer_impl(alloc, new_slot, old_slot, 0); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static auto element(slot_type* slot) -> decltype(P::element(slot)) { + return P::element(slot); + } + + // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`. + // + // If `slot` is nullptr, returns the constant amount of memory owned by any + // full slot or -1 if slots own variable amounts of memory. + // + // PRECONDITION: `slot` is INITIALIZED or nullptr + template + static size_t space_used(const slot_type* slot) { + return P::space_used(slot); + } + + // Provides generalized access to the key for elements, both for elements in + // the table and for elements that have not yet been inserted (or even + // constructed). We would like an API that allows us to say: `key(args...)` + // but we cannot do that for all cases, so we use this more general API that + // can be used for many things, including the following: + // + // - Given an element in a table, get its key. + // - Given an element initializer, get its key. + // - Given `emplace()` arguments, get the element key. + // + // Implementations of this must adhere to a very strict technical + // specification around aliasing and consuming arguments: + // + // Let `value_type` be the result type of `element()` without ref- and + // cv-qualifiers. The first argument is a functor, the rest are constructor + // arguments for `value_type`. Returns `std::forward(f)(k, xs...)`, where + // `k` is the element key, and `xs...` are the new constructor arguments for + // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias + // `ts...`. The key won't be touched once `xs...` are used to construct an + // element; `ts...` won't be touched at all, which allows `apply()` to consume + // any rvalues among them. + // + // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not + // trigger a hard compile error unless it originates from `f`. In other words, + // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not + // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK. + // + // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`, + // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not. + template + static auto apply(F&& f, Ts&&... ts) + -> decltype(P::apply(std::forward(f), std::forward(ts)...)) { + return P::apply(std::forward(f), std::forward(ts)...); + } + + // Returns the "key" portion of the slot. + // Used for node handle manipulation. 
+ template + static auto key(slot_type* slot) + -> decltype(P::apply(ReturnKey(), element(slot))) { + return P::apply(ReturnKey(), element(slot)); + } + + // Returns the "value" (as opposed to the "key") portion of the element. Used + // by maps to implement `operator[]`, `at()` and `insert_or_assign()`. + template + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } + + private: + // Use auto -> decltype as an enabler. + template + static auto transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, int) + -> decltype((void)P::transfer(alloc, new_slot, old_slot)) { + P::transfer(alloc, new_slot, old_slot); + } + template + static void transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, char) { + construct(alloc, new_slot, std::move(element(old_slot))); + destroy(alloc, old_slot); + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ diff --git a/base/abseil/absl/container/internal/hash_policy_traits_test.cc b/base/abseil/absl/container/internal/hash_policy_traits_test.cc new file mode 100644 index 0000000..6ef8b9e --- /dev/null +++ b/base/abseil/absl/container/internal/hash_policy_traits_test.cc @@ -0,0 +1,144 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/container/internal/hash_policy_traits.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::MockFunction; +using ::testing::Return; +using ::testing::ReturnRef; + +using Alloc = std::allocator; +using Slot = int; + +struct PolicyWithoutOptionalOps { + using slot_type = Slot; + using key_type = Slot; + using init_type = Slot; + + static std::function construct; + static std::function destroy; + + static std::function element; + static int apply(int v) { return apply_impl(v); } + static std::function apply_impl; + static std::function value; +}; + +std::function PolicyWithoutOptionalOps::construct; +std::function PolicyWithoutOptionalOps::destroy; + +std::function PolicyWithoutOptionalOps::element; +std::function PolicyWithoutOptionalOps::apply_impl; +std::function PolicyWithoutOptionalOps::value; + +struct PolicyWithOptionalOps : PolicyWithoutOptionalOps { + static std::function transfer; +}; + +std::function PolicyWithOptionalOps::transfer; + +struct Test : ::testing::Test { + Test() { + PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) { + construct.Call(a1, a2, std::move(a3)); + }; + PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) { + destroy.Call(a1, a2); + }; + + PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& { + return element.Call(a1); + }; + PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int { + return apply.Call(a1); + }; + PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& { + return value.Call(a1); + }; + + PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) { + return transfer.Call(a1, a2, a3); + }; + } + + std::allocator alloc; + int a = 53; + + MockFunction construct; + MockFunction destroy; + + MockFunction element; + MockFunction apply; + MockFunction value; + + MockFunction transfer; +}; + +TEST_F(Test, construct) { + EXPECT_CALL(construct, Call(&alloc, &a, 53)); + hash_policy_traits::construct(&alloc, &a, 53); +} + +TEST_F(Test, destroy) { + EXPECT_CALL(destroy, Call(&alloc, &a)); + hash_policy_traits::destroy(&alloc, &a); +} + +TEST_F(Test, element) { + int b = 0; + EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &hash_policy_traits::element(&a)); +} + +TEST_F(Test, apply) { + EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337)); + EXPECT_EQ(1337, (hash_policy_traits::apply(42))); +} + +TEST_F(Test, value) { + int b = 0; + EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &hash_policy_traits::value(&a)); +} + +TEST_F(Test, without_transfer) { + int b = 42; + EXPECT_CALL(element, Call(&b)).WillOnce(::testing::ReturnRef(b)); + EXPECT_CALL(construct, Call(&alloc, &a, b)); + EXPECT_CALL(destroy, Call(&alloc, &b)); + hash_policy_traits::transfer(&alloc, &a, &b); +} + +TEST_F(Test, with_transfer) { + int b = 42; + EXPECT_CALL(transfer, Call(&alloc, &a, &b)); + hash_policy_traits::transfer(&alloc, &a, &b); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/internal/hashtable_debug.h b/base/abseil/absl/container/internal/hashtable_debug.h new file mode 100644 index 0000000..19d5212 --- /dev/null +++ b/base/abseil/absl/container/internal/hashtable_debug.h @@ -0,0 +1,110 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This library provides APIs to debug the probing behavior of hash tables. +// +// In general, the probing behavior is a black box for users and only the +// side effects can be measured in the form of performance differences. +// These APIs give a glimpse on the actual behavior of the probing algorithms in +// these hashtables given a specified hash function and a set of elements. +// +// The probe count distribution can be used to assess the quality of the hash +// function for that particular hash table. Note that a hash function that +// performs well in one hash table implementation does not necessarily performs +// well in a different one. +// +// This library supports std::unordered_{set,map}, dense_hash_{set,map} and +// absl::{flat,node,string}_hash_{set,map}. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ + +#include +#include +#include +#include + +#include "absl/container/internal/hashtable_debug_hooks.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Returns the number of probes required to lookup `key`. Returns 0 for a +// search with no collisions. Higher values mean more hash collisions occurred; +// however, the exact meaning of this number varies according to the container +// type. +template +size_t GetHashtableDebugNumProbes( + const C& c, const typename C::key_type& key) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::GetNumProbes(c, key); +} + +// Gets a histogram of the number of probes for each elements in the container. +// The sum of all the values in the vector is equal to container.size(). +template +std::vector GetHashtableDebugNumProbesHistogram(const C& container) { + std::vector v; + for (auto it = container.begin(); it != container.end(); ++it) { + size_t num_probes = GetHashtableDebugNumProbes( + container, + absl::container_internal::hashtable_debug_internal::GetKey(*it, 0)); + v.resize((std::max)(v.size(), num_probes + 1)); + v[num_probes]++; + } + return v; +} + +struct HashtableDebugProbeSummary { + size_t total_elements; + size_t total_num_probes; + double mean; +}; + +// Gets a summary of the probe count distribution for the elements in the +// container. +template +HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) { + auto probes = GetHashtableDebugNumProbesHistogram(container); + HashtableDebugProbeSummary summary = {}; + for (size_t i = 0; i < probes.size(); ++i) { + summary.total_elements += probes[i]; + summary.total_num_probes += probes[i] * i; + } + summary.mean = 1.0 * summary.total_num_probes / summary.total_elements; + return summary; +} + +// Returns the number of bytes requested from the allocator by the container +// and not freed. 
+template +size_t AllocatedByteSize(const C& c) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::AllocatedByteSize(c); +} + +// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C` +// and `c.size()` is equal to `num_elements`. +template +size_t LowerBoundAllocatedByteSize(size_t num_elements) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::LowerBoundAllocatedByteSize(num_elements); +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ diff --git a/base/abseil/absl/container/internal/hashtable_debug_hooks.h b/base/abseil/absl/container/internal/hashtable_debug_hooks.h new file mode 100644 index 0000000..3e9ea59 --- /dev/null +++ b/base/abseil/absl/container/internal/hashtable_debug_hooks.h @@ -0,0 +1,85 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Provides the internal API for hashtable_debug.h. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ + +#include + +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace hashtable_debug_internal { + +// If it is a map, call get<0>(). +using std::get; +template +auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) { + return get<0>(pair); +} + +// If it is not a map, return the value directly. +template +const typename T::key_type& GetKey(const typename T::key_type& key, char) { + return key; +} + +// Containers should specialize this to provide debug information for that +// container. +template +struct HashtableDebugAccess { + // Returns the number of probes required to find `key` in `c`. The "number of + // probes" is a concept that can vary by container. Implementations should + // return 0 when `key` was found in the minimum number of operations and + // should increment the result for each non-trivial operation required to find + // `key`. + // + // The default implementation uses the bucket api from the standard and thus + // works for `std::unordered_*` containers. + static size_t GetNumProbes(const Container& c, + const typename Container::key_type& key) { + if (!c.bucket_count()) return {}; + size_t num_probes = 0; + size_t bucket = c.bucket(key); + for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) { + if (it == e) return num_probes; + if (c.key_eq()(key, GetKey(*it, 0))) return num_probes; + } + } + + // Returns the number of bytes requested from the allocator by the container + // and not freed. + // + // static size_t AllocatedByteSize(const Container& c); + + // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type + // `Container` and `c.size()` is equal to `num_elements`. 
+ // + // static size_t LowerBoundAllocatedByteSize(size_t num_elements); +}; + +} // namespace hashtable_debug_internal +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ diff --git a/base/abseil/absl/container/internal/hashtablez_sampler.cc b/base/abseil/absl/container/internal/hashtablez_sampler.cc new file mode 100644 index 0000000..e15f444 --- /dev/null +++ b/base/abseil/absl/container/internal/hashtablez_sampler.cc @@ -0,0 +1,270 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/hashtablez_sampler.h" + +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/internal/exponential_biased.h" +#include "absl/container/internal/have_sse.h" +#include "absl/debugging/stacktrace.h" +#include "absl/memory/memory.h" +#include "absl/synchronization/mutex.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +constexpr int HashtablezInfo::kMaxStackDepth; + +namespace { +ABSL_CONST_INIT std::atomic g_hashtablez_enabled{ + false +}; +ABSL_CONST_INIT std::atomic g_hashtablez_sample_parameter{1 << 10}; +ABSL_CONST_INIT std::atomic g_hashtablez_max_samples{1 << 20}; + +#if ABSL_PER_THREAD_TLS == 1 +ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased + g_exponential_biased_generator; +#endif + +} // namespace + +#if ABSL_PER_THREAD_TLS == 1 +ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0; +#endif // ABSL_PER_THREAD_TLS == 1 + + +HashtablezSampler& HashtablezSampler::Global() { + static auto* sampler = new HashtablezSampler(); + return *sampler; +} + +HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback( + DisposeCallback f) { + return dispose_.exchange(f, std::memory_order_relaxed); +} + +HashtablezInfo::HashtablezInfo() { PrepareForSampling(); } +HashtablezInfo::~HashtablezInfo() = default; + +void HashtablezInfo::PrepareForSampling() { + capacity.store(0, std::memory_order_relaxed); + size.store(0, std::memory_order_relaxed); + num_erases.store(0, std::memory_order_relaxed); + max_probe_length.store(0, std::memory_order_relaxed); + total_probe_length.store(0, std::memory_order_relaxed); + hashes_bitwise_or.store(0, std::memory_order_relaxed); + hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed); + + create_time = absl::Now(); + // The inliner makes hardcoded skip_count difficult (especially when combined + // with LTO). We use the ability to exclude stacks by regex when encoding + // instead. 
+ depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth, + /* skip_count= */ 0); + dead = nullptr; +} + +HashtablezSampler::HashtablezSampler() + : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) { + absl::MutexLock l(&graveyard_.init_mu); + graveyard_.dead = &graveyard_; +} + +HashtablezSampler::~HashtablezSampler() { + HashtablezInfo* s = all_.load(std::memory_order_acquire); + while (s != nullptr) { + HashtablezInfo* next = s->next; + delete s; + s = next; + } +} + +void HashtablezSampler::PushNew(HashtablezInfo* sample) { + sample->next = all_.load(std::memory_order_relaxed); + while (!all_.compare_exchange_weak(sample->next, sample, + std::memory_order_release, + std::memory_order_relaxed)) { + } +} + +void HashtablezSampler::PushDead(HashtablezInfo* sample) { + if (auto* dispose = dispose_.load(std::memory_order_relaxed)) { + dispose(*sample); + } + + absl::MutexLock graveyard_lock(&graveyard_.init_mu); + absl::MutexLock sample_lock(&sample->init_mu); + sample->dead = graveyard_.dead; + graveyard_.dead = sample; +} + +HashtablezInfo* HashtablezSampler::PopDead() { + absl::MutexLock graveyard_lock(&graveyard_.init_mu); + + // The list is circular, so eventually it collapses down to + // graveyard_.dead == &graveyard_ + // when it is empty. + HashtablezInfo* sample = graveyard_.dead; + if (sample == &graveyard_) return nullptr; + + absl::MutexLock sample_lock(&sample->init_mu); + graveyard_.dead = sample->dead; + sample->PrepareForSampling(); + return sample; +} + +HashtablezInfo* HashtablezSampler::Register() { + int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed); + if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) { + size_estimate_.fetch_sub(1, std::memory_order_relaxed); + dropped_samples_.fetch_add(1, std::memory_order_relaxed); + return nullptr; + } + + HashtablezInfo* sample = PopDead(); + if (sample == nullptr) { + // Resurrection failed. Hire a new warlock. + sample = new HashtablezInfo(); + PushNew(sample); + } + + return sample; +} + +void HashtablezSampler::Unregister(HashtablezInfo* sample) { + PushDead(sample); + size_estimate_.fetch_sub(1, std::memory_order_relaxed); +} + +int64_t HashtablezSampler::Iterate( + const std::function& f) { + HashtablezInfo* s = all_.load(std::memory_order_acquire); + while (s != nullptr) { + absl::MutexLock l(&s->init_mu); + if (s->dead == nullptr) { + f(*s); + } + s = s->next; + } + + return dropped_samples_.load(std::memory_order_relaxed); +} + +static bool ShouldForceSampling() { + enum ForceState { + kDontForce, + kForce, + kUninitialized + }; + ABSL_CONST_INIT static std::atomic global_state{ + kUninitialized}; + ForceState state = global_state.load(std::memory_order_relaxed); + if (ABSL_PREDICT_TRUE(state == kDontForce)) return false; + + if (state == kUninitialized) { + state = AbslContainerInternalSampleEverything() ? kForce : kDontForce; + global_state.store(state, std::memory_order_relaxed); + } + return state == kForce; +} + +HashtablezInfo* SampleSlow(int64_t* next_sample) { + if (ABSL_PREDICT_FALSE(ShouldForceSampling())) { + *next_sample = 1; + return HashtablezSampler::Global().Register(); + } + +#if ABSL_PER_THREAD_TLS == 0 + *next_sample = std::numeric_limits::max(); + return nullptr; +#else + bool first = *next_sample < 0; + *next_sample = g_exponential_biased_generator.GetStride( + g_hashtablez_sample_parameter.load(std::memory_order_relaxed)); + // Small values of interval are equivalent to just sampling next time. 
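+  // (The stride is drawn from a geometric distribution whose mean is
+  // g_hashtablez_sample_parameter, so on average one in every
+  // `sample_parameter` tables gets sampled while individual picks remain
+  // unpredictable.)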
+ ABSL_ASSERT(*next_sample >= 1); + + // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold + // low enough that we will start sampling in a reasonable time, so we just use + // the default sampling rate. + if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr; + + // We will only be negative on our first count, so we should just retry in + // that case. + if (first) { + if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr; + return SampleSlow(next_sample); + } + + return HashtablezSampler::Global().Register(); +#endif +} + +void UnsampleSlow(HashtablezInfo* info) { + HashtablezSampler::Global().Unregister(info); +} + +void RecordInsertSlow(HashtablezInfo* info, size_t hash, + size_t distance_from_desired) { + // SwissTables probe in groups of 16, so scale this to count items probes and + // not offset from desired. + size_t probe_length = distance_from_desired; +#if SWISSTABLE_HAVE_SSE2 + probe_length /= 16; +#else + probe_length /= 8; +#endif + + info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed); + info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed); + info->max_probe_length.store( + std::max(info->max_probe_length.load(std::memory_order_relaxed), + probe_length), + std::memory_order_relaxed); + info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed); + info->size.fetch_add(1, std::memory_order_relaxed); +} + +void SetHashtablezEnabled(bool enabled) { + g_hashtablez_enabled.store(enabled, std::memory_order_release); +} + +void SetHashtablezSampleParameter(int32_t rate) { + if (rate > 0) { + g_hashtablez_sample_parameter.store(rate, std::memory_order_release); + } else { + ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld", + static_cast(rate)); // NOLINT(runtime/int) + } +} + +void SetHashtablezMaxSamples(int32_t max) { + if (max > 0) { + g_hashtablez_max_samples.store(max, std::memory_order_release); + } else { + ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld", + static_cast(max)); // NOLINT(runtime/int) + } +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/internal/hashtablez_sampler.h b/base/abseil/absl/container/internal/hashtablez_sampler.h new file mode 100644 index 0000000..c4f9629 --- /dev/null +++ b/base/abseil/absl/container/internal/hashtablez_sampler.h @@ -0,0 +1,288 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: hashtablez_sampler.h +// ----------------------------------------------------------------------------- +// +// This header file defines the API for a low level library to sample hashtables +// and collect runtime statistics about them. +// +// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which +// store information about a single sample. +// +// `Record*` methods store information into samples. 
+// `Sample()` and `Unsample()` make use of a single global sampler with +// properties controlled by the flags hashtablez_enabled, +// hashtablez_sample_rate, and hashtablez_max_samples. +// +// WARNING +// +// Using this sampling API may cause sampled Swiss tables to use the global +// allocator (operator `new`) in addition to any custom allocator. If you +// are using a table in an unusual circumstance where allocation or calling a +// linux syscall is unacceptable, this could interfere. +// +// This utility is internal-only. Use at your own risk. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ + +#include +#include +#include +#include + +#include "absl/base/internal/per_thread_tls.h" +#include "absl/base/optimization.h" +#include "absl/container/internal/have_sse.h" +#include "absl/synchronization/mutex.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Stores information about a sampled hashtable. All mutations to this *must* +// be made through `Record*` functions below. All reads from this *must* only +// occur in the callback to `HashtablezSampler::Iterate`. +struct HashtablezInfo { + // Constructs the object but does not fill in any fields. + HashtablezInfo(); + ~HashtablezInfo(); + HashtablezInfo(const HashtablezInfo&) = delete; + HashtablezInfo& operator=(const HashtablezInfo&) = delete; + + // Puts the object into a clean state, fills in the logically `const` members, + // blocking for any readers that are currently sampling the object. + void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); + + // These fields are mutated by the various Record* APIs and need to be + // thread-safe. + std::atomic capacity; + std::atomic size; + std::atomic num_erases; + std::atomic max_probe_length; + std::atomic total_probe_length; + std::atomic hashes_bitwise_or; + std::atomic hashes_bitwise_and; + + // `HashtablezSampler` maintains intrusive linked lists for all samples. See + // comments on `HashtablezSampler::all_` for details on these. `init_mu` + // guards the ability to restore the sample to a pristine state. This + // prevents races with sampling and resurrecting an object. + absl::Mutex init_mu; + HashtablezInfo* next; + HashtablezInfo* dead ABSL_GUARDED_BY(init_mu); + + // All of the fields below are set by `PrepareForSampling`, they must not be + // mutated in `Record*` functions. They are logically `const` in that sense. + // These are guarded by init_mu, but that is not externalized to clients, who + // can only read them during `HashtablezSampler::Iterate` which will hold the + // lock. + static constexpr int kMaxStackDepth = 64; + absl::Time create_time; + int32_t depth; + void* stack[kMaxStackDepth]; +}; + +inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { +#if SWISSTABLE_HAVE_SSE2 + total_probe_length /= 16; +#else + total_probe_length /= 8; +#endif + info->total_probe_length.store(total_probe_length, std::memory_order_relaxed); + info->num_erases.store(0, std::memory_order_relaxed); +} + +inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, + size_t capacity) { + info->size.store(size, std::memory_order_relaxed); + info->capacity.store(capacity, std::memory_order_relaxed); + if (size == 0) { + // This is a clear, reset the total/num_erases too. 
+
+inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+                                     size_t capacity) {
+  info->size.store(size, std::memory_order_relaxed);
+  info->capacity.store(capacity, std::memory_order_relaxed);
+  if (size == 0) {
+    // This is a clear, reset the total/num_erases too.
+    RecordRehashSlow(info, 0);
+  }
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+                      size_t distance_from_desired);
+
+inline void RecordEraseSlow(HashtablezInfo* info) {
+  info->size.fetch_sub(1, std::memory_order_relaxed);
+  info->num_erases.fetch_add(1, std::memory_order_relaxed);
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample);
+void UnsampleSlow(HashtablezInfo* info);
+
+class HashtablezInfoHandle {
+ public:
+  explicit HashtablezInfoHandle() : info_(nullptr) {}
+  explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
+  ~HashtablezInfoHandle() {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    UnsampleSlow(info_);
+  }
+
+  HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
+  HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;
+
+  HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
+      : info_(absl::exchange(o.info_, nullptr)) {}
+  HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
+    if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
+      UnsampleSlow(info_);
+    }
+    info_ = absl::exchange(o.info_, nullptr);
+    return *this;
+  }
+
+  inline void RecordStorageChanged(size_t size, size_t capacity) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordStorageChangedSlow(info_, size, capacity);
+  }
+
+  inline void RecordRehash(size_t total_probe_length) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordRehashSlow(info_, total_probe_length);
+  }
+
+  inline void RecordInsert(size_t hash, size_t distance_from_desired) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordInsertSlow(info_, hash, distance_from_desired);
+  }
+
+  inline void RecordErase() {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordEraseSlow(info_);
+  }
+
+  friend inline void swap(HashtablezInfoHandle& lhs,
+                          HashtablezInfoHandle& rhs) {
+    std::swap(lhs.info_, rhs.info_);
+  }
+
+ private:
+  friend class HashtablezInfoHandlePeer;
+  HashtablezInfo* info_;
+};
+
+#if ABSL_PER_THREAD_TLS == 1
+extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+#endif  // ABSL_PER_THREAD_TLS
+
+// Returns an RAII sampling handle that manages registration and unregistration
+// with the global sampler.
+inline HashtablezInfoHandle Sample() {
+#if ABSL_PER_THREAD_TLS == 1
+  if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+    return HashtablezInfoHandle(nullptr);
+  }
+  return HashtablezInfoHandle(SampleSlow(&global_next_sample));
+#else
+  return HashtablezInfoHandle(nullptr);
+#endif  // !ABSL_PER_THREAD_TLS
+}
+
+// Holds samples and their associated stack traces with a soft limit of
+// `SetHashtablezMaxSamples()`.
+//
+// Thread safe.
+class HashtablezSampler {
+ public:
+  // Returns a global Sampler.
+  static HashtablezSampler& Global();
+
+  HashtablezSampler();
+  ~HashtablezSampler();
+
+  // Registers for sampling. Returns an opaque registration info.
+  HashtablezInfo* Register();
+
+  // Unregisters the sample.
+  void Unregister(HashtablezInfo* sample);
+
+  // The dispose callback will be called on all samples the moment they are
+  // being unregistered. Only affects samples that are unregistered after the
+  // callback has been set.
+  // Returns the previous callback.
+  using DisposeCallback = void (*)(const HashtablezInfo&);
+  DisposeCallback SetDisposeCallback(DisposeCallback f);
+
+  // Iterates over all the registered `StackInfo`s, returning the number of
+  // samples that have been dropped.
+  int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
+
+ private:
+  void PushNew(HashtablezInfo* sample);
+  void PushDead(HashtablezInfo* sample);
+  HashtablezInfo* PopDead();
+
+  std::atomic<int64_t> dropped_samples_;
+  std::atomic<size_t> size_estimate_;
+
+  // Intrusive lock free linked lists for tracking samples.
+  //
+  // `all_` records all samples (they are never removed from this list) and is
+  // terminated with a `nullptr`.
+  //
+  // `graveyard_.dead` is a circular linked list. When it is empty,
+  // `graveyard_.dead == &graveyard`. The list is circular so that
+  // every item on it (even the last) has a non-null dead pointer. This allows
+  // `Iterate` to determine if a given sample is live or dead using only
+  // information on the sample itself.
+  //
+  // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
+  // looks like this (G is the Graveyard):
+  //
+  //           +---+    +---+    +---+    +---+    +---+
+  //    all -->| A |--->| B |--->| C |--->| D |--->| E |
+  //           |   |    |   |    |   |    |   |    |   |
+  //   +---+   |   | +->|   |-+  |   | +->|   |-+  |   |
+  //   | G |   +---+ |  +---+ |  +---+ |  +---+ |  +---+
+  //   |   |         |        |        |        |
+  //   |   | --------+        +--------+        |
+  //   +---+                                    |
+  //     ^                                      |
+  //     +--------------------------------------+
+  //
+  std::atomic<HashtablezInfo*> all_;
+  HashtablezInfo graveyard_;
+
+  std::atomic<DisposeCallback> dispose_;
+};
+
+// Enables or disables sampling for Swiss tables.
+void SetHashtablezEnabled(bool enabled);
+
+// Sets the rate at which Swiss tables will be sampled.
+void SetHashtablezSampleParameter(int32_t rate);
+
+// Sets a soft max for the number of samples that will be kept.
+void SetHashtablezMaxSamples(int32_t max);
+
+// Configuration override.
+// This allows process-wide sampling without depending on order of
+// initialization of static storage duration objects.
+// The definition of this constant is weak, which allows us to inject a
+// different value for it at link time.
+extern "C" bool AbslContainerInternalSampleEverything();
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
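To make the intended call pattern concrete, here is a hedged sketch of a container that participates in sampling. `SampledSet` and its members are invented for illustration, but the `Sample()` and `Record*` calls are exactly the API declared in the header above.

#include <cstddef>
#include "absl/container/internal/hashtablez_sampler.h"

namespace example {
using absl::container_internal::HashtablezInfoHandle;
using absl::container_internal::Sample;

// Minimal stand-in for a hash table that participates in sampling: one
// handle per container instance, with every mutation reported through it.
// Each Record* call is a no-op when this instance was not sampled.
class SampledSet {
 public:
  SampledSet() : infoz_(Sample()) {
    infoz_.RecordStorageChanged(/*size=*/0, /*capacity=*/kCapacity);
  }

  void Insert(size_t hash, size_t probe_offset) {
    // probe_offset is the distance from the ideal slot, in slots.
    infoz_.RecordInsert(hash, probe_offset);
  }

  void Erase() { infoz_.RecordErase(); }

 private:
  static constexpr size_t kCapacity = 16;
  HashtablezInfoHandle infoz_;  // unregisters itself on destruction
};
}  // namespace example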
+extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() { + return false; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/internal/hashtablez_sampler_test.cc b/base/abseil/absl/container/internal/hashtablez_sampler_test.cc new file mode 100644 index 0000000..102b237 --- /dev/null +++ b/base/abseil/absl/container/internal/hashtablez_sampler_test.cc @@ -0,0 +1,359 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/hashtablez_sampler.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/container/internal/have_sse.h" +#include "absl/synchronization/blocking_counter.h" +#include "absl/synchronization/internal/thread_pool.h" +#include "absl/synchronization/mutex.h" +#include "absl/synchronization/notification.h" +#include "absl/time/clock.h" +#include "absl/time/time.h" + +#if SWISSTABLE_HAVE_SSE2 +constexpr int kProbeLength = 16; +#else +constexpr int kProbeLength = 8; +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +class HashtablezInfoHandlePeer { + public: + static bool IsSampled(const HashtablezInfoHandle& h) { + return h.info_ != nullptr; + } + + static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; } +}; + +namespace { +using ::absl::synchronization_internal::ThreadPool; +using ::testing::IsEmpty; +using ::testing::UnorderedElementsAre; + +std::vector GetSizes(HashtablezSampler* s) { + std::vector res; + s->Iterate([&](const HashtablezInfo& info) { + res.push_back(info.size.load(std::memory_order_acquire)); + }); + return res; +} + +HashtablezInfo* Register(HashtablezSampler* s, size_t size) { + auto* info = s->Register(); + assert(info != nullptr); + info->size.store(size); + return info; +} + +TEST(HashtablezInfoTest, PrepareForSampling) { + absl::Time test_start = absl::Now(); + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + info.PrepareForSampling(); + + EXPECT_EQ(info.capacity.load(), 0); + EXPECT_EQ(info.size.load(), 0); + EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.max_probe_length.load(), 0); + EXPECT_EQ(info.total_probe_length.load(), 0); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0); + EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{}); + EXPECT_GE(info.create_time, test_start); + + info.capacity.store(1, std::memory_order_relaxed); + info.size.store(1, std::memory_order_relaxed); + info.num_erases.store(1, std::memory_order_relaxed); + info.max_probe_length.store(1, std::memory_order_relaxed); + info.total_probe_length.store(1, std::memory_order_relaxed); + info.hashes_bitwise_or.store(1, std::memory_order_relaxed); + info.hashes_bitwise_and.store(1, std::memory_order_relaxed); + info.create_time = test_start - absl::Hours(20); + + info.PrepareForSampling(); + EXPECT_EQ(info.capacity.load(), 0); + EXPECT_EQ(info.size.load(), 0); + 
+  EXPECT_EQ(info.num_erases.load(), 0);
+  EXPECT_EQ(info.max_probe_length.load(), 0);
+  EXPECT_EQ(info.total_probe_length.load(), 0);
+  EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
+  EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+  EXPECT_GE(info.create_time, test_start);
+}
+
+TEST(HashtablezInfoTest, RecordStorageChanged) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  RecordStorageChangedSlow(&info, 17, 47);
+  EXPECT_EQ(info.size.load(), 17);
+  EXPECT_EQ(info.capacity.load(), 47);
+  RecordStorageChangedSlow(&info, 20, 20);
+  EXPECT_EQ(info.size.load(), 20);
+  EXPECT_EQ(info.capacity.load(), 20);
+}
+
+TEST(HashtablezInfoTest, RecordInsert) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  EXPECT_EQ(info.max_probe_length.load(), 0);
+  RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
+  EXPECT_EQ(info.max_probe_length.load(), 6);
+  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
+  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
+  RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
+  EXPECT_EQ(info.max_probe_length.load(), 6);
+  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
+  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
+  RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength);
+  EXPECT_EQ(info.max_probe_length.load(), 12);
+  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000);
+  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00);
+}
+
+TEST(HashtablezInfoTest, RecordErase) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  EXPECT_EQ(info.num_erases.load(), 0);
+  EXPECT_EQ(info.size.load(), 0);
+  RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
+  EXPECT_EQ(info.size.load(), 1);
+  RecordEraseSlow(&info);
+  EXPECT_EQ(info.size.load(), 0);
+  EXPECT_EQ(info.num_erases.load(), 1);
+}
+
+TEST(HashtablezInfoTest, RecordRehash) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  RecordInsertSlow(&info, 0x1, 0);
+  RecordInsertSlow(&info, 0x2, kProbeLength);
+  RecordInsertSlow(&info, 0x4, kProbeLength);
+  RecordInsertSlow(&info, 0x8, 2 * kProbeLength);
+  EXPECT_EQ(info.size.load(), 4);
+  EXPECT_EQ(info.total_probe_length.load(), 4);
+
+  RecordEraseSlow(&info);
+  RecordEraseSlow(&info);
+  EXPECT_EQ(info.size.load(), 2);
+  EXPECT_EQ(info.total_probe_length.load(), 4);
+  EXPECT_EQ(info.num_erases.load(), 2);
+
+  RecordRehashSlow(&info, 3 * kProbeLength);
+  EXPECT_EQ(info.size.load(), 2);
+  EXPECT_EQ(info.total_probe_length.load(), 3);
+  EXPECT_EQ(info.num_erases.load(), 0);
+}
+
+#if ABSL_PER_THREAD_TLS == 1
+TEST(HashtablezSamplerTest, SmallSampleParameter) {
+  SetHashtablezEnabled(true);
+  SetHashtablezSampleParameter(100);
+
+  for (int i = 0; i < 1000; ++i) {
+    int64_t next_sample = 0;
+    HashtablezInfo* sample = SampleSlow(&next_sample);
+    EXPECT_GT(next_sample, 0);
+    EXPECT_NE(sample, nullptr);
+    UnsampleSlow(sample);
+  }
+}
+
+TEST(HashtablezSamplerTest, LargeSampleParameter) {
+  SetHashtablezEnabled(true);
+  SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
+
+  for (int i = 0; i < 1000; ++i) {
+    int64_t next_sample = 0;
+    HashtablezInfo* sample = SampleSlow(&next_sample);
+    EXPECT_GT(next_sample, 0);
+    EXPECT_NE(sample, nullptr);
+    UnsampleSlow(sample);
+  }
+}
+
+TEST(HashtablezSamplerTest, Sample) {
+  SetHashtablezEnabled(true);
+  SetHashtablezSampleParameter(100);
+  int64_t num_sampled = 0;
+  int64_t total = 0;
+  double sample_rate = 0.0;
+  for (int i = 0; i < 1000000; ++i) {
+    HashtablezInfoHandle h = Sample();
+    ++total;
+    if (HashtablezInfoHandlePeer::IsSampled(h)) {
+      ++num_sampled;
+    }
+    sample_rate = static_cast<double>(num_sampled) / total;
+    if (0.005 < sample_rate && sample_rate < 0.015) break;
+  }
+  EXPECT_NEAR(sample_rate, 0.01, 0.005);
+}
+#endif
+
+TEST(HashtablezSamplerTest, Handle) {
+  auto& sampler = HashtablezSampler::Global();
+  HashtablezInfoHandle h(sampler.Register());
+  auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
+  info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
+
+  bool found = false;
+  sampler.Iterate([&](const HashtablezInfo& h) {
+    if (&h == info) {
+      EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
+      found = true;
+    }
+  });
+  EXPECT_TRUE(found);
+
+  h = HashtablezInfoHandle();
+  found = false;
+  sampler.Iterate([&](const HashtablezInfo& h) {
+    if (&h == info) {
+      // this will only happen if some other thread has resurrected the info
+      // the old handle was using.
+      if (h.hashes_bitwise_and.load() == 0x12345678) {
+        found = true;
+      }
+    }
+  });
+  EXPECT_FALSE(found);
+}
+
+TEST(HashtablezSamplerTest, Registration) {
+  HashtablezSampler sampler;
+  auto* info1 = Register(&sampler, 1);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
+
+  auto* info2 = Register(&sampler, 2);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
+  info1->size.store(3);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
+
+  sampler.Unregister(info1);
+  sampler.Unregister(info2);
+}
+
+TEST(HashtablezSamplerTest, Unregistration) {
+  HashtablezSampler sampler;
+  std::vector<HashtablezInfo*> infos;
+  for (size_t i = 0; i < 3; ++i) {
+    infos.push_back(Register(&sampler, i));
+  }
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
+
+  sampler.Unregister(infos[1]);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
+
+  infos.push_back(Register(&sampler, 3));
+  infos.push_back(Register(&sampler, 4));
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
+  sampler.Unregister(infos[3]);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
+
+  sampler.Unregister(infos[0]);
+  sampler.Unregister(infos[2]);
+  sampler.Unregister(infos[4]);
+  EXPECT_THAT(GetSizes(&sampler), IsEmpty());
+}
+
+TEST(HashtablezSamplerTest, MultiThreaded) {
+  HashtablezSampler sampler;
+  Notification stop;
+  ThreadPool pool(10);
+
+  for (int i = 0; i < 10; ++i) {
+    pool.Schedule([&sampler, &stop]() {
+      std::random_device rd;
+      std::mt19937 gen(rd());
+
+      std::vector<HashtablezInfo*> infoz;
+      while (!stop.HasBeenNotified()) {
+        if (infoz.empty()) {
+          infoz.push_back(sampler.Register());
+        }
+        switch (std::uniform_int_distribution<>(0, 2)(gen)) {
+          case 0: {
+            infoz.push_back(sampler.Register());
+            break;
+          }
+          case 1: {
+            size_t p =
+                std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
+            HashtablezInfo* info = infoz[p];
+            infoz[p] = infoz.back();
+            infoz.pop_back();
+            sampler.Unregister(info);
+            break;
+          }
+          case 2: {
+            absl::Duration oldest = absl::ZeroDuration();
+            sampler.Iterate([&](const HashtablezInfo& info) {
+              oldest = std::max(oldest, absl::Now() - info.create_time);
+            });
+            ASSERT_GE(oldest, absl::ZeroDuration());
+            break;
+          }
+        }
+      }
+    });
+  }
+  // The threads will hammer away. Give it a little bit of time for tsan to
+  // spot errors.
+  absl::SleepFor(absl::Seconds(3));
+  stop.Notify();
+}
+
+TEST(HashtablezSamplerTest, Callback) {
+  HashtablezSampler sampler;
+
+  auto* info1 = Register(&sampler, 1);
+  auto* info2 = Register(&sampler, 2);
+
+  static const HashtablezInfo* expected;
+
+  auto callback = [](const HashtablezInfo& info) {
+    // We can't use `info` outside of this callback because the object will be
+    // disposed as soon as we return from here.
+    EXPECT_EQ(&info, expected);
+  };
+
+  // Set the callback.
+  EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
+  expected = info1;
+  sampler.Unregister(info1);
+
+  // Unset the callback.
+  EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
+  expected = nullptr;  // no more calls.
+  sampler.Unregister(info2);
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/container/internal/have_sse.h b/base/abseil/absl/container/internal/have_sse.h
new file mode 100644
index 0000000..4341441
--- /dev/null
+++ b/base/abseil/absl/container/internal/have_sse.h
@@ -0,0 +1,49 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Shared config probing for SSE instructions used in Swiss tables.
+#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+
+#ifndef SWISSTABLE_HAVE_SSE2
+#if defined(__SSE2__) ||  \
+    (defined(_MSC_VER) && \
+     (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
+#define SWISSTABLE_HAVE_SSE2 1
+#else
+#define SWISSTABLE_HAVE_SSE2 0
+#endif
+#endif
+
+#ifndef SWISSTABLE_HAVE_SSSE3
+#ifdef __SSSE3__
+#define SWISSTABLE_HAVE_SSSE3 1
+#else
+#define SWISSTABLE_HAVE_SSSE3 0
+#endif
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
+#error "Bad configuration!"
+#endif
+
+#if SWISSTABLE_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#endif  // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
diff --git a/base/abseil/absl/container/internal/inlined_vector.h b/base/abseil/absl/container/internal/inlined_vector.h
new file mode 100644
index 0000000..4d80b72
--- /dev/null
+++ b/base/abseil/absl/container/internal/inlined_vector.h
@@ -0,0 +1,892 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
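The `inlined_vector.h` internals that follow hinge on one representation trick: the `Storage` class packs the element count and an "is allocated" flag into a single word as `(size << 1) | is_allocated`, which is why the code below reads sizes with `>> 1` and adjusts them with shifted increments. A standalone sketch of just that tagging scheme, with illustrative names that are not the patch's own:

#include <cassert>
#include <cstddef>

// Illustrative only: size/flag packing in the style of the Storage class
// declared further down in this header.
class SizeAndFlag {
 public:
  size_t GetSize() const { return rep_ >> 1; }
  bool GetIsAllocated() const { return rep_ & 1; }

  void SetInlinedSize(size_t size) { rep_ = size << 1; }          // flag = 0
  void SetAllocatedSize(size_t size) { rep_ = (size << 1) | 1; }  // flag = 1
  void AddSize(size_t count) { rep_ += count << 1; }  // flag bit untouched

 private:
  size_t rep_ = 0;
};

int main() {
  SizeAndFlag s;
  s.SetAllocatedSize(3);
  assert(s.GetSize() == 3 && s.GetIsAllocated());
  s.AddSize(2);
  assert(s.GetSize() == 5 && s.GetIsAllocated());
  return 0;
}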
+ +#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ +#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/macros.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inlined_vector_internal { + +template +using IsAtLeastForwardIterator = std::is_convertible< + typename std::iterator_traits::iterator_category, + std::forward_iterator_tag>; + +template ::value_type> +using IsMemcpyOk = + absl::conjunction>, + absl::is_trivially_copy_constructible, + absl::is_trivially_copy_assignable, + absl::is_trivially_destructible>; + +template +void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first, + SizeType destroy_size) { + using AllocatorTraits = absl::allocator_traits; + + if (destroy_first != nullptr) { + for (auto i = destroy_size; i != 0;) { + --i; + AllocatorTraits::destroy(*alloc_ptr, destroy_first + i); + } + +#if !defined(NDEBUG) + { + using ValueType = typename AllocatorTraits::value_type; + + // Overwrite unused memory with `0xab` so we can catch uninitialized + // usage. + // + // Cast to `void*` to tell the compiler that we don't care that we might + // be scribbling on a vtable pointer. + void* memory_ptr = destroy_first; + auto memory_size = destroy_size * sizeof(ValueType); + std::memset(memory_ptr, 0xab, memory_size); + } +#endif // !defined(NDEBUG) + } +} + +template +void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first, + ValueAdapter* values_ptr, SizeType construct_size) { + for (SizeType i = 0; i < construct_size; ++i) { + ABSL_INTERNAL_TRY { + values_ptr->ConstructNext(alloc_ptr, construct_first + i); + } + ABSL_INTERNAL_CATCH_ANY { + inlined_vector_internal::DestroyElements(alloc_ptr, construct_first, i); + ABSL_INTERNAL_RETHROW; + } + } +} + +template +void AssignElements(Pointer assign_first, ValueAdapter* values_ptr, + SizeType assign_size) { + for (SizeType i = 0; i < assign_size; ++i) { + values_ptr->AssignNext(assign_first + i); + } +} + +template +struct StorageView { + using AllocatorTraits = absl::allocator_traits; + using Pointer = typename AllocatorTraits::pointer; + using SizeType = typename AllocatorTraits::size_type; + + Pointer data; + SizeType size; + SizeType capacity; +}; + +template +class IteratorValueAdapter { + using AllocatorTraits = absl::allocator_traits; + using Pointer = typename AllocatorTraits::pointer; + + public: + explicit IteratorValueAdapter(const Iterator& it) : it_(it) {} + + void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { + AllocatorTraits::construct(*alloc_ptr, construct_at, *it_); + ++it_; + } + + void AssignNext(Pointer assign_at) { + *assign_at = *it_; + ++it_; + } + + private: + Iterator it_; +}; + +template +class CopyValueAdapter { + using AllocatorTraits = absl::allocator_traits; + using ValueType = typename AllocatorTraits::value_type; + using Pointer = typename AllocatorTraits::pointer; + using ConstPointer = typename AllocatorTraits::const_pointer; + + public: + explicit CopyValueAdapter(const ValueType& v) : ptr_(std::addressof(v)) {} + + void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { + AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_); + } + + void AssignNext(Pointer assign_at) { *assign_at = *ptr_; } + + private: + ConstPointer ptr_; +}; + +template +class DefaultValueAdapter { 
+ using AllocatorTraits = absl::allocator_traits; + using ValueType = typename AllocatorTraits::value_type; + using Pointer = typename AllocatorTraits::pointer; + + public: + explicit DefaultValueAdapter() {} + + void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { + AllocatorTraits::construct(*alloc_ptr, construct_at); + } + + void AssignNext(Pointer assign_at) { *assign_at = ValueType(); } +}; + +template +class AllocationTransaction { + using AllocatorTraits = absl::allocator_traits; + using Pointer = typename AllocatorTraits::pointer; + using SizeType = typename AllocatorTraits::size_type; + + public: + explicit AllocationTransaction(AllocatorType* alloc_ptr) + : alloc_data_(*alloc_ptr, nullptr) {} + + ~AllocationTransaction() { + if (DidAllocate()) { + AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity()); + } + } + + AllocationTransaction(const AllocationTransaction&) = delete; + void operator=(const AllocationTransaction&) = delete; + + AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); } + Pointer& GetData() { return alloc_data_.template get<1>(); } + SizeType& GetCapacity() { return capacity_; } + + bool DidAllocate() { return GetData() != nullptr; } + Pointer Allocate(SizeType capacity) { + GetData() = AllocatorTraits::allocate(GetAllocator(), capacity); + GetCapacity() = capacity; + return GetData(); + } + + void Reset() { + GetData() = nullptr; + GetCapacity() = 0; + } + + private: + container_internal::CompressedTuple alloc_data_; + SizeType capacity_ = 0; +}; + +template +class ConstructionTransaction { + using AllocatorTraits = absl::allocator_traits; + using Pointer = typename AllocatorTraits::pointer; + using SizeType = typename AllocatorTraits::size_type; + + public: + explicit ConstructionTransaction(AllocatorType* alloc_ptr) + : alloc_data_(*alloc_ptr, nullptr) {} + + ~ConstructionTransaction() { + if (DidConstruct()) { + inlined_vector_internal::DestroyElements(std::addressof(GetAllocator()), + GetData(), GetSize()); + } + } + + ConstructionTransaction(const ConstructionTransaction&) = delete; + void operator=(const ConstructionTransaction&) = delete; + + AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); } + Pointer& GetData() { return alloc_data_.template get<1>(); } + SizeType& GetSize() { return size_; } + + bool DidConstruct() { return GetData() != nullptr; } + template + void Construct(Pointer data, ValueAdapter* values_ptr, SizeType size) { + inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()), + data, values_ptr, size); + GetData() = data; + GetSize() = size; + } + void Commit() { + GetData() = nullptr; + GetSize() = 0; + } + + private: + container_internal::CompressedTuple alloc_data_; + SizeType size_ = 0; +}; + +template +class Storage { + public: + using AllocatorTraits = absl::allocator_traits; + using allocator_type = typename AllocatorTraits::allocator_type; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; + + using reference = value_type&; + using const_reference = const value_type&; + using RValueReference = value_type&&; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + using MoveIterator 
= std::move_iterator; + using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk; + + using StorageView = inlined_vector_internal::StorageView; + + template + using IteratorValueAdapter = + inlined_vector_internal::IteratorValueAdapter; + using CopyValueAdapter = + inlined_vector_internal::CopyValueAdapter; + using DefaultValueAdapter = + inlined_vector_internal::DefaultValueAdapter; + + using AllocationTransaction = + inlined_vector_internal::AllocationTransaction; + using ConstructionTransaction = + inlined_vector_internal::ConstructionTransaction; + + static size_type NextCapacity(size_type current_capacity) { + return current_capacity * 2; + } + + static size_type ComputeCapacity(size_type current_capacity, + size_type requested_capacity) { + return (std::max)(NextCapacity(current_capacity), requested_capacity); + } + + // --------------------------------------------------------------------------- + // Storage Constructors and Destructor + // --------------------------------------------------------------------------- + + Storage() : metadata_() {} + + explicit Storage(const allocator_type& alloc) : metadata_(alloc, {}) {} + + ~Storage() { + pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData(); + inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize()); + DeallocateIfAllocated(); + } + + // --------------------------------------------------------------------------- + // Storage Member Accessors + // --------------------------------------------------------------------------- + + size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); } + + const size_type& GetSizeAndIsAllocated() const { + return metadata_.template get<1>(); + } + + size_type GetSize() const { return GetSizeAndIsAllocated() >> 1; } + + bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; } + + pointer GetAllocatedData() { return data_.allocated.allocated_data; } + + const_pointer GetAllocatedData() const { + return data_.allocated.allocated_data; + } + + pointer GetInlinedData() { + return reinterpret_cast( + std::addressof(data_.inlined.inlined_data[0])); + } + + const_pointer GetInlinedData() const { + return reinterpret_cast( + std::addressof(data_.inlined.inlined_data[0])); + } + + size_type GetAllocatedCapacity() const { + return data_.allocated.allocated_capacity; + } + + size_type GetInlinedCapacity() const { return static_cast(N); } + + StorageView MakeStorageView() { + return GetIsAllocated() + ? StorageView{GetAllocatedData(), GetSize(), + GetAllocatedCapacity()} + : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()}; + } + + allocator_type* GetAllocPtr() { + return std::addressof(metadata_.template get<0>()); + } + + const allocator_type* GetAllocPtr() const { + return std::addressof(metadata_.template get<0>()); + } + + // --------------------------------------------------------------------------- + // Storage Member Mutators + // --------------------------------------------------------------------------- + + template + void Initialize(ValueAdapter values, size_type new_size); + + template + void Assign(ValueAdapter values, size_type new_size); + + template + void Resize(ValueAdapter values, size_type new_size); + + template + iterator Insert(const_iterator pos, ValueAdapter values, + size_type insert_count); + + template + reference EmplaceBack(Args&&... 
args); + + iterator Erase(const_iterator from, const_iterator to); + + void Reserve(size_type requested_capacity); + + void ShrinkToFit(); + + void Swap(Storage* other_storage_ptr); + + void SetIsAllocated() { + GetSizeAndIsAllocated() |= static_cast(1); + } + + void UnsetIsAllocated() { + GetSizeAndIsAllocated() &= ((std::numeric_limits::max)() - 1); + } + + void SetSize(size_type size) { + GetSizeAndIsAllocated() = + (size << 1) | static_cast(GetIsAllocated()); + } + + void SetAllocatedSize(size_type size) { + GetSizeAndIsAllocated() = (size << 1) | static_cast(1); + } + + void SetInlinedSize(size_type size) { + GetSizeAndIsAllocated() = size << static_cast(1); + } + + void AddSize(size_type count) { + GetSizeAndIsAllocated() += count << static_cast(1); + } + + void SubtractSize(size_type count) { + assert(count <= GetSize()); + + GetSizeAndIsAllocated() -= count << static_cast(1); + } + + void SetAllocatedData(pointer data, size_type capacity) { + data_.allocated.allocated_data = data; + data_.allocated.allocated_capacity = capacity; + } + + void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) { + SetAllocatedData(allocation_tx_ptr->GetData(), + allocation_tx_ptr->GetCapacity()); + + allocation_tx_ptr->Reset(); + } + + void MemcpyFrom(const Storage& other_storage) { + assert(IsMemcpyOk::value || other_storage.GetIsAllocated()); + + GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated(); + data_ = other_storage.data_; + } + + void DeallocateIfAllocated() { + if (GetIsAllocated()) { + AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(), + GetAllocatedCapacity()); + } + } + + private: + using Metadata = + container_internal::CompressedTuple; + + struct Allocated { + pointer allocated_data; + size_type allocated_capacity; + }; + + struct Inlined { + alignas(value_type) char inlined_data[sizeof(value_type[N])]; + }; + + union Data { + Allocated allocated; + Inlined inlined; + }; + + Metadata metadata_; + Data data_; +}; + +template +template +auto Storage::Initialize(ValueAdapter values, size_type new_size) + -> void { + // Only callable from constructors! + assert(!GetIsAllocated()); + assert(GetSize() == 0); + + pointer construct_data; + if (new_size > GetInlinedCapacity()) { + // Because this is only called from the `InlinedVector` constructors, it's + // safe to take on the allocation with size `0`. If `ConstructElements(...)` + // throws, deallocation will be automatically handled by `~Storage()`. + size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size); + construct_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity); + SetAllocatedData(construct_data, new_capacity); + SetIsAllocated(); + } else { + construct_data = GetInlinedData(); + } + + inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data, + &values, new_size); + + // Since the initial size was guaranteed to be `0` and the allocated bit is + // already correct for either case, *adding* `new_size` gives us the correct + // result faster than setting it directly. 
+ AddSize(new_size); +} + +template +template +auto Storage::Assign(ValueAdapter values, size_type new_size) -> void { + StorageView storage_view = MakeStorageView(); + + AllocationTransaction allocation_tx(GetAllocPtr()); + + absl::Span assign_loop; + absl::Span construct_loop; + absl::Span destroy_loop; + + if (new_size > storage_view.capacity) { + size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size); + construct_loop = {allocation_tx.Allocate(new_capacity), new_size}; + destroy_loop = {storage_view.data, storage_view.size}; + } else if (new_size > storage_view.size) { + assign_loop = {storage_view.data, storage_view.size}; + construct_loop = {storage_view.data + storage_view.size, + new_size - storage_view.size}; + } else { + assign_loop = {storage_view.data, new_size}; + destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; + } + + inlined_vector_internal::AssignElements(assign_loop.data(), &values, + assign_loop.size()); + + inlined_vector_internal::ConstructElements( + GetAllocPtr(), construct_loop.data(), &values, construct_loop.size()); + + inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(), + destroy_loop.size()); + + if (allocation_tx.DidAllocate()) { + DeallocateIfAllocated(); + AcquireAllocatedData(&allocation_tx); + SetIsAllocated(); + } + + SetSize(new_size); +} + +template +template +auto Storage::Resize(ValueAdapter values, size_type new_size) -> void { + StorageView storage_view = MakeStorageView(); + + IteratorValueAdapter move_values( + MoveIterator(storage_view.data)); + + AllocationTransaction allocation_tx(GetAllocPtr()); + ConstructionTransaction construction_tx(GetAllocPtr()); + + absl::Span construct_loop; + absl::Span move_construct_loop; + absl::Span destroy_loop; + + if (new_size > storage_view.capacity) { + size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size); + pointer new_data = allocation_tx.Allocate(new_capacity); + construct_loop = {new_data + storage_view.size, + new_size - storage_view.size}; + move_construct_loop = {new_data, storage_view.size}; + destroy_loop = {storage_view.data, storage_view.size}; + } else if (new_size > storage_view.size) { + construct_loop = {storage_view.data + storage_view.size, + new_size - storage_view.size}; + } else { + destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; + } + + construction_tx.Construct(construct_loop.data(), &values, + construct_loop.size()); + + inlined_vector_internal::ConstructElements( + GetAllocPtr(), move_construct_loop.data(), &move_values, + move_construct_loop.size()); + + inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(), + destroy_loop.size()); + + construction_tx.Commit(); + if (allocation_tx.DidAllocate()) { + DeallocateIfAllocated(); + AcquireAllocatedData(&allocation_tx); + SetIsAllocated(); + } + + SetSize(new_size); +} + +template +template +auto Storage::Insert(const_iterator pos, ValueAdapter values, + size_type insert_count) -> iterator { + StorageView storage_view = MakeStorageView(); + + size_type insert_index = + std::distance(const_iterator(storage_view.data), pos); + size_type insert_end_index = insert_index + insert_count; + size_type new_size = storage_view.size + insert_count; + + if (new_size > storage_view.capacity) { + AllocationTransaction allocation_tx(GetAllocPtr()); + ConstructionTransaction construction_tx(GetAllocPtr()); + ConstructionTransaction move_construciton_tx(GetAllocPtr()); + + IteratorValueAdapter move_values( + 
MoveIterator(storage_view.data)); + + size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size); + pointer new_data = allocation_tx.Allocate(new_capacity); + + construction_tx.Construct(new_data + insert_index, &values, insert_count); + + move_construciton_tx.Construct(new_data, &move_values, insert_index); + + inlined_vector_internal::ConstructElements( + GetAllocPtr(), new_data + insert_end_index, &move_values, + storage_view.size - insert_index); + + inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, + storage_view.size); + + construction_tx.Commit(); + move_construciton_tx.Commit(); + DeallocateIfAllocated(); + AcquireAllocatedData(&allocation_tx); + + SetAllocatedSize(new_size); + return iterator(new_data + insert_index); + } else { + size_type move_construction_destination_index = + (std::max)(insert_end_index, storage_view.size); + + ConstructionTransaction move_construction_tx(GetAllocPtr()); + + IteratorValueAdapter move_construction_values( + MoveIterator(storage_view.data + + (move_construction_destination_index - insert_count))); + absl::Span move_construction = { + storage_view.data + move_construction_destination_index, + new_size - move_construction_destination_index}; + + pointer move_assignment_values = storage_view.data + insert_index; + absl::Span move_assignment = { + storage_view.data + insert_end_index, + move_construction_destination_index - insert_end_index}; + + absl::Span insert_assignment = {move_assignment_values, + move_construction.size()}; + + absl::Span insert_construction = { + insert_assignment.data() + insert_assignment.size(), + insert_count - insert_assignment.size()}; + + move_construction_tx.Construct(move_construction.data(), + &move_construction_values, + move_construction.size()); + + for (pointer destination = move_assignment.data() + move_assignment.size(), + last_destination = move_assignment.data(), + source = move_assignment_values + move_assignment.size(); + ;) { + --destination; + --source; + if (destination < last_destination) break; + *destination = std::move(*source); + } + + inlined_vector_internal::AssignElements(insert_assignment.data(), &values, + insert_assignment.size()); + + inlined_vector_internal::ConstructElements( + GetAllocPtr(), insert_construction.data(), &values, + insert_construction.size()); + + move_construction_tx.Commit(); + + AddSize(insert_count); + return iterator(storage_view.data + insert_index); + } +} + +template +template +auto Storage::EmplaceBack(Args&&... 
args) -> reference { + StorageView storage_view = MakeStorageView(); + + AllocationTransaction allocation_tx(GetAllocPtr()); + + IteratorValueAdapter move_values( + MoveIterator(storage_view.data)); + + pointer construct_data; + if (storage_view.size == storage_view.capacity) { + size_type new_capacity = NextCapacity(storage_view.capacity); + construct_data = allocation_tx.Allocate(new_capacity); + } else { + construct_data = storage_view.data; + } + + pointer last_ptr = construct_data + storage_view.size; + + AllocatorTraits::construct(*GetAllocPtr(), last_ptr, + std::forward(args)...); + + if (allocation_tx.DidAllocate()) { + ABSL_INTERNAL_TRY { + inlined_vector_internal::ConstructElements( + GetAllocPtr(), allocation_tx.GetData(), &move_values, + storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY { + AllocatorTraits::destroy(*GetAllocPtr(), last_ptr); + ABSL_INTERNAL_RETHROW; + } + + inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, + storage_view.size); + + DeallocateIfAllocated(); + AcquireAllocatedData(&allocation_tx); + SetIsAllocated(); + } + + AddSize(1); + return *last_ptr; +} + +template +auto Storage::Erase(const_iterator from, const_iterator to) + -> iterator { + StorageView storage_view = MakeStorageView(); + + size_type erase_size = std::distance(from, to); + size_type erase_index = + std::distance(const_iterator(storage_view.data), from); + size_type erase_end_index = erase_index + erase_size; + + IteratorValueAdapter move_values( + MoveIterator(storage_view.data + erase_end_index)); + + inlined_vector_internal::AssignElements(storage_view.data + erase_index, + &move_values, + storage_view.size - erase_end_index); + + inlined_vector_internal::DestroyElements( + GetAllocPtr(), storage_view.data + (storage_view.size - erase_size), + erase_size); + + SubtractSize(erase_size); + return iterator(storage_view.data + erase_index); +} + +template +auto Storage::Reserve(size_type requested_capacity) -> void { + StorageView storage_view = MakeStorageView(); + + if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return; + + AllocationTransaction allocation_tx(GetAllocPtr()); + + IteratorValueAdapter move_values( + MoveIterator(storage_view.data)); + + size_type new_capacity = + ComputeCapacity(storage_view.capacity, requested_capacity); + pointer new_data = allocation_tx.Allocate(new_capacity); + + inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data, + &move_values, storage_view.size); + + inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, + storage_view.size); + + DeallocateIfAllocated(); + AcquireAllocatedData(&allocation_tx); + SetIsAllocated(); +} + +template +auto Storage::ShrinkToFit() -> void { + // May only be called on allocated instances! 
+ assert(GetIsAllocated()); + + StorageView storage_view{GetAllocatedData(), GetSize(), + GetAllocatedCapacity()}; + + if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return; + + AllocationTransaction allocation_tx(GetAllocPtr()); + + IteratorValueAdapter move_values( + MoveIterator(storage_view.data)); + + pointer construct_data; + if (storage_view.size > GetInlinedCapacity()) { + size_type new_capacity = storage_view.size; + construct_data = allocation_tx.Allocate(new_capacity); + } else { + construct_data = GetInlinedData(); + } + + ABSL_INTERNAL_TRY { + inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data, + &move_values, storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY { + SetAllocatedData(storage_view.data, storage_view.capacity); + ABSL_INTERNAL_RETHROW; + } + + inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, + storage_view.size); + + AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data, + storage_view.capacity); + + if (allocation_tx.DidAllocate()) { + AcquireAllocatedData(&allocation_tx); + } else { + UnsetIsAllocated(); + } +} + +template +auto Storage::Swap(Storage* other_storage_ptr) -> void { + using std::swap; + assert(this != other_storage_ptr); + + if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) { + swap(data_.allocated, other_storage_ptr->data_.allocated); + } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) { + Storage* small_ptr = this; + Storage* large_ptr = other_storage_ptr; + if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr); + + for (size_type i = 0; i < small_ptr->GetSize(); ++i) { + swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]); + } + + IteratorValueAdapter move_values( + MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize())); + + inlined_vector_internal::ConstructElements( + large_ptr->GetAllocPtr(), + small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values, + large_ptr->GetSize() - small_ptr->GetSize()); + + inlined_vector_internal::DestroyElements( + large_ptr->GetAllocPtr(), + large_ptr->GetInlinedData() + small_ptr->GetSize(), + large_ptr->GetSize() - small_ptr->GetSize()); + } else { + Storage* allocated_ptr = this; + Storage* inlined_ptr = other_storage_ptr; + if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr); + + StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(), + allocated_ptr->GetSize(), + allocated_ptr->GetAllocatedCapacity()}; + + IteratorValueAdapter move_values( + MoveIterator(inlined_ptr->GetInlinedData())); + + ABSL_INTERNAL_TRY { + inlined_vector_internal::ConstructElements( + inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(), + &move_values, inlined_ptr->GetSize()); + } + ABSL_INTERNAL_CATCH_ANY { + allocated_ptr->SetAllocatedData(allocated_storage_view.data, + allocated_storage_view.capacity); + ABSL_INTERNAL_RETHROW; + } + + inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(), + inlined_ptr->GetInlinedData(), + inlined_ptr->GetSize()); + + inlined_ptr->SetAllocatedData(allocated_storage_view.data, + allocated_storage_view.capacity); + } + + swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated()); + swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr()); +} + +} // namespace inlined_vector_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ diff --git a/base/abseil/absl/container/internal/layout.h 
b/base/abseil/absl/container/internal/layout.h new file mode 100644 index 0000000..69cc85d --- /dev/null +++ b/base/abseil/absl/container/internal/layout.h @@ -0,0 +1,741 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// MOTIVATION AND TUTORIAL +// +// If you want to put in a single heap allocation N doubles followed by M ints, +// it's easy if N and M are known at compile time. +// +// struct S { +// double a[N]; +// int b[M]; +// }; +// +// S* p = new S; +// +// But what if N and M are known only in run time? Class template Layout to the +// rescue! It's a portable generalization of the technique known as struct hack. +// +// // This object will tell us everything we need to know about the memory +// // layout of double[N] followed by int[M]. It's structurally identical to +// // size_t[2] that stores N and M. It's very cheap to create. +// const Layout layout(N, M); +// +// // Allocate enough memory for both arrays. `AllocSize()` tells us how much +// // memory is needed. We are free to use any allocation function we want as +// // long as it returns aligned memory. +// std::unique_ptr p(new unsigned char[layout.AllocSize()]); +// +// // Obtain the pointer to the array of doubles. +// // Equivalent to `reinterpret_cast(p.get())`. +// // +// // We could have written layout.Pointer<0>(p) instead. If all the types are +// // unique you can use either form, but if some types are repeated you must +// // use the index form. +// double* a = layout.Pointer(p.get()); +// +// // Obtain the pointer to the array of ints. +// // Equivalent to `reinterpret_cast(p.get() + N * 8)`. +// int* b = layout.Pointer(p); +// +// If we are unable to specify sizes of all fields, we can pass as many sizes as +// we can to `Partial()`. In return, it'll allow us to access the fields whose +// locations and sizes can be computed from the provided information. +// `Partial()` comes in handy when the array sizes are embedded into the +// allocation. +// +// // size_t[1] containing N, size_t[1] containing M, double[N], int[M]. +// using L = Layout; +// +// unsigned char* Allocate(size_t n, size_t m) { +// const L layout(1, 1, n, m); +// unsigned char* p = new unsigned char[layout.AllocSize()]; +// *layout.Pointer<0>(p) = n; +// *layout.Pointer<1>(p) = m; +// return p; +// } +// +// void Use(unsigned char* p) { +// // First, extract N and M. +// // Specify that the first array has only one element. Using `prefix` we +// // can access the first two arrays but not more. +// constexpr auto prefix = L::Partial(1); +// size_t n = *prefix.Pointer<0>(p); +// size_t m = *prefix.Pointer<1>(p); +// +// // Now we can get pointers to the payload. +// const L layout(1, 1, n, m); +// double* a = layout.Pointer(p); +// int* b = layout.Pointer(p); +// } +// +// The layout we used above combines fixed-size with dynamically-sized fields. +// This is quite common. Layout is optimized for this use case and generates +// optimal code. 
All computations that can be performed at compile time are +// indeed performed at compile time. +// +// Efficiency tip: The order of fields matters. In `Layout` try to +// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no +// padding in between arrays. +// +// You can manually override the alignment of an array by wrapping the type in +// `Aligned`. `Layout<..., Aligned, ...>` has exactly the same API +// and behavior as `Layout<..., T, ...>` except that the first element of the +// array of `T` is aligned to `N` (the rest of the elements follow without +// padding). `N` cannot be less than `alignof(T)`. +// +// `AllocSize()` and `Pointer()` are the most basic methods for dealing with +// memory layouts. Check out the reference or code below to discover more. +// +// EXAMPLE +// +// // Immutable move-only string with sizeof equal to sizeof(void*). The +// // string size and the characters are kept in the same heap allocation. +// class CompactString { +// public: +// CompactString(const char* s = "") { +// const size_t size = strlen(s); +// // size_t[1] followed by char[size + 1]. +// const L layout(1, size + 1); +// p_.reset(new unsigned char[layout.AllocSize()]); +// // If running under ASAN, mark the padding bytes, if any, to catch +// // memory errors. +// layout.PoisonPadding(p_.get()); +// // Store the size in the allocation. +// *layout.Pointer(p_.get()) = size; +// // Store the characters in the allocation. +// memcpy(layout.Pointer(p_.get()), s, size + 1); +// } +// +// size_t size() const { +// // Equivalent to reinterpret_cast(*p). +// return *L::Partial().Pointer(p_.get()); +// } +// +// const char* c_str() const { +// // Equivalent to reinterpret_cast(p.get() + sizeof(size_t)). +// // The argument in Partial(1) specifies that we have size_t[1] in front +// // of the characters. +// return L::Partial(1).Pointer(p_.get()); +// } +// +// private: +// // Our heap allocation contains a size_t followed by an array of chars. +// using L = Layout; +// std::unique_ptr p_; +// }; +// +// int main() { +// CompactString s = "hello"; +// assert(s.size() == 5); +// assert(strcmp(s.c_str(), "hello") == 0); +// } +// +// DOCUMENTATION +// +// The interface exported by this file consists of: +// - class `Layout<>` and its public members. +// - The public members of class `internal_layout::LayoutImpl<>`. That class +// isn't intended to be used directly, and its name and template parameter +// list are internal implementation details, but the class itself provides +// most of the functionality in this file. See comments on its members for +// detailed documentation. +// +// `Layout::Partial(count1,..., countm)` (where `m` <= `n`) returns a +// `LayoutImpl<>` object. `Layout layout(count1,..., countn)` +// creates a `Layout` object, which exposes the same functionality by inheriting +// from `LayoutImpl<>`. + +#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_ +#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef ADDRESS_SANITIZER +#include +#endif + +#include "absl/meta/type_traits.h" +#include "absl/strings/str_cat.h" +#include "absl/types/span.h" +#include "absl/utility/utility.h" + +#if defined(__GXX_RTTI) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE +#endif + +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE +#include +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// A type wrapper that instructs `Layout` to use the specific alignment for the +// array. 
`Layout<..., Aligned, ...>` has exactly the same API +// and behavior as `Layout<..., T, ...>` except that the first element of the +// array of `T` is aligned to `N` (the rest of the elements follow without +// padding). +// +// Requires: `N >= alignof(T)` and `N` is a power of 2. +template +struct Aligned; + +namespace internal_layout { + +template +struct NotAligned {}; + +template +struct NotAligned> { + static_assert(sizeof(T) == 0, "Aligned cannot be const-qualified"); +}; + +template +using IntToSize = size_t; + +template +using TypeToSize = size_t; + +template +struct Type : NotAligned { + using type = T; +}; + +template +struct Type> { + using type = T; +}; + +template +struct SizeOf : NotAligned, std::integral_constant {}; + +template +struct SizeOf> : std::integral_constant {}; + +// Note: workaround for https://gcc.gnu.org/PR88115 +template +struct AlignOf : NotAligned { + static constexpr size_t value = alignof(T); +}; + +template +struct AlignOf> { + static_assert(N % alignof(T) == 0, + "Custom alignment can't be lower than the type's alignment"); + static constexpr size_t value = N; +}; + +// Does `Ts...` contain `T`? +template +using Contains = absl::disjunction...>; + +template +using CopyConst = + typename std::conditional::value, const To, To>::type; + +// Note: We're not qualifying this with absl:: because it doesn't compile under +// MSVC. +template +using SliceType = Span; + +// This namespace contains no types. It prevents functions defined in it from +// being found by ADL. +namespace adl_barrier { + +template +constexpr size_t Find(Needle, Needle, Ts...) { + static_assert(!Contains(), "Duplicate element type"); + return 0; +} + +template +constexpr size_t Find(Needle, T, Ts...) { + return adl_barrier::Find(Needle(), Ts()...) + 1; +} + +constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); } + +// Returns `q * m` for the smallest `q` such that `q * m >= n`. +// Requires: `m` is a power of two. It's enforced by IsLegalElementType below. +constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); } + +constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; } + +constexpr size_t Max(size_t a) { return a; } + +template +constexpr size_t Max(size_t a, size_t b, Ts... rest) { + return adl_barrier::Max(b < a ? a : b, rest...); +} + +template +std::string TypeName() { + std::string out; + int status = 0; + char* demangled = nullptr; +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE + demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status); +#endif + if (status == 0 && demangled != nullptr) { // Demangling succeeded. + absl::StrAppend(&out, "<", demangled, ">"); + free(demangled); + } else { +#if defined(__GXX_RTTI) || defined(_CPPRTTI) + absl::StrAppend(&out, "<", typeid(T).name(), ">"); +#endif + } + return out; +} + +} // namespace adl_barrier + +template +using EnableIf = typename std::enable_if::type; + +// Can `T` be a template argument of `Layout`? +template +using IsLegalElementType = std::integral_constant< + bool, !std::is_reference::value && !std::is_volatile::value && + !std::is_reference::type>::value && + !std::is_volatile::type>::value && + adl_barrier::IsPow2(AlignOf::value)>; + +template +class LayoutImpl; + +// Public base class of `Layout` and the result type of `Layout::Partial()`. +// +// `Elements...` contains all template arguments of `Layout` that created this +// instance. +// +// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments +// passed to `Layout::Partial()` or `Layout::Layout()`. 
+// +// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is +// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we +// can compute offsets). +template +class LayoutImpl, absl::index_sequence, + absl::index_sequence> { + private: + static_assert(sizeof...(Elements) > 0, "At least one field is required"); + static_assert(absl::conjunction...>::value, + "Invalid element type (see IsLegalElementType)"); + + enum { + NumTypes = sizeof...(Elements), + NumSizes = sizeof...(SizeSeq), + NumOffsets = sizeof...(OffsetSeq), + }; + + // These are guaranteed by `Layout`. + static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1), + "Internal error"); + static_assert(NumTypes > 0, "Internal error"); + + // Returns the index of `T` in `Elements...`. Results in a compilation error + // if `Elements...` doesn't contain exactly one instance of `T`. + template + static constexpr size_t ElementIndex() { + static_assert(Contains, Type::type>...>(), + "Type not found"); + return adl_barrier::Find(Type(), + Type::type>()...); + } + + template + using ElementAlignment = + AlignOf>::type>; + + public: + // Element types of all arrays packed in a tuple. + using ElementTypes = std::tuple::type...>; + + // Element type of the Nth array. + template + using ElementType = typename std::tuple_element::type; + + constexpr explicit LayoutImpl(IntToSize... sizes) + : size_{sizes...} {} + + // Alignment of the layout, equal to the strictest alignment of all elements. + // All pointers passed to the methods of layout must be aligned to this value. + static constexpr size_t Alignment() { + return adl_barrier::Max(AlignOf::value...); + } + + // Offset in bytes of the Nth array. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset<0>() == 0); // The ints starts from 0. + // assert(x.Offset<1>() == 16); // The doubles starts from 16. + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + template = 0> + constexpr size_t Offset() const { + return 0; + } + + template = 0> + constexpr size_t Offset() const { + static_assert(N < NumOffsets, "Index out of bounds"); + return adl_barrier::Align( + Offset() + SizeOf>() * size_[N - 1], + ElementAlignment::value); + } + + // Offset in bytes of the array with the specified element type. There must + // be exactly one such array and its zero-based index must be at most + // `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset() == 0); // The ints starts from 0. + // assert(x.Offset() == 16); // The doubles starts from 16. + template + constexpr size_t Offset() const { + return Offset()>(); + } + + // Offsets in bytes of all arrays for which the offsets are known. + constexpr std::array Offsets() const { + return {{Offset()...}}; + } + + // The number of elements in the Nth array. This is the Nth argument of + // `Layout::Partial()` or `Layout::Layout()` (zero-based). + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size<0>() == 3); + // assert(x.Size<1>() == 4); + // + // Requires: `N < NumSizes`. + template + constexpr size_t Size() const { + static_assert(N < NumSizes, "Index out of bounds"); + return size_[N]; + } + + // The number of elements in the array with the specified element type. + // There must be exactly one such array and its zero-based index must be + // at most `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. 
+
+  // Offset in bytes of the array with the specified element type. There must
+  // be exactly one such array and its zero-based index must be at most
+  // `NumSizes`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   assert(x.Offset<int>() == 0);      // The ints starts from 0.
+  //   assert(x.Offset<double>() == 16);  // The doubles starts from 16.
+  template <class T>
+  constexpr size_t Offset() const {
+    return Offset<ElementIndex<T>()>();
+  }
+
+  // Offsets in bytes of all arrays for which the offsets are known.
+  constexpr std::array<size_t, NumOffsets> Offsets() const {
+    return {{Offset<OffsetSeq>()...}};
+  }
+
+  // The number of elements in the Nth array. This is the Nth argument of
+  // `Layout::Partial()` or `Layout::Layout()` (zero-based).
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   assert(x.Size<0>() == 3);
+  //   assert(x.Size<1>() == 4);
+  //
+  // Requires: `N < NumSizes`.
+  template <size_t N>
+  constexpr size_t Size() const {
+    static_assert(N < NumSizes, "Index out of bounds");
+    return size_[N];
+  }
+
+  // The number of elements in the array with the specified element type.
+  // There must be exactly one such array and its zero-based index must be
+  // at most `NumSizes`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   assert(x.Size<int>() == 3);
+  //   assert(x.Size<double>() == 4);
+  template <class T>
+  constexpr size_t Size() const {
+    return Size<ElementIndex<T>()>();
+  }
+
+  // The number of elements of all arrays for which they are known.
+  constexpr std::array<size_t, NumSizes> Sizes() const {
+    return {{Size<SizeSeq>()...}};
+  }
+
+  // Pointer to the beginning of the Nth array.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //   int* ints = x.Pointer<0>(p);
+  //   double* doubles = x.Pointer<1>(p);
+  //
+  // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+  // Requires: `p` is aligned to `Alignment()`.
+  template <size_t N, class Char>
+  CopyConst<Char, ElementType<N>>* Pointer(Char* p) const {
+    using C = typename std::remove_const<Char>::type;
+    static_assert(
+        std::is_same<C, char>() || std::is_same<C, unsigned char>() ||
+            std::is_same<C, signed char>(),
+        "The argument must be a pointer to [const] [signed|unsigned] char");
+    constexpr size_t alignment = Alignment();
+    (void)alignment;
+    assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
+    return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>());
+  }
+
+  // Pointer to the beginning of the array with the specified element type.
+  // There must be exactly one such array and its zero-based index must be at
+  // most `NumSizes`.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //   int* ints = x.Pointer<int>(p);
+  //   double* doubles = x.Pointer<double>(p);
+  //
+  // Requires: `p` is aligned to `Alignment()`.
+  template <class T, class Char>
+  CopyConst<Char, T>* Pointer(Char* p) const {
+    return Pointer<ElementIndex<T>()>(p);
+  }
+
+  // Pointers to all arrays for which pointers are known.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //
+  //   int* ints;
+  //   double* doubles;
+  //   std::tie(ints, doubles) = x.Pointers(p);
+  //
+  // Requires: `p` is aligned to `Alignment()`.
+  //
+  // Note: We're not using ElementType alias here because it does not compile
+  // under MSVC.
+  template <class Char>
+  std::tuple<CopyConst<
+      Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
+  Pointers(Char* p) const {
+    return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
+        Pointer<OffsetSeq>(p)...);
+  }
+
+  // The Nth array.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //   Span<int> ints = x.Slice<0>(p);
+  //   Span<double> doubles = x.Slice<1>(p);
+  //
+  // Requires: `N < NumSizes`.
+  // Requires: `p` is aligned to `Alignment()`.
+  template <size_t N, class Char>
+  SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const {
+    return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p),
+                                                      Size<N>());
+  }
+
+  // The array with the specified element type. There must be exactly one
+  // such array and its zero-based index must be less than `NumSizes`.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //   Span<int> ints = x.Slice<int>(p);
+  //   Span<double> doubles = x.Slice<double>(p);
+  //
+  // Requires: `p` is aligned to `Alignment()`.
+  template <class T, class Char>
+  SliceType<CopyConst<Char, T>> Slice(Char* p) const {
+    return Slice<ElementIndex<T>()>(p);
+  }
+
+  // All arrays with known sizes.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //
+  //   Span<int> ints;
+  //   Span<double> doubles;
+  //   std::tie(ints, doubles) = x.Slices(p);
+  //
+  // Requires: `p` is aligned to `Alignment()`.
+  //
+  // Note: We're not using ElementType alias here because it does not compile
+  // under MSVC.
+  template <class Char>
+  std::tuple<SliceType<CopyConst<
+      Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
+  Slices(Char* p) const {
+    // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875
+    // (fixed in 6.1).
+    (void)p;
+    return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
+        Slice<SizeSeq>(p)...);
+  }
+
+  // The size of the allocation that fits all arrays.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];  // 48 bytes
+  //
+  // Requires: `NumSizes == sizeof...(Ts)`.
+  constexpr size_t AllocSize() const {
+    static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
+    return Offset<NumTypes - 1>() +
+           SizeOf<ElementType<NumTypes - 1>>() * size_[NumTypes - 1];
+  }
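+  // Worked example (illustrative): for Layout<int, double> x(3, 4),
+  //   AllocSize() = Offset<1>() + sizeof(double) * 4 = 16 + 32 = 48,
+  // which matches the "48 bytes" comment above.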
+
+  // If built with --config=asan, poisons padding bytes (if any) in the
+  // allocation. The pointer must point to a memory block at least
+  // `AllocSize()` bytes in length.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  // Requires: `p` is aligned to `Alignment()`.
+  template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
+  void PoisonPadding(const Char* p) const {
+    Pointer<0>(p);  // verify the requirements on `Char` and `p`
+  }
+
+  template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
+  void PoisonPadding(const Char* p) const {
+    static_assert(N < NumOffsets, "Index out of bounds");
+    (void)p;
+#ifdef ADDRESS_SANITIZER
+    PoisonPadding<Char, N - 1>(p);
+    // The `if` is an optimization. It doesn't affect the observable
+    // behaviour.
+    if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
+      size_t start =
+          Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1];
+      ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
+    }
+#endif
+  }
+
+  // Human-readable description of the memory layout. Useful for debugging.
+  // Slow.
+  //
+  //   // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed
+  //   // by an unknown number of doubles.
+  //   auto x = Layout<char, int, double>::Partial(5, 3);
+  //   assert(x.DebugString() ==
+  //          "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)");
+  //
+  // Each field is in the following format: @offset<type>(sizeof)[size]
+  // (<type> may be missing depending on the target platform). For example,
+  // @8<int>(4)[3] means that at offset 8 we have an array of ints, where
+  // each int is 4 bytes, and we have 3 of those ints. The size of the last
+  // field may be missing (as in the example above). Only fields with known
+  // offsets are described. Type names may differ across platforms: one
+  // compiler might produce "unsigned*" where another produces
+  // "unsigned int *".
+  std::string DebugString() const {
+    const auto offsets = Offsets();
+    const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
+    const std::string types[] = {
+        adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
+    std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
+    for (size_t i = 0; i != NumOffsets - 1; ++i) {
+      absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1],
+                      types[i + 1], "(", sizes[i + 1], ")");
+    }
+    // NumSizes is a constant that may be zero. Some compilers cannot see that
+    // inside the if statement "size_[NumSizes - 1]" must be valid.
+    int last = static_cast<int>(NumSizes) - 1;
+    if (NumTypes == NumSizes && last >= 0) {
+      absl::StrAppend(&res, "[", size_[last], "]");
+    }
+    return res;
+  }
+
+ private:
+  // Arguments of `Layout::Partial()` or `Layout::Layout()`.
+  size_t size_[NumSizes > 0 ? NumSizes : 1];
+};
+
+template <size_t NumSizes, class... Ts>
+using LayoutType = LayoutImpl<
+    std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
+    absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
+
+}  // namespace internal_layout
+
+// Descriptor of arrays of various types and sizes laid out in memory one
+// after another. See the top of the file for documentation.
+//
+// Check out the public API of internal_layout::LayoutImpl above. The type is
+// internal to the library but its methods are public, and they are inherited
+// by `Layout`.
+template <class... Ts>
+class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
+ public:
+  static_assert(sizeof...(Ts) > 0, "At least one field is required");
+  static_assert(
+      absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
+      "Invalid element type (see IsLegalElementType)");
+
+  // The result type of `Partial()` with `NumSizes` arguments.
+  template <size_t NumSizes>
+  using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
+
+  // `Layout` knows the element types of the arrays we want to lay out in
+  // memory but not the number of elements in each array.
+  // `Partial(size1, ..., sizeN)` allows us to specify the latter. The
+  // resulting immutable object can be used to obtain pointers to the
+  // individual arrays.
+  //
+  // It's allowed to pass fewer array sizes than the number of arrays. E.g.,
+  // if all you need is the offset of the second array, you only need to
+  // pass one argument -- the number of elements in the first array.
+  //
+  //   // int[3] followed by 4 bytes of padding and an unknown number of
+  //   // doubles.
+  //   auto x = Layout<int, double>::Partial(3);
+  //   // doubles start at byte 16.
+  //   assert(x.Offset<1>() == 16);
+  //
+  // If you know the number of elements in all arrays, you can still call
+  // `Partial()` but it's more convenient to use the constructor of `Layout`.
+  //
+  //   Layout<int, double> x(3, 5);
+  //
+  // Note: The sizes of the arrays must be specified in number of elements,
+  // not in bytes.
+  //
+  // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
+  // Requires: all arguments are convertible to `size_t`.
+  template <class... Sizes>
+  static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
+    static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
+    return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
+  }
+
+  // Creates a layout with the sizes of all arrays specified. If you know
+  // only the sizes of the first N arrays (where N can be zero), you can use
+  // `Partial()` defined above. The constructor is essentially equivalent to
+  // calling `Partial()` and passing in all array sizes; the constructor is
+  // provided as a convenient abbreviation.
+  //
+  // Note: The sizes of the arrays must be specified in number of elements,
+  // not in bytes.
+  constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
+      : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
+};
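+
+// Minimal end-to-end usage sketch (illustrative; it mirrors the examples in
+// the method comments above):
+//
+//   constexpr Layout<int, double> layout(3, 4);
+//   unsigned char* p = new unsigned char[layout.AllocSize()];  // 48 bytes
+//   int* ints = layout.Pointer<int>(p);           // at offset 0
+//   double* doubles = layout.Pointer<double>(p);  // at offset 16
+//   delete[] p;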
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/base/abseil/absl/container/internal/layout_test.cc b/base/abseil/absl/container/internal/layout_test.cc
new file mode 100644
index 0000000..8f3628a
--- /dev/null
+++ b/base/abseil/absl/container/internal/layout_test.cc
@@ -0,0 +1,1567 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/layout.h" + +// We need ::max_align_t because some libstdc++ versions don't provide +// std::max_align_t +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::absl::Span; +using ::testing::ElementsAre; + +size_t Distance(const void* from, const void* to) { + ABSL_RAW_CHECK(from <= to, "Distance must be non-negative"); + return static_cast(to) - static_cast(from); +} + +template +Expected Type(Actual val) { + static_assert(std::is_same(), ""); + return val; +} + +// Helper classes to test different size and alignments. +struct alignas(8) Int128 { + uint64_t a, b; + friend bool operator==(Int128 lhs, Int128 rhs) { + return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b); + } + + static std::string Name() { + return internal_layout::adl_barrier::TypeName(); + } +}; + +// int64_t is *not* 8-byte aligned on all platforms! +struct alignas(8) Int64 { + int64_t a; + friend bool operator==(Int64 lhs, Int64 rhs) { + return lhs.a == rhs.a; + } +}; + +// Properties of types that this test relies on. +static_assert(sizeof(int8_t) == 1, ""); +static_assert(alignof(int8_t) == 1, ""); +static_assert(sizeof(int16_t) == 2, ""); +static_assert(alignof(int16_t) == 2, ""); +static_assert(sizeof(int32_t) == 4, ""); +static_assert(alignof(int32_t) == 4, ""); +static_assert(sizeof(Int64) == 8, ""); +static_assert(alignof(Int64) == 8, ""); +static_assert(sizeof(Int128) == 16, ""); +static_assert(alignof(Int128) == 8, ""); + +template +void SameType() { + static_assert(std::is_same(), ""); +} + +TEST(Layout, ElementType) { + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + } + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + } + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + } +} + +TEST(Layout, ElementTypes) { + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, decltype(L::Partial())::ElementTypes>(); + SameType, decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, decltype(L::Partial())::ElementTypes>(); + SameType, decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, + decltype(L::Partial())::ElementTypes>(); + SameType, + decltype(L::Partial(0))::ElementTypes>(); + SameType, + decltype(L::Partial(0, 0))::ElementTypes>(); + SameType, + decltype(L::Partial(0, 0, 0))::ElementTypes>(); + } +} + +TEST(Layout, OffsetByIndex) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(0, L(3).Offset<0>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, 
L::Partial(3).Offset<0>()); + EXPECT_EQ(12, L::Partial(3).Offset<1>()); + EXPECT_EQ(0, L::Partial(3, 5).Offset<0>()); + EXPECT_EQ(12, L::Partial(3, 5).Offset<1>()); + EXPECT_EQ(0, L(3, 5).Offset<0>()); + EXPECT_EQ(12, L(3, 5).Offset<1>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<1>()); + EXPECT_EQ(0, L::Partial(1).Offset<0>()); + EXPECT_EQ(4, L::Partial(1).Offset<1>()); + EXPECT_EQ(0, L::Partial(5).Offset<0>()); + EXPECT_EQ(8, L::Partial(5).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3).Offset<0>()); + EXPECT_EQ(8, L::Partial(5, 3).Offset<1>()); + EXPECT_EQ(24, L::Partial(5, 3).Offset<2>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>()); + EXPECT_EQ(0, L(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L(5, 3, 1).Offset<1>()); + } +} + +TEST(Layout, OffsetByType) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset()); + EXPECT_EQ(0, L::Partial(3).Offset()); + EXPECT_EQ(0, L(3).Offset()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset()); + EXPECT_EQ(0, L::Partial(0).Offset()); + EXPECT_EQ(0, L::Partial(0).Offset()); + EXPECT_EQ(0, L::Partial(1).Offset()); + EXPECT_EQ(4, L::Partial(1).Offset()); + EXPECT_EQ(0, L::Partial(5).Offset()); + EXPECT_EQ(8, L::Partial(5).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(1, 0).Offset()); + EXPECT_EQ(4, L::Partial(1, 0).Offset()); + EXPECT_EQ(8, L::Partial(1, 0).Offset()); + EXPECT_EQ(0, L::Partial(5, 3).Offset()); + EXPECT_EQ(8, L::Partial(5, 3).Offset()); + EXPECT_EQ(24, L::Partial(5, 3).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(4, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(0, L(5, 3, 1).Offset()); + EXPECT_EQ(24, L(5, 3, 1).Offset()); + EXPECT_EQ(8, L(5, 3, 1).Offset()); + } +} + +TEST(Layout, Offsets) { + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0)); + EXPECT_THAT(L(3).Offsets(), ElementsAre(0)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 
4)); + EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8)); + EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + } +} + +TEST(Layout, AllocSize) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).AllocSize()); + EXPECT_EQ(12, L::Partial(3).AllocSize()); + EXPECT_EQ(12, L(3).AllocSize()); + } + { + using L = Layout; + EXPECT_EQ(32, L::Partial(3, 5).AllocSize()); + EXPECT_EQ(32, L(3, 5).AllocSize()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize()); + EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize()); + EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize()); + EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize()); + EXPECT_EQ(136, L(3, 5, 7).AllocSize()); + } +} + +TEST(Layout, SizeByIndex) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L(3).Size<0>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); + EXPECT_EQ(3, L(3, 5).Size<0>()); + EXPECT_EQ(5, L(3, 5).Size<1>()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>()); + EXPECT_EQ(3, L(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L(3, 5, 7).Size<2>()); + } +} + +TEST(Layout, SizeByType) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size()); + EXPECT_EQ(3, L::Partial(3).Size()); + EXPECT_EQ(3, L(3).Size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Size()); + EXPECT_EQ(3, L::Partial(3, 5).Size()); + EXPECT_EQ(5, L::Partial(3, 5).Size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(3, L(3, 5, 7).Size()); + EXPECT_EQ(5, L(3, 5, 7).Size()); + EXPECT_EQ(7, L(3, 5, 7).Size()); + } +} + +TEST(Layout, Sizes) { + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L(3).Sizes(), ElementsAre(3)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + } +} + +TEST(Layout, PointerByIndex) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + 
EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, + Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 4, Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ( + 24, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, PointerByType) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, + Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1, 
0).Pointer(p)))); + EXPECT_EQ( + 4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type( + L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, Distance(p, Type( + L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, + Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + } +} + +TEST(Layout, MutablePointerByIndex) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, 
Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, MutablePointerByType) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 24, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + } +} + +TEST(Layout, Pointers) { + alignas(max_align_t) const unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } +} + +TEST(Layout, MutablePointers) { + alignas(max_align_t) unsigned char p[100]; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 
2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } +} + +TEST(Layout, SliceByIndexSize) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, SliceByTypeSize) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L(3).Slice(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); + } +} + +TEST(Layout, MutableSliceByIndexSize) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, MutableSliceByTypeSize) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L(3).Slice(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 
7).Slice(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); + } +} + +TEST(Layout, SliceByIndexData) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance(p, + Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, + Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, + Type>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, + Type>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, SliceByTypeData) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L(3).Slice(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(5, 
3).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type>(L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(8, Distance(p, Type>( + L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ(24, Distance(p, Type>( + L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance(p, + Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L(5, 3, 1).Slice(p)).data())); + } +} + +TEST(Layout, MutableSliceByIndexData) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance(p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, Distance(p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, Distance( + p, Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ(24, + Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ(8, Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, MutableSliceByTypeData) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, 
Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(5).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 4, Distance( + p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + } +} + +MATCHER_P(IsSameSlice, slice, "") { + return arg.size() == slice.size() && arg.data() == slice.data(); +} + +template +class TupleMatcher { + public: + explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {} + + template + bool MatchAndExplain(const Tuple& p, + testing::MatchResultListener* /* listener */) const { + static_assert(std::tuple_size::value == sizeof...(M), ""); + return MatchAndExplainImpl( + p, absl::make_index_sequence::value>{}); + } + + // For the matcher concept. Left empty as we don't really need the diagnostics + // right now. + void DescribeTo(::std::ostream* os) const {} + void DescribeNegationTo(::std::ostream* os) const {} + + private: + template + bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence) const { + // Using std::min as a simple variadic "and". + return std::min( + {true, testing::SafeMatcherCast< + const typename std::tuple_element::type&>( + std::get(matchers_)) + .Matches(std::get(p))...}); + } + + std::tuple matchers_; +}; + +template +testing::PolymorphicMatcher> Tuple(M... 
matchers) { + return testing::MakePolymorphicMatcher( + TupleMatcher(std::move(matchers)...)); +} + +TEST(Layout, Slices) { + alignas(max_align_t) const unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_THAT(Type>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT( + (Type, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT((Type, Span, + Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT((Type, Span, + Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, MutableSlices) { + alignas(max_align_t) unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_THAT(Type>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT((Type, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT( + (Type, Span, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT( + (Type, Span, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, UnalignedTypes) { + constexpr Layout x(1, 2, 3); + alignas(max_align_t) unsigned char p[x.AllocSize() + 1]; + EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4)); +} + +TEST(Layout, CustomAlignment) { + constexpr Layout> x(1, 2); + alignas(max_align_t) unsigned char p[x.AllocSize()]; + EXPECT_EQ(10, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8)); +} + +TEST(Layout, OverAligned) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout> x(1, 3); + alignas(2 * M) unsigned char p[x.AllocSize()]; + EXPECT_EQ(2 * M + 3, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M)); +} + +TEST(Layout, Alignment) { + static_assert(Layout::Alignment() == 1, ""); + static_assert(Layout::Alignment() == 4, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout>::Alignment() == 64, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); +} + +TEST(Layout, ConstexprPartial) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout> x(1, 3); + static_assert(x.Partial(1).template Offset<1>() == 2 * M, ""); +} +// [from, to) +struct Region { + size_t from; + size_t to; +}; + +void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) { +#ifdef ADDRESS_SANITIZER + for (size_t i = 0; i != n; ++i) { + EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i)); + } +#endif +} + +template +void ExpectPoisoned(const unsigned char (&buf)[N], + std::initializer_list reg) { + size_t prev = 0; + for (const Region& r : reg) { + ExpectRegionPoisoned(buf + prev, r.from - prev, 
false); + ExpectRegionPoisoned(buf + r.from, r.to - r.from, true); + prev = r.to; + } + ExpectRegionPoisoned(buf + prev, N - prev, false); +} + +TEST(Layout, PoisonPadding) { + using L = Layout; + + constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize(); + { + constexpr auto x = L::Partial(); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {}); + } + { + constexpr auto x = L::Partial(1); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2, 3); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr auto x = L::Partial(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr L x(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } +} + +TEST(Layout, DebugString) { + { + constexpr auto x = Layout::Partial(); + EXPECT_EQ("@0(1)", x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1); + EXPECT_EQ("@0(1)[1]; @4(4)", x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2); + EXPECT_EQ("@0(1)[1]; @4(4)[2]; @12(1)", + x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2, 3); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)", + x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2, 3, 4); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)[4]", + x.DebugString()); + } + { + constexpr Layout x(1, 2, 3, 4); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)[4]", + x.DebugString()); + } +} + +TEST(Layout, CharTypes) { + constexpr Layout x(1); + alignas(max_align_t) char c[x.AllocSize()] = {}; + alignas(max_align_t) unsigned char uc[x.AllocSize()] = {}; + alignas(max_align_t) signed char sc[x.AllocSize()] = {}; + alignas(max_align_t) const char cc[x.AllocSize()] = {}; + alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {}; + alignas(max_align_t) const signed char csc[x.AllocSize()] = {}; + + Type(x.Pointer<0>(c)); + Type(x.Pointer<0>(uc)); + Type(x.Pointer<0>(sc)); + Type(x.Pointer<0>(cc)); + Type(x.Pointer<0>(cuc)); + Type(x.Pointer<0>(csc)); + + Type(x.Pointer(c)); + Type(x.Pointer(uc)); + Type(x.Pointer(sc)); + Type(x.Pointer(cc)); + Type(x.Pointer(cuc)); + Type(x.Pointer(csc)); + + Type>(x.Pointers(c)); + Type>(x.Pointers(uc)); + Type>(x.Pointers(sc)); + Type>(x.Pointers(cc)); + Type>(x.Pointers(cuc)); + Type>(x.Pointers(csc)); + + Type>(x.Slice<0>(c)); + Type>(x.Slice<0>(uc)); + Type>(x.Slice<0>(sc)); + Type>(x.Slice<0>(cc)); + Type>(x.Slice<0>(cuc)); + Type>(x.Slice<0>(csc)); + + Type>>(x.Slices(c)); + Type>>(x.Slices(uc)); + Type>>(x.Slices(sc)); + Type>>(x.Slices(cc)); + Type>>(x.Slices(cuc)); + Type>>(x.Slices(csc)); +} + +TEST(Layout, ConstElementType) { + constexpr Layout x(1); + alignas(int32_t) char c[x.AllocSize()] = {}; + const char* cc = c; + 
const int32_t* p = reinterpret_cast<const int32_t*>(cc);
+
+  EXPECT_EQ(alignof(int32_t), x.Alignment());
+
+  EXPECT_EQ(0, x.Offset<0>());
+  EXPECT_EQ(0, x.Offset<const int32_t>());
+
+  EXPECT_THAT(x.Offsets(), ElementsAre(0));
+
+  EXPECT_EQ(1, x.Size<0>());
+  EXPECT_EQ(1, x.Size<const int32_t>());
+
+  EXPECT_THAT(x.Sizes(), ElementsAre(1));
+
+  EXPECT_EQ(sizeof(int32_t), x.AllocSize());
+
+  EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(c)));
+  EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(cc)));
+
+  EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(c)));
+  EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(cc)));
+
+  EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(c)), Tuple(p));
+  EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(cc)), Tuple(p));
+
+  EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(c)),
+              IsSameSlice(Span<const int32_t>(p, 1)));
+  EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(cc)),
+              IsSameSlice(Span<const int32_t>(p, 1)));
+
+  EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(c)),
+              IsSameSlice(Span<const int32_t>(p, 1)));
+  EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(cc)),
+              IsSameSlice(Span<const int32_t>(p, 1)));
+
+  EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(c)),
+              Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
+  EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(cc)),
+              Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
+}
+
+namespace example {
+
+// Immutable move-only string with sizeof equal to sizeof(void*). The string
+// size and the characters are kept in the same heap allocation.
+class CompactString {
+ public:
+  CompactString(const char* s = "") {  // NOLINT
+    const size_t size = strlen(s);
+    // size_t[1], followed by char[size + 1].
+    // This statement doesn't allocate memory.
+    const L layout(1, size + 1);
+    // AllocSize() tells us how much memory we need to allocate for all our
+    // data.
+    p_.reset(new unsigned char[layout.AllocSize()]);
+    // If running under ASAN, mark the padding bytes, if any, to catch memory
+    // errors.
+    layout.PoisonPadding(p_.get());
+    // Store the size in the allocation.
+    // Pointer<size_t>() is a synonym for Pointer<0>().
+    *layout.Pointer<size_t>(p_.get()) = size;
+    // Store the characters in the allocation.
+    memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+  }
+
+  size_t size() const {
+    // Equivalent to reinterpret_cast<size_t&>(*p).
+    return *L::Partial().Pointer<size_t>(p_.get());
+  }
+
+  const char* c_str() const {
+    // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
+    // The argument in Partial(1) specifies that we have size_t[1] in front
+    // of the characters.
+    return L::Partial(1).Pointer<char>(p_.get());
+  }
+
+ private:
+  // Our heap allocation contains a size_t followed by an array of chars.
+  using L = Layout<size_t, char>;
+  std::unique_ptr<unsigned char[]> p_;
+};
+
+TEST(CompactString, Works) {
+  CompactString s = "hello";
+  EXPECT_EQ(5, s.size());
+  EXPECT_STREQ("hello", s.c_str());
+}
+
+}  // namespace example
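+
+// Worked arithmetic for CompactString("hello") above (illustrative, assuming
+// an 8-byte size_t): the layout is size_t[1] followed by char[6], so
+// AllocSize() == 8 + 6 == 14 and c_str() points at p_.get() + 8.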
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/base/abseil/absl/container/internal/node_hash_policy.h b/base/abseil/absl/container/internal/node_hash_policy.h
new file mode 100644
index 0000000..4617162
--- /dev/null
+++ b/base/abseil/absl/container/internal/node_hash_policy.h
@@ -0,0 +1,92 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Adapts a policy for nodes.
+//
+// The node policy should model:
+//
+// struct Policy {
+//   // Returns a new node allocated and constructed using the allocator,
+//   // using the specified arguments.
+//   template <class Alloc, class... Args>
+//   value_type* new_element(Alloc* alloc, Args&&... args) const;
+//
+//   // Destroys and deallocates node using the allocator.
+//   template <class Alloc>
+//   void delete_element(Alloc* alloc, value_type* node) const;
+// };
+//
+// It may also optionally define `value()` and `apply()`. For documentation
+// on these, see hash_policy_traits.h.
+
+#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class Reference, class Policy>
+struct node_hash_policy {
+  static_assert(std::is_lvalue_reference<Reference>::value, "");
+
+  using slot_type = typename std::remove_cv<
+      typename std::remove_reference<Reference>::type>::type*;
+
+  template <class Alloc, class... Args>
+  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+    *slot = Policy::new_element(alloc, std::forward<Args>(args)...);
+  }
+
+  template <class Alloc>
+  static void destroy(Alloc* alloc, slot_type* slot) {
+    Policy::delete_element(alloc, *slot);
+  }
+
+  template <class Alloc>
+  static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
+    *new_slot = *old_slot;
+  }
+
+  static size_t space_used(const slot_type* slot) {
+    if (slot == nullptr) return Policy::element_space_used(nullptr);
+    return Policy::element_space_used(*slot);
+  }
+
+  static Reference element(slot_type* slot) { return **slot; }
+
+  template <class T, class P = Policy>
+  static auto value(T* elem) -> decltype(P::value(elem)) {
+    return P::value(elem);
+  }
+
+  template <class... Ts, class P = Policy>
+  static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) {
+    return P::apply(std::forward<Ts>(ts)...);
+  }
+};
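+
+// Minimal usage sketch (illustrative; node_hash_policy_test.cc below defines
+// essentially the same policy):
+//
+//   struct IntPolicy : node_hash_policy<int&, IntPolicy> {
+//     template <class Alloc>
+//     static int* new_element(Alloc*, int v) { return new int(v); }
+//     template <class Alloc>
+//     static void delete_element(Alloc*, int* p) { delete p; }
+//   };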
+ +#include "absl/container/internal/node_hash_policy.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_policy_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::Pointee; + +struct Policy : node_hash_policy { + using key_type = int; + using init_type = int; + + template + static int* new_element(Alloc* alloc, int value) { + return new int(value); + } + + template + static void delete_element(Alloc* alloc, int* elem) { + delete elem; + } +}; + +using NodePolicy = hash_policy_traits; + +struct NodeTest : ::testing::Test { + std::allocator alloc; + int n = 53; + int* a = &n; +}; + +TEST_F(NodeTest, ConstructDestroy) { + NodePolicy::construct(&alloc, &a, 42); + EXPECT_THAT(a, Pointee(42)); + NodePolicy::destroy(&alloc, &a); +} + +TEST_F(NodeTest, transfer) { + int s = 42; + int* b = &s; + NodePolicy::transfer(&alloc, &a, &b); + EXPECT_EQ(&s, a); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/internal/raw_hash_map.h b/base/abseil/absl/container/internal/raw_hash_map.h new file mode 100644 index 0000000..0a02757 --- /dev/null +++ b/base/abseil/absl/container/internal/raw_hash_map.h @@ -0,0 +1,197 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ + +#include +#include +#include + +#include "absl/base/internal/throw_delegate.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class raw_hash_map : public raw_hash_set { + // P is Policy. It's passed as a template argument to support maps that have + // incomplete types as values, as in unordered_map. + // MappedReference<> may be a non-reference type. + template + using MappedReference = decltype(P::value( + std::addressof(std::declval()))); + + // MappedConstReference<> may be a non-reference type. + template + using MappedConstReference = decltype(P::value( + std::addressof(std::declval()))); + + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + template + using key_arg = typename KeyArgImpl::template type; + + static_assert(!std::is_reference::value, ""); + // TODO(alkis): remove this assertion and verify that reference mapped_type is + // supported. 
+ static_assert(!std::is_reference::value, ""); + + using iterator = typename raw_hash_map::raw_hash_set::iterator; + using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; + + raw_hash_map() {} + using raw_hash_map::raw_hash_set::raw_hash_set; + + // The last two template parameters ensure that both arguments are rvalues + // (lvalue arguments are handled by the overloads below). This is necessary + // for supporting bitfield arguments. + // + // union { int n : 1; }; + // flat_hash_map m; + // m.insert_or_assign(n, n); + template + std::pair insert_or_assign(key_arg&& k, V&& v) { + return insert_or_assign_impl(std::forward(k), std::forward(v)); + } + + template + std::pair insert_or_assign(key_arg&& k, const V& v) { + return insert_or_assign_impl(std::forward(k), v); + } + + template + std::pair insert_or_assign(const key_arg& k, V&& v) { + return insert_or_assign_impl(k, std::forward(v)); + } + + template + std::pair insert_or_assign(const key_arg& k, const V& v) { + return insert_or_assign_impl(k, v); + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) { + return insert_or_assign(std::forward(k), std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) { + return insert_or_assign(std::forward(k), v).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) { + return insert_or_assign(k, std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) { + return insert_or_assign(k, v).first; + } + + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + template ::value, int>::type = 0, + K* = nullptr> + std::pair try_emplace(key_arg&& k, Args&&... args) { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + + template ::value, int>::type = 0> + std::pair try_emplace(const key_arg& k, Args&&... args) { + return try_emplace_impl(k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) { + return try_emplace(std::forward(k), std::forward(args)...).first; + } + + template + iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) { + return try_emplace(k, std::forward(args)...).first; + } + + template + MappedReference
<P>
at(const key_arg& key) { + auto it = this->find(key); + if (it == this->end()) { + base_internal::ThrowStdOutOfRange( + "absl::container_internal::raw_hash_map<>::at"); + } + return Policy::value(&*it); + } + + template + MappedConstReference
<P>
at(const key_arg& key) const { + auto it = this->find(key); + if (it == this->end()) { + base_internal::ThrowStdOutOfRange( + "absl::container_internal::raw_hash_map<>::at"); + } + return Policy::value(&*it); + } + + template + MappedReference
<P>
operator[](key_arg&& key) { + return Policy::value(&*try_emplace(std::forward(key)).first); + } + + template + MappedReference
<P>
operator[](const key_arg& key) { + return Policy::value(&*try_emplace(key).first); + } + + private: + template + std::pair insert_or_assign_impl(K&& k, V&& v) { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::forward(k), std::forward(v)); + else + Policy::value(&*this->iterator_at(res.first)) = std::forward(v); + return {this->iterator_at(res.first), res.second}; + } + + template + std::pair try_emplace_impl(K&& k, Args&&... args) { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::piecewise_construct, + std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)); + return {this->iterator_at(res.first), res.second}; + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ diff --git a/base/abseil/absl/container/internal/raw_hash_set.cc b/base/abseil/absl/container/internal/raw_hash_set.cc new file mode 100644 index 0000000..919ac07 --- /dev/null +++ b/base/abseil/absl/container/internal/raw_hash_set.cc @@ -0,0 +1,48 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/raw_hash_set.h" + +#include +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +constexpr size_t Group::kWidth; + +// Returns "random" seed. +inline size_t RandomSeed() { +#if ABSL_HAVE_THREAD_LOCAL + static thread_local size_t counter = 0; + size_t value = ++counter; +#else // ABSL_HAVE_THREAD_LOCAL + static std::atomic counter(0); + size_t value = counter.fetch_add(1, std::memory_order_relaxed); +#endif // ABSL_HAVE_THREAD_LOCAL + return value ^ static_cast(reinterpret_cast(&counter)); +} + +bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) { + // To avoid problems with weak hashes and single bit tests, we use % 13. + // TODO(kfm,sbenza): revisit after we do unconditional mixing + return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/internal/raw_hash_set.h b/base/abseil/absl/container/internal/raw_hash_set.h new file mode 100644 index 0000000..4103e02 --- /dev/null +++ b/base/abseil/absl/container/internal/raw_hash_set.h @@ -0,0 +1,1868 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// An open-addressing +// hashtable with quadratic probing. +// +// This is a low level hashtable on top of which different interfaces can be +// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc. +// +// The table interface is similar to that of std::unordered_set. Notable +// differences are that most member functions support heterogeneous keys when +// BOTH the hash and eq functions are marked as transparent. They do so by +// providing a typedef called `is_transparent`. +// +// When heterogeneous lookup is enabled, functions that take key_type act as if +// they have an overload set like: +// +// iterator find(const key_type& key); +// template +// iterator find(const K& key); +// +// size_type erase(const key_type& key); +// template +// size_type erase(const K& key); +// +// std::pair equal_range(const key_type& key); +// template +// std::pair equal_range(const K& key); +// +// When heterogeneous lookup is disabled, only the explicit `key_type` overloads +// exist. +// +// find() also supports passing the hash explicitly: +// +// iterator find(const key_type& key, size_t hash); +// template +// iterator find(const U& key, size_t hash); +// +// In addition the pointer to element and iterator stability guarantees are +// weaker: all iterators and pointers are invalidated after a new element is +// inserted. +// +// IMPLEMENTATION DETAILS +// +// The table stores elements inline in a slot array. In addition to the slot +// array the table maintains some control state per slot. The extra state is one +// byte per slot and stores empty or deleted marks, or alternatively 7 bits from +// the hash of an occupied slot. The table is split into logical groups of +// slots, like so: +// +// Group 1 Group 2 Group 3 +// +---------------+---------------+---------------+ +// | | | | | | | | | | | | | | | | | | | | | | | | | +// +---------------+---------------+---------------+ +// +// On lookup the hash is split into two parts: +// - H2: 7 bits (those stored in the control bytes) +// - H1: the rest of the bits +// The groups are probed using H1. For each group the slots are matched to H2 in +// parallel. Because H2 is 7 bits (128 states) and the number of slots per group +// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit. +// +// On insert, once the right group is found (as in lookup), its slots are +// filled in order. +// +// On erase a slot is cleared. In case the group did not have any empty slots +// before the erase, the erased slot is marked as deleted. +// +// Groups without empty slots (but maybe with deleted slots) extend the probe +// sequence. The probing algorithm is quadratic. Given N the number of groups, +// the probing function for the i'th probe is: +// +// P(0) = H1 % N +// +// P(i) = (P(i - 1) + i) % N +// +// This probing function guarantees that after N probes, all the groups of the +// table will be probed exactly once. 
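+//
+// A short worked example (illustrative, not from the upstream comment): with
+// N = 4 groups and H1 % N = 1, the sequence visits
+//
+//   P(0) = 1, P(1) = (1 + 1) % 4 = 2, P(2) = (2 + 2) % 4 = 0,
+//   P(3) = (0 + 3) % 4 = 3
+//
+// i.e. every group exactly once. This relies on N being a power of two: the
+// triangular numbers 0, 1, 3, 6, ... are distinct modulo any power of two.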
+ +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/internal/bits.h" +#include "absl/base/internal/endian.h" +#include "absl/base/port.h" +#include "absl/container/internal/common.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_policy_traits.h" +#include "absl/container/internal/hashtable_debug_hooks.h" +#include "absl/container/internal/hashtablez_sampler.h" +#include "absl/container/internal/have_sse.h" +#include "absl/container/internal/layout.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class probe_seq { + public: + probe_seq(size_t hash, size_t mask) { + assert(((mask + 1) & mask) == 0 && "not a mask"); + mask_ = mask; + offset_ = hash & mask_; + } + size_t offset() const { return offset_; } + size_t offset(size_t i) const { return (offset_ + i) & mask_; } + + void next() { + index_ += Width; + offset_ += index_; + offset_ &= mask_; + } + // 0-based probe index. The i-th probe in the probe sequence. + size_t index() const { return index_; } + + private: + size_t mask_; + size_t offset_; + size_t index_ = 0; +}; + +template +struct RequireUsableKey { + template + std::pair< + decltype(std::declval()(std::declval())), + decltype(std::declval()(std::declval(), + std::declval()))>* + operator()(const PassedKey&, const Args&...) const; +}; + +template +struct IsDecomposable : std::false_type {}; + +template +struct IsDecomposable< + absl::void_t(), + std::declval()...))>, + Policy, Hash, Eq, Ts...> : std::true_type {}; + +// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. +template +constexpr bool IsNoThrowSwappable() { + using std::swap; + return noexcept(swap(std::declval(), std::declval())); +} + +template +int TrailingZeros(T x) { + return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64( + static_cast(x)) + : base_internal::CountTrailingZerosNonZero32( + static_cast(x)); +} + +template +int LeadingZeros(T x) { + return sizeof(T) == 8 + ? base_internal::CountLeadingZeros64(static_cast(x)) + : base_internal::CountLeadingZeros32(static_cast(x)); +} + +// An abstraction over a bitmask. It provides an easy way to iterate through the +// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE), +// this is a true bitmask. On non-SSE, platforms the arithematic used to +// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as +// either 0x00 or 0x80. +// +// For example: +// for (int i : BitMask(0x5)) -> yields 0, 2 +// for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 +template +class BitMask { + static_assert(std::is_unsigned::value, ""); + static_assert(Shift == 0 || Shift == 3, ""); + + public: + // These are useful for unit tests (gunit). 
+ using value_type = int; + using iterator = BitMask; + using const_iterator = BitMask; + + explicit BitMask(T mask) : mask_(mask) {} + BitMask& operator++() { + mask_ &= (mask_ - 1); + return *this; + } + explicit operator bool() const { return mask_ != 0; } + int operator*() const { return LowestBitSet(); } + int LowestBitSet() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + int HighestBitSet() const { + return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) - + 1) >> + Shift; + } + + BitMask begin() const { return *this; } + BitMask end() const { return BitMask(0); } + + int TrailingZeros() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + int LeadingZeros() const { + constexpr int total_significant_bits = SignificantBits << Shift; + constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; + return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift; + } + + private: + friend bool operator==(const BitMask& a, const BitMask& b) { + return a.mask_ == b.mask_; + } + friend bool operator!=(const BitMask& a, const BitMask& b) { + return a.mask_ != b.mask_; + } + + T mask_; +}; + +using ctrl_t = signed char; +using h2_t = uint8_t; + +// The values here are selected for maximum performance. See the static asserts +// below for details. +enum Ctrl : ctrl_t { + kEmpty = -128, // 0b10000000 + kDeleted = -2, // 0b11111110 + kSentinel = -1, // 0b11111111 +}; +static_assert( + kEmpty & kDeleted & kSentinel & 0x80, + "Special markers need to have the MSB to make checking for them efficient"); +static_assert(kEmpty < kSentinel && kDeleted < kSentinel, + "kEmpty and kDeleted must be smaller than kSentinel to make the " + "SIMD test of IsEmptyOrDeleted() efficient"); +static_assert(kSentinel == -1, + "kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)"); +static_assert(kEmpty == -128, + "kEmpty must be -128 to make the SIMD check for its " + "existence efficient (psignb xmm, xmm)"); +static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F, + "kEmpty and kDeleted must share an unset bit that is not shared " + "by kSentinel to make the scalar test for MatchEmptyOrDeleted() " + "efficient"); +static_assert(kDeleted == -2, + "kDeleted must be -2 to make the implementation of " + "ConvertSpecialToEmptyAndFullToDeleted efficient"); + +// A single block of empty control bytes for tables without any slots allocated. +// This enables removing a branch in the hot path of find(). +inline ctrl_t* EmptyGroup() { + alignas(16) static constexpr ctrl_t empty_group[] = { + kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, + kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty}; + return const_cast(empty_group); +} + +// Mixes a randomly generated per-process seed with `hash` and `ctrl` to +// randomize insertion order within groups. +bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl); + +// Returns a hash seed. +// +// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure +// non-determinism of iteration order in most cases. +inline size_t HashSeed(const ctrl_t* ctrl) { + // The low bits of the pointer have little or no entropy because of + // alignment. We shift the pointer to try to use higher entropy bits. A + // good number seems to be 12 bits, because that aligns with page size. 
+ return reinterpret_cast(ctrl) >> 12; +} + +inline size_t H1(size_t hash, const ctrl_t* ctrl) { + return (hash >> 7) ^ HashSeed(ctrl); +} +inline ctrl_t H2(size_t hash) { return hash & 0x7F; } + +inline bool IsEmpty(ctrl_t c) { return c == kEmpty; } +inline bool IsFull(ctrl_t c) { return c >= 0; } +inline bool IsDeleted(ctrl_t c) { return c == kDeleted; } +inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; } + +#if SWISSTABLE_HAVE_SSE2 + +// https://github.com/abseil/abseil-cpp/issues/209 +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 +// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char +// Work around this by using the portable implementation of Group +// when using -funsigned-char under GCC. +inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { +#if defined(__GNUC__) && !defined(__clang__) + if (std::is_unsigned::value) { + const __m128i mask = _mm_set1_epi8(0x80); + const __m128i diff = _mm_subs_epi8(b, a); + return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); + } +#endif + return _mm_cmpgt_epi8(a, b); +} + +struct GroupSse2Impl { + static constexpr size_t kWidth = 16; // the number of slots per group + + explicit GroupSse2Impl(const ctrl_t* pos) { + ctrl = _mm_loadu_si128(reinterpret_cast(pos)); + } + + // Returns a bitmask representing the positions of slots that match hash. + BitMask Match(h2_t hash) const { + auto match = _mm_set1_epi8(hash); + return BitMask( + _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))); + } + + // Returns a bitmask representing the positions of empty slots. + BitMask MatchEmpty() const { +#if SWISSTABLE_HAVE_SSSE3 + // This only works because kEmpty is -128. + return BitMask( + _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))); +#else + return Match(static_cast(kEmpty)); +#endif + } + + // Returns a bitmask representing the positions of empty or deleted slots. + BitMask MatchEmptyOrDeleted() const { + auto special = _mm_set1_epi8(kSentinel); + return BitMask( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))); + } + + // Returns the number of trailing empty or deleted elements in the group. + uint32_t CountLeadingEmptyOrDeleted() const { + auto special = _mm_set1_epi8(kSentinel); + return TrailingZeros( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + auto msbs = _mm_set1_epi8(static_cast(-128)); + auto x126 = _mm_set1_epi8(126); +#if SWISSTABLE_HAVE_SSSE3 + auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); +#else + auto zero = _mm_setzero_si128(); + auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); + auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); +#endif + _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); + } + + __m128i ctrl; +}; +#endif // SWISSTABLE_HAVE_SSE2 + +struct GroupPortableImpl { + static constexpr size_t kWidth = 8; + + explicit GroupPortableImpl(const ctrl_t* pos) + : ctrl(little_endian::Load64(pos)) {} + + BitMask Match(h2_t hash) const { + // For the technique, see: + // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + // (Determine if a word has a byte equal to n). 
+ // + // Caveat: there are false positives but: + // - they only occur if there is a real match + // - they never occur on kEmpty, kDeleted, kSentinel + // - they will be handled gracefully by subsequent checks in code + // + // Example: + // v = 0x1716151413121110 + // hash = 0x12 + // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + return BitMask((x - lsbs) & ~x & msbs); + } + + BitMask MatchEmpty() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask((ctrl & (~ctrl << 6)) & msbs); + } + + BitMask MatchEmptyOrDeleted() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask((ctrl & (~ctrl << 7)) & msbs); + } + + uint32_t CountLeadingEmptyOrDeleted() const { + constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL; + return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3; + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint64_t ctrl; +}; + +#if SWISSTABLE_HAVE_SSE2 +using Group = GroupSse2Impl; +#else +using Group = GroupPortableImpl; +#endif + +template +class raw_hash_set; + +inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } + +// PRECONDITION: +// IsValidCapacity(capacity) +// ctrl[capacity] == kSentinel +// ctrl[i] != kSentinel for all i < capacity +// Applies mapping for every byte in ctrl: +// DELETED -> EMPTY +// EMPTY -> EMPTY +// FULL -> DELETED +inline void ConvertDeletedToEmptyAndFullToDeleted( + ctrl_t* ctrl, size_t capacity) { + assert(ctrl[capacity] == kSentinel); + assert(IsValidCapacity(capacity)); + for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) { + Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); + } + // Copy the cloned ctrl bytes. + std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth); + ctrl[capacity] = kSentinel; +} + +// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1. +inline size_t NormalizeCapacity(size_t n) { + return n ? ~size_t{} >> LeadingZeros(n) : 1; +} + +// We use 7/8th as maximum load factor. +// For 16-wide groups, that gives an average of two empty slots per group. +inline size_t CapacityToGrowth(size_t capacity) { + assert(IsValidCapacity(capacity)); + // `capacity*7/8` + if (Group::kWidth == 8 && capacity == 7) { + // x-x/8 does not work when x==7. + return 6; + } + return capacity - capacity / 8; +} +// From desired "growth" to a lowerbound of the necessary capacity. +// Might not be a valid one and required NormalizeCapacity(). +inline size_t GrowthToLowerboundCapacity(size_t growth) { + // `growth*8/7` + if (Group::kWidth == 8 && growth == 7) { + // x+(x-1)/7 does not work when x==7. + return 8; + } + return growth + static_cast((static_cast(growth) - 1) / 7); +} + +// Policy: a policy defines how to perform different operations on +// the slots of the hashtable (see hash_policy_traits.h for the full interface +// of policy). +// +// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The +// functor should accept a key and return size_t as hash. For best performance +// it is important that the hash function provides high entropy across all bits +// of the hash. 
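+// (For reference, absl::Hash, the default hasher of Abseil's hash tables, is
+// designed to meet this requirement.)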
+// +// Eq: a (possibly polymorphic) functor that compares two keys for equality. It +// should accept two (of possibly different type) keys and return a bool: true +// if they are equal, false if they are not. If two keys compare equal, then +// their hash values as defined by Hash MUST be equal. +// +// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which +// the storage of the hashtable will be allocated and the elements will be +// constructed and destroyed. +template +class raw_hash_set { + using PolicyTraits = hash_policy_traits; + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + public: + using init_type = typename PolicyTraits::init_type; + using key_type = typename PolicyTraits::key_type; + // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user + // code fixes! + using slot_type = typename PolicyTraits::slot_type; + using allocator_type = Alloc; + using size_type = size_t; + using difference_type = ptrdiff_t; + using hasher = Hash; + using key_equal = Eq; + using policy_type = Policy; + using value_type = typename PolicyTraits::value_type; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = typename absl::allocator_traits< + allocator_type>::template rebind_traits::pointer; + using const_pointer = typename absl::allocator_traits< + allocator_type>::template rebind_traits::const_pointer; + + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template + using key_arg = typename KeyArgImpl::template type; + + private: + // Give an early error when key_type is not hashable/eq. + auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); + auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); + + using Layout = absl::container_internal::Layout; + + static Layout MakeLayout(size_t capacity) { + assert(IsValidCapacity(capacity)); + return Layout(capacity + Group::kWidth + 1, capacity); + } + + using AllocTraits = absl::allocator_traits; + using SlotAlloc = typename absl::allocator_traits< + allocator_type>::template rebind_alloc; + using SlotAllocTraits = typename absl::allocator_traits< + allocator_type>::template rebind_traits; + + static_assert(std::is_lvalue_reference::value, + "Policy::element() must return a reference"); + + template + struct SameAsElementReference + : std::is_same::type>::type, + typename std::remove_cv< + typename std::remove_reference::type>::type> {}; + + // An enabler for insert(T&&): T must be convertible to init_type or be the + // same as [cv] value_type [ref]. + // Note: we separate SameAsElementReference into its own type to avoid using + // reference unless we need to. MSVC doesn't seem to like it in some + // cases. + template + using RequiresInsertable = typename std::enable_if< + absl::disjunction, + SameAsElementReference>::value, + int>::type; + + // RequiresNotInit is a workaround for gcc prior to 7.1. + // See https://godbolt.org/g/Y4xsUh. 
+ template + using RequiresNotInit = + typename std::enable_if::value, int>::type; + + template + using IsDecomposable = IsDecomposable; + + public: + static_assert(std::is_same::value, + "Allocators with custom pointer types are not supported"); + static_assert(std::is_same::value, + "Allocators with custom pointer types are not supported"); + + class iterator { + friend class raw_hash_set; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename raw_hash_set::value_type; + using reference = + absl::conditional_t; + using pointer = absl::remove_reference_t*; + using difference_type = typename raw_hash_set::difference_type; + + iterator() {} + + // PRECONDITION: not an end() iterator. + reference operator*() const { + assert_is_full(); + return PolicyTraits::element(slot_); + } + + // PRECONDITION: not an end() iterator. + pointer operator->() const { return &operator*(); } + + // PRECONDITION: not an end() iterator. + iterator& operator++() { + /* To be enabled: assert_is_full(); */ + ++ctrl_; + ++slot_; + skip_empty_or_deleted(); + return *this; + } + // PRECONDITION: not an end() iterator. + iterator operator++(int) { + auto tmp = *this; + ++*this; + return tmp; + } + + friend bool operator==(const iterator& a, const iterator& b) { + a.assert_is_valid(); + b.assert_is_valid(); + return a.ctrl_ == b.ctrl_; + } + friend bool operator!=(const iterator& a, const iterator& b) { + return !(a == b); + } + + private: + iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end() + iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {} + + void assert_is_full() const { assert(IsFull(*ctrl_)); } + void assert_is_valid() const { + assert(!ctrl_ || IsFull(*ctrl_) || *ctrl_ == kSentinel); + } + + void skip_empty_or_deleted() { + while (IsEmptyOrDeleted(*ctrl_)) { + // ctrl is not necessarily aligned to Group::kWidth. It is also likely + // to read past the space for ctrl bytes and into slots. This is ok + // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there + // is no way to read outside the combined slot array. + uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); + ctrl_ += shift; + slot_ += shift; + } + } + + ctrl_t* ctrl_ = nullptr; + // To avoid uninitialized member warnings, put slot_ in an anonymous union. + // The member is not initialized on singleton and end iterators. + union { + slot_type* slot_; + }; + }; + + class const_iterator { + friend class raw_hash_set; + + public: + using iterator_category = typename iterator::iterator_category; + using value_type = typename raw_hash_set::value_type; + using reference = typename raw_hash_set::const_reference; + using pointer = typename raw_hash_set::const_pointer; + using difference_type = typename raw_hash_set::difference_type; + + const_iterator() {} + // Implicit construction from iterator. 
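+    // This lets callers pass a plain iterator wherever a const_iterator is
+    // expected (e.g. the erase(const_iterator) overload) without an explicit
+    // conversion.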
+ const_iterator(iterator i) : inner_(std::move(i)) {} + + reference operator*() const { return *inner_; } + pointer operator->() const { return inner_.operator->(); } + + const_iterator& operator++() { + ++inner_; + return *this; + } + const_iterator operator++(int) { return inner_++; } + + friend bool operator==(const const_iterator& a, const const_iterator& b) { + return a.inner_ == b.inner_; + } + friend bool operator!=(const const_iterator& a, const const_iterator& b) { + return !(a == b); + } + + private: + const_iterator(const ctrl_t* ctrl, const slot_type* slot) + : inner_(const_cast(ctrl), const_cast(slot)) {} + + iterator inner_; + }; + + using node_type = node_handle, Alloc>; + using insert_return_type = InsertReturnType; + + raw_hash_set() noexcept( + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value) {} + + explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), + const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) { + if (bucket_count) { + capacity_ = NormalizeCapacity(bucket_count); + reset_growth_left(); + initialize_slots(); + } + } + + raw_hash_set(size_t bucket_count, const hasher& hash, + const allocator_type& alloc) + : raw_hash_set(bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(size_t bucket_count, const allocator_type& alloc) + : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {} + + explicit raw_hash_set(const allocator_type& alloc) + : raw_hash_set(0, hasher(), key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(bucket_count, hash, eq, alloc) { + insert(first, last); + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) + : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} + + // Instead of accepting std::initializer_list as the first + // argument like std::unordered_set does, we have two overloads + // that accept std::initializer_list and std::initializer_list. + // This is advantageous for performance. + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // std::unordered_set s = {"abc", "def"}; + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // absl::flat_hash_set s = {"abc", "def"}; + // + // The same trick is used in insert(). + // + // The enabler is necessary to prevent this constructor from triggering where + // the copy constructor is meant to be called. + // + // absl::flat_hash_set a, b{a}; + // + // RequiresNotInit is a workaround for gcc prior to 7.1. 
+ template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, + const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(const raw_hash_set& that) + : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( + that.alloc_ref())) {} + + raw_hash_set(const raw_hash_set& that, const allocator_type& a) + : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) { + reserve(that.size()); + // Because the table is guaranteed to be empty, we can do something faster + // than a full `insert`. + for (const auto& v : that) { + const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); + auto target = find_first_non_full(hash); + set_ctrl(target.offset, H2(hash)); + emplace_at(target.offset, v); + infoz_.RecordInsert(hash, target.probe_length); + } + size_ = that.size(); + growth_left() -= that.size(); + } + + raw_hash_set(raw_hash_set&& that) noexcept( + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value) + : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())), + slots_(absl::exchange(that.slots_, nullptr)), + size_(absl::exchange(that.size_, 0)), + capacity_(absl::exchange(that.capacity_, 0)), + infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())), + // Hash, equality and allocator are copied instead of moved because + // `that` must be left valid. If Hash is std::function, moving it + // would create a nullptr functor that cannot be called. + settings_(that.settings_) { + // growth_left was copied above, reset the one from `that`. 
+ that.growth_left() = 0; + } + + raw_hash_set(raw_hash_set&& that, const allocator_type& a) + : ctrl_(EmptyGroup()), + slots_(nullptr), + size_(0), + capacity_(0), + settings_(0, that.hash_ref(), that.eq_ref(), a) { + if (a == that.alloc_ref()) { + std::swap(ctrl_, that.ctrl_); + std::swap(slots_, that.slots_); + std::swap(size_, that.size_); + std::swap(capacity_, that.capacity_); + std::swap(growth_left(), that.growth_left()); + std::swap(infoz_, that.infoz_); + } else { + reserve(that.size()); + // Note: this will copy elements of dense_set and unordered_set instead of + // moving them. This can be fixed if it ever becomes an issue. + for (auto& elem : that) insert(std::move(elem)); + } + } + + raw_hash_set& operator=(const raw_hash_set& that) { + raw_hash_set tmp(that, + AllocTraits::propagate_on_container_copy_assignment::value + ? that.alloc_ref() + : alloc_ref()); + swap(tmp); + return *this; + } + + raw_hash_set& operator=(raw_hash_set&& that) noexcept( + absl::allocator_traits::is_always_equal::value&& + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_assignable::value) { + // TODO(sbenza): We should only use the operations from the noexcept clause + // to make sure we actually adhere to that contract. + return move_assign( + std::move(that), + typename AllocTraits::propagate_on_container_move_assignment()); + } + + ~raw_hash_set() { destroy_slots(); } + + iterator begin() { + auto it = iterator_at(0); + it.skip_empty_or_deleted(); + return it; + } + iterator end() { return {ctrl_ + capacity_}; } + + const_iterator begin() const { + return const_cast(this)->begin(); + } + const_iterator end() const { return const_cast(this)->end(); } + const_iterator cbegin() const { return begin(); } + const_iterator cend() const { return end(); } + + bool empty() const { return !size(); } + size_t size() const { return size_; } + size_t capacity() const { return capacity_; } + size_t max_size() const { return (std::numeric_limits::max)(); } + + ABSL_ATTRIBUTE_REINITIALIZES void clear() { + // Iterating over this container is O(bucket_count()). When bucket_count() + // is much greater than size(), iteration becomes prohibitively expensive. + // For clear() it is more important to reuse the allocated array when the + // container is small because allocation takes comparatively long time + // compared to destruction of the elements of the container. So we pick the + // largest bucket_count() threshold for which iteration is still fast and + // past that we simply deallocate the array. + if (capacity_ > 127) { + destroy_slots(); + } else if (capacity_) { + for (size_t i = 0; i != capacity_; ++i) { + if (IsFull(ctrl_[i])) { + PolicyTraits::destroy(&alloc_ref(), slots_ + i); + } + } + size_ = 0; + reset_ctrl(); + reset_growth_left(); + } + assert(empty()); + infoz_.RecordStorageChanged(0, capacity_); + } + + // This overload kicks in when the argument is an rvalue of insertable and + // decomposable type other than init_type. + // + // flat_hash_map m; + // m.insert(std::make_pair("abc", 42)); + // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc + // bug. + template = 0, + class T2 = T, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + std::pair insert(T&& value) { + return emplace(std::forward(value)); + } + + // This overload kicks in when the argument is a bitfield or an lvalue of + // insertable and decomposable type. 
+ // + // union { int n : 1; }; + // flat_hash_set s; + // s.insert(n); + // + // flat_hash_set s; + // const char* p = "hello"; + // s.insert(p); + // + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + std::pair insert(const T& value) { + return emplace(value); + } + + // This overload kicks in when the argument is an rvalue of init_type. Its + // purpose is to handle brace-init-list arguments. + // + // flat_hash_map s; + // s.insert({"abc", 42}); + std::pair insert(init_type&& value) { + return emplace(std::move(value)); + } + + // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc + // bug. + template = 0, class T2 = T, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + iterator insert(const_iterator, T&& value) { + return insert(std::forward(value)).first; + } + + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + iterator insert(const_iterator, const T& value) { + return insert(value).first; + } + + iterator insert(const_iterator, init_type&& value) { + return insert(std::move(value)).first; + } + + template + void insert(InputIt first, InputIt last) { + for (; first != last; ++first) insert(*first); + } + + template = 0, RequiresInsertable = 0> + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + insert_return_type insert(node_type&& node) { + if (!node) return {end(), false, node_type()}; + const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); + auto res = PolicyTraits::apply( + InsertSlot{*this, std::move(*CommonAccess::GetSlot(node))}, + elem); + if (res.second) { + CommonAccess::Reset(&node); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + + iterator insert(const_iterator, node_type&& node) { + return insert(std::move(node)).first; + } + + // This overload kicks in if we can deduce the key from args. This enables us + // to avoid constructing value_type if an entry with the same key already + // exists. + // + // For example: + // + // flat_hash_map m = {{"abc", "def"}}; + // // Creates no std::string copies and makes no heap allocations. + // m.emplace("abc", "xyz"); + template ::value, int>::type = 0> + std::pair emplace(Args&&... args) { + return PolicyTraits::apply(EmplaceDecomposable{*this}, + std::forward(args)...); + } + + // This overload kicks in if we cannot deduce the key from args. It constructs + // value_type unconditionally and then either moves it into the table or + // destroys. + template ::value, int>::type = 0> + std::pair emplace(Args&&... args) { + typename std::aligned_storage::type + raw; + slot_type* slot = reinterpret_cast(&raw); + + PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); + const auto& elem = PolicyTraits::element(slot); + return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); + } + + template + iterator emplace_hint(const_iterator, Args&&... 
args) { + return emplace(std::forward(args)...).first; + } + + // Extension API: support for lazy emplace. + // + // Looks up key in the table. If found, returns the iterator to the element. + // Otherwise calls f with one argument of type raw_hash_set::constructor. f + // MUST call raw_hash_set::constructor with arguments as if a + // raw_hash_set::value_type is constructed, otherwise the behavior is + // undefined. + // + // For example: + // + // std::unordered_set s; + // // Makes ArenaStr even if "abc" is in the map. + // s.insert(ArenaString(&arena, "abc")); + // + // flat_hash_set s; + // // Makes ArenaStr only if "abc" is not in the map. + // s.lazy_emplace("abc", [&](const constructor& ctor) { + // ctor(&arena, "abc"); + // }); + // + // WARNING: This API is currently experimental. If there is a way to implement + // the same thing with the rest of the API, prefer that. + class constructor { + friend class raw_hash_set; + + public: + template + void operator()(Args&&... args) const { + assert(*slot_); + PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); + *slot_ = nullptr; + } + + private: + constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {} + + allocator_type* alloc_; + slot_type** slot_; + }; + + template + iterator lazy_emplace(const key_arg& key, F&& f) { + auto res = find_or_prepare_insert(key); + if (res.second) { + slot_type* slot = slots_ + res.first; + std::forward(f)(constructor(&alloc_ref(), &slot)); + assert(!slot); + } + return iterator_at(res.first); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.erase("abc"); + // + // flat_hash_set s; + // // Uses "abc" directly without copying it into std::string. + // s.erase("abc"); + template + size_type erase(const key_arg& key) { + auto it = find(key); + if (it == end()) return 0; + erase(it); + return 1; + } + + // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, + // this method returns void to reduce algorithmic complexity to O(1). The + // iterator is invalidated, so any increment should be done before calling + // erase. In order to erase while iterating across a map, use the following + // idiom (which also works for standard containers): + // + // for (auto it = m.begin(), end = m.end(); it != end;) { + // // `erase()` will invalidate `it`, so advance `it` first. + // auto copy_it = it++; + // if () { + // m.erase(copy_it); + // } + // } + void erase(const_iterator cit) { erase(cit.inner_); } + + // This overload is necessary because otherwise erase(const K&) would be + // a better match if non-const iterator is passed as an argument. + void erase(iterator it) { + it.assert_is_full(); + PolicyTraits::destroy(&alloc_ref(), it.slot_); + erase_meta_only(it); + } + + iterator erase(const_iterator first, const_iterator last) { + while (first != last) { + erase(first++); + } + return last.inner_; + } + + // Moves elements from `src` into `this`. + // If the element already exists in `this`, it is left unmodified in `src`. 
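+  //
+  // A small usage sketch (illustrative; flat_hash_set is one of the
+  // containers built on top of this table):
+  //
+  //   absl::flat_hash_set<std::string> dst = {"b"};
+  //   absl::flat_hash_set<std::string> src = {"a", "b"};
+  //   dst.merge(src);  // dst == {"a", "b"}, src == {"b"} (duplicate stays)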
+ template + void merge(raw_hash_set& src) { // NOLINT + assert(this != &src); + for (auto it = src.begin(), e = src.end(); it != e;) { + auto next = std::next(it); + if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot_)}, + PolicyTraits::element(it.slot_)) + .second) { + src.erase_meta_only(it); + } + it = next; + } + } + + template + void merge(raw_hash_set&& src) { + merge(src); + } + + node_type extract(const_iterator position) { + position.inner_.assert_is_full(); + auto node = + CommonAccess::Transfer(alloc_ref(), position.inner_.slot_); + erase_meta_only(position); + return node; + } + + template < + class K = key_type, + typename std::enable_if::value, int>::type = 0> + node_type extract(const key_arg& key) { + auto it = find(key); + return it == end() ? node_type() : extract(const_iterator{it}); + } + + void swap(raw_hash_set& that) noexcept( + IsNoThrowSwappable() && IsNoThrowSwappable() && + (!AllocTraits::propagate_on_container_swap::value || + IsNoThrowSwappable())) { + using std::swap; + swap(ctrl_, that.ctrl_); + swap(slots_, that.slots_); + swap(size_, that.size_); + swap(capacity_, that.capacity_); + swap(growth_left(), that.growth_left()); + swap(hash_ref(), that.hash_ref()); + swap(eq_ref(), that.eq_ref()); + swap(infoz_, that.infoz_); + if (AllocTraits::propagate_on_container_swap::value) { + swap(alloc_ref(), that.alloc_ref()); + } else { + // If the allocators do not compare equal it is officially undefined + // behavior. We choose to do nothing. + } + } + + void rehash(size_t n) { + if (n == 0 && capacity_ == 0) return; + if (n == 0 && size_ == 0) { + destroy_slots(); + infoz_.RecordStorageChanged(0, 0); + return; + } + // bitor is a faster way of doing `max` here. We will round up to the next + // power-of-2-minus-1, so bitor is good enough. + auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); + // n == 0 unconditionally rehashes as per the standard. + if (n == 0 || m > capacity_) { + resize(m); + } + } + + void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.count("abc"); + // + // ch_set s; + // // Uses "abc" directly without copying it into std::string. + // s.count("abc"); + template + size_t count(const key_arg& key) const { + return find(key) == end() ? 0 : 1; + } + + // Issues CPU prefetch instructions for the memory needed to find or insert + // a key. Like all lookup functions, this support heterogeneous keys. + // + // NOTE: This is a very low level operation and should not be used without + // specific benchmarks indicating its importance. + template + void prefetch(const key_arg& key) const { + (void)key; +#if defined(__GNUC__) + auto seq = probe(hash_ref()(key)); + __builtin_prefetch(static_cast(ctrl_ + seq.offset())); + __builtin_prefetch(static_cast(slots_ + seq.offset())); +#endif // __GNUC__ + } + + // The API of find() has two extensions. + // + // 1. The hash can be passed by the user. It must be equal to the hash of the + // key. + // + // 2. The type of the key argument doesn't have to be key_type. This is so + // called heterogeneous key support. 
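+  //
+  // An illustrative sketch (assumes the default, transparent hash and eq):
+  //
+  //   absl::flat_hash_set<std::string> s = {"abc"};
+  //   const size_t h = s.hash_function()(absl::string_view("abc"));
+  //   auto it = s.find(absl::string_view("abc"), h);  // no re-hash, no copy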
+ template + iterator find(const key_arg& key, size_t hash) { + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + for (int i : g.Match(H2(hash))) { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slots_ + seq.offset(i))))) + return iterator_at(seq.offset(i)); + } + if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end(); + seq.next(); + } + } + template + iterator find(const key_arg& key) { + return find(key, hash_ref()(key)); + } + + template + const_iterator find(const key_arg& key, size_t hash) const { + return const_cast(this)->find(key, hash); + } + template + const_iterator find(const key_arg& key) const { + return find(key, hash_ref()(key)); + } + + template + bool contains(const key_arg& key) const { + return find(key) != end(); + } + + template + std::pair equal_range(const key_arg& key) { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + template + std::pair equal_range( + const key_arg& key) const { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + + size_t bucket_count() const { return capacity_; } + float load_factor() const { + return capacity_ ? static_cast(size()) / capacity_ : 0.0; + } + float max_load_factor() const { return 1.0f; } + void max_load_factor(float) { + // Does nothing. + } + + hasher hash_function() const { return hash_ref(); } + key_equal key_eq() const { return eq_ref(); } + allocator_type get_allocator() const { return alloc_ref(); } + + friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) { + if (a.size() != b.size()) return false; + const raw_hash_set* outer = &a; + const raw_hash_set* inner = &b; + if (outer->capacity() > inner->capacity()) std::swap(outer, inner); + for (const value_type& elem : *outer) + if (!inner->has_element(elem)) return false; + return true; + } + + friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) { + return !(a == b); + } + + friend void swap(raw_hash_set& a, + raw_hash_set& b) noexcept(noexcept(a.swap(b))) { + a.swap(b); + } + + private: + template + friend struct absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess; + + struct FindElement { + template + const_iterator operator()(const K& key, Args&&...) const { + return s.find(key); + } + const raw_hash_set& s; + }; + + struct HashElement { + template + size_t operator()(const K& key, Args&&...) const { + return h(key); + } + const hasher& h; + }; + + template + struct EqualElement { + template + bool operator()(const K2& lhs, Args&&...) const { + return eq(lhs, rhs); + } + const K1& rhs; + const key_equal& eq; + }; + + struct EmplaceDecomposable { + template + std::pair operator()(const K& key, Args&&... args) const { + auto res = s.find_or_prepare_insert(key); + if (res.second) { + s.emplace_at(res.first, std::forward(args)...); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + }; + + template + struct InsertSlot { + template + std::pair operator()(const K& key, Args&&...) && { + auto res = s.find_or_prepare_insert(key); + if (res.second) { + PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot); + } else if (do_destroy) { + PolicyTraits::destroy(&s.alloc_ref(), &slot); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + // Constructed slot. Either moved into place or destroyed. 
+ slot_type&& slot; + }; + + // "erases" the object from the container, except that it doesn't actually + // destroy the object. It only updates all the metadata of the class. + // This can be used in conjunction with Policy::transfer to move the object to + // another place. + void erase_meta_only(const_iterator it) { + assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); + --size_; + const size_t index = it.inner_.ctrl_ - ctrl_; + const size_t index_before = (index - Group::kWidth) & capacity_; + const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty(); + const auto empty_before = Group(ctrl_ + index_before).MatchEmpty(); + + // We count how many consecutive non empties we have to the right and to the + // left of `it`. If the sum is >= kWidth then there is at least one probe + // window that might have seen a full group. + bool was_never_full = + empty_before && empty_after && + static_cast(empty_after.TrailingZeros() + + empty_before.LeadingZeros()) < Group::kWidth; + + set_ctrl(index, was_never_full ? kEmpty : kDeleted); + growth_left() += was_never_full; + infoz_.RecordErase(); + } + + void initialize_slots() { + assert(capacity_); + // Folks with custom allocators often make unwarranted assumptions about the + // behavior of their classes vis-a-vis trivial destructability and what + // calls they will or wont make. Avoid sampling for people with custom + // allocators to get us out of this mess. This is not a hard guarantee but + // a workaround while we plan the exact guarantee we want to provide. + // + // People are often sloppy with the exact type of their allocator (sometimes + // it has an extra const or is missing the pair, but rebinds made it work + // anyway). To avoid the ambiguity, we work off SlotAlloc which we have + // bound more carefully. + if (std::is_same>::value && + slots_ == nullptr) { + infoz_ = Sample(); + } + + auto layout = MakeLayout(capacity_); + char* mem = static_cast( + Allocate(&alloc_ref(), layout.AllocSize())); + ctrl_ = reinterpret_cast(layout.template Pointer<0>(mem)); + slots_ = layout.template Pointer<1>(mem); + reset_ctrl(); + reset_growth_left(); + infoz_.RecordStorageChanged(size_, capacity_); + } + + void destroy_slots() { + if (!capacity_) return; + for (size_t i = 0; i != capacity_; ++i) { + if (IsFull(ctrl_[i])) { + PolicyTraits::destroy(&alloc_ref(), slots_ + i); + } + } + auto layout = MakeLayout(capacity_); + // Unpoison before returning the memory to the allocator. 
+    SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+    Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
+    ctrl_ = EmptyGroup();
+    slots_ = nullptr;
+    size_ = 0;
+    capacity_ = 0;
+    growth_left() = 0;
+  }
+
+  void resize(size_t new_capacity) {
+    assert(IsValidCapacity(new_capacity));
+    auto* old_ctrl = ctrl_;
+    auto* old_slots = slots_;
+    const size_t old_capacity = capacity_;
+    capacity_ = new_capacity;
+    initialize_slots();
+
+    size_t total_probe_length = 0;
+    for (size_t i = 0; i != old_capacity; ++i) {
+      if (IsFull(old_ctrl[i])) {
+        size_t hash = PolicyTraits::apply(
+            HashElement{hash_ref()}, PolicyTraits::element(old_slots + i));
+        auto target = find_first_non_full(hash);
+        size_t new_i = target.offset;
+        total_probe_length += target.probe_length;
+        set_ctrl(new_i, H2(hash));
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+      }
+    }
+    if (old_capacity) {
+      SanitizerUnpoisonMemoryRegion(old_slots,
+                                    sizeof(slot_type) * old_capacity);
+      auto layout = MakeLayout(old_capacity);
+      Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
+                                      layout.AllocSize());
+    }
+    infoz_.RecordRehash(total_probe_length);
+  }
+
+  void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
+    assert(IsValidCapacity(capacity_));
+    assert(!is_small());
+    // Algorithm:
+    // - mark all DELETED slots as EMPTY
+    // - mark all FULL slots as DELETED
+    // - for each slot marked as DELETED
+    //     hash = Hash(element)
+    //     target = find_first_non_full(hash)
+    //     if target is in the same group
+    //       mark slot as FULL
+    //     else if target is EMPTY
+    //       transfer element to target
+    //       mark slot as EMPTY
+    //       mark target as FULL
+    //     else if target is DELETED
+    //       swap current element with target element
+    //       mark target as FULL
+    //       repeat procedure for current slot with moved from element (target)
+    ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
+    typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
+        raw;
+    size_t total_probe_length = 0;
+    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+    for (size_t i = 0; i != capacity_; ++i) {
+      if (!IsDeleted(ctrl_[i])) continue;
+      size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+                                        PolicyTraits::element(slots_ + i));
+      auto target = find_first_non_full(hash);
+      size_t new_i = target.offset;
+      total_probe_length += target.probe_length;
+
+      // Check whether the old and new positions fall within the same group
+      // with respect to the hash. If they do, we don't need to move the
+      // object, as it already sits in the best probe window we can offer.
+      const auto probe_index = [&](size_t pos) {
+        return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
+      };
+
+      // Element doesn't move.
+      if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
+        set_ctrl(i, H2(hash));
+        continue;
+      }
+      if (IsEmpty(ctrl_[new_i])) {
+        // Transfer element to the empty spot.
+        // set_ctrl poisons/unpoisons the slots so we have to call it at the
+        // right time.
+        set_ctrl(new_i, H2(hash));
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
+        set_ctrl(i, kEmpty);
+      } else {
+        assert(IsDeleted(ctrl_[new_i]));
+        set_ctrl(new_i, H2(hash));
+        // Until we are done rehashing, DELETED marks previously FULL slots.
+        // Swap i and new_i elements.
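+        // The three transfers below are a swap through the stack temporary:
+        // i -> tmp, new_i -> i, tmp -> new_i. `--i` then revisits slot i,
+        // which now holds the displaced element that still needs a home.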
+        PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
+        PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
+        --i;  // repeat
+      }
+    }
+    reset_growth_left();
+    infoz_.RecordRehash(total_probe_length);
+  }
+
+  void rehash_and_grow_if_necessary() {
+    if (capacity_ == 0) {
+      resize(1);
+    } else if (size() <= CapacityToGrowth(capacity()) / 2) {
+      // Squash DELETED without growing if there is enough capacity.
+      drop_deletes_without_resize();
+    } else {
+      // Otherwise grow the container.
+      resize(capacity_ * 2 + 1);
+    }
+  }
+
+  bool has_element(const value_type& elem) const {
+    size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
+    auto seq = probe(hash);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (int i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
+                              elem))
+          return true;
+      }
+      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+      seq.next();
+      assert(seq.index() < capacity_ && "full table!");
+    }
+    return false;
+  }
+
+  // Probes the raw_hash_set with the probe sequence for `hash` and returns
+  // the offset of the first empty or deleted slot, together with the probe
+  // length it took to get there.
+  // NOTE: this function must work with tables having both kEmpty and
+  // kDeleted in one group. Such tables appear during
+  // drop_deletes_without_resize.
+  //
+  // This function is very useful when insertions happen and:
+  // - the input is already a set
+  // - there are enough slots
+  // - the element with the hash is not in the table
+  struct FindInfo {
+    size_t offset;
+    size_t probe_length;
+  };
+  FindInfo find_first_non_full(size_t hash) {
+    auto seq = probe(hash);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      auto mask = g.MatchEmptyOrDeleted();
+      if (mask) {
+#if !defined(NDEBUG)
+        // We want to add entropy even when ASLR is not enabled.
+        // In debug builds we randomly insert at either the front or the back
+        // of the group.
+        // TODO(kfm,sbenza): revisit after we do unconditional mixing
+        if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
+          return {seq.offset(mask.HighestBitSet()), seq.index()};
+        }
+#endif
+        return {seq.offset(mask.LowestBitSet()), seq.index()};
+      }
+      assert(seq.index() < capacity_ && "full table!");
+      seq.next();
+    }
+  }
+
+  // TODO(alkis): Optimize this assuming *this and that don't overlap.
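+  // Dispatch for move assignment, keyed on
+  // propagate_on_container_move_assignment: the true_type overload can take
+  // over `that`'s storage wholesale, while the false_type overload rebuilds
+  // through a temporary bound to our own allocator, moving elements one by
+  // one when the allocators differ.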
+ raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) { + raw_hash_set tmp(std::move(that)); + swap(tmp); + return *this; + } + raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) { + raw_hash_set tmp(std::move(that), alloc_ref()); + swap(tmp); + return *this; + } + + protected: + template + std::pair find_or_prepare_insert(const K& key) { + auto hash = hash_ref()(key); + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + for (int i : g.Match(H2(hash))) { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slots_ + seq.offset(i))))) + return {seq.offset(i), false}; + } + if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break; + seq.next(); + } + return {prepare_insert(hash), true}; + } + + size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE { + auto target = find_first_non_full(hash); + if (ABSL_PREDICT_FALSE(growth_left() == 0 && + !IsDeleted(ctrl_[target.offset]))) { + rehash_and_grow_if_necessary(); + target = find_first_non_full(hash); + } + ++size_; + growth_left() -= IsEmpty(ctrl_[target.offset]); + set_ctrl(target.offset, H2(hash)); + infoz_.RecordInsert(hash, target.probe_length); + return target.offset; + } + + // Constructs the value in the space pointed by the iterator. This only works + // after an unsuccessful find_or_prepare_insert() and before any other + // modifications happen in the raw_hash_set. + // + // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where + // k is the key decomposed from `forward(args)...`, and the bool + // returned by find_or_prepare_insert(k) was true. + // POSTCONDITION: *m.iterator_at(i) == value_type(forward(args)...). + template + void emplace_at(size_t i, Args&&... args) { + PolicyTraits::construct(&alloc_ref(), slots_ + i, + std::forward(args)...); + + assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) == + iterator_at(i) && + "constructed value does not match the lookup key"); + } + + iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; } + const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; } + + private: + friend struct RawHashSetTestOnlyAccess; + + probe_seq probe(size_t hash) const { + return probe_seq(H1(hash, ctrl_), capacity_); + } + + // Reset all ctrl bytes back to kEmpty, except the sentinel. + void reset_ctrl() { + std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth); + ctrl_[capacity_] = kSentinel; + SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); + } + + void reset_growth_left() { + growth_left() = CapacityToGrowth(capacity()) - size_; + } + + // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at + // the end too. + void set_ctrl(size_t i, ctrl_t h) { + assert(i < capacity_); + + if (IsFull(h)) { + SanitizerUnpoisonObject(slots_ + i); + } else { + SanitizerPoisonObject(slots_ + i); + } + + ctrl_[i] = h; + ctrl_[((i - Group::kWidth) & capacity_) + 1 + + ((Group::kWidth - 1) & capacity_)] = h; + } + + size_t& growth_left() { return settings_.template get<0>(); } + + // The representation of the object has two modes: + // - small: For capacities < kWidth-1 + // - large: For the rest. + // + // Differences: + // - In small mode we are able to use the whole capacity. The extra control + // bytes give us at least one "empty" control byte to stop the iteration. + // This is important to make 1 a valid capacity. + // + // - In small mode only the first `capacity()` control bytes after the + // sentinel are valid. 
The rest contain dummy kEmpty values that do not + // represent a real slot. This is important to take into account on + // find_first_non_full(), where we never try ShouldInsertBackwards() for + // small tables. + bool is_small() const { return capacity_ < Group::kWidth - 1; } + + hasher& hash_ref() { return settings_.template get<1>(); } + const hasher& hash_ref() const { return settings_.template get<1>(); } + key_equal& eq_ref() { return settings_.template get<2>(); } + const key_equal& eq_ref() const { return settings_.template get<2>(); } + allocator_type& alloc_ref() { return settings_.template get<3>(); } + const allocator_type& alloc_ref() const { + return settings_.template get<3>(); + } + + // TODO(alkis): Investigate removing some of these fields: + // - ctrl/slots can be derived from each other + // - size can be moved into the slot array + ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1) * ctrl_t] + slot_type* slots_ = nullptr; // [capacity * slot_type] + size_t size_ = 0; // number of full slots + size_t capacity_ = 0; // total number of slots + HashtablezInfoHandle infoz_; + absl::container_internal::CompressedTuple + settings_{0, hasher{}, key_equal{}, allocator_type{}}; +}; + +namespace hashtable_debug_internal { +template +struct HashtableDebugAccess> { + using Traits = typename Set::PolicyTraits; + using Slot = typename Traits::slot_type; + + static size_t GetNumProbes(const Set& set, + const typename Set::key_type& key) { + size_t num_probes = 0; + size_t hash = set.hash_ref()(key); + auto seq = set.probe(hash); + while (true) { + container_internal::Group g{set.ctrl_ + seq.offset()}; + for (int i : g.Match(container_internal::H2(hash))) { + if (Traits::apply( + typename Set::template EqualElement{ + key, set.eq_ref()}, + Traits::element(set.slots_ + seq.offset(i)))) + return num_probes; + ++num_probes; + } + if (g.MatchEmpty()) return num_probes; + seq.next(); + ++num_probes; + } + } + + static size_t AllocatedByteSize(const Set& c) { + size_t capacity = c.capacity_; + if (capacity == 0) return 0; + auto layout = Set::MakeLayout(capacity); + size_t m = layout.AllocSize(); + + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) { + m += per_slot * c.size(); + } else { + for (size_t i = 0; i != capacity; ++i) { + if (container_internal::IsFull(c.ctrl_[i])) { + m += Traits::space_used(c.slots_ + i); + } + } + } + return m; + } + + static size_t LowerBoundAllocatedByteSize(size_t size) { + size_t capacity = GrowthToLowerboundCapacity(size); + if (capacity == 0) return 0; + auto layout = Set::MakeLayout(NormalizeCapacity(capacity)); + size_t m = layout.AllocSize(); + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) { + m += per_slot * size; + } + return m; + } +}; + +} // namespace hashtable_debug_internal +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ diff --git a/base/abseil/absl/container/internal/raw_hash_set_allocator_test.cc b/base/abseil/absl/container/internal/raw_hash_set_allocator_test.cc new file mode 100644 index 0000000..7ac4b9f --- /dev/null +++ b/base/abseil/absl/container/internal/raw_hash_set_allocator_test.cc @@ -0,0 +1,430 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "gtest/gtest.h" +#include "absl/container/internal/raw_hash_set.h" +#include "absl/container/internal/tracked.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +enum AllocSpec { + kPropagateOnCopy = 1, + kPropagateOnMove = 2, + kPropagateOnSwap = 4, +}; + +struct AllocState { + size_t num_allocs = 0; + std::set owned; +}; + +template +class CheckedAlloc { + public: + template + friend class CheckedAlloc; + + using value_type = T; + + CheckedAlloc() {} + explicit CheckedAlloc(size_t id) : id_(id) {} + CheckedAlloc(const CheckedAlloc&) = default; + CheckedAlloc& operator=(const CheckedAlloc&) = default; + + template + CheckedAlloc(const CheckedAlloc& that) + : id_(that.id_), state_(that.state_) {} + + template + struct rebind { + using other = CheckedAlloc; + }; + + using propagate_on_container_copy_assignment = + std::integral_constant; + + using propagate_on_container_move_assignment = + std::integral_constant; + + using propagate_on_container_swap = + std::integral_constant; + + CheckedAlloc select_on_container_copy_construction() const { + if (Spec & kPropagateOnCopy) return *this; + return {}; + } + + T* allocate(size_t n) { + T* ptr = std::allocator().allocate(n); + track_alloc(ptr); + return ptr; + } + void deallocate(T* ptr, size_t n) { + memset(ptr, 0, n * sizeof(T)); // The freed memory must be unpoisoned. + track_dealloc(ptr); + return std::allocator().deallocate(ptr, n); + } + + friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) { + return !(a == b); + } + + size_t num_allocs() const { return state_->num_allocs; } + + void swap(CheckedAlloc& that) { + using std::swap; + swap(id_, that.id_); + swap(state_, that.state_); + } + + friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); } + + friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) { + return o << "alloc(" << a.id_ << ")"; + } + + private: + void track_alloc(void* ptr) { + AllocState* state = state_.get(); + ++state->num_allocs; + if (!state->owned.insert(ptr).second) + ADD_FAILURE() << *this << " got previously allocated memory: " << ptr; + } + void track_dealloc(void* ptr) { + if (state_->owned.erase(ptr) != 1) + ADD_FAILURE() << *this + << " deleting memory owned by another allocator: " << ptr; + } + + size_t id_ = std::numeric_limits::max(); + + std::shared_ptr state_ = std::make_shared(); +}; + +struct Identity { + int32_t operator()(int32_t v) const { return v; } +}; + +struct Policy { + using slot_type = Tracked; + using init_type = Tracked; + using key_type = int32_t; + + template + static void construct(allocator_type* alloc, slot_type* slot, + Args&&... 
args) { + std::allocator_traits::construct( + *alloc, slot, std::forward(args)...); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + template + static auto apply(F&& f, int32_t v) -> decltype(std::forward(f)(v, v)) { + return std::forward(f)(v, v); + } + + template + static auto apply(F&& f, const slot_type& v) + -> decltype(std::forward(f)(v.val(), v)) { + return std::forward(f)(v.val(), v); + } + + template + static auto apply(F&& f, slot_type&& v) + -> decltype(std::forward(f)(v.val(), std::move(v))) { + return std::forward(f)(v.val(), std::move(v)); + } + + static slot_type& element(slot_type* slot) { return *slot; } +}; + +template +struct PropagateTest : public ::testing::Test { + using Alloc = CheckedAlloc, Spec>; + + using Table = raw_hash_set, Alloc>; + + PropagateTest() { + EXPECT_EQ(a1, t1.get_allocator()); + EXPECT_NE(a2, t1.get_allocator()); + } + + Alloc a1 = Alloc(1); + Table t1 = Table(0, a1); + Alloc a2 = Alloc(2); +}; + +using PropagateOnAll = + PropagateTest; +using NoPropagateOnCopy = PropagateTest; +using NoPropagateOnMove = PropagateTest; + +TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); } + +TEST_F(PropagateOnAll, InsertAllocates) { + auto it = t1.insert(0).first; + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, InsertDecomposes) { + auto it = t1.insert(0).first; + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); + + EXPECT_FALSE(t1.insert(0).second); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, RehashMoves) { + auto it = t1.insert(0).first; + EXPECT_EQ(0, it->num_moves()); + t1.rehash(2 * t1.capacity()); + EXPECT_EQ(2, a1.num_allocs()); + it = t1.find(0); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructor) { + auto it = t1.insert(0).first; + Table u(t1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructor) { + auto it = t1.insert(0).first; + Table u(t1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, u.get_allocator().num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a2); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a2); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); 
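+  // Note: the explicit allocator argument takes precedence over the
+  // propagation trait here, so the copy draws from a2 and a1 stays at one
+  // allocation.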
+ EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructor) { + auto it = t1.insert(0).first; + Table u(std::move(t1)); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructor) { + auto it = t1.insert(0).first; + Table u(std::move(t1)); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a2); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a2); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = t1; + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = t1; + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = t1; + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = t1; + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, 
a2); + u = std::move(t1); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, Swap) { + auto it = t1.insert(0).first; + Table u(0, a2); + u.swap(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(a2, t1.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/base/abseil/absl/container/internal/raw_hash_set_test.cc b/base/abseil/absl/container/internal/raw_hash_set_test.cc new file mode 100644 index 0000000..38e5e0e --- /dev/null +++ b/base/abseil/absl/container/internal/raw_hash_set_test.cc @@ -0,0 +1,1918 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/raw_hash_set.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/cycleclock.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/container/internal/hashtable_debug.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +struct RawHashSetTestOnlyAccess { + template + static auto GetSlots(const C& c) -> decltype(c.slots_) { + return c.slots_; + } +}; + +namespace { + +using ::testing::DoubleNear; +using ::testing::ElementsAre; +using ::testing::Ge; +using ::testing::Lt; +using ::testing::Optional; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +TEST(Util, NormalizeCapacity) { + EXPECT_EQ(1, NormalizeCapacity(0)); + EXPECT_EQ(1, NormalizeCapacity(1)); + EXPECT_EQ(3, NormalizeCapacity(2)); + EXPECT_EQ(3, NormalizeCapacity(3)); + EXPECT_EQ(7, NormalizeCapacity(4)); + EXPECT_EQ(7, NormalizeCapacity(7)); + EXPECT_EQ(15, NormalizeCapacity(8)); + EXPECT_EQ(15, NormalizeCapacity(15)); + EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1)); + EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2)); +} + +TEST(Util, GrowthAndCapacity) { + // Verify that GrowthToCapacity gives the minimum capacity that has enough + // growth. + for (size_t growth = 0; growth < 10000; ++growth) { + SCOPED_TRACE(growth); + size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth)); + // The capacity is large enough for `growth` + EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth)); + if (growth != 0 && capacity > 1) { + // There is no smaller capacity that works. 
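+      // Worked example, assuming the roughly 7/8 load bound implied by
+      // CapacityToGrowth(): growth 14 normalizes to capacity 15, since
+      // 15 - 15/8 == 14, while the next smaller capacity, 7, covers at
+      // most 7.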
+ EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth)); + } + } + + for (size_t capacity = Group::kWidth - 1; capacity < 10000; + capacity = 2 * capacity + 1) { + SCOPED_TRACE(capacity); + size_t growth = CapacityToGrowth(capacity); + EXPECT_THAT(growth, Lt(capacity)); + EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity); + EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity); + } +} + +TEST(Util, probe_seq) { + probe_seq<16> seq(0, 127); + auto gen = [&]() { + size_t res = seq.offset(); + seq.next(); + return res; + }; + std::vector offsets(8); + std::generate_n(offsets.begin(), 8, gen); + EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); + seq = probe_seq<16>(128, 127); + std::generate_n(offsets.begin(), 8, gen); + EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); +} + +TEST(BitMask, Smoke) { + EXPECT_FALSE((BitMask(0))); + EXPECT_TRUE((BitMask(5))); + + EXPECT_THAT((BitMask(0)), ElementsAre()); + EXPECT_THAT((BitMask(0x1)), ElementsAre(0)); + EXPECT_THAT((BitMask(0x2)), ElementsAre(1)); + EXPECT_THAT((BitMask(0x3)), ElementsAre(0, 1)); + EXPECT_THAT((BitMask(0x4)), ElementsAre(2)); + EXPECT_THAT((BitMask(0x5)), ElementsAre(0, 2)); + EXPECT_THAT((BitMask(0x55)), ElementsAre(0, 2, 4, 6)); + EXPECT_THAT((BitMask(0xAA)), ElementsAre(1, 3, 5, 7)); +} + +TEST(BitMask, WithShift) { + // See the non-SSE version of Group for details on what this math is for. + uint64_t ctrl = 0x1716151413121110; + uint64_t hash = 0x12; + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + uint64_t mask = (x - lsbs) & ~x & msbs; + EXPECT_EQ(0x0000000080800000, mask); + + BitMask b(mask); + EXPECT_EQ(*b, 2); +} + +TEST(BitMask, LeadingTrailing) { + EXPECT_EQ((BitMask(0x00001a40).LeadingZeros()), 3); + EXPECT_EQ((BitMask(0x00001a40).TrailingZeros()), 6); + + EXPECT_EQ((BitMask(0x00000001).LeadingZeros()), 15); + EXPECT_EQ((BitMask(0x00000001).TrailingZeros()), 0); + + EXPECT_EQ((BitMask(0x00008000).LeadingZeros()), 0); + EXPECT_EQ((BitMask(0x00008000).TrailingZeros()), 15); + + EXPECT_EQ((BitMask(0x0000008080808000).LeadingZeros()), 3); + EXPECT_EQ((BitMask(0x0000008080808000).TrailingZeros()), 1); + + EXPECT_EQ((BitMask(0x0000000000000080).LeadingZeros()), 7); + EXPECT_EQ((BitMask(0x0000000000000080).TrailingZeros()), 0); + + EXPECT_EQ((BitMask(0x8000000000000000).LeadingZeros()), 0); + EXPECT_EQ((BitMask(0x8000000000000000).TrailingZeros()), 7); +} + +TEST(Group, EmptyGroup) { + for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h)); +} + +TEST(Group, Match) { + if (Group::kWidth == 16) { + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.Match(0), ElementsAre()); + EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15)); + EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10)); + EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9)); + EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.Match(0), ElementsAre()); + EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7)); + EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Group, MatchEmpty) { + if (Group::kWidth == 16) { + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 
7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Group, MatchEmptyOrDeleted) { + if (Group::kWidth == 16) { + ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7, + 7, 5, 3, 1, 1, 1, 1, 1}; + EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1}; + EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Batch, DropDeletes) { + constexpr size_t kCapacity = 63; + constexpr size_t kGroupWidth = container_internal::Group::kWidth; + std::vector ctrl(kCapacity + 1 + kGroupWidth); + ctrl[kCapacity] = kSentinel; + std::vector pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted}; + for (size_t i = 0; i != kCapacity; ++i) { + ctrl[i] = pattern[i % pattern.size()]; + if (i < kGroupWidth - 1) + ctrl[i + kCapacity + 1] = pattern[i % pattern.size()]; + } + ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity); + ASSERT_EQ(ctrl[kCapacity], kSentinel); + for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) { + ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()]; + if (i == kCapacity) expected = kSentinel; + if (expected == kDeleted) expected = kEmpty; + if (IsFull(expected)) expected = kDeleted; + EXPECT_EQ(ctrl[i], expected) + << i << " " << int{pattern[i % pattern.size()]}; + } +} + +TEST(Group, CountLeadingEmptyOrDeleted) { + const std::vector empty_examples = {kEmpty, kDeleted}; + const std::vector full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel}; + + for (ctrl_t empty : empty_examples) { + std::vector e(Group::kWidth, empty); + EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted()); + for (ctrl_t full : full_examples) { + for (size_t i = 0; i != Group::kWidth; ++i) { + std::vector f(Group::kWidth, empty); + f[i] = full; + EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted()); + } + std::vector f(Group::kWidth, empty); + f[Group::kWidth * 2 / 3] = full; + f[Group::kWidth / 2] = full; + EXPECT_EQ( + Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted()); + } + } +} + +struct IntPolicy { + using slot_type = int64_t; + using key_type = int64_t; + using init_type = int64_t; + + static void construct(void*, int64_t* slot, int64_t v) { *slot = v; } + static void destroy(void*, int64_t*) {} + static void transfer(void*, int64_t* new_slot, int64_t* old_slot) { + *new_slot = *old_slot; + } + + static int64_t& element(slot_type* slot) { return *slot; } + + template + static auto apply(F&& f, int64_t x) -> decltype(std::forward(f)(x, x)) { + return std::forward(f)(x, x); + } +}; + +class StringPolicy { + template ::value>::type> + decltype(std::declval()( + std::declval(), std::piecewise_construct, + std::declval>(), + std::declval())) static apply_impl(F&& f, + std::pair, V> p) { + const absl::string_view& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); + } + + public: + struct slot_type { + struct ctor {}; + + template + slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) 
{} + + std::pair pair; + }; + + using key_type = std::string; + using init_type = std::pair; + + template + static void construct(allocator_type* alloc, slot_type* slot, Args... args) { + std::allocator_traits::construct( + *alloc, slot, typename slot_type::ctor(), std::forward(args)...); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(old_slot->pair)); + destroy(alloc, old_slot); + } + + static std::pair& element(slot_type* slot) { + return slot->pair; + } + + template + static auto apply(F&& f, Args&&... args) + -> decltype(apply_impl(std::forward(f), + PairArgs(std::forward(args)...))) { + return apply_impl(std::forward(f), + PairArgs(std::forward(args)...)); + } +}; + +struct StringHash : absl::Hash { + using is_transparent = void; +}; +struct StringEq : std::equal_to { + using is_transparent = void; +}; + +struct StringTable + : raw_hash_set> { + using Base = typename StringTable::raw_hash_set; + StringTable() {} + using Base::Base; +}; + +struct IntTable + : raw_hash_set, + std::equal_to, std::allocator> { + using Base = typename IntTable::raw_hash_set; + using Base::Base; +}; + +template +struct CustomAlloc : std::allocator { + CustomAlloc() {} + + template + CustomAlloc(const CustomAlloc& other) {} + + template struct rebind { + using other = CustomAlloc; + }; +}; + +struct CustomAllocIntTable + : raw_hash_set, + std::equal_to, CustomAlloc> { + using Base = typename CustomAllocIntTable::raw_hash_set; + using Base::Base; +}; + +struct BadFastHash { + template + size_t operator()(const T&) const { + return 0; + } +}; + +struct BadTable : raw_hash_set, + std::allocator> { + using Base = typename BadTable::raw_hash_set; + BadTable() {} + using Base::Base; +}; + +TEST(Table, EmptyFunctorOptimization) { + static_assert(std::is_empty>::value, ""); + static_assert(std::is_empty>::value, ""); + + struct MockTable { + void* ctrl; + void* slots; + size_t size; + size_t capacity; + size_t growth_left; + void* infoz; + }; + struct StatelessHash { + size_t operator()(absl::string_view) const { return 0; } + }; + struct StatefulHash : StatelessHash { + size_t dummy; + }; + + EXPECT_EQ( + sizeof(MockTable), + sizeof( + raw_hash_set, std::allocator>)); + + EXPECT_EQ( + sizeof(MockTable) + sizeof(StatefulHash), + sizeof( + raw_hash_set, std::allocator>)); +} + +TEST(Table, Empty) { + IntTable t; + EXPECT_EQ(0, t.size()); + EXPECT_TRUE(t.empty()); +} + +#ifdef __GNUC__ +template +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) { + asm volatile("" : : "r,m"(v) : "memory"); +} +#endif + +TEST(Table, Prefetch) { + IntTable t; + t.emplace(1); + // Works for both present and absent keys. + t.prefetch(1); + t.prefetch(2); + + // Do not run in debug mode, when prefetch is not implemented, or when + // sanitizers are enabled, or on WebAssembly. 
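+  // The timing loop below is a coarse sanity check rather than a benchmark:
+  // it compares cycle counts of plain find() against prefetch()-then-find()
+  // on a table sized well past L2, and only asserts a generous 1.3x gap.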
+#if defined(NDEBUG) && defined(__GNUC__) && defined(__x86_64__) && \ + !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \ + !defined(THREAD_SANITIZER) && !defined(UNDEFINED_BEHAVIOR_SANITIZER) && \ + !defined(__EMSCRIPTEN__) + const auto now = [] { return absl::base_internal::CycleClock::Now(); }; + + // Make size enough to not fit in L2 cache (16.7 Mb) + static constexpr int size = 1 << 22; + for (int i = 0; i < size; ++i) t.insert(i); + + int64_t no_prefetch = 0, prefetch = 0; + for (int iter = 0; iter < 10; ++iter) { + int64_t time = now(); + for (int i = 0; i < size; ++i) { + DoNotOptimize(t.find(i)); + } + no_prefetch += now() - time; + + time = now(); + for (int i = 0; i < size; ++i) { + t.prefetch(i + 20); + DoNotOptimize(t.find(i)); + } + prefetch += now() - time; + } + + // no_prefetch is at least 30% slower. + EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3); +#endif +} + +TEST(Table, LookupEmpty) { + IntTable t; + auto it = t.find(0); + EXPECT_TRUE(it == t.end()); +} + +TEST(Table, Insert1) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 0); + EXPECT_EQ(1, t.size()); + EXPECT_THAT(*t.find(0), 0); +} + +TEST(Table, Insert2) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 0); + EXPECT_EQ(1, t.size()); + EXPECT_TRUE(t.find(1) == t.end()); + res = t.emplace(1); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 1); + EXPECT_EQ(2, t.size()); + EXPECT_THAT(*t.find(0), 0); + EXPECT_THAT(*t.find(1), 1); +} + +TEST(Table, InsertCollision) { + BadTable t; + EXPECT_TRUE(t.find(1) == t.end()); + auto res = t.emplace(1); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 1); + EXPECT_EQ(1, t.size()); + + EXPECT_TRUE(t.find(2) == t.end()); + res = t.emplace(2); + EXPECT_THAT(*res.first, 2); + EXPECT_TRUE(res.second); + EXPECT_EQ(2, t.size()); + + EXPECT_THAT(*t.find(1), 1); + EXPECT_THAT(*t.find(2), 2); +} + +// Test that we do not add existent element in case we need to search through +// many groups with deleted elements +TEST(Table, InsertCollisionAndFindAfterDelete) { + BadTable t; // all elements go to the same group. + // Have at least 2 groups with Group::kWidth collisions + // plus some extra collisions in the last group. + constexpr size_t kNumInserts = Group::kWidth * 2 + 5; + for (size_t i = 0; i < kNumInserts; ++i) { + auto res = t.emplace(i); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, i); + EXPECT_EQ(i + 1, t.size()); + } + + // Remove elements one by one and check + // that we still can find all other elements. 
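+  // Because every key hashes to the same probe chain and the groups are
+  // full, each erase leaves a kDeleted tombstone rather than kEmpty, so the
+  // lookups below have to probe correctly across tombstoned groups.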
+ for (size_t i = 0; i < kNumInserts; ++i) { + EXPECT_EQ(1, t.erase(i)) << i; + for (size_t j = i + 1; j < kNumInserts; ++j) { + EXPECT_THAT(*t.find(j), j); + auto res = t.emplace(j); + EXPECT_FALSE(res.second) << i << " " << j; + EXPECT_THAT(*res.first, j); + EXPECT_EQ(kNumInserts - i - 1, t.size()); + } + } + EXPECT_TRUE(t.empty()); +} + +TEST(Table, LazyEmplace) { + StringTable t; + bool called = false; + auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) { + called = true; + f("abc", "ABC"); + }); + EXPECT_TRUE(called); + EXPECT_THAT(*it, Pair("abc", "ABC")); + called = false; + it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) { + called = true; + f("abc", "DEF"); + }); + EXPECT_FALSE(called); + EXPECT_THAT(*it, Pair("abc", "ABC")); +} + +TEST(Table, ContainsEmpty) { + IntTable t; + + EXPECT_FALSE(t.contains(0)); +} + +TEST(Table, Contains1) { + IntTable t; + + EXPECT_TRUE(t.insert(0).second); + EXPECT_TRUE(t.contains(0)); + EXPECT_FALSE(t.contains(1)); + + EXPECT_EQ(1, t.erase(0)); + EXPECT_FALSE(t.contains(0)); +} + +TEST(Table, Contains2) { + IntTable t; + + EXPECT_TRUE(t.insert(0).second); + EXPECT_TRUE(t.contains(0)); + EXPECT_FALSE(t.contains(1)); + + t.clear(); + EXPECT_FALSE(t.contains(0)); +} + +int decompose_constructed; +struct DecomposeType { + DecomposeType(int i) : i(i) { // NOLINT + ++decompose_constructed; + } + + explicit DecomposeType(const char* d) : DecomposeType(*d) {} + + int i; +}; + +struct DecomposeHash { + using is_transparent = void; + size_t operator()(DecomposeType a) const { return a.i; } + size_t operator()(int a) const { return a; } + size_t operator()(const char* a) const { return *a; } +}; + +struct DecomposeEq { + using is_transparent = void; + bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; } + bool operator()(DecomposeType a, int b) const { return a.i == b; } + bool operator()(DecomposeType a, const char* b) const { return a.i == *b; } +}; + +struct DecomposePolicy { + using slot_type = DecomposeType; + using key_type = DecomposeType; + using init_type = DecomposeType; + + template + static void construct(void*, DecomposeType* slot, T&& v) { + *slot = DecomposeType(std::forward(v)); + } + static void destroy(void*, DecomposeType*) {} + static DecomposeType& element(slot_type* slot) { return *slot; } + + template + static auto apply(F&& f, const T& x) -> decltype(std::forward(f)(x, x)) { + return std::forward(f)(x, x); + } +}; + +template +void TestDecompose(bool construct_three) { + DecomposeType elem{0}; + const int one = 1; + const char* three_p = "3"; + const auto& three = three_p; + + raw_hash_set> set1; + + decompose_constructed = 0; + int expected_constructed = 0; + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.insert(elem); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.insert(1); + EXPECT_EQ(++expected_constructed, decompose_constructed); + set1.emplace("3"); + EXPECT_EQ(++expected_constructed, decompose_constructed); + EXPECT_EQ(expected_constructed, decompose_constructed); + + { // insert(T&&) + set1.insert(1); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(const T&) + set1.insert(one); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(hint, T&&) + set1.insert(set1.begin(), 1); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(hint, const T&) + set1.insert(set1.begin(), one); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // 
emplace(...) + set1.emplace(1); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace("3"); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace(one); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace(three); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // emplace_hint(...) + set1.emplace_hint(set1.begin(), 1); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace_hint(set1.begin(), "3"); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace_hint(set1.begin(), one); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace_hint(set1.begin(), three); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + } +} + +TEST(Table, Decompose) { + TestDecompose(false); + + struct TransparentHashIntOverload { + size_t operator()(DecomposeType a) const { return a.i; } + size_t operator()(int a) const { return a; } + }; + struct TransparentEqIntOverload { + bool operator()(DecomposeType a, DecomposeType b) const { + return a.i == b.i; + } + bool operator()(DecomposeType a, int b) const { return a.i == b; } + }; + TestDecompose(true); + TestDecompose(true); + TestDecompose(true); +} + +// Returns the largest m such that a table with m elements has the same number +// of buckets as a table with n elements. +size_t MaxDensitySize(size_t n) { + IntTable t; + t.reserve(n); + for (size_t i = 0; i != n; ++i) t.emplace(i); + const size_t c = t.bucket_count(); + while (c == t.bucket_count()) t.emplace(n++); + return t.size() - 1; +} + +struct Modulo1000Hash { + size_t operator()(int x) const { return x % 1000; } +}; + +struct Modulo1000HashTable + : public raw_hash_set, + std::allocator> {}; + +// Test that rehash with no resize happen in case of many deleted slots. +TEST(Table, RehashWithNoResize) { + Modulo1000HashTable t; + // Adding the same length (and the same hash) strings + // to have at least kMinFullGroups groups + // with Group::kWidth collisions. Then fill up to MaxDensitySize; + const size_t kMinFullGroups = 7; + std::vector keys; + for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) { + int k = i * 1000; + t.emplace(k); + keys.push_back(k); + } + const size_t capacity = t.capacity(); + + // Remove elements from all groups except the first and the last one. + // All elements removed from full groups will be marked as kDeleted. + const size_t erase_begin = Group::kWidth / 2; + const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth; + for (size_t i = erase_begin; i < erase_end; ++i) { + EXPECT_EQ(1, t.erase(keys[i])) << i; + } + keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end); + + auto last_key = keys.back(); + size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key); + + // Make sure that we have to make a lot of probes for last key. + ASSERT_GT(last_key_num_probes, kMinFullGroups); + + int x = 1; + // Insert and erase one element, before inplace rehash happen. + while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) { + t.emplace(x); + ASSERT_EQ(capacity, t.capacity()); + // All elements should be there. 
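+    // Sketch of the mechanism under test: once tombstones exhaust
+    // growth_left(), prepare_insert() triggers rehash_and_grow_if_necessary();
+    // because size() <= CapacityToGrowth(capacity()) / 2 here, kDeleted slots
+    // are squashed in place instead of growing, which is why capacity is
+    // asserted unchanged above.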
+ ASSERT_TRUE(t.find(x) != t.end()) << x; + for (const auto& k : keys) { + ASSERT_TRUE(t.find(k) != t.end()) << k; + } + t.erase(x); + ++x; + } +} + +TEST(Table, InsertEraseStressTest) { + IntTable t; + const size_t kMinElementCount = 250; + std::deque keys; + size_t i = 0; + for (; i < MaxDensitySize(kMinElementCount); ++i) { + t.emplace(i); + keys.push_back(i); + } + const size_t kNumIterations = 1000000; + for (; i < kNumIterations; ++i) { + ASSERT_EQ(1, t.erase(keys.front())); + keys.pop_front(); + t.emplace(i); + keys.push_back(i); + } +} + +TEST(Table, InsertOverloads) { + StringTable t; + // These should all trigger the insert(init_type) overload. + t.insert({{}, {}}); + t.insert({"ABC", {}}); + t.insert({"DEF", "!!!"}); + + EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""), + Pair("DEF", "!!!"))); +} + +TEST(Table, LargeTable) { + IntTable t; + for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40); + for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40)); +} + +// Timeout if copy is quadratic as it was in Rust. +TEST(Table, EnsureNonQuadraticAsInRust) { + static const size_t kLargeSize = 1 << 15; + + IntTable t; + for (size_t i = 0; i != kLargeSize; ++i) { + t.insert(i); + } + + // If this is quadratic, the test will timeout. + IntTable t2; + for (const auto& entry : t) t2.insert(entry); +} + +TEST(Table, ClearBug) { + IntTable t; + constexpr size_t capacity = container_internal::Group::kWidth - 1; + constexpr size_t max_size = capacity / 2 + 1; + for (size_t i = 0; i < max_size; ++i) { + t.insert(i); + } + ASSERT_EQ(capacity, t.capacity()); + intptr_t original = reinterpret_cast(&*t.find(2)); + t.clear(); + ASSERT_EQ(capacity, t.capacity()); + for (size_t i = 0; i < max_size; ++i) { + t.insert(i); + } + ASSERT_EQ(capacity, t.capacity()); + intptr_t second = reinterpret_cast(&*t.find(2)); + // We are checking that original and second are close enough to each other + // that they are probably still in the same group. This is not strictly + // guaranteed. + EXPECT_LT(std::abs(original - second), + capacity * sizeof(IntTable::value_type)); +} + +TEST(Table, Erase) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_EQ(1, t.size()); + t.erase(res.first); + EXPECT_EQ(0, t.size()); + EXPECT_TRUE(t.find(0) == t.end()); +} + +TEST(Table, EraseMaintainsValidIterator) { + IntTable t; + const int kNumElements = 100; + for (int i = 0; i < kNumElements; i ++) { + EXPECT_TRUE(t.emplace(i).second); + } + EXPECT_EQ(t.size(), kNumElements); + + int num_erase_calls = 0; + auto it = t.begin(); + while (it != t.end()) { + t.erase(it++); + num_erase_calls++; + } + + EXPECT_TRUE(t.empty()); + EXPECT_EQ(num_erase_calls, kNumElements); +} + +// Collect N bad keys by following algorithm: +// 1. Create an empty table and reserve it to 2 * N. +// 2. Insert N random elements. +// 3. Take first Group::kWidth - 1 to bad_keys array. +// 4. Clear the table without resize. +// 5. 
Go to point 2 while N keys not collected +std::vector CollectBadMergeKeys(size_t N) { + static constexpr int kGroupSize = Group::kWidth - 1; + + auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector { + for (size_t i = b; i != e; ++i) { + t->emplace(i); + } + std::vector res; + res.reserve(kGroupSize); + auto it = t->begin(); + for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) { + res.push_back(*it); + } + return res; + }; + + std::vector bad_keys; + bad_keys.reserve(N); + IntTable t; + t.reserve(N * 2); + + for (size_t b = 0; bad_keys.size() < N; b += N) { + auto keys = topk_range(b, b + N, &t); + bad_keys.insert(bad_keys.end(), keys.begin(), keys.end()); + t.erase(t.begin(), t.end()); + EXPECT_TRUE(t.empty()); + } + return bad_keys; +} + +struct ProbeStats { + // Number of elements with specific probe length over all tested tables. + std::vector all_probes_histogram; + // Ratios total_probe_length/size for every tested table. + std::vector single_table_ratios; + + friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) { + ProbeStats res = a; + res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(), + b.all_probes_histogram.size())); + std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(), + res.all_probes_histogram.begin(), + res.all_probes_histogram.begin(), std::plus()); + res.single_table_ratios.insert(res.single_table_ratios.end(), + b.single_table_ratios.begin(), + b.single_table_ratios.end()); + return res; + } + + // Average ratio total_probe_length/size over tables. + double AvgRatio() const { + return std::accumulate(single_table_ratios.begin(), + single_table_ratios.end(), 0.0) / + single_table_ratios.size(); + } + + // Maximum ratio total_probe_length/size over tables. + double MaxRatio() const { + return *std::max_element(single_table_ratios.begin(), + single_table_ratios.end()); + } + + // Percentile ratio total_probe_length/size over tables. + double PercentileRatio(double Percentile = 0.95) const { + auto r = single_table_ratios; + auto mid = r.begin() + static_cast(r.size() * Percentile); + if (mid != r.end()) { + std::nth_element(r.begin(), mid, r.end()); + return *mid; + } else { + return MaxRatio(); + } + } + + // Maximum probe length over all elements and all tables. + size_t MaxProbe() const { return all_probes_histogram.size(); } + + // Fraction of elements with specified probe length. 
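+  // For example (made-up counts), a histogram of {90, 9, 1} normalizes to
+  // {0.90, 0.09, 0.01}: 90% of elements were found in their home group.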
+ std::vector ProbeNormalizedHistogram() const { + double total_elements = std::accumulate(all_probes_histogram.begin(), + all_probes_histogram.end(), 0ull); + std::vector res; + for (size_t p : all_probes_histogram) { + res.push_back(p / total_elements); + } + return res; + } + + size_t PercentileProbe(double Percentile = 0.99) const { + size_t idx = 0; + for (double p : ProbeNormalizedHistogram()) { + if (Percentile > p) { + Percentile -= p; + ++idx; + } else { + return idx; + } + } + return idx; + } + + friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) { + out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio() + << ", PercentileRatio:" << s.PercentileRatio() + << ", MaxProbe:" << s.MaxProbe() << ", Probes=["; + for (double p : s.ProbeNormalizedHistogram()) { + out << p << ","; + } + out << "]}"; + + return out; + } +}; + +struct ExpectedStats { + double avg_ratio; + double max_ratio; + std::vector> pecentile_ratios; + std::vector> pecentile_probes; + + friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) { + out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio + << ", PercentileRatios: ["; + for (auto el : s.pecentile_ratios) { + out << el.first << ":" << el.second << ", "; + } + out << "], PercentileProbes: ["; + for (auto el : s.pecentile_probes) { + out << el.first << ":" << el.second << ", "; + } + out << "]}"; + + return out; + } +}; + +void VerifyStats(size_t size, const ExpectedStats& exp, + const ProbeStats& stats) { + EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats; + EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats; + for (auto pr : exp.pecentile_ratios) { + EXPECT_LE(stats.PercentileRatio(pr.first), pr.second) + << size << " " << pr.first << " " << stats; + } + + for (auto pr : exp.pecentile_probes) { + EXPECT_LE(stats.PercentileProbe(pr.first), pr.second) + << size << " " << pr.first << " " << stats; + } +} + +using ProbeStatsPerSize = std::map; + +// Collect total ProbeStats on num_iters iterations of the following algorithm: +// 1. Create new table and reserve it to keys.size() * 2 +// 2. Insert all keys xored with seed +// 3. Collect ProbeStats from final table. +ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector& keys, + size_t num_iters) { + const size_t reserve_size = keys.size() * 2; + + ProbeStats stats; + + int64_t seed = 0x71b1a19b907d6e33; + while (num_iters--) { + seed = static_cast(static_cast(seed) * 17 + 13); + IntTable t1; + t1.reserve(reserve_size); + for (const auto& key : keys) { + t1.emplace(key ^ seed); + } + + auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1); + stats.all_probes_histogram.resize( + std::max(stats.all_probes_histogram.size(), probe_histogram.size())); + std::transform(probe_histogram.begin(), probe_histogram.end(), + stats.all_probes_histogram.begin(), + stats.all_probes_histogram.begin(), std::plus()); + + size_t total_probe_seq_length = 0; + for (size_t i = 0; i < probe_histogram.size(); ++i) { + total_probe_seq_length += i * probe_histogram[i]; + } + stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 / + keys.size()); + t1.erase(t1.begin(), t1.end()); + } + return stats; +} + +ExpectedStats XorSeedExpectedStats() { + constexpr bool kRandomizesInserts = +#ifdef NDEBUG + false; +#else // NDEBUG + true; +#endif // NDEBUG + + // The effective load factor is larger in non-opt mode because we insert + // elements out of order. 
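+  // Reading the braced literals below: {avg_ratio, max_ratio,
+  // {{percentile, ratio_bound}, ...}, {{percentile, probe_bound}, ...}}.
+  // For instance {0.05, 1.0, {{0.95, 0.5}}, {{0.95, 0}, ...}} asserts an
+  // average probe-length/size ratio under 0.05, a maximum under 1.0, a 95th
+  // percentile ratio at or under 0.5, and a 95th percentile probe length
+  // of 0.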
+ switch (container_internal::Group::kWidth) { + case 8: + if (kRandomizesInserts) { + return {0.05, + 1.0, + {{0.95, 0.5}}, + {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}}; + } else { + return {0.05, + 2.0, + {{0.95, 0.1}}, + {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}}; + } + case 16: + if (kRandomizesInserts) { + return {0.1, + 1.0, + {{0.95, 0.1}}, + {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; + } else { + return {0.05, + 1.0, + {{0.95, 0.05}}, + {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}}; + } + } + ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width"); + return {}; +} + +TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) { + ProbeStatsPerSize stats; + std::vector sizes = {Group::kWidth << 5, Group::kWidth << 10}; + for (size_t size : sizes) { + stats[size] = + CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200); + } + auto expected = XorSeedExpectedStats(); + for (size_t size : sizes) { + auto& stat = stats[size]; + VerifyStats(size, expected, stat); + } +} + +// Collect total ProbeStats on num_iters iterations of the following algorithm: +// 1. Create new table +// 2. Select 10% of keys and insert 10 elements key * 17 + j * 13 +// 3. Collect ProbeStats from final table +ProbeStats CollectProbeStatsOnLinearlyTransformedKeys( + const std::vector& keys, size_t num_iters) { + ProbeStats stats; + + std::random_device rd; + std::mt19937 rng(rd()); + auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; }; + std::uniform_int_distribution dist(0, keys.size()-1); + while (num_iters--) { + IntTable t1; + size_t num_keys = keys.size() / 10; + size_t start = dist(rng); + for (size_t i = 0; i != num_keys; ++i) { + for (size_t j = 0; j != 10; ++j) { + t1.emplace(linear_transform(keys[(i + start) % keys.size()], j)); + } + } + + auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1); + stats.all_probes_histogram.resize( + std::max(stats.all_probes_histogram.size(), probe_histogram.size())); + std::transform(probe_histogram.begin(), probe_histogram.end(), + stats.all_probes_histogram.begin(), + stats.all_probes_histogram.begin(), std::plus()); + + size_t total_probe_seq_length = 0; + for (size_t i = 0; i < probe_histogram.size(); ++i) { + total_probe_seq_length += i * probe_histogram[i]; + } + stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 / + t1.size()); + t1.erase(t1.begin(), t1.end()); + } + return stats; +} + +ExpectedStats LinearTransformExpectedStats() { + constexpr bool kRandomizesInserts = +#ifdef NDEBUG + false; +#else // NDEBUG + true; +#endif // NDEBUG + + // The effective load factor is larger in non-opt mode because we insert + // elements out of order. 
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {0.1,
+                0.5,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.15,
+                0.5,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {0.1,
+                0.4,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.05,
+                0.2,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
+      }
+  }
+  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+  return {};
+}
+
+TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
+  ProbeStatsPerSize stats;
+  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+  for (size_t size : sizes) {
+    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
+        CollectBadMergeKeys(size), 300);
+  }
+  auto expected = LinearTransformExpectedStats();
+  for (size_t size : sizes) {
+    auto& stat = stats[size];
+    VerifyStats(size, expected, stat);
+  }
+}
+
+TEST(Table, EraseCollision) {
+  BadTable t;
+
+  // 1 2 3
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_THAT(*t.find(2), 2);
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(3, t.size());
+
+  // 1 DELETED 3
+  t.erase(t.find(2));
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(2, t.size());
+
+  // DELETED DELETED 3
+  t.erase(t.find(1));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(1, t.size());
+
+  // DELETED DELETED DELETED
+  t.erase(t.find(3));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_TRUE(t.find(3) == t.end());
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, EraseInsertProbing) {
+  BadTable t(100);
+
+  // 1 2 3 4
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  t.emplace(4);
+
+  // 1 DELETED 3 DELETED
+  t.erase(t.find(2));
+  t.erase(t.find(4));
+
+  // 1 10 3 11 12
+  t.emplace(10);
+  t.emplace(11);
+  t.emplace(12);
+
+  EXPECT_EQ(5, t.size());
+  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
+}
+
+TEST(Table, Clear) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.clear();
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  t.clear();
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, Swap) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  IntTable u;
+  t.swap(u);
+  EXPECT_EQ(0, t.size());
+  EXPECT_EQ(1, u.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+  EXPECT_THAT(*u.find(0), 0);
+}
+
+TEST(Table, Rehash) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.emplace(0);
+  t.emplace(1);
+  EXPECT_EQ(2, t.size());
+  t.rehash(128);
+  EXPECT_EQ(2, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+  EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(1);
+  EXPECT_EQ(p, &*t.find(0));
+}
+
+TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
+  IntTable t;
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroDeallocatesEmptyTable) {
+  IntTable t;
+  t.emplace(0);
+  t.clear();
+  EXPECT_NE(0, t.bucket_count());
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroForcesRehash) {
+  IntTable t;
+  t.emplace(0);
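+  // What this test relies on: rehash(0) on a non-empty table forces a full
+  // rehash even when the resulting capacity is unchanged, so the element
+  // inserted above is expected to end up at a new address below.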
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(0);
+  EXPECT_NE(p, &*t.find(0));
+}
+
+TEST(Table, ConstructFromInitList) {
+  using P = std::pair<std::string, std::string>;
+  struct Q {
+    operator P() const { return {}; }
+  };
+  StringTable t = {P(), Q(), {}, {{}, {}}};
+}
+
+TEST(Table, CopyConstruct) {
+  IntTable t;
+  t.emplace(0);
+  EXPECT_EQ(1, t.size());
+  {
+    IntTable u(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u{t};
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u = t;
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+}
+
+TEST(Table, CopyConstructWithAlloc) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+struct ExplicitAllocIntTable
+    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+                   std::equal_to<int64_t>, Alloc<int64_t>> {
+  ExplicitAllocIntTable() {}
+};
+
+TEST(Table, AllocWithExplicitCtor) {
+  ExplicitAllocIntTable t;
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, MoveConstruct) {
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u(std::move(t));
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u{std::move(t)};
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u = std::move(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+}
+
+TEST(Table, MoveConstructWithAlloc) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopyAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = t;
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopySelfAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  t = *&t;  // Avoid -Wself-assign while still exercising self-assignment.
+  EXPECT_EQ(1, t.size());
+  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, MoveAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = std::move(t);
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, Equality) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
+                                                        {"aa", "bb"}};
+  t.insert(std::begin(v), std::end(v));
+  StringTable u = t;
+  EXPECT_EQ(u, t);
+}
+
+TEST(Table, Equality2) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
+                                                         {"aa", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, Equality3) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
+                                                         {"bb", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, NumDeletedRegression) {
+  IntTable t;
+  t.emplace(0);
+  t.erase(t.find(0));
+  // construct over a deleted slot.
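+  // (Regression guard: reusing the tombstoned slot must keep the table's
+  // internal deleted-slot accounting consistent so that the clear() below
+  // does not trip its bookkeeping; the exact counter involved is an
+  // implementation detail.)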
+  t.emplace(0);
+  t.clear();
+}
+
+TEST(Table, FindFullDeletedRegression) {
+  IntTable t;
+  for (int i = 0; i < 1000; ++i) {
+    t.emplace(i);
+    t.erase(t.find(i));
+  }
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
+  size_t n;
+  {
+    // Compute n such that n is the maximum number of elements before rehash.
+    IntTable t;
+    t.emplace(0);
+    size_t c = t.bucket_count();
+    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
+    --n;
+  }
+  IntTable t;
+  t.rehash(n);
+  const size_t c = t.bucket_count();
+  for (size_t i = 0; i != n; ++i) t.emplace(i);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+  t.erase(0);
+  t.emplace(0);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+}
+
+TEST(Table, NoThrowMoveConstruct) {
+  ASSERT_TRUE(
+      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<
+              std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
+  EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
+}
+
+TEST(Table, NoThrowMoveAssign) {
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
+  ASSERT_TRUE(
+      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
+  EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
+}
+
+TEST(Table, NoThrowSwappable) {
+  ASSERT_TRUE(
+      container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<
+              std::equal_to<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
+  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
+}
+
+TEST(Table, HeterogeneousLookup) {
+  struct Hash {
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const {
+      ADD_FAILURE();
+      return i;
+    }
+  };
+  struct Eq {
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(int64_t a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(double a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+  };
+
+  struct THash {
+    using is_transparent = void;
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const { return i; }
+  };
+  struct TEq {
+    using is_transparent = void;
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const { return a == b; }
+    bool operator()(int64_t a, double b) const { return a == b; }
+    bool operator()(double a, double b) const { return a == b; }
+  };
+
+  raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
+  // It will convert to int64_t before the query.
+  EXPECT_EQ(1, *s.find(double{1.1}));
+
+  raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
+  // It will try to use the double, and fail to find the object.
+  EXPECT_TRUE(ts.find(1.1) == ts.end());
+}
+
+template <class Table>
+using CallFind = decltype(std::declval<Table&>().find(17));
+
+template <class Table>
+using CallErase = decltype(std::declval<Table&>().erase(17));
+
+template <class Table>
+using CallExtract = decltype(std::declval<Table&>().extract(17));
+
+template <class Table>
+using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
+
+template <class Table>
+using CallCount = decltype(std::declval<Table&>().count(17));
+
+template