Merge "tp: delete all v1 db filtering and sorting code" into main
diff --git a/Android.bp b/Android.bp
index 4165586..4267433 100644
--- a/Android.bp
+++ b/Android.bp
@@ -10998,7 +10998,6 @@
filegroup {
name: "perfetto_src_trace_processor_db_unittests",
srcs: [
- "src/trace_processor/db/column_storage_overlay_unittest.cc",
"src/trace_processor/db/compare_unittest.cc",
"src/trace_processor/db/query_executor_unittest.cc",
"src/trace_processor/db/runtime_table_unittest.cc",
diff --git a/python/generators/trace_processor_table/serialize.py b/python/generators/trace_processor_table/serialize.py
index 1f92f44..2881ce0 100644
--- a/python/generators/trace_processor_table/serialize.py
+++ b/python/generators/trace_processor_table/serialize.py
@@ -675,16 +675,14 @@
Iterator IterateRows() {{ return Iterator(this, Table::IterateRows()); }}
ConstIterator FilterToIterator(
- const std::vector<Constraint>& cs,
- RowMap::OptimizeFor opt = RowMap::OptimizeFor::kMemory) const {{
+ const std::vector<Constraint>& cs) const {{
return ConstIterator(
- this, ApplyAndIterateRows(QueryToRowMap(cs, {{}}, opt)));
+ this, ApplyAndIterateRows(QueryToRowMap(cs, {{}})));
}}
Iterator FilterToIterator(
- const std::vector<Constraint>& cs,
- RowMap::OptimizeFor opt = RowMap::OptimizeFor::kMemory) {{
- return Iterator(this, ApplyAndIterateRows(QueryToRowMap(cs, {{}}, opt)));
+ const std::vector<Constraint>& cs) {{
+ return Iterator(this, ApplyAndIterateRows(QueryToRowMap(cs, {{}})));
}}
void ShrinkToFit() {{
diff --git a/src/trace_processor/containers/bit_vector.h b/src/trace_processor/containers/bit_vector.h
index 541bed2..884e1ca 100644
--- a/src/trace_processor/containers/bit_vector.h
+++ b/src/trace_processor/containers/bit_vector.h
@@ -299,11 +299,10 @@
// Creates a BitVector of size |end| with the bits between |start| and |end|
// filled by calling the filler function |f(index of bit)|.
//
- // As an example, suppose Range(3, 7, [](x) { return x < 5 }). This would
- // result in the following BitVector:
- // [0 0 0 1 1 0 0]
+ // As an example, suppose RangeForTesting(3, 7, [](x) { return x < 5 }). This
+ // would result in the following BitVector: [0 0 0 1 1 0 0]
template <typename Filler = bool(uint32_t)>
- static BitVector Range(uint32_t start, uint32_t end, Filler f) {
+ static BitVector RangeForTesting(uint32_t start, uint32_t end, Filler f) {
// Compute the block index and BitVector index where we start and end
// working one block at a time.
uint32_t start_fast_block = BlockCount(start);
diff --git a/src/trace_processor/containers/bit_vector_benchmark.cc b/src/trace_processor/containers/bit_vector_benchmark.cc
index 87d29af..fd723a6 100644
--- a/src/trace_processor/containers/bit_vector_benchmark.cc
+++ b/src/trace_processor/containers/bit_vector_benchmark.cc
@@ -222,28 +222,6 @@
}
BENCHMARK(BM_BitVectorResize);
-static void BM_BitVectorRangeFixedSize(benchmark::State& state) {
- static constexpr uint32_t kRandomSeed = 42;
- std::minstd_rand0 rnd_engine(kRandomSeed);
-
- uint32_t size = static_cast<uint32_t>(state.range(0));
- uint32_t set_percentage = static_cast<uint32_t>(state.range(1));
-
- std::vector<uint32_t> resize_fill_pool(size);
- for (uint32_t i = 0; i < size; ++i) {
- resize_fill_pool[i] = rnd_engine() % 100 < set_percentage ? 90 : 100;
- }
-
- for (auto _ : state) {
- auto filler = [&resize_fill_pool](uint32_t i) PERFETTO_ALWAYS_INLINE {
- return resize_fill_pool[i] < 95;
- };
- BitVector bv = BitVector::Range(0, size, filler);
- benchmark::ClobberMemory();
- }
-}
-BENCHMARK(BM_BitVectorRangeFixedSize)->Apply(BitVectorArgs);
-
static void BM_BitVectorUpdateSetBits(benchmark::State& state) {
static constexpr uint32_t kRandomSeed = 42;
std::minstd_rand0 rnd_engine(kRandomSeed);
diff --git a/src/trace_processor/containers/bit_vector_unittest.cc b/src/trace_processor/containers/bit_vector_unittest.cc
index 47a8aff..d803cef 100644
--- a/src/trace_processor/containers/bit_vector_unittest.cc
+++ b/src/trace_processor/containers/bit_vector_unittest.cc
@@ -455,7 +455,8 @@
}
TEST(BitVectorUnittest, IntersectRange) {
- BitVector bv = BitVector::Range(1, 20, [](uint32_t t) { return t % 2 == 0; });
+ BitVector bv =
+ BitVector::RangeForTesting(1, 20, [](uint32_t t) { return t % 2 == 0; });
BitVector intersected = bv.IntersectRange(3, 10);
ASSERT_EQ(intersected.IndexOfNthSet(0), 4u);
@@ -463,7 +464,8 @@
}
TEST(BitVectorUnittest, IntersectRangeFromStart) {
- BitVector bv = BitVector::Range(1, 20, [](uint32_t t) { return t % 2 == 0; });
+ BitVector bv =
+ BitVector::RangeForTesting(1, 20, [](uint32_t t) { return t % 2 == 0; });
BitVector intersected = bv.IntersectRange(0, 10);
ASSERT_EQ(intersected.IndexOfNthSet(0), 2u);
@@ -478,8 +480,8 @@
}
TEST(BitVectorUnittest, IntersectRangeAfterWord) {
- BitVector bv =
- BitVector::Range(64 + 1, 64 + 20, [](uint32_t t) { return t % 2 == 0; });
+ BitVector bv = BitVector::RangeForTesting(
+ 64 + 1, 64 + 20, [](uint32_t t) { return t % 2 == 0; });
BitVector intersected = bv.IntersectRange(64 + 3, 64 + 10);
ASSERT_EQ(intersected.IndexOfNthSet(0), 64 + 4u);
@@ -487,7 +489,8 @@
}
TEST(BitVectorUnittest, IntersectRangeSetBitsBeforeRange) {
- BitVector bv = BitVector::Range(10, 30, [](uint32_t t) { return t < 15; });
+ BitVector bv =
+ BitVector::RangeForTesting(10, 30, [](uint32_t t) { return t < 15; });
BitVector intersected = bv.IntersectRange(16, 50);
ASSERT_FALSE(intersected.CountSetBits());
@@ -503,8 +506,8 @@
}
TEST(BitVectorUnittest, IntersectRangeStressTest) {
- BitVector bv =
- BitVector::Range(65, 1024 + 1, [](uint32_t t) { return t % 2 == 0; });
+ BitVector bv = BitVector::RangeForTesting(
+ 65, 1024 + 1, [](uint32_t t) { return t % 2 == 0; });
BitVector intersected = bv.IntersectRange(30, 500);
ASSERT_EQ(intersected.IndexOfNthSet(0), 66u);
@@ -512,7 +515,8 @@
}
TEST(BitVectorUnittest, Range) {
- BitVector bv = BitVector::Range(1, 9, [](uint32_t t) { return t % 3 == 0; });
+ BitVector bv =
+ BitVector::RangeForTesting(1, 9, [](uint32_t t) { return t % 3 == 0; });
ASSERT_EQ(bv.size(), 9u);
ASSERT_FALSE(bv.IsSet(0));
@@ -523,8 +527,8 @@
}
TEST(BitVectorUnittest, RangeStressTest) {
- BitVector bv =
- BitVector::Range(1, 1025, [](uint32_t t) { return t % 3 == 0; });
+ BitVector bv = BitVector::RangeForTesting(
+ 1, 1025, [](uint32_t t) { return t % 3 == 0; });
ASSERT_EQ(bv.size(), 1025u);
ASSERT_FALSE(bv.IsSet(0));
for (uint32_t i = 1; i < 1025; ++i) {
@@ -654,8 +658,8 @@
}
TEST(BitVectorUnittest, NotBig) {
- BitVector bv =
- BitVector::Range(0, 1026, [](uint32_t i) { return i % 5 == 0; });
+ BitVector bv = BitVector::RangeForTesting(
+ 0, 1026, [](uint32_t i) { return i % 5 == 0; });
bv.Not();
EXPECT_EQ(bv.CountSetBits(), 820u);
@@ -673,13 +677,13 @@
}
TEST(BitVectorUnittest, OrBig) {
- BitVector bv =
- BitVector::Range(0, 1025, [](uint32_t i) { return i % 5 == 0; });
- BitVector bv_sec =
- BitVector::Range(0, 1025, [](uint32_t i) { return i % 3 == 0; });
+ BitVector bv = BitVector::RangeForTesting(
+ 0, 1025, [](uint32_t i) { return i % 5 == 0; });
+ BitVector bv_sec = BitVector::RangeForTesting(
+ 0, 1025, [](uint32_t i) { return i % 3 == 0; });
bv.Or(bv_sec);
- BitVector bv_or = BitVector::Range(
+ BitVector bv_or = BitVector::RangeForTesting(
0, 1025, [](uint32_t i) { return i % 5 == 0 || i % 3 == 0; });
ASSERT_EQ(bv.CountSetBits(), bv_or.CountSetBits());
diff --git a/src/trace_processor/containers/row_map.cc b/src/trace_processor/containers/row_map.cc
index 4ec4e8d..f7925ae 100644
--- a/src/trace_processor/containers/row_map.cc
+++ b/src/trace_processor/containers/row_map.cc
@@ -217,8 +217,7 @@
RowMap::RowMap() : RowMap(Range()) {}
-RowMap::RowMap(uint32_t start, uint32_t end, OptimizeFor optimize_for)
- : data_(Range{start, end}), optimize_for_(optimize_for) {}
+RowMap::RowMap(uint32_t start, uint32_t end) : data_(Range{start, end}) {}
RowMap::RowMap(Variant def) : data_(std::move(def)) {}
diff --git a/src/trace_processor/containers/row_map.h b/src/trace_processor/containers/row_map.h
index a4e31b6..b8f1d3d 100644
--- a/src/trace_processor/containers/row_map.h
+++ b/src/trace_processor/containers/row_map.h
@@ -174,22 +174,13 @@
const RowMap* rm_ = nullptr;
};
- // Enum to allow users of RowMap to decide whether they want to optimize for
- // memory usage or for speed of lookups.
- enum class OptimizeFor {
- kMemory,
- kLookupSpeed,
- };
-
// Creates an empty RowMap.
// By default this will be implemented using a range.
RowMap();
// Creates a RowMap containing the range of indices between |start| and |end|
// i.e. all indices between |start| (inclusive) and |end| (exclusive).
- RowMap(OutputIndex start,
- OutputIndex end,
- OptimizeFor optimize_for = OptimizeFor::kMemory);
+ RowMap(OutputIndex start, OutputIndex end);
// Creates a RowMap backed by a BitVector.
explicit RowMap(BitVector);
@@ -514,58 +505,6 @@
explicit RowMap(Variant);
- // TODO(lalitm): remove this when the coupling between RowMap and
- // ColumnStorage Selector is broken (after filtering is moved out of here).
- friend class ColumnStorageOverlay;
-
- template <typename Predicate>
- Variant FilterRange(Predicate p, Range r) {
- uint32_t count = r.size();
-
- // Optimization: if we are only going to scan a few indices, it's not
- // worth the haslle of working with a BitVector.
- constexpr uint32_t kSmallRangeLimit = 2048;
- bool is_small_range = count < kSmallRangeLimit;
-
- // Optimization: weif the cost of a BitVector is more than the highest
- // possible cost an index vector could have, use the index vector.
- uint32_t bit_vector_cost = BitVector::ApproxBytesCost(r.end);
- uint32_t index_vector_cost_ub = sizeof(uint32_t) * count;
-
- // If either of the conditions hold which make it better to use an
- // index vector, use it instead. Alternatively, if we are optimizing for
- // lookup speed, we also want to use an index vector.
- if (is_small_range || index_vector_cost_ub <= bit_vector_cost ||
- optimize_for_ == OptimizeFor::kLookupSpeed) {
- // Try and strike a good balance between not making the vector too
- // big and good performance.
- IndexVector iv(std::min(kSmallRangeLimit, count));
-
- uint32_t out_i = 0;
- for (uint32_t i = 0; i < count; ++i) {
- // If we reach the capacity add another small set of indices.
- if (PERFETTO_UNLIKELY(out_i == iv.size()))
- iv.resize(iv.size() + kSmallRangeLimit);
-
- // We keep this branch free by always writing the index but only
- // incrementing the out index if the return value is true.
- bool value = p(i + r.start);
- iv[out_i] = i + r.start;
- out_i += value;
- }
-
- // Make the vector the correct size and as small as possible.
- iv.resize(out_i);
- iv.shrink_to_fit();
-
- return std::move(iv);
- }
-
- // Otherwise, create a bitvector which spans the full range using
- // |p| as the filler for the bits between start and end.
- return BitVector::Range(r.start, r.end, p);
- }
-
PERFETTO_ALWAYS_INLINE static OutputIndex GetRange(Range r, InputRow row) {
return r.start + row;
}
@@ -596,7 +535,6 @@
}
Variant data_;
- OptimizeFor optimize_for_ = OptimizeFor::kMemory;
};
} // namespace trace_processor
diff --git a/src/trace_processor/containers/row_map_unittest.cc b/src/trace_processor/containers/row_map_unittest.cc
index 5af8b8c..28d5e0e 100644
--- a/src/trace_processor/containers/row_map_unittest.cc
+++ b/src/trace_processor/containers/row_map_unittest.cc
@@ -143,7 +143,7 @@
RowMap rm(10, 20);
// BitVector with values at 16, 18, 20 and so on.
RowMap selector(
- BitVector::Range(4, 8, [](uint32_t x) { return x % 2 == 0; }));
+ BitVector::RangeForTesting(4, 8, [](uint32_t x) { return x % 2 == 0; }));
RowMap selected = rm.SelectRows(selector);
ASSERT_EQ(selected.size(), 2u);
ASSERT_EQ(selected.Get(0), 14u);
@@ -158,7 +158,8 @@
}
TEST(RowMapUnittest, SelectRowsFromBVWithRange) {
- RowMap rm(BitVector::Range(10, 50, [](uint32_t x) { return x % 2 == 0; }));
+ RowMap rm(BitVector::RangeForTesting(10, 50,
+ [](uint32_t x) { return x % 2 == 0; }));
RowMap selector(4, 8);
RowMap selected = rm.SelectRows(selector);
@@ -167,17 +168,19 @@
}
TEST(RowMapUnittest, SelectRowsFromBVWithBV) {
- RowMap rm(BitVector::Range(10, 50, [](uint32_t x) { return x % 2 == 0; }));
+ RowMap rm(BitVector::RangeForTesting(10, 50,
+ [](uint32_t x) { return x % 2 == 0; }));
// BitVector with values at 16, 18, 20 and so on.
RowMap selector(
- BitVector::Range(4, 8, [](uint32_t x) { return x % 2 == 0; }));
+ BitVector::RangeForTesting(4, 8, [](uint32_t x) { return x % 2 == 0; }));
RowMap selected = rm.SelectRows(selector);
ASSERT_EQ(selected.size(), 2u);
ASSERT_EQ(selected.Get(0), 18u);
}
TEST(RowMapUnittest, SelectRowsFromBVWithIV) {
- RowMap rm(BitVector::Range(10, 50, [](uint32_t x) { return x % 2 == 0; }));
+ RowMap rm(BitVector::RangeForTesting(10, 50,
+ [](uint32_t x) { return x % 2 == 0; }));
RowMap selector(std::vector<uint32_t>{4, 6});
RowMap selected = rm.SelectRows(selector);
ASSERT_EQ(selected.size(), 2u);
@@ -196,7 +199,7 @@
TEST(RowMapUnittest, SelectRowsFromIVWithBV) {
RowMap rm(std::vector<uint32_t>{10, 12, 14, 16, 18, 20, 22, 24});
RowMap selector(
- BitVector::Range(4, 8, [](uint32_t x) { return x % 2 == 0; }));
+ BitVector::RangeForTesting(4, 8, [](uint32_t x) { return x % 2 == 0; }));
RowMap selected = rm.SelectRows(selector);
ASSERT_EQ(selected.size(), 2u);
ASSERT_EQ(selected.Get(0), 18u);
diff --git a/src/trace_processor/db/BUILD.gn b/src/trace_processor/db/BUILD.gn
index cbd4304..7609e25 100644
--- a/src/trace_processor/db/BUILD.gn
+++ b/src/trace_processor/db/BUILD.gn
@@ -69,7 +69,6 @@
perfetto_unittest_source_set("unittests") {
testonly = true
sources = [
- "column_storage_overlay_unittest.cc",
"compare_unittest.cc",
"query_executor_unittest.cc",
"runtime_table_unittest.cc",
@@ -103,9 +102,6 @@
"../tables:tables_python",
"column",
]
- sources = [
- "column_storage_overlay_benchmark.cc",
- "query_executor_benchmark.cc",
- ]
+ sources = [ "query_executor_benchmark.cc" ]
}
}
diff --git a/src/trace_processor/db/column.cc b/src/trace_processor/db/column.cc
index b675695..1eb7c05 100644
--- a/src/trace_processor/db/column.cc
+++ b/src/trace_processor/db/column.cc
@@ -15,14 +15,16 @@
*/
#include "src/trace_processor/db/column.h"
-#include "perfetto/base/logging.h"
-#include "src/trace_processor/db/compare.h"
-#include "src/trace_processor/db/table.h"
-#include "src/trace_processor/util/glob.h"
-#include "src/trace_processor/util/regex.h"
+#include <cstdint>
+#include <limits>
-namespace perfetto {
-namespace trace_processor {
+#include "column/types.h"
+#include "column_storage.h"
+#include "column_storage_overlay.h"
+#include "perfetto/base/logging.h"
+#include "src/trace_processor/db/table.h"
+
+namespace perfetto::trace_processor {
ColumnLegacy::ColumnLegacy(const ColumnLegacy& column,
uint32_t col_idx,
@@ -50,446 +52,19 @@
ColumnLegacy ColumnLegacy::DummyColumn(const char* name,
uint32_t col_idx_in_table) {
- return ColumnLegacy(name, ColumnType::kDummy, Flag::kNoFlag, col_idx_in_table,
- std::numeric_limits<uint32_t>::max(), nullptr);
+ return {name,
+ ColumnType::kDummy,
+ Flag::kNoFlag,
+ col_idx_in_table,
+ std::numeric_limits<uint32_t>::max(),
+ nullptr};
}
ColumnLegacy ColumnLegacy::IdColumn(uint32_t col_idx,
uint32_t overlay_idx,
const char* name,
uint32_t flags) {
- return ColumnLegacy(name, ColumnType::kId, flags, col_idx, overlay_idx,
- nullptr);
-}
-
-void ColumnLegacy::StableSort(bool desc, std::vector<uint32_t>* idx) const {
- if (desc) {
- StableSort<true /* desc */>(idx);
- } else {
- StableSort<false /* desc */>(idx);
- }
-}
-
-void ColumnLegacy::FilterIntoSlow(FilterOp op,
- SqlValue value,
- RowMap* rm) const {
- switch (type_) {
- case ColumnType::kInt32: {
- if (IsNullable()) {
- FilterIntoNumericSlow<int32_t, true /* is_nullable */>(op, value, rm);
- } else {
- FilterIntoNumericSlow<int32_t, false /* is_nullable */>(op, value, rm);
- }
- break;
- }
- case ColumnType::kUint32: {
- if (IsNullable()) {
- FilterIntoNumericSlow<uint32_t, true /* is_nullable */>(op, value, rm);
- } else {
- FilterIntoNumericSlow<uint32_t, false /* is_nullable */>(op, value, rm);
- }
- break;
- }
- case ColumnType::kInt64: {
- if (IsNullable()) {
- FilterIntoNumericSlow<int64_t, true /* is_nullable */>(op, value, rm);
- } else {
- FilterIntoNumericSlow<int64_t, false /* is_nullable */>(op, value, rm);
- }
- break;
- }
- case ColumnType::kDouble: {
- if (IsNullable()) {
- FilterIntoNumericSlow<double, true /* is_nullable */>(op, value, rm);
- } else {
- FilterIntoNumericSlow<double, false /* is_nullable */>(op, value, rm);
- }
- break;
- }
- case ColumnType::kString: {
- FilterIntoStringSlow(op, value, rm);
- break;
- }
- case ColumnType::kId: {
- FilterIntoIdSlow(op, value, rm);
- break;
- }
- case ColumnType::kDummy:
- PERFETTO_FATAL("FilterIntoSlow not allowed on dummy column");
- }
-}
-
-template <typename T, bool is_nullable>
-void ColumnLegacy::FilterIntoNumericSlow(FilterOp op,
- SqlValue value,
- RowMap* rm) const {
- PERFETTO_DCHECK(IsNullable() == is_nullable);
- PERFETTO_DCHECK(type_ == ColumnTypeHelper<T>::ToColumnType());
- PERFETTO_DCHECK(std::is_arithmetic<T>::value);
-
- if (op == FilterOp::kIsNull) {
- PERFETTO_DCHECK(value.is_null());
- if (is_nullable) {
- overlay().FilterInto(rm, [this](uint32_t row) {
- return !storage<std::optional<T>>().Get(row).has_value();
- });
- } else {
- rm->Clear();
- }
- return;
- } else if (op == FilterOp::kIsNotNull) {
- PERFETTO_DCHECK(value.is_null());
- if (is_nullable) {
- overlay().FilterInto(rm, [this](uint32_t row) {
- return storage<std::optional<T>>().Get(row).has_value();
- });
- }
- return;
- }
-
- if (value.type == SqlValue::Type::kDouble) {
- double double_value = value.double_value;
- if (std::is_same<T, double>::value) {
- auto fn = [double_value](T v) {
- // We static cast here as this code will be compiled even when T ==
- // int64_t as we don't have if constexpr in C++11. In reality the cast
- // is a noop but we cannot statically verify that for the compiler.
- return compare::Numeric(static_cast<double>(v), double_value);
- };
- FilterIntoNumericWithComparatorSlow<T, is_nullable>(op, rm, fn);
- } else {
- auto fn = [double_value](T v) {
- // We static cast here as this code will be compiled even when T ==
- // double as we don't have if constexpr in C++11. In reality the cast
- // is a noop but we cannot statically verify that for the compiler.
- return compare::LongToDouble(static_cast<int64_t>(v), double_value);
- };
- FilterIntoNumericWithComparatorSlow<T, is_nullable>(op, rm, fn);
- }
- } else if (value.type == SqlValue::Type::kLong) {
- int64_t long_value = value.long_value;
- if (std::is_same<T, double>::value) {
- auto fn = [long_value](T v) {
- // We negate the return value as the long is always the first
- // parameter for this function even though the LHS of the comparator
- // should actually be |v|. This saves us having a duplicate
- // implementation of the comparision function.
- return -compare::LongToDouble(long_value, static_cast<double>(v));
- };
- FilterIntoNumericWithComparatorSlow<T, is_nullable>(op, rm, fn);
- } else {
- auto fn = [long_value](T v) {
- // We static cast here as this code will be compiled even when T ==
- // double as we don't have if constexpr in C++11. In reality the cast
- // is a noop but we cannot statically verify that for the compiler.
- return compare::Numeric(static_cast<int64_t>(v), long_value);
- };
- FilterIntoNumericWithComparatorSlow<T, is_nullable>(op, rm, fn);
- }
- } else {
- rm->Clear();
- }
-}
-
-template <typename T, bool is_nullable, typename Comparator>
-void ColumnLegacy::FilterIntoNumericWithComparatorSlow(FilterOp op,
- RowMap* rm,
- Comparator cmp) const {
- switch (op) {
- case FilterOp::kLt:
- overlay().FilterInto(rm, [this, &cmp](uint32_t idx) {
- if (is_nullable) {
- auto opt_value = storage<std::optional<T>>().Get(idx);
- return opt_value && cmp(*opt_value) < 0;
- }
- return cmp(storage<T>().Get(idx)) < 0;
- });
- break;
- case FilterOp::kEq:
- overlay().FilterInto(rm, [this, &cmp](uint32_t idx) {
- if (is_nullable) {
- auto opt_value = storage<std::optional<T>>().Get(idx);
- return opt_value && cmp(*opt_value) == 0;
- }
- return cmp(storage<T>().Get(idx)) == 0;
- });
- break;
- case FilterOp::kGt:
- overlay().FilterInto(rm, [this, &cmp](uint32_t idx) {
- if (is_nullable) {
- auto opt_value = storage<std::optional<T>>().Get(idx);
- return opt_value && cmp(*opt_value) > 0;
- }
- return cmp(storage<T>().Get(idx)) > 0;
- });
- break;
- case FilterOp::kNe:
- overlay().FilterInto(rm, [this, &cmp](uint32_t idx) {
- if (is_nullable) {
- auto opt_value = storage<std::optional<T>>().Get(idx);
- return opt_value && cmp(*opt_value) != 0;
- }
- return cmp(storage<T>().Get(idx)) != 0;
- });
- break;
- case FilterOp::kLe:
- overlay().FilterInto(rm, [this, &cmp](uint32_t idx) {
- if (is_nullable) {
- auto opt_value = storage<std::optional<T>>().Get(idx);
- return opt_value && cmp(*opt_value) <= 0;
- }
- return cmp(storage<T>().Get(idx)) <= 0;
- });
- break;
- case FilterOp::kGe:
- overlay().FilterInto(rm, [this, &cmp](uint32_t idx) {
- if (is_nullable) {
- auto opt_value = storage<std::optional<T>>().Get(idx);
- return opt_value && cmp(*opt_value) >= 0;
- }
- return cmp(storage<T>().Get(idx)) >= 0;
- });
- break;
- case FilterOp::kGlob:
- rm->Clear();
- break;
- case FilterOp::kRegex:
- case FilterOp::kIsNull:
- case FilterOp::kIsNotNull:
- PERFETTO_FATAL("Should be handled above");
- }
-}
-
-void ColumnLegacy::FilterIntoStringSlow(FilterOp op,
- SqlValue value,
- RowMap* rm) const {
- PERFETTO_DCHECK(type_ == ColumnType::kString);
-
- if (op == FilterOp::kIsNull) {
- PERFETTO_DCHECK(value.is_null());
- overlay().FilterInto(rm, [this](uint32_t row) {
- return GetStringPoolStringAtIdx(row).data() == nullptr;
- });
- return;
- } else if (op == FilterOp::kIsNotNull) {
- PERFETTO_DCHECK(value.is_null());
- overlay().FilterInto(rm, [this](uint32_t row) {
- return GetStringPoolStringAtIdx(row).data() != nullptr;
- });
- return;
- }
-
- if (value.type != SqlValue::Type::kString) {
- rm->Clear();
- return;
- }
-
- NullTermStringView str_value = value.string_value;
- PERFETTO_DCHECK(str_value.data() != nullptr);
-
- switch (op) {
- case FilterOp::kLt:
- overlay().FilterInto(rm, [this, str_value](uint32_t idx) {
- auto v = GetStringPoolStringAtIdx(idx);
- return v.data() != nullptr && compare::String(v, str_value) < 0;
- });
- break;
- case FilterOp::kEq:
- overlay().FilterInto(rm, [this, str_value](uint32_t idx) {
- auto v = GetStringPoolStringAtIdx(idx);
- return v.data() != nullptr && compare::String(v, str_value) == 0;
- });
- break;
- case FilterOp::kGt:
- overlay().FilterInto(rm, [this, str_value](uint32_t idx) {
- auto v = GetStringPoolStringAtIdx(idx);
- return v.data() != nullptr && compare::String(v, str_value) > 0;
- });
- break;
- case FilterOp::kNe:
- overlay().FilterInto(rm, [this, str_value](uint32_t idx) {
- auto v = GetStringPoolStringAtIdx(idx);
- return v.data() != nullptr && compare::String(v, str_value) != 0;
- });
- break;
- case FilterOp::kLe:
- overlay().FilterInto(rm, [this, str_value](uint32_t idx) {
- auto v = GetStringPoolStringAtIdx(idx);
- return v.data() != nullptr && compare::String(v, str_value) <= 0;
- });
- break;
- case FilterOp::kGe:
- overlay().FilterInto(rm, [this, str_value](uint32_t idx) {
- auto v = GetStringPoolStringAtIdx(idx);
- return v.data() != nullptr && compare::String(v, str_value) >= 0;
- });
- break;
- case FilterOp::kGlob: {
- util::GlobMatcher matcher = util::GlobMatcher::FromPattern(str_value);
- overlay().FilterInto(rm, [this, &matcher](uint32_t idx) {
- auto v = GetStringPoolStringAtIdx(idx);
- return v.data() != nullptr && matcher.Matches(v);
- });
- break;
- }
- case FilterOp::kRegex: {
- if constexpr (regex::IsRegexSupported()) {
- auto regex = regex::Regex::Create(str_value.c_str());
- if (!regex.status().ok()) {
- rm->Clear();
- break;
- }
- overlay().FilterInto(rm, [this, ®ex](uint32_t idx) {
- auto v = GetStringPoolStringAtIdx(idx);
- return v.data() != nullptr && regex->Search(v.c_str());
- });
- } else {
- PERFETTO_FATAL("Regex not supported");
- }
- break;
- }
- case FilterOp::kIsNull:
- case FilterOp::kIsNotNull:
- PERFETTO_FATAL("Should be handled above");
- }
-}
-
-void ColumnLegacy::FilterIntoIdSlow(FilterOp op,
- SqlValue value,
- RowMap* rm) const {
- PERFETTO_DCHECK(type_ == ColumnType::kId);
-
- if (op == FilterOp::kIsNull) {
- PERFETTO_DCHECK(value.is_null());
- rm->Clear();
- return;
- } else if (op == FilterOp::kIsNotNull) {
- PERFETTO_DCHECK(value.is_null());
- return;
- }
-
- if (value.type != SqlValue::Type::kLong) {
- rm->Clear();
- return;
- }
-
- uint32_t id_value = static_cast<uint32_t>(value.long_value);
- switch (op) {
- case FilterOp::kLt:
- overlay().FilterInto(rm, [id_value](uint32_t idx) {
- return compare::Numeric(idx, id_value) < 0;
- });
- break;
- case FilterOp::kEq:
- overlay().FilterInto(rm, [id_value](uint32_t idx) {
- return compare::Numeric(idx, id_value) == 0;
- });
- break;
- case FilterOp::kGt:
- overlay().FilterInto(rm, [id_value](uint32_t idx) {
- return compare::Numeric(idx, id_value) > 0;
- });
- break;
- case FilterOp::kNe:
- overlay().FilterInto(rm, [id_value](uint32_t idx) {
- return compare::Numeric(idx, id_value) != 0;
- });
- break;
- case FilterOp::kLe:
- overlay().FilterInto(rm, [id_value](uint32_t idx) {
- return compare::Numeric(idx, id_value) <= 0;
- });
- break;
- case FilterOp::kGe:
- overlay().FilterInto(rm, [id_value](uint32_t idx) {
- return compare::Numeric(idx, id_value) >= 0;
- });
- break;
- case FilterOp::kGlob:
- case FilterOp::kRegex:
- rm->Clear();
- break;
- case FilterOp::kIsNull:
- case FilterOp::kIsNotNull:
- PERFETTO_FATAL("Should be handled above");
- }
-}
-
-template <bool desc>
-void ColumnLegacy::StableSort(std::vector<uint32_t>* out) const {
- switch (type_) {
- case ColumnType::kInt32: {
- if (IsNullable()) {
- StableSortNumeric<desc, int32_t, true /* is_nullable */>(out);
- } else {
- StableSortNumeric<desc, int32_t, false /* is_nullable */>(out);
- }
- break;
- }
- case ColumnType::kUint32: {
- if (IsNullable()) {
- StableSortNumeric<desc, uint32_t, true /* is_nullable */>(out);
- } else {
- StableSortNumeric<desc, uint32_t, false /* is_nullable */>(out);
- }
- break;
- }
- case ColumnType::kInt64: {
- if (IsNullable()) {
- StableSortNumeric<desc, int64_t, true /* is_nullable */>(out);
- } else {
- StableSortNumeric<desc, int64_t, false /* is_nullable */>(out);
- }
- break;
- }
- case ColumnType::kDouble: {
- if (IsNullable()) {
- StableSortNumeric<desc, double, true /* is_nullable */>(out);
- } else {
- StableSortNumeric<desc, double, false /* is_nullable */>(out);
- }
- break;
- }
- case ColumnType::kString: {
- overlay().StableSort(out, [this](uint32_t a_idx, uint32_t b_idx) {
- auto a_str = GetStringPoolStringAtIdx(a_idx);
- auto b_str = GetStringPoolStringAtIdx(b_idx);
-
- int res = compare::NullableString(a_str, b_str);
- return desc ? res > 0 : res < 0;
- });
- break;
- }
- case ColumnType::kId:
- overlay().StableSort(out, [](uint32_t a_idx, uint32_t b_idx) {
- int res = compare::Numeric(a_idx, b_idx);
- return desc ? res > 0 : res < 0;
- });
- break;
- case ColumnType::kDummy:
- PERFETTO_FATAL("StableSort not allowed on dummy column");
- }
-}
-
-template <bool desc, typename T, bool is_nullable>
-void ColumnLegacy::StableSortNumeric(std::vector<uint32_t>* out) const {
- PERFETTO_DCHECK(IsNullable() == is_nullable);
- PERFETTO_DCHECK(ColumnTypeHelper<T>::ToColumnType() == type_);
-
- overlay().StableSort(out, [this](uint32_t a_idx, uint32_t b_idx) {
- if (is_nullable) {
- auto a_val = storage<std::optional<T>>().Get(a_idx);
- auto b_val = storage<std::optional<T>>().Get(b_idx);
-
- int res = compare::NullableNumeric(a_val, b_val);
- return desc ? res > 0 : res < 0;
- }
- auto a_val = storage<T>().Get(a_idx);
- auto b_val = storage<T>().Get(b_idx);
-
- int res = compare::Numeric(a_val, b_val);
- return desc ? res > 0 : res < 0;
- });
+ return {name, ColumnType::kId, flags, col_idx, overlay_idx, nullptr};
}
const ColumnStorageOverlay& ColumnLegacy::overlay() const {
@@ -497,5 +72,4 @@
return table_->overlays_[overlay_index()];
}
-} // namespace trace_processor
-} // namespace perfetto
+} // namespace perfetto::trace_processor
diff --git a/src/trace_processor/db/column.h b/src/trace_processor/db/column.h
index 988b12f..262b346 100644
--- a/src/trace_processor/db/column.h
+++ b/src/trace_processor/db/column.h
@@ -242,45 +242,6 @@
PERFETTO_FATAL("For GCC");
}
- // Sorts |idx| in ascending or descending order (determined by |desc|) based
- // on the contents of this column.
- void StableSort(bool desc, std::vector<uint32_t>* idx) const;
-
- // Updates the given RowMap by only keeping rows where this column meets the
- // given filter constraint.
- void FilterInto(FilterOp op, SqlValue value, RowMap* rm) const {
- if (IsId() && op == FilterOp::kEq) {
- // If this is an equality constraint on an id column, try and find the
- // single row with the id (if it exists).
- auto opt_idx = IndexOf(value);
- if (opt_idx) {
- rm->IntersectExact(*opt_idx);
- } else {
- rm->Clear();
- }
- return;
- }
-
- if (IsSetId() && op == FilterOp::kEq && value.type == SqlValue::kLong) {
- // If the column is sorted and the value has the same type as the column,
- // we should be able to just do a binary search to find the range of rows
- // instead of a full table scan.
- FilterIntoSetIdEq(value.AsLong(), rm);
- return;
- }
-
- if (IsSorted() && value.type == type()) {
- // If the column is sorted and the value has the same type as the column,
- // we should be able to just do a binary search to find the range of rows
- // instead of a full table scan.
- bool handled = FilterIntoSorted(op, value, rm);
- if (handled)
- return;
- }
-
- FilterIntoSlow(op, value, rm);
- }
-
// Returns the minimum value in this column. Returns std::nullopt if this
// column is empty.
std::optional<SqlValue> Min() const {
@@ -495,122 +456,6 @@
return ToSqlValue(storage<T>().Get(idx));
}
- // Optimized filter method for sorted columns.
- // Returns whether the constraint was handled by the method.
- bool FilterIntoSorted(FilterOp op, SqlValue value, RowMap* rm) const {
- PERFETTO_DCHECK(IsSorted());
- PERFETTO_DCHECK(value.type == type());
-
- Iterator b(this, 0);
- Iterator e(this, overlay().size());
- switch (op) {
- case FilterOp::kEq: {
- uint32_t beg = std::distance(
- b, std::lower_bound(b, e, value, &compare::SqlValueComparator));
- uint32_t end = std::distance(
- b, std::upper_bound(b, e, value, &compare::SqlValueComparator));
- rm->Intersect({beg, end});
- return true;
- }
- case FilterOp::kLe: {
- uint32_t end = std::distance(
- b, std::upper_bound(b, e, value, &compare::SqlValueComparator));
- rm->Intersect({0, end});
- return true;
- }
- case FilterOp::kLt: {
- uint32_t end = std::distance(
- b, std::lower_bound(b, e, value, &compare::SqlValueComparator));
- rm->Intersect({0, end});
- return true;
- }
- case FilterOp::kGe: {
- uint32_t beg = std::distance(
- b, std::lower_bound(b, e, value, &compare::SqlValueComparator));
- rm->Intersect({beg, overlay().size()});
- return true;
- }
- case FilterOp::kGt: {
- uint32_t beg = std::distance(
- b, std::upper_bound(b, e, value, &compare::SqlValueComparator));
- rm->Intersect({beg, overlay().size()});
- return true;
- }
- case FilterOp::kNe:
- case FilterOp::kIsNull:
- case FilterOp::kIsNotNull:
- case FilterOp::kGlob:
- case FilterOp::kRegex:
- break;
- }
- return false;
- }
-
- void FilterIntoSetIdEq(int64_t value, RowMap* rm) const {
- PERFETTO_DCHECK(!IsNullable());
-
- uint32_t filter_set_id = static_cast<uint32_t>(value);
- const auto& st = storage<uint32_t>();
- const ColumnStorageOverlay& ov = overlay();
-
- // If the set id is beyond the end of the column, there's no chance that
- // it exists.
- if (PERFETTO_UNLIKELY(filter_set_id >= st.size())) {
- rm->Clear();
- return;
- }
-
- uint32_t set_id = st.Get(ov.Get(filter_set_id));
-
- // If the set at that index does not equal the set id we're looking for, the
- // set id doesn't exist either.
- if (PERFETTO_UNLIKELY(set_id != filter_set_id)) {
- PERFETTO_DCHECK(set_id < filter_set_id);
- rm->Clear();
- return;
- }
-
- // Otherwise, find the end of the set and return the intersection for this.
- for (uint32_t i = set_id + 1; i < ov.size(); ++i) {
- if (st.Get(ov.Get(i)) != filter_set_id) {
- RowMap r(set_id, i);
- rm->Intersect(r);
- return;
- }
- }
- RowMap r(set_id, ov.size());
- rm->Intersect(r);
- }
-
- // Slow path filter method which will perform a full table scan.
- void FilterIntoSlow(FilterOp op, SqlValue value, RowMap* rm) const;
-
- // Slow path filter method for numerics which will perform a full table scan.
- template <typename T, bool is_nullable>
- void FilterIntoNumericSlow(FilterOp op, SqlValue value, RowMap* rm) const;
-
- // Slow path filter method for numerics with a comparator which will perform a
- // full table scan.
- template <typename T, bool is_nullable, typename Comparator = int(T)>
- void FilterIntoNumericWithComparatorSlow(FilterOp op,
- RowMap* rm,
- Comparator cmp) const;
-
- // Slow path filter method for strings which will perform a full table scan.
- void FilterIntoStringSlow(FilterOp op, SqlValue value, RowMap* rm) const;
-
- // Slow path filter method for ids which will perform a full table scan.
- void FilterIntoIdSlow(FilterOp op, SqlValue value, RowMap* rm) const;
-
- // Stable sorts this column storing the result in |out|.
- template <bool desc>
- void StableSort(std::vector<uint32_t>* out) const;
-
- // Stable sorts this column storing the result in |out|.
- // |T| and |is_nullable| should match the type and nullability of this column.
- template <bool desc, typename T, bool is_nullable>
- void StableSortNumeric(std::vector<uint32_t>* out) const;
-
static constexpr bool IsDense(uint32_t flags) {
return (flags & Flag::kDense) != 0;
}
diff --git a/src/trace_processor/db/column_storage_overlay.h b/src/trace_processor/db/column_storage_overlay.h
index 6ff16ca..c13c095 100644
--- a/src/trace_processor/db/column_storage_overlay.h
+++ b/src/trace_processor/db/column_storage_overlay.h
@@ -19,17 +19,13 @@
#include <stdint.h>
-#include <memory>
#include <optional>
#include <vector>
-#include "perfetto/base/logging.h"
#include "src/trace_processor/containers/bit_vector.h"
-#include "src/trace_processor/containers/bit_vector_iterators.h"
#include "src/trace_processor/containers/row_map.h"
-namespace perfetto {
-namespace trace_processor {
+namespace perfetto::trace_processor {
// Contains indices which can be used to lookup data in one or more
// ColumnStorages.
@@ -136,66 +132,6 @@
// state.
void Clear() { *this = ColumnStorageOverlay(); }
- // Filters the current ColumnStorageOverlay into the RowMap given by |out|
- // based on the return value of |p(idx)|.
- //
- // Precondition: |out| should be sorted by the indices inside it (this is
- // required to keep this method efficient). This is automatically true if the
- // mode of |out| is Range or BitVector but needs to be enforced if the mode is
- // IndexVector.
- //
- // Specifically, the setup for each of the variables is as follows:
- // this: contains the indices passed to p to filter.
- // out : contains indicies into |this| and will be filtered down to only
- // contain indicies where p returns true.
- // p : takes an index given by |this| and returns whether the index should
- // be retained in |out|.
- //
- // Concretely, the algorithm being invoked looks like (but more efficient
- // based on the mode of |this| and |out|):
- // for (idx : out)
- // this_idx = (*this)[idx]
- // if (!p(this_idx))
- // out->Remove(idx)
- template <typename Predicate>
- void FilterInto(RowMap* out, Predicate p) const {
- PERFETTO_DCHECK(size() >= out->size());
-
- if (out->empty()) {
- // If the output ColumnStorageOverlay is empty, we don't need to do
- // anything.
- return;
- }
-
- if (out->size() == 1) {
- // If the output ColumnStorageOverlay has a single entry, just lookup
- // that entry and see if we should keep it.
- if (!p(Get(out->Get(0))))
- out->Clear();
- return;
- }
-
- // TODO(lalitm): investigate whether we should have another fast path for
- // cases where |out| has only a few entries so we can scan |out| instead of
- // scanning |this|.
-
- // Ideally, we'd always just scan |out| and keep the indices in |this| which
- // meet |p|. However, if |this| is a BitVector, we end up needing expensive
- // |IndexOfNthSet| calls (as we need to convert the row to an index before
- // passing it to |p|).
- if (row_map_.IsBitVector()) {
- FilterIntoScanSelfBv(out, p);
- return;
- }
- auto ip = [this, p](uint32_t row) { return p(row_map_.Get(row)); };
- out->Filter(ip);
- }
-
- template <typename Comparator = bool(uint32_t, uint32_t)>
- void StableSort(std::vector<uint32_t>* out, Comparator c) const {
- return row_map_.StableSort(out, c);
- }
-
// Returns the iterator over the rows in this ColumnStorageOverlay.
Iterator IterateRows() const { return Iterator(row_map_.IterateRows()); }
@@ -204,66 +140,9 @@
private:
explicit ColumnStorageOverlay(RowMap rm) : row_map_(std::move(rm)) {}
- // Filters the current ColumnStorageOverlay into |out| by performing a full
- // scan on |row_map.bit_vector_|. See |FilterInto| for a full breakdown of the
- // semantics of this function.
-
- template <typename Predicate>
- struct FilterIntoScanSelfBvVisitor {
- void operator()(RowMap::Range out_r) {
- BitVector bv(out_r.end, false);
- for (auto out_it = bv.IterateAllBits(); bv_iter;
- bv_iter.Next(), out_it.Next()) {
- uint32_t ordinal = bv_iter.ordinal();
- if (ordinal < out_r.start)
- continue;
- if (ordinal >= out_r.end)
- break;
-
- if (p(bv_iter.index())) {
- out_it.Set();
- }
- }
- *out = RowMap(std::move(bv));
- }
- void operator()(const BitVector& out_bv) {
- auto out_it = out_bv.IterateAllBits();
- for (; out_it; bv_iter.Next(), out_it.Next()) {
- PERFETTO_DCHECK(bv_iter);
- if (out_it.IsSet() && !p(bv_iter.index()))
- out_it.Clear();
- }
- }
- void operator()(std::vector<OutputIndex>& out_vec) {
- PERFETTO_DCHECK(std::is_sorted(out_vec.begin(), out_vec.end()));
- auto fn = [this](uint32_t i) {
- while (bv_iter.ordinal() < i) {
- bv_iter.Next();
- PERFETTO_DCHECK(bv_iter);
- }
- PERFETTO_DCHECK(bv_iter.ordinal() == i);
- return !p(bv_iter.index());
- };
- auto iv_it = std::remove_if(out_vec.begin(), out_vec.end(), fn);
- out_vec.erase(iv_it, out_vec.end());
- }
- RowMap* out;
- Predicate p;
- internal::SetBitsIterator bv_iter;
- };
-
- template <typename Predicate>
- void FilterIntoScanSelfBv(RowMap* out, Predicate p) const {
- const BitVector* bv = std::get_if<BitVector>(&row_map_.data_);
- auto it = bv->IterateSetBits();
- std::visit(FilterIntoScanSelfBvVisitor<Predicate>{out, p, std::move(it)},
- out->data_);
- }
-
RowMap row_map_;
};
-} // namespace trace_processor
-} // namespace perfetto
+} // namespace perfetto::trace_processor
#endif // SRC_TRACE_PROCESSOR_DB_COLUMN_STORAGE_OVERLAY_H_
diff --git a/src/trace_processor/db/column_storage_overlay_benchmark.cc b/src/trace_processor/db/column_storage_overlay_benchmark.cc
deleted file mode 100644
index 49be7c7..0000000
--- a/src/trace_processor/db/column_storage_overlay_benchmark.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright (C) 2022 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <random>
-
-#include <benchmark/benchmark.h>
-
-#include "src/trace_processor/db/column_storage_overlay.h"
-
-using perfetto::trace_processor::BitVector;
-using perfetto::trace_processor::ColumnStorageOverlay;
-using perfetto::trace_processor::RowMap;
-
-namespace {
-
-static constexpr uint32_t kPoolSize = 100000;
-static constexpr uint32_t kSize = 123456;
-
-template <typename Container>
-Container CreateRange(uint32_t end) {
- static constexpr uint32_t kRandomSeed = 32;
- std::minstd_rand0 rnd_engine(kRandomSeed);
-
- uint32_t start = rnd_engine() % end;
- uint32_t size = rnd_engine() % (end - start);
- return Container(start, start + size);
-}
-
-std::vector<uint32_t> CreateIndexVector(uint32_t size, uint32_t mod) {
- static constexpr uint32_t kRandomSeed = 476;
- std::minstd_rand0 rnd_engine(kRandomSeed);
- std::vector<uint32_t> rows(size);
- for (uint32_t i = 0; i < size; ++i) {
- rows[i] = rnd_engine() % mod;
- }
- return rows;
-}
-
-BitVector CreateBitVector(uint32_t size) {
- static constexpr uint32_t kRandomSeed = 42;
- std::minstd_rand0 rnd_engine(kRandomSeed);
- BitVector bv;
- for (uint32_t i = 0; i < size; ++i) {
- if (rnd_engine() % 2) {
- bv.AppendTrue();
- } else {
- bv.AppendFalse();
- }
- }
- return bv;
-}
-
-template <typename Factory>
-void BenchFilterInto(benchmark::State& state,
- ColumnStorageOverlay rm,
- Factory factory) {
- auto pool_vec = CreateIndexVector(kPoolSize, kSize);
-
- uint32_t pool_idx = 0;
- for (auto _ : state) {
- state.PauseTiming();
- RowMap out = factory();
- state.ResumeTiming();
-
- auto fn = [&pool_vec, pool_idx](uint32_t row) {
- return pool_vec[pool_idx] != 0 && (row % pool_vec[pool_idx]) != 0;
- };
- rm.FilterInto(&out, fn);
- pool_idx = (pool_idx + 1) % kPoolSize;
-
- benchmark::ClobberMemory();
- }
-}
-
-} // namespace
-
-static void BM_CSOFilterIntoRangeWithRange(benchmark::State& state) {
- ColumnStorageOverlay overlay(CreateRange<ColumnStorageOverlay>(kSize));
- uint32_t overlay_size = overlay.size();
- BenchFilterInto(state, std::move(overlay), [overlay_size]() {
- return CreateRange<RowMap>(overlay_size);
- });
-}
-BENCHMARK(BM_CSOFilterIntoRangeWithRange);
-
-static void BM_CSOFilterIntoRangeWithBv(benchmark::State& state) {
- ColumnStorageOverlay overlay(CreateRange<ColumnStorageOverlay>(kSize));
- uint32_t overlay_size = overlay.size();
- BenchFilterInto(state, std::move(overlay), [overlay_size]() {
- return RowMap(CreateBitVector(overlay_size));
- });
-}
-BENCHMARK(BM_CSOFilterIntoRangeWithBv);
-
-static void BM_CSOFilterIntoBvWithRange(benchmark::State& state) {
- ColumnStorageOverlay overlay(CreateBitVector(kSize));
- uint32_t overlay_size = overlay.size();
- BenchFilterInto(state, std::move(overlay), [overlay_size]() {
- return CreateRange<RowMap>(overlay_size);
- });
-}
-BENCHMARK(BM_CSOFilterIntoBvWithRange);
-
-static void BM_CSOFilterIntoBvWithBv(benchmark::State& state) {
- ColumnStorageOverlay overlay(CreateBitVector(kSize));
- uint32_t overlay_size = overlay.size();
- BenchFilterInto(state, std::move(overlay), [overlay_size]() {
- return RowMap(CreateBitVector(overlay_size));
- });
-}
-BENCHMARK(BM_CSOFilterIntoBvWithBv);
-
-static void BM_CSOFilterIntoIvWithRange(benchmark::State& state) {
- ColumnStorageOverlay overlay(CreateIndexVector(kSize, kSize));
- uint32_t overlay_size = overlay.size();
- BenchFilterInto(state, std::move(overlay), [overlay_size]() {
- return CreateRange<RowMap>(overlay_size);
- });
-}
-BENCHMARK(BM_CSOFilterIntoIvWithRange);
-
-static void BM_CSOFilterIntoIvWithBv(benchmark::State& state) {
- ColumnStorageOverlay overlay(CreateIndexVector(kSize, kSize));
- uint32_t overlay_size = overlay.size();
- BenchFilterInto(state, std::move(overlay), [overlay_size]() {
- return RowMap(CreateBitVector(overlay_size));
- });
-}
-BENCHMARK(BM_CSOFilterIntoIvWithBv);
diff --git a/src/trace_processor/db/column_storage_overlay_unittest.cc b/src/trace_processor/db/column_storage_overlay_unittest.cc
deleted file mode 100644
index 827153f..0000000
--- a/src/trace_processor/db/column_storage_overlay_unittest.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (C) 2022 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/trace_processor/db/column_storage_overlay.h"
-
-#include <memory>
-
-#include "src/base/test/gtest_test_suite.h"
-#include "test/gtest_and_gmock.h"
-
-namespace perfetto {
-namespace trace_processor {
-namespace {
-
-TEST(ColumnStorageOverlay, FilterIntoEmptyOutput) {
- ColumnStorageOverlay rm(0, 10000);
- RowMap filter(4, 4);
- rm.FilterInto(&filter, [](uint32_t) -> bool {
- ADD_FAILURE() << "Should not have called lambda";
- return true;
- });
-
- ASSERT_EQ(filter.size(), 0u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoSingleRowTrue) {
- ColumnStorageOverlay rm(100, 10000);
- RowMap filter(6, 7);
- rm.FilterInto(&filter, [](uint32_t row) { return row == 106u; });
-
- ASSERT_EQ(filter.size(), 1u);
- ASSERT_EQ(filter.Get(0u), 6u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoSingleRowFalse) {
- ColumnStorageOverlay rm(100, 10000);
- RowMap filter(6, 7);
- rm.FilterInto(&filter, [](uint32_t row) {
- EXPECT_EQ(row, 106u);
- return row != 106u;
- });
-
- ASSERT_EQ(filter.size(), 0u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoRangeWithRange) {
- ColumnStorageOverlay rm(93, 157);
- RowMap filter(4, 7);
- rm.FilterInto(&filter, [](uint32_t row) { return row == 97u || row == 98u; });
-
- ASSERT_EQ(filter.size(), 2u);
- ASSERT_EQ(filter.Get(0u), 4u);
- ASSERT_EQ(filter.Get(1u), 5u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoOffsetRangeWithRange) {
- ColumnStorageOverlay rm(100000, 100010);
- RowMap filter(4, 7);
- rm.FilterInto(&filter, [](uint32_t row) { return row == 100004u; });
-
- ASSERT_EQ(filter.size(), 1u);
- ASSERT_EQ(filter.Get(0u), 4u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoLargeRangeWithRange) {
- ColumnStorageOverlay rm(0, 100000);
- RowMap filter(0, 100000);
- rm.FilterInto(&filter, [](uint32_t row) { return row % 2 == 0; });
-
- ASSERT_EQ(filter.size(), 100000u / 2);
- for (uint32_t i = 0; i < 100000 / 2; ++i) {
- ASSERT_EQ(filter.Get(i), i * 2);
- }
-}
-
-TEST(ColumnStorageOverlay, FilterIntoBitVectorWithRange) {
- ColumnStorageOverlay rm(
- BitVector{true, false, false, true, false, true, false, true, true});
- RowMap filter(1u, 5u);
- rm.FilterInto(&filter, [](uint32_t row) { return row == 3u || row == 7u; });
-
- ASSERT_EQ(filter.size(), 2u);
- ASSERT_EQ(filter.Get(0u), 1u);
- ASSERT_EQ(filter.Get(1u), 3u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoIndexVectorWithRange) {
- ColumnStorageOverlay rm(std::vector<uint32_t>{33, 2u, 45u, 7u, 8u, 9u});
- RowMap filter(2, 5);
- rm.FilterInto(&filter, [](uint32_t row) { return row == 45u || row == 8u; });
-
- ASSERT_EQ(filter.size(), 2u);
- ASSERT_EQ(filter.Get(0u), 2u);
- ASSERT_EQ(filter.Get(1u), 4u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoRangeWithBitVector) {
- ColumnStorageOverlay rm(27, 31);
- RowMap filter(BitVector{true, false, true, true});
- rm.FilterInto(&filter, [](uint32_t row) { return row == 29u || row == 30u; });
-
- ASSERT_EQ(filter.size(), 2u);
- ASSERT_EQ(filter.Get(0u), 2u);
- ASSERT_EQ(filter.Get(1u), 3u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoBitVectorWithBitVector) {
- ColumnStorageOverlay rm(BitVector{true, false, true, true, false, true});
- RowMap filter(BitVector{true, true, false, true});
- rm.FilterInto(&filter, [](uint32_t row) { return row == 2u || row == 5u; });
-
- ASSERT_EQ(filter.size(), 2u);
- ASSERT_EQ(filter.Get(0u), 1u);
- ASSERT_EQ(filter.Get(1u), 3u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoIndexVectorWithBitVector) {
- ColumnStorageOverlay rm(std::vector<uint32_t>{0u, 2u, 3u, 5u});
- RowMap filter(BitVector{true, true, false, true});
- rm.FilterInto(&filter, [](uint32_t row) { return row == 2u || row == 5u; });
-
- ASSERT_EQ(filter.size(), 2u);
- ASSERT_EQ(filter.Get(0u), 1u);
- ASSERT_EQ(filter.Get(1u), 3u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoRangeWithIndexVector) {
- ColumnStorageOverlay rm(27, 41);
- RowMap filter(std::vector<uint32_t>{3u, 5u, 9u, 10u, 12u});
- rm.FilterInto(&filter, [](uint32_t row) { return row == 32u || row == 39u; });
-
- ASSERT_EQ(filter.size(), 2u);
- ASSERT_EQ(filter.Get(0u), 5u);
- ASSERT_EQ(filter.Get(1u), 12u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoBitVectorWithIndexVector) {
- ColumnStorageOverlay rm(
- BitVector{false, true, false, true, true, false, true});
- RowMap filter(std::vector<uint32_t>{1u, 2u, 3u});
- rm.FilterInto(&filter, [](uint32_t row) { return row == 3u || row == 4u; });
-
- ASSERT_EQ(filter.size(), 2u);
- ASSERT_EQ(filter.Get(0u), 1u);
- ASSERT_EQ(filter.Get(1u), 2u);
-}
-
-TEST(ColumnStorageOverlay, FilterIntoIndexVectorWithIndexVector) {
- ColumnStorageOverlay rm(std::vector<uint32_t>{33u, 2u, 45u, 7u, 8u, 9u});
- RowMap filter(std::vector<uint32_t>{1u, 2u, 3u});
- rm.FilterInto(&filter, [](uint32_t row) { return row == 2u || row == 7u; });
-
- ASSERT_EQ(filter.size(), 2u);
- ASSERT_EQ(filter.Get(0u), 1u);
- ASSERT_EQ(filter.Get(1u), 3u);
-}
-
-} // namespace
-} // namespace trace_processor
-} // namespace perfetto
diff --git a/src/trace_processor/db/query_executor_benchmark.cc b/src/trace_processor/db/query_executor_benchmark.cc
index fa79206..dd1e8a9 100644
--- a/src/trace_processor/db/query_executor_benchmark.cc
+++ b/src/trace_processor/db/query_executor_benchmark.cc
@@ -67,8 +67,6 @@
constexpr std::string_view kHeapGraphObjectTable =
"test/data/heap_pgraph_object_for_benchmarks_query.csv";
-enum DB { V1, V2 };
-
std::vector<std::string> SplitCSVLine(const std::string& line) {
std::vector<std::string> output;
uint32_t start = 0;
@@ -231,7 +229,6 @@
void BenchmarkSliceTableFilter(benchmark::State& state,
SliceTableForBenchmark& table,
std::initializer_list<Constraint> c) {
- Table::kUseFilterV2 = state.range(0) == 1;
for (auto _ : state) {
benchmark::DoNotOptimize(table.table_.QueryToRowMap(c, {}));
}
@@ -244,7 +241,6 @@
void BenchmarkSliceTableSort(benchmark::State& state,
SliceTableForBenchmark& table,
std::initializer_list<Order> ob) {
- Table::kUseSortV2 = state.range(0) == 1;
for (auto _ : state) {
benchmark::DoNotOptimize(table.table_.Sort(ob));
}
@@ -258,7 +254,6 @@
benchmark::State& state,
ExpectedFrameTimelineTableForBenchmark& table,
Constraint c) {
- Table::kUseFilterV2 = state.range(0) == 1;
for (auto _ : state) {
benchmark::DoNotOptimize(table.table_.QueryToRowMap({c}, {}));
}
@@ -271,7 +266,6 @@
void BenchmarkFtraceEventTableFilter(benchmark::State& state,
FtraceEventTableForBenchmark& table,
std::initializer_list<Constraint> c) {
- Table::kUseFilterV2 = state.range(0) == 1;
for (auto _ : state) {
benchmark::DoNotOptimize(table.table_.QueryToRowMap(c, {}));
}
@@ -284,7 +278,6 @@
void BenchmarkFtraceEventTableSort(benchmark::State& state,
FtraceEventTableForBenchmark& table,
std::initializer_list<Order> ob) {
- Table::kUseSortV2 = state.range(0) == 1;
for (auto _ : state) {
benchmark::DoNotOptimize(table.table_.Sort(ob));
}
@@ -299,7 +292,7 @@
BenchmarkSliceTableFilter(state, table, {table.table_.track_id().eq(100)});
}
-BENCHMARK(BM_QESliceTableTrackIdEq)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceTableTrackIdEq);
void BM_QESliceTableParentIdIsNotNull(benchmark::State& state) {
SliceTableForBenchmark table(state);
@@ -307,28 +300,28 @@
{table.table_.parent_id().is_not_null()});
}
-BENCHMARK(BM_QESliceTableParentIdIsNotNull)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceTableParentIdIsNotNull);
void BM_QESliceTableParentIdEq(benchmark::State& state) {
SliceTableForBenchmark table(state);
BenchmarkSliceTableFilter(state, table, {table.table_.parent_id().eq(88)});
}
-BENCHMARK(BM_QESliceTableParentIdEq)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceTableParentIdEq);
void BM_QESliceTableNameEq(benchmark::State& state) {
SliceTableForBenchmark table(state);
BenchmarkSliceTableFilter(state, table, {table.table_.name().eq("cheese")});
}
-BENCHMARK(BM_QESliceTableNameEq)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceTableNameEq);
void BM_QESliceTableNameGlobNoStars(benchmark::State& state) {
SliceTableForBenchmark table(state);
BenchmarkSliceTableFilter(state, table, {table.table_.name().glob("cheese")});
}
-BENCHMARK(BM_QESliceTableNameGlobNoStars)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceTableNameGlobNoStars);
void BM_QESliceTableNameGlob(benchmark::State& state) {
SliceTableForBenchmark table(state);
@@ -336,7 +329,7 @@
{table.table_.name().glob("chee*se")});
}
-BENCHMARK(BM_QESliceTableNameGlob)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceTableNameGlob);
void BM_QESliceTableNameRegex(benchmark::State& state) {
SliceTableForBenchmark table(state);
@@ -344,14 +337,14 @@
{table.table_.name().regex(".*Pool.*")});
}
-BENCHMARK(BM_QESliceTableNameRegex)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceTableNameRegex);
void BM_QESliceTableSorted(benchmark::State& state) {
SliceTableForBenchmark table(state);
BenchmarkSliceTableFilter(state, table, {table.table_.ts().gt(1000)});
}
-BENCHMARK(BM_QESliceTableSorted)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceTableSorted);
void BM_QEFilterWithSparseSelector(benchmark::State& state) {
ExpectedFrameTimelineTableForBenchmark table(state);
@@ -359,28 +352,28 @@
table.table_.track_id().eq(88));
}
-BENCHMARK(BM_QEFilterWithSparseSelector)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEFilterWithSparseSelector);
void BM_QEFilterWithDenseSelector(benchmark::State& state) {
FtraceEventTableForBenchmark table(state);
BenchmarkFtraceEventTableFilter(state, table, {table.table_.cpu().eq(4)});
}
-BENCHMARK(BM_QEFilterWithDenseSelector)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEFilterWithDenseSelector);
void BM_QESliceEventFilterId(benchmark::State& state) {
SliceTableForBenchmark table(state);
BenchmarkSliceTableFilter(state, table, {table.table_.id().eq(500)});
}
-BENCHMARK(BM_QESliceEventFilterId)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceEventFilterId);
void BM_QEFtraceEventFilterId(benchmark::State& state) {
FtraceEventTableForBenchmark table(state);
BenchmarkFtraceEventTableFilter(state, table, {table.table_.id().eq(500)});
}
-BENCHMARK(BM_QEFtraceEventFilterId)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEFtraceEventFilterId);
void BM_QESliceTableTsAndTrackId(benchmark::State& state) {
SliceTableForBenchmark table(state);
@@ -390,7 +383,7 @@
table.table_.track_id().eq(100)});
}
-BENCHMARK(BM_QESliceTableTsAndTrackId)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceTableTsAndTrackId);
void BM_QEFilterOneElement(benchmark::State& state) {
SliceTableForBenchmark table(state);
@@ -398,11 +391,9 @@
state, table, {table.table_.id().eq(10), table.table_.dur().eq(100)});
}
-BENCHMARK(BM_QEFilterOneElement)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEFilterOneElement);
void BM_QEFilterWithArrangement(benchmark::State& state) {
- Table::kUseFilterV2 = state.range(0) == 1;
-
SliceTableForBenchmark table(state);
Order order{table.table_.dur().index_in_table(), false};
Table slice_sorted_with_duration = table.table_.Sort({order});
@@ -418,11 +409,9 @@
benchmark::Counter::kInvert);
}
-BENCHMARK(BM_QEFilterWithArrangement)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEFilterWithArrangement);
void BM_QEDenseNullFilter(benchmark::State& state) {
- Table::kUseFilterV2 = state.range(0) == 1;
-
HeapGraphObjectTableForBenchmark table(state);
Constraint c{table.table_.reference_set_id().index_in_table(), FilterOp::kGt,
SqlValue::Long(1000)};
@@ -434,11 +423,9 @@
benchmark::Counter::kIsIterationInvariantRate |
benchmark::Counter::kInvert);
}
-BENCHMARK(BM_QEDenseNullFilter)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEDenseNullFilter);
void BM_QEDenseNullFilterIsNull(benchmark::State& state) {
- Table::kUseFilterV2 = state.range(0) == 1;
-
HeapGraphObjectTableForBenchmark table(state);
Constraint c{table.table_.reference_set_id().index_in_table(),
FilterOp::kIsNull, SqlValue()};
@@ -450,7 +437,7 @@
benchmark::Counter::kIsIterationInvariantRate |
benchmark::Counter::kInvert);
}
-BENCHMARK(BM_QEDenseNullFilterIsNull)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEDenseNullFilterIsNull);
void BM_QEIdColumnWithIntAsDouble(benchmark::State& state) {
SliceTableForBenchmark table(state);
@@ -459,7 +446,7 @@
BenchmarkSliceTableFilter(state, table, {c});
}
-BENCHMARK(BM_QEIdColumnWithIntAsDouble)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEIdColumnWithIntAsDouble);
void BM_QEIdColumnWithDouble(benchmark::State& state) {
SliceTableForBenchmark table(state);
@@ -468,11 +455,9 @@
BenchmarkSliceTableFilter(state, table, {c});
}
-BENCHMARK(BM_QEIdColumnWithDouble)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEIdColumnWithDouble);
void BM_QEFilterOrderedArrangement(benchmark::State& state) {
- Table::kUseFilterV2 = state.range(0) == 1;
-
SliceTableForBenchmark table(state);
Order order{table.table_.dur().index_in_table(), false};
Table slice_sorted_with_duration = table.table_.Sort({order});
@@ -488,29 +473,28 @@
benchmark::Counter::kInvert);
}
-BENCHMARK(BM_QEFilterOrderedArrangement)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEFilterOrderedArrangement);
void BM_QESliceSortNumericAsc(benchmark::State& state) {
SliceTableForBenchmark table(state);
BenchmarkSliceTableSort(state, table, {table.table_.track_id().ascending()});
}
-BENCHMARK(BM_QESliceSortNumericAsc)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceSortNumericAsc);
void BM_QESliceSortNullNumericAsc(benchmark::State& state) {
SliceTableForBenchmark table(state);
BenchmarkSliceTableSort(state, table, {table.table_.parent_id().ascending()});
}
-BENCHMARK(BM_QESliceSortNullNumericAsc)->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QESliceSortNullNumericAsc);
void BM_QEFtraceEventSortSelectorNumericAsc(benchmark::State& state) {
FtraceEventTableForBenchmark table(state);
BenchmarkFtraceEventTableSort(state, table, {table.table_.cpu().ascending()});
}
-BENCHMARK(BM_QEFtraceEventSortSelectorNumericAsc)
- ->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEFtraceEventSortSelectorNumericAsc);
void BM_QEFtraceEventSortSelectorNumericDesc(benchmark::State& state) {
FtraceEventTableForBenchmark table(state);
@@ -518,8 +502,7 @@
{table.table_.cpu().descending()});
}
-BENCHMARK(BM_QEFtraceEventSortSelectorNumericDesc)
- ->ArgsProduct({{DB::V1, DB::V2}});
+BENCHMARK(BM_QEFtraceEventSortSelectorNumericDesc);
} // namespace
} // namespace perfetto::trace_processor
diff --git a/src/trace_processor/db/table.cc b/src/trace_processor/db/table.cc
index 964e567..7bedbb7 100644
--- a/src/trace_processor/db/table.cc
+++ b/src/trace_processor/db/table.cc
@@ -38,9 +38,6 @@
namespace perfetto::trace_processor {
-bool Table::kUseFilterV2 = true;
-bool Table::kUseSortV2 = false;
-
Table::Table(StringPool* pool,
uint32_t row_count,
std::vector<ColumnLegacy> columns,
@@ -91,8 +88,7 @@
}
RowMap Table::QueryToRowMap(const std::vector<Constraint>& cs,
- const std::vector<Order>& ob,
- RowMap::OptimizeFor optimize_for) const {
+ const std::vector<Order>& ob) const {
// We need to delay creation of the chains to this point because of Chrome
// does not want the binary size overhead of including the chain
// implementations. As they also don't query tables (instead just iterating)
@@ -105,7 +101,7 @@
CreateChains();
}
- RowMap rm = FilterToRowMap(cs, optimize_for);
+ RowMap rm = QueryExecutor::FilterLegacy(this, cs);
if (ob.empty())
return rm;
@@ -125,43 +121,7 @@
PERFETTO_DCHECK(ob.front().desc);
std::reverse(idx.begin(), idx.end());
} else {
- // As our data is columnar, it's always more efficient to sort one column
- // at a time rather than try and sort lexiographically all at once.
- // To preserve correctness, we need to stably sort the index vector once
- // for each order by in *reverse* order. Reverse order is important as it
- // preserves the lexiographical property.
- //
- // For example, suppose we have the following:
- // Table {
- // Column x;
- // Column y
- // Column z;
- // }
- //
- // Then, to sort "y asc, x desc", we could do one of two things:
- // 1) sort the index vector all at once and on each index, we compare
- // y then z. This is slow as the data is columnar and we need to
- // repeatedly branch inside each column.
- // 2) we can stably sort first on x desc and then sort on y asc. This will
- // first put all the x in the correct order such that when we sort on
- // y asc, we will have the correct order of x where y is the same (since
- // the sort is stable).
- //
- // TODO(lalitm): it is possible that we could sort the last constraint (i.e.
- // the first constraint in the below loop) in a non-stable way. However,
- // this is more subtle than it appears as we would then need special
- // handling where there are order bys on a column which is already sorted
- // (e.g. ts, id). Investigate whether the performance gains from this are
- // worthwhile. This also needs changes to the constraint modification logic
- // in DbSqliteTable which currently eliminates constraints on sorted
- // columns.
- if (Table::kUseSortV2) {
- QueryExecutor::SortLegacy(this, ob, idx);
- } else {
- for (auto it = ob.rbegin(); it != ob.rend(); ++it) {
- columns_[it->col_idx].StableSort(it->desc, &idx);
- }
- }
+ QueryExecutor::SortLegacy(this, ob, idx);
}
return RowMap(std::move(idx));
}
diff --git a/src/trace_processor/db/table.h b/src/trace_processor/db/table.h
index 5e8a583..eab9f92 100644
--- a/src/trace_processor/db/table.h
+++ b/src/trace_processor/db/table.h
@@ -118,9 +118,6 @@
std::vector<Column> columns;
};
- static bool kUseFilterV2;
- static bool kUseSortV2;
-
virtual ~Table();
// We explicitly define the move constructor here because we need to update
@@ -135,10 +132,8 @@
// Filters and sorts the tables with the arguments specified, returning the
// result as a RowMap.
- RowMap QueryToRowMap(
- const std::vector<Constraint>&,
- const std::vector<Order>&,
- RowMap::OptimizeFor = RowMap::OptimizeFor::kMemory) const;
+ RowMap QueryToRowMap(const std::vector<Constraint>&,
+ const std::vector<Order>&) const;
// Applies the RowMap |rm| onto this table and returns an iterator over the
// resulting rows.
@@ -202,26 +197,6 @@
private:
friend class ColumnLegacy;
- PERFETTO_ALWAYS_INLINE RowMap FilterToRowMap(
- const std::vector<Constraint>& cs,
- RowMap::OptimizeFor optimize_for = RowMap::OptimizeFor::kMemory) const {
- if (cs.empty()) {
- return {0, row_count_, optimize_for};
- }
-
- if (kUseFilterV2) {
- if (optimize_for == RowMap::OptimizeFor::kMemory) {
- return QueryExecutor::FilterLegacy(this, cs);
- }
- return RowMap(QueryExecutor::FilterLegacy(this, cs).TakeAsIndexVector());
- }
- RowMap rm(0, row_count_, optimize_for);
- for (const Constraint& c : cs) {
- columns_[c.col_idx].FilterInto(c.op, c.value, &rm);
- }
- return rm;
- }
-
void CreateChains() const;
Table CopyExceptOverlays() const;
diff --git a/src/trace_processor/sqlite/db_sqlite_table.cc b/src/trace_processor/sqlite/db_sqlite_table.cc
index 8131d36..4d38bd6 100644
--- a/src/trace_processor/sqlite/db_sqlite_table.cc
+++ b/src/trace_processor/sqlite/db_sqlite_table.cc
@@ -646,15 +646,7 @@
}
});
- // Attempt to filter into a RowMap first - we'll figure out whether to apply
- // this to the table or we should use the RowMap directly. Also, if we are
- // going to sort on the RowMap, it makes sense that we optimize for lookup
- // speed so our sorting is not super slow.
- RowMap::OptimizeFor optimize_for = orders_.empty()
- ? RowMap::OptimizeFor::kMemory
- : RowMap::OptimizeFor::kLookupSpeed;
- RowMap filter_map =
- SourceTable()->QueryToRowMap(constraints_, orders_, optimize_for);
+ RowMap filter_map = SourceTable()->QueryToRowMap(constraints_, orders_);
if (filter_map.IsRange() && filter_map.size() <= 1) {
// Currently, our criteria where we have a special fast path is if it's
// a single ranged row. We have this fast path for joins on id columns
diff --git a/src/trace_processor/tables/py_tables_benchmark.cc b/src/trace_processor/tables/py_tables_benchmark.cc
index 37bb9f7..6aea3d5 100644
--- a/src/trace_processor/tables/py_tables_benchmark.cc
+++ b/src/trace_processor/tables/py_tables_benchmark.cc
@@ -95,29 +95,6 @@
}
BENCHMARK(BM_TableIteratorChild)->Apply(TableFilterArgs);
-static void BM_TableFilterAndSortRoot(benchmark::State& state) {
- StringPool pool;
- RootTestTable root(&pool);
-
- auto size = static_cast<uint32_t>(state.range(0));
- uint32_t partitions = 8;
-
- std::minstd_rand0 rnd_engine(45);
- for (uint32_t i = 0; i < size; ++i) {
- RootTestTable::Row row;
- row.root_non_null = rnd_engine() % partitions;
- row.root_non_null_2 = static_cast<uint32_t>(rnd_engine());
- root.Insert(row);
- }
-
- for (auto _ : state) {
- benchmark::DoNotOptimize(root.ApplyAndIterateRows(root.QueryToRowMap(
- {root.root_non_null().eq(5)}, {root.root_non_null_2().ascending()},
- RowMap::OptimizeFor::kLookupSpeed)));
- }
-}
-BENCHMARK(BM_TableFilterAndSortRoot)->Apply(TableFilterArgs);
-
static void BM_TableFilterRootId(benchmark::State& state) {
StringPool pool;
RootTestTable root(&pool);
diff --git a/src/trace_processor/trace_processor_impl.cc b/src/trace_processor/trace_processor_impl.cc
index 41fdab3..8894f47 100644
--- a/src/trace_processor/trace_processor_impl.cc
+++ b/src/trace_processor/trace_processor_impl.cc
@@ -339,18 +339,6 @@
context_.content_analyzer.reset(new ProtoContentAnalyzer(&context_));
}
- auto v2 = context_.config.dev_flags.find("enable_db2_filtering");
- if (v2 != context_.config.dev_flags.end()) {
- if (v2->second == "true") {
- Table::kUseFilterV2 = true;
- } else if (v2->second == "false") {
- Table::kUseFilterV2 = false;
- } else {
- PERFETTO_ELOG("Unknown value for enable_db2_filtering %s",
- v2->second.c_str());
- }
- }
-
// Add metrics to descriptor pool
const std::vector<std::string> sanitized_extension_paths =
SanitizeMetricMountPaths(config_.skip_builtin_metric_paths);