Merge "tp: Add diff_tests folder for diff tests"
diff --git a/Android.bp b/Android.bp
index eadd256..420723f 100644
--- a/Android.bp
+++ b/Android.bp
@@ -10088,6 +10088,14 @@
name: "perfetto_src_trace_processor_unittests",
}
+// GN: //src/trace_processor/util:bump_allocator
+filegroup {
+ name: "perfetto_src_trace_processor_util_bump_allocator",
+ srcs: [
+ "src/trace_processor/util/bump_allocator.cc",
+ ],
+}
+
// GN: //src/trace_processor/util:descriptors
filegroup {
name: "perfetto_src_trace_processor_util_descriptors",
@@ -10176,6 +10184,7 @@
filegroup {
name: "perfetto_src_trace_processor_util_unittests",
srcs: [
+ "src/trace_processor/util/bump_allocator_unittest.cc",
"src/trace_processor/util/debug_annotation_parser_unittest.cc",
"src/trace_processor/util/glob_unittest.cc",
"src/trace_processor/util/gzip_utils_unittest.cc",
@@ -11551,6 +11560,7 @@
":perfetto_src_trace_processor_types_types",
":perfetto_src_trace_processor_types_unittests",
":perfetto_src_trace_processor_unittests",
+ ":perfetto_src_trace_processor_util_bump_allocator",
":perfetto_src_trace_processor_util_descriptors",
":perfetto_src_trace_processor_util_glob",
":perfetto_src_trace_processor_util_gzip",
diff --git a/src/trace_processor/util/BUILD.gn b/src/trace_processor/util/BUILD.gn
index af28c16..98ff445 100644
--- a/src/trace_processor/util/BUILD.gn
+++ b/src/trace_processor/util/BUILD.gn
@@ -34,6 +34,17 @@
sources = [ "sql_modules.h" ]
}
+source_set("bump_allocator") {
+ sources = [
+ "bump_allocator.cc",
+ "bump_allocator.h",
+ ]
+ deps = [
+ "../../../gn:default_deps",
+ "../../base",
+ ]
+}
+
source_set("gzip") {
sources = [
"gzip_utils.cc",
@@ -206,6 +217,7 @@
source_set("unittests") {
sources = [
+ "bump_allocator_unittest.cc",
"debug_annotation_parser_unittest.cc",
"glob_unittest.cc",
"proto_profiler_unittest.cc",
@@ -217,6 +229,7 @@
]
testonly = true
deps = [
+ ":bump_allocator",
":descriptors",
":glob",
":gzip",
diff --git a/src/trace_processor/util/bump_allocator.cc b/src/trace_processor/util/bump_allocator.cc
new file mode 100644
index 0000000..1c00a8c
--- /dev/null
+++ b/src/trace_processor/util/bump_allocator.cc
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_processor/util/bump_allocator.h"
+
+#include "perfetto/base/compiler.h"
+#include "perfetto/base/logging.h"
+#include "perfetto/ext/base/optional.h"
+#include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace trace_processor {
+namespace {
+
+// TODO(b/266983484): consider using base::PagedMemory unless a) we are on a
+// platform where that doesn't make sense (WASM) b) we are trying to do heap
+// profiling.
+base::AlignedUniquePtr<uint8_t[]> Allocate(uint32_t size) {
+ uint8_t* ptr = static_cast<uint8_t*>(base::AlignedAlloc(8, size));
+ // Poison the region to try and catch out of bound accesses.
+ PERFETTO_ASAN_POISON(ptr, size);
+ return base::AlignedUniquePtr<uint8_t[]>(ptr);
+}
+
+} // namespace
+
+BumpAllocator::BumpAllocator() = default;
+
+BumpAllocator::~BumpAllocator() {
+ for (const auto& chunk : chunks_) {
+ PERFETTO_CHECK(chunk.unfreed_allocations == 0);
+ }
+}
+
+BumpAllocator::AllocId BumpAllocator::Alloc(uint32_t size) {
+ // Size is required to be a multiple of 8 to avoid needing to deal with
+ // alignment. It must also be at most kChunkSize as we do not support cross
+ // chunk spanning allocations.
+ PERFETTO_DCHECK(size % 8 == 0);
+ PERFETTO_DCHECK(size <= kChunkSize);
+
+ // Fast path: check if we have space to service this allocation in the current
+ // chunk.
+ base::Optional<AllocId> alloc_id = TryAllocInLastChunk(size);
+ if (alloc_id) {
+ return *alloc_id;
+ }
+
+ // Slow path: we don't have enough space in the last chunk so we create one.
+ Chunk chunk;
+ chunk.allocation = Allocate(kChunkSize);
+ chunks_.emplace_back(std::move(chunk));
+
+ // Ensure that we haven't exceeded the maximum number of chunks.
+ PERFETTO_CHECK(LastChunkIndex() < kMaxChunkCount);
+
+ // This time the allocation should definitely succeed in the last chunk (which
+ // we just added).
+ alloc_id = TryAllocInLastChunk(size);
+ PERFETTO_CHECK(alloc_id);
+ return *alloc_id;
+}
+
+void BumpAllocator::Free(AllocId id) {
+ Chunk& chunk = chunks_.at(ChunkIndexToQueueIndex(id.chunk_index));
+ PERFETTO_DCHECK(chunk.unfreed_allocations > 0);
+ chunk.unfreed_allocations--;
+}
+
+void* BumpAllocator::GetPointer(AllocId id) {
+ uint32_t queue_index = ChunkIndexToQueueIndex(id.chunk_index);
+ return chunks_.at(queue_index).allocation.get() + id.chunk_offset;
+}
+
+uint32_t BumpAllocator::EraseFrontFreeChunks() {
+ uint32_t to_erase_chunks = 0;
+ for (; to_erase_chunks < chunks_.size(); ++to_erase_chunks) {
+ // Break on the first chunk which still has unfreed allocations.
+ if (chunks_.at(to_erase_chunks).unfreed_allocations > 0) {
+ break;
+ }
+ }
+ chunks_.erase_front(to_erase_chunks);
+ erased_front_chunks_count_ += to_erase_chunks;
+ return to_erase_chunks;
+}
+
+uint32_t BumpAllocator::PastEndSerializedId() {
+ if (chunks_.empty()) {
+ return AllocId{erased_front_chunks_count_, 0}.Serialize();
+ }
+ return AllocId{LastChunkIndex(), chunks_.back().bump_offset}.Serialize();
+}
+
+base::Optional<BumpAllocator::AllocId> BumpAllocator::TryAllocInLastChunk(
+ uint32_t size) {
+ if (chunks_.empty()) {
+ return base::nullopt;
+ }
+
+ // TODO(b/266983484): consider switching this to bump downwards instead of
+ // upwards for more efficient code generation.
+ Chunk& chunk = chunks_.back();
+
+ // Verify some invariants:
+ // 1) The allocation must exist
+ // 2) The bump must be in the bounds of the chunk.
+ PERFETTO_DCHECK(chunk.allocation);
+ PERFETTO_DCHECK(chunk.bump_offset <= kChunkSize);
+
+ // If the end of the allocation ends up after this chunk, we cannot service it
+ // in this chunk.
+ uint32_t alloc_offset = chunk.bump_offset;
+ uint32_t new_bump_offset = chunk.bump_offset + size;
+ if (new_bump_offset > kChunkSize) {
+ return base::nullopt;
+ }
+
+ // Set the new offset equal to the end of this allocation and increment the
+ // unfreed allocation counter.
+ chunk.bump_offset = new_bump_offset;
+ chunk.unfreed_allocations++;
+
+ // Unpoison the allocation range to allow access to it on ASAN builds.
+ PERFETTO_ASAN_UNPOISON(chunk.allocation.get() + alloc_offset, size);
+
+ return AllocId{LastChunkIndex(), alloc_offset};
+}
+
+} // namespace trace_processor
+} // namespace perfetto
diff --git a/src/trace_processor/util/bump_allocator.h b/src/trace_processor/util/bump_allocator.h
new file mode 100644
index 0000000..cd5e593
--- /dev/null
+++ b/src/trace_processor/util/bump_allocator.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_PROCESSOR_UTIL_BUMP_ALLOCATOR_H_
+#define SRC_TRACE_PROCESSOR_UTIL_BUMP_ALLOCATOR_H_
+
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include "perfetto/ext/base/circular_queue.h"
+#include "perfetto/ext/base/optional.h"
+#include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+// A simple memory allocator which "bumps" a pointer to service allocations.
+// See [1] for an overview of bump allocators.
+//
+// This implementation works by obtaining a large chunk of memory from the
+// system allocator (i.e. from malloc). Every allocation uses that chunk as long
+// as there is free space inside. Once an allocation is requested which does not
+// fit in that chunk, a new chunk is requested from the system.
+//
+// IMPORTANT: all allocations returned from this allocator are 8-aligned and
+// all allocation sizes must be a multiple of 8.
+//
+// IMPORTANT: this allocator can allocate a total of 4GB of memory (2^32). Once
+// this is exhausted, any further allocation will cause a CHECK.
+//
+// IMPORTANT: all allocations *must* be explicitly freed before destroying this
+// object. The destructor will CHECK if it detects any allocation which is
+// unfreed.
+//
+// [1] https://rust-hosted-langs.github.io/book/chapter-simple-bump.html
+class BumpAllocator {
+ public:
+ // The limit on the total amount of memory which can be allocated. Required
+ // as we can only address 4GB of memory with AllocId.
+ static constexpr uint64_t kAllocLimit = 4ull * 1024 * 1024 * 1024; // 4GB
+
+ // The size of the "large chunk" requested from the system allocator.
+ // The size of this value trades-off between unused memory use vs CPU cost
+ // of going to the system allocator. 64KB feels a good trade-off there.
+ static constexpr uint32_t kChunkSize = 64u * 1024; // 64KB
+
+ // The maximum number of chunks which this allocator can have.
+ static constexpr uint32_t kMaxChunkCount = kAllocLimit / kChunkSize;
+
+ // The number of bits used to represent the offset within the chunk in AllocId.
+ //
+ // This is simply log2(kChunkSize): we have a separate constant as log2 is
+ // not a constexpr function; the static_asserts below verify this stays in
+ // sync.
+ static constexpr uint32_t kChunkOffsetAllocIdBits = 16u;
+
+ // The number of bits used to represent the chunk index in AllocId.
+ static constexpr uint32_t kChunkIndexAllocIdBits =
+ 32u - kChunkOffsetAllocIdBits;
+
+ // Represents an allocation returned from the allocator. We return this
+ // instead of just returning a pointer to allow looking up the chunk an
+ // allocation belongs to without having to scan all chunks.
+ struct AllocId {
+ uint32_t chunk_index : kChunkIndexAllocIdBits;
+ uint32_t chunk_offset : kChunkOffsetAllocIdBits;
+
+ uint32_t Serialize() const {
+ return static_cast<uint32_t>(chunk_index) << kChunkOffsetAllocIdBits |
+ chunk_offset;
+ }
+
+ static AllocId FromSerialized(uint32_t serialized) {
+ AllocId id;
+ id.chunk_index = serialized >> kChunkOffsetAllocIdBits;
+ id.chunk_offset = serialized;
+ return id;
+ }
+ };
+ static_assert(sizeof(AllocId) == sizeof(uint32_t),
+ "AllocId should be 32-bit in size to allow serialization");
+ static_assert(
+ kMaxChunkCount == (1 << kChunkIndexAllocIdBits),
+ "Max chunk count must match the number of bits used for chunk indices");
+ static_assert(
+ kChunkSize == (1 << kChunkOffsetAllocIdBits),
+ "Chunk size must match the number of bits used for offset within chunk");
+ static_assert(kAllocLimit == 1ull << sizeof(AllocId) * 8,
+ "Total limit on allocations must be equal to the number of "
+ "bits used for AllocId");
+
+ BumpAllocator();
+
+ // Verifies that all calls to |Alloc| were paired with matching calls to
+ // |Free|.
+ ~BumpAllocator();
+
+ BumpAllocator(BumpAllocator&&) noexcept = default;
+ BumpAllocator& operator=(BumpAllocator&&) noexcept = default;
+
+ // Allocates |size| bytes of memory. |size| must be a multiple of 8 and less
+ // than or equal to |kChunkSize|.
+ //
+ // Returns an |AllocId| which can be converted to a pointer using
+ // |GetPointer|.
+ AllocId Alloc(uint32_t size);
+
+ // Frees an allocation previously allocated by |Alloc|. This function is *not*
+ // idempotent.
+ //
+ // Once this function returns, |id| is no longer valid for any use. Trying
+ // to use it further (e.g. to passing to other methods including Free itself)
+ // will cause undefined behaviour.
+ void Free(AllocId id);
+
+ // Given an AllocId, returns a pointer which can be read from/written to.
+ //
+ // The caller is only allowed to access up to |size| bytes, where |size| ==
+ // the |size| argument to Alloc.
+ void* GetPointer(AllocId);
+
+ // Removes chunks from the start of this allocator where all the allocations
+ // in the chunks have been freed. This releases the memory back to the system.
+ //
+ // Returns the number of chunks freed.
+ uint32_t EraseFrontFreeChunks();
+
+ // Returns a "past the end" serialized AllocId i.e. a serialized value
+ // greater than all previously returned AllocIds.
+ uint32_t PastEndSerializedId();
+
+ // Returns the number of erased chunks from the start of this allocator.
+ //
+ // This value may change any time |EraseFrontFreeChunks| is called but is
+ // constant otherwise.
+ uint32_t erased_front_chunks_count() const {
+ return erased_front_chunks_count_;
+ }
+
+ private:
+ struct Chunk {
+ // The allocation from the system for this chunk. Because all allocations
+ // need to be 8 byte aligned, the chunk also needs to be 8-byte aligned.
+ // base::AlignedUniquePtr ensures this is the case.
+ base::AlignedUniquePtr<uint8_t[]> allocation;
+
+ // The bump offset relative to |allocation.data|. Incremented to service
+ // Alloc requests.
+ uint32_t bump_offset = 0;
+
+ // The number of unfreed allocations in this chunk.
+ uint32_t unfreed_allocations = 0;
+ };
+
+ // Tries to allocate |size| bytes in the final chunk in |chunks_|. Returns
+ // an AllocId if this was successful or base::nullopt otherwise.
+ base::Optional<AllocId> TryAllocInLastChunk(uint32_t size);
+
+ uint32_t ChunkIndexToQueueIndex(uint32_t chunk_index) const {
+ return chunk_index - erased_front_chunks_count_;
+ }
+ uint32_t QueueIndexToChunkIndex(uint32_t index_in_chunks_vec) const {
+ return erased_front_chunks_count_ + index_in_chunks_vec;
+ }
+ uint32_t LastChunkIndex() const {
+ PERFETTO_DCHECK(!chunks_.empty());
+ return QueueIndexToChunkIndex(static_cast<uint32_t>(chunks_.size() - 1));
+ }
+
+ base::CircularQueue<Chunk> chunks_;
+ uint32_t erased_front_chunks_count_ = 0;
+};
+
+} // namespace trace_processor
+} // namespace perfetto
+
+#endif // SRC_TRACE_PROCESSOR_UTIL_BUMP_ALLOCATOR_H_
diff --git a/src/trace_processor/util/bump_allocator_unittest.cc b/src/trace_processor/util/bump_allocator_unittest.cc
new file mode 100644
index 0000000..4608be6
--- /dev/null
+++ b/src/trace_processor/util/bump_allocator_unittest.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_processor/util/bump_allocator.h"
+
+#include <limits>
+#include <random>
+#include <vector>
+
+#include "perfetto/ext/base/utils.h"
+#include "test/gtest_and_gmock.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+class BumpAllocatorUnittest : public ::testing::Test {
+ public:
+ // Allocates |size| bytes of memory, writes |size|
+ // bytes in the region, reads |size| bytes and then frees the memory.
+ //
+ // Very useful to check that none of the internal DCHECKs of the allocator
+ // fire.
+ void AllocateWriteReadAndFree(uint32_t size) {
+ BumpAllocator::AllocId id = allocator_.Alloc(size);
+ uint8_t* ptr = static_cast<uint8_t*>(allocator_.GetPointer(id));
+
+ std::vector<uint8_t> data(size);
+ for (uint32_t i = 0; i < size; ++i) {
+ data[i] = static_cast<uint8_t>(rnd_engine_() &
+ std::numeric_limits<uint8_t>::max());
+ }
+ memcpy(ptr, data.data(), size);
+ ASSERT_EQ(memcmp(ptr, data.data(), size), 0);
+ allocator_.Free(id);
+ }
+
+ protected:
+ std::minstd_rand0 rnd_engine_;
+ BumpAllocator allocator_;
+};
+
+TEST_F(BumpAllocatorUnittest, AllocSmoke) {
+ AllocateWriteReadAndFree(8);
+ AllocateWriteReadAndFree(16);
+ AllocateWriteReadAndFree(24);
+ AllocateWriteReadAndFree(64);
+ AllocateWriteReadAndFree(1024);
+ AllocateWriteReadAndFree(BumpAllocator::kChunkSize);
+
+ allocator_.EraseFrontFreeChunks();
+}
+
+TEST_F(BumpAllocatorUnittest, EraseFrontAtAnyTime) {
+ BumpAllocator::AllocId id = allocator_.Alloc(8);
+ allocator_.EraseFrontFreeChunks();
+ allocator_.Free(id);
+ allocator_.EraseFrontFreeChunks();
+}
+
+TEST_F(BumpAllocatorUnittest, Serialize) {
+ BumpAllocator::AllocId id = allocator_.Alloc(8);
+ ASSERT_EQ(id.Serialize(), 0u);
+ ASSERT_EQ(allocator_.PastEndSerializedId(), 8u);
+ allocator_.Free(id);
+
+ id = allocator_.Alloc(8);
+ ASSERT_EQ(id.Serialize(), 8u);
+ allocator_.Free(id);
+
+ id = allocator_.Alloc(BumpAllocator::kChunkSize);
+ ASSERT_EQ(id.Serialize(), BumpAllocator::kChunkSize);
+ allocator_.Free(id);
+}
+
+TEST_F(BumpAllocatorUnittest, HighNumberSerialize) {
+ BumpAllocator::AllocId id = BumpAllocator::AllocId::FromSerialized(1138352);
+ ASSERT_EQ(id.chunk_index, 1138352 / BumpAllocator::kChunkSize);
+ ASSERT_EQ(id.chunk_offset, 1138352 % BumpAllocator::kChunkSize);
+ ASSERT_EQ(id.Serialize(), 1138352u);
+}
+
+TEST_F(BumpAllocatorUnittest, EraseFrontAccounting) {
+ AllocateWriteReadAndFree(8);
+ ASSERT_EQ(allocator_.EraseFrontFreeChunks(), 1u);
+ ASSERT_EQ(allocator_.erased_front_chunks_count(), 1u);
+ AllocateWriteReadAndFree(8);
+ ASSERT_EQ(allocator_.EraseFrontFreeChunks(), 1u);
+ ASSERT_EQ(allocator_.erased_front_chunks_count(), 2u);
+}
+
+TEST_F(BumpAllocatorUnittest, EraseFrontFreeChunk) {
+ AllocateWriteReadAndFree(8);
+ allocator_.EraseFrontFreeChunks();
+
+ auto past_id =
+ BumpAllocator::AllocId::FromSerialized(allocator_.PastEndSerializedId());
+ ASSERT_EQ(past_id.chunk_index, 1u);
+ ASSERT_EQ(past_id.chunk_offset, 0u);
+
+ auto id = allocator_.Alloc(8);
+ ASSERT_EQ(id.chunk_index, past_id.chunk_index);
+ ASSERT_EQ(id.chunk_offset, past_id.chunk_offset);
+ allocator_.Free(id);
+}
+
+TEST_F(BumpAllocatorUnittest, StressTest) {
+ std::minstd_rand0 rnd_engine;
+ for (int i = 0; i < 1000; i++) {
+ uint32_t size =
+ static_cast<uint32_t>((rnd_engine() * 8) % BumpAllocator::kChunkSize);
+ AllocateWriteReadAndFree(size);
+ }
+}
+
+} // namespace trace_processor
+} // namespace perfetto
diff --git a/ui/src/assets/common.scss b/ui/src/assets/common.scss
index 570fa44..5470643 100644
--- a/ui/src/assets/common.scss
+++ b/ui/src/assets/common.scss
@@ -11,6 +11,9 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+
+@import "fonts";
+
:root {
--sidebar-width: 256px;
--topbar-height: 48px;
@@ -199,28 +202,64 @@
width: 100%;
}
+@mixin table-font-size {
+ font-size: 14px;
+ line-height: 18px;
+}
+
+$table-hover-color: hsl(214, 22%, 90%);
+
+$table-border-color: rgba(60, 76, 92, 0.4);
+
+.pivot-table {
+ @include bottom-panel-font;
+ @include table-font-size;
+
+ width: 100%;
+ border-collapse: collapse;
+
+ thead,
+ i {
+ cursor: pointer;
+ }
+ thead {
+ font-weight: normal;
+ }
+ td {
+ padding: 2px 1px;
+ }
+ td.first {
+ border-left: 1px solid $table-border-color;
+ padding-left: 6px;
+ }
+ tr.header {
+ border-bottom: 1px solid $table-border-color;
+ text-align: center;
+ }
+ thead td.reorderable-cell {
+ cursor: grab;
+ }
+ tr:hover td {
+ background-color: $table-hover-color;
+ }
+ .disabled {
+ cursor: default;
+ }
+ .indent {
+ display: inline-block;
+ // 16px is the width of expand_more/expand_less icon to pad out cells
+ // without the button
+ width: 16px;
+ }
+ strong {
+ font-weight: 400;
+ }
+}
+
.query-table {
width: 100%;
font-size: 14px;
border: 0;
- &.pivot-table {
- thead,
- i {
- cursor: pointer;
- }
- thead td.reorderable-cell {
- cursor: grab;
- }
- .disabled {
- cursor: default;
- }
- .indent {
- display: inline-block;
- // 16px is the width of expand_more/expand_less icon to pad out cells
- // without the button
- width: 16px;
- }
- }
thead td {
position: sticky;
top: 0;
@@ -755,6 +794,7 @@
.pivot-table-redux {
user-select: text;
+ padding: 10px;
button.mode-button {
border-radius: 10px;
@@ -763,11 +803,6 @@
background-color: #c7d0db;
}
- &.edit {
- padding: 10px;
- display: flex;
- }
-
&.query-error {
color: red;
}
@@ -817,11 +852,11 @@
}
&.highlight-left {
- border-left-color: red;
+ background: linear-gradient(90deg, $table-border-color, transparent 20%);
}
&.highlight-right {
- border-right-color: red;
+ background: linear-gradient(270deg, $table-border-color, transparent 20%);
}
}
diff --git a/ui/src/assets/details.scss b/ui/src/assets/details.scss
index 11186c2..3faaadd 100644
--- a/ui/src/assets/details.scss
+++ b/ui/src/assets/details.scss
@@ -97,9 +97,7 @@
}
.details-panel {
- font-family: "Roboto Condensed", sans-serif;
- font-weight: 300;
- color: #3c4b5d;
+ @include bottom-panel-font;
.material-icons {
@include transition(0.3s);
@@ -234,8 +232,7 @@
table {
@include transition(0.1s);
- font-size: 14px;
- line-height: 18px;
+ @include table-font-size;
width: 100%;
// Aggregation panel uses multiple table elements that need to be aligned,
// which is done by using fixed table layout.
@@ -245,7 +242,7 @@
tr:hover {
td,
th {
- background-color: hsl(214, 22%, 90%);
+ background-color: $table-hover-color;
&.no-highlight {
background-color: white;
@@ -392,7 +389,7 @@
font-weight: bolder;
font-size: 12px;
.sum-data {
- border-bottom: 1px solid rgba(60, 76, 92, 0.4);
+ border-bottom: 1px solid $table-border-color;
}
}
@@ -543,7 +540,7 @@
background-color: hsl(214, 22%, 95%);
}
&:hover {
- background-color: hsl(214, 22%, 90%);
+ background-color: $table-hover-color;
}
.cell {
font-size: 11px;
diff --git a/ui/src/assets/fonts.scss b/ui/src/assets/fonts.scss
new file mode 100644
index 0000000..6f52d19
--- /dev/null
+++ b/ui/src/assets/fonts.scss
@@ -0,0 +1,20 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+@mixin bottom-panel-font {
+ font-family: "Roboto Condensed", sans-serif;
+ font-weight: 300;
+ color: #3c4b5d;
+}
+
diff --git a/ui/src/frontend/pivot_table_redux.ts b/ui/src/frontend/pivot_table_redux.ts
index 3caaf33..f17bb24 100644
--- a/ui/src/frontend/pivot_table_redux.ts
+++ b/ui/src/frontend/pivot_table_redux.ts
@@ -52,7 +52,7 @@
TableColumn,
} from './pivot_table_redux_types';
import {PopupMenuButton, PopupMenuItem} from './popup_menu';
-import {ReorderableCellGroup} from './reorderable_cells';
+import {ReorderableCell, ReorderableCellGroup} from './reorderable_cells';
interface PathItem {
@@ -100,6 +100,13 @@
}
}
+export function markFirst(index: number) {
+ if (index === 0) {
+ return '.first';
+ }
+ return '';
+}
+
export class PivotTableRedux extends Panel<PivotTableReduxAttrs> {
get pivotState() {
return globals.state.nonSerializableState.pivotTableRedux;
@@ -167,7 +174,7 @@
for (let i = 0; i < tree.aggregates.length; i++) {
const renderedValue = this.renderCell(
result.metadata.aggregationColumns[i].column, tree.aggregates[i]);
- renderedCells.push(m('td', renderedValue));
+ renderedCells.push(m('td' + markFirst(i), renderedValue));
}
const drillFilters: DrillFilter[] = [];
@@ -236,7 +243,7 @@
const value = row[aggregationIndex(treeDepth, j)];
const renderedValue = this.renderCell(
result.metadata.aggregationColumns[j].column, value);
- renderedCells.push(m('td', renderedValue));
+ renderedCells.push(m('td.aggregation' + markFirst(j), renderedValue));
}
renderedCells.push(this.renderDrillDownCell(area, drillFilters));
@@ -251,7 +258,7 @@
m('strong', 'Total values:'))];
for (let i = 0; i < queryResult.tree.aggregates.length; i++) {
overallValuesRow.push(
- m('td',
+ m('td' + markFirst(i),
this.renderCell(
queryResult.metadata.aggregationColumns[i].column,
queryResult.tree.aggregates[i])));
@@ -319,7 +326,7 @@
renderAggregationHeaderCell(
aggregation: Aggregation, index: number,
- removeItem: boolean): m.Children {
+ removeItem: boolean): ReorderableCell {
const popupItems: PopupMenuItem[] = [];
const state = globals.state.nonSerializableState.pivotTableRedux;
let icon = 'more_horiz';
@@ -384,13 +391,16 @@
popupItems.push(sliceAggregationsItem);
}
- return [
- this.readableAggregationName(aggregation),
- m(PopupMenuButton, {
- icon,
- items: popupItems,
- }),
- ];
+ return {
+ extraClass: '.aggregation' + markFirst(index),
+ content: [
+ this.readableAggregationName(aggregation),
+ m(PopupMenuButton, {
+ icon,
+ items: popupItems,
+ }),
+ ],
+ };
}
showModal = false;
@@ -424,7 +434,7 @@
renderPivotColumnHeader(
queryResult: PivotTableReduxResult, pivot: TableColumn,
- selectedPivots: Set<string>): m.Children {
+ selectedPivots: Set<string>): ReorderableCell {
const items: PopupMenuItem[] = [{
itemType: 'regular',
text: 'Add argument pivot',
@@ -478,10 +488,12 @@
});
}
- return [
- readableColumnName(pivot),
- m(PopupMenuButton, {icon: 'more_horiz', items}),
- ];
+ return {
+ content: [
+ readableColumnName(pivot),
+ m(PopupMenuButton, {icon: 'more_horiz', items}),
+ ],
+ };
}
renderResultsTable(attrs: PivotTableReduxAttrs) {
@@ -519,13 +531,13 @@
aggregation, index, removeItem));
return m(
- 'table.query-table.pivot-table',
+ 'table.pivot-table',
m('thead',
// First row of the table, containing names of pivot and aggregation
// columns, as well as popup menus to modify the columns. Last cell
// is empty because of an extra column with "drill down" button for
// each pivot table row.
- m('tr',
+ m('tr.header',
m(ReorderableCellGroup, {
cells: pivotTableHeaders,
onReorder: (
diff --git a/ui/src/frontend/reorderable_cells.ts b/ui/src/frontend/reorderable_cells.ts
index dd5992b..e3977a2 100644
--- a/ui/src/frontend/reorderable_cells.ts
+++ b/ui/src/frontend/reorderable_cells.ts
@@ -20,8 +20,13 @@
import {globals} from './globals';
+export interface ReorderableCell {
+ content: m.Children;
+ extraClass?: string;
+}
+
export interface ReorderableCellGroupAttrs {
- cells: m.Children[];
+ cells: ReorderableCell[];
onReorder: (from: number, to: number, side: DropDirection) => void;
}
@@ -62,7 +67,7 @@
view(vnode: m.Vnode<ReorderableCellGroupAttrs>): m.Children {
return vnode.attrs.cells.map(
(cell, index) => m(
- 'td.reorderable-cell',
+ `td.reorderable-cell${cell.extraClass ?? ''}`,
{
draggable: 'draggable',
class: this.getClassForIndex(index),
@@ -138,7 +143,7 @@
globals.rafScheduler.scheduleFullRedraw();
},
},
- cell));
+ cell.content));
}
oncreate(vnode: m.VnodeDOM<ReorderableCellGroupAttrs, this>) {