Improve unit tests for trace writers

* New unit test suite for interceptor_trace_writer.
* Changes to trace_writer_impl_unittest:
  * A couple of new tests.
  * Minor cleanups:
    * Remove redundant SharedMemoryArbiterImpl construction.
    * Avoid stringstream to build large strings.
    * Avoid Yoda comparisons in EXPECT and ASSERT.
    * Use EXPECT instead of ASSERT where possible.
    * Use SizeIs() matcher. This makes it clear that we're asserting the
      size of an array before accessing its elements (see the sketch below).
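
  As a sketch, the stringstream and SizeIs() cleanups look like this
  (lifted from the hunks below):

    // Build the filler string in one shot instead of a stringstream loop.
    std::string large_string(chunk_size, 'x');
    // SizeIs() makes the size check explicit before indexing into the array.
    ASSERT_THAT(last_commit.chunks_to_move(), SizeIs(1));
    EXPECT_EQ(last_commit.chunks_to_move()[0].page(), 0u);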

Change-Id: I9e347fde690c2e7c54eea2a5d3cc12233dae0bd4
diff --git a/Android.bp b/Android.bp
index 381211a..a7c4677 100644
--- a/Android.bp
+++ b/Android.bp
@@ -9398,6 +9398,7 @@
 filegroup {
     name: "perfetto_src_tracing_unittests",
     srcs: [
+        "src/tracing/internal/interceptor_trace_writer_unittest.cc",
         "src/tracing/traced_proto_unittest.cc",
         "src/tracing/traced_value_unittest.cc",
     ],
diff --git a/include/perfetto/tracing/interceptor.h b/include/perfetto/tracing/interceptor.h
index 643b5a4..91f4155 100644
--- a/include/perfetto/tracing/interceptor.h
+++ b/include/perfetto/tracing/interceptor.h
@@ -171,6 +171,7 @@
 
 namespace internal {
 class InterceptorTraceWriter;
+class InterceptorTraceWriterTest;
 class TracingMuxer;
 class TracingMuxerFake;
 class TracingMuxerImpl;
@@ -213,6 +214,7 @@
 
  private:
   friend class internal::InterceptorTraceWriter;
+  friend class internal::InterceptorTraceWriterTest;
   friend class internal::TracingMuxer;
   friend class internal::TracingMuxerFake;
   friend class internal::TracingMuxerImpl;
diff --git a/src/tracing/BUILD.gn b/src/tracing/BUILD.gn
index ebf198c..5ce9c62 100644
--- a/src/tracing/BUILD.gn
+++ b/src/tracing/BUILD.gn
@@ -145,12 +145,12 @@
     "test:test_support",
   ]
 
-  sources = []
+  sources = [ "internal/interceptor_trace_writer_unittest.cc" ]
 
   # TODO(primiano): remove the build_with_chromium conditional once the root
   # //BUILD.gn:libperfetto (in chromium) stops adding tracing:platform_fake.
   # The problem is the following: in chrome builds we end up with duplicate
-  # symbol definitions in the test because both platorm (impl and fake) are
+  # symbol definitions in the test because both platforms (impl and fake) are
   # present: impl added here and fake coming from chromium's base (full path:
   # perfetto_unittests -> //(chromium)base:test_support -> //(chromium)base
   # -> libperfetto -> platform_fake.
diff --git a/src/tracing/core/shared_memory_arbiter_impl.cc b/src/tracing/core/shared_memory_arbiter_impl.cc
index 7912826..42795e2 100644
--- a/src/tracing/core/shared_memory_arbiter_impl.cc
+++ b/src/tracing/core/shared_memory_arbiter_impl.cc
@@ -180,7 +180,7 @@
     }  // scoped_lock
 
     if (buffer_exhausted_policy == BufferExhaustedPolicy::kDrop) {
-      PERFETTO_DLOG("Shared memory buffer exhaused, returning invalid Chunk!");
+      PERFETTO_DLOG("Shared memory buffer exhausted, returning invalid Chunk!");
       return Chunk();
     }
 
diff --git a/src/tracing/core/trace_writer_impl.cc b/src/tracing/core/trace_writer_impl.cc
index 4cfa734..bacdb8c 100644
--- a/src/tracing/core/trace_writer_impl.cc
+++ b/src/tracing/core/trace_writer_impl.cc
@@ -302,7 +302,6 @@
     // Descend in the stack of non-finalized nested submessages (if any) and
     // detour their |size_field| into the |patch_list_|. At this point we have
     // to release the chunk and they cannot write anymore into that.
-    // TODO(primiano): add tests to cover this logic.
     bool chunk_needs_patching = false;
     for (auto* nested_msg = cur_packet_->nested_message(); nested_msg;
          nested_msg = nested_msg->nested_message()) {
diff --git a/src/tracing/core/trace_writer_impl_unittest.cc b/src/tracing/core/trace_writer_impl_unittest.cc
index e59e897..a0f3f3d 100644
--- a/src/tracing/core/trace_writer_impl_unittest.cc
+++ b/src/tracing/core/trace_writer_impl_unittest.cc
@@ -33,6 +33,13 @@
 namespace perfetto {
 namespace {
 
+using ::testing::AllOf;
+using ::testing::MockFunction;
+using ::testing::Ne;
+using ::testing::NotNull;
+using ::testing::SizeIs;
+using ::testing::ValuesIn;
+
 class TraceWriterImplTest : public AlignedBufferTest {
  public:
   void SetUp() override {
@@ -53,13 +60,10 @@
   FakeProducerEndpoint fake_producer_endpoint_;
   std::unique_ptr<base::TestTaskRunner> task_runner_;
   std::unique_ptr<SharedMemoryArbiterImpl> arbiter_;
-  std::function<void(const std::vector<uint32_t>&)> on_pages_complete_;
 };
 
 size_t const kPageSizes[] = {4096, 65536};
-INSTANTIATE_TEST_SUITE_P(PageSize,
-                         TraceWriterImplTest,
-                         ::testing::ValuesIn(kPageSizes));
+INSTANTIATE_TEST_SUITE_P(PageSize, TraceWriterImplTest, ValuesIn(kPageSizes));
 
 TEST_P(TraceWriterImplTest, SingleWriter) {
   const BufferID kBufId = 42;
@@ -67,9 +71,8 @@
   const size_t kNumPackets = 32;
   for (size_t i = 0; i < kNumPackets; i++) {
     auto packet = writer->NewTracePacket();
-    char str[16];
-    sprintf(str, "foobar %zu", i);
-    packet->set_for_testing()->set_str(str);
+    packet->set_for_testing()->set_str("foobar " + std::to_string(i));
   }
 
   // Destroying the TraceWriteImpl should cause the last packet to be finalized
@@ -78,7 +81,7 @@
 
   SharedMemoryABI* abi = arbiter_->shmem_abi_for_testing();
   size_t packets_count = 0;
-  for (size_t page_idx = 0; page_idx < kNumPages; page_idx++) {
+  for (size_t page_idx = 0; page_idx < abi->num_pages(); page_idx++) {
     uint32_t page_layout = abi->GetPageLayout(page_idx);
     size_t num_chunks = SharedMemoryABI::GetNumChunksForLayout(page_layout);
     for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
@@ -91,7 +94,7 @@
       packets_count += chunk.header()->packets.load().count;
     }
   }
-  EXPECT_EQ(kNumPackets, packets_count);
+  EXPECT_EQ(packets_count, kNumPackets);
   // TODO(primiano): check also the content of the packets decoding the protos.
 }
 
@@ -103,20 +106,17 @@
   // than two chunks.
   auto packet = writer->NewTracePacket();
   size_t chunk_size = page_size() / 4;
-  std::stringstream large_string_writer;
-  for (size_t pos = 0; pos < chunk_size; pos++)
-    large_string_writer << "x";
-  std::string large_string = large_string_writer.str();
-  packet->set_for_testing()->set_str(large_string.data(), large_string.size());
+  std::string large_string(chunk_size, 'x');
+  packet->set_for_testing()->set_str(large_string);
 
   // First chunk should be committed.
   arbiter_->FlushPendingCommitDataRequests();
   const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
-  ASSERT_EQ(1, last_commit.chunks_to_move_size());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
-  EXPECT_EQ(kBufId, last_commit.chunks_to_move()[0].target_buffer());
-  EXPECT_EQ(0, last_commit.chunks_to_patch_size());
+  ASSERT_THAT(last_commit.chunks_to_move(), SizeIs(1));
+  EXPECT_EQ(last_commit.chunks_to_move()[0].page(), 0u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].chunk(), 0u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].target_buffer(), kBufId);
+  EXPECT_THAT(last_commit.chunks_to_patch(), SizeIs(0));
 
   // We will simulate a batching cycle by first setting the batching period to a
   // very large value and then force-flushing when we are done writing data.
@@ -130,7 +130,7 @@
   // cannot be applied locally because the first chunk was already committed.
   packet->Finalize();
   auto packet2 = writer->NewTracePacket();
-  packet2->set_for_testing()->set_str(large_string.data(), large_string.size());
+  packet2->set_for_testing()->set_str(large_string);
 
   // Starting a new packet yet again should cause the patches for the second
   // packet (i.e. for the second chunk) to be applied in the producer, because
@@ -147,39 +147,39 @@
   // The first allocated chunk should be complete but need patching, since the
   // packet extended past the chunk and no patches for the packet size or string
   // field size were applied yet.
-  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
+  ASSERT_EQ(abi->GetChunkState(0u, 0u), SharedMemoryABI::kChunkComplete);
   auto chunk = abi->TryAcquireChunkForReading(0u, 0u);
   ASSERT_TRUE(chunk.is_valid());
-  ASSERT_EQ(1, chunk.header()->packets.load().count);
-  ASSERT_TRUE(chunk.header()->packets.load().flags &
+  EXPECT_EQ(chunk.header()->packets.load().count, 1u);
+  EXPECT_TRUE(chunk.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
-  ASSERT_TRUE(chunk.header()->packets.load().flags &
+  EXPECT_TRUE(chunk.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);
 
   // Verify that a patch for the first chunk was sent to the service.
-  ASSERT_EQ(1, last_commit.chunks_to_patch_size());
-  EXPECT_EQ(writer->writer_id(), last_commit.chunks_to_patch()[0].writer_id());
-  EXPECT_EQ(kBufId, last_commit.chunks_to_patch()[0].target_buffer());
-  EXPECT_EQ(chunk.header()->chunk_id.load(),
-            last_commit.chunks_to_patch()[0].chunk_id());
+  ASSERT_THAT(last_commit.chunks_to_patch(), SizeIs(1));
+  EXPECT_EQ(last_commit.chunks_to_patch()[0].writer_id(), writer->writer_id());
+  EXPECT_EQ(last_commit.chunks_to_patch()[0].target_buffer(), kBufId);
+  EXPECT_EQ(last_commit.chunks_to_patch()[0].chunk_id(),
+            chunk.header()->chunk_id.load());
   EXPECT_FALSE(last_commit.chunks_to_patch()[0].has_more_patches());
-  ASSERT_EQ(1, last_commit.chunks_to_patch()[0].patches_size());
+  EXPECT_THAT(last_commit.chunks_to_patch()[0].patches(), SizeIs(1));
 
   // Verify that the second chunk was committed.
-  ASSERT_EQ(1, last_commit.chunks_to_move_size());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
-  EXPECT_EQ(1u, last_commit.chunks_to_move()[0].chunk());
-  EXPECT_EQ(kBufId, last_commit.chunks_to_move()[0].target_buffer());
+  ASSERT_THAT(last_commit.chunks_to_move(), SizeIs(1));
+  EXPECT_EQ(last_commit.chunks_to_move()[0].page(), 0u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].chunk(), 1u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].target_buffer(), kBufId);
 
   // The second chunk should be in a complete state and should not need
   // patching, as the patches to it should have been applied in the producer.
-  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 1u));
+  ASSERT_EQ(abi->GetChunkState(0u, 1u), SharedMemoryABI::kChunkComplete);
   auto chunk2 = abi->TryAcquireChunkForReading(0u, 1u);
   ASSERT_TRUE(chunk2.is_valid());
-  ASSERT_EQ(2, chunk2.header()->packets.load().count);
-  ASSERT_TRUE(chunk2.header()->packets.load().flags &
+  EXPECT_EQ(chunk2.header()->packets.load().count, 2);
+  EXPECT_TRUE(chunk2.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);
-  ASSERT_FALSE(chunk2.header()->packets.load().flags &
+  EXPECT_FALSE(chunk2.header()->packets.load().flags &
                SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
 }
 
@@ -197,11 +197,8 @@
   // Write a packet that's guaranteed to span more than a single chunk.
   auto packet = writer->NewTracePacket();
   size_t chunk_size = page_size() / 4;
-  std::stringstream large_string_writer;
-  for (size_t pos = 0; pos < chunk_size; pos++)
-    large_string_writer << "x";
-  std::string large_string = large_string_writer.str();
-  packet->set_for_testing()->set_str(large_string.data(), large_string.size());
+  std::string large_string(chunk_size, 'x');
+  packet->set_for_testing()->set_str(large_string);
 
   // Starting a new packet should cause the first chunk and its patches to be
   // committed to the service.
@@ -213,40 +210,36 @@
   // packet extended past the chunk and no patches for the packet size or string
   // field size were applied in the producer.
   SharedMemoryABI* abi = arbiter_->shmem_abi_for_testing();
-  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
+  ASSERT_EQ(abi->GetChunkState(0u, 0u), SharedMemoryABI::kChunkComplete);
   auto chunk = abi->TryAcquireChunkForReading(0u, 0u);
   ASSERT_TRUE(chunk.is_valid());
-  ASSERT_EQ(1, chunk.header()->packets.load().count);
-  ASSERT_TRUE(chunk.header()->packets.load().flags &
+  EXPECT_EQ(chunk.header()->packets.load().count, 1);
+  EXPECT_TRUE(chunk.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
-  ASSERT_TRUE(chunk.header()->packets.load().flags &
+  EXPECT_TRUE(chunk.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);
 
   // The first chunk was committed.
   const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
-  ASSERT_EQ(1, last_commit.chunks_to_move_size());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
-  EXPECT_EQ(kBufId, last_commit.chunks_to_move()[0].target_buffer());
+  ASSERT_THAT(last_commit.chunks_to_move(), SizeIs(1));
+  EXPECT_EQ(last_commit.chunks_to_move()[0].page(), 0u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].chunk(), 0u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].target_buffer(), kBufId);
 
   // The patches for the first chunk were committed.
-  ASSERT_EQ(1, last_commit.chunks_to_patch_size());
-  EXPECT_EQ(writer->writer_id(), last_commit.chunks_to_patch()[0].writer_id());
-  EXPECT_EQ(kBufId, last_commit.chunks_to_patch()[0].target_buffer());
-  EXPECT_EQ(chunk.header()->chunk_id.load(),
-            last_commit.chunks_to_patch()[0].chunk_id());
+  ASSERT_THAT(last_commit.chunks_to_patch(), SizeIs(1));
+  EXPECT_EQ(last_commit.chunks_to_patch()[0].writer_id(), writer->writer_id());
+  EXPECT_EQ(last_commit.chunks_to_patch()[0].target_buffer(), kBufId);
+  EXPECT_EQ(last_commit.chunks_to_patch()[0].chunk_id(),
+            chunk.header()->chunk_id.load());
   EXPECT_FALSE(last_commit.chunks_to_patch()[0].has_more_patches());
-  ASSERT_EQ(1, last_commit.chunks_to_patch()[0].patches_size());
+  EXPECT_THAT(last_commit.chunks_to_patch()[0].patches(), SizeIs(1));
 }
 
 // Sets up a scenario in which the SMB is exhausted and TraceWriter fails to get
 // a new chunk while fragmenting a packet. Verifies that data is dropped until
 // the SMB is freed up and TraceWriter can get a new chunk.
 TEST_P(TraceWriterImplTest, FragmentingPacketWhileBufferExhausted) {
-  arbiter_.reset(new SharedMemoryArbiterImpl(buf(), buf_size(), page_size(),
-                                             &fake_producer_endpoint_,
-                                             task_runner_.get()));
-
   const BufferID kBufId = 42;
   std::unique_ptr<TraceWriter> writer =
       arbiter_->CreateTraceWriter(kBufId, BufferExhaustedPolicy::kDrop);
@@ -271,11 +264,8 @@
   // |writer| to attempt to acquire a new chunk but fail to do so.
   auto packet2 = writer->NewTracePacket();
   size_t chunk_size = page_size() / 4;
-  std::stringstream large_string_writer;
-  for (size_t pos = 0; pos < chunk_size; pos++)
-    large_string_writer << "x";
-  std::string large_string = large_string_writer.str();
-  packet2->set_for_testing()->set_str(large_string.data(), large_string.size());
+  std::string large_string(chunk_size, 'x');
+  packet2->set_for_testing()->set_str(large_string);
 
   EXPECT_TRUE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                   ->drop_packets_for_testing());
@@ -283,27 +273,27 @@
   // First chunk should be committed.
   arbiter_->FlushPendingCommitDataRequests();
   const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
-  ASSERT_EQ(1, last_commit.chunks_to_move_size());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
-  EXPECT_EQ(kBufId, last_commit.chunks_to_move()[0].target_buffer());
-  EXPECT_EQ(0, last_commit.chunks_to_patch_size());
+  ASSERT_THAT(last_commit.chunks_to_move(), SizeIs(1));
+  EXPECT_EQ(last_commit.chunks_to_move()[0].page(), 0u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].chunk(), 0u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].target_buffer(), kBufId);
+  EXPECT_THAT(last_commit.chunks_to_patch(), SizeIs(0));
 
   // It should not need patching and not have the continuation flag set.
   SharedMemoryABI* abi = arbiter_->shmem_abi_for_testing();
   ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
   auto chunk = abi->TryAcquireChunkForReading(0u, 0u);
   ASSERT_TRUE(chunk.is_valid());
-  ASSERT_EQ(2, chunk.header()->packets.load().count);
-  ASSERT_FALSE(chunk.header()->packets.load().flags &
+  EXPECT_EQ(chunk.header()->packets.load().count, 2);
+  EXPECT_FALSE(chunk.header()->packets.load().flags &
                SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
-  ASSERT_FALSE(chunk.header()->packets.load().flags &
+  EXPECT_FALSE(chunk.header()->packets.load().flags &
                SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);
 
   // Writing more data while in garbage mode succeeds. This data is dropped.
   packet2->Finalize();
   auto packet3 = writer->NewTracePacket();
-  packet3->set_for_testing()->set_str(large_string.data(), large_string.size());
+  packet3->set_for_testing()->set_str(large_string);
 
   // Release the |writer|'s first chunk as free, so that it can grab it again.
   abi->ReleaseChunkAsFree(std::move(chunk));
@@ -323,22 +313,22 @@
 
   // Flushing the writer causes the chunk to be released again.
   writer->Flush();
-  EXPECT_EQ(1, last_commit.chunks_to_move_size());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
-  ASSERT_EQ(0, last_commit.chunks_to_patch_size());
+  EXPECT_THAT(last_commit.chunks_to_move(), SizeIs(1));
+  EXPECT_EQ(last_commit.chunks_to_move()[0].page(), 0u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].chunk(), 0u);
+  EXPECT_THAT(last_commit.chunks_to_patch(), SizeIs(0));
 
   // Chunk should contain only |packet4| and not have any continuation flag set.
-  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
+  ASSERT_EQ(abi->GetChunkState(0u, 0u), SharedMemoryABI::kChunkComplete);
   chunk = abi->TryAcquireChunkForReading(0u, 0u);
   ASSERT_TRUE(chunk.is_valid());
-  ASSERT_EQ(1, chunk.header()->packets.load().count);
-  ASSERT_FALSE(chunk.header()->packets.load().flags &
+  ASSERT_EQ(chunk.header()->packets.load().count, 1);
+  EXPECT_FALSE(chunk.header()->packets.load().flags &
                SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
-  ASSERT_FALSE(
+  EXPECT_FALSE(
       chunk.header()->packets.load().flags &
       SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk);
-  ASSERT_FALSE(chunk.header()->packets.load().flags &
+  EXPECT_FALSE(chunk.header()->packets.load().flags &
                SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);
 }
 
@@ -346,10 +336,6 @@
 // acquires a garbage chunk later recovers and writes a previous_packet_dropped
 // marker into the trace.
 TEST_P(TraceWriterImplTest, FlushBeforeBufferExhausted) {
-  arbiter_.reset(new SharedMemoryArbiterImpl(buf(), buf_size(), page_size(),
-                                             &fake_producer_endpoint_,
-                                             task_runner_.get()));
-
   const BufferID kBufId = 42;
   std::unique_ptr<TraceWriter> writer =
       arbiter_->CreateTraceWriter(kBufId, BufferExhaustedPolicy::kDrop);
@@ -367,9 +353,9 @@
   // First chunk should be committed. Don't release it as free just yet.
   arbiter_->FlushPendingCommitDataRequests();
   const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
-  ASSERT_EQ(1, last_commit.chunks_to_move_size());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
+  ASSERT_THAT(last_commit.chunks_to_move(), SizeIs(1));
+  EXPECT_EQ(last_commit.chunks_to_move()[0].page(), 0u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].chunk(), 0u);
 
   // Grab all the remaining chunks in the SMB in new writers.
   std::array<std::unique_ptr<TraceWriter>, kNumPages * 4 - 1> other_writers;
@@ -390,11 +376,8 @@
   // Make sure that we fill the garbage chunk, so that |writer| tries to
   // re-acquire a valid chunk for the next packet.
   size_t chunk_size = page_size() / 4;
-  std::stringstream large_string_writer;
-  for (size_t pos = 0; pos < chunk_size; pos++)
-    large_string_writer << "x";
-  std::string large_string = large_string_writer.str();
-  packet2->set_for_testing()->set_str(large_string.data(), large_string.size());
+  std::string large_string(chunk_size, 'x');
+  packet2->set_for_testing()->set_str(large_string);
   packet2->Finalize();
 
   // Next packet should still be in the garbage chunk.
@@ -411,7 +394,7 @@
 
   // Fill the garbage chunk, so that the writer attempts to grab another chunk
   // for |packet4|.
-  packet3->set_for_testing()->set_str(large_string.data(), large_string.size());
+  packet3->set_for_testing()->set_str(large_string);
   packet3->Finalize();
 
   // Next packet should go into the reacquired chunk we just released.
@@ -425,16 +408,16 @@
 
   // Flushing the writer causes the chunk to be released again.
   writer->Flush();
-  EXPECT_EQ(1, last_commit.chunks_to_move_size());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
-  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
-  ASSERT_EQ(0, last_commit.chunks_to_patch_size());
+  ASSERT_THAT(last_commit.chunks_to_move(), SizeIs(1));
+  EXPECT_EQ(last_commit.chunks_to_move()[0].page(), 0u);
+  EXPECT_EQ(last_commit.chunks_to_move()[0].chunk(), 0u);
+  EXPECT_THAT(last_commit.chunks_to_patch(), SizeIs(0));
 
   // Chunk should contain only |packet4| and not have any continuation flag set.
   ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
   chunk = abi->TryAcquireChunkForReading(0u, 0u);
   ASSERT_TRUE(chunk.is_valid());
-  ASSERT_EQ(1, chunk.header()->packets.load().count);
+  ASSERT_EQ(chunk.header()->packets.load().count, 1);
   ASSERT_FALSE(chunk.header()->packets.load().flags &
                SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
   ASSERT_FALSE(
@@ -448,10 +431,6 @@
 // packet still has uncommitted patches doesn't hit a DCHECK / crash the writer
 // thread.
 TEST_P(TraceWriterImplTest, FlushAfterFragmentingPacketWhileBufferExhausted) {
-  arbiter_.reset(new SharedMemoryArbiterImpl(buf(), buf_size(), page_size(),
-                                             &fake_producer_endpoint_,
-                                             task_runner_.get()));
-
   const BufferID kBufId = 42;
   std::unique_ptr<TraceWriter> writer =
       arbiter_->CreateTraceWriter(kBufId, BufferExhaustedPolicy::kDrop);
@@ -477,11 +456,8 @@
   // second.
   auto packet2 = writer->NewTracePacket();
   size_t chunk_size = page_size() / 4;
-  std::stringstream large_string_writer;
-  for (size_t pos = 0; pos < chunk_size * 2; pos++)
-    large_string_writer << "x";
-  std::string large_string = large_string_writer.str();
-  packet2->set_for_testing()->set_str(large_string.data(), large_string.size());
+  std::string large_string(chunk_size * 2, 'x');
+  packet2->set_for_testing()->set_str(large_string);
 
   EXPECT_TRUE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                   ->drop_packets_for_testing());
@@ -489,7 +465,7 @@
   // First two chunks should be committed.
   arbiter_->FlushPendingCommitDataRequests();
   const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
-  ASSERT_EQ(2, last_commit.chunks_to_move_size());
+  ASSERT_THAT(last_commit.chunks_to_move(), SizeIs(2));
 
   // Flushing should succeed, even though some patches are still in the writer's
   // patch list.
@@ -497,8 +473,81 @@
   writer->Flush();
 }
 
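+// The callback passed to Flush() should run only once the service
+// acknowledges the commit, i.e. when the producer endpoint's commit-data
+// callback fires.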
+TEST_P(TraceWriterImplTest, Flush) {
+  MockFunction<void()> flush_cb;
+
+  const BufferID kBufId = 42;
+  std::unique_ptr<TraceWriter> writer = arbiter_->CreateTraceWriter(kBufId);
+  {
+    auto packet = writer->NewTracePacket();
+    packet->set_for_testing()->set_str("foobar");
+  }
+
+  EXPECT_CALL(flush_cb, Call).Times(0);
+  ASSERT_FALSE(fake_producer_endpoint_.last_commit_data_callback);
+  writer->Flush(flush_cb.AsStdFunction());
+  ASSERT_TRUE(fake_producer_endpoint_.last_commit_data_callback);
+  EXPECT_CALL(flush_cb, Call).Times(1);
+  fake_producer_endpoint_.last_commit_data_callback();
+}
+
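+// When a fragmented packet crosses a chunk boundary, the |size_field| of
+// every open nested message is redirected into the patch list, and one patch
+// per nested message is committed to the service.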
+TEST_P(TraceWriterImplTest, NestedMsgsPatches) {
+  const BufferID kBufId = 42;
+  const uint32_t kNestedFieldId = 1;
+  const uint32_t kStringFieldId = 2;
+  const uint32_t kIntFieldId = 3;
+  std::unique_ptr<TraceWriter> writer = arbiter_->CreateTraceWriter(kBufId);
+
+  size_t chunk_size = page_size() / 4;
+  std::string large_string(chunk_size, 'x');
+
+  auto packet = writer->NewTracePacket();
+  auto* nested1 =
+      packet->BeginNestedMessage<protozero::Message>(kNestedFieldId);
+  auto* nested2 =
+      nested1->BeginNestedMessage<protozero::Message>(kNestedFieldId);
+  auto* nested3 =
+      nested2->BeginNestedMessage<protozero::Message>(kNestedFieldId);
+  uint8_t* const old_nested_1_size_field = nested1->size_field();
+  uint8_t* const old_nested_2_size_field = nested2->size_field();
+  uint8_t* const old_nested_3_size_field = nested3->size_field();
+  EXPECT_THAT(old_nested_1_size_field, NotNull());
+  EXPECT_THAT(old_nested_2_size_field, NotNull());
+  EXPECT_THAT(old_nested_3_size_field, NotNull());
+
+  // Append a small field, which will fit in the current chunk.
+  nested3->AppendVarInt<uint64_t>(kIntFieldId, 1);
+
+  // The `size_field`s still point to the same old location, inside the chunk.
+  EXPECT_EQ(nested1->size_field(), old_nested_1_size_field);
+  EXPECT_EQ(nested2->size_field(), old_nested_2_size_field);
+  EXPECT_EQ(nested3->size_field(), old_nested_3_size_field);
+
+  // Append a large string, which will not fit in the current chunk.
+  nested3->AppendString(kStringFieldId, large_string);
+
+  // The `size_field`s will now point to different locations (patches).
+  EXPECT_THAT(nested1->size_field(),
+              AllOf(Ne(old_nested_1_size_field), NotNull()));
+  EXPECT_THAT(nested2->size_field(),
+              AllOf(Ne(old_nested_2_size_field), NotNull()));
+  EXPECT_THAT(nested3->size_field(),
+              AllOf(Ne(old_nested_3_size_field), NotNull()));
+
+  packet->Finalize();
+  writer->Flush();
+
+  arbiter_->FlushPendingCommitDataRequests();
+
+  const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
+  ASSERT_THAT(last_commit.chunks_to_patch(), SizeIs(1));
+  EXPECT_EQ(last_commit.chunks_to_patch()[0].writer_id(), writer->writer_id());
+  EXPECT_EQ(last_commit.chunks_to_patch()[0].target_buffer(), kBufId);
+  EXPECT_FALSE(last_commit.chunks_to_patch()[0].has_more_patches());
+  EXPECT_THAT(last_commit.chunks_to_patch()[0].patches(), SizeIs(3));
+}
+
 // TODO(primiano): add multi-writer test.
-// TODO(primiano): add Flush() test.
 
 }  // namespace
 }  // namespace perfetto
diff --git a/src/tracing/internal/interceptor_trace_writer_unittest.cc b/src/tracing/internal/interceptor_trace_writer_unittest.cc
new file mode 100644
index 0000000..7b32487
--- /dev/null
+++ b/src/tracing/internal/interceptor_trace_writer_unittest.cc
@@ -0,0 +1,127 @@
+#include "perfetto/tracing/internal/interceptor_trace_writer.h"
+
+#include "perfetto/tracing/interceptor.h"
+#include "test/gtest_and_gmock.h"
+
+namespace perfetto {
+namespace internal {
+namespace {
+
+using ::testing::AllOf;
+using ::testing::Field;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::IsNull;
+using ::testing::MockFunction;
+using ::testing::NotNull;
+
+constexpr uint32_t kInstanceIndex = 42;
+
+}  // namespace
+
+class InterceptorTraceWriterTest : public testing::Test {
+ protected:
+  using TracePacketCallbackArgs = InterceptorBase::TracePacketCallbackArgs;
+  using ThreadLocalState = InterceptorBase::ThreadLocalState;
+  using MockTracePacketCallback = MockFunction<void(TracePacketCallbackArgs)>;
+
+  InterceptorTraceWriterTest()
+      : tls_ptr_(new ThreadLocalState()),
+        tw_(std::unique_ptr<ThreadLocalState>(tls_ptr_),
+            TracePacketCallback,
+            &dss_,
+            kInstanceIndex) {}
+
+  void SetUp() override {
+    static_trace_packet_callback_ = &trace_packet_callback_;
+  }
+
+  void TearDown() override { static_trace_packet_callback_ = nullptr; }
+
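+  // Static trampoline: forwards the trace writer's packet callback into the
+  // per-test mock via |static_trace_packet_callback_|, set in SetUp().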
+  static void TracePacketCallback(
+      InterceptorBase::TracePacketCallbackArgs args) {
+    ASSERT_THAT(static_trace_packet_callback_, NotNull());
+    static_trace_packet_callback_->Call(args);
+  }
+
+  MockTracePacketCallback trace_packet_callback_;
+  static MockTracePacketCallback* static_trace_packet_callback_;
+
+  ThreadLocalState* tls_ptr_;
+  DataSourceStaticState dss_;
+  InterceptorTraceWriter tw_;
+};
+
+InterceptorTraceWriterTest::MockTracePacketCallback*
+    InterceptorTraceWriterTest::static_trace_packet_callback_;
+
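+// Verifies that the arguments forwarded to the packet callback match what
+// was passed to the InterceptorTraceWriter constructor: the instance index,
+// the data source static state and the thread-local state.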
+TEST_F(InterceptorTraceWriterTest, TracePacketCallbackParams) {
+  EXPECT_CALL(trace_packet_callback_,
+              Call(AllOf(Field(&TracePacketCallbackArgs::instance_index,
+                               kInstanceIndex),
+                         Field(&TracePacketCallbackArgs::static_state, &dss_),
+                         Field(&TracePacketCallbackArgs::tls, tls_ptr_))))
+      .Times(1);
+
+  tw_.NewTracePacket();
+  tw_.Flush();
+}
+
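+// Each packet written through the interceptor should automatically get a
+// trusted_packet_sequence_id, shared by packets from the same writer.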
+TEST_F(InterceptorTraceWriterTest, NewTracePacketAutomaticallyAddedFields) {
+  std::string first_packet;
+  std::string second_packet;
+  EXPECT_CALL(trace_packet_callback_, Call)
+      .WillOnce(Invoke([&](TracePacketCallbackArgs args) {
+        first_packet = args.packet_data.ToStdString();
+      }))
+      .WillOnce(Invoke([&](TracePacketCallbackArgs args) {
+        second_packet = args.packet_data.ToStdString();
+      }));
+
+  tw_.NewTracePacket();
+  tw_.NewTracePacket();
+  tw_.Flush();
+
+  protos::pbzero::TracePacket::Decoder first(first_packet);
+  protos::pbzero::TracePacket::Decoder second(second_packet);
+  EXPECT_TRUE(first.has_trusted_packet_sequence_id());
+  EXPECT_TRUE(second.has_trusted_packet_sequence_id());
+  EXPECT_EQ(first.trusted_packet_sequence_id(),
+            second.trusted_packet_sequence_id());
+}
+
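+// Appending 20000 raw bytes should grow the serialized packet by exactly
+// that amount relative to an otherwise empty packet.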
+TEST_F(InterceptorTraceWriterTest, NewTracePacketLargePacket) {
+  size_t first_packet_size = 0;
+  size_t second_packet_size = 0;
+  EXPECT_CALL(trace_packet_callback_, Call)
+      .WillOnce(Invoke([&](TracePacketCallbackArgs args) {
+        first_packet_size = args.packet_data.size;
+      }))
+      .WillOnce(Invoke([&](TracePacketCallbackArgs args) {
+        second_packet_size = args.packet_data.size;
+      }));
+
+  tw_.NewTracePacket();
+  {
+    auto msg = tw_.NewTracePacket();
+    std::vector<uint8_t> large(20000u, 0);
+    msg->AppendRawProtoBytes(large.data(), large.size());
+  }
+  tw_.Flush();
+
+  EXPECT_EQ(second_packet_size, first_packet_size + 20000u);
+}
+
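+// Flush() should deliver the pending packet to the packet callback before
+// invoking the flush-done callback.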
+TEST_F(InterceptorTraceWriterTest, FlushCallback) {
+  MockFunction<void()> flush_cb;
+
+  InSequence seq;
+  EXPECT_CALL(trace_packet_callback_, Call).Times(1);
+  EXPECT_CALL(flush_cb, Call).Times(1);
+
+  tw_.NewTracePacket();
+  tw_.Flush(flush_cb.AsStdFunction());
+}
+
+}  // namespace internal
+}  // namespace perfetto