Adds graph processor to Perfetto

GraphProcessor and GlobalDumpGraph (renamed here to GlobalNodeGraph), together with their unit tests, have been refactored and moved from the Chromium project into Perfetto. All dependencies on Chromium have been removed as part of the move. See crbug.com/1095982 for more details.
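
For reviewers, a rough sketch of the intended processing pipeline driven
through the new API (pid and raw_node below are illustrative placeholders):

  GraphProcessor::RawMemoryNodeMap process_nodes;
  process_nodes.emplace(pid, std::move(raw_node));

  std::unique_ptr<GlobalNodeGraph> graph =
      GraphProcessor::CreateMemoryGraph(process_nodes);
  GraphProcessor::RemoveWeakNodesFromGraph(graph.get());
  GraphProcessor::AddOverheadsAndPropagateEntries(graph.get());
  GraphProcessor::CalculateSizesForGraph(graph.get());
  std::map<base::PlatformProcessId, uint64_t> shared_footprint =
      GraphProcessor::ComputeSharedFootprintFromGraph(*graph);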

Bug: 1095982
Change-Id: Id2de30949714c7c869861ecb58738f69f2a25e20
diff --git a/Android.bp b/Android.bp
index a682da4..1c693fe 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1501,6 +1501,11 @@
   name: "perfetto_include_perfetto_ext_trace_processor_export_json",
 }
 
+// GN: //include/perfetto/ext/trace_processor/importers/memory_tracker:memory_tracker
+filegroup {
+  name: "perfetto_include_perfetto_ext_trace_processor_importers_memory_tracker_memory_tracker",
+}
+
 // GN: //include/perfetto/ext/traced:sys_stats_counters
 filegroup {
   name: "perfetto_include_perfetto_ext_traced_sys_stats_counters",
@@ -6735,6 +6740,18 @@
   ],
 }
 
+// GN: //src/trace_processor/importers/memory_tracker:graph_processor
+filegroup {
+  name: "perfetto_src_trace_processor_importers_memory_tracker_graph_processor",
+  srcs: [
+    "src/trace_processor/importers/memory_tracker/graph.cc",
+    "src/trace_processor/importers/memory_tracker/graph_processor.cc",
+    "src/trace_processor/importers/memory_tracker/memory_allocator_node_id.cc",
+    "src/trace_processor/importers/memory_tracker/raw_memory_graph_node.cc",
+    "src/trace_processor/importers/memory_tracker/raw_process_memory_node.cc",
+  ],
+}
+
 // GN: //src/trace_processor/importers:unittests
 filegroup {
   name: "perfetto_src_trace_processor_importers_unittests",
@@ -6959,6 +6976,9 @@
     "src/trace_processor/forwarding_trace_parser_unittest.cc",
     "src/trace_processor/importers/ftrace/sched_event_tracker_unittest.cc",
     "src/trace_processor/importers/fuchsia/fuchsia_trace_utils_unittest.cc",
+    "src/trace_processor/importers/memory_tracker/graph_processor_unittest.cc",
+    "src/trace_processor/importers/memory_tracker/graph_unittest.cc",
+    "src/trace_processor/importers/memory_tracker/raw_process_memory_node_unittest.cc",
     "src/trace_processor/importers/proto/args_table_utils_unittest.cc",
     "src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc",
     "src/trace_processor/importers/proto/heap_profile_tracker_unittest.cc",
@@ -7761,6 +7781,7 @@
     ":perfetto_include_perfetto_ext_base_base",
     ":perfetto_include_perfetto_ext_ipc_ipc",
     ":perfetto_include_perfetto_ext_trace_processor_export_json",
+    ":perfetto_include_perfetto_ext_trace_processor_importers_memory_tracker_memory_tracker",
     ":perfetto_include_perfetto_ext_traced_sys_stats_counters",
     ":perfetto_include_perfetto_ext_traced_traced",
     ":perfetto_include_perfetto_ext_tracing_core_core",
@@ -7884,6 +7905,7 @@
     ":perfetto_src_trace_processor_export_json",
     ":perfetto_src_trace_processor_ftrace_descriptors",
     ":perfetto_src_trace_processor_importers_common",
+    ":perfetto_src_trace_processor_importers_memory_tracker_graph_processor",
     ":perfetto_src_trace_processor_importers_unittests",
     ":perfetto_src_trace_processor_lib",
     ":perfetto_src_trace_processor_metatrace",
diff --git a/BUILD.gn b/BUILD.gn
index 0a1b23c..e5d95c4 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -234,6 +234,7 @@
   component("libperfetto") {
     public_configs = [ "gn:public_config" ]
     deps = [
+      "src/trace_processor/importers/memory_tracker:graph_processor",
       "src/tracing:client_api",
       "src/tracing/core",
 
@@ -243,6 +244,7 @@
     configs -= [ "//build/config/compiler:chromium_code" ]
     configs += [ "//build/config/compiler:no_chromium_code" ]
     public_deps = [
+      "include/perfetto/ext/trace_processor/importers/memory_tracker",
       "include/perfetto/ext/tracing/core",
       "include/perfetto/tracing",
       "protos/perfetto/common:zero",
diff --git a/include/perfetto/ext/trace_processor/importers/memory_tracker/BUILD.gn b/include/perfetto/ext/trace_processor/importers/memory_tracker/BUILD.gn
new file mode 100644
index 0000000..7d093f4
--- /dev/null
+++ b/include/perfetto/ext/trace_processor/importers/memory_tracker/BUILD.gn
@@ -0,0 +1,31 @@
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../../../../gn/perfetto.gni")
+
+source_set("memory_tracker") {
+  deps = [ "../../../../../../gn:default_deps" ]
+  public_deps = [
+    "../../../../base",
+    "../../../base",
+  ]
+  sources = [
+    "graph.h",
+    "graph_processor.h",
+    "memory_allocator_node_id.h",
+    "memory_graph_edge.h",
+    "raw_memory_graph_node.h",
+    "raw_process_memory_node.h",
+  ]
+}
diff --git a/include/perfetto/ext/trace_processor/importers/memory_tracker/graph.h b/include/perfetto/ext/trace_processor/importers/memory_tracker/graph.h
new file mode 100644
index 0000000..0882634
--- /dev/null
+++ b/include/perfetto/ext/trace_processor/importers/memory_tracker/graph.h
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_GRAPH_H_
+#define INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_GRAPH_H_
+
+#include <sys/types.h>
+
+#include <forward_list>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "perfetto/base/export.h"
+#include "perfetto/base/proc_utils.h"
+#include "perfetto/ext/base/string_utils.h"
+#include "perfetto/ext/trace_processor/importers/memory_tracker/memory_allocator_node_id.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+const base::PlatformProcessId kNullProcessId = 0;
+
+// Contains processed node graphs for each process and in the global space.
+// This class is also the arena which owns the nodes of the graph.
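+//
+// A minimal usage sketch (illustrative only, not part of the API contract):
+//
+//   GlobalNodeGraph graph;
+//   GlobalNodeGraph::Process* process = graph.CreateGraphForProcess(1234);
+//   GlobalNodeGraph::Node* node = process->CreateNode(
+//       MemoryAllocatorNodeId(1), "malloc/allocated_objects", /*weak=*/false);
+//   node->AddEntry("size", GlobalNodeGraph::Node::Entry::kBytes, 4096);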
+class PERFETTO_EXPORT GlobalNodeGraph {
+ public:
+  class Node;
+  class Edge;
+  class PreOrderIterator;
+  class PostOrderIterator;
+
+  // Graph of nodes either associated with a process or with
+  // the shared space.
+  class Process {
+   public:
+    Process(base::PlatformProcessId pid, GlobalNodeGraph* global_graph);
+    ~Process();
+
+    // Creates a node in the node graph which is associated with the
+    // given |id|, |path| and |weak|ness and returns it.
+    GlobalNodeGraph::Node* CreateNode(MemoryAllocatorNodeId id,
+                                      const std::string& path,
+                                      bool weak);
+
+    // Returns the node in the graph at the given |path| or nullptr
+    // if no such node exists.
+    GlobalNodeGraph::Node* FindNode(const std::string& path);
+
+    base::PlatformProcessId pid() const { return pid_; }
+    GlobalNodeGraph* global_graph() const { return global_graph_; }
+    GlobalNodeGraph::Node* root() const { return root_; }
+
+   private:
+    base::PlatformProcessId pid_;
+    GlobalNodeGraph* global_graph_;
+    GlobalNodeGraph::Node* root_;
+    Process(const Process&) = delete;
+    Process& operator=(const Process&) = delete;
+  };
+
+  // A single node in the graph of allocator nodes associated with a
+  // certain path and containing the entries for this path.
+  class Node {
+   public:
+    // Auxiliary data (a scalar number or a string) about this node, each
+    // associated with a key.
+    struct Entry {
+      enum Type {
+        kUInt64,
+        kString,
+      };
+
+      // The units of the entry if the entry is a scalar. The scalar
+      // refers to either a number of objects or a size in bytes.
+      enum ScalarUnits {
+        kObjects,
+        kBytes,
+      };
+
+      // Creates the entry with the appropriate type.
+      Entry(ScalarUnits units, uint64_t value);
+      explicit Entry(const std::string& value);
+
+      const Type type;
+      const ScalarUnits units;
+
+      // The value of the entry if this entry has a string type.
+      const std::string value_string;
+
+      // The value of the entry if this entry has a integer type.
+      const uint64_t value_uint64;
+    };
+
+    explicit Node(GlobalNodeGraph::Process* node_graph, Node* parent);
+    ~Node();
+
+    // Gets the direct child of this node with the given |name| as the key.
+    Node* GetChild(const std::string& name) const;
+
+    // Inserts the given |node| as a child of the current node
+    // with the given |name| as the key.
+    void InsertChild(const std::string& name, Node* node);
+
+    // Creates a child for this node with the given |name| as the key.
+    Node* CreateChild(const std::string& name);
+
+    // Checks if the current node is a descendent (i.e. exists as a child,
+    // child of a child, etc.) of the given node |possible_parent|.
+    bool IsDescendentOf(const Node& possible_parent) const;
+
+    // Adds an entry for this node with the given |name|, |units| and
+    // type.
+    void AddEntry(const std::string& name,
+                  Entry::ScalarUnits units,
+                  uint64_t value);
+    void AddEntry(const std::string& name, const std::string& value);
+
+    // Adds an edge which indicates that this node is owned by
+    // another node.
+    void AddOwnedByEdge(Edge* edge);
+
+    // Sets the edge which indicates that this node owns another node.
+    void SetOwnsEdge(Edge* edge);
+
+    bool is_weak() const { return weak_; }
+    void set_weak(bool weak) { weak_ = weak; }
+    bool is_explicit() const { return explicit_; }
+    void set_explicit(bool explicit_node) { explicit_ = explicit_node; }
+    uint64_t not_owned_sub_size() const { return not_owned_sub_size_; }
+    void add_not_owned_sub_size(uint64_t addition) {
+      not_owned_sub_size_ += addition;
+    }
+    uint64_t not_owning_sub_size() const { return not_owning_sub_size_; }
+    void add_not_owning_sub_size(uint64_t addition) {
+      not_owning_sub_size_ += addition;
+    }
+    double owned_coefficient() const { return owned_coefficient_; }
+    void set_owned_coefficient(double owned_coefficient) {
+      owned_coefficient_ = owned_coefficient;
+    }
+    double owning_coefficient() const { return owning_coefficient_; }
+    void set_owning_coefficient(double owning_coefficient) {
+      owning_coefficient_ = owning_coefficient;
+    }
+    double cumulative_owned_coefficient() const {
+      return cumulative_owned_coefficient_;
+    }
+    void set_cumulative_owned_coefficient(double cumulative_owned_coefficient) {
+      cumulative_owned_coefficient_ = cumulative_owned_coefficient;
+    }
+    double cumulative_owning_coefficient() const {
+      return cumulative_owning_coefficient_;
+    }
+    void set_cumulative_owning_coefficient(
+        double cumulative_owning_coefficient) {
+      cumulative_owning_coefficient_ = cumulative_owning_coefficient;
+    }
+    MemoryAllocatorNodeId id() const { return id_; }
+    void set_id(MemoryAllocatorNodeId id) { id_ = id; }
+    GlobalNodeGraph::Edge* owns_edge() const { return owns_edge_; }
+    std::map<std::string, Node*>* children() { return &children_; }
+    const std::map<std::string, Node*>& const_children() const {
+      return children_;
+    }
+    std::vector<GlobalNodeGraph::Edge*>* owned_by_edges() {
+      return &owned_by_edges_;
+    }
+    const Node* parent() const { return parent_; }
+    const GlobalNodeGraph::Process* node_graph() const { return node_graph_; }
+    std::map<std::string, Entry>* entries() { return &entries_; }
+    const std::map<std::string, Entry>& const_entries() const {
+      return entries_;
+    }
+
+   private:
+    GlobalNodeGraph::Process* node_graph_;
+    Node* const parent_;
+    MemoryAllocatorNodeId id_;
+    std::map<std::string, Entry> entries_;
+    std::map<std::string, Node*> children_;
+    bool explicit_ = false;
+    bool weak_ = false;
+    uint64_t not_owning_sub_size_ = 0;
+    uint64_t not_owned_sub_size_ = 0;
+    double owned_coefficient_ = 1;
+    double owning_coefficient_ = 1;
+    double cumulative_owned_coefficient_ = 1;
+    double cumulative_owning_coefficient_ = 1;
+
+    GlobalNodeGraph::Edge* owns_edge_;
+    std::vector<GlobalNodeGraph::Edge*> owned_by_edges_;
+
+    Node(const Node&) = delete;
+    Node& operator=(const Node&) = delete;
+  };
+
+  // An edge in the node graph which indicates ownership between the
+  // source and target nodes.
+  class Edge {
+   public:
+    Edge(GlobalNodeGraph::Node* source,
+         GlobalNodeGraph::Node* target,
+         int priority);
+
+    GlobalNodeGraph::Node* source() const { return source_; }
+    GlobalNodeGraph::Node* target() const { return target_; }
+    int priority() const { return priority_; }
+
+   private:
+    GlobalNodeGraph::Node* const source_;
+    GlobalNodeGraph::Node* const target_;
+    const int priority_;
+  };
+
+  // An iterator-esque class which yields nodes in a depth-first pre-order.
+  class PreOrderIterator {
+   public:
+    explicit PreOrderIterator(std::vector<Node*>&& root_nodes);
+    PreOrderIterator(PreOrderIterator&& other);
+    ~PreOrderIterator();
+
+    // Yields the next node in the DFS pre-order traversal.
+    Node* next();
+
+   private:
+    std::vector<Node*> to_visit_;
+    std::set<const Node*> visited_;
+  };
+
+  // An iterator-esque class which yields nodes in a depth-first post-order.
+  class PostOrderIterator {
+   public:
+    explicit PostOrderIterator(std::vector<Node*>&& root_nodes);
+    PostOrderIterator(PostOrderIterator&& other);
+    ~PostOrderIterator();
+
+    // Yields the next node in the DFS post-order traversal.
+    Node* next();
+
+   private:
+    std::vector<Node*> to_visit_;
+    std::set<Node*> visited_;
+    std::vector<Node*> path_;
+  };
+
+  using ProcessNodeGraphMap =
+      std::map<base::PlatformProcessId,
+               std::unique_ptr<GlobalNodeGraph::Process>>;
+  using IdNodeMap = std::map<MemoryAllocatorNodeId, Node*>;
+
+  GlobalNodeGraph();
+  ~GlobalNodeGraph();
+
+  // Creates a container for all the node graphs of the process with the
+  // given |process_id|.
+  GlobalNodeGraph::Process* CreateGraphForProcess(
+      base::PlatformProcessId process_id);
+
+  // Adds an edge in the node graph with the given source and target nodes
+  // and edge priority.
+  void AddNodeOwnershipEdge(Node* owner, Node* owned, int priority);
+
+  // Returns an iterator which yields the nodes in this graph in
+  // pre-order. That is, children and owners of nodes are returned after the
+  // node itself.
+  PreOrderIterator VisitInDepthFirstPreOrder();
+
+  // Returns an iterator which yields the nodes in this graph in
+  // post-order. That is, children and owners of nodes are returned before the
+  // node itself.
+  PostOrderIterator VisitInDepthFirstPostOrder();
+
+  const IdNodeMap& nodes_by_id() const { return nodes_by_id_; }
+  GlobalNodeGraph::Process* shared_memory_graph() const {
+    return shared_memory_graph_.get();
+  }
+  const ProcessNodeGraphMap& process_node_graphs() const {
+    return process_node_graphs_;
+  }
+  const std::forward_list<Edge>& edges() const { return all_edges_; }
+
+ private:
+  // Creates a node in the arena which is associated with the given
+  // |node_graph| and for the given |parent|.
+  Node* CreateNode(GlobalNodeGraph::Process* node_graph,
+                   GlobalNodeGraph::Node* parent);
+
+  std::forward_list<Node> all_nodes_;
+  std::forward_list<Edge> all_edges_;
+  IdNodeMap nodes_by_id_;
+  std::unique_ptr<GlobalNodeGraph::Process> shared_memory_graph_;
+  ProcessNodeGraphMap process_node_graphs_;
+  GlobalNodeGraph(const GlobalNodeGraph&) = delete;
+  GlobalNodeGraph& operator=(const GlobalNodeGraph&) = delete;
+};
+
+}  // namespace trace_processor
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_GRAPH_H_
diff --git a/include/perfetto/ext/trace_processor/importers/memory_tracker/graph_processor.h b/include/perfetto/ext/trace_processor/importers/memory_tracker/graph_processor.h
new file mode 100644
index 0000000..c9fea6b
--- /dev/null
+++ b/include/perfetto/ext/trace_processor/importers/memory_tracker/graph_processor.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_GRAPH_PROCESSOR_H_
+#define INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_GRAPH_PROCESSOR_H_
+
+#include <sys/types.h>
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+
+#include "perfetto/base/proc_utils.h"
+#include "perfetto/ext/trace_processor/importers/memory_tracker/graph.h"
+#include "perfetto/ext/trace_processor/importers/memory_tracker/raw_process_memory_node.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+class PERFETTO_EXPORT GraphProcessor {
+ public:
+  // This map owns the RawProcessMemoryNode instances it contains.
+  using RawMemoryNodeMap =
+      std::map<base::PlatformProcessId, std::unique_ptr<RawProcessMemoryNode>>;
+
+  static std::unique_ptr<GlobalNodeGraph> CreateMemoryGraph(
+      const RawMemoryNodeMap& process_nodes);
+
+  static void RemoveWeakNodesFromGraph(GlobalNodeGraph* global_graph);
+
+  static void AddOverheadsAndPropagateEntries(GlobalNodeGraph* global_graph);
+
+  static void CalculateSizesForGraph(GlobalNodeGraph* global_graph);
+
+  static std::map<base::PlatformProcessId, uint64_t>
+  ComputeSharedFootprintFromGraph(const GlobalNodeGraph& global_graph);
+
+ private:
+  friend class GraphProcessorTest;
+
+  static void CollectAllocatorNodes(const RawProcessMemoryNode& source,
+                                    GlobalNodeGraph* global_graph,
+                                    GlobalNodeGraph::Process* process_graph);
+
+  static void AddEdges(const RawProcessMemoryNode& source,
+                       GlobalNodeGraph* global_graph);
+
+  static void MarkImplicitWeakParentsRecursively(GlobalNodeGraph::Node* node);
+
+  static void MarkWeakOwnersAndChildrenRecursively(
+      GlobalNodeGraph::Node* node,
+      std::set<const GlobalNodeGraph::Node*>* nodes);
+
+  static void RemoveWeakNodesRecursively(GlobalNodeGraph::Node* parent);
+
+  static void AssignTracingOverhead(const std::string& allocator,
+                                    GlobalNodeGraph* global_graph,
+                                    GlobalNodeGraph::Process* process);
+
+  static GlobalNodeGraph::Node::Entry AggregateNumericWithNameForNode(
+      GlobalNodeGraph::Node* node,
+      const std::string& name);
+
+  static void AggregateNumericsRecursively(GlobalNodeGraph::Node* node);
+
+  static void PropagateNumericsAndDiagnosticsRecursively(
+      GlobalNodeGraph::Node* node);
+
+  static base::Optional<uint64_t> AggregateSizeForDescendantNode(
+      GlobalNodeGraph::Node* root,
+      GlobalNodeGraph::Node* descendant);
+
+  static void CalculateSizeForNode(GlobalNodeGraph::Node* node);
+
+  /**
+   * Calculate not-owned and not-owning sub-sizes of a memory allocator node
+   * from its children's (sub-)sizes.
+   *
+   * Not-owned sub-size refers to the aggregated memory of all children which
+   * is not owned by other MADs. Conversely, not-owning sub-size is the
+   * aggregated memory of all children which do not own another MAD. The
+   * diagram below illustrates these two concepts:
+   *
+   *     ROOT 1                         ROOT 2
+   *     size: 4                        size: 5
+   *     not-owned sub-size: 4          not-owned sub-size: 1 (!)
+   *     not-owning sub-size: 0 (!)     not-owning sub-size: 5
+   *
+   *      ^                              ^
+   *      |                              |
+   *
+   *     PARENT 1   ===== owns =====>   PARENT 2
+   *     size: 4                        size: 5
+   *     not-owned sub-size: 4          not-owned sub-size: 5
+   *     not-owning sub-size: 4         not-owning sub-size: 5
+   *
+   *      ^                              ^
+   *      |                              |
+   *
+   *     CHILD 1                        CHILD 2
+   *     size [given]: 4                size [given]: 5
+   *     not-owned sub-size: 4          not-owned sub-size: 5
+   *     not-owning sub-size: 4         not-owning sub-size: 5
+   *
+   * This method assumes that (1) the size of the node, its children, and its
+   * owners [see calculateSizes()] and (2) the not-owned and not-owning
+   * sub-sizes of both the children and owners of the node have already been
+   * calculated [depth-first post-order traversal].
+   */
+  static void CalculateNodeSubSizes(GlobalNodeGraph::Node* node);
+
+  /**
+   * Calculate owned and owning coefficients of a memory allocator node and
+   * its owners.
+   *
+   * The owning coefficient refers to the proportion of a node's not-owning
+   * sub-size which is attributed to the node (only relevant to owning MADs).
+   * Conversely, the owned coefficient is the proportion of a node's
+   * not-owned sub-size, which is attributed to it (only relevant to owned
+   * MADs).
+   *
+   * The not-owned size of the owned node is split among its owners in the
+   * order of the ownership importance as demonstrated by the following
+   * example:
+   *
+   *                                          memory allocator nodes
+   *                                   OWNED  OWNER1  OWNER2  OWNER3 OWNER4
+   *       not-owned sub-size [given]     10       -       -       - -
+   *      not-owning sub-size [given]      -       6       7       5 8
+   *               importance [given]      -       2       2       1 0
+   *    attributed not-owned sub-size      2       -       -       - -
+   *   attributed not-owning sub-size      -       3       4       0 1
+   *                owned coefficient   2/10       -       -       - -
+   *               owning coefficient      -     3/6     4/7     0/5 1/8
+   *
+   * Explanation: Firstly, 6 bytes are split equally among OWNER1 and OWNER2
+   * (highest importance). OWNER2 owns one more byte, so its attributed
+   * not-owning sub-size is 6/2 + 1 = 4 bytes. OWNER3 is attributed no size
+   * because it is smaller than the owners with higher priority. However,
+   * OWNER4 is larger, so it's attributed the difference 8 - 7 = 1 byte.
+   * Finally, 2 bytes remain unattributed and are hence kept in the OWNED
+   * node as attributed not-owned sub-size. The coefficients are then
+   * directly calculated as fractions of the sub-sizes and corresponding
+   * attributed sub-sizes.
+   *
+   * Note that we always assume that all ownerships of a node overlap (e.g.
+   * OWNER3 is subsumed by both OWNER1 and OWNER2). Hence, the table could
+   * be alternatively represented as follows:
+   *
+   *                                 owned memory range
+   *              0   1   2    3    4    5    6        7        8   9  10
+   *   Priority 2 |  OWNER1 + OWNER2 (split)  | OWNER2 |
+   *   Priority 1 | (already attributed) |
+   *   Priority 0 | - - -  (already attributed)  - - - | OWNER4 |
+   *    Remainder | - - - - - (already attributed) - - - - - -  | OWNED |
+   *
+   * This method assumes that (1) the size of the node [see calculateSizes()]
+   * and (2) the not-owned size of the node and not-owning sub-sizes of its
+   * owners [see the first step of calculateEffectiveSizes()] have already
+   * been calculated. Note that the method doesn't make any assumptions about
+   * the order in which nodes are visited.
+   */
+  static void CalculateNodeOwnershipCoefficient(GlobalNodeGraph::Node* node);
+
+  /**
+   * Calculate cumulative owned and owning coefficients of a memory allocator
+   * node from its (non-cumulative) owned and owning coefficients and the
+   * cumulative coefficients of its parent and/or owned node.
+   *
+   * The cumulative coefficients represent the total effect of all
+   * (non-strict) ancestor ownerships on a memory allocator node. The
+   * cumulative owned coefficient of a MAD can be calculated simply as:
+   *
+   *   cumulativeOwnedC(M) = ownedC(M) * cumulativeOwnedC(parent(M))
+   *
+   * This reflects the assumption that if a parent of a child MAD is
+   * (partially) owned, then the parent's owner also indirectly owns (a part
+   * of) the child MAD.
+   *
+   * The cumulative owning coefficient of a MAD depends on whether the MAD
+   * owns another node:
+   *
+   *                           [if M doesn't own another MAD]
+   *                         / cumulativeOwningC(parent(M))
+   *   cumulativeOwningC(M) =
+   *                         \ [if M owns another MAD]
+   *                           owningC(M) * cumulativeOwningC(owned(M))
+   *
+   * The reasoning behind the first case is similar to the one for cumulative
+   * owned coefficient above. The only difference is that we don't need to
+   * include the node's (non-cumulative) owning coefficient because it is
+   * implicitly 1.
+   *
+   * The formula for the second case is derived as follows: Since the MAD
+   * owns another node, its memory is not included in its parent's not-owning
+   * sub-size and hence shouldn't be affected by the parent's corresponding
+   * cumulative coefficient. Instead, the MAD indirectly owns everything
+   * owned by its owned node (and so it should be affected by the
+   * corresponding coefficient).
+   *
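+   * As a worked example (values assumed for illustration): if ownedC(M) = 0.8
+   * and cumulativeOwnedC(parent(M)) = 0.5, then cumulativeOwnedC(M) = 0.4.
+   * Similarly, if M owns another MAD with owningC(M) = 0.5 and
+   * cumulativeOwningC(owned(M)) = 0.5, then cumulativeOwningC(M) = 0.25.
+   *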
+   * Note that undefined coefficients (and coefficients of non-existent
+   * nodes) are implicitly assumed to be 1.
+   *
+   * This method assumes that (1) the size of the node [see calculateSizes()],
+   * (2) the (non-cumulative) owned and owning coefficients of the node [see
+   * the second step of calculateEffectiveSizes()], and (3) the cumulative
+   * coefficients of the node's parent and owned MADs (if present)
+   * [depth-first pre-order traversal] have already been calculated.
+   */
+  static void CalculateNodeCumulativeOwnershipCoefficient(
+      GlobalNodeGraph::Node* node);
+
+  /**
+   * Calculate the effective size of a memory allocator node.
+   *
+   * In order to simplify the (already complex) calculation, we use the fact
+   * that effective size is cumulative (unlike regular size), i.e. the
+   * effective size of a non-leaf node is equal to the sum of effective sizes
+   * of its children. The effective size of a leaf MAD is calculated as:
+   *
+   *   effectiveSize(M) = size(M) * cumulativeOwningC(M) * cumulativeOwnedC(M)
+   *
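+   * For instance (illustrative numbers): a leaf MAD with size(M) = 100,
+   * cumulativeOwningC(M) = 0.5 and cumulativeOwnedC(M) = 0.8 has an
+   * effective size of 100 * 0.5 * 0.8 = 40.
+   *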
+   * This method assumes that (1) the size of the node and its children [see
+   * calculateSizes()] and (2) the cumulative owning and owned coefficients
+   * of the node (if it's a leaf node) [see the third step of
+   * calculateEffectiveSizes()] or the effective sizes of its children (if
+   * it's a non-leaf node) [depth-first post-order traversal] have already
+   * been calculated.
+   */
+  static void CalculateNodeEffectiveSize(GlobalNodeGraph::Node* node);
+};
+
+}  // namespace trace_processor
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_GRAPH_PROCESSOR_H_
diff --git a/include/perfetto/ext/trace_processor/importers/memory_tracker/memory_allocator_node_id.h b/include/perfetto/ext/trace_processor/importers/memory_tracker/memory_allocator_node_id.h
new file mode 100644
index 0000000..f4899d5
--- /dev/null
+++ b/include/perfetto/ext/trace_processor/importers/memory_tracker/memory_allocator_node_id.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_MEMORY_ALLOCATOR_NODE_ID_H_
+#define INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_MEMORY_ALLOCATOR_NODE_ID_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+class PERFETTO_EXPORT MemoryAllocatorNodeId {
+ public:
+  MemoryAllocatorNodeId();
+  explicit MemoryAllocatorNodeId(uint64_t id);
+
+  uint64_t ToUint64() const { return id_; }
+
+  // Returns a (hex-encoded) string representation of the id.
+  std::string ToString() const;
+
+  bool empty() const { return id_ == 0u; }
+
+  bool operator==(const MemoryAllocatorNodeId& other) const {
+    return id_ == other.id_;
+  }
+
+  bool operator!=(const MemoryAllocatorNodeId& other) const {
+    return !(*this == other);
+  }
+
+  bool operator<(const MemoryAllocatorNodeId& other) const {
+    return id_ < other.id_;
+  }
+
+ private:
+  uint64_t id_;
+
+  // Deliberately copy-able.
+};
+
+}  // namespace trace_processor
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_MEMORY_ALLOCATOR_NODE_ID_H_
diff --git a/include/perfetto/ext/trace_processor/importers/memory_tracker/memory_graph_edge.h b/include/perfetto/ext/trace_processor/importers/memory_tracker/memory_graph_edge.h
new file mode 100644
index 0000000..6f7d4fe
--- /dev/null
+++ b/include/perfetto/ext/trace_processor/importers/memory_tracker/memory_graph_edge.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_MEMORY_GRAPH_EDGE_H_
+#define INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_MEMORY_GRAPH_EDGE_H_
+
+#include <stdint.h>
+
+#include "perfetto/base/export.h"
+#include "perfetto/ext/trace_processor/importers/memory_tracker/memory_allocator_node_id.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+class PERFETTO_EXPORT MemoryGraphEdge {
+ public:
+  MemoryGraphEdge(MemoryAllocatorNodeId s,
+                  MemoryAllocatorNodeId t,
+                  int i,
+                  bool o)
+      : source(s), target(t), importance(i), overridable(o) {}
+
+  MemoryGraphEdge& operator=(const MemoryGraphEdge& edge) {
+    source = edge.source;
+    target = edge.target;
+    importance = edge.importance;
+    overridable = edge.overridable;
+    return *this;
+  }
+
+  MemoryAllocatorNodeId source;
+  MemoryAllocatorNodeId target;
+  int importance;
+  bool overridable;
+
+  // Deliberately copy-able.
+};
+
+}  // namespace trace_processor
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_MEMORY_GRAPH_EDGE_H_
diff --git a/include/perfetto/ext/trace_processor/importers/memory_tracker/raw_memory_graph_node.h b/include/perfetto/ext/trace_processor/importers/memory_tracker/raw_memory_graph_node.h
new file mode 100644
index 0000000..e865afe
--- /dev/null
+++ b/include/perfetto/ext/trace_processor/importers/memory_tracker/raw_memory_graph_node.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_RAW_MEMORY_GRAPH_NODE_H_
+#define INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_RAW_MEMORY_GRAPH_NODE_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "perfetto/base/export.h"
+#include "perfetto/ext/trace_processor/importers/memory_tracker/memory_allocator_node_id.h"
+#include "perfetto/ext/trace_processor/importers/memory_tracker/memory_graph_edge.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+// Describes the level of detail of the memory graph.
+enum class LevelOfDetail : uint32_t {
+  kFirst,
+
+  // For background tracing mode. Node generation time is quick, and typically
+  // just the totals are expected. Suballocations need not be specified. Node
+  // names must contain only pre-defined strings; string arguments cannot be added.
+  kBackground = kFirst,
+
+  // For the levels below, MemoryNodeProvider instances must guarantee that the
+  // total size reported in the root node is consistent. Only the granularity of
+  // the child MemoryAllocatorNode(s) differs with the levels.
+
+  // Few entries, typically a fixed number, per node.
+  kLight,
+
+  // Unrestricted amount of entries per node.
+  kDetailed,
+
+  kLast = kDetailed
+};
+
+// Data model for user-land memory nodes.
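+//
+// Example of constructing a node (illustrative values only):
+//
+//   std::vector<RawMemoryGraphNode::MemoryNodeEntry> entries;
+//   entries.emplace_back(RawMemoryGraphNode::kNameSize,
+//                        RawMemoryGraphNode::kUnitsBytes, 4096u);
+//   RawMemoryGraphNode node("malloc/allocated_objects",
+//                           LevelOfDetail::kDetailed,
+//                           MemoryAllocatorNodeId(1), std::move(entries));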
+class PERFETTO_EXPORT RawMemoryGraphNode {
+ public:
+  enum Flags {
+    kDefault = 0,
+
+    // A node marked weak will be discarded if no ownership edge exists
+    // from a non-weak node.
+    kWeak = 1 << 0,
+  };
+
+  // In the UI table each MemoryAllocatorNode becomes
+  // a row and each Entry generates a column (if it doesn't already
+  // exist).
+  struct PERFETTO_EXPORT MemoryNodeEntry {
+    enum EntryType {
+      kUint64,
+      kString,
+    };
+
+    MemoryNodeEntry(const std::string& name,
+                    const std::string& units,
+                    uint64_t value);
+    MemoryNodeEntry(const std::string& name,
+                    const std::string& units,
+                    const std::string& value);
+
+    bool operator==(const MemoryNodeEntry& rhs) const;
+
+    std::string name;
+    std::string units;
+
+    EntryType entry_type;
+
+    uint64_t value_uint64;
+    std::string value_string;
+  };
+
+  RawMemoryGraphNode(const std::string& absolute_name,
+                     LevelOfDetail level,
+                     MemoryAllocatorNodeId id);
+
+  RawMemoryGraphNode(
+      const std::string& absolute_name,
+      LevelOfDetail level,
+      MemoryAllocatorNodeId id,
+      std::vector<RawMemoryGraphNode::MemoryNodeEntry>&& entries);
+
+  // Standard attribute |name|s for the AddScalar and AddString() methods.
+  static const char kNameSize[];         // To represent allocated space.
+  static const char kNameObjectCount[];  // To represent number of objects.
+
+  // Standard attribute |unit|s for the AddScalar and AddString() methods.
+  static const char kUnitsBytes[];    // Unit name to represent bytes.
+  static const char kUnitsObjects[];  // Unit name to represent #objects.
+
+  // Constants used only internally and by tests.
+  static const char kTypeScalar[];  // Type name for scalar attributes.
+  static const char kTypeString[];  // Type name for string attributes.
+
+  // |id| is an optional global node identifier, unique across all processes
+  // within the scope of a global node.
+  // Subsequent MemoryAllocatorNode(s) with the same |absolute_name| are
+  // expected to have the same id.
+  MemoryAllocatorNodeId id() const { return id_; }
+
+  // Absolute name, unique within the scope of an entire ProcessMemoryNode.
+  const std::string& absolute_name() const { return absolute_name_; }
+
+  const std::vector<MemoryNodeEntry>& entries() const { return entries_; }
+
+  LevelOfDetail level_of_detail() const { return level_of_detail_; }
+
+  // Use enum Flags to set values.
+  void set_flags(int flags) { flags_ |= flags; }
+  void clear_flags(int flags) { flags_ &= ~flags; }
+  int flags() const { return flags_; }
+
+ private:
+  std::string absolute_name_;
+  LevelOfDetail level_of_detail_;
+  std::vector<MemoryNodeEntry> entries_;
+  MemoryAllocatorNodeId id_;
+
+  // A node marked weak will be discarded by TraceViewer.
+  int flags_;  // See enum Flags.
+};
+
+}  // namespace trace_processor
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_RAW_MEMORY_GRAPH_NODE_H_
diff --git a/include/perfetto/ext/trace_processor/importers/memory_tracker/raw_process_memory_node.h b/include/perfetto/ext/trace_processor/importers/memory_tracker/raw_process_memory_node.h
new file mode 100644
index 0000000..66b4e89
--- /dev/null
+++ b/include/perfetto/ext/trace_processor/importers/memory_tracker/raw_process_memory_node.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_RAW_PROCESS_MEMORY_NODE_H_
+#define INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_RAW_PROCESS_MEMORY_NODE_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "perfetto/base/export.h"
+#include "perfetto/ext/trace_processor/importers/memory_tracker/memory_allocator_node_id.h"
+#include "perfetto/ext/trace_processor/importers/memory_tracker/memory_graph_edge.h"
+#include "perfetto/ext/trace_processor/importers/memory_tracker/raw_memory_graph_node.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+// RawProcessMemoryNode is a strongly typed container which holds the nodes
+// produced by the MemoryNodeProvider(s) for a specific process.
+class PERFETTO_EXPORT RawProcessMemoryNode {
+ public:
+  // Maps allocator node absolute names (allocator_name/heap/subheap) to
+  // MemoryAllocatorNode instances.
+  using MemoryNodesMap =
+      std::map<std::string, std::unique_ptr<RawMemoryGraphNode>>;
+
+  // Stores allocator node edges indexed by source allocator node GUID.
+  using AllocatorNodeEdgesMap =
+      std::map<MemoryAllocatorNodeId, std::unique_ptr<MemoryGraphEdge>>;
+
+  explicit RawProcessMemoryNode(
+      LevelOfDetail level_of_detail,
+      AllocatorNodeEdgesMap&& edges_map = AllocatorNodeEdgesMap{},
+      MemoryNodesMap&& nodes_map = MemoryNodesMap{});
+  RawProcessMemoryNode(RawProcessMemoryNode&&);
+  ~RawProcessMemoryNode();
+  RawProcessMemoryNode& operator=(RawProcessMemoryNode&&);
+
+  // Looks up a MemoryAllocatorNode given its allocator and heap names.
+  // Returns nullptr if not found.
+  RawMemoryGraphNode* GetAllocatorNode(const std::string& absolute_name) const;
+
+  // Returns the map of the MemoryAllocatorNodes added to this node.
+  const MemoryNodesMap& allocator_nodes() const { return allocator_nodes_; }
+
+  const AllocatorNodeEdgesMap& allocator_nodes_edges() const {
+    return allocator_nodes_edges_;
+  }
+
+  const LevelOfDetail& level_of_detail() const { return level_of_detail_; }
+
+ private:
+  LevelOfDetail level_of_detail_;
+
+  // Keeps track of relationships between MemoryAllocatorNode(s).
+  AllocatorNodeEdgesMap allocator_nodes_edges_;
+
+  // The allocator nodes owned by this node, indexed by absolute name.
+  MemoryNodesMap allocator_nodes_;
+
+  // This class is uncopyable and unassignable.
+  RawProcessMemoryNode(const RawProcessMemoryNode&) = delete;
+  RawProcessMemoryNode& operator=(const RawProcessMemoryNode&) = delete;
+};
+
+}  // namespace trace_processor
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACE_PROCESSOR_IMPORTERS_MEMORY_TRACKER_RAW_PROCESS_MEMORY_NODE_H_
diff --git a/src/trace_processor/BUILD.gn b/src/trace_processor/BUILD.gn
index bba2139..1d401a7 100644
--- a/src/trace_processor/BUILD.gn
+++ b/src/trace_processor/BUILD.gn
@@ -358,6 +358,9 @@
     "forwarding_trace_parser_unittest.cc",
     "importers/ftrace/sched_event_tracker_unittest.cc",
     "importers/fuchsia/fuchsia_trace_utils_unittest.cc",
+    "importers/memory_tracker/graph_processor_unittest.cc",
+    "importers/memory_tracker/graph_unittest.cc",
+    "importers/memory_tracker/raw_process_memory_node_unittest.cc",
     "importers/proto/args_table_utils_unittest.cc",
     "importers/proto/heap_graph_tracker_unittest.cc",
     "importers/proto/heap_profile_tracker_unittest.cc",
@@ -388,6 +391,7 @@
     "db:unittests",
     "importers:common",
     "importers:unittests",
+    "importers/memory_tracker:graph_processor",
     "rpc:unittests",
     "storage",
     "tables:unittests",
diff --git a/src/trace_processor/importers/memory_tracker/BUILD.gn b/src/trace_processor/importers/memory_tracker/BUILD.gn
new file mode 100644
index 0000000..4ac6669
--- /dev/null
+++ b/src/trace_processor/importers/memory_tracker/BUILD.gn
@@ -0,0 +1,31 @@
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../../gn/perfetto.gni")
+
+source_set("graph_processor") {
+  deps = [ "../../../../gn:default_deps" ]
+  public_deps = [
+    "../../../../include/perfetto/base",
+    "../../../../include/perfetto/ext/base",
+    "../../../../include/perfetto/ext/trace_processor/importers/memory_tracker",
+  ]
+  sources = [
+    "graph.cc",
+    "graph_processor.cc",
+    "memory_allocator_node_id.cc",
+    "raw_memory_graph_node.cc",
+    "raw_process_memory_node.cc",
+  ]
+}
diff --git a/src/trace_processor/importers/memory_tracker/graph.cc b/src/trace_processor/importers/memory_tracker/graph.cc
new file mode 100644
index 0000000..d156da6
--- /dev/null
+++ b/src/trace_processor/importers/memory_tracker/graph.cc
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfetto/ext/trace_processor/importers/memory_tracker/graph.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+namespace {
+
+using Edge = GlobalNodeGraph::Edge;
+using PostOrderIterator = GlobalNodeGraph::PostOrderIterator;
+using PreOrderIterator = GlobalNodeGraph::PreOrderIterator;
+using Process = GlobalNodeGraph::Process;
+using Node = GlobalNodeGraph::Node;
+using perfetto::base::SplitString;
+
+}  // namespace
+
+GlobalNodeGraph::GlobalNodeGraph()
+    : shared_memory_graph_(
+          std::unique_ptr<Process>(new Process(kNullProcessId, this))) {}
+GlobalNodeGraph::~GlobalNodeGraph() {}
+
+Process* GlobalNodeGraph::CreateGraphForProcess(
+    base::PlatformProcessId process_id) {
+  auto id_to_node_iterator = process_node_graphs_.emplace(
+      process_id, std::unique_ptr<Process>(new Process(process_id, this)));
+  return id_to_node_iterator.first->second.get();
+}
+
+void GlobalNodeGraph::AddNodeOwnershipEdge(Node* owner,
+                                           Node* owned,
+                                           int importance) {
+  all_edges_.emplace_front(owner, owned, importance);
+  Edge* edge = &*all_edges_.begin();
+  owner->SetOwnsEdge(edge);
+  owned->AddOwnedByEdge(edge);
+}
+
+Node* GlobalNodeGraph::CreateNode(Process* process_graph, Node* parent) {
+  all_nodes_.emplace_front(process_graph, parent);
+  return &*all_nodes_.begin();
+}
+
+PreOrderIterator GlobalNodeGraph::VisitInDepthFirstPreOrder() {
+  std::vector<Node*> roots;
+  for (auto it = process_node_graphs_.rbegin();
+       it != process_node_graphs_.rend(); it++) {
+    roots.push_back(it->second->root());
+  }
+  roots.push_back(shared_memory_graph_->root());
+  return PreOrderIterator{std::move(roots)};
+}
+
+PostOrderIterator GlobalNodeGraph::VisitInDepthFirstPostOrder() {
+  std::vector<Node*> roots;
+  for (auto it = process_node_graphs_.rbegin();
+       it != process_node_graphs_.rend(); it++) {
+    roots.push_back(it->second->root());
+  }
+  roots.push_back(shared_memory_graph_->root());
+  return PostOrderIterator(std::move(roots));
+}
+
+Process::Process(base::PlatformProcessId pid, GlobalNodeGraph* global_graph)
+    : pid_(pid),
+      global_graph_(global_graph),
+      root_(global_graph->CreateNode(this, nullptr)) {}
+Process::~Process() {}
+
+Node* Process::CreateNode(MemoryAllocatorNodeId id,
+                          const std::string& path,
+                          bool weak) {
+  auto tokens = base::SplitString(path, "/");
+
+  // Perform a tree traversal, creating the nodes if they do not
+  // already exist on the path to the child.
+  Node* current = root_;
+  for (const auto& key : tokens) {
+    Node* parent = current;
+    current = current->GetChild(key);
+    if (!current) {
+      current = global_graph_->CreateNode(this, parent);
+      parent->InsertChild(key, current);
+    }
+  }
+
+  // The final node should have the weakness specified by the
+  // argument and also be considered explicit.
+  current->set_weak(weak);
+  current->set_explicit(true);
+
+  // The final node should also have the associated |id|.
+  current->set_id(id);
+
+  // Add to the global id map as well if it exists.
+  if (!id.empty())
+    global_graph_->nodes_by_id_.emplace(id, current);
+
+  return current;
+}
+
+Node* Process::FindNode(const std::string& path) {
+  auto tokens = base::SplitString(path, "/");
+
+  Node* current = root_;
+  for (const auto& key : tokens) {
+    current = current->GetChild(key);
+    if (!current)
+      return nullptr;
+  }
+  return current;
+}
+
+Node::Node(Process* node_graph, Node* parent)
+    : node_graph_(node_graph), parent_(parent), owns_edge_(nullptr) {}
+Node::~Node() {}
+
+Node* Node::GetChild(const std::string& name) const {
+  auto child = children_.find(name);
+  return child == children_.end() ? nullptr : child->second;
+}
+
+void Node::InsertChild(const std::string& name, Node* node) {
+  PERFETTO_DCHECK(node);
+  children_.emplace(name, node);
+}
+
+Node* Node::CreateChild(const std::string& name) {
+  Node* new_child = node_graph_->global_graph()->CreateNode(node_graph_, this);
+  InsertChild(name, new_child);
+  return new_child;
+}
+
+bool Node::IsDescendentOf(const Node& possible_parent) const {
+  const Node* current = this;
+  while (current != nullptr) {
+    if (current == &possible_parent)
+      return true;
+    current = current->parent();
+  }
+  return false;
+}
+
+void Node::AddOwnedByEdge(Edge* edge) {
+  owned_by_edges_.push_back(edge);
+}
+
+void Node::SetOwnsEdge(Edge* owns_edge) {
+  owns_edge_ = owns_edge;
+}
+
+void Node::AddEntry(const std::string& name,
+                    Node::Entry::ScalarUnits units,
+                    uint64_t value) {
+  entries_.emplace(name, Node::Entry(units, value));
+}
+
+void Node::AddEntry(const std::string& name, const std::string& value) {
+  entries_.emplace(name, Node::Entry(value));
+}
+
+Node::Entry::Entry(Entry::ScalarUnits units2, uint64_t value)
+    : type(Node::Entry::Type::kUInt64), units(units2), value_uint64(value) {}
+
+Node::Entry::Entry(const std::string& value)
+    : type(Node::Entry::Type::kString),
+      units(Node::Entry::ScalarUnits::kObjects),
+      value_string(value),
+      value_uint64(0) {}
+
+Edge::Edge(Node* source, Node* target, int priority)
+    : source_(source), target_(target), priority_(priority) {}
+
+PreOrderIterator::PreOrderIterator(std::vector<Node*>&& roots)
+    : to_visit_(std::move(roots)) {}
+PreOrderIterator::PreOrderIterator(PreOrderIterator&& other) = default;
+PreOrderIterator::~PreOrderIterator() {}
+
+// Yields the next node in the DFS pre-order traversal.
+Node* PreOrderIterator::next() {
+  while (!to_visit_.empty()) {
+    // Retain a pointer to the node at the top and remove it from stack.
+    Node* node = to_visit_.back();
+    to_visit_.pop_back();
+
+    // If the node has already been visited, don't visit it again.
+    if (visited_.count(node) != 0)
+      continue;
+
+    // If we haven't visited the node which this node owns then wait for that.
+    if (node->owns_edge() && visited_.count(node->owns_edge()->target()) == 0)
+      continue;
+
+    // If we haven't visited the node's parent then wait for that.
+    if (node->parent() && visited_.count(node->parent()) == 0)
+      continue;
+
+    // Visit all children of this node.
+    for (auto it = node->children()->rbegin(); it != node->children()->rend();
+         it++) {
+      to_visit_.push_back(it->second);
+    }
+
+    // Visit all owners of this node.
+    for (auto it = node->owned_by_edges()->rbegin();
+         it != node->owned_by_edges()->rend(); it++) {
+      to_visit_.push_back((*it)->source());
+    }
+
+    // Add this node to the visited set.
+    visited_.insert(node);
+    return node;
+  }
+  return nullptr;
+}
+
+PostOrderIterator::PostOrderIterator(std::vector<Node*>&& roots)
+    : to_visit_(std::move(roots)) {}
+PostOrderIterator::PostOrderIterator(PostOrderIterator&& other) = default;
+PostOrderIterator::~PostOrderIterator() = default;
+
+// Yields the next node in the DFS post-order traversal.
+Node* PostOrderIterator::next() {
+  while (!to_visit_.empty()) {
+    // Retain a pointer to the node at the top and remove it from stack.
+    Node* node = to_visit_.back();
+    to_visit_.pop_back();
+
+    // If the node has already been visited, don't visit it again.
+    if (visited_.count(node) != 0)
+      continue;
+
+    // If the node is at the top of the path, we have already looked
+    // at its children and owners.
+    if (!path_.empty() && path_.back() == node) {
+      // Mark the current node as visited so we don't visit again.
+      visited_.insert(node);
+
+      // The current node is no longer on the path.
+      path_.pop_back();
+
+      return node;
+    }
+
+    // If the node is not at the top of the path, it should also certainly not
+    // be anywhere else in the path. If it is, there is a cycle in the graph.
+    path_.push_back(node);
+
+    // Add this node back to the queue of nodes to visit.
+    to_visit_.push_back(node);
+
+    // Visit all children of this node.
+    for (auto it = node->children()->rbegin(); it != node->children()->rend();
+         it++) {
+      to_visit_.push_back(it->second);
+    }
+
+    // Visit all owners of this node.
+    for (auto it = node->owned_by_edges()->rbegin();
+         it != node->owned_by_edges()->rend(); it++) {
+      to_visit_.push_back((*it)->source());
+    }
+  }
+  return nullptr;
+}
+
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/importers/memory_tracker/graph_processor.cc b/src/trace_processor/importers/memory_tracker/graph_processor.cc
new file mode 100644
index 0000000..c97dc19
--- /dev/null
+++ b/src/trace_processor/importers/memory_tracker/graph_processor.cc
@@ -0,0 +1,801 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfetto/ext/trace_processor/importers/memory_tracker/graph_processor.h"
+
+#include <list>
+
+namespace perfetto {
+namespace trace_processor {
+
+using Edge = GlobalNodeGraph::Edge;
+using Node = GlobalNodeGraph::Node;
+using Process = GlobalNodeGraph::Process;
+
+namespace {
+
+const char kSharedMemoryRootNode[] = "shared_memory";
+const char kSizeEntryName[] = "size";
+const char kEffectiveSizeEntryName[] = "effective_size";
+
+Node::Entry::ScalarUnits EntryUnitsFromString(const std::string& units) {
+  if (units == RawMemoryGraphNode::kUnitsBytes) {
+    return Node::Entry::ScalarUnits::kBytes;
+  } else if (units == RawMemoryGraphNode::kUnitsObjects) {
+    return Node::Entry::ScalarUnits::kObjects;
+  } else {
+    // Invalid units so we just return a value of the correct type.
+    return Node::Entry::ScalarUnits::kObjects;
+  }
+}
+
+base::Optional<uint64_t> GetSizeEntryOfNode(Node* node) {
+  auto size_it = node->entries()->find(kSizeEntryName);
+  if (size_it == node->entries()->end())
+    return base::nullopt;
+
+  PERFETTO_DCHECK(size_it->second.type == Node::Entry::Type::kUInt64);
+  PERFETTO_DCHECK(size_it->second.units == Node::Entry::ScalarUnits::kBytes);
+  return base::Optional<uint64_t>(size_it->second.value_uint64);
+}
+
+}  // namespace
+
+// static
+std::unique_ptr<GlobalNodeGraph> GraphProcessor::CreateMemoryGraph(
+    const GraphProcessor::RawMemoryNodeMap& process_nodes) {
+  auto global_graph = std::unique_ptr<GlobalNodeGraph>(new GlobalNodeGraph());
+
+  // First pass: collect allocator nodes into a graph and populate them
+  // with entries.
+  for (const auto& pid_to_node : process_nodes) {
+    // There can be null entries in the map; simply filter these out.
+    if (!pid_to_node.second)
+      continue;
+
+    auto* graph = global_graph->CreateGraphForProcess(pid_to_node.first);
+    CollectAllocatorNodes(*pid_to_node.second, global_graph.get(), graph);
+  }
+
+  // Second pass: generate the graph of edges between the nodes.
+  for (const auto& pid_to_node : process_nodes) {
+    // There can be null entries in the map; simply filter these out.
+    if (!pid_to_node.second)
+      continue;
+
+    AddEdges(*pid_to_node.second, global_graph.get());
+  }
+
+  return global_graph;
+}
+
+// static
+void GraphProcessor::RemoveWeakNodesFromGraph(GlobalNodeGraph* global_graph) {
+  auto* global_root = global_graph->shared_memory_graph()->root();
+
+  // Third pass: recursively mark nodes as weak if they don't have an
+  // associated node and all their children are weak.
+  MarkImplicitWeakParentsRecursively(global_root);
+  for (const auto& pid_to_process : global_graph->process_node_graphs()) {
+    MarkImplicitWeakParentsRecursively(pid_to_process.second->root());
+  }
+
+  // Fourth pass: recursively mark nodes as weak if they own a node which is
+  // weak or if they have a parent who is weak.
+  {
+    std::set<const Node*> visited;
+    MarkWeakOwnersAndChildrenRecursively(global_root, &visited);
+    for (const auto& pid_to_process : global_graph->process_node_graphs()) {
+      MarkWeakOwnersAndChildrenRecursively(pid_to_process.second->root(),
+                                           &visited);
+    }
+  }
+
+  // Fifth pass: remove all nodes which are weak (including their descendants)
+  // and clean owned by edges to match.
+  RemoveWeakNodesRecursively(global_root);
+  for (const auto& pid_to_process : global_graph->process_node_graphs()) {
+    RemoveWeakNodesRecursively(pid_to_process.second->root());
+  }
+}
+
+// static
+void GraphProcessor::AddOverheadsAndPropagateEntries(
+    GlobalNodeGraph* global_graph) {
+  // Sixth pass: account for tracing overhead in system memory allocators.
+  for (auto& pid_to_process : global_graph->process_node_graphs()) {
+    Process* process = pid_to_process.second.get();
+    if (process->FindNode("winheap")) {
+      AssignTracingOverhead("winheap", global_graph,
+                            pid_to_process.second.get());
+    } else if (process->FindNode("malloc")) {
+      AssignTracingOverhead("malloc", global_graph,
+                            pid_to_process.second.get());
+    }
+  }
+
+  // Seventh pass: aggregate non-size integer entries into parents and
+  // propagate string and int entries for the shared graph.
+  auto* global_root = global_graph->shared_memory_graph()->root();
+  AggregateNumericsRecursively(global_root);
+  PropagateNumericsAndDiagnosticsRecursively(global_root);
+  for (auto& pid_to_process : global_graph->process_node_graphs()) {
+    AggregateNumericsRecursively(pid_to_process.second->root());
+  }
+}
+
+// static
+void GraphProcessor::CalculateSizesForGraph(GlobalNodeGraph* global_graph) {
+  // Eighth pass: calculate the size field for nodes by considering the sizes
+  // of their children and owners.
+  {
+    auto it = global_graph->VisitInDepthFirstPostOrder();
+    while (Node* node = it.next()) {
+      CalculateSizeForNode(node);
+    }
+  }
+
+  // Ninth pass: Calculate not-owned and not-owning sub-sizes of all nodes.
+  {
+    auto it = global_graph->VisitInDepthFirstPostOrder();
+    while (Node* node = it.next()) {
+      CalculateNodeSubSizes(node);
+    }
+  }
+
+  // Tenth pass: Calculate owned and owning coefficients of owned and owner
+  // nodes.
+  {
+    auto it = global_graph->VisitInDepthFirstPostOrder();
+    while (Node* node = it.next()) {
+      CalculateNodeOwnershipCoefficient(node);
+    }
+  }
+
+  // Eleventh pass: Calculate cumulative owned and owning coefficients of all
+  // nodes.
+  {
+    auto it = global_graph->VisitInDepthFirstPreOrder();
+    while (Node* node = it.next()) {
+      CalculateNodeCumulativeOwnershipCoefficient(node);
+    }
+  }
+
+  // Twelfth pass: Calculate the effective sizes of all nodes.
+  {
+    auto it = global_graph->VisitInDepthFirstPostOrder();
+    while (Node* node = it.next()) {
+      CalculateNodeEffectiveSize(node);
+    }
+  }
+}
+
+// static
+std::map<base::PlatformProcessId, uint64_t>
+GraphProcessor::ComputeSharedFootprintFromGraph(
+    const GlobalNodeGraph& global_graph) {
+  // Go through all nodes associated with global nodes and find if they are
+  // owned by shared memory nodes.
+  Node* global_root =
+      global_graph.shared_memory_graph()->root()->GetChild("global");
+
+  // If there are no global nodes then just return an empty map with no data.
+  if (!global_root)
+    return std::map<base::PlatformProcessId, uint64_t>();
+
+  struct GlobalNodeOwners {
+    std::list<Edge*> edges;
+    int max_priority = 0;
+  };
+
+  std::map<Node*, GlobalNodeOwners> global_node_to_shared_owners;
+  for (const auto& path_to_child : *global_root->children()) {
+    // The path of this node is something like "global/foo".
+    Node* global_node = path_to_child.second;
+
+    // If there's no size to attribute, there's no point in propagating
+    // anything.
+    if (global_node->entries()->count(kSizeEntryName) == 0)
+      continue;
+
+    for (auto* edge : *global_node->owned_by_edges()) {
+      // Check whether the source node's path starts with "shared_memory/",
+      // which indicates shared memory.
+      Node* source_root = edge->source()->node_graph()->root();
+      const Node* current = edge->source();
+      PERFETTO_DCHECK(current != source_root);
+
+      // Traverse up until we hit the point where |current| holds a node which
+      // is the child of |source_root|.
+      while (current->parent() != source_root)
+        current = current->parent();
+
+      // If the source is indeed a shared memory node, add the edge to the map.
+      if (source_root->GetChild(kSharedMemoryRootNode) == current) {
+        GlobalNodeOwners* owners = &global_node_to_shared_owners[global_node];
+        owners->edges.push_back(edge);
+        owners->max_priority = std::max(owners->max_priority, edge->priority());
+      }
+    }
+  }
+
+  // Go through the map and leave only the edges which have the maximum
+  // priority.
+  for (auto& global_to_shared_edges : global_node_to_shared_owners) {
+    int max_priority = global_to_shared_edges.second.max_priority;
+    global_to_shared_edges.second.edges.remove_if(
+        [max_priority](Edge* edge) { return edge->priority() < max_priority; });
+  }
+
+  // Compute the footprints by distributing the memory of the nodes
+  // among the processes which have edges left.
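+  // Illustrative example (mirrors the unit tests): a 100-byte global node
+  // owned with equal priority by shared-memory nodes in pids 1 and 2 results
+  // in 50 bytes of shared footprint being attributed to each pid.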
+  std::map<base::PlatformProcessId, uint64_t> pid_to_shared_footprint;
+  for (const auto& global_to_shared_edges : global_node_to_shared_owners) {
+    Node* node = global_to_shared_edges.first;
+    const auto& edges = global_to_shared_edges.second.edges;
+
+    const Node::Entry& size_entry =
+        node->entries()->find(kSizeEntryName)->second;
+    PERFETTO_DCHECK(size_entry.type == Node::Entry::kUInt64);
+
+    uint64_t size_per_process = size_entry.value_uint64 / edges.size();
+    for (auto* edge : edges) {
+      base::PlatformProcessId pid = edge->source()->node_graph()->pid();
+      pid_to_shared_footprint[pid] += size_per_process;
+    }
+  }
+
+  return pid_to_shared_footprint;
+}
+
+// static
+void GraphProcessor::CollectAllocatorNodes(const RawProcessMemoryNode& source,
+                                           GlobalNodeGraph* global_graph,
+                                           Process* process_graph) {
+  // Turn each raw allocator node into a graph node in the appropriate
+  // process graph or the shared (global) graph.
+  for (const auto& path_to_node : source.allocator_nodes()) {
+    const std::string& path = path_to_node.first;
+    const RawMemoryGraphNode& raw_node = *path_to_node.second;
+
+    // All global nodes (i.e. those starting with global/) should be redirected
+    // to the shared graph.
+    bool is_global = base::StartsWith(path, "global/");
+    Process* process =
+        is_global ? global_graph->shared_memory_graph() : process_graph;
+
+    Node* node;
+    auto node_iterator = global_graph->nodes_by_id().find(raw_node.id());
+    if (node_iterator == global_graph->nodes_by_id().end()) {
+      // Storing whether the node is weak here allows later passes to decide
+      // whether or not the node should be removed.
+      bool is_weak = raw_node.flags() & RawMemoryGraphNode::Flags::kWeak;
+      node = process->CreateNode(raw_node.id(), path, is_weak);
+    } else {
+      node = node_iterator->second;
+
+      PERFETTO_DCHECK(node == process->FindNode(path));
+
+      PERFETTO_DCHECK(is_global);
+    }
+
+    // Copy any entries not already present into the node.
+    for (auto& entry : raw_node.entries()) {
+      switch (entry.entry_type) {
+        case RawMemoryGraphNode::MemoryNodeEntry::EntryType::kUint64:
+          node->AddEntry(entry.name, EntryUnitsFromString(entry.units),
+                         entry.value_uint64);
+          break;
+        case RawMemoryGraphNode::MemoryNodeEntry::EntryType::kString:
+          node->AddEntry(entry.name, entry.value_string);
+          break;
+      }
+    }
+  }
+}
+
+// static
+void GraphProcessor::AddEdges(const RawProcessMemoryNode& source,
+                              GlobalNodeGraph* global_graph) {
+  const auto& nodes_by_id = global_graph->nodes_by_id();
+  for (const auto& id_to_edge : source.allocator_nodes_edges()) {
+    auto& edge = id_to_edge.second;
+
+    // Find the source and target nodes in the global map by id.
+    auto source_it = nodes_by_id.find(edge->source);
+    auto target_it = nodes_by_id.find(edge->target);
+
+    if (source_it == nodes_by_id.end()) {
+      // If the source is missing then simply pretend the edge never existed,
+      // leaving the memory attributed to the target (if it exists).
+      continue;
+    } else if (target_it == nodes_by_id.end()) {
+      // If the target is lost but the source is present, then also ignore
+      // this edge for now.
+      // TODO(lalitm): see crbug.com/770712 for the permanent fix for this
+      // issue.
+      continue;
+    } else {
+      // Add an edge indicating the source node owns the memory of the
+      // target node with the given importance of the edge.
+      global_graph->AddNodeOwnershipEdge(source_it->second, target_it->second,
+                                         edge->importance);
+    }
+  }
+}
+
+// static
+void GraphProcessor::MarkImplicitWeakParentsRecursively(Node* node) {
+  // Ensure that we aren't in a bad state: an implicit node with no children
+  // should not exist (unless it is the root node).
+  PERFETTO_DCHECK(node->is_explicit() || !node->children()->empty() ||
+                  !node->parent());
+
+  // Check that at this stage, any node which is weak is only so because
+  // it was explicitly created as such.
+  PERFETTO_DCHECK(!node->is_weak() || node->is_explicit());
+
+  // If a node is already weak then all children will be marked weak at a
+  // later stage.
+  if (node->is_weak())
+    return;
+
+  // Recurse into each child and find out if all the children of this node are
+  // weak.
+  bool all_children_weak = true;
+  for (const auto& path_to_child : *node->children()) {
+    MarkImplicitWeakParentsRecursively(path_to_child.second);
+    all_children_weak = all_children_weak && path_to_child.second->is_weak();
+  }
+
+  // If all the children are weak and the parent is only implicit, then we
+  // consider the parent weak as well and will later remove it.
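+  // For example, an implicit parent whose children are all explicitly weak is
+  // itself marked weak (see MarkWeakParentsSimple in the unit tests).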
+  node->set_weak(!node->is_explicit() && all_children_weak);
+}
+
+// static
+void GraphProcessor::MarkWeakOwnersAndChildrenRecursively(
+    Node* node,
+    std::set<const Node*>* visited) {
+  // If we've already visited this node then nothing to do.
+  if (visited->count(node) != 0)
+    return;
+
+  // If we haven't visited the node which this node owns then wait for that.
+  if (node->owns_edge() && visited->count(node->owns_edge()->target()) == 0)
+    return;
+
+  // If we haven't visited the node's parent then wait for that.
+  if (node->parent() && visited->count(node->parent()) == 0)
+    return;
+
+  // If either the node we own or our parent is weak, then mark this node
+  // as weak.
+  if ((node->owns_edge() && node->owns_edge()->target()->is_weak()) ||
+      (node->parent() && node->parent()->is_weak())) {
+    node->set_weak(true);
+  }
+  visited->insert(node);
+
+  // Recurse into each owner node to mark any other nodes.
+  for (auto* owned_by_edge : *node->owned_by_edges()) {
+    MarkWeakOwnersAndChildrenRecursively(owned_by_edge->source(), visited);
+  }
+
+  // Recurse into each child to mark any other nodes.
+  for (const auto& path_to_child : *node->children()) {
+    MarkWeakOwnersAndChildrenRecursively(path_to_child.second, visited);
+  }
+}
+
+// static
+void GraphProcessor::RemoveWeakNodesRecursively(Node* node) {
+  auto* children = node->children();
+  for (auto child_it = children->begin(); child_it != children->end();) {
+    Node* child = child_it->second;
+
+    // If the node is weak, remove it. This automatically makes all
+    // descendants unreachable from the parent. Any node owning this one
+    // will have been marked weak earlier in
+    // |MarkWeakOwnersAndChildrenRecursively| and so will also be removed
+    // by this method at some point.
+    if (child->is_weak()) {
+      child_it = children->erase(child_it);
+      continue;
+    }
+
+    // We should never be in a situation where we're about to
+    // keep a node which owns a weak node (which will be/has been
+    // removed).
+    PERFETTO_DCHECK(!child->owns_edge() ||
+                    !child->owns_edge()->target()->is_weak());
+
+    // Descend and remove all weak child nodes.
+    RemoveWeakNodesRecursively(child);
+
+    // Remove all edges with owner nodes which are weak.
+    std::vector<Edge*>* owned_by_edges = child->owned_by_edges();
+    auto new_end =
+        std::remove_if(owned_by_edges->begin(), owned_by_edges->end(),
+                       [](Edge* edge) { return edge->source()->is_weak(); });
+    owned_by_edges->erase(new_end, owned_by_edges->end());
+
+    ++child_it;
+  }
+}
+
+// static
+void GraphProcessor::AssignTracingOverhead(const std::string& allocator,
+                                           GlobalNodeGraph* global_graph,
+                                           Process* process) {
+  // This method should only be called if the allocator node exists.
+  PERFETTO_DCHECK(process->FindNode(allocator));
+
+  // Check that the tracing node exists and isn't already owning another node.
+  Node* tracing_node = process->FindNode("tracing");
+  if (!tracing_node)
+    return;
+
+  // This should be the first edge associated with the tracing node.
+  PERFETTO_DCHECK(!tracing_node->owns_edge());
+
+  // Create the node under the allocator to which tracing overhead can be
+  // assigned.
+  std::string child_name = allocator + "/allocated_objects/tracing_overhead";
+  Node* child_node = process->CreateNode(MemoryAllocatorNodeId(), child_name,
+                                         false /* weak */);
+
+  // Assign the overhead of tracing to the tracing node.
+  global_graph->AddNodeOwnershipEdge(tracing_node, child_node,
+                                     0 /* importance */);
+}
+
+// static
+Node::Entry GraphProcessor::AggregateNumericWithNameForNode(
+    Node* node,
+    const std::string& name) {
+  bool first = true;
+  Node::Entry::ScalarUnits units = Node::Entry::ScalarUnits::kObjects;
+  uint64_t aggregated = 0;
+  for (auto& path_to_child : *node->children()) {
+    auto* entries = path_to_child.second->entries();
+
+    // Retrieve the entry with the given column name.
+    auto name_to_entry_it = entries->find(name);
+    if (name_to_entry_it == entries->end())
+      continue;
+
+    // Extract the entry from the iterator.
+    const Node::Entry& entry = name_to_entry_it->second;
+
+    // Ensure that the entry is numeric.
+    PERFETTO_DCHECK(entry.type == Node::Entry::Type::kUInt64);
+
+    // Check that the units of every child's entry with the given name are the
+    // same (i.e. we don't get a count for one child and a size for another
+    // child). We do this by having a DCHECK that the units match the first
+    // child's units.
+    PERFETTO_DCHECK(first || units == entry.units);
+    units = entry.units;
+    aggregated += entry.value_uint64;
+    first = false;
+  }
+  return Node::Entry(units, aggregated);
+}
+
+// static
+void GraphProcessor::AggregateNumericsRecursively(Node* node) {
+  std::set<std::string> numeric_names;
+
+  for (const auto& path_to_child : *node->children()) {
+    AggregateNumericsRecursively(path_to_child.second);
+    for (const auto& name_to_entry : *path_to_child.second->entries()) {
+      const std::string& name = name_to_entry.first;
+      if (name_to_entry.second.type == Node::Entry::Type::kUInt64 &&
+          name != kSizeEntryName && name != kEffectiveSizeEntryName) {
+        numeric_names.insert(name);
+      }
+    }
+  }
+
+  for (auto& name : numeric_names) {
+    node->entries()->emplace(name, AggregateNumericWithNameForNode(node, name));
+  }
+}
+
+// static
+void GraphProcessor::PropagateNumericsAndDiagnosticsRecursively(Node* node) {
+  for (const auto& name_to_entry : *node->entries()) {
+    for (auto* edge : *node->owned_by_edges()) {
+      edge->source()->entries()->insert(name_to_entry);
+    }
+  }
+  for (const auto& path_to_child : *node->children()) {
+    PropagateNumericsAndDiagnosticsRecursively(path_to_child.second);
+  }
+}
+
+// static
+base::Optional<uint64_t> GraphProcessor::AggregateSizeForDescendantNode(
+    Node* root,
+    Node* descendant) {
+  Edge* owns_edge = descendant->owns_edge();
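+  // If |descendant| owns a node which is itself a descendant of |root|, its
+  // memory is already attributed to that owned node, so it contributes
+  // nothing to the aggregate here.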
+  if (owns_edge && owns_edge->target()->IsDescendentOf(*root))
+    return base::make_optional(0UL);
+
+  if (descendant->children()->size() == 0)
+    return GetSizeEntryOfNode(descendant).value_or(0ul);
+
+  base::Optional<uint64_t> size;
+  for (auto path_to_child : *descendant->children()) {
+    auto c_size = AggregateSizeForDescendantNode(root, path_to_child.second);
+    if (size) {
+      *size += c_size.value_or(0);
+    } else {
+      size = std::move(c_size);
+    }
+  }
+  return size;
+}
+
+// Assumes that this function has been called on all children and owner nodes.
+// static
+void GraphProcessor::CalculateSizeForNode(Node* node) {
+  // Get the size at the root node if it exists.
+  base::Optional<uint64_t> node_size = GetSizeEntryOfNode(node);
+
+  // Aggregate the size of all the child nodes.
+  base::Optional<uint64_t> aggregated_size;
+  for (auto path_to_child : *node->children()) {
+    auto c_size = AggregateSizeForDescendantNode(node, path_to_child.second);
+    if (aggregated_size) {
+      *aggregated_size += c_size.value_or(0ul);
+    } else {
+      aggregated_size = std::move(c_size);
+    }
+  }
+
+  // Check that if both aggregated and node sizes exist that the node size
+  // is bigger than the aggregated.
+  // TODO(lalitm): the following condition is triggered very often even though
+  // it is a warning in JS code. Find a way to add the warning to display in UI
+  // or to fix all instances where this is violated and then enable this check.
+  // PERFETTO_DCHECK(!node_size || !aggregated_size || *node_size >=
+  // *aggregated_size);
+
+  // Calculate the maximal size of an owner node.
+  base::Optional<uint64_t> max_owner_size;
+  for (auto* edge : *node->owned_by_edges()) {
+    auto o_size = GetSizeEntryOfNode(edge->source());
+    if (max_owner_size) {
+      *max_owner_size = std::max(o_size.value_or(0ul), *max_owner_size);
+    } else {
+      max_owner_size = std::move(o_size);
+    }
+  }
+
+  // Check that if both owner and node sizes exist that the node size
+  // is bigger than the owner.
+  // TODO(lalitm): the following condition is triggered very often even though
+  // it is a warning in JS code. Find a way to add the warning to display in UI
+  // or to fix all instances where this is violated and then enable this check.
+  // PERFETTO_DCHECK(!node_size || !max_owner_size || *node_size >=
+  // *max_owner_size);
+
+  // Clear out any existing size entry which may exist.
+  node->entries()->erase(kSizeEntryName);
+
+  // If no inference about size can be made then simply return.
+  if (!node_size && !aggregated_size && !max_owner_size)
+    return;
+
+  // Update the node with the new size entry.
+  uint64_t aggregated_size_value = aggregated_size.value_or(0ul);
+  uint64_t process_size =
+      std::max({node_size.value_or(0ul), aggregated_size_value,
+                max_owner_size.value_or(0ul)});
+  node->AddEntry(kSizeEntryName, Node::Entry::ScalarUnits::kBytes,
+                 process_size);
+
+  // If this is an intermediate node then add a ghost node which stores
+  // the size not accounted for by the children.
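+  // Illustrative example (mirrors the unit tests): a node of size 600 whose
+  // children aggregate to 512 gains an "<unspecified>" child of size 88.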
+  uint64_t unaccounted = process_size - aggregated_size_value;
+  if (unaccounted > 0 && !node->children()->empty()) {
+    Node* unspecified = node->CreateChild("<unspecified>");
+    unspecified->AddEntry(kSizeEntryName, Node::Entry::ScalarUnits::kBytes,
+                          unaccounted);
+  }
+}
+
+// Assumes that this function has been called on all children and owner nodes.
+// static
+void GraphProcessor::CalculateNodeSubSizes(Node* node) {
+  // Completely skip nodes with undefined size.
+  base::Optional<uint64_t> size_opt = GetSizeEntryOfNode(node);
+  if (!size_opt)
+    return;
+
+  // If the node is a leaf node, then both sub-sizes are equal to the size.
+  if (node->children()->empty()) {
+    node->add_not_owning_sub_size(*size_opt);
+    node->add_not_owned_sub_size(*size_opt);
+    return;
+  }
+
+  // Calculate this node's not-owning sub-size by summing up the not-owning
+  // sub-sizes of children which do not own another node.
+  for (const auto& path_to_child : *node->children()) {
+    if (path_to_child.second->owns_edge())
+      continue;
+    node->add_not_owning_sub_size(path_to_child.second->not_owning_sub_size());
+  }
+
+  // Calculate this node's not-owned sub-size.
+  for (const auto& path_to_child : *node->children()) {
+    Node* child = path_to_child.second;
+
+    // If the child node is not owned, then add its not-owned sub-size.
+    if (child->owned_by_edges()->empty()) {
+      node->add_not_owned_sub_size(child->not_owned_sub_size());
+      continue;
+    }
+
+    // If the child node is owned, then add the difference between its size
+    // and the largest owner.
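+    // For example (see the CalculateNodeSubSizes unit test), a 5-byte child
+    // owned by a 4-byte node contributes 5 - 4 = 1 byte here.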
+    uint64_t largest_owner_size = 0;
+    for (Edge* edge : *child->owned_by_edges()) {
+      uint64_t source_size = GetSizeEntryOfNode(edge->source()).value_or(0);
+      largest_owner_size = std::max(largest_owner_size, source_size);
+    }
+    uint64_t child_size = GetSizeEntryOfNode(child).value_or(0);
+    node->add_not_owned_sub_size(child_size - largest_owner_size);
+  }
+}
+
+// static
+void GraphProcessor::CalculateNodeOwnershipCoefficient(Node* node) {
+  // Completely skip nodes with undefined size.
+  base::Optional<uint64_t> size_opt = GetSizeEntryOfNode(node);
+  if (!size_opt)
+    return;
+
+  // We only need to consider owned nodes.
+  if (node->owned_by_edges()->empty())
+    return;
+
+  // Sort the owners in decreasing order of ownership priority and
+  // increasing order of not-owning sub-size (in case of equal priority).
+  std::vector<Edge*> owners = *node->owned_by_edges();
+  std::sort(owners.begin(), owners.end(), [](Edge* a, Edge* b) {
+    if (a->priority() == b->priority()) {
+      return a->source()->not_owning_sub_size() <
+             b->source()->not_owning_sub_size();
+    }
+    return b->priority() < a->priority();
+  });
+
+  // Loop over the list of owners and distribute the owned node's not-owned
+  // sub-size among them according to their ownership priority and
+  // not-owning sub-size.
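+  // Illustrative example (mirrors the unit tests): an owned node with a
+  // not-owned sub-size of 10 and owners with priorities {2, 2, 1, 0} and
+  // not-owning sub-sizes {6, 7, 5, 8} ends up with owning coefficients
+  // {3/6, 4/7, 0/5, 1/8} and an owned coefficient of 2/10.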
+  uint64_t already_attributed_sub_size = 0;
+  for (auto current_it = owners.begin(); current_it != owners.end();) {
+    // Find the position of the first owner with lower priority.
+    int current_priority = (*current_it)->priority();
+    auto next_it =
+        std::find_if(current_it, owners.end(), [current_priority](Edge* edge) {
+          return edge->priority() < current_priority;
+        });
+
+    // Compute the number of nodes which have the same priority as current.
+    size_t difference = static_cast<size_t>(std::distance(current_it, next_it));
+
+    // Visit the owners with the same priority in increasing order of
+    // not-owned sub-size, split the owned memory among them appropriately,
+    // and calculate their owning coefficients.
+    double attributed_not_owning_sub_size = 0;
+    for (; current_it != next_it; current_it++) {
+      uint64_t not_owning_sub_size =
+          (*current_it)->source()->not_owning_sub_size();
+      if (not_owning_sub_size > already_attributed_sub_size) {
+        attributed_not_owning_sub_size +=
+            static_cast<double>(not_owning_sub_size -
+                                already_attributed_sub_size) /
+            static_cast<double>(difference);
+        already_attributed_sub_size = not_owning_sub_size;
+      }
+
+      if (not_owning_sub_size != 0) {
+        double coeff = attributed_not_owning_sub_size /
+                       static_cast<double>(not_owning_sub_size);
+        (*current_it)->source()->set_owning_coefficient(coeff);
+      }
+      difference--;
+    }
+
+    // At the end of this loop, we should move to a node with a lower priority.
+    PERFETTO_DCHECK(current_it == next_it);
+  }
+
+  // Attribute the remainder of the owned node's not-owned sub-size to
+  // the node itself and calculate its owned coefficient.
+  uint64_t not_owned_sub_size = node->not_owned_sub_size();
+  if (not_owned_sub_size != 0) {
+    double remainder_sub_size =
+        static_cast<double>(not_owned_sub_size - already_attributed_sub_size);
+    node->set_owned_coefficient(remainder_sub_size /
+                                static_cast<double>(not_owned_sub_size));
+  }
+}
+
+// static
+void GraphProcessor::CalculateNodeCumulativeOwnershipCoefficient(Node* node) {
+  // Completely skip nodes with undefined size.
+  base::Optional<uint64_t> size_opt = GetSizeEntryOfNode(node);
+  if (!size_opt)
+    return;
+
+  double cumulative_owned_coefficient = node->owned_coefficient();
+  if (node->parent()) {
+    cumulative_owned_coefficient *=
+        node->parent()->cumulative_owned_coefficient();
+  }
+  node->set_cumulative_owned_coefficient(cumulative_owned_coefficient);
+
+  if (node->owns_edge()) {
+    node->set_cumulative_owning_coefficient(
+        node->owning_coefficient() *
+        node->owns_edge()->target()->cumulative_owning_coefficient());
+  } else if (node->parent()) {
+    node->set_cumulative_owning_coefficient(
+        node->parent()->cumulative_owning_coefficient());
+  } else {
+    node->set_cumulative_owning_coefficient(1);
+  }
+}
+
+// static
+void GraphProcessor::CalculateNodeEffectiveSize(Node* node) {
+  // Completely skip nodes with undefined size. As a result, each node will
+  // have defined effective size if and only if it has defined size.
+  base::Optional<uint64_t> size_opt = GetSizeEntryOfNode(node);
+  if (!size_opt) {
+    node->entries()->erase(kEffectiveSizeEntryName);
+    return;
+  }
+
+  uint64_t effective_size = 0;
+  if (node->children()->empty()) {
+    // Leaf node.
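+    // effective_size = size * cumulative_owning_coefficient *
+    // cumulative_owned_coefficient (truncated to a whole number of bytes).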
+    effective_size = static_cast<uint64_t>(
+        static_cast<double>(*size_opt) * node->cumulative_owning_coefficient() *
+        node->cumulative_owned_coefficient());
+  } else {
+    // Non-leaf node.
+    for (const auto& path_to_child : *node->children()) {
+      Node* child = path_to_child.second;
+      if (!GetSizeEntryOfNode(child))
+        continue;
+      effective_size +=
+          child->entries()->find(kEffectiveSizeEntryName)->second.value_uint64;
+    }
+  }
+  node->AddEntry(kEffectiveSizeEntryName, Node::Entry::ScalarUnits::kBytes,
+                 effective_size);
+}
+
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/importers/memory_tracker/graph_processor_unittest.cc b/src/trace_processor/importers/memory_tracker/graph_processor_unittest.cc
new file mode 100644
index 0000000..8d1cc31
--- /dev/null
+++ b/src/trace_processor/importers/memory_tracker/graph_processor_unittest.cc
@@ -0,0 +1,684 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfetto/ext/trace_processor/importers/memory_tracker/graph_processor.h"
+
+#include <stddef.h>
+
+#include <unordered_map>
+
+#include "perfetto/base/build_config.h"
+#include "test/gtest_and_gmock.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+using Edge = GlobalNodeGraph::Edge;
+using Node = GlobalNodeGraph::Node;
+using Process = GlobalNodeGraph::Process;
+
+namespace {
+
+const MemoryAllocatorNodeId kEmptyId;
+
+}  // namespace
+
+class GraphProcessorTest : public testing::Test {
+ public:
+  GraphProcessorTest() {}
+
+  void MarkImplicitWeakParentsRecursively(Node* node) {
+    GraphProcessor::MarkImplicitWeakParentsRecursively(node);
+  }
+
+  void MarkWeakOwnersAndChildrenRecursively(Node* node) {
+    std::set<const Node*> visited;
+    GraphProcessor::MarkWeakOwnersAndChildrenRecursively(node, &visited);
+  }
+
+  void RemoveWeakNodesRecursively(Node* node) {
+    GraphProcessor::RemoveWeakNodesRecursively(node);
+  }
+
+  void AssignTracingOverhead(const std::string& allocator,
+                             GlobalNodeGraph* global_graph,
+                             Process* process) {
+    GraphProcessor::AssignTracingOverhead(allocator, global_graph, process);
+  }
+
+  GlobalNodeGraph::Node::Entry AggregateNumericWithNameForNode(
+      Node* node,
+      const std::string& name) {
+    return GraphProcessor::AggregateNumericWithNameForNode(node, name);
+  }
+
+  void AggregateNumericsRecursively(Node* node) {
+    GraphProcessor::AggregateNumericsRecursively(node);
+  }
+
+  void PropagateNumericsAndDiagnosticsRecursively(Node* node) {
+    GraphProcessor::PropagateNumericsAndDiagnosticsRecursively(node);
+  }
+
+  base::Optional<uint64_t> AggregateSizeForDescendantNode(Node* root,
+                                                          Node* descendant) {
+    return GraphProcessor::AggregateSizeForDescendantNode(root, descendant);
+  }
+
+  void CalculateSizeForNode(Node* node) {
+    GraphProcessor::CalculateSizeForNode(node);
+  }
+
+  void CalculateNodeSubSizes(Node* node) {
+    GraphProcessor::CalculateNodeSubSizes(node);
+  }
+
+  void CalculateNodeOwnershipCoefficient(Node* node) {
+    GraphProcessor::CalculateNodeOwnershipCoefficient(node);
+  }
+
+  void CalculateNodeCumulativeOwnershipCoefficient(Node* node) {
+    GraphProcessor::CalculateNodeCumulativeOwnershipCoefficient(node);
+  }
+
+  void CalculateNodeEffectiveSize(Node* node) {
+    GraphProcessor::CalculateNodeEffectiveSize(node);
+  }
+
+ protected:
+  GlobalNodeGraph graph;
+};
+
+TEST_F(GraphProcessorTest, SmokeComputeMemoryGraph) {
+  std::map<base::PlatformProcessId, std::unique_ptr<RawProcessMemoryNode>>
+      process_nodes;
+
+  std::unique_ptr<RawMemoryGraphNode> source(new RawMemoryGraphNode(
+      "test1/test2/test3", LevelOfDetail::kDetailed, MemoryAllocatorNodeId(42),
+      std::vector<RawMemoryGraphNode::MemoryNodeEntry>{
+          {RawMemoryGraphNode::kNameSize, RawMemoryGraphNode::kUnitsBytes,
+           10}}));
+
+  std::unique_ptr<RawMemoryGraphNode> target(new RawMemoryGraphNode(
+      "target", LevelOfDetail::kDetailed, MemoryAllocatorNodeId(4242)));
+
+  std::unique_ptr<MemoryGraphEdge> edge(
+      new MemoryGraphEdge(source->id(), target->id(), 10, false));
+  RawProcessMemoryNode::AllocatorNodeEdgesMap edgesMap;
+  edgesMap.emplace(edge->source, std::move(edge));
+
+  RawProcessMemoryNode::MemoryNodesMap nodesMap;
+  nodesMap.emplace(source->absolute_name(), std::move(source));
+  nodesMap.emplace(target->absolute_name(), std::move(target));
+
+  auto pmd = std::unique_ptr<RawProcessMemoryNode>(new RawProcessMemoryNode(
+      LevelOfDetail::kDetailed, std::move(edgesMap), std::move(nodesMap)));
+  process_nodes.emplace(1, std::move(pmd));
+
+  auto global_node = GraphProcessor::CreateMemoryGraph(process_nodes);
+
+  ASSERT_EQ(1u, global_node->process_node_graphs().size());
+
+  auto id_to_node_it = global_node->process_node_graphs().find(1);
+  auto* first_child = id_to_node_it->second->FindNode("test1");
+  ASSERT_NE(first_child, nullptr);
+  ASSERT_EQ(first_child->parent(), id_to_node_it->second->root());
+
+  auto* second_child = first_child->GetChild("test2");
+  ASSERT_NE(second_child, nullptr);
+  ASSERT_EQ(second_child->parent(), first_child);
+
+  auto* third_child = second_child->GetChild("test3");
+  ASSERT_NE(third_child, nullptr);
+  ASSERT_EQ(third_child->parent(), second_child);
+
+  auto* direct = id_to_node_it->second->FindNode("test1/test2/test3");
+  ASSERT_EQ(third_child, direct);
+
+  ASSERT_EQ(third_child->entries()->size(), 1ul);
+
+  auto size = third_child->entries()->find(RawMemoryGraphNode::kNameSize);
+  ASSERT_EQ(10ul, size->second.value_uint64);
+
+  auto& edges = global_node->edges();
+  auto edge_it = edges.begin();
+  ASSERT_EQ(std::distance(edges.begin(), edges.end()), 1l);
+  ASSERT_EQ(edge_it->source(), direct);
+  ASSERT_EQ(edge_it->target(), id_to_node_it->second->FindNode("target"));
+  ASSERT_EQ(edge_it->priority(), 10);
+}
+
+TEST_F(GraphProcessorTest, ComputeSharedFootprintFromGraphSameImportance) {
+  Process* global_process = graph.shared_memory_graph();
+  Node* global_node = global_process->CreateNode(kEmptyId, "global/1", false);
+  global_node->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 100);
+
+  Process* first = graph.CreateGraphForProcess(1);
+  Node* shared_1 = first->CreateNode(kEmptyId, "shared_memory/1", false);
+
+  Process* second = graph.CreateGraphForProcess(2);
+  Node* shared_2 = second->CreateNode(kEmptyId, "shared_memory/2", false);
+
+  graph.AddNodeOwnershipEdge(shared_1, global_node, 1);
+  graph.AddNodeOwnershipEdge(shared_2, global_node, 1);
+
+  auto pid_to_sizes = GraphProcessor::ComputeSharedFootprintFromGraph(graph);
+  ASSERT_EQ(pid_to_sizes[1], 50ul);
+  ASSERT_EQ(pid_to_sizes[2], 50ul);
+}
+
+TEST_F(GraphProcessorTest, ComputeSharedFootprintFromGraphSomeDiffImportance) {
+  Process* global_process = graph.shared_memory_graph();
+
+  Node* global_node = global_process->CreateNode(kEmptyId, "global/1", false);
+  global_node->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 100);
+
+  Process* first = graph.CreateGraphForProcess(1);
+  Node* shared_1 = first->CreateNode(kEmptyId, "shared_memory/1", false);
+
+  Process* second = graph.CreateGraphForProcess(2);
+  Node* shared_2 = second->CreateNode(kEmptyId, "shared_memory/2", false);
+
+  Process* third = graph.CreateGraphForProcess(3);
+  Node* shared_3 = third->CreateNode(kEmptyId, "shared_memory/3", false);
+
+  Process* fourth = graph.CreateGraphForProcess(4);
+  Node* shared_4 = fourth->CreateNode(kEmptyId, "shared_memory/4", false);
+
+  Process* fifth = graph.CreateGraphForProcess(5);
+  Node* shared_5 = fifth->CreateNode(kEmptyId, "shared_memory/5", false);
+
+  graph.AddNodeOwnershipEdge(shared_1, global_node, 1);
+  graph.AddNodeOwnershipEdge(shared_2, global_node, 2);
+  graph.AddNodeOwnershipEdge(shared_3, global_node, 3);
+  graph.AddNodeOwnershipEdge(shared_4, global_node, 3);
+  graph.AddNodeOwnershipEdge(shared_5, global_node, 3);
+
+  auto pid_to_sizes = GraphProcessor::ComputeSharedFootprintFromGraph(graph);
+  ASSERT_EQ(pid_to_sizes[1], 0ul);
+  ASSERT_EQ(pid_to_sizes[2], 0ul);
+  ASSERT_EQ(pid_to_sizes[3], 33ul);
+  ASSERT_EQ(pid_to_sizes[4], 33ul);
+  ASSERT_EQ(pid_to_sizes[5], 33ul);
+}
+
+TEST_F(GraphProcessorTest, MarkWeakParentsSimple) {
+  Process* process = graph.CreateGraphForProcess(1);
+  Node* parent = process->CreateNode(kEmptyId, "parent", false);
+  Node* first = process->CreateNode(kEmptyId, "parent/first", true);
+  Node* second = process->CreateNode(kEmptyId, "parent/second", false);
+
+  // Case where one child is not weak.
+  parent->set_explicit(false);
+  first->set_explicit(true);
+  second->set_explicit(true);
+
+  // The function should be a no-op.
+  MarkImplicitWeakParentsRecursively(parent);
+  ASSERT_FALSE(parent->is_weak());
+  ASSERT_TRUE(first->is_weak());
+  ASSERT_FALSE(second->is_weak());
+
+  // Case where all children are weak.
+  second->set_weak(true);
+
+  // The function should mark parent as weak.
+  MarkImplicitWeakParentsRecursively(parent);
+  ASSERT_TRUE(parent->is_weak());
+  ASSERT_TRUE(first->is_weak());
+  ASSERT_TRUE(second->is_weak());
+}
+
+TEST_F(GraphProcessorTest, MarkWeakParentsComplex) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  // |first| is explicitly strong but |first_child| is implicitly so.
+  Node* parent = process->CreateNode(kEmptyId, "parent", false);
+  Node* first = process->CreateNode(kEmptyId, "parent/f", false);
+  Node* first_child = process->CreateNode(kEmptyId, "parent/f/c", false);
+  Node* first_gchild = process->CreateNode(kEmptyId, "parent/f/c/c", true);
+
+  parent->set_explicit(false);
+  first->set_explicit(true);
+  first_child->set_explicit(false);
+  first_gchild->set_explicit(true);
+
+  // That should lead to |first_child| being marked implicitly weak.
+  MarkImplicitWeakParentsRecursively(parent);
+  ASSERT_FALSE(parent->is_weak());
+  ASSERT_FALSE(first->is_weak());
+  ASSERT_TRUE(first_child->is_weak());
+  ASSERT_TRUE(first_gchild->is_weak());
+
+  // Reset and change so that first is now only implicitly strong.
+  first->set_explicit(false);
+  first_child->set_weak(false);
+
+  // The whole chain should now be weak.
+  MarkImplicitWeakParentsRecursively(parent);
+  ASSERT_TRUE(parent->is_weak());
+  ASSERT_TRUE(first->is_weak());
+  ASSERT_TRUE(first_child->is_weak());
+  ASSERT_TRUE(first_gchild->is_weak());
+}
+
+TEST_F(GraphProcessorTest, MarkWeakOwners) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  // Make only the ultimate owned node weak.
+  Node* owner = process->CreateNode(kEmptyId, "owner", false);
+  Node* owned = process->CreateNode(kEmptyId, "owned", false);
+  Node* owned_2 = process->CreateNode(kEmptyId, "owned2", true);
+
+  graph.AddNodeOwnershipEdge(owner, owned, 0);
+  graph.AddNodeOwnershipEdge(owned, owned_2, 0);
+
+  // Starting from the leaf node should lead to everything being weak.
+  MarkWeakOwnersAndChildrenRecursively(process->root());
+  ASSERT_TRUE(owner->is_weak());
+  ASSERT_TRUE(owned->is_weak());
+  ASSERT_TRUE(owned_2->is_weak());
+}
+
+TEST_F(GraphProcessorTest, MarkWeakParent) {
+  Process* process = graph.CreateGraphForProcess(1);
+  Node* parent = process->CreateNode(kEmptyId, "parent", true);
+  Node* child = process->CreateNode(kEmptyId, "parent/c", false);
+  Node* child_2 = process->CreateNode(kEmptyId, "parent/c/c", false);
+
+  // Starting from the parent node should lead to everything being weak.
+  MarkWeakOwnersAndChildrenRecursively(process->root());
+  ASSERT_TRUE(parent->is_weak());
+  ASSERT_TRUE(child->is_weak());
+  ASSERT_TRUE(child_2->is_weak());
+}
+
+TEST_F(GraphProcessorTest, MarkWeakParentOwner) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  // Make only the parent node weak.
+  Node* parent = process->CreateNode(kEmptyId, "parent", true);
+  Node* child = process->CreateNode(kEmptyId, "parent/c", false);
+  Node* child_2 = process->CreateNode(kEmptyId, "parent/c/c", false);
+  Node* owner = process->CreateNode(kEmptyId, "owner", false);
+
+  graph.AddNodeOwnershipEdge(owner, parent, 0);
+
+  // Starting from the parent node should lead to everything being weak.
+  MarkWeakOwnersAndChildrenRecursively(process->root());
+  ASSERT_TRUE(parent->is_weak());
+  ASSERT_TRUE(child->is_weak());
+  ASSERT_TRUE(child_2->is_weak());
+  ASSERT_TRUE(owner->is_weak());
+}
+
+TEST_F(GraphProcessorTest, RemoveWeakNodesRecursively) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  // Make only the child node weak.
+  Node* parent = process->CreateNode(kEmptyId, "parent", false);
+  Node* child = process->CreateNode(kEmptyId, "parent/c", true);
+  process->CreateNode(kEmptyId, "parent/c/c", false);
+  Node* owned = process->CreateNode(kEmptyId, "parent/owned", false);
+
+  graph.AddNodeOwnershipEdge(child, owned, 0);
+
+  // Starting from the parent node should lead to the weak child and its
+  // subtree being removed and to |owned| having its owned-by edge removed.
+  RemoveWeakNodesRecursively(parent);
+
+  ASSERT_EQ(parent->children()->size(), 1ul);
+  ASSERT_EQ(parent->children()->begin()->second, owned);
+
+  ASSERT_TRUE(owned->owned_by_edges()->empty());
+}
+
+TEST_F(GraphProcessorTest, RemoveWeakNodesRecursivelyBetweenGraphs) {
+  Process* f_process = graph.CreateGraphForProcess(1);
+  Process* s_process = graph.CreateGraphForProcess(2);
+
+  // Make only the child node weak.
+  Node* child = f_process->CreateNode(kEmptyId, "c", true);
+  f_process->CreateNode(kEmptyId, "c/c", false);
+  Node* owned = s_process->CreateNode(kEmptyId, "owned", false);
+
+  graph.AddNodeOwnershipEdge(child, owned, 0);
+
+  // Starting from the root node should lead to the weak child and its
+  // subtree being removed.
+  RemoveWeakNodesRecursively(f_process->root());
+
+  ASSERT_EQ(f_process->root()->children()->size(), 0ul);
+  ASSERT_EQ(s_process->root()->children()->size(), 1ul);
+
+  // This should be false until our next pass.
+  ASSERT_FALSE(owned->owned_by_edges()->empty());
+
+  RemoveWeakNodesRecursively(s_process->root());
+
+  // We should now have cleaned up the owned node's edges.
+  ASSERT_TRUE(owned->owned_by_edges()->empty());
+}
+
+TEST_F(GraphProcessorTest, AssignTracingOverhead) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  // Now add an allocator node.
+  process->CreateNode(kEmptyId, "malloc", false);
+
+  // If the tracing node does not exist, this should do nothing.
+  AssignTracingOverhead("malloc", &graph, process);
+  ASSERT_TRUE(process->root()->GetChild("malloc")->children()->empty());
+
+  // Now add a tracing node.
+  process->CreateNode(kEmptyId, "tracing", false);
+
+  // This should now add a tracing overhead node under the allocator.
+  AssignTracingOverhead("malloc", &graph, process);
+  ASSERT_NE(process->FindNode("malloc/allocated_objects/tracing_overhead"),
+            nullptr);
+}
+
+TEST_F(GraphProcessorTest, AggregateNumericWithNameForNode) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  Node* c1 = process->CreateNode(kEmptyId, "c1", false);
+  Node* c2 = process->CreateNode(kEmptyId, "c2", false);
+  Node* c3 = process->CreateNode(kEmptyId, "c3", false);
+
+  c1->AddEntry("random_numeric", Node::Entry::ScalarUnits::kBytes, 100);
+  c2->AddEntry("random_numeric", Node::Entry::ScalarUnits::kBytes, 256);
+  c3->AddEntry("other_numeric", Node::Entry::ScalarUnits::kBytes, 1000);
+
+  Node* root = process->root();
+  Node::Entry entry = AggregateNumericWithNameForNode(root, "random_numeric");
+  ASSERT_EQ(entry.value_uint64, 356ul);
+  ASSERT_EQ(entry.units, Node::Entry::ScalarUnits::kBytes);
+}
+
+TEST_F(GraphProcessorTest, AggregateNumericsRecursively) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  Node* c1 = process->CreateNode(kEmptyId, "c1", false);
+  Node* c2 = process->CreateNode(kEmptyId, "c2", false);
+  Node* c2_c1 = process->CreateNode(kEmptyId, "c2/c1", false);
+  Node* c2_c2 = process->CreateNode(kEmptyId, "c2/c2", false);
+  Node* c3_c1 = process->CreateNode(kEmptyId, "c3/c1", false);
+  Node* c3_c2 = process->CreateNode(kEmptyId, "c3/c2", false);
+
+  // If an entry already exists in the parent, the child should not
+  // overwrite it. If nothing exists, then the child entries can be aggregated.
+  c1->AddEntry("random_numeric", Node::Entry::ScalarUnits::kBytes, 100);
+  c2->AddEntry("random_numeric", Node::Entry::ScalarUnits::kBytes, 256);
+  c2_c1->AddEntry("random_numeric", Node::Entry::ScalarUnits::kBytes, 256);
+  c2_c2->AddEntry("random_numeric", Node::Entry::ScalarUnits::kBytes, 256);
+  c3_c1->AddEntry("random_numeric", Node::Entry::ScalarUnits::kBytes, 10);
+  c3_c2->AddEntry("random_numeric", Node::Entry::ScalarUnits::kBytes, 10);
+
+  Node* root = process->root();
+  AggregateNumericsRecursively(root);
+  ASSERT_EQ(root->entries()->size(), 1ul);
+
+  auto entry = root->entries()->begin()->second;
+  ASSERT_EQ(entry.value_uint64, 376ul);
+  ASSERT_EQ(entry.units, Node::Entry::ScalarUnits::kBytes);
+}
+
+TEST_F(GraphProcessorTest, AggregateSizeForDescendantNode) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  Node* c1 = process->CreateNode(kEmptyId, "c1", false);
+  Node* c2 = process->CreateNode(kEmptyId, "c2", false);
+  Node* c2_c1 = process->CreateNode(kEmptyId, "c2/c1", false);
+  Node* c2_c2 = process->CreateNode(kEmptyId, "c2/c2", false);
+  Node* c3_c1 = process->CreateNode(kEmptyId, "c3/c1", false);
+  Node* c3_c2 = process->CreateNode(kEmptyId, "c3/c2", false);
+
+  c1->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 100);
+  c2_c1->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 256);
+  c2_c2->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 256);
+  c3_c1->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 10);
+  c3_c2->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 10);
+
+  graph.AddNodeOwnershipEdge(c2_c2, c3_c2, 0);
+
+  // Aggregating root should give a size of (100 + 256 + 10 * 2) = 376.
+  // |c2_c2| is not counted because it owns |c3_c2|, a descendant of root.
+  Node* root = process->root();
+  ASSERT_EQ(376ul, *AggregateSizeForDescendantNode(root, root));
+
+  // Aggregating c2 should give a size of (256 * 2) = 512. |c2_c2| is counted
+  // because |c3_c2| is not a descendant of |c2|.
+  ASSERT_EQ(512ul, *AggregateSizeForDescendantNode(c2, c2));
+}
+
+TEST_F(GraphProcessorTest, CalculateSizeForNode) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  Node* c1 = process->CreateNode(kEmptyId, "c1", false);
+  Node* c2 = process->CreateNode(kEmptyId, "c2", false);
+  Node* c2_c1 = process->CreateNode(kEmptyId, "c2/c1", false);
+  Node* c2_c2 = process->CreateNode(kEmptyId, "c2/c2", false);
+  Node* c3 = process->CreateNode(kEmptyId, "c3", false);
+  Node* c3_c1 = process->CreateNode(kEmptyId, "c3/c1", false);
+  Node* c3_c2 = process->CreateNode(kEmptyId, "c3/c2", false);
+
+  c1->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 600);
+  c2_c1->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 10);
+  c2_c2->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 10);
+  c3->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 600);
+  c3_c1->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 256);
+  c3_c2->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 256);
+
+  graph.AddNodeOwnershipEdge(c2_c2, c3_c2, 0);
+
+  // Compute size entry for |c2| since computations for |c2_c1| and |c2_c2|
+  // are already complete.
+  CalculateSizeForNode(c2);
+
+  // Check that |c2| now has a size entry of 20 (sum of children).
+  auto c2_entry = c2->entries()->begin()->second;
+  ASSERT_EQ(c2_entry.value_uint64, 20ul);
+  ASSERT_EQ(c2_entry.units, Node::Entry::ScalarUnits::kBytes);
+
+  // Compute the size entry for |c3_c2|, which should not change in size.
+  CalculateSizeForNode(c3_c2);
+
+  // Check that |c3_c2| now has unchanged size.
+  auto c3_c2_entry = c3_c2->entries()->begin()->second;
+  ASSERT_EQ(c3_c2_entry.value_uint64, 256ul);
+  ASSERT_EQ(c3_c2_entry.units, Node::Entry::ScalarUnits::kBytes);
+
+  // Compute size entry for |c3| which should add an unspecified node.
+  CalculateSizeForNode(c3);
+
+  // Check that |c3| has unchanged size.
+  auto c3_entry = c3->entries()->begin()->second;
+  ASSERT_EQ(c3_entry.value_uint64, 600ul);
+  ASSERT_EQ(c3_entry.units, Node::Entry::ScalarUnits::kBytes);
+
+  // Check that the unspecified node is a child of |c3| and has size
+  // 600 - 512 = 88.
+  Node* c3_child = c3->children()->find("<unspecified>")->second;
+  auto c3_child_entry = c3_child->entries()->begin()->second;
+  ASSERT_EQ(c3_child_entry.value_uint64, 88ul);
+  ASSERT_EQ(c3_child_entry.units, Node::Entry::ScalarUnits::kBytes);
+
+  // Compute size entry for |root| which should aggregate children sizes.
+  CalculateSizeForNode(process->root());
+
+  // Check that |root| has been assigned a size of 600 + 10 + 600 = 1210.
+  // Note that |c2_c2| is not counted because it owns |c3_c2| which is a
+  // descendant of |root|.
+  auto root_entry = process->root()->entries()->begin()->second;
+  ASSERT_EQ(root_entry.value_uint64, 1210ul);
+  ASSERT_EQ(root_entry.units, Node::Entry::ScalarUnits::kBytes);
+}
+
+TEST_F(GraphProcessorTest, CalculateNodeSubSizes) {
+  Process* process_1 = graph.CreateGraphForProcess(1);
+  Process* process_2 = graph.CreateGraphForProcess(2);
+
+  Node* parent_1 = process_1->CreateNode(kEmptyId, "parent", false);
+  Node* child_1 = process_1->CreateNode(kEmptyId, "parent/child", false);
+
+  Node* parent_2 = process_2->CreateNode(kEmptyId, "parent", false);
+  Node* child_2 = process_2->CreateNode(kEmptyId, "parent/child", false);
+
+  graph.AddNodeOwnershipEdge(parent_1, parent_2, 0);
+
+  process_1->root()->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 4);
+  parent_1->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 4);
+  child_1->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 4);
+  process_2->root()->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 5);
+  parent_2->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 5);
+  child_2->AddEntry("size", Node::Entry::ScalarUnits::kBytes, 5);
+
+  // Each of these leaf nodes should have not-owning/not-owned sub-sizes equal
+  // to its own size.
+  CalculateNodeSubSizes(child_1);
+  ASSERT_EQ(child_1->not_owned_sub_size(), 4ul);
+  ASSERT_EQ(child_1->not_owning_sub_size(), 4ul);
+  CalculateNodeSubSizes(child_2);
+  ASSERT_EQ(child_2->not_owned_sub_size(), 5ul);
+  ASSERT_EQ(child_2->not_owning_sub_size(), 5ul);
+
+  // These nodes' sub-sizes should also equal the sizes of their children.
+  CalculateNodeSubSizes(parent_1);
+  ASSERT_EQ(parent_1->not_owned_sub_size(), 4ul);
+  ASSERT_EQ(parent_1->not_owning_sub_size(), 4ul);
+  CalculateNodeSubSizes(parent_2);
+  ASSERT_EQ(parent_2->not_owned_sub_size(), 5ul);
+  ASSERT_EQ(parent_2->not_owning_sub_size(), 5ul);
+
+  // These nodes should account for the edge between the parents.
+  CalculateNodeSubSizes(process_1->root());
+  ASSERT_EQ(process_1->root()->not_owned_sub_size(), 4ul);
+  ASSERT_EQ(process_1->root()->not_owning_sub_size(), 0ul);
+  CalculateNodeSubSizes(process_2->root());
+  ASSERT_EQ(process_2->root()->not_owned_sub_size(), 1ul);
+  ASSERT_EQ(process_2->root()->not_owning_sub_size(), 5ul);
+}
+
+TEST_F(GraphProcessorTest, CalculateNodeOwnershipCoefficient) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  Node* owned = process->CreateNode(kEmptyId, "owned", false);
+  Node* owner_1 = process->CreateNode(kEmptyId, "owner1", false);
+  Node* owner_2 = process->CreateNode(kEmptyId, "owner2", false);
+  Node* owner_3 = process->CreateNode(kEmptyId, "owner3", false);
+  Node* owner_4 = process->CreateNode(kEmptyId, "owner4", false);
+
+  graph.AddNodeOwnershipEdge(owner_1, owned, 2);
+  graph.AddNodeOwnershipEdge(owner_2, owned, 2);
+  graph.AddNodeOwnershipEdge(owner_3, owned, 1);
+  graph.AddNodeOwnershipEdge(owner_4, owned, 0);
+
+  // Ensure the owned node has a size, otherwise calculations will not happen.
+  owned->AddEntry("size", Node::Entry::kBytes, 10);
+
+  // Set up the owned/owning sub-sizes.
+  owned->add_not_owned_sub_size(10);
+  owner_1->add_not_owning_sub_size(6);
+  owner_2->add_not_owning_sub_size(7);
+  owner_3->add_not_owning_sub_size(5);
+  owner_4->add_not_owning_sub_size(8);
+
+  // Perform the computation.
+  CalculateNodeOwnershipCoefficient(owned);
+
+  // Ensure that the coefficients are correct.
+  ASSERT_DOUBLE_EQ(owned->owned_coefficient(), 2.0 / 10.0);
+  ASSERT_DOUBLE_EQ(owner_1->owning_coefficient(), 3.0 / 6.0);
+  ASSERT_DOUBLE_EQ(owner_2->owning_coefficient(), 4.0 / 7.0);
+  ASSERT_DOUBLE_EQ(owner_3->owning_coefficient(), 0.0 / 5.0);
+  ASSERT_DOUBLE_EQ(owner_4->owning_coefficient(), 1.0 / 8.0);
+}
+
+TEST_F(GraphProcessorTest, CalculateNodeCumulativeOwnershipCoefficient) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  Node* c1 = process->CreateNode(kEmptyId, "c1", false);
+  Node* c1_c1 = process->CreateNode(kEmptyId, "c1/c1", false);
+  Node* c1_c2 = process->CreateNode(kEmptyId, "c1/c2", false);
+  Node* owned = process->CreateNode(kEmptyId, "owned", false);
+
+  graph.AddNodeOwnershipEdge(c1_c2, owned, 2);
+
+  // Ensure all nodes have sizes, otherwise calculations will not happen.
+  c1_c1->AddEntry("size", Node::Entry::kBytes, 10);
+  c1_c2->AddEntry("size", Node::Entry::kBytes, 10);
+  owned->AddEntry("size", Node::Entry::kBytes, 10);
+
+  // Set up the owned/owning cumulative coefficients.
+  c1->set_cumulative_owning_coefficient(0.123);
+  c1->set_cumulative_owned_coefficient(0.456);
+  owned->set_cumulative_owning_coefficient(0.789);
+  owned->set_cumulative_owned_coefficient(0.987);
+
+  // Set owning and owned for the children.
+  c1_c1->set_owning_coefficient(0.654);
+  c1_c1->set_owned_coefficient(0.321);
+  c1_c2->set_owning_coefficient(0.135);
+  c1_c2->set_owned_coefficient(0.246);
+
+  // Perform the computation and check our answers.
+  CalculateNodeCumulativeOwnershipCoefficient(c1_c1);
+  ASSERT_DOUBLE_EQ(c1_c1->cumulative_owning_coefficient(), 0.123);
+  ASSERT_DOUBLE_EQ(c1_c1->cumulative_owned_coefficient(), 0.456 * 0.321);
+
+  CalculateNodeCumulativeOwnershipCoefficient(c1_c2);
+  ASSERT_DOUBLE_EQ(c1_c2->cumulative_owning_coefficient(), 0.135 * 0.789);
+  ASSERT_DOUBLE_EQ(c1_c2->cumulative_owned_coefficient(), 0.456 * 0.246);
+}
+
+TEST_F(GraphProcessorTest, CalculateNodeEffectiveSize) {
+  Process* process = graph.CreateGraphForProcess(1);
+
+  Node* c1 = process->CreateNode(kEmptyId, "c1", false);
+  Node* c1_c1 = process->CreateNode(kEmptyId, "c1/c1", false);
+  Node* c1_c2 = process->CreateNode(kEmptyId, "c1/c2", false);
+
+  // Ensure all nodes have sizes, otherwise calculations will not happen.
+  c1->AddEntry("size", Node::Entry::kBytes, 200);
+  c1_c1->AddEntry("size", Node::Entry::kBytes, 32);
+  c1_c2->AddEntry("size", Node::Entry::kBytes, 20);
+
+  // Set up the owned/owning cumulative coefficients.
+  c1_c1->set_cumulative_owning_coefficient(0.123);
+  c1_c1->set_cumulative_owned_coefficient(0.456);
+  c1_c2->set_cumulative_owning_coefficient(0.789);
+  c1_c2->set_cumulative_owned_coefficient(0.987);
+
+  // Perform the computation and check our answers.
+  CalculateNodeEffectiveSize(c1_c1);
+  const Node::Entry& entry_c1_c1 =
+      c1_c1->entries()->find("effective_size")->second;
+  uint64_t expected_c1_c1 = static_cast<int>(0.123 * 0.456 * 32);
+  ASSERT_EQ(entry_c1_c1.value_uint64, expected_c1_c1);
+
+  CalculateNodeEffectiveSize(c1_c2);
+  const Node::Entry& entry_c1_c2 =
+      c1_c2->entries()->find("effective_size")->second;
+  uint64_t expected_c1_c2 = static_cast<int>(0.789 * 0.987 * 20);
+  ASSERT_EQ(entry_c1_c2.value_uint64, expected_c1_c2);
+
+  CalculateNodeEffectiveSize(c1);
+  const Node::Entry& entry_c1 = c1->entries()->find("effective_size")->second;
+  ASSERT_EQ(entry_c1.value_uint64, expected_c1_c1 + expected_c1_c2);
+}
+
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/importers/memory_tracker/graph_unittest.cc b/src/trace_processor/importers/memory_tracker/graph_unittest.cc
new file mode 100644
index 0000000..6017c73
--- /dev/null
+++ b/src/trace_processor/importers/memory_tracker/graph_unittest.cc
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfetto/ext/trace_processor/importers/memory_tracker/graph.h"
+
+#include "perfetto/base/build_config.h"
+#include "test/gtest_and_gmock.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+namespace {
+
+using Node = GlobalNodeGraph::Node;
+using Process = GlobalNodeGraph::Process;
+
+const MemoryAllocatorNodeId kEmptyId;
+
+}  // namespace
+
+TEST(GlobalNodeGraphTest, CreateContainerForProcess) {
+  GlobalNodeGraph global_dump_graph;
+
+  Process* dump = global_dump_graph.CreateGraphForProcess(10);
+  ASSERT_NE(dump, nullptr);
+
+  auto* process_graph =
+      global_dump_graph.process_node_graphs().find(10)->second.get();
+  ASSERT_EQ(dump, process_graph);
+}
+
+TEST(GlobalNodeGraphTest, AddNodeOwnershipEdge) {
+  GlobalNodeGraph global_dump_graph;
+  Node owner(global_dump_graph.shared_memory_graph(), nullptr);
+  Node owned(global_dump_graph.shared_memory_graph(), nullptr);
+
+  global_dump_graph.AddNodeOwnershipEdge(&owner, &owned, 1);
+
+  auto& edges = global_dump_graph.edges();
+  ASSERT_NE(edges.begin(), edges.end());
+
+  auto& edge = *edges.begin();
+  ASSERT_EQ(edge.source(), &owner);
+  ASSERT_EQ(edge.target(), &owned);
+  ASSERT_EQ(edge.priority(), 1);
+}
+
+TEST(GlobalNodeGraphTest, VisitInDepthFirstPostOrder) {
+  GlobalNodeGraph graph;
+  Process* process_1 = graph.CreateGraphForProcess(1);
+  Process* process_2 = graph.CreateGraphForProcess(2);
+
+  Node* c1 = process_1->CreateNode(kEmptyId, "c1", false);
+  Node* c2 = process_1->CreateNode(kEmptyId, "c2", false);
+  Node* c2_c1 = process_1->CreateNode(kEmptyId, "c2/c1", false);
+  Node* c2_c2 = process_1->CreateNode(kEmptyId, "c2/c2", false);
+
+  Node* c3 = process_2->CreateNode(kEmptyId, "c3", false);
+  Node* c3_c1 = process_2->CreateNode(kEmptyId, "c3/c1", false);
+  Node* c3_c2 = process_2->CreateNode(kEmptyId, "c3/c2", false);
+
+  // |c3_c2| owns |c2_c2|.
+  graph.AddNodeOwnershipEdge(c3_c2, c2_c2, 1);
+
+  // This method should always visit a node's owners and children before the
+  // node itself.
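+  // In particular, |c3_c2| (the owner) must appear before |c2_c2| (the owned
+  // node), even though they belong to different processes.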
+  auto iterator = graph.VisitInDepthFirstPostOrder();
+  ASSERT_EQ(iterator.next(), graph.shared_memory_graph()->root());
+  ASSERT_EQ(iterator.next(), c1);
+  ASSERT_EQ(iterator.next(), c2_c1);
+  ASSERT_EQ(iterator.next(), c3_c2);
+  ASSERT_EQ(iterator.next(), c2_c2);
+  ASSERT_EQ(iterator.next(), c2);
+  ASSERT_EQ(iterator.next(), process_1->root());
+  ASSERT_EQ(iterator.next(), c3_c1);
+  ASSERT_EQ(iterator.next(), c3);
+  ASSERT_EQ(iterator.next(), process_2->root());
+  ASSERT_EQ(iterator.next(), nullptr);
+}
+
+TEST(GlobalNodeGraphTest, VisitInDepthFirstPreOrder) {
+  GlobalNodeGraph graph;
+  Process* process_1 = graph.CreateGraphForProcess(1);
+  Process* process_2 = graph.CreateGraphForProcess(2);
+
+  Node* c1 = process_1->CreateNode(kEmptyId, "c1", false);
+  Node* c2 = process_1->CreateNode(kEmptyId, "c2", false);
+  Node* c2_c1 = process_1->CreateNode(kEmptyId, "c2/c1", false);
+  Node* c2_c2 = process_1->CreateNode(kEmptyId, "c2/c2", false);
+
+  Node* c3 = process_2->CreateNode(kEmptyId, "c3", false);
+  Node* c3_c1 = process_2->CreateNode(kEmptyId, "c3/c1", false);
+  Node* c3_c2 = process_2->CreateNode(kEmptyId, "c3/c2", false);
+
+  // |c2_c2| owns |c3_c2|. Note this is opposite of post-order.
+  graph.AddNodeOwnershipEdge(c2_c2, c3_c2, 1);
+
+  // This method should always visit a node's owners and children after the
+  // node itself.
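+  // In particular, |c2_c2| (the owner) must appear after |c3_c2| (the owned
+  // node).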
+  auto iterator = graph.VisitInDepthFirstPreOrder();
+  ASSERT_EQ(iterator.next(), graph.shared_memory_graph()->root());
+  ASSERT_EQ(iterator.next(), process_1->root());
+  ASSERT_EQ(iterator.next(), c1);
+  ASSERT_EQ(iterator.next(), c2);
+  ASSERT_EQ(iterator.next(), c2_c1);
+  ASSERT_EQ(iterator.next(), process_2->root());
+  ASSERT_EQ(iterator.next(), c3);
+  ASSERT_EQ(iterator.next(), c3_c1);
+  ASSERT_EQ(iterator.next(), c3_c2);
+  ASSERT_EQ(iterator.next(), c2_c2);
+  ASSERT_EQ(iterator.next(), nullptr);
+}
+
+TEST(ProcessTest, CreateAndFindNode) {
+  GlobalNodeGraph global_dump_graph;
+  Process graph(1, &global_dump_graph);
+
+  Node* first =
+      graph.CreateNode(MemoryAllocatorNodeId(1), "simple/test/1", false);
+  Node* second =
+      graph.CreateNode(MemoryAllocatorNodeId(2), "simple/test/2", false);
+  Node* third =
+      graph.CreateNode(MemoryAllocatorNodeId(3), "simple/other/1", false);
+  Node* fourth =
+      graph.CreateNode(MemoryAllocatorNodeId(4), "complex/path", false);
+  Node* fifth =
+      graph.CreateNode(MemoryAllocatorNodeId(5), "complex/path/child/1", false);
+
+  ASSERT_EQ(graph.FindNode("simple/test/1"), first);
+  ASSERT_EQ(graph.FindNode("simple/test/2"), second);
+  ASSERT_EQ(graph.FindNode("simple/other/1"), third);
+  ASSERT_EQ(graph.FindNode("complex/path"), fourth);
+  ASSERT_EQ(graph.FindNode("complex/path/child/1"), fifth);
+
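+  // The explicitly created nodes should also be retrievable by their ids.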
+  auto& nodes_by_id = global_dump_graph.nodes_by_id();
+  ASSERT_EQ(nodes_by_id.find(MemoryAllocatorNodeId(1))->second, first);
+  ASSERT_EQ(nodes_by_id.find(MemoryAllocatorNodeId(2))->second, second);
+  ASSERT_EQ(nodes_by_id.find(MemoryAllocatorNodeId(3))->second, third);
+  ASSERT_EQ(nodes_by_id.find(MemoryAllocatorNodeId(4))->second, fourth);
+  ASSERT_EQ(nodes_by_id.find(MemoryAllocatorNodeId(5))->second, fifth);
+}
+
+TEST(ProcessTest, CreateNodeParent) {
+  GlobalNodeGraph global_dump_graph;
+  Process graph(1, &global_dump_graph);
+
+  Node* parent = graph.CreateNode(MemoryAllocatorNodeId(1), "simple", false);
+  Node* child =
+      graph.CreateNode(MemoryAllocatorNodeId(1), "simple/child", false);
+
+  ASSERT_EQ(parent->parent(), graph.root());
+  ASSERT_EQ(child->parent(), parent);
+}
+
+TEST(ProcessTest, WeakAndExplicit) {
+  GlobalNodeGraph global_dump_graph;
+  Process graph(1, &global_dump_graph);
+
+  Node* first =
+      graph.CreateNode(MemoryAllocatorNodeId(1), "simple/test/1", true);
+  Node* second =
+      graph.CreateNode(MemoryAllocatorNodeId(2), "simple/test/2", false);
+
+  ASSERT_TRUE(first->is_weak());
+  ASSERT_FALSE(second->is_weak());
+
+  ASSERT_TRUE(first->is_explicit());
+  ASSERT_TRUE(second->is_explicit());
+
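+  // Ancestors created implicitly while inserting the explicit nodes should be
+  // neither weak nor explicit.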
+  Node* parent = graph.FindNode("simple/test");
+  ASSERT_NE(parent, nullptr);
+  ASSERT_FALSE(parent->is_weak());
+  ASSERT_FALSE(parent->is_explicit());
+
+  Node* grandparent = graph.FindNode("simple");
+  ASSERT_NE(grandparent, nullptr);
+  ASSERT_FALSE(grandparent->is_weak());
+  ASSERT_FALSE(grandparent->is_explicit());
+}
+
+TEST(NodeTest, GetChild) {
+  GlobalNodeGraph global_dump_graph;
+  Node node(global_dump_graph.shared_memory_graph(), nullptr);
+
+  ASSERT_EQ(node.GetChild("test"), nullptr);
+
+  Node child(global_dump_graph.shared_memory_graph(), &node);
+  node.InsertChild("child", &child);
+  ASSERT_EQ(node.GetChild("child"), &child);
+}
+
+TEST(NodeTest, InsertChild) {
+  GlobalNodeGraph global_dump_graph;
+  Node node(global_dump_graph.shared_memory_graph(), nullptr);
+
+  ASSERT_EQ(node.GetChild("test"), nullptr);
+
+  Node child(global_dump_graph.shared_memory_graph(), &node);
+  node.InsertChild("child", &child);
+  ASSERT_EQ(node.GetChild("child"), &child);
+}
+
+TEST(NodeTest, AddEntry) {
+  GlobalNodeGraph global_dump_graph;
+  Node node(global_dump_graph.shared_memory_graph(), nullptr);
+
+  node.AddEntry("scalar", Node::Entry::ScalarUnits::kBytes, 100ul);
+  ASSERT_EQ(node.entries()->size(), 1ul);
+
+  node.AddEntry("string", "data");
+  ASSERT_EQ(node.entries()->size(), 2ul);
+
+  auto scalar = node.entries()->find("scalar");
+  ASSERT_EQ(scalar->first, "scalar");
+  ASSERT_EQ(scalar->second.units, Node::Entry::ScalarUnits::kBytes);
+  ASSERT_EQ(scalar->second.value_uint64, 100ul);
+
+  auto string = node.entries()->find("string");
+  ASSERT_EQ(string->first, "string");
+  ASSERT_EQ(string->second.value_string, "data");
+}
+
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/importers/memory_tracker/memory_allocator_node_id.cc b/src/trace_processor/importers/memory_tracker/memory_allocator_node_id.cc
new file mode 100644
index 0000000..9d80101
--- /dev/null
+++ b/src/trace_processor/importers/memory_tracker/memory_allocator_node_id.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfetto/ext/trace_processor/importers/memory_tracker/memory_allocator_node_id.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "perfetto/base/logging.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+MemoryAllocatorNodeId::MemoryAllocatorNodeId(uint64_t id) : id_(id) {}
+
+MemoryAllocatorNodeId::MemoryAllocatorNodeId() : MemoryAllocatorNodeId(0u) {}
+
+std::string MemoryAllocatorNodeId::ToString() const {
+  // Max uint64 is 18446744073709551615 (20 digits), plus 1 for the null byte.
+  size_t max_size = 21;
+  std::string buf;
+  buf.resize(max_size);
+  auto final_size = snprintf(&buf[0], max_size, "%" PRIu64, id_);
+  PERFETTO_DCHECK(final_size >= 0);
+  buf.resize(static_cast<size_t>(final_size));  // Cuts off the final null byte.
+  return buf;
+}
+
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/importers/memory_tracker/raw_memory_graph_node.cc b/src/trace_processor/importers/memory_tracker/raw_memory_graph_node.cc
new file mode 100644
index 0000000..74a9207
--- /dev/null
+++ b/src/trace_processor/importers/memory_tracker/raw_memory_graph_node.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfetto/ext/trace_processor/importers/memory_tracker/raw_memory_graph_node.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+const char RawMemoryGraphNode::kNameSize[] = "size";
+const char RawMemoryGraphNode::kNameObjectCount[] = "object_count";
+const char RawMemoryGraphNode::kTypeScalar[] = "scalar";
+const char RawMemoryGraphNode::kTypeString[] = "string";
+const char RawMemoryGraphNode::kUnitsBytes[] = "bytes";
+const char RawMemoryGraphNode::kUnitsObjects[] = "objects";
+
+RawMemoryGraphNode::MemoryNodeEntry::MemoryNodeEntry(const std::string& n,
+                                                     const std::string& u,
+                                                     uint64_t v)
+    : name(n), units(u), entry_type(kUint64), value_uint64(v) {}
+
+RawMemoryGraphNode::MemoryNodeEntry::MemoryNodeEntry(const std::string& n,
+                                                     const std::string& u,
+                                                     const std::string& v)
+    : name(n), units(u), entry_type(kString), value_string(v) {}
+
+bool RawMemoryGraphNode::MemoryNodeEntry::operator==(
+    const MemoryNodeEntry& rhs) const {
+  if (!(name == rhs.name && units == rhs.units && entry_type == rhs.entry_type))
+    return false;
+  switch (entry_type) {
+    case EntryType::kUint64:
+      return value_uint64 == rhs.value_uint64;
+    case EntryType::kString:
+      return value_string == rhs.value_string;
+  }
+  return false;
+}
+
+RawMemoryGraphNode::RawMemoryGraphNode(const std::string& absolute_name,
+                                       LevelOfDetail level,
+                                       MemoryAllocatorNodeId id)
+    : absolute_name_(absolute_name),
+      level_of_detail_(level),
+      id_(id),
+      flags_(Flags::kDefault) {}
+
+RawMemoryGraphNode::RawMemoryGraphNode(
+    const std::string& absolute_name,
+    LevelOfDetail level,
+    MemoryAllocatorNodeId id,
+    std::vector<RawMemoryGraphNode::MemoryNodeEntry>&& entries)
+    : absolute_name_(absolute_name),
+      level_of_detail_(level),
+      entries_(std::move(entries)),
+      id_(id),
+      flags_(Flags::kDefault) {}
+
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/importers/memory_tracker/raw_process_memory_node.cc b/src/trace_processor/importers/memory_tracker/raw_process_memory_node.cc
new file mode 100644
index 0000000..faaf084
--- /dev/null
+++ b/src/trace_processor/importers/memory_tracker/raw_process_memory_node.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfetto/ext/trace_processor/importers/memory_tracker/raw_process_memory_node.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <functional>
+#include <memory>
+
+#include "perfetto/base/logging.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+RawProcessMemoryNode::RawProcessMemoryNode(LevelOfDetail level_of_detail,
+                                           AllocatorNodeEdgesMap&& edges_map,
+                                           MemoryNodesMap&& nodes_map)
+    : level_of_detail_(level_of_detail),
+      allocator_nodes_edges_(std::move(edges_map)),
+      allocator_nodes_(std::move(nodes_map)) {}
+
+RawProcessMemoryNode::RawProcessMemoryNode(RawProcessMemoryNode&&) = default;
+RawProcessMemoryNode::~RawProcessMemoryNode() = default;
+RawProcessMemoryNode& RawProcessMemoryNode::operator=(RawProcessMemoryNode&&) =
+    default;
+
+RawMemoryGraphNode* RawProcessMemoryNode::GetAllocatorNode(
+    const std::string& absolute_name) const {
+  auto it = allocator_nodes_.find(absolute_name);
+  if (it != allocator_nodes_.end())
+    return it->second.get();
+  return nullptr;
+}
+
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/importers/memory_tracker/raw_process_memory_node_unittest.cc b/src/trace_processor/importers/memory_tracker/raw_process_memory_node_unittest.cc
new file mode 100644
index 0000000..91a27ec
--- /dev/null
+++ b/src/trace_processor/importers/memory_tracker/raw_process_memory_node_unittest.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfetto/ext/trace_processor/importers/memory_tracker/raw_process_memory_node.h"
+
+#include <stddef.h>
+
+#include <unordered_map>
+
+#include "perfetto/base/build_config.h"
+#include "test/gtest_and_gmock.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+namespace {
+
+const LevelOfDetail kLevelOfDetail = LevelOfDetail::kDetailed;
+
+}  // namespace
+
+TEST(RawProcessMemoryNodeTest, MoveConstructor) {
+  const auto source = MemoryAllocatorNodeId(42);
+  const auto target = MemoryAllocatorNodeId(4242);
+
+  std::unique_ptr<RawMemoryGraphNode> mad1(
+      new RawMemoryGraphNode("mad1", kLevelOfDetail, source));
+  std::unique_ptr<RawMemoryGraphNode> mad2(
+      new RawMemoryGraphNode("mad2", kLevelOfDetail, target));
+
+  RawProcessMemoryNode::MemoryNodesMap nodesMap;
+  nodesMap.emplace(mad1->absolute_name(), std::move(mad1));
+  nodesMap.emplace(mad2->absolute_name(), std::move(mad2));
+
+  std::unique_ptr<MemoryGraphEdge> edge(
+      new MemoryGraphEdge(source, target, 10, false));
+
+  RawProcessMemoryNode::AllocatorNodeEdgesMap edgesMap;
+  edgesMap.emplace(edge->source, std::move(edge));
+
+  RawProcessMemoryNode pmd1(kLevelOfDetail, std::move(edgesMap),
+                            std::move(nodesMap));
+
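+  // The move constructor should transfer ownership of both maps to |pmd2|.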
+  RawProcessMemoryNode pmd2(std::move(pmd1));
+
+  EXPECT_EQ(1u, pmd2.allocator_nodes().count("mad1"));
+  EXPECT_EQ(1u, pmd2.allocator_nodes().count("mad2"));
+  EXPECT_EQ(LevelOfDetail::kDetailed, pmd2.level_of_detail());
+  EXPECT_EQ(1u, pmd2.allocator_nodes_edges().size());
+}
+
+TEST(RawProcessMemoryNodeTest, MoveAssignment) {
+  const auto source = MemoryAllocatorNodeId(42);
+  const auto target = MemoryAllocatorNodeId(4242);
+
+  std::unique_ptr<RawMemoryGraphNode> mad1(
+      new RawMemoryGraphNode("mad1", kLevelOfDetail, source));
+  std::unique_ptr<RawMemoryGraphNode> mad2(
+      new RawMemoryGraphNode("mad2", kLevelOfDetail, target));
+
+  RawProcessMemoryNode::MemoryNodesMap nodesMap;
+  nodesMap.emplace(mad1->absolute_name(), std::move(mad1));
+  nodesMap.emplace(mad2->absolute_name(), std::move(mad2));
+
+  std::unique_ptr<MemoryGraphEdge> edge(
+      new MemoryGraphEdge(source, target, 10, false));
+
+  RawProcessMemoryNode::AllocatorNodeEdgesMap edgesMap;
+  edgesMap.emplace(edge->source, std::move(edge));
+
+  RawProcessMemoryNode pmd1(kLevelOfDetail, std::move(edgesMap),
+                            std::move(nodesMap));
+
+  RawProcessMemoryNode pmd2(LevelOfDetail::kBackground);
+
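+  // Move assignment should transfer both maps and the level of detail from
+  // |pmd1|.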
+  pmd2 = std::move(pmd1);
+  EXPECT_EQ(1u, pmd2.allocator_nodes().count("mad1"));
+  EXPECT_EQ(1u, pmd2.allocator_nodes().count("mad2"));
+  EXPECT_EQ(0u, pmd2.allocator_nodes().count("mad3"));
+  EXPECT_EQ(LevelOfDetail::kDetailed, pmd2.level_of_detail());
+  EXPECT_EQ(1u, pmd2.allocator_nodes_edges().size());
+}
+
+}  // namespace trace_processor
+}  // namespace perfetto