Merge "Add methodCount metric based on method trace" into androidx-main
diff --git a/benchmark/benchmark-common/src/androidTest/java/androidx/benchmark/MethodTracingTest.kt b/benchmark/benchmark-common/src/androidTest/java/androidx/benchmark/MethodTracingTest.kt
new file mode 100644
index 0000000..3750694
--- /dev/null
+++ b/benchmark/benchmark-common/src/androidTest/java/androidx/benchmark/MethodTracingTest.kt
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2025 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package androidx.benchmark
+
+import androidx.benchmark.traceprocessor.TraceProcessor
+import androidx.benchmark.traceprocessor.runSingleSessionServer
+import androidx.test.ext.junit.runners.AndroidJUnit4
+import androidx.test.filters.LargeTest
+import androidx.test.filters.SdkSuppress
+import kotlin.test.assertEquals
+import org.junit.Test
+import org.junit.runner.RunWith
+
+@SdkSuppress(minSdkVersion = 23)
+@LargeTest
+@RunWith(AndroidJUnit4::class)
+class MethodTracingTest {
+    val methodTrace =
+        createTempFileFromAsset("api35_runWithMeasurementDisabled-methodTracing", ".trace")
+    val perfettoTrace =
+        createTempFileFromAsset("api35_runWithMeasurementDisabled", ".perfetto-trace")
+
+    @Test
+    fun embed_and_methodCount() {
+        MethodTracing.embedInPerfettoTrace(methodTrace, perfettoTrace)
+
+        val metrics =
+            TraceProcessor.runSingleSessionServer(perfettoTrace.absolutePath) {
+                // Note that queryMetrics relies on current class names. If this *test apk* is
+                // minified, these class names will not match those inside the fixed method trace.
+                // Minifying this test isn't expected, but minifying benchmarks is, which is why the
+                // current in-process class/method names are used.
+                MethodTracing.queryMetrics(this)
+            }
+
+        assertEquals(listOf(MetricResult("methodCount", listOf(38.0))), metrics)
+    }
+}
diff --git a/benchmark/benchmark-common/src/androidTest/java/androidx/benchmark/MicrobenchmarkPhaseConfigTest.kt b/benchmark/benchmark-common/src/androidTest/java/androidx/benchmark/MicrobenchmarkPhaseConfigTest.kt
index 5fea55f..9d63a73 100644
--- a/benchmark/benchmark-common/src/androidTest/java/androidx/benchmark/MicrobenchmarkPhaseConfigTest.kt
+++ b/benchmark/benchmark-common/src/androidTest/java/androidx/benchmark/MicrobenchmarkPhaseConfigTest.kt
@@ -65,7 +65,7 @@
                     }
                 )
             microbenchmark.executePhases()
-            microbenchmark.output(null)
+            microbenchmark.output()
         }
 
         val calculatedIterations =
diff --git a/benchmark/benchmark-common/src/main/java/androidx/benchmark/MetricResult.kt b/benchmark/benchmark-common/src/main/java/androidx/benchmark/MetricResult.kt
index d4b026d..4c1f7b8 100644
--- a/benchmark/benchmark-common/src/main/java/androidx/benchmark/MetricResult.kt
+++ b/benchmark/benchmark-common/src/main/java/androidx/benchmark/MetricResult.kt
@@ -18,6 +18,7 @@
 
 import android.os.Bundle
 import androidx.annotation.RestrictTo
+import kotlin.math.abs
 import kotlin.math.pow
 import kotlin.math.sqrt
 
@@ -26,7 +27,7 @@
  * for those measurements (min/median/max).
  */
 @RestrictTo(RestrictTo.Scope.LIBRARY_GROUP)
-public class MetricResult(
+class MetricResult(
     val name: String,
     val data: List<Double>,
     val iterationData: List<List<Double>>? = null
@@ -68,7 +69,7 @@
             if (data.size == 1) {
                 0.0
             } else {
-                val sum = values.map { (it - mean).pow(2) }.sum()
+                val sum = values.sumOf { (it - mean).pow(2) }
                 sqrt(sum / (size - 1).toDouble())
             }
         coefficientOfVariation =
@@ -84,7 +85,7 @@
             "standardDeviation: $standardDeviation"
     }
 
-    public fun putInBundle(status: Bundle, prefix: String) {
+    fun putInBundle(status: Bundle, prefix: String) {
         // format string to be in instrumentation results format
         val bundleName = name.toOutputMetricName()
 
@@ -93,7 +94,7 @@
         status.putDouble("${prefix}${bundleName}_stddev", standardDeviation)
     }
 
-    public fun putPercentilesInBundle(status: Bundle, prefix: String) {
+    fun putPercentilesInBundle(status: Bundle, prefix: String) {
         // format string to be in instrumentation results format
         val bundleName = name.toOutputMetricName()
 
@@ -103,7 +104,19 @@
         status.putDouble("${prefix}${bundleName}_p99", p99)
     }
 
-    // NOTE: Studio-generated, re-generate if members change
+    override fun toString(): String {
+        return "MetricResult(name='$name', data=$data, iterationData=$iterationData)"
+    }
+
+    fun listsAreEqualish(left: List<Double>, right: List<Double>): Boolean {
+        if (left.size != right.size) return false
+
+        for (i in left.indices) {
+            if (abs(left[i] - right[i]) > 1e-6) return false
+        }
+        return true
+    }
+
     override fun equals(other: Any?): Boolean {
         if (this === other) return true
         if (javaClass != other?.javaClass) return false
@@ -111,26 +124,23 @@
         other as MetricResult
 
         if (name != other.name) return false
-        if (median != other.median) return false
-        if (medianIndex != other.medianIndex) return false
-        if (min != other.min) return false
-        if (minIndex != other.minIndex) return false
-        if (max != other.max) return false
-        if (maxIndex != other.maxIndex) return false
-        if (standardDeviation != other.standardDeviation) return false
+        if (!listsAreEqualish(data, other.data)) return false
+        if ((iterationData == null) != (other.iterationData == null)) return false
+        if (iterationData != null) {
+            if (iterationData.size != other.iterationData!!.size) return false
+            // both have iteration data, do deep compare
+            for (i in iterationData.indices) {
+                if (!listsAreEqualish(iterationData[i], other.iterationData[i])) return false
+            }
+        }
 
         return true
     }
 
     override fun hashCode(): Int {
         var result = name.hashCode()
-        result = 31 * result + median.hashCode()
-        result = 31 * result + medianIndex
-        result = 31 * result + min.hashCode()
-        result = 31 * result + minIndex
-        result = 31 * result + max.hashCode()
-        result = 31 * result + maxIndex
-        result = 31 * result + standardDeviation.hashCode()
+        result = 31 * result + data.hashCode()
+        result = 31 * result + (iterationData?.hashCode() ?: 0)
         return result
     }
 
diff --git a/benchmark/benchmark-common/src/main/java/androidx/benchmark/Microbenchmark.kt b/benchmark/benchmark-common/src/main/java/androidx/benchmark/Microbenchmark.kt
index 9dc3c3e..49e1926 100644
--- a/benchmark/benchmark-common/src/main/java/androidx/benchmark/Microbenchmark.kt
+++ b/benchmark/benchmark-common/src/main/java/androidx/benchmark/Microbenchmark.kt
@@ -18,13 +18,17 @@
 
 import android.os.Build
 import android.util.Log
+import androidx.annotation.RequiresApi
 import androidx.annotation.RestrictTo
 import androidx.benchmark.BenchmarkState.Companion.enableMethodTracingAffectsMeasurementError
+import androidx.benchmark.json.BenchmarkData.TestResult.ProfilerOutput
 import androidx.benchmark.perfetto.PerfettoCapture
 import androidx.benchmark.perfetto.PerfettoCaptureWrapper
 import androidx.benchmark.perfetto.PerfettoConfig
 import androidx.benchmark.perfetto.UiState
 import androidx.benchmark.perfetto.appendUiState
+import androidx.benchmark.traceprocessor.TraceProcessor
+import androidx.benchmark.traceprocessor.runSingleSessionServer
 import androidx.test.platform.app.InstrumentationRegistry
 import androidx.tracing.Trace
 import androidx.tracing.trace
@@ -319,17 +323,45 @@
         }
     }
 
-    fun output(perfettoTracePath: String?): MicrobenchmarkOutput {
+    /** Register a PerfettoTrace to be added to outputs, and used to extract metrics. */
+    @RequiresApi(23)
+    fun processPerfettoTrace(perfettoTracePath: String) {
+        // trace completed, and copied into shell writeable dir
+        val file = File(perfettoTracePath)
+        file.appendUiState(
+            UiState(
+                timelineStart = null,
+                timelineEnd = null,
+                highlightPackage = InstrumentationRegistry.getInstrumentation().context.packageName
+            )
+        )
+        state.profilerResults.forEach { it.embedInPerfettoTrace(perfettoTracePath) }
+        if (state.profilerResults.any { it.type == ProfilerOutput.Type.MethodTrace }) {
+            TraceProcessor.runSingleSessionServer(absoluteTracePath = perfettoTracePath) {
+                // NOTE: this query assumes that method trace only occurs once
+                state.metricResults.addAll(MethodTracing.queryMetrics(this))
+            }
+        }
+
+        // add at front since this affects output order
+        state.profilerResults.add(
+            0,
+            Profiler.ResultFile.ofPerfettoTrace(label = "Trace", absolutePath = perfettoTracePath)
+        )
+    }
+
+    fun output(): MicrobenchmarkOutput {
         Log.i(
             BenchmarkState.TAG,
             definition.outputTestName +
                 state.metricResults.map { it.getSummary() } +
                 "count=${state.maxIterationsPerRepeat}"
         )
+        state.profilerResults.forEach { it.convertBeforeSync?.invoke() }
         return MicrobenchmarkOutput(
                 definition = definition,
                 metricResults = state.metricResults,
-                profilerResults = processProfilerResults(perfettoTracePath),
+                profilerResults = state.profilerResults,
                 totalRunTimeNs = System.nanoTime() - startTimeNs,
                 warmupIterations = state.warmupIterations,
                 repeatIterations = state.maxIterationsPerRepeat,
@@ -346,35 +378,6 @@
         return state.metricResults.first { it.name == "timeNs" }.min
     }
 
-    private fun processProfilerResults(perfettoTracePath: String?): List<Profiler.ResultFile> {
-        // prepare profiling result files
-        perfettoTracePath?.apply {
-            // trace completed, and copied into shell writeable dir
-            val file = File(this)
-            file.appendUiState(
-                UiState(
-                    timelineStart = null,
-                    timelineEnd = null,
-                    highlightPackage =
-                        InstrumentationRegistry.getInstrumentation().context.packageName
-                )
-            )
-        }
-        state.profilerResults.forEach {
-            it.convertBeforeSync?.invoke()
-            if (perfettoTracePath != null) {
-                it.embedInPerfettoTrace(perfettoTracePath)
-            }
-        }
-        val profilerResults =
-            listOfNotNull(
-                perfettoTracePath?.let {
-                    Profiler.ResultFile.ofPerfettoTrace(label = "Trace", absolutePath = it)
-                }
-            ) + state.profilerResults
-        return profilerResults
-    }
-
     companion object {
         internal const val TAG = "Benchmark"
     }
@@ -424,7 +427,7 @@
         )
         .apply {
             executePhases()
-            output(perfettoTracePath = null)
+            output()
         }
 }
 
@@ -453,7 +456,10 @@
                 }
             }
         }
-    microbenchmark.output(perfettoTracePath)
+    if (perfettoTracePath != null && Build.VERSION.SDK_INT > 23) {
+        microbenchmark.processPerfettoTrace(perfettoTracePath)
+    }
+    microbenchmark.output()
 }
 
 /**
diff --git a/benchmark/benchmark-common/src/main/java/androidx/benchmark/Profiler.kt b/benchmark/benchmark-common/src/main/java/androidx/benchmark/Profiler.kt
index 1aadeff..f370dc9 100644
--- a/benchmark/benchmark-common/src/main/java/androidx/benchmark/Profiler.kt
+++ b/benchmark/benchmark-common/src/main/java/androidx/benchmark/Profiler.kt
@@ -32,6 +32,7 @@
 import androidx.benchmark.perfetto.StackSamplingConfig
 import androidx.benchmark.simpleperf.ProfileSession
 import androidx.benchmark.simpleperf.RecordOptions
+import androidx.benchmark.traceprocessor.TraceProcessor
 import androidx.benchmark.vmtrace.ArtTrace
 import java.io.File
 import java.io.FileOutputStream
@@ -278,6 +279,41 @@
             .writeAsPerfettoTrace(FileOutputStream(perfettoTrace, /* append= */ true))
     }
 
+    fun queryMetrics(session: TraceProcessor.Session): List<MetricResult> {
+        // NOTE: This query assumes that method trace only wraps a single measurement iteration
+        val row =
+            session
+                .query(
+                    """
+                    CREATE OR REPLACE PERFETTO FUNCTION is_unmeasured(slice_id INT)
+                        RETURNS INT AS
+                        SELECT MIN(1, COUNT(*))
+                        FROM ancestor_slice(${'$'}slice_id)
+                        WHERE
+                          NAME = '${ArtTrace.RUN_WITH_MEASUREMENT_DISABLED_FULLNAME}' OR
+                          NAME = 'java.lang.invoke.MethodType.makeImpl: (Ljava/lang/Class;[Ljava/lang/Class;Z)Ljava/lang/invoke/MethodType;';
+
+                    SELECT COUNT(*) as methodCount, is_unmeasured from (
+                      SELECT
+                        name,
+                        is_unmeasured(slice.id) AS is_unmeasured
+                      FROM slice
+                      WHERE track_id like (
+                        SELECT id FROM track WHERE name LIKE 'Instr:%(Method Trace)' OR name = 'main (Method Trace)'
+                      )
+                      AND is_unmeasured = false
+                    ) GROUP BY is_unmeasured
+                """
+                        .trimIndent()
+                )
+                .firstOrNull()
+        return if (row != null) {
+            listOf(MetricResult("methodCount", data = listOf(row.long("methodCount").toDouble())))
+        } else {
+            emptyList()
+        }
+    }
+
     var hasBeenUsed: Boolean = false
         private set
 }
diff --git a/benchmark/benchmark-common/src/main/java/androidx/benchmark/vmtrace/ArtTrace.kt b/benchmark/benchmark-common/src/main/java/androidx/benchmark/vmtrace/ArtTrace.kt
index 62f0464..e51b947 100644
--- a/benchmark/benchmark-common/src/main/java/androidx/benchmark/vmtrace/ArtTrace.kt
+++ b/benchmark/benchmark-common/src/main/java/androidx/benchmark/vmtrace/ArtTrace.kt
@@ -17,6 +17,7 @@
 package androidx.benchmark.vmtrace
 
 import androidx.annotation.VisibleForTesting
+import androidx.benchmark.MicrobenchmarkScope
 import java.io.File
 import java.io.OutputStream
 import java.util.UUID
@@ -293,12 +294,15 @@
          */
         private const val internOffset = 100L
 
-        private const val PAUSE_MEASUREMENT_FULLNAME =
-            "androidx.benchmark.MicrobenchmarkScope.pauseMeasurement: ()V"
-        private const val RESUME_MEASUREMENT_FULLNAME =
-            "androidx.benchmark.MicrobenchmarkScope.resumeMeasurement: ()V"
-        private const val RUN_WITH_MEASUREMENT_DISABLED_FULLNAME =
-            "androidx.benchmark.MicrobenchmarkScope.runWithMeasurementDisabled: (Lkotlin/jvm/functions/Function0;)Ljava/lang/Object;"
+        // Derive constant strings dynamically to account for identifier minification
+        private val PAUSE_MEASUREMENT_FULLNAME =
+            "${MicrobenchmarkScope::class.qualifiedName}.${MicrobenchmarkScope::pauseMeasurement.name}: ()V"
+        private val RESUME_MEASUREMENT_FULLNAME =
+            "${MicrobenchmarkScope::class.qualifiedName}.${MicrobenchmarkScope::resumeMeasurement.name}: ()V"
+        val RUN_WITH_MEASUREMENT_DISABLED_FULLNAME =
+            "${MicrobenchmarkScope::class.qualifiedName}.runWithMeasurementDisabled: (Lkotlin/jvm/functions/Function0;)Ljava/lang/Object;"
+
+        /** Unique ID used by the injected runWithMeasurementDisabled entry */
         private const val RUN_WITH_MEASUREMENT_DISABLED_INTERNID = internOffset - 1
 
         private val SequenceDataInitial =
diff --git a/benchmark/benchmark-macro/src/androidTest/java/androidx/benchmark/macro/MetricResultExtensionsTest.kt b/benchmark/benchmark-macro/src/androidTest/java/androidx/benchmark/macro/MetricResultExtensionsTest.kt
index 40de339..170b4ca 100644
--- a/benchmark/benchmark-macro/src/androidTest/java/androidx/benchmark/macro/MetricResultExtensionsTest.kt
+++ b/benchmark/benchmark-macro/src/androidTest/java/androidx/benchmark/macro/MetricResultExtensionsTest.kt
@@ -84,8 +84,8 @@
             expected =
                 listOf(
                     // note, bar sorted first
-                    MetricResult("bar", listOf(1.0)),
-                    MetricResult("foo", listOf(0.0))
+                    MetricResult("bar", listOf(1.0), listOf(listOf(1.0))),
+                    MetricResult("foo", listOf(0.0), listOf(listOf(0.0)))
                 ),
             actual =
                 listOf(mapOf("foo" to listOf(0.0), "bar" to listOf(1.0)))
@@ -95,12 +95,14 @@
 
     @Test
     fun mergeToSampledMetricResults_singleMeasurement() {
+        val expectedBar = listOf(101.0, 301.0, 201.0)
+        val expectedFoo = listOf(100.0, 300.0, 200.0)
         assertEquals(
             expected =
                 listOf(
                     // note, bar sorted first
-                    MetricResult("bar", listOf(101.0, 301.0, 201.0)),
-                    MetricResult("foo", listOf(100.0, 300.0, 200.0))
+                    MetricResult("bar", expectedBar, expectedBar.map { listOf(it) }),
+                    MetricResult("foo", expectedFoo, expectedFoo.map { listOf(it) })
                 ),
             actual =
                 listOf(
@@ -118,8 +120,16 @@
             expected =
                 listOf(
                     // note, bar sorted first
-                    MetricResult("bar", List(6) { it.toDouble() }),
-                    MetricResult("foo", List(6) { it.toDouble() })
+                    MetricResult(
+                        "bar",
+                        List(6) { it.toDouble() },
+                        listOf(listOf(0.0), listOf(1.0, 2.0, 3.0, 4.0, 5.0))
+                    ),
+                    MetricResult(
+                        "foo",
+                        List(6) { it.toDouble() },
+                        listOf(listOf(0.0, 1.0, 2.0), listOf(3.0, 4.0, 5.0))
+                    )
                 ),
             actual =
                 listOf(