
Commit 8305de5

Add jmh benchmark comparing multiple cc store strategies
1 parent fe60802 commit 8305de5

File tree

4 files changed: +264 -7 lines

platforms/core-configuration/configuration-cache/build.gradle.kts

Lines changed: 4 additions & 0 deletions
@@ -2,6 +2,7 @@ plugins {
     id("gradlebuild.distribution.implementation-kotlin")
     id("gradlebuild.kotlin-dsl-sam-with-receiver")
     id("gradlebuild.kotlin-experimental-contracts")
+    id("gradlebuild.jmh")
 }
 
 description = "Configuration cache implementation"
@@ -103,6 +104,9 @@ dependencies {
     testImplementation(libs.mockitoKotlin2)
     testImplementation(libs.kotlinCoroutinesDebug)
 
+    jmhImplementation(projects.beanSerializationServices)
+    jmhImplementation(libs.mockitoKotlin2)
+
     integTestImplementation(projects.jvmServices)
     integTestImplementation(projects.toolingApi)
     integTestImplementation(projects.platformJvm)
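
Assuming the gradlebuild.jmh convention plugin follows the standard JMH Gradle plugin layout (a dedicated jmh source set whose classpath comes from jmhImplementation), the two added dependencies correspond to imports used by the new benchmark file further down. A minimal sketch of that correspondence; the value names and wiring here are illustrative, not part of the commit:

import com.nhaarman.mockitokotlin2.mock                                           // from libs.mockitoKotlin2
import org.gradle.internal.configuration.problems.ProblemsListener
import org.gradle.internal.serialize.beans.services.DefaultBeanStateWriterLookup  // from projects.beanSerializationServices

val problems: ProblemsListener = mock()               // stubbed collaborator, as in the benchmark's writeTo()
val beanStateWriters = DefaultBeanStateWriterLookup() // real lookup passed to DefaultWriteContext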
Lines changed: 252 additions & 0 deletions
@@ -0,0 +1,252 @@
/*
 * Copyright 2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gradle.internal.cc.jmh

import com.nhaarman.mockitokotlin2.mock
import org.apache.commons.compress.compressors.snappy.FramedSnappyCompressorOutputStream
import org.apache.commons.compress.compressors.snappy.SnappyCompressorInputStream
import org.apache.commons.compress.compressors.snappy.SnappyCompressorOutputStream
import org.gradle.internal.cc.base.serialize.IsolateOwners
import org.gradle.internal.cc.impl.io.ByteBufferPool
import org.gradle.internal.cc.impl.io.ParallelOutputStream
import org.gradle.internal.cc.impl.serialize.Codecs
import org.gradle.internal.cc.impl.serialize.DefaultClassEncoder
import org.gradle.internal.configuration.problems.ProblemsListener
import org.gradle.internal.extensions.stdlib.useToRun
import org.gradle.internal.serialize.FlushableEncoder
import org.gradle.internal.serialize.beans.services.DefaultBeanStateWriterLookup
import org.gradle.internal.serialize.codecs.core.jos.JavaSerializationEncodingLookup
import org.gradle.internal.serialize.graph.Codec
import org.gradle.internal.serialize.graph.DefaultWriteContext
import org.gradle.internal.serialize.graph.MutableIsolateContext
import org.gradle.internal.serialize.graph.runWriteOperation
import org.gradle.internal.serialize.graph.withIsolate
import org.gradle.internal.serialize.kryo.KryoBackedEncoder
import org.openjdk.jmh.annotations.Benchmark
import org.openjdk.jmh.annotations.Scope
import org.openjdk.jmh.annotations.Setup
import org.openjdk.jmh.annotations.State
import org.openjdk.jmh.infra.Blackhole
import java.io.OutputStream
import java.util.concurrent.ArrayBlockingQueue
import java.util.zip.GZIPOutputStream


@State(Scope.Benchmark)
open class CCStoreBenchmark {

    private
    lateinit var graph: Any

    @Setup
    fun setup() {
        graph = (1..1024).map { Peano.fromInt(1024) }
    }

    @Benchmark
    fun withSnappyCompression(bh: Blackhole) {
        writeTo(
            KryoBackedEncoder(
                compressorOutputStreamFor(
                    BlackholeOutputStream(bh)
                )
            ),
            graph
        )
    }

    @Benchmark
    fun withGZIPCompression(bh: Blackhole) {
        writeTo(
            KryoBackedEncoder(
                GZIPOutputStream(
                    BlackholeOutputStream(bh)
                )
            ),
            graph
        )
    }

    @Benchmark
    fun withParallelSnappyCompression(bh: Blackhole) {
        writeTo(
            KryoBackedEncoder(
                ParallelOutputStream.of {
                    compressorOutputStreamFor(
                        BlackholeOutputStream(bh)
                    )
                },
                ParallelOutputStream.recommendedBufferSize
            ),
            graph
        )
    }

    @Benchmark
    fun withParallelSnappyCompressionAndArrayBlockingQueue(bh: Blackhole) {
        writeTo(
            KryoBackedEncoder(
                ParallelOutputStream.of(ArrayBlockingQueue(ByteBufferPool.maxChunks)) {
                    compressorOutputStreamFor(
                        BlackholeOutputStream(bh)
                    )
                },
                ParallelOutputStream.recommendedBufferSize
            ),
            graph
        )
    }

    @Benchmark
    fun withParallelGZIPCompression(bh: Blackhole) {
        writeTo(
            KryoBackedEncoder(
                ParallelOutputStream.of {
                    GZIPOutputStream(
                        BlackholeOutputStream(bh)
                    )
                },
                ParallelOutputStream.recommendedBufferSize
            ),
            graph
        )
    }

    @Benchmark
    fun withoutCompression(bh: Blackhole) {
        writeTo(
            KryoBackedEncoder(BlackholeOutputStream(bh)),
            graph
        )
    }

    private
    fun compressorOutputStreamFor(outputStream: OutputStream) =
        FramedSnappyCompressorOutputStream(
            outputStream,
            SnappyCompressorOutputStream
                .createParameterBuilder(SnappyCompressorInputStream.DEFAULT_BLOCK_SIZE)
                .tunedForSpeed()
                .build()
        )

    internal
    class BlackholeOutputStream(val bh: Blackhole) : OutputStream() {
        override fun write(b: Int) {
            bh.consume(b)
        }
    }

    private
    fun writeTo(
        encoder: KryoBackedEncoder,
        graph: Any,
        codec: Codec<Any?> = userTypesCodec(),
        problemsListener: ProblemsListener = mock(),
    ) {
        writeContextFor(encoder, codec, problemsListener).useToRun {
            withIsolateMock(codec) {
                runWriteOperation {
                    write(graph)
                }
            }
        }
    }

    private
    fun writeContextFor(encoder: FlushableEncoder, codec: Codec<Any?>, problemHandler: ProblemsListener) =
        DefaultWriteContext(
            codec = codec,
            encoder = encoder,
            classEncoder = DefaultClassEncoder(mock()),
            beanStateWriterLookup = DefaultBeanStateWriterLookup(),
            logger = mock(),
            tracer = null,
            problemsListener = problemHandler
        )

    private
    inline fun <R> MutableIsolateContext.withIsolateMock(codec: Codec<Any?>, block: () -> R): R =
        withIsolate(IsolateOwners.OwnerGradle(mock()), codec) {
            block()
        }

    private
    fun userTypesCodec() = codecs().userTypesCodec()

    private
    fun codecs() = Codecs(
        directoryFileTreeFactory = mock(),
        fileCollectionFactory = mock(),
        artifactSetConverter = mock(),
        fileLookup = mock(),
        propertyFactory = mock(),
        filePropertyFactory = mock(),
        fileResolver = mock(),
        objectFactory = mock(),
        instantiator = mock(),
        fileSystemOperations = mock(),
        taskNodeFactory = mock(),
        ordinalGroupFactory = mock(),
        inputFingerprinter = mock(),
        buildOperationRunner = mock(),
        classLoaderHierarchyHasher = mock(),
        isolatableFactory = mock(),
        managedFactoryRegistry = mock(),
        parameterScheme = mock(),
        actionScheme = mock(),
        attributesFactory = mock(),
        valueSourceProviderFactory = mock(),
        calculatedValueContainerFactory = mock(),
        patternSetFactory = mock(),
        fileOperations = mock(),
        fileFactory = mock(),
        includedTaskGraph = mock(),
        buildStateRegistry = mock(),
        documentationRegistry = mock(),
        javaSerializationEncodingLookup = JavaSerializationEncodingLookup(),
        flowProviders = mock(),
        transformStepNodeFactory = mock(),
    )

    internal
    sealed class Peano {

        companion object {

            fun fromInt(n: Int): Peano = (0 until n).fold(Z as Peano) { acc, _ -> S(acc) }
        }

        fun toInt(): Int = sequence().count() - 1

        object Z : Peano() {
            override fun toString() = "Z"
        }

        data class S(val n: Peano) : Peano() {
            override fun toString() = "S($n)"
        }

        private
        fun sequence() = generateSequence(this) { previous ->
            when (previous) {
                is Z -> null
                is S -> previous.n
            }
        }
    }
}
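
For a sense of scale, and assuming the Peano.fromInt and setup() definitions above (the value names below are illustrative, not part of the benchmark): each chain is 1024 nested S nodes ending in the Z object, and setup() builds 1024 such chains, so every benchmark invocation serializes roughly a million object references. Writing them into BlackholeOutputStream keeps disk I/O out of the measurement, so the variants differ only in serialization and compression cost.

val nodesPerChain = 1024 + 1             // 1024 S(...) wrappers plus the terminal Z reference
val chains = 1024                        // setup(): (1..1024).map { Peano.fromInt(1024) }
val totalNodes = chains * nodesPerChain  // 1_049_600 object references per write operation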

platforms/core-configuration/configuration-cache/src/main/kotlin/org/gradle/internal/cc/impl/ConfigurationCacheIO.kt

Lines changed: 1 addition & 1 deletion
@@ -274,7 +274,7 @@ class ConfigurationCacheIO internal constructor(
 
         when (stateType) {
             StateType.Work -> KryoBackedEncoder(
-                ParallelOutputStream.of(::compressorStream),
+                ParallelOutputStream.of(outputStreamFactory = ::compressorStream),
                 ParallelOutputStream.recommendedBufferSize
             )
 
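This call-site change follows from the new ParallelOutputStream.of signature in the next file: readyQ is now the leading parameter (with a default), so a bare ::compressorStream would be matched against it and no longer compile; passing the factory by name keeps the behaviour unchanged. A trailing lambda would work equally well; both equivalent forms are sketched below (not part of the commit):

ParallelOutputStream.of(outputStreamFactory = ::compressorStream)  // named argument, as in this change
ParallelOutputStream.of { compressorStream() }                     // trailing lambda, equivalent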

platforms/core-configuration/configuration-cache/src/main/kotlin/org/gradle/internal/cc/impl/io/ParallelOutputStream.kt

Lines changed: 7 additions & 6 deletions
@@ -43,20 +43,22 @@ object ParallelOutputStream {
         get() = ByteBufferPool.chunkSize
 
     /**
-     * Returns an [OutputStream] that offloads writing to the stream returned by [createOutputStream]
+     * Returns an [OutputStream] that offloads writing to the stream returned by [outputStreamFactory]
      * to a separate thread. The returned stream can only be written to from a single thread at a time.
      *
-     * Note that [createOutputStream] will be called in the writing thread.
+     * Note that [outputStreamFactory] will be called in the writing thread.
      *
      * @see QueuedOutputStream
     * @see ByteBufferPool
     */
-    fun of(createOutputStream: () -> OutputStream): OutputStream {
+    fun of(
+        readyQ: Queue<ByteBuffer> = ConcurrentLinkedQueue(),
+        outputStreamFactory: () -> OutputStream,
+    ): OutputStream {
         val chunks = ByteBufferPool()
-        val readyQ = ConcurrentLinkedQueue<ByteBuffer>()
         val writer = thread(name = "CC writer", isDaemon = true, priority = Thread.NORM_PRIORITY) {
             try {
-                createOutputStream().use { outputStream ->
+                outputStreamFactory().use { outputStream ->
                     while (true) {
                         val chunk = readyQ.poll()
                         if (chunk == null) {
@@ -183,7 +185,6 @@
 * Manages a pool of chunks of fixed [size][ByteBufferPool.chunkSize] allocated on-demand
 * upto a [fixed maximum][ByteBufferPool.maxChunks].
 */
-private
 class ByteBufferPool {
 
     companion object {
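
Beyond the parameter rename, the functional change is that the queue handing chunks to the "CC writer" thread is now injectable: callers that omit readyQ keep the original unbounded ConcurrentLinkedQueue, while the benchmark above can supply a bounded ArrayBlockingQueue sized to ByteBufferPool.maxChunks, which is also why the private modifier on ByteBufferPool is removed. A minimal sketch of both call forms, assuming only the signatures visible in this diff (the target file name is illustrative):

import java.io.File
import java.util.concurrent.ArrayBlockingQueue

// Default hand-off: unbounded ConcurrentLinkedQueue between the producer and the writer thread.
val defaultStream = ParallelOutputStream.of { File("state.bin").outputStream() }

// Bounded hand-off, as in withParallelSnappyCompressionAndArrayBlockingQueue above;
// accessible from outside the file now that ByteBufferPool is no longer private.
val boundedStream = ParallelOutputStream.of(ArrayBlockingQueue(ByteBufferPool.maxChunks)) {
    File("state.bin").outputStream()
}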

0 commit comments
