StreamUtils Try-Catch for streams, Remove old unused flags

Andrea Cavalli 2023-05-24 01:44:06 +02:00
parent bee2fe1bf5
commit 2a817cbc58
9 changed files with 317 additions and 200 deletions

View File: DatabaseMapDictionary.java

@@ -1,5 +1,7 @@
 package it.cavallium.dbengine.database.collections;
 
+import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;
+
 import it.cavallium.buffer.Buf;
 import it.cavallium.buffer.BufDataInput;
 import it.cavallium.buffer.BufDataOutput;
@@ -535,8 +537,10 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
     @Override
     public Stream<Entry<T, U>> setAllEntriesAndGetPrevious(Stream<Entry<T, U>> entries) {
-        return getAllEntries(null, false)
-                .onClose(() -> dictionary.setRange(range, entries.map(entry -> serializeEntry(entry)), false));
+        return resourceStream(
+                () -> getAllEntries(null, false),
+                () -> dictionary.setRange(range, entries.map(entry -> serializeEntry(entry)), false)
+        );
     }
 
     @Override

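Note on the DatabaseMapDictionary change above: the bulk write is still deferred until the returned stream is closed, because the resourceStream(supplier, finalization) overload registers the setRange call as the stream's onClose action. A minimal caller-side sketch of that contract follows; dict, newEntries and the printing body are hypothetical stand-ins, not part of this commit.

import it.cavallium.dbengine.database.collections.DatabaseMapDictionary;
import java.util.Map.Entry;
import java.util.stream.Stream;

class SetAllEntriesUsageSketch {

    // Hypothetical caller: replace the whole map and print what was there before.
    static void replaceAll(DatabaseMapDictionary<String, String> dict, Stream<Entry<String, String>> newEntries) {
        // The returned stream must be closed: closing it runs the deferred
        // dictionary.setRange(...) registered through resourceStream.
        try (Stream<Entry<String, String>> previous = dict.setAllEntriesAndGetPrevious(newEntries)) {
            previous.forEach(e -> System.out.println(e.getKey() + " -> " + e.getValue()));
        }
    }
}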
View File: DatabaseMapDictionaryDeep.java

@@ -1,5 +1,7 @@
 package it.cavallium.dbengine.database.collections;
 
+import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;
+
 import it.cavallium.buffer.Buf;
 import it.cavallium.buffer.BufDataInput;
 import it.cavallium.buffer.BufDataOutput;
@@ -16,6 +18,7 @@ import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
 import it.cavallium.dbengine.database.serialization.SerializationException;
 import it.cavallium.dbengine.database.serialization.Serializer;
 import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
+import it.cavallium.dbengine.utils.StreamUtils;
 import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
 import java.util.Map.Entry;
 import java.util.Optional;
@@ -304,7 +307,7 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
     @Override
     public Stream<Entry<T, U>> setAllEntriesAndGetPrevious(Stream<Entry<T, U>> entries) {
-        return this.getAllEntries(null, false).onClose(() -> setAllEntries(entries));
+        return resourceStream(() -> this.getAllEntries(null, false), () -> setAllEntries(entries));
     }
 
     @Override

View File: LLLocalDictionary.java

@@ -10,6 +10,7 @@ import static it.cavallium.dbengine.utils.StreamUtils.ROCKSDB_POOL;
 import static it.cavallium.dbengine.utils.StreamUtils.collectOn;
 import static it.cavallium.dbengine.utils.StreamUtils.executing;
 import static it.cavallium.dbengine.utils.StreamUtils.fastSummingLong;
+import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;
 import static it.cavallium.dbengine.utils.StreamUtils.streamWhileNonNull;
 import static java.util.Objects.requireNonNull;
 import static it.cavallium.dbengine.utils.StreamUtils.batches;
@@ -629,39 +630,33 @@ public class LLLocalDictionary implements LLDictionary
     @Override
     public Stream<BadBlock> badBlocks(LLRange range) {
         try {
-            var ro = LLUtils.generateCustomReadOptions(null,
-                    false,
-                    isBoundedRange(range),
-                    false
-            );
-            ro.setFillCache(false);
-            if (!range.isSingle()) {
-                if (LLUtils.MANUAL_READAHEAD) {
-                    ro.setReadaheadSize(32 * 1024);
-                }
-            }
-            ro.setVerifyChecksums(true);
-            var rocksIterator = db.newRocksIterator(ro, range, false);
-            try {
-                rocksIterator.seekToFirst();
-            } catch (Exception ex) {
-                rocksIterator.close();
-                ro.close();
-                throw new DBException("Failed to open rocksdb iterator", ex);
-            }
-            return streamWhileNonNull(() -> {
-                if (!rocksIterator.isValid()) return null;
-                Buf rawKey = null;
-                try {
-                    rawKey = rocksIterator.keyBuf().copy();
-                    rocksIterator.next();
-                } catch (RocksDBException ex) {
-                    return new BadBlock(databaseName, ColumnUtils.special(columnName), rawKey, ex);
-                }
-                return null;
-            }).takeWhile(x -> rocksIterator.isValid()).onClose(() -> {
-                rocksIterator.close();
-                ro.close();
-            });
+            return resourceStream(
+                    () -> LLUtils.generateCustomReadOptions(null, false, isBoundedRange(range), false),
+                    ro -> {
+                        ro.setFillCache(false);
+                        if (!range.isSingle()) {
+                            if (LLUtils.MANUAL_READAHEAD) {
+                                ro.setReadaheadSize(32 * 1024);
+                            }
+                        }
+                        ro.setVerifyChecksums(true);
+                        return resourceStream(() -> db.newRocksIterator(ro, range, false), rocksIterator -> {
+                            rocksIterator.seekToFirst();
+                            return streamWhileNonNull(() -> {
+                                if (!rocksIterator.isValid()) return null;
+                                Buf rawKey = null;
+                                try {
+                                    rawKey = rocksIterator.keyBuf().copy();
+                                    rocksIterator.next();
+                                } catch (RocksDBException ex) {
+                                    return new BadBlock(databaseName, ColumnUtils.special(columnName), rawKey, ex);
+                                }
+                                return null;
+                            }).takeWhile(x -> rocksIterator.isValid()).onClose(() -> {
+                                rocksIterator.close();
+                                ro.close();
+                            });
+                        });
+                    });
         } catch (RocksDBException e) {
             throw new DBException("Failed to get bad blocks", e);

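In the badBlocks rewrite above, the nested resourceStream calls tie both native handles (the ReadOptions and the RocksIterator) to the returned stream, so they are released only when the caller closes it. A consumption sketch, with hypothetical dictionary and range variables and the dbengine imports omitted for brevity:

// The stream owns native handles; closing it runs the onClose actions registered
// by the nested resourceStream calls, releasing the RocksIterator and ReadOptions.
static long countBadBlocks(LLLocalDictionary dictionary, LLRange range) {
    try (Stream<BadBlock> badBlocks = dictionary.badBlocks(range)) {
        return badBlocks.count();
    }
}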
View File: LLLocalGroupedReactiveRocksIterator.java

@@ -3,6 +3,7 @@ package it.cavallium.dbengine.database.disk;
 import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
 import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions;
 import static it.cavallium.dbengine.database.LLUtils.isBoundedRange;
+import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;
 
 import it.cavallium.buffer.Buf;
 import it.cavallium.dbengine.database.LLRange;
@@ -20,6 +21,7 @@ import java.util.function.Supplier;
 import java.util.stream.Stream;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 import org.rocksdb.ReadOptions;
 import org.rocksdb.RocksDBException;
@@ -53,58 +55,71 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
     }
 
     public final Stream<List<T>> stream() {
-        return StreamUtils.<List<T>>streamWhileNonNull(() -> {
-            try (var readOptions = generateCustomReadOptions(this.readOptions.get(), canFillCache, isBoundedRange(range), smallRange);
-                    var rocksIterator = db.newRocksIterator(readOptions, range, false)) {
-                if (logger.isTraceEnabled()) {
-                    logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range));
-                }
-                ObjectArrayList<T> values = new ObjectArrayList<>();
-                Buf firstGroupKey = null;
-                while (rocksIterator.isValid()) {
-                    // Note that the underlying array is subject to changes!
-                    Buf key = rocksIterator.keyBuf();
-                    if (firstGroupKey == null) {
-                        firstGroupKey = key.copy();
-                    } else if (!LLUtils.equals(firstGroupKey, 0, key, 0, prefixLength)) {
-                        break;
-                    }
-                    // Note that the underlying array is subject to changes!
-                    @Nullable Buf value;
-                    if (readValues) {
-                        value = rocksIterator.valueBuf();
-                    } else {
-                        value = null;
-                    }
+        try {
+            return resourceStream(
+                () -> generateCustomReadOptions(this.readOptions.get(), canFillCache, isBoundedRange(range), smallRange),
+                readOptions -> StreamUtils.resourceStream(
+                    () -> db.newRocksIterator(readOptions, range, false),
+                    rocksIterator -> StreamUtils.streamWhileNonNull(() -> {
+                        try {
+                            if (logger.isTraceEnabled()) {
+                                logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range));
+                            }
+                            ObjectArrayList<T> values = new ObjectArrayList<>();
+                            Buf firstGroupKey = null;
+                            while (rocksIterator.isValid()) {
+                                // Note that the underlying array is subject to changes!
+                                Buf key = rocksIterator.keyBuf();
+                                if (firstGroupKey == null) {
+                                    firstGroupKey = key.copy();
+                                } else if (!LLUtils.equals(firstGroupKey, 0, key, 0, prefixLength)) {
+                                    break;
+                                }
+                                // Note that the underlying array is subject to changes!
+                                @Nullable Buf value;
+                                if (readValues) {
+                                    value = rocksIterator.valueBuf();
+                                } else {
+                                    value = null;
+                                }
                                 if (logger.isTraceEnabled()) {
                                     logger.trace(MARKER_ROCKSDB,
                                             "Range {} is reading {}: {}",
                                             LLUtils.toStringSafe(range),
                                             LLUtils.toStringSafe(key),
                                             LLUtils.toStringSafe(value)
                                     );
                                 }
                                 rocksIterator.next();
                                 T entry = getEntry(key, value);
                                 values.add(entry);
                             }
                             if (!values.isEmpty()) {
                                 return values;
                             } else {
                                 if (logger.isTraceEnabled()) {
                                     logger.trace(MARKER_ROCKSDB, "Range {} ended", LLUtils.toStringSafe(range));
                                 }
                                 return null;
                             }
                         } catch (RocksDBException ex) {
-                    if (logger.isTraceEnabled()) {
-                        logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range));
-                    }
-                    throw new CompletionException(new DBException("Range failed", ex));
-                }
-        });
+                            throw new CompletionException(generateRangeFailedException(ex));
+                        }
+                    })
+                )
+            );
+        } catch (RocksDBException ex) {
+            throw generateRangeFailedException(ex);
+        }
+    }
+
+    private DBException generateRangeFailedException(RocksDBException ex) {
+        if (logger.isTraceEnabled()) {
+            logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range));
+        }
+        throw new DBException("Range failed", ex);
     }
 
     /**

View File: LLLocalKeyPrefixReactiveRocksIterator.java

@@ -3,6 +3,7 @@ package it.cavallium.dbengine.database.disk;
 import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
 import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions;
 import static it.cavallium.dbengine.database.LLUtils.isBoundedRange;
+import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;
 import static it.cavallium.dbengine.utils.StreamUtils.streamWhileNonNull;
 
 import com.google.common.collect.Iterators;
@@ -50,61 +51,74 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
     public Stream<Buf> stream() {
-        return streamWhileNonNull(() -> {
-            try (var readOptions = generateCustomReadOptions(this.readOptions.get(), canFillCache, isBoundedRange(range), smallRange);
-                    var rocksIterator = db.newRocksIterator(readOptions, range, false)) {
-                if (logger.isTraceEnabled()) {
-                    logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range));
-                }
-                Buf firstGroupKey = null;
-                while (rocksIterator.isValid()) {
-                    // Note that the underlying array is subject to changes!
-                    Buf key = rocksIterator.keyBuf();
-                    var keyLen = key.size();
-                    if (keyLen >= prefixLength) {
-                        if (firstGroupKey == null) {
-                            firstGroupKey = key.copy();
-                            assert firstGroupKey == null || firstGroupKey.size() >= prefixLength;
-                        } else if (!LLUtils.equals(firstGroupKey,
-                                0,
-                                key,
-                                0,
-                                prefixLength
-                        )) {
-                            break;
-                        }
-                    } else {
-                        logger.error("Skipped a key with length {}, the expected minimum prefix key length is {}!"
-                                + " This key will be dropped", key.size(), prefixLength);
-                    }
-                    rocksIterator.next();
-                }
+        try {
+            return resourceStream(
+                () -> generateCustomReadOptions(this.readOptions.get(), canFillCache, isBoundedRange(range), smallRange),
+                readOptions -> resourceStream(
+                    () -> db.newRocksIterator(readOptions, range, false),
+                    rocksIterator -> streamWhileNonNull(() -> {
+                        try {
+                            if (logger.isTraceEnabled()) {
+                                logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range));
+                            }
+                            Buf firstGroupKey = null;
+                            while (rocksIterator.isValid()) {
+                                // Note that the underlying array is subject to changes!
+                                Buf key = rocksIterator.keyBuf();
+                                var keyLen = key.size();
+                                if (keyLen >= prefixLength) {
+                                    if (firstGroupKey == null) {
+                                        firstGroupKey = key.copy();
+                                        assert firstGroupKey == null || firstGroupKey.size() >= prefixLength;
+                                    } else if (!LLUtils.equals(firstGroupKey,
+                                            0,
+                                            key,
+                                            0,
+                                            prefixLength
+                                    )) {
+                                        break;
+                                    }
+                                } else {
+                                    logger.error("Skipped a key with length {}, the expected minimum prefix key length is {}!"
+                                            + " This key will be dropped", key.size(), prefixLength);
+                                }
+                                rocksIterator.next();
+                            }
                             if (firstGroupKey != null) {
                                 var groupKeyPrefix = firstGroupKey.subList(0, prefixLength);
                                 if (logger.isTraceEnabled()) {
                                     logger.trace(MARKER_ROCKSDB,
                                             "Range {} is reading prefix {}",
                                             LLUtils.toStringSafe(range),
                                             LLUtils.toStringSafe(groupKeyPrefix)
                                     );
                                 }
                                 return groupKeyPrefix;
                             } else {
                                 if (logger.isTraceEnabled()) {
                                     logger.trace(MARKER_ROCKSDB, "Range {} ended", LLUtils.toStringSafe(range));
                                 }
                                 return null;
                             }
                         } catch (RocksDBException ex) {
-                if (logger.isTraceEnabled()) {
-                    logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range));
-                }
-                throw new CompletionException(new DBException("Range failed", ex));
-            }
-        });
+                            throw new CompletionException(generateRangeFailedException(ex));
+                        }
+                    }
+                ))
+            );
+        } catch (RocksDBException e) {
+            throw generateRangeFailedException(e);
+        }
+    }
+
+    private DBException generateRangeFailedException(RocksDBException ex) {
+        if (logger.isTraceEnabled()) {
+            logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range));
+        }
+        throw new DBException("Range failed", ex);
     }
 }

View File: LLLocalMigrationReactiveRocksIterator.java

@@ -1,6 +1,7 @@
 package it.cavallium.dbengine.database.disk;
 
 import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions;
+import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;
 import static it.cavallium.dbengine.utils.StreamUtils.streamWhileNonNull;
 
 import it.cavallium.buffer.Buf;
@@ -34,20 +35,26 @@ public final class LLLocalMigrationReactiveRocksIterator {
     }
 
     public Stream<LLEntry> stream() {
-        return streamWhileNonNull(() -> {
-            try (var readOptions = generateCustomReadOptions(this.readOptions.get(), false, false, false);
-                    var rocksIterator = db.newRocksIterator(readOptions, range, false)) {
-                if (rocksIterator.isValid()) {
-                    var key = rocksIterator.keyBuf().copy();
-                    var value = rocksIterator.valueBuf().copy();
-                    rocksIterator.next(false);
-                    return LLEntry.of(key, value);
-                } else {
-                    return null;
-                }
-            } catch (RocksDBException e) {
-                throw new DBException("Failed to open iterator", e);
-            }
-        });
+        try {
+            return resourceStream(
+                // Create the read options
+                () -> generateCustomReadOptions(this.readOptions.get(), false, false, false),
+                readOptions -> resourceStream(
+                    // Create the iterator
+                    () -> db.newRocksIterator(readOptions, range, false),
+                    // Stream the iterator values until null is returned
+                    iterator -> streamWhileNonNull(() -> {
+                        if (iterator.isValid()) {
+                            var key = iterator.keyBuf().copy();
+                            var value = iterator.valueBuf().copy();
+                            iterator.next(false);
+                            return LLEntry.of(key, value);
+                        } else {
+                            return null;
+                        }
+                    })));
+        } catch (RocksDBException e) {
+            throw new DBException("Failed to open iterator", e);
+        }
     }
 }

View File: LLLocalMultiLuceneIndex.java

@@ -67,10 +67,6 @@ import org.jetbrains.annotations.Nullable;
 public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneIndex, LuceneCloseable {
 
     private static final Logger LOG = LogManager.getLogger(LLLuceneIndex.class);
 
-    private static final boolean BYPASS_GROUPBY_BUG = Boolean.parseBoolean(System.getProperty(
-            "it.cavallium.dbengine.bypassGroupByBug",
-            "false"
-    ));
     private final String clusterName;
     private final boolean lowMemory;

View File: LLLocalReactiveRocksIterator.java

@@ -3,6 +3,7 @@ package it.cavallium.dbengine.database.disk;
 import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
 import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions;
 import static it.cavallium.dbengine.database.LLUtils.isBoundedRange;
+import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;
 import static it.cavallium.dbengine.utils.StreamUtils.streamWhileNonNull;
 
 import it.cavallium.buffer.Buf;
@@ -48,52 +49,65 @@ public abstract class LLLocalReactiveRocksIterator<T> {
     }
 
     public final Stream<T> stream() {
-        return streamWhileNonNull(() -> {
-            try (var readOptions = generateCustomReadOptions(this.readOptions.get(), true, isBoundedRange(range), smallRange);
-                    var rocksIterator = db.newRocksIterator(readOptions, range, reverse)) {
-                if (logger.isTraceEnabled()) {
-                    logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range));
-                }
-                if (rocksIterator.isValid()) {
-                    // Note that the underlying array is subject to changes!
-                    Buf key;
-                    key = rocksIterator.keyBuf();
-                    // Note that the underlying array is subject to changes!
-                    Buf value;
-                    if (readValues) {
-                        value = rocksIterator.valueBuf();
-                    } else {
-                        value = null;
-                    }
+        try {
+            return resourceStream(
+                () -> generateCustomReadOptions(this.readOptions.get(), true, isBoundedRange(range), smallRange),
+                readOptions -> resourceStream(
+                    () -> db.newRocksIterator(readOptions, range, reverse),
+                    rocksIterator -> streamWhileNonNull(() -> {
+                        try {
+                            if (logger.isTraceEnabled()) {
+                                logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range));
+                            }
+                            if (rocksIterator.isValid()) {
+                                // Note that the underlying array is subject to changes!
+                                Buf key;
+                                key = rocksIterator.keyBuf();
+                                // Note that the underlying array is subject to changes!
+                                Buf value;
+                                if (readValues) {
+                                    value = rocksIterator.valueBuf();
+                                } else {
+                                    value = null;
+                                }
                                 if (logger.isTraceEnabled()) {
                                     logger.trace(MARKER_ROCKSDB,
                                             "Range {} is reading {}: {}",
                                             LLUtils.toStringSafe(range),
                                             LLUtils.toStringSafe(key),
                                             LLUtils.toStringSafe(value)
                                     );
                                 }
                                 if (reverse) {
                                     rocksIterator.prev();
                                 } else {
                                     rocksIterator.next();
                                 }
                                 return getEntry(key, value);
                             } else {
                                 if (logger.isTraceEnabled()) {
                                     logger.trace(MARKER_ROCKSDB, "Range {} ended", LLUtils.toStringSafe(range));
                                 }
                                 return null;
                             }
                         } catch (RocksDBException ex) {
-                    if (logger.isTraceEnabled()) {
-                        logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range));
-                    }
-                    throw new CompletionException(ex);
-                }
-        });
+                            throw new CompletionException(generateRangeFailedException(ex));
+                        }
+                    })
+                )
+            );
+        } catch (RocksDBException ex) {
+            throw generateRangeFailedException(ex);
+        }
+    }
+
+    private DBException generateRangeFailedException(RocksDBException ex) {
+        if (logger.isTraceEnabled()) {
+            logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range));
+        }
+        throw new DBException("Range failed", ex);
     }
 
     /**

View File: StreamUtils.java

@@ -32,6 +32,8 @@ import java.util.stream.Collector;
 import java.util.stream.Collector.Characteristics;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
+import org.apache.commons.lang3.function.FailableFunction;
+import org.apache.commons.lang3.function.FailableSupplier;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
@@ -324,6 +326,57 @@ public class StreamUtils {
         return Streams.mapWithIndex(stream, mapper::apply);
     }
 
+    /**
+     * Checks if stream.onClose() will be called during the stream lifetime
+     */
+    public static <T> Stream<T> resourceStream(Stream<T> stream) {
+        var sr = new StreamResource(null);
+        return stream.onClose(sr::close);
+    }
+
+    /**
+     * Checks if stream.onClose() will be called during the stream lifetime
+     */
+    public static <SR extends SimpleResource, T, EX extends Exception> Stream<T> resourceStream(
+            FailableSupplier<SR, ? extends EX> resourceInitializer,
+            FailableFunction<SR, ? extends Stream<T>, ? extends EX> streamInitializer) throws EX {
+        SR resource = resourceInitializer.get();
+        var sr = new StreamResource(resource::close);
+        try {
+            Stream<T> stream = streamInitializer.apply(resource);
+            return stream.onClose(sr::close);
+        } catch (Throwable ex) {
+            sr.close();
+            throw ex;
+        }
+    }
+
+    /**
+     * Checks if stream.onClose() will be called during the stream lifetime
+     */
+    public static <T> Stream<T> resourceStream(Stream<T> stream, Runnable finalization) {
+        var sr = new StreamResource(finalization);
+        try {
+            return stream.onClose(sr::close);
+        } catch (Throwable ex) {
+            sr.close();
+            throw ex;
+        }
+    }
+
+    /**
+     * Checks if stream.onClose() will be called during the stream lifetime
+     */
+    public static <T> Stream<T> resourceStream(Supplier<Stream<T>> stream, Runnable finalization) {
+        var sr = new StreamResource(finalization);
+        try {
+            return stream.get().onClose(sr::close);
+        } catch (Throwable ex) {
+            sr.close();
+            throw ex;
+        }
+    }
+
     private record BatchSpliterator<E>(Spliterator<E> base, int batchSize) implements Spliterator<List<E>> {
 
         @Override
@@ -556,4 +609,20 @@ public class StreamUtils {
             return worker;
         }
     }
+
+    private static class StreamResource extends SimpleResource {
+
+        private final Runnable finalization;
+
+        public StreamResource(Runnable finalization) {
+            this.finalization = finalization;
+        }
+
+        @Override
+        protected void onClose() {
+            if (finalization != null) {
+                finalization.run();
+            }
+        }
+    }
 }
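Taken together, the new resourceStream overloads are what let the iterator classes above drop their try-with-resources blocks: the resource is opened up front, the stream is built from it, and the resource is closed when the stream is closed, or immediately if building the stream fails. A self-contained usage sketch follows; the Cursor class is a made-up stand-in for a native handle such as a ReadOptions or RocksIterator, and the import path of SimpleResource is assumed to be it.cavallium.dbengine.utils.

import it.cavallium.dbengine.utils.SimpleResource; // assumed location of SimpleResource
import it.cavallium.dbengine.utils.StreamUtils;
import java.util.stream.Stream;

public class ResourceStreamUsageSketch {

    // Toy resource standing in for a native handle (hypothetical, not part of dbengine).
    static class Cursor extends SimpleResource {

        Stream<String> rows() {
            return Stream.of("a", "b", "c");
        }

        @Override
        protected void onClose() {
            System.out.println("cursor released");
        }
    }

    public static void main(String[] args) {
        // The explicit type witness pins EX to RuntimeException, so no checked exception is declared.
        try (Stream<String> rows = StreamUtils.<Cursor, String, RuntimeException>resourceStream(Cursor::new, Cursor::rows)) {
            rows.forEach(System.out::println);
        } // closing the stream also closes the Cursor through the StreamResource onClose hook
    }
}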