Compare commits

...

89 Commits

Author SHA1 Message Date
Andrea Cavalli 54f4b984ed Use java 8 2023-05-10 17:20:13 +02:00
Andrea Cavalli 34427e3097 Fix revision 2023-05-10 17:15:40 +02:00
Andrea Cavalli 84aa32cac3 Fix revision 2023-05-10 17:14:15 +02:00
Andrea Cavalli 82b8abd828 Formatting 2023-05-10 17:08:26 +02:00
Andrea Cavalli 908ab2969b Code cleanup 2023-05-10 17:06:35 +02:00
Andrea Cavalli 0ec246fe32 First commit 2023-05-10 17:05:09 +02:00
Andrea Cavalli 4c94431ed6 Reformat code 2022-05-09 11:17:24 +02:00
Andrea Cavalli 76ddeb7696 Update module info 2022-05-09 09:43:42 +02:00
Andrea Cavalli c98069ff7c Update gitignore 2022-04-27 10:46:59 +02:00
Andrea Cavalli 489fa15fdd Java modules 2022-04-09 02:42:56 +02:00
Andrea Cavalli af83cba387 Update dependencies 2022-03-19 00:07:10 +01:00
Andrea Cavalli 24f9811f6f Configure daemon 2022-02-25 00:43:26 +01:00
Andrea Cavalli 5030b02f49 Update to 1.1.8 2022-02-21 00:59:48 +01:00
Andrea Cavalli 255829fe2f Bugfix 2022-02-21 00:59:11 +01:00
Andrea Cavalli d1b4fe6a96 Update fastutil 2022-02-12 00:17:37 +01:00
Andrea Cavalli 707005500f Support peek 2022-02-09 00:49:32 +01:00
Andrea Cavalli 93c35b2b30 Update pom 2022-01-11 16:01:39 +01:00
Andrea Cavalli 8e1351501c Remove logs 2022-01-04 11:52:48 +01:00
Andrea Cavalli df4f4b253c Code cleanup 2021-12-16 16:33:18 +01:00
Andrea Cavalli a412246212 Add thread groups 2021-12-16 16:15:01 +01:00
Andrea Cavalli b9edef018a Update to 1.1.5 2021-10-17 13:05:50 +02:00
Andrea Cavalli 1c4c49c24c Remove deprecated securitymanager 2021-09-23 12:01:40 +02:00
Andrea Cavalli e4ba8dcd68 Annotate NotNull and Nullable 2021-09-22 18:26:16 +02:00
Andrea Cavalli aa661c936c Update fastutil 2021-09-16 15:15:20 +02:00
Andrea Cavalli bf5ae382ce Optimize speed of MoshiPolymorphic 2021-08-24 11:23:39 +02:00
Andrea Cavalli 2122b180e0 Fix compilation issue 2021-08-22 18:19:07 +02:00
Andrea Cavalli e3c17e9923 Remove locks 2021-08-16 10:29:08 +02:00
Andrea Cavalli 6b5b69acba Downgrade fastutil 2021-07-24 22:27:15 +02:00
Andrea Cavalli ff4c803536 Add record getters 2021-07-13 23:07:18 +02:00
Andrea Cavalli 51eaeaae33 Fix missing list elements 2021-06-30 10:58:13 +02:00
Andrea Cavalli 37e8d9ae8a Add lists support 2021-06-20 02:27:51 +02:00
Andrea Cavalli 84cf90947e Initialize logs with slf4j root settings if no root level has been set 2021-06-09 01:49:58 +02:00
Andrea Cavalli 99c410c21c Fix polymorphism adapters 2021-06-09 01:47:28 +02:00
Andrea Cavalli 135d71bb44 Fix adapters order 2021-06-08 19:55:10 +02:00
Andrea Cavalli 74593538dc Fix adapters order 2021-06-08 19:42:20 +02:00
Andrea Cavalli 47c505d755 Update to 1.1.4 2021-06-08 19:18:17 +02:00
Andrea Cavalli 2511a1748a Bugfix 2021-06-08 19:14:35 +02:00
Andrea Cavalli 9f2feb1195 Add unimi adapters 2021-06-08 19:14:31 +02:00
Andrea Cavalli 379326fd63 Fix polymorphic bug 2021-05-21 01:56:39 +02:00
Andrea Cavalli 424c3f1c45 Add records support 2021-05-21 00:13:20 +02:00
Andrea Cavalli 3aaa787198 Bugfixes 2021-05-19 22:51:28 +02:00
Andrea Cavalli 1f6ad95f92 Update dependencies 2021-05-09 22:56:27 +02:00
Andrea Cavalli d8cdb45cb0 Update moshi 2021-05-08 03:07:37 +02:00
Andrea Cavalli d752af85d7 Add moshi 2021-05-07 12:06:07 +02:00
Andrea Cavalli 008f4912a4 Update guava 2021-04-01 00:30:11 +02:00
Andrea Cavalli ea607cdd02 Add slf4j delegate 2021-02-20 21:04:58 +01:00
Andrea Cavalli d41a0d135b Return unique stripes 2021-02-13 01:36:00 +01:00
Andrea Cavalli 6e07f1bace Add FloatPriorityQueueView 2021-01-30 10:53:00 +01:00
Andrea Cavalli 833c0f0c7c Update TransitLock.java and UTFUtils.java 2021-01-27 03:04:35 +01:00
Andrea Cavalli 9950a4ceb1 Update SnapshottableCollectionLockTest.java 2021-01-24 03:15:14 +01:00
Andrea Cavalli 27ea0deaed Add snapshottable collection lock 2021-01-24 02:40:12 +01:00
Andrea Cavalli 5f0e5419c9 Safe data input/output streams (Streams that don't require IOException handling) 2021-01-23 22:34:28 +01:00
Andrea Cavalli 827bb23038 Bugfix 2021-01-22 04:22:58 +01:00
Andrea Cavalli ff44ed16ba Add new locks 2021-01-22 03:15:14 +01:00
Andrea Cavalli 97b7246ead Add cancellable consumers 2021-01-17 18:29:51 +01:00
Andrea Cavalli 9f179ab32c Add ArrayStack 2020-12-14 03:12:51 +01:00
Andrea Cavalli bb89ca6bdb Update FastUtilStackSetWrapper.java, HashStackSet.java, and 3 more files... 2020-12-14 00:41:55 +01:00
Andrea Cavalli 06a5e1c525 Add StackSet 2020-12-13 22:17:16 +01:00
Andrea Cavalli 52b09474ba Add documentation 2020-12-12 22:30:40 +01:00
Andrea Cavalli 2e2ec6ff68 Add scheduled task lifecycle 2020-12-12 22:08:14 +01:00
Andrea Cavalli 279fad3431 Refactoring 2020-12-07 22:08:17 +01:00
Andrea Cavalli e6f8fe1e2a Update pom.xml 2020-11-28 22:58:50 +01:00
Andrea Cavalli faafc3fe29 Add UnmodifiableSet 2020-11-28 22:55:32 +01:00
Andrea Cavalli c64a272bd1 Update ParallelUtils.java and IOBiConsumer.java 2020-11-17 13:10:05 +01:00
Andrea Cavalli a636411261 Update IOTriConsumer.java 2020-11-17 02:59:59 +01:00
Andrea Cavalli f1ca9c7ef8 Update HashAssociation.java and HashMultiAssociation.java 2020-11-11 00:25:53 +01:00
Andrea Cavalli 9d30be133e Update HashAssociation.java, HashMultiAssociation.java, and 2 more files... 2020-11-11 00:14:02 +01:00
Andrea Cavalli bd80576306 Add getSources and getDestinations 2020-11-10 23:43:17 +01:00
Andrea Cavalli 326f80fc68 Update pom.xml 2020-11-10 22:39:48 +01:00
Andrea Cavalli dcc5b44520 Limited synchronized association access 2020-11-10 22:39:07 +01:00
Andrea Cavalli c755b57e0c Add Association data type 2020-11-10 22:37:49 +01:00
Andrea Cavalli 4b11b7fd94 Update Batching.java, KVSafeBatching.java, and ParallelUtils.java 2020-11-10 00:43:10 +01:00
Andrea Cavalli 8813ef3e88 Update CompletableFutureUtils.java 2020-10-27 16:27:12 +01:00
Andrea Cavalli c1a8a23961 Reverted back, modules are unusable 2020-10-16 19:50:30 +02:00
Andrea Cavalli ac166bfba9 Update pom.xml and BlockingOnFullQueueExecutorServiceDecorator.java 2020-10-16 13:09:22 +02:00
Andrea Cavalli 4ee7c8c2a4 Update EqualsWrapper.java 2020-10-14 21:42:57 +02:00
Andrea Cavalli fe52292587 Update pom.xml 2020-10-07 12:26:31 +02:00
Andrea Cavalli 2920df02b3 Update Striped.java 2020-10-01 19:14:38 +02:00
Andrea Cavalli 01f07d2d8d Fork concurrent-locks 2020-10-01 19:07:17 +02:00
Andrea Cavalli 0d566b88b0 Update pom.xml, CompletableFutureUtils.java, and FloatPriorityQueue.java 2020-09-30 22:43:53 +02:00
Andrea Cavalli 37bbafdfbf Fix scores ordering 2020-09-22 23:50:23 +02:00
Andrea Cavalli 203ab6c10e Update FloatPriorityQueue.java 2020-09-22 21:28:56 +02:00
Andrea Cavalli b754302914 Sum the old priority with the new priority if an element is offered a second time 2020-09-22 19:17:48 +02:00
Andrea Cavalli f99ef5906e Add ByteBufferBackedInputStream 2020-09-19 00:26:14 +02:00
Andrea Cavalli 54f09d35ac Fix queue auto-resizing 2020-09-18 00:43:35 +02:00
Andrea Cavalli 9e5b0a3688 Update BoundedExecutorService.java 2020-09-17 18:56:21 +02:00
Andrea Cavalli 016ac5b140 Fix queue size 2020-09-17 18:00:34 +02:00
Andrea Cavalli a1febbc868 Update ImmutableLinkedSet.java 2020-09-14 19:46:58 +02:00
Andrea Cavalli 8e3e59c822 Update ArgumentTokenizer.java 2020-09-12 16:25:31 +02:00
87 changed files with 1142 additions and 6662 deletions

79
.github/workflows/maven-publish.yml vendored Normal file
View File

@ -0,0 +1,79 @@
# This workflow will build a package using Maven and then publish it to GitHub packages when a release is created
# For more information see: https://github.com/actions/setup-java#apache-maven-with-a-settings-path
name: Maven Package
on:
  push:
jobs:
  build:
    strategy:
      matrix:
        include:
          - { os: ubuntu-20.04, arch: "linux/amd64" }
    runs-on: ${{ matrix.os }}
    steps:
      - name: Branch name
        id: branch_name
        run: |
          set -xeo pipefail
          # Derive ref names; written to $GITHUB_ENV so later steps see them as env vars.
          echo "SOURCE_NAME=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
          echo "SOURCE_BRANCH=${GITHUB_REF#refs/heads/}" >> $GITHUB_ENV
          echo "SOURCE_TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
          echo "SOURCE_TAG_VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV
          cat $GITHUB_ENV > github.env
      - uses: actions/checkout@v3
        with:
          submodules: "recursive"
      - name: Setup variables
        shell: bash
        run: |
          set -xeo pipefail
          # ====== Variables
          # NOTE: the previous step wrote to $GITHUB_ENV, not $GITHUB_OUTPUT, so
          # steps.branch_name.outputs.* was always empty. Read the env var instead.
          export REVISION=$SOURCE_TAG_VERSION
          echo "REVISION=$REVISION" >> $GITHUB_ENV
          cat $GITHUB_ENV >> github.env
      # Step name kept in sync with java-version below (this project builds on Java 8).
      - name: Set up JDK 8 (Snapshot)
        if: ${{ !startsWith(github.ref, 'refs/tags/v') }}
        uses: actions/setup-java@v3
        with:
          java-version: 8
          distribution: temurin
          cache: 'maven'
          server-id: mchv-snapshot-distribution
          server-username: MAVEN_USERNAME
          server-password: MAVEN_PASSWORD
      - name: Build and deploy to Maven (Snapshot)
        if: ${{ !startsWith(github.ref, 'refs/tags/v') }}
        shell: bash
        run: |
          set -xeo pipefail
          mvn -B clean deploy
          echo "Done."
        env:
          MAVEN_USERNAME: ${{ secrets.MCHV_USERNAME }}
          MAVEN_PASSWORD: ${{ secrets.MCHV_TOKEN }}
      - name: Set up JDK 8 (Release)
        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
        uses: actions/setup-java@v3
        with:
          java-version: 8
          distribution: temurin
          cache: 'maven'
          server-id: mchv-release-distribution
          server-username: MAVEN_USERNAME
          server-password: MAVEN_PASSWORD
      - name: Build and deploy to Maven (Release)
        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
        shell: bash
        run: |
          set -xeo pipefail
          echo "REVISION: $REVISION"
          mvn -B clean -Drevision="$REVISION" deploy
          echo "Done."
        env:
          MAVEN_USERNAME: ${{ secrets.MCHV_USERNAME }}
          MAVEN_PASSWORD: ${{ secrets.MCHV_TOKEN }}

4
.gitignore vendored
View File

@ -161,3 +161,7 @@ $RECYCLE.BIN/
*.lnk
# End of https://www.toptal.com/developers/gitignore/api/linux,intellij+all,windows,git,maven
/.classpath
/.project
/.settings/
/.flattened-pom.xml

194
pom.xml
View File

@ -1,97 +1,113 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<packaging>jar</packaging>
<modelVersion>4.0.0</modelVersion>
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<packaging>jar</packaging>
<modelVersion>4.0.0</modelVersion>
<artifactId>common-utils</artifactId>
<groupId>org.warp</groupId>
<version>1.0.7</version>
<groupId>it.tdlight</groupId>
<artifactId>common-util</artifactId>
<version>${revision}</version>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<!-- required for jdk9 -->
<maven.compiler.source>11</maven.compiler.source>
<maven.compiler.target>11</maven.compiler.target>
</properties>
<properties>
<revision>1.0.0-SNAPSHOT</revision>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<profiles>
<profile>
<id>publish-to-tdlight</id>
<properties/>
<distributionManagement>
<repository>
<id>github</id>
<name>GitHub TDLight Team Apache Maven Packages</name>
<url>https://maven.pkg.github.com/tdlight-team/tdlight-java</url>
</repository>
</distributionManagement>
</profile>
<profile>
<id>publish-to-github</id>
<properties/>
<distributionManagement>
<repository>
<id>github</id>
<name>GitHub Cavallium Apache Maven Packages</name>
<url>https://maven.pkg.github.com/Cavallium/common-utils</url>
</repository>
</distributionManagement>
</profile>
<profile>
<id>publish-to-mchv</id>
<properties/>
<distributionManagement>
<repository>
<id>mchv</id>
<name>MCHV Apache Maven Packages</name>
<url>https://mvn.mchv.eu/repository/mchv/</url>
</repository>
</distributionManagement>
</profile>
</profiles>
<dependencies>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<version>RELEASE</version>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.jetbrains</groupId>
<artifactId>annotations</artifactId>
<version>17.0.0</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>28.2-jre</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>it.unimi.dsi</groupId>
<artifactId>fastutil</artifactId>
<version>8.3.0</version>
</dependency>
<dependency>
<groupId>com.googlecode.concurrent-locks</groupId>
<artifactId>concurrent-locks</artifactId>
<version>1.0.0</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.9</version>
</dependency>
</dependencies>
<repositories>
<repository>
<id>mchv-release</id>
<name>MCHV Release Apache Maven Packages</name>
<url>https://mvn.mchv.eu/repository/mchv</url>
</repository>
<repository>
<id>mchv-snapshot</id>
<name>MCHV Snapshot Apache Maven Packages</name>
<url>https://mvn.mchv.eu/repository/mchv-snapshot</url>
</repository>
</repositories>
<distributionManagement>
<repository>
<id>mchv-release</id>
<name>MCHV Apache Release Maven Packages Distribution</name>
<url>https://mvn.mchv.eu/repository/mchv</url>
</repository>
</distributionManagement>
<dependencies>
<dependency>
<groupId>org.jctools</groupId>
<artifactId>jctools-core</artifactId>
<version>4.0.1</version>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<version>5.8.2</version>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.1</version>
<configuration>
<encoding>UTF-8</encoding>
<release>8</release>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<version>3.0.0-M3</version>
<executions>
<execution>
<id>enforce</id>
<configuration>
<rules>
<dependencyConvergence/>
</rules>
</configuration>
<goals>
<goal>enforce</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>flatten-maven-plugin</artifactId>
<version>1.1.0</version>
<configuration>
<updatePomFile>true</updatePomFile>
<flattenMode>oss</flattenMode>
</configuration>
<executions>
<execution>
<id>flatten</id>
<phase>process-resources</phase>
<goals>
<goal>flatten</goal>
</goals>
</execution>
<execution>
<id>flatten.clean</id>
<phase>clean</phase>
<goals>
<goal>clean</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,25 @@
package it.tdlight.commonutil;

/**
 * An iterator optimized for primitive collections which avoids auto-boxing on {@link #next()}.
 *
 * <p>Unlike {@link LongIterator}, this interface deliberately does NOT extend
 * {@link java.util.Iterator}: its {@code next()} returns a primitive {@code int},
 * which is incompatible with {@code Iterator<Integer>.next()}.
 */
public interface IntIterator {

	/**
	 * Identical to {@link java.util.Iterator#next()} but avoids auto-boxing.
	 *
	 * @return The next int in the collection.
	 */
	int next();

	/**
	 * Identical to {@link java.util.Iterator#hasNext()}.
	 *
	 * @return True if the iterator has more elements.
	 */
	boolean hasNext();

	/**
	 * Identical to {@link java.util.Iterator#remove()}.
	 * Removes from the underlying collection the last element returned by {@link #next()}.
	 */
	void remove();
}

View File

@ -0,0 +1,16 @@
package it.tdlight.commonutil;

import java.util.Iterator;

/**
 * An extension of the standard {@link Iterator} interface which provides the {@link #nextLong()} method to avoid
 * auto-boxing of results as they are returned.
 *
 * <p>Because this extends {@code Iterator<Long>}, it can still be used anywhere a boxed
 * iterator is expected; callers that care about allocation should prefer {@link #nextLong()}.
 * */
public interface LongIterator extends Iterator<Long> {

	/**
	 * Returns the next long value without auto-boxing. Using this is preferred to {@link #next()}.
	 *
	 * @return The next long value.
	 */
	long nextLong();
}

View File

@ -0,0 +1,188 @@
package it.tdlight.commonutil;

import java.io.Serializable;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
import java.util.Set;
import org.jctools.maps.NonBlockingHashMapLong;

/**
 * A simple wrapper around {@link NonBlockingHashMapLong} making it implement the
 * {@link Set} interface. All operations are Non-Blocking and multi-thread safe.
 *
 * <p>Bulk operations ({@code addAll}, {@code removeAll}, {@code containsAll},
 * {@code retainAll}) are specialized when the argument is exactly another
 * {@code NonBlockingHashSetLong}, iterating with {@link LongIterator#nextLong()}
 * to avoid auto-boxing; otherwise they fall back to the boxed
 * {@link AbstractSet} implementations.
 */
public class NonBlockingHashSetLong extends AbstractSet<Long> implements Serializable {

	// Shared sentinel value stored for every present key; identity comparison
	// against V distinguishes "was present" from "was absent".
	private static final Object V = "";

	private final NonBlockingHashMapLong<Object> _map;

	/** Make a new empty {@link NonBlockingHashSetLong}. */
	public NonBlockingHashSetLong() {
		super();
		_map = new NonBlockingHashMapLong<Object>();
	}

	@Override
	public boolean addAll(Collection<? extends Long> c) {
		if (!NonBlockingHashSetLong.class.equals(c.getClass())) {
			return super.addAll(c);
		}
		boolean modified = false;
		for (final LongIterator it = ((NonBlockingHashSetLong) c).longIterator(); it.hasNext(); ) {
			modified |= add(it.nextLong());
		}
		return modified;
	}

	@Override
	public boolean removeAll(Collection<?> c) {
		if (!NonBlockingHashSetLong.class.equals(c.getClass())) {
			return super.removeAll(c);
		}
		boolean modified = false;
		for (final LongIterator it = ((NonBlockingHashSetLong) c).longIterator(); it.hasNext(); ) {
			modified |= remove(it.nextLong());
		}
		return modified;
	}

	@Override
	public boolean containsAll(Collection<?> c) {
		if (!NonBlockingHashSetLong.class.equals(c.getClass())) {
			return super.containsAll(c);
		}
		for (final LongIterator it = ((NonBlockingHashSetLong) c).longIterator(); it.hasNext(); ) {
			if (!contains(it.nextLong())) {
				return false;
			}
		}
		return true;
	}

	@Override
	public boolean retainAll(Collection<?> c) {
		if (!NonBlockingHashSetLong.class.equals(c.getClass())) {
			return super.retainAll(c);
		}
		boolean modified = false;
		final NonBlockingHashSetLong nonBlockingHashSetLong = (NonBlockingHashSetLong) c;
		for (final LongIterator it = longIterator(); it.hasNext(); ) {
			if (!nonBlockingHashSetLong.contains(it.nextLong())) {
				it.remove();
				modified = true;
			}
		}
		return modified;
	}

	@Override
	public int hashCode() {
		// Sum of Long.hashCode for each element, computed without boxing;
		// matches the AbstractSet/Set hashCode contract.
		int hashCode = 0;
		for (final LongIterator it = longIterator(); it.hasNext(); ) {
			final long value = it.nextLong();
			hashCode += (int) (value ^ (value >>> 32));
		}
		return hashCode;
	}

	/** Add {@code o} to the set.
	 * @return <tt>true</tt> if {@code o} was added to the set, <tt>false</tt>
	 * if {@code o} was already in the set.
	 */
	public boolean add(final long o) {
		return _map.putIfAbsent(o, V) != V;
	}

	/**
	 * To support AbstractCollection.addAll
	 */
	@Override
	public boolean add(final Long o) {
		return _map.putIfAbsent(o.longValue(), V) != V;
	}

	/**
	 * @return <tt>true</tt> if {@code o} is in the set.
	 */
	public boolean contains(final long o) { return _map.containsKey(o); }

	@Override
	public boolean contains(Object o) {
		return o instanceof Long && contains(((Long) o).longValue());
	}

	/** Remove {@code o} from the set.
	 * @return <tt>true</tt> if {@code o} was removed from the set, <tt>false</tt>
	 * if {@code o} was not in the set.
	 */
	public boolean remove(final long o) { return _map.remove(o) == V; }

	@Override
	public boolean remove(final Object o) { return o instanceof Long && remove(((Long) o).longValue()); }

	/**
	 * Current count of elements in the set. Due to concurrent racing updates,
	 * the size is only ever approximate. Updates due to the calling thread are
	 * immediately visible to calling thread.
	 * @return count of elements.
	 */
	@Override
	public int size() { return _map.size(); }

	/** Empty the set. */
	@Override
	public void clear() { _map.clear(); }

	@Override
	public String toString() {
		// Overloaded to avoid auto-boxing.
		// BUGFIX: previously called it.next(), which returns a boxed Long via
		// Iterator<Long> and defeated the stated purpose; nextLong() appends
		// the primitive directly (identical output, no allocation).
		final LongIterator it = longIterator();
		if (!it.hasNext()) {
			return "[]";
		}
		final StringBuilder sb = new StringBuilder().append('[');
		for (;;) {
			sb.append(it.nextLong());
			if (!it.hasNext()) {
				return sb.append(']').toString();
			}
			sb.append(", ");
		}
	}

	@Override
	public Iterator<Long> iterator() { return _map.keySet().iterator(); }

	// NOTE(review): this cast assumes the iterator returned by
	// NonBlockingHashMapLong.keySet() implements this package's LongIterator —
	// verify against the jctools version in use, otherwise this throws
	// ClassCastException at runtime.
	public LongIterator longIterator() {
		return (LongIterator) _map.keySet().iterator();
	}

	// ---

	/**
	 * Atomically make the set immutable. Future calls to mutate will throw an
	 * IllegalStateException. Existing mutator calls in other threads racing
	 * with this thread and will either throw IllegalStateException or their
	 * update will be visible to this thread. This implies that a simple flag
	 * cannot make the Set immutable, because a late-arriving update in another
	 * thread might see immutable flag not set yet, then mutate the Set after
	 * the {@link #readOnly} call returns. This call can be called concurrently
	 * (and indeed until the operation completes, all calls on the Set from any
	 * thread either complete normally or end up calling {@link #readOnly}
	 * internally).
	 *
	 * <p> This call is useful in debugging multi-threaded programs where the
	 * Set is constructed in parallel, but construction completes after some
	 * time; and after construction the Set is only read. Making the Set
	 * read-only will cause updates arriving after construction is supposedly
	 * complete to throw an {@link IllegalStateException}.
	 */
	// (1) call _map's immutable() call
	// (2) get snapshot
	// (3) CAS down a local map, power-of-2 larger than _map.size()+1/8th
	// (4) start @ random, visit all snapshot, insert live keys
	// (5) CAS _map to null, needs happens-after (4)
	// (6) if Set call sees _map is null, needs happens-after (4) for readers
	public void readOnly() {
		throw new RuntimeException("Unimplemented");
	}
}

View File

@ -0,0 +1,725 @@
/*
* Written by Cliff Click and released to the public domain, as explained at
* http://creativecommons.org/licenses/publicdomain
*/
package it.tdlight.commonutil;
import java.io.IOException;
import java.io.Serializable;
import java.lang.reflect.Field;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicInteger;
import org.jctools.maps.ConcurrentAutoTable;
import org.jctools.util.UnsafeAccess;
import sun.misc.Unsafe;
/**
* A multi-threaded bit-vector set, implemented as an array of primitive
* {@code longs}. All operations are non-blocking and multi-threaded safe.
* {@link #contains(int)} calls are roughly the same speed as a {load, mask}
* sequence. {@link #add(int)} and {@link #remove(int)} calls are a tad more
* expensive than a {load, mask, store} sequence because they must use a CAS.
* The bit-vector is auto-sizing.
*
* <p><em>General note of caution:</em> The Set API allows the use of {@link Integer}
* with silent autoboxing - which can be very expensive if many calls are
* being made. Since autoboxing is silent you may not be aware that this is
* going on. The built-in API takes lower-case {@code ints} and is much more
* efficient.
*
* <p>Space: space is used in proportion to the largest element, as opposed to
* the number of elements (as is the case with hash-table based Set
* implementations). Space is approximately (largest_element/8 + 64) bytes.
* <p>
* The implementation is a simple bit-vector using CAS for update.
*
* @since 1.5
* @author Cliff Click
*/
public class NonBlockingSetInt extends AbstractSet<Integer> implements Serializable {
private static final long serialVersionUID = 1234123412341234123L;
private static final Unsafe _unsafe = UnsafeAccess.UNSAFE;
// --- Bits to allow atomic update of the NBSI
private static final long _nbsi_offset;
static { // <clinit>
Field f = null;
try {
f = NonBlockingSetInt.class.getDeclaredField("_nbsi");
} catch( NoSuchFieldException e ) {
}
_nbsi_offset = _unsafe.objectFieldOffset(f);
}
private final boolean CAS_nbsi( NBSI old, NBSI nnn ) {
return _unsafe.compareAndSwapObject(this, _nbsi_offset, old, nnn );
}
// The actual Set of Joy, which changes during a resize event. The
// Only Field for this class, so I can atomically change the entire
// set implementation with a single CAS.
private transient NBSI _nbsi;
/** Create a new empty bit-vector */
public NonBlockingSetInt( ) {
_nbsi = new NBSI(63, new ConcurrentAutoTable(), this); // The initial 1-word set
}
private NonBlockingSetInt(NonBlockingSetInt a, NonBlockingSetInt b) {
_nbsi = new NBSI(a._nbsi,b._nbsi,new ConcurrentAutoTable(),this);
}
/**
* Overridden to avoid auto-boxing for NonBlockingSetInt.
*
* @param c The collection to add to this set.
* @return True if the set was modified.
*/
@Override
public boolean addAll(Collection<? extends Integer> c) {
if (!NonBlockingSetInt.class.equals(c.getClass())) {
return super.addAll(c);
}
boolean modified = false;
for (final IntIterator it = ((NonBlockingSetInt)c).intIterator(); it.hasNext(); ) {
modified |= add(it.next());
}
return modified;
}
/**
* Overridden to avoid auto-boxing for NonBlockingSetInt.
*
* @param c The collection to remove from this set.
* @return True if the set was modified.
*/
@Override
public boolean removeAll(Collection<?> c) {
if (!NonBlockingSetInt.class.equals(c.getClass())) {
return super.removeAll(c);
}
boolean modified = false;
for (final IntIterator it = ((NonBlockingSetInt)c).intIterator(); it.hasNext(); ) {
modified |= remove(it.next());
}
return modified;
}
@Override
public boolean containsAll(Collection<?> c) {
if (!NonBlockingSetInt.class.equals(c.getClass())) {
return super.containsAll(c);
}
for (final IntIterator it = ((NonBlockingSetInt)c).intIterator(); it.hasNext(); ) {
if (!contains(it.next())) {
return false;
}
}
return true;
}
@Override
public boolean retainAll(Collection<?> c) {
if (!NonBlockingSetInt.class.equals(c.getClass())) {
return super.retainAll(c);
}
boolean modified = false;
final NonBlockingSetInt nonBlockingSetInt = (NonBlockingSetInt) c;
for (final IntIterator it = intIterator(); it.hasNext(); ) {
if (!nonBlockingSetInt.contains(it.next())) {
it.remove();
modified = true;
}
}
return modified;
}
@Override
public int hashCode() {
int hashCode = 0;
for (final IntIterator it = intIterator(); it.hasNext(); ) {
hashCode += it.next();
}
return hashCode;
}
/**
* Add {@code i} to the set. Uppercase {@link Integer} version of add,
* requires auto-unboxing. When possible use the {@code int} version of
* {@link #add(int)} for efficiency.
* @throws IllegalArgumentException if i is negative.
* @return <tt>true</tt> if i was added to the set.
*/
@Override
public boolean add ( final Integer i ) {
return add(i.intValue());
}
/**
* Test if {@code o} is in the set. This is the uppercase {@link Integer}
* version of contains, requires a type-check and auto-unboxing. When
* possible use the {@code int} version of {@link #contains(int)} for
* efficiency.
* @return <tt>true</tt> if i was in the set.
*/
@Override
public boolean contains( final Object o ) {
return o instanceof Integer && contains(((Integer) o).intValue());
}
/**
* Remove {@code o} from the set. This is the uppercase {@link Integer}
* version of remove, requires a type-check and auto-unboxing. When
* possible use the {@code int} version of {@link #remove(int)} for
* efficiency.
* @return <tt>true</tt> if i was removed to the set.
*/
@Override
public boolean remove( final Object o ) {
return o instanceof Integer && remove(((Integer) o).intValue());
}
/**
* Add {@code i} to the set. This is the lower-case '{@code int}' version
* of {@link #add} - no autoboxing. Negative values throw
* IllegalArgumentException.
* @throws IllegalArgumentException if i is negative.
* @return <tt>true</tt> if i was added to the set.
*/
public boolean add( final int i ) {
if( i < 0 ) throw new IllegalArgumentException(""+i);
return _nbsi.add(i);
}
/**
* Test if {@code i} is in the set. This is the lower-case '{@code int}'
* version of {@link #contains} - no autoboxing.
* @return <tt>true</tt> if i was int the set.
*/
public boolean contains( final int i ) { return i >= 0 && _nbsi.contains(i); }
/**
* Remove {@code i} from the set. This is the fast lower-case '{@code int}'
* version of {@link #remove} - no autoboxing.
* @return <tt>true</tt> if i was added to the set.
*/
public boolean remove ( final int i ) { return i >= 0 && _nbsi.remove(i); }
/**
* Current count of elements in the set. Due to concurrent racing updates,
* the size is only ever approximate. Updates due to the calling thread are
* immediately visible to calling thread.
* @return count of elements.
*/
@Override
public int size ( ) { return _nbsi.size( ); }
/** Empty the bitvector. */
@Override
public void clear ( ) {
NBSI cleared = new NBSI(63, new ConcurrentAutoTable(), this); // An empty initial NBSI
while( !CAS_nbsi( _nbsi, cleared ) ) // Spin until clear works
;
}
@Override
public String toString() {
// Overloaded to avoid auto-boxing
final IntIterator it = intIterator();
if (!it.hasNext()) {
return "[]";
}
final StringBuilder sb = new StringBuilder().append('[');
for (;;) {
sb.append(it.next());
if (!it.hasNext()) {
return sb.append(']').toString();
}
sb.append(", ");
}
}
public int sizeInBytes() { return _nbsi.sizeInBytes(); }
/*****************************************************************
*
* bitwise comparisons optimised for NBSI
*
*****************************************************************/
public NonBlockingSetInt intersect(final NonBlockingSetInt op) {
NonBlockingSetInt res = new NonBlockingSetInt(this,op);
res._nbsi.intersect(res._nbsi, this._nbsi, op._nbsi);
return res;
}
public NonBlockingSetInt union(final NonBlockingSetInt op) {
NonBlockingSetInt res = new NonBlockingSetInt(this,op);
res._nbsi.union(res._nbsi, this._nbsi, op._nbsi);
return res;
}
// public NonBlockingSetInt not(final NonBlockingSetInt op) {
//
// }
/** Verbose printout of internal structure for debugging. */
public void print() { _nbsi.print(0); }
/**
* Standard Java {@link Iterator}. Not very efficient because it
* auto-boxes the returned values.
*/
@Override
public Iterator<Integer> iterator( ) { return new iter(); }
public IntIterator intIterator() { return new NBSIIntIterator(); }
private class NBSIIntIterator implements IntIterator {
NBSI nbsi;
int index = -1;
int prev = -1;
NBSIIntIterator() {
nbsi = _nbsi;
advance();
}
private void advance() {
while( true ) {
index++; // Next index
while( (index>>6) >= nbsi._bits.length ) { // Index out of range?
if( nbsi._new == null ) { // New table?
index = -2; // No, so must be all done
return; //
}
nbsi = nbsi._new; // Carry on, in the new table
}
if( nbsi.contains(index) ) return;
}
}
@Override
public int next() {
if( index == -1 ) throw new NoSuchElementException();
prev = index;
advance();
return prev;
}
@Override
public boolean hasNext() {
return index != -2;
}
@Override
public void remove() {
if( prev == -1 ) throw new IllegalStateException();
nbsi.remove(prev);
prev = -1;
}
}
private class iter implements Iterator<Integer> {
NBSIIntIterator intIterator;
iter() { intIterator = new NBSIIntIterator(); }
@Override
public boolean hasNext() { return intIterator.hasNext(); }
@Override
public Integer next() { return intIterator.next(); }
@Override
public void remove() { intIterator.remove(); }
}
// --- writeObject -------------------------------------------------------
// Write a NBSI to a stream
private void writeObject(java.io.ObjectOutputStream s) throws IOException {
s.defaultWriteObject(); // Nothing to write
final NBSI nbsi = _nbsi; // The One Field is transient
final int len = _nbsi._bits.length<<6;
s.writeInt(len); // Write max element
for( int i=0; i<len; i++ )
s.writeBoolean( _nbsi.contains(i) );
}
// --- readObject --------------------------------------------------------
// Read a CHM from a stream
private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
s.defaultReadObject(); // Read nothing
final int len = s.readInt(); // Read max element
_nbsi = new NBSI(len, new ConcurrentAutoTable(), this);
for( int i=0; i<len; i++ ) // Read all bits
if( s.readBoolean() )
_nbsi.add(i);
}
// --- NBSI ----------------------------------------------------------------
/**
 * NBSI: the actual lock-free bit-vector backing the set. Bits live in a
 * long[] updated via CAS. Because the resize protocol needs one spare bit
 * per word (the sign bit marks a word "immutable, being copied"), every
 * 64th logical bit is stored in a recursive nested vector (_nbsi64).
 * A resize installs a larger NBSI in _new (one-shot CAS) and copies words
 * over with the help_copy/help_copy_impl state machine.
 */
private static final class NBSI {
// Back pointer to the parent wrapper; sorta like make the class non-static
private transient final NonBlockingSetInt _non_blocking_set_int;
// Used to count elements: a high-performance counter.
private transient final ConcurrentAutoTable _size;
// The Bits
private final long _bits[];
// --- Bits to allow Unsafe access to arrays
private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class);
private static final int _Lscale = _unsafe.arrayIndexScale(long[].class);
// Raw byte offset of _bits[idx], used for Unsafe CAS on array elements.
private static long rawIndex(final long[] ary, final int idx) {
assert idx >= 0 && idx < ary.length;
return _Lbase + idx * _Lscale;
}
// Atomically swap word 'idx' of _bits from 'old' to 'nnn'; false if raced.
private final boolean CAS( int idx, long old, long nnn ) {
return _unsafe.compareAndSwapLong( _bits, rawIndex(_bits, idx), old, nnn );
}
// --- Resize
// The New Table, only set once to non-zero during a resize.
// Must be atomically set.
private NBSI _new;
private static final long _new_offset;
static { // <clinit>
Field f = null;
try {
f = NBSI.class.getDeclaredField("_new");
} catch( NoSuchFieldException e ) {
// NOTE(review): cannot happen ("_new" is declared above); if it ever did,
// f would remain null and the objectFieldOffset call below would NPE.
}
_new_offset = _unsafe.objectFieldOffset(f);
}
// One-shot CAS installing the larger table: only a null->nnn transition wins.
private final boolean CAS_new( NBSI nnn ) {
return _unsafe.compareAndSwapObject(this, _new_offset, null, nnn );
}
private transient final AtomicInteger _copyIdx; // Used to count bits started copying
private transient final AtomicInteger _copyDone; // Used to count words copied in a resize operation
private transient final int _sum_bits_length; // Sum of all nested _bits.lengths
// Bit-mask selecting bit i within its 64-bit word.
private static final long mask( int i ) { return 1L<<(i&63); }
// I need 1 free bit out of 64 to allow for resize. I do this by stealing
// the high order bit - but then I need to do something with adding element
// number 63 (and friends). I could use a mod63 function but it's more
// efficient to handle the mod-64 case as an exception.
//
// Every 64th bit is put in it's own recursive bitvector. If the low 6 bits
// are all set, we shift them off and recursively operate on the _nbsi64 set.
private final NBSI _nbsi64;
// Build an NBSI sized for 'max_elem' elements. For nested sub-vectors both
// 'ctr' and 'nonb' are null (only the top level counts size / sees the wrapper).
private NBSI(int max_elem, ConcurrentAutoTable ctr, NonBlockingSetInt nonb ) {
super();
_non_blocking_set_int = nonb;
_size = ctr;
_copyIdx = ctr == null ? null : new AtomicInteger();
_copyDone = ctr == null ? null : new AtomicInteger();
// The main array of bits
_bits = new long[(int)(((long)max_elem+63)>>>6)];
// Every 64th bit is moved off to it's own subarray, so that the
// sign-bit is free for other purposes
_nbsi64 = ((max_elem+1)>>>6) == 0 ? null : new NBSI((max_elem+1)>>>6, null, null);
_sum_bits_length = _bits.length + (_nbsi64==null ? 0 : _nbsi64._sum_bits_length);
}
/** built a new NBSI with buffers large enough to hold bitwise operations on the operands **/
private NBSI(NBSI a, NBSI b, ConcurrentAutoTable ctr, NonBlockingSetInt nonb) {
super();
_non_blocking_set_int = nonb;
_size = ctr;
_copyIdx = ctr == null ? null : new AtomicInteger();
_copyDone = ctr == null ? null : new AtomicInteger();
if(!has_bits(a) && !has_bits(b)) {
_bits = null;
_nbsi64 = null;
_sum_bits_length = 0;
return;
}
// todo - clean this nastiness up
// essentially just safely creates new empty buffers for each of the recursive bitsets
if(!has_bits(a)) {
_bits = new long[b._bits.length];
_nbsi64 = new NBSI(null,b._nbsi64,null,null);
} else if(!has_bits(b)) {
_bits = new long[a._bits.length];
_nbsi64 = new NBSI(null,a._nbsi64,null,null);
} else {
int bit_length = a._bits.length > b._bits.length ? a._bits.length : b._bits.length;
_bits = new long[bit_length];
_nbsi64 = new NBSI(a._nbsi64,b._nbsi64,null,null);
}
_sum_bits_length = _bits.length + _nbsi64._sum_bits_length;
}
// True when 'n' is a non-null NBSI that actually allocated a bit array.
private static boolean has_bits(NBSI n) {
return n != null && n._bits != null;
}
// Lower-case 'int' versions - no autoboxing, very fast.
// 'i' is known positive.
public boolean add( final int i ) {
// Check for out-of-range for the current size bit vector.
// If so we need to grow the bit vector.
if( (i>>6) >= _bits.length )
return install_larger_new_bits(i). // Install larger pile-o-bits (duh)
help_copy().add(i); // Finally, add to the new table
// Handle every 64th bit via using a nested array
NBSI nbsi = this; // The bit array being added into
int j = i; // The bit index being added
while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set)
nbsi = nbsi._nbsi64; // Recurse
j = j>>6; // Strip off low 6 bits (all set)
}
final long mask = mask(j);
long old;
do {
old = nbsi._bits[j>>6]; // Read old bits
if( old < 0 ) // Not mutable?
// Not mutable: finish copy of word, and retry on copied word
return help_copy_impl(i).help_copy().add(i);
if( (old & mask) != 0 ) return false; // Bit is already set?
} while( !nbsi.CAS( j>>6, old, old | mask ) );
_size.add(1);
return true;
}
public boolean remove( final int i ) {
if( (i>>6) >= _bits.length ) // Out of bounds? Not in this array!
return _new==null ? false : help_copy().remove(i);
// Handle every 64th bit via using a nested array
NBSI nbsi = this; // The bit array being added into
int j = i; // The bit index being added
while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set)
nbsi = nbsi._nbsi64; // Recurse
j = j>>6; // Strip off low 6 bits (all set)
}
final long mask = mask(j);
long old;
do {
old = nbsi._bits[j>>6]; // Read old bits
if( old < 0 ) // Not mutable?
// Not mutable: finish copy of word, and retry on copied word
return help_copy_impl(i).help_copy().remove(i);
if( (old & mask) == 0 ) return false; // Bit is already clear?
} while( !nbsi.CAS( j>>6, old, old & ~mask ) );
_size.add(-1);
return true;
}
public boolean contains( final int i ) {
if( (i>>6) >= _bits.length ) // Out of bounds? Not in this array!
return _new==null ? false : help_copy().contains(i);
// Handle every 64th bit via using a nested array
NBSI nbsi = this; // The bit array being added into
int j = i; // The bit index being added
while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set)
nbsi = nbsi._nbsi64; // Recurse
j = j>>6; // Strip off low 6 bits (all set)
}
final long mask = mask(j);
long old = nbsi._bits[j>>6]; // Read old bits
if( old < 0 ) // Not mutable?
// Not mutable: finish copy of word, and retry on copied word
return help_copy_impl(i).help_copy().contains(i);
// Yes mutable: test & return bit
return (old & mask) != 0;
}
/**
 * Bitwise operations which store the result in this instance.
 * Assumes that this instance contains ample buffer space to store the largest
 * buffer from each NBSI in the recursive bitmap.
 *
 * Also assumes that this method is called during the construction process of
 * the bitset before the instance could be leaked to multiple threads.
 ***/
public boolean intersect(NBSI dest, NBSI a, NBSI b) {
// terminate recursion if one bitset is missing data
// since that word should be left as 0L anyway
if(!has_bits(a) || !has_bits(b))
return true;
for(int i = 0; i < dest._bits.length; i++) {
long left = a.safe_read_word(i,0L);
long right = b.safe_read_word(i,0L);
dest._bits[i] = (left & right) & Long.MAX_VALUE; // mask sign bit
}
// todo - recompute size
return intersect(dest._nbsi64, a._nbsi64, b._nbsi64);
}
public boolean union(NBSI dest, NBSI a, NBSI b) {
// terminate recursion if neither bitset has data
if(!has_bits(a) && !has_bits(b))
return true;
if(has_bits(a) || has_bits(b)) {
for(int i = 0; i < dest._bits.length; i++) {
long left = a == null ? 0L : a.safe_read_word(i,0);
long right = b == null ? 0L : b.safe_read_word(i,0);
dest._bits[i] = (left | right) & Long.MAX_VALUE;
}
}
// NOTE(review): like intersect(), _size is not recomputed here (see todo above).
return union(dest._nbsi64, a == null ? null : a._nbsi64, b == null ? null : b._nbsi64);
}
/**************************************************************************/
// Read word i, returning 'default_word' past the end of the buffer. If the
// word is immutable (sign bit set, mid-resize) finish copying it first and
// read the copied value from the new table.
private long safe_read_word(int i, long default_word) {
if(i >= _bits.length) {
// allow reading past the end of the buffer filling in a default word
return default_word;
}
long word = _bits[i];
if(word < 0) {
NBSI nb = help_copy_impl(i);
if(nb._non_blocking_set_int == null) {
return default_word;
}
word = nb.help_copy()._bits[i];
}
return word;
}
// NOTE(review): despite the name this returns the number of 64-bit WORDS in
// _bits, not bytes (a byte count would be _bits.length*8) — misleading name.
public int sizeInBytes() { return (int)_bits.length; }
public int size() { return (int)_size.get(); }
// Must grow the current array to hold an element of size i
private NBSI install_larger_new_bits( final int i ) {
if( _new == null ) {
// Grow by powers of 2, to avoid minor grow-by-1's.
// Note: must grow by exact powers-of-2 or the by-64-bit trick doesn't work right
int sz = (_bits.length<<6)<<1;
// CAS to install a new larger size. Did it work? Did it fail? We
// don't know and don't care. Only One can be installed, so if
// another thread installed a too-small size, we can't help it - we
// must simply install our new larger size as a nested-resize table.
CAS_new(new NBSI(sz, _size, _non_blocking_set_int));
}
// Return self for 'fluid' programming style
return this;
}
// Help any top-level NBSI to copy until completed.
// Always return the _new version of *this* NBSI, in case we're nested.
private NBSI help_copy() {
// Pick some words to help with - but only help copy the top-level NBSI.
// Nested NBSI waits until the top is done before we start helping.
NBSI top_nbsi = _non_blocking_set_int._nbsi;
final int HELP = 8; // Tuning number: how much copy pain are we willing to inflict?
// We "help" by forcing individual bit indices to copy. However, bits
// come in lumps of 64 per word, so we just advance the bit counter by 64's.
int idx = top_nbsi._copyIdx.getAndAdd(64*HELP);
for( int i=0; i<HELP; i++ ) {
int j = idx+i*64;
j %= (top_nbsi._bits.length<<6); // Limit, wrap to array size; means we retry indices
top_nbsi.help_copy_impl(j );
top_nbsi.help_copy_impl(j+63); // Also force the nested-by-64 bit
}
// Top level guy ready to promote?
// Note: WE may not be the top-level guy!
if( top_nbsi._copyDone.get() == top_nbsi._sum_bits_length )
// One shot CAS to promote - it may fail since we are racing; others
// may promote as well
if( _non_blocking_set_int.CAS_nbsi( top_nbsi, top_nbsi._new ) ) {
//System.out.println("Promote at top level to size "+(_non_blocking_set_int._nbsi._bits.length<<6));
}
// Return the new bitvector for 'fluid' programming style
return _new;
}
// Help copy this one word. State Machine.
// (1) If not "made immutable" in the old array, set the sign bit to make
// it immutable.
// (2) If non-zero in old array & zero in new, CAS new from 0 to copy-of-old
// (3) If non-zero in old array & non-zero in new, CAS old to zero
// (4) Zero in old, new is valid
// At this point, old should be immutable-zero & new has a copy of bits
private NBSI help_copy_impl( int i ) {
// Handle every 64th bit via using a nested array
NBSI old = this; // The bit array being copied from
NBSI nnn = _new; // The bit array being copied to
if( nnn == null ) return this; // Promoted already
int j = i; // The bit index being added
while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set)
old = old._nbsi64; // Recurse
nnn = nnn._nbsi64; // Recurse
j = j>>6; // Strip off low 6 bits (all set)
}
// Transit from state 1: word is not immutable yet
// Immutable is in bit 63, the sign bit.
long bits = old._bits[j>>6];
while( bits >= 0 ) { // Still in state (1)?
long oldbits = bits;
bits |= mask(63); // Target state of bits: sign-bit means immutable
if( old.CAS( j>>6, oldbits, bits ) ) {
if( oldbits == 0 ) _copyDone.addAndGet(1);
break; // Success - old array word is now immutable
}
bits = old._bits[j>>6]; // Retry if CAS failed
}
// Transit from state 2: non-zero in old and zero in new
if( bits != mask(63) ) { // Non-zero in old?
long new_bits = nnn._bits[j>>6];
if( new_bits == 0 ) { // New array is still zero
new_bits = bits & ~mask(63); // Desired new value: a mutable copy of bits
// One-shot CAS attempt, no loop, from 0 to non-zero.
// If it fails, somebody else did the copy for us
if( !nnn.CAS( j>>6, 0, new_bits ) )
new_bits = nnn._bits[j>>6]; // Since it failed, get the new value
assert new_bits != 0;
}
// Transit from state 3: non-zero in old and non-zero in new
// One-shot CAS attempt, no loop, from non-zero to 0 (but immutable)
if( old.CAS( j>>6, bits, mask(63) ) )
_copyDone.addAndGet(1); // One more word finished copying
}
// Now in state 4: zero (and immutable) in old
// Return the self bitvector for 'fluid' programming style
return this;
}
// Debug aid: print 'msg' indented by 'd' levels. Not used in normal operation.
private void print( int d, String msg ) {
for( int i=0; i<d; i++ )
System.out.print(" ");
System.out.println(msg);
}
// Debug aid: dump this NBSI (and its nested/new tables) to stdout.
private void print(int d) {
StringBuilder buf = new StringBuilder();
buf.append("NBSI - _bits.len=");
NBSI x = this;
while( x != null ) {
buf.append(" "+x._bits.length);
x = x._nbsi64;
}
print(d,buf.toString());
x = this;
while( x != null ) {
for( int i=0; i<x._bits.length; i++ )
System.out.print(Long.toHexString(x._bits[i])+" ");
x = x._nbsi64;
System.out.println();
}
if( _copyIdx.get() != 0 || _copyDone.get() != 0 )
print(d,"_copyIdx="+_copyIdx.get()+" _copyDone="+_copyDone.get()+" _words_to_cpy="+_sum_bits_length);
if( _new != null ) {
print(d,"__has_new - ");
_new.print(d+1);
}
}
}
}

View File

@ -1,213 +0,0 @@
package org.warp.commonutils.batch;
import com.google.common.util.concurrent.AtomicDouble;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Adaptively batches items before handing them to a subclass.
 *
 * <p>On construction the link is pinged once; if the measured latency exceeds
 * 0.1ms, packing mode is enabled: offered items are queued and a background
 * sender periodically decides (from measured latency, per-item transfer time
 * and throughput heuristics) whether to flush the queue as a batch. With a
 * fast link every item is executed directly, with no queuing at all.
 */
public abstract class Batching<T> {

  private final int pingRefreshTimeMillis;
  // Estimated per-item transfer cost: 10% of the measured ping (see refreshPing()).
  private volatile double singleItemTransferTimeMillis;
  // Estimated fixed round-trip latency: 90% of the measured ping (see refreshPing()).
  private volatile double latencyMillis;
  // Flipped to true once (and never back) when the latency makes batching worthwhile.
  private final AtomicBoolean enablePacking = new AtomicBoolean(false);
  private final ConcurrentLinkedQueue<ExecutorService> executors = new ConcurrentLinkedQueue<>();
  private final AtomicBoolean closeRequested = new AtomicBoolean(false);
  // Guards waitingPutItems so a flush never interleaves with concurrent offers.
  private final ReentrantLock waitingAccessLock = new ReentrantLock();
  private final ConcurrentLinkedQueue<T> waitingPutItems = new ConcurrentLinkedQueue<>();
  // Total time spent waiting between flushes since the last flush.
  private final AtomicDouble lostTimeMillis = new AtomicDouble(0d);
  private final AtomicDouble sentItems = new AtomicDouble(0);
  private final double startTimeMillis = ((double) System.nanoTime()) / 1000000d;

  /**
   * @param pingRefreshTimeMillis interval, in milliseconds, between ping re-measurements
   */
  public Batching(int pingRefreshTimeMillis) {
    this.pingRefreshTimeMillis = pingRefreshTimeMillis;
    refreshPing();
    if (enablePacking.get()) {
      ExecutorService executor = Executors.newFixedThreadPool(2);
      this.executors.offer(executor);
      executor.execute(this::pingRefreshExecutor);
      executor.execute(new BatchSender());
    }
  }

  // Background loop: re-measure the ping every pingRefreshTimeMillis until close.
  private void pingRefreshExecutor() {
    // FIX: the original kept an assigned-but-never-read 'closeReq' local and
    // swallowed InterruptedException without restoring the interrupt status.
    while (!closeRequested.get()) {
      try {
        Thread.sleep(pingRefreshTimeMillis);
      } catch (InterruptedException e) {
        // An interrupt can only come from our executor shutting down:
        // restore the flag and stop refreshing.
        Thread.currentThread().interrupt();
        return;
      }
      refreshPing();
    }
  }

  // Split a fresh ping measurement into latency (90%) and per-item cost (10%),
  // and enable packing once the latency is worth batching for.
  private void refreshPing() {
    double pingTime = ping();
    this.latencyMillis = 0.9 * pingTime;
    this.singleItemTransferTimeMillis = 0.1 * pingTime;
    this.enablePacking.compareAndSet(false, latencyMillis > 0.1d);
  }

  /** Queues {@code action} for batching, or executes it directly on a fast link. */
  public void offer(T action) {
    if (enablePacking.get()) {
      sentItems.addAndGet(1d);
      waitingAccessLock.lock();
      try {
        waitingPutItems.offer(action);
      } finally {
        waitingAccessLock.unlock();
      }
    } else {
      executeDirect(action);
    }
  }

  /** Queues all {@code actions} for batching, or executes them directly on a fast link. */
  public void offer(Collection<T> actions) {
    if (enablePacking.get()) {
      sentItems.addAndGet(actions.size());
      waitingAccessLock.lock();
      try {
        for (T action : actions) {
          waitingPutItems.offer(action);
        }
      } finally {
        waitingAccessLock.unlock();
      }
    } else {
      executeDirect(actions);
    }
  }

  public void offer(T... actions) {
    offer(List.of(actions));
  }

  /** Executes a batch of queued actions. Called only in packing mode. */
  protected abstract void executeBatch(Collection<T> actions);

  /** Single-item convenience overload of {@link #executeBatch(Collection)}. */
  protected void executeBatch(T action) {
    executeBatch(List.of(action));
  }

  /** Executes one action immediately, bypassing the queue. */
  protected abstract void executeDirect(T action);

  /** Executes several actions immediately, bypassing the queue. */
  protected abstract void executeDirect(Collection<T> action);

  /** @return the measured round-trip time of the underlying link, in milliseconds */
  protected abstract double ping();

  public abstract void close();

  // --- Throughput heuristics. All times in milliseconds. ---

  // Worst-case total time to send the current queue now.
  private static final double getItemSendLongestTime(double lostTime, double latencyMillis, double waitingSize,
      double singleItemTransferTimeMillis) {
    return lostTime + latencyMillis + waitingSize * singleItemTransferTimeMillis;
  }

  // Worst-case total time if we instead wait one more cycle before sending.
  private static final double getItemSendLongestTimeNext(double lostTime, double latencyMillis, double waitTime,
      double waitingSize, double singleItemTransferTimeMillis, double itemsPerMillisecondIdeal) {
    return lostTime + latencyMillis + waitTime + (waitingSize
        + (waitTime * itemsPerMillisecondIdeal) * singleItemTransferTimeMillis);
  }

  private static final double getItemsPerSecond(double waitingSize, double itemSendLongestTime) {
    return waitingSize / notZero(itemSendLongestTime);
  }

  private static final double getAverageItemTime(double waitingSize, double itemSendLongestTime) {
    return itemSendLongestTime / notZero(waitingSize);
  }

  private static final double getNextItemsPerSecond(double waitingSize, double nextItemSendLongestTime, double waitTime,
      double itemsPerMillisecondIdeal) {
    return (waitingSize + (waitTime * itemsPerMillisecondIdeal)) / notZero(nextItemSendLongestTime);
  }

  private static final double getNextAverageItemTime(double waitingSize, double nextItemSendLongestTime,
      double waitTime, double itemsPerMillisecondIdeal) {
    return nextItemSendLongestTime / notZero((waitingSize + (waitTime * itemsPerMillisecondIdeal)));
  }

  // Guard against division by zero in the ratio helpers above.
  private static final double notZero(double input) {
    if (input != 0) {
      return input;
    } else {
      return input + 0.000000000000000000001d;
    }
  }

  // Background loop: sleeps roughly one latency period, then decides from the
  // heuristics whether to flush the queued items as a batch or skip the turn.
  private class BatchSender implements Runnable {

    @Override
    public void run() {
      boolean closeReq;
      while ((!(closeReq = closeRequested.get())) || !waitingPutItems.isEmpty()) {
        double waitTimeMillis = latencyMillis;
        long waitTimeNanoMillis = (long) Math.floor(latencyMillis);
        int waitTimeNanos = (int) ((waitTimeMillis - ((double) waitTimeNanoMillis)) * 1000000d);
        try {
          if (!closeReq) {
            Thread.sleep(waitTimeNanoMillis, waitTimeNanos);
          }
        } catch (InterruptedException e) {
          // Deliberately only logged: the sender must keep draining the queue
          // even after a spurious interrupt; the loop exits via closeRequested.
          e.printStackTrace();
        }
        waitingAccessLock.lock();
        try {
          if (!waitingPutItems.isEmpty()) {
            int waitingSize = waitingPutItems.size();
            double lostTime = lostTimeMillis.addAndGet(waitTimeMillis); // Get the lost time as the time
            // in the middle
            double idealItemsPerMillis =
                sentItems.get() / notZero(((double) System.nanoTime()) / 1000000d - startTimeMillis);
            double idealMillisPerItem = 1d / notZero(idealItemsPerMillis);
            double itemSendLongestTime = getItemSendLongestTime(lostTime, latencyMillis, waitingSize,
                singleItemTransferTimeMillis);
            double itemsPerSecond = getItemsPerSecond(waitingSize, itemSendLongestTime);
            double averageItemTime = getAverageItemTime(waitingSize, itemSendLongestTime);
            double nextItemSendLongestTime = getItemSendLongestTimeNext(lostTime, latencyMillis, waitTimeMillis,
                waitingSize, singleItemTransferTimeMillis, idealItemsPerMillis);
            double nextItemsPerSecond = getNextItemsPerSecond(waitingSize, nextItemSendLongestTime, waitTimeMillis,
                idealItemsPerMillis);
            // FIX: pass nextItemSendLongestTime here; the original passed
            // itemSendLongestTime, contradicting the parameter name and skewing
            // the "wait one more turn" comparison below.
            double nextAverageItemTime = getNextAverageItemTime(waitingSize, nextItemSendLongestTime, waitTimeMillis,
                idealItemsPerMillis);
            boolean do1 = idealMillisPerItem > latencyMillis;
            boolean do2 = itemsPerSecond > nextItemsPerSecond;
            boolean do3 = averageItemTime - nextAverageItemTime < latencyMillis;
            boolean do4 = averageItemTime > 5;
            boolean doThisTurn = do1 | do2 | do3 | do4 || closeReq;
            if (doThisTurn) {
              lostTimeMillis.set(0);
              if (waitingSize > 1) {
                executeBatch(waitingPutItems);
              } else {
                T pair = waitingPutItems.poll();
                executeBatch(pair);
              }
              // ~1% sampled debug output.
              if ((System.nanoTime() % 100) < 1) {
                System.out.printf("LATENCY=%.2f; WAITED=%.2f; PACKET_SIZE=%.2f; AVG_ITEM_TIME=%.2f; "
                    + "NEXT_AVG_ITEM_TIME=%.2f; DO=%s,%s,%s\n", latencyMillis, lostTime, (double) waitingSize,
                    averageItemTime, nextAverageItemTime, "" + do1, "" + do2, "" + do3);
                System.out.printf("idealMillisPerItem=%.2f; itemsPerSecond=%.2f; nextItemsPerSecond=%"
                    + ".2f; averageItemTime-nextAverageItemTime=%.2f\n", idealItemsPerMillis, itemsPerSecond,
                    nextItemsPerSecond, averageItemTime - nextAverageItemTime);
              }
              waitingPutItems.clear();
            } else {
              if ((System.nanoTime() % 100) < 1) {
                System.out.println("SKIPPED TURN");
              }
            }
          }
        } finally {
          waitingAccessLock.unlock();
        }
      }
    }
  }
}

View File

@ -1,80 +0,0 @@
package org.warp.commonutils.batch;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.lang3.tuple.Pair;
/**
 * A {@link Batching} specialization that transports key/value pairs.
 * The pair-based {@code offer} overloads inherited from {@link Batching} are
 * deprecated in favor of the explicit key/value overloads; internally every
 * submission is normalized through the private {@code offer_} methods.
 */
public abstract class KVSafeBatching<T, U> extends Batching<Pair<T, U>> {

  public KVSafeBatching(int pingRefreshTimeMillis) {
    super(pingRefreshTimeMillis);
  }

  /** @deprecated use {@link #offer(Object, Object)} or {@link #offer(Object[], Object[])} instead. */
  @Deprecated
  @Override
  public void offer(Pair<T, U>... actions) {
    offer(List.of(actions));
  }

  /** @deprecated use the key/value overloads instead. */
  @Deprecated
  @Override
  public void offer(Collection<Pair<T, U>> actions) {
    // Split the incoming pairs into parallel key/value arrays, then re-queue.
    final int size = actions.size();
    Object[] keyArray = new Object[size];
    Object[] valueArray = new Object[size];
    int pos = 0;
    for (Pair<T, U> entry : actions) {
      keyArray[pos] = entry.getKey();
      valueArray[pos] = entry.getValue();
      pos++;
    }
    offer_(keyArray, valueArray);
  }

  /** Queues a single key/value pair. */
  public void offer(T key, U value) {
    this.offer_(key, value);
  }

  /** Queues parallel arrays of keys and values (index i of each belongs together). */
  public void offer(T[] keys, U[] values) {
    // A single pair takes the scalar fast path.
    if (keys.length == 1 && values.length == 1) {
      this.offer_(keys[0], values[0]);
    } else {
      this.offer_(keys, values);
    }
  }

  private void offer_(T key, U value) {
    super.offer(Pair.of(key, value));
  }

  private void offer_(Object[] keys, Object[] values) {
    if (keys.length != values.length) {
      throw new IllegalArgumentException("Keys and values count must be the same.");
    }
    List<Pair<T, U>> pairs = new ArrayList<>(keys.length);
    for (int idx = 0; idx < keys.length; idx++) {
      //noinspection unchecked
      pairs.add(Pair.of((T) keys[idx], (U) values[idx]));
    }
    super.offer(pairs);
  }

  /** No-op: subclasses are expected to consume pairs via the direct path. */
  @Override
  protected void executeBatch(Collection<Pair<T, U>> actions) {
  }

  /** No-op by default. */
  @Override
  protected void executeDirect(Pair<T, U> action) {
  }

  /** No-op by default. */
  @Override
  protected void executeDirect(Collection<Pair<T, U>> action) {
  }

  /** No-op by default. */
  @Override
  public void close() {
  }
}

View File

@ -1,201 +0,0 @@
package org.warp.commonutils.batch;
import java.util.concurrent.CompletionException;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import org.warp.commonutils.concurrency.executor.BoundedExecutorService;
import org.warp.commonutils.functional.TriConsumer;
import org.warp.commonutils.type.IntWrapper;
import org.warp.commonutils.type.ShortNamedThreadFactory;
import org.warp.commonutils.type.VariableWrapper;
/**
 * Helpers that drain a push-style iterator in parallel: items are buffered
 * into chunks of {@code groupSize} and each full chunk is handed to a bounded
 * executor. The first exception thrown by the consumer aborts the iteration
 * and is rethrown as a {@link CompletionException} after all workers finish.
 */
public class ParallelUtils {

  /**
   * Consumes every value produced by {@code iterator} with {@code consumer},
   * in parallel chunks of {@code groupSize}.
   *
   * @throws CompletionException wrapping the first consumer failure or executor rejection
   */
  public static <V> void parallelize(Consumer<Consumer<V>> iterator,
      int maxQueueSize,
      int parallelism,
      int groupSize, Consumer<V> consumer) throws CompletionException {
    var parallelExecutor = BoundedExecutorService.create(maxQueueSize,
        parallelism,
        parallelism,
        0,
        TimeUnit.MILLISECONDS,
        new ShortNamedThreadFactory("ForEachParallel"),
        (a, b) -> {}
    );
    final int CHUNK_SIZE = groupSize;
    IntWrapper count = new IntWrapper(CHUNK_SIZE);
    VariableWrapper<Object[]> values = new VariableWrapper<>(new Object[CHUNK_SIZE]);
    AtomicReference<CompletionException> firstExceptionReference = new AtomicReference<>(null);
    iterator.accept((value) -> {
      // Abort the producer as soon as a worker has failed.
      var firstException = firstExceptionReference.get();
      if (firstException != null) {
        throw firstException;
      }
      values.var[CHUNK_SIZE - count.var] = value;
      count.var--;
      if (count.var == 0) {
        count.var = CHUNK_SIZE;
        Object[] valuesCopy = values.var;
        values.var = new Object[CHUNK_SIZE];
        try {
          parallelExecutor.execute(() -> {
            for (int i = 0; i < CHUNK_SIZE; i++) {
              try {
                //noinspection unchecked
                consumer.accept((V) valuesCopy[i]);
              } catch (Exception ex) {
                firstExceptionReference.compareAndSet(null, new CompletionException(ex));
                // FIX: stop the chunk after the first failure (consistent with
                // the two-argument overload; the remaining work is wasted).
                break;
              }
            }
          });
        } catch (RejectedExecutionException e) {
          throw new CompletionException(e);
        }
      }
    });
    // FIX: flush the final partial chunk. Previously the last
    // (total % groupSize) values were silently dropped.
    final int tailSize = CHUNK_SIZE - count.var;
    if (tailSize > 0) {
      Object[] valuesCopy = values.var;
      try {
        parallelExecutor.execute(() -> {
          for (int i = 0; i < tailSize; i++) {
            try {
              //noinspection unchecked
              consumer.accept((V) valuesCopy[i]);
            } catch (Exception ex) {
              firstExceptionReference.compareAndSet(null, new CompletionException(ex));
              break;
            }
          }
        });
      } catch (RejectedExecutionException e) {
        throw new CompletionException(e);
      }
    }
    parallelExecutor.shutdown();
    try {
      parallelExecutor.awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException e) {
      // Restore the interrupt status before surfacing the failure.
      Thread.currentThread().interrupt();
      throw new RuntimeException("Parallel forEach interrupted", e);
    }
    var firstException = firstExceptionReference.get();
    if (firstException != null) {
      throw firstException;
    }
  }

  /**
   * Key/value variant of {@link #parallelize(Consumer, int, int, int, Consumer)}.
   *
   * @throws CompletionException wrapping the first consumer failure or executor rejection
   */
  public static <K, V> void parallelize(Consumer<BiConsumer<K, V>> iterator,
      int maxQueueSize,
      int parallelism,
      int groupSize, BiConsumer<K, V> consumer) throws CompletionException {
    var parallelExecutor = BoundedExecutorService.create(maxQueueSize,
        parallelism,
        parallelism,
        0,
        TimeUnit.MILLISECONDS,
        new ShortNamedThreadFactory("ForEachParallel"),
        (a, b) -> {}
    );
    final int CHUNK_SIZE = groupSize;
    IntWrapper count = new IntWrapper(CHUNK_SIZE);
    VariableWrapper<Object[]> keys = new VariableWrapper<>(new Object[CHUNK_SIZE]);
    VariableWrapper<Object[]> values = new VariableWrapper<>(new Object[CHUNK_SIZE]);
    AtomicReference<CompletionException> firstExceptionReference = new AtomicReference<>(null);
    iterator.accept((key, value) -> {
      var firstException = firstExceptionReference.get();
      if (firstException != null) {
        throw firstException;
      }
      keys.var[CHUNK_SIZE - count.var] = key;
      values.var[CHUNK_SIZE - count.var] = value;
      count.var--;
      if (count.var == 0) {
        count.var = CHUNK_SIZE;
        Object[] keysCopy = keys.var;
        Object[] valuesCopy = values.var;
        keys.var = new Object[CHUNK_SIZE];
        values.var = new Object[CHUNK_SIZE];
        try {
          parallelExecutor.execute(() -> {
            for (int i = 0; i < CHUNK_SIZE; i++) {
              try {
                //noinspection unchecked
                consumer.accept((K) keysCopy[i], (V) valuesCopy[i]);
              } catch (Exception ex) {
                firstExceptionReference.compareAndSet(null, new CompletionException(ex));
                break;
              }
            }
          });
        } catch (RejectedExecutionException e) {
          throw new CompletionException(e);
        }
      }
    });
    // FIX: flush the final partial chunk (previously silently dropped).
    final int tailSize = CHUNK_SIZE - count.var;
    if (tailSize > 0) {
      Object[] keysCopy = keys.var;
      Object[] valuesCopy = values.var;
      try {
        parallelExecutor.execute(() -> {
          for (int i = 0; i < tailSize; i++) {
            try {
              //noinspection unchecked
              consumer.accept((K) keysCopy[i], (V) valuesCopy[i]);
            } catch (Exception ex) {
              firstExceptionReference.compareAndSet(null, new CompletionException(ex));
              break;
            }
          }
        });
      } catch (RejectedExecutionException e) {
        throw new CompletionException(e);
      }
    }
    parallelExecutor.shutdown();
    try {
      parallelExecutor.awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException("Parallel forEach interrupted", e);
    }
    var firstException = firstExceptionReference.get();
    if (firstException != null) {
      throw firstException;
    }
  }

  /**
   * Two-key variant of {@link #parallelize(Consumer, int, int, int, Consumer)}.
   *
   * @throws CompletionException wrapping the first consumer failure or executor rejection
   */
  public static <K1, K2, V> void parallelize(Consumer<TriConsumer<K1, K2, V>> iterator,
      int maxQueueSize,
      int parallelism,
      int groupSize,
      TriConsumer<K1, K2, V> consumer) throws CompletionException {
    var parallelExecutor = BoundedExecutorService.create(maxQueueSize,
        parallelism,
        parallelism,
        0,
        TimeUnit.MILLISECONDS,
        new ShortNamedThreadFactory("ForEachParallel"),
        (a, b) -> {}
    );
    final int CHUNK_SIZE = groupSize;
    IntWrapper count = new IntWrapper(CHUNK_SIZE);
    VariableWrapper<Object[]> keys1 = new VariableWrapper<>(new Object[CHUNK_SIZE]);
    VariableWrapper<Object[]> keys2 = new VariableWrapper<>(new Object[CHUNK_SIZE]);
    VariableWrapper<Object[]> values = new VariableWrapper<>(new Object[CHUNK_SIZE]);
    AtomicReference<CompletionException> firstExceptionReference = new AtomicReference<>(null);
    iterator.accept((key1, key2, value) -> {
      var firstException = firstExceptionReference.get();
      if (firstException != null) {
        throw firstException;
      }
      keys1.var[CHUNK_SIZE - count.var] = key1;
      keys2.var[CHUNK_SIZE - count.var] = key2;
      values.var[CHUNK_SIZE - count.var] = value;
      count.var--;
      if (count.var == 0) {
        count.var = CHUNK_SIZE;
        Object[] keys1Copy = keys1.var;
        Object[] keys2Copy = keys2.var;
        Object[] valuesCopy = values.var;
        keys1.var = new Object[CHUNK_SIZE];
        keys2.var = new Object[CHUNK_SIZE];
        values.var = new Object[CHUNK_SIZE];
        try {
          parallelExecutor.execute(() -> {
            for (int i = 0; i < CHUNK_SIZE; i++) {
              try {
                //noinspection unchecked
                consumer.accept((K1) keys1Copy[i], (K2) keys2Copy[i], (V) valuesCopy[i]);
              } catch (Exception ex) {
                firstExceptionReference.compareAndSet(null, new CompletionException(ex));
                break;
              }
            }
          });
        } catch (RejectedExecutionException e) {
          throw new CompletionException(e);
        }
      }
    });
    // FIX: flush the final partial chunk (previously silently dropped).
    final int tailSize = CHUNK_SIZE - count.var;
    if (tailSize > 0) {
      Object[] keys1Copy = keys1.var;
      Object[] keys2Copy = keys2.var;
      Object[] valuesCopy = values.var;
      try {
        parallelExecutor.execute(() -> {
          for (int i = 0; i < tailSize; i++) {
            try {
              //noinspection unchecked
              consumer.accept((K1) keys1Copy[i], (K2) keys2Copy[i], (V) valuesCopy[i]);
            } catch (Exception ex) {
              firstExceptionReference.compareAndSet(null, new CompletionException(ex));
              break;
            }
          }
        });
      } catch (RejectedExecutionException e) {
        throw new CompletionException(e);
      }
    }
    parallelExecutor.shutdown();
    try {
      parallelExecutor.awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException("Parallel forEach interrupted", e);
    }
    var firstException = firstExceptionReference.get();
    if (firstException != null) {
      throw firstException;
    }
  }
}

View File

@ -1,13 +0,0 @@
package org.warp.commonutils.concurrency.atomicity;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a field, method or type whose behavior can be considered atomic.
 * Source-retention only: a documentation aid, never inspected at runtime.
 */
@Retention(RetentionPolicy.SOURCE)
@Target({ElementType.FIELD, ElementType.METHOD, ElementType.TYPE})
public @interface Atomic {}

View File

@ -1,13 +0,0 @@
package org.warp.commonutils.concurrency.atomicity;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a field, method or type whose behavior must NOT be assumed atomic.
 * Source-retention only: a documentation aid, never inspected at runtime.
 */
@Retention(RetentionPolicy.SOURCE)
@Target({ElementType.FIELD, ElementType.METHOD, ElementType.TYPE})
public @interface NotAtomic {}

View File

@ -1,229 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.lang.StackWalker.StackFrame;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.jetbrains.annotations.NotNull;
import org.warp.commonutils.type.IntWrapper;
public class AsyncStackTraceExecutorDecorator extends ExecutorDecorator {
private static final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
private static final Map<Thread, LSTTask> threadToTask = new HashMap<>();
private static final Map<Thread, AsyncStackTraceExecutorDecorator> threadToExecutor = new HashMap<>();
public AsyncStackTraceExecutorDecorator(Executor executor) {
super(executor);
}
@Override
public void execute(@NotNull Runnable command) {
var currentThread = Thread.currentThread();
List<StackFrame> frames = new ArrayList<>();
LSTTask lstTask;
lock.readLock().lock();
try {
lstTask = threadToTask.getOrDefault(currentThread, null);
// Add the current stack frames
addCurrentStackTrace(frames, lstTask != null);
if (lstTask != null) {
frames.addAll(lstTask.frames);
}
lstTask = new LSTTask(command, frames);
//System.out.println("execute(): THREAD-" + Thread.currentThread().hashCode() + " TASK-" + lstTask.hashCode());
} finally {
lock.readLock().unlock();
}
super.execute(lstTask);
}
private static void addCurrentStackTrace(List<StackFrame> frames, boolean isFromAsyncCal) {
StackWalker.getInstance().walk(a -> {
IntWrapper count = new IntWrapper(0);
int STACK_MAX_SIZE = 10;
a.filter(x -> {
var cn = x.getClassName();
return !cn.equals("java.util.concurrent.CompletableFuture")
&& !cn.equals("java.util.concurrent.CompletableFuture$AsyncRun")
&& !cn.equals("java.util.concurrent.ThreadPoolExecutor")
&& !cn.equals("java.util.concurrent.ThreadPoolExecutor$Worker")
&& !cn.equals("java.lang.Thread")
&& !cn.equals(LSTTask.class.getName())
&& !cn.equals(AsyncStackTraceExecutorDecorator.class.getName());
}).skip(0).limit(STACK_MAX_SIZE + 1).peek(x -> count.var++).forEachOrdered(frames::add);
if (count.var > STACK_MAX_SIZE) {
frames.remove(frames.size() - 1);
frames.add(new TextStackFrame("AndMoreFrames"));
}
return null;
});
if (isFromAsyncCal) {
frames.add(new TextStackFrame("AsyncCall"));
}
}
class LSTTask implements Runnable {
private final Runnable runnable;
List<StackFrame> frames;
LSTTask(Runnable runnable, List<StackFrame> frames) {
this.runnable = runnable;
this.frames = frames;
}
@Override
public void run() {
var currentThread = Thread.currentThread();
lock.writeLock().lock();
try {
threadToTask.put(currentThread, LSTTask.this);
threadToExecutor.put(currentThread, AsyncStackTraceExecutorDecorator.this);
} finally {
lock.writeLock().unlock();
}
try {
//System.out.println(" run(): THREAD-" + Thread.currentThread().hashCode() + " TASK-" + this.hashCode());
runnable.run();
} catch (Throwable t) {
RuntimeException e = new RuntimeException(t);
e.setStackTrace(frames.stream().map(StackFrame::toStackTraceElement).toArray(StackTraceElement[]::new));
throw e;
}
lock.writeLock().lock();
try {
threadToExecutor.remove(currentThread, AsyncStackTraceExecutorDecorator.this);
threadToTask.remove(currentThread, LSTTask.this);
} finally {
lock.writeLock().unlock();
}
}
}
public static void fixStackTrace(Exception ex) {
List<StackTraceElement> result = new ArrayList<>();
var currentThread = Thread.currentThread();
lock.readLock().lock();
try {
var executor = threadToExecutor.getOrDefault(currentThread, null);
if (executor != null) {
LSTTask lstTask = threadToTask.getOrDefault(currentThread, null);
if (lstTask != null) {
var currentStackFrames = new ArrayList<StackFrame>();
addCurrentStackTrace(currentStackFrames, true);
for (var frame : currentStackFrames) {
result.add(frame.toStackTraceElement());
}
for (var frame : lstTask.frames) {
result.add(frame.toStackTraceElement());
}
ex.setStackTrace(result.toArray(StackTraceElement[]::new));
}
}
} finally {
lock.readLock().unlock();
}
}
/**
 * Prints the calling thread's stack trace to stderr, appending the asynchronous frames captured
 * when the currently-running task was scheduled. Falls back to {@link Thread#dumpStack()} when
 * the calling thread is not executing a tracked task.
 */
public static void dumpStack() {
	var currentThread = Thread.currentThread();
	lock.readLock().lock();
	try {
		var executor = threadToExecutor.getOrDefault(currentThread, null);
		if (executor != null) {
			LSTTask lstTask = threadToTask.getOrDefault(currentThread, null);
			if (lstTask != null) {
				StringBuilder sb = new StringBuilder();
				sb.append(new Exception("Stack trace").toString()).append('\n');
				// Live frames of this thread first...
				var currentStackFrames = new ArrayList<StackFrame>();
				addCurrentStackTrace(currentStackFrames, true);
				for (var frame : currentStackFrames) {
					printStackFrame(sb, frame);
				}
				// ...then the frames captured on the submitting side.
				for (var frame : lstTask.frames) {
					printStackFrame(sb, frame);
				}
				System.err.println(sb.toString());
				return;
			}
		}
		// Not inside a tracked task: plain JVM stack dump.
		// NOTE(review): this still runs while holding the read lock — presumably cheap enough,
		// but worth confirming it cannot stall writers.
		Thread.dumpStack();
	} finally {
		lock.readLock().unlock();
	}
}
/**
 * Appends one frame line to {@code sb}, rendering the synthetic "AsyncCall" and
 * "AndMoreFrames" marker frames with special text.
 */
private static void printStackFrame(StringBuilder sb, StackFrame frame) {
	switch (frame.getClassName()) {
		case "AsyncCall":
			sb.append("\t(async call)\n");
			break;
		case "AndMoreFrames":
			sb.append("\t... omitted more frames\n");
			break;
		default:
			sb.append("\tat ").append(frame.toString()).append('\n');
			break;
	}
}
/**
 * A synthetic {@link StackFrame} that only carries a text label (exposed as the class name).
 * Used for marker frames such as "AsyncCall" or "AndMoreFrames"; all positional data is dummy.
 */
private static class TextStackFrame implements StackFrame {

	// Label shown in place of a class name.
	private final String text;

	public TextStackFrame(String text) {
		this.text = text;
	}

	@Override
	public String getClassName() {
		return text;
	}

	@Override
	public String getMethodName() {
		return "..";
	}

	@Override
	public Class<?> getDeclaringClass() {
		// No real class backs this synthetic frame.
		return Object.class;
	}

	@Override
	public int getByteCodeIndex() {
		return 0;
	}

	@Override
	public String getFileName() {
		// StackTraceElement accepts null for "unknown file".
		return null;
	}

	@Override
	public int getLineNumber() {
		return 0;
	}

	@Override
	public boolean isNativeMethod() {
		return false;
	}

	@Override
	public StackTraceElement toStackTraceElement() {
		return new StackTraceElement(getClassName(), getMethodName(), getFileName(), getLineNumber());
	}
}
}

View File

@ -1,33 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.util.concurrent.ExecutorService;
import org.jetbrains.annotations.NotNull;
/**
 * Decorates an {@link ExecutorService} so that executed tasks record the submitting stack trace
 * (via {@link AsyncStackTraceExecutorDecorator}), unless globally disabled or the service is
 * already decorated with this class.
 */
public class AsyncStackTraceExecutorServiceDecorator extends SimplerExecutorServiceDecorator {

	// todo: Fix async stacktrace performance and memory problems
	// Global kill-switch: while true, no async-stack-trace decoration is applied at all.
	private static final boolean DISABLE_ASYNC_STACKTRACES_GLOBALLY = true;

	public AsyncStackTraceExecutorServiceDecorator(ExecutorService executorService) {
		super(executorService, (executor) -> {
			if (DISABLE_ASYNC_STACKTRACES_GLOBALLY) {
				return executor;
			}
			// Do nothing if it has already the asyncstacktrace executor service decorator
			if (executorService instanceof ExecutorServiceDecorator) {
				var decorators = ((ExecutorServiceDecorator) executorService).getExecutorServiceDecorators();
				if (decorators.contains(AsyncStackTraceExecutorServiceDecorator.class)) {
					// Plain pass-through decorator. Fix: the previous anonymous class declared an
					// execute() override that only called super.execute() — a no-op.
					return new ExecutorDecorator(executorService) {};
				}
			}
			return new AsyncStackTraceExecutorDecorator(executor);
		});
	}
}

View File

@ -1,200 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.StampedLock;
import java.util.function.BiConsumer;
import java.util.function.Supplier;
import javax.annotation.Nonnull;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
 * An {@link ExecutorService} decorator that applies backpressure: at most
 * {@code maximumTaskNumber} tasks may be pending at once. A submitter over that limit blocks for
 * up to {@code maximumTimeout} waiting for a permit and is then rejected with a
 * {@link RejectedExecutionException}. An optional {@code queueSizeStatus} callback is notified
 * with (limitReached, queueSize) pairs as tasks are submitted and started.
 */
public class BlockingOnFullQueueExecutorServiceDecorator extends ExecutorServiceDecorator {

	// Set when maximumTaskNumber == 0, or on shutdown: submissions then bypass the semaphore.
	private volatile boolean ignoreTaskLimit;

	// Permits for pending tasks; a permit is returned by the PermitReleasing* decorators when
	// the wrapped task starts running.
	@Nonnull
	private final Semaphore taskLimit;

	// Maximum time a submitter may wait for a permit before the task is rejected.
	@Nonnull
	private final Duration timeout;

	private final int maximumTaskNumber;

	// Reports the current size of the underlying executor's queue for status callbacks.
	@Nonnull
	private final Supplier<Integer> queueSizeSupplier;

	// Optional observer receiving (limitReached, queueSize); null when not interested.
	private final @Nullable BiConsumer<Boolean, Integer> queueSizeStatus;

	// Serializes queueSizeStatus invocations so observers see updates one at a time.
	@Nonnull
	private final Object queueSizeStatusLock;

	/**
	 * @param executor          the service that actually runs tasks
	 * @param maximumTaskNumber maximum pending tasks; 0 disables the limit entirely
	 * @param maximumTimeout    how long a submitter may block waiting for room
	 * @param queueSizeSupplier supplies the underlying queue size for status callbacks
	 * @param queueSizeStatus   optional (limitReached, queueSize) observer, may be null
	 * @throws IllegalArgumentException if maximumTaskNumber or maximumTimeout is negative
	 */
	public BlockingOnFullQueueExecutorServiceDecorator(@Nonnull final ExecutorService executor, final int maximumTaskNumber, @Nonnull final Duration maximumTimeout, @Nonnull Supplier<Integer> queueSizeSupplier, @Nullable BiConsumer<Boolean, Integer> queueSizeStatus) {
		super(executor);
		// NOTE(review): removed a dead `ExecutorServiceDecorator.hasDecorator(executor, getClass())`
		// call whose boolean result was discarded, and an unused private StampedLock field.
		if (maximumTaskNumber < 0) {
			throw new IllegalArgumentException(String.format("At least zero tasks must be permitted, not '%d'", maximumTaskNumber));
		} else if (maximumTaskNumber == 0) {
			ignoreTaskLimit = true;
		}
		this.timeout = Objects.requireNonNull(maximumTimeout, "'maximumTimeout' must not be null");
		if (this.timeout.isNegative()) {
			throw new IllegalArgumentException("'maximumTimeout' must not be negative");
		}
		this.maximumTaskNumber = maximumTaskNumber;
		this.queueSizeSupplier = queueSizeSupplier;
		this.queueSizeStatus = queueSizeStatus;
		this.queueSizeStatusLock = new Object();
		this.taskLimit = new Semaphore(maximumTaskNumber);
	}

	public BlockingOnFullQueueExecutorServiceDecorator(@Nonnull final ExecutorService executor, final int maximumTaskNumber, @Nonnull final Duration maximumTimeout, @Nonnull Supplier<Integer> queueSizeSupplier) {
		this(executor, maximumTaskNumber, maximumTimeout, queueSizeSupplier, null);
	}

	// Publishes the current queue size to the optional observer.
	private void updateQueue() {
		var queueSize = queueSizeSupplier.get();
		synchronized (queueSizeStatusLock) {
			if (queueSizeStatus != null) queueSizeStatus.accept(queueSize >= maximumTaskNumber, queueSize);
		}
	}

	// Blocks until a permit is available (or the timeout elapses) before a task may be handed to
	// the underlying executor. Rejects on timeout or interruption.
	private void preExecute(Object command) {
		Objects.requireNonNull(command, "'command' must not be null");
		if (!ignoreTaskLimit) {
			try {
				if (this.taskLimit.availablePermits() == 0) {
					// Queue is full: report saturation, counting submitters already waiting.
					synchronized (queueSizeStatusLock) {
						if (queueSizeStatus != null)
							queueSizeStatus.accept(true,
									maximumTaskNumber + (taskLimit.hasQueuedThreads() ? taskLimit.getQueueLength() : 0)
							);
					}
				}
				// attempt to acquire permit for task execution
				if (!this.taskLimit.tryAcquire(this.timeout.toMillis(), MILLISECONDS)) {
					throw new RejectedExecutionException(String.format("Executor '%s' busy", super.toString()));
				}
			} catch (final InterruptedException e) {
				// restore interrupt status
				Thread.currentThread().interrupt();
				throw new RejectedExecutionException(e);
			}
		}
	}

	@Override
	public final void execute(final @NotNull Runnable command) {
		preExecute(command);
		super.execute(new PermitReleasingRunnableDecorator(command, () -> {
			var queueSize = queueSizeSupplier.get();
			synchronized (queueSizeStatusLock) {
				if (queueSizeStatus != null) queueSizeStatus.accept(!ignoreTaskLimit && queueSize >= maximumTaskNumber, queueSize);
			}
		}, this.taskLimit));
	}

	@Override
	public void shutdown() {
		// Unblock any submitters still waiting on the semaphore before shutting down.
		this.ignoreTaskLimit = true;
		while (this.taskLimit.hasQueuedThreads()) {
			this.taskLimit.release(10);
		}
		super.shutdown();
	}

	@NotNull
	@Override
	public List<Runnable> shutdownNow() {
		// Unblock any submitters still waiting on the semaphore before shutting down.
		this.ignoreTaskLimit = true;
		while (this.taskLimit.hasQueuedThreads()) {
			this.taskLimit.release(10);
		}
		return super.shutdownNow();
	}

	// NOTE(review): removed isShutdown()/isTerminated()/awaitTermination() overrides that only
	// called super — the inherited delegation is identical.

	@NotNull
	@Override
	public <T> Future<T> submit(@NotNull Callable<T> task) {
		preExecute(task);
		return super.submit(new PermitReleasingCallableDecorator<>(task, this::updateQueue, this.taskLimit));
	}

	@NotNull
	@Override
	public <T> Future<T> submit(@NotNull Runnable task, T result) {
		preExecute(task);
		return super.submit(new PermitReleasingRunnableDecorator(task, this::updateQueue, this.taskLimit), result);
	}

	@NotNull
	@Override
	public Future<?> submit(@NotNull Runnable task) {
		preExecute(task);
		return super.submit(new PermitReleasingRunnableDecorator(task, this::updateQueue, this.taskLimit));
	}

	@NotNull
	@Override
	public <T> List<Future<T>> invokeAll(@NotNull Collection<? extends Callable<T>> tasks) {
		throw new UnsupportedOperationException("invokeAll(tasks) is not supported");
	}

	@NotNull
	@Override
	public <T> List<Future<T>> invokeAll(@NotNull Collection<? extends Callable<T>> tasks,
			long timeout,
			@NotNull TimeUnit unit) {
		throw new UnsupportedOperationException("invokeAll(tasks, timeout, unit) is not supported");
	}

	@NotNull
	@Override
	public <T> T invokeAny(@NotNull Collection<? extends Callable<T>> tasks) {
		throw new UnsupportedOperationException("invokeAny(tasks) is not supported");
	}

	@Override
	public <T> T invokeAny(@NotNull Collection<? extends Callable<T>> tasks, long timeout, @NotNull TimeUnit unit) {
		throw new UnsupportedOperationException("invokeAny(tasks, timeout, unit) is not supported");
	}

	@Override
	public final String toString() {
		return String.format("%s[availablePermits='%s',timeout='%s',delegate='%s']", getClass().getSimpleName(), this.taskLimit.availablePermits(),
				this.timeout, super.toString());
	}
}

View File

@ -1,52 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.util.concurrent.Executor;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.locks.StampedLock;
import org.jetbrains.annotations.NotNull;
/**
 * Wraps an {@link Executor} so that submissions block when more than {@code maxQueueSize} tasks
 * are queued, and supports {@link #drainAll(DrainAllMethodLambda)} which runs a callback once all
 * in-flight tasks have completed (via a StampedLock read/write handshake).
 */
public class BoundedExecutor {

	private final Executor executor;
	// Non-positive maxQueueSize means "unbounded".
	private final int maxQueueSize;
	private final Semaphore semaphore;
	// Each queued/running task holds a read stamp; drainAll() takes the write lock to wait.
	private final StampedLock drainAllLock = new StampedLock();

	public BoundedExecutor(Executor executor, int maxQueueSize) {
		this.executor = executor;
		this.maxQueueSize = maxQueueSize > 0 ? maxQueueSize : Integer.MAX_VALUE;
		// Fix: the semaphore previously used the raw constructor argument, so maxQueueSize <= 0
		// created 0 (or negative) permits and deadlocked instead of meaning "unbounded".
		this.semaphore = new Semaphore(this.maxQueueSize);
	}

	/**
	 * Submits {@code command}, blocking while {@code maxQueueSize} tasks are already queued.
	 *
	 * @throws RejectedExecutionException if the underlying executor rejects the task
	 * @throws InterruptedException       if interrupted while waiting for queue room
	 */
	public void executeButBlockIfFull(Runnable command) throws RejectedExecutionException, InterruptedException {
		var drainAllLockRead = drainAllLock.readLockInterruptibly();
		try {
			semaphore.acquire();
		} catch (InterruptedException e) {
			// Fix: release the read stamp on interruption, otherwise drainAll() blocks forever.
			drainAllLock.unlockRead(drainAllLockRead);
			throw e;
		}
		try {
			executor.execute(() -> {
				try {
					// Release queue room as soon as the task starts, then run it.
					semaphore.release();
					command.run();
				} finally {
					drainAllLock.unlockRead(drainAllLockRead);
				}
			});
		} catch (RejectedExecutionException | NullPointerException ex) {
			// Fix: also return the acquired permit on rejection; it was previously leaked,
			// shrinking the effective queue capacity on every rejected submission.
			semaphore.release();
			drainAllLock.unlockRead(drainAllLockRead);
			throw ex;
		}
	}

	/** Waits for all in-flight tasks to finish, then runs {@code runnableWhenDrained} exclusively. */
	public void drainAll(DrainAllMethodLambda runnableWhenDrained) throws InterruptedException {
		var drainAllWriteLock = drainAllLock.writeLockInterruptibly();
		try {
			runnableWhenDrained.run();
		} finally {
			drainAllLock.unlockWrite(drainAllWriteLock);
		}
	}

	/** Callback executed once the executor is drained; may block and may be interrupted. */
	public interface DrainAllMethodLambda {

		void run() throws InterruptedException;
	}
}

View File

@ -1,112 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.time.Duration;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import org.jetbrains.annotations.Nullable;
/**
 * Factory methods for bounded thread pools: a {@link ThreadPoolExecutor} wrapped in a
 * {@link BlockingOnFullQueueExecutorServiceDecorator} so that submitters block (and eventually
 * get rejected) instead of growing the queue without limit.
 */
public class BoundedExecutorService {

	private static final int MAX_BLOCKING_QUEUE_SIZE = 50000;
	// Effectively-infinite per-item TTL used by every non-custom factory method.
	private static final Duration UNLIMITED_TTL = Duration.ofDays(100000);

	private BoundedExecutorService() {
	}

	/** @deprecated use the overload taking an explicit {@link ThreadFactory}. */
	@Deprecated
	public static ExecutorService createUnbounded(
			int corePoolSize,
			int maxPoolSize,
			long keepAliveTime,
			TimeUnit unit,
			@Nullable BiConsumer<Boolean, Integer> queueSizeStatus) {
		return create(0, corePoolSize, maxPoolSize, keepAliveTime, unit, Executors.defaultThreadFactory(), queueSizeStatus);
	}

	/** Creates a pool with no task limit and a default bounded queue. */
	public static ExecutorService createUnbounded(
			int corePoolSize,
			int maxPoolSize,
			long keepAliveTime,
			TimeUnit unit,
			ThreadFactory threadFactory,
			@Nullable BiConsumer<Boolean, Integer> queueSizeStatus) {
		return createCustom(0, corePoolSize, maxPoolSize, keepAliveTime, unit, threadFactory, UNLIMITED_TTL, queueSizeStatus, new LinkedBlockingQueue<>(MAX_BLOCKING_QUEUE_SIZE));
	}

	/** Creates a pool with no task limit and a caller-supplied work queue. */
	public static ExecutorService createUnbounded(
			int corePoolSize,
			int maxPoolSize,
			long keepAliveTime,
			TimeUnit unit,
			ThreadFactory threadFactory,
			@Nullable BiConsumer<Boolean, Integer> queueSizeStatus,
			BlockingQueue<Runnable> queue) {
		return createCustom(0, corePoolSize, maxPoolSize, keepAliveTime, unit, threadFactory, UNLIMITED_TTL, queueSizeStatus, queue);
	}

	/** @deprecated use the overload taking an explicit {@link ThreadFactory}. */
	@Deprecated
	public static ExecutorService create(
			int maxQueueSize,
			int corePoolSize,
			int maxPoolSize,
			long keepAliveTime,
			TimeUnit unit,
			@Nullable BiConsumer<Boolean, Integer> queueSizeStatus) {
		return create(maxQueueSize, corePoolSize, maxPoolSize, keepAliveTime, unit, Executors.defaultThreadFactory(), queueSizeStatus);
	}

	/** Creates a pool limited to {@code maxQueueSize} pending tasks with a default bounded queue. */
	public static ExecutorService create(
			int maxQueueSize,
			int corePoolSize,
			int maxPoolSize,
			long keepAliveTime,
			TimeUnit unit,
			ThreadFactory threadFactory,
			@Nullable BiConsumer<Boolean, Integer> queueSizeStatus) {
		return createCustom(maxQueueSize, corePoolSize, maxPoolSize, keepAliveTime, unit, threadFactory, UNLIMITED_TTL, queueSizeStatus, new LinkedBlockingQueue<>(MAX_BLOCKING_QUEUE_SIZE));
	}

	/** Creates a pool limited to {@code maxQueueSize} pending tasks with a caller-supplied queue. */
	public static ExecutorService create(
			int maxQueueSize,
			int corePoolSize,
			int maxPoolSize,
			long keepAliveTime,
			TimeUnit unit,
			ThreadFactory threadFactory,
			@Nullable BiConsumer<Boolean, Integer> queueSizeStatus,
			BlockingQueue<Runnable> queue) {
		return createCustom(maxQueueSize, corePoolSize, maxPoolSize, keepAliveTime, unit, threadFactory, UNLIMITED_TTL, queueSizeStatus, queue);
	}

	/**
	 * Fully-custom variant: builds the {@link ThreadPoolExecutor} (caller-runs on rejection) and
	 * wraps it in the blocking decorator.
	 */
	public static ExecutorService createCustom(
			int maxQueueSize,
			int corePoolSize,
			int maxPoolSize,
			long keepAliveTime,
			TimeUnit unit,
			ThreadFactory threadFactory,
			Duration queueItemTtl,
			@Nullable BiConsumer<Boolean, Integer> queueSizeStatus,
			BlockingQueue<Runnable> queue) {
		ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(corePoolSize,
				maxPoolSize,
				keepAliveTime,
				unit,
				queue,
				threadFactory
		);
		threadPoolExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
		return new BlockingOnFullQueueExecutorServiceDecorator(threadPoolExecutor,
				maxQueueSize,
				queueItemTtl,
				queue::size,
				queueSizeStatus
		);
	}
}

View File

@ -1,18 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.util.Objects;
import java.util.concurrent.Callable;
/**
 * Base decorator for {@link Callable}: forwards {@link #call()} to the wrapped instance.
 * Subclasses override {@code call()} to add behavior around the delegate.
 */
public abstract class CallableDecorator<T> implements Callable<T> {

	private final Callable<T> delegate;

	public CallableDecorator(Callable<T> callable) {
		this.delegate = Objects.requireNonNull(callable);
	}

	/** Delegates directly to the wrapped callable. */
	@Override
	public T call() throws Exception {
		return delegate.call();
	}
}

View File

@ -1,40 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;
/**
 * A reference-counted map of per-key values: {@link #getValue(Object)} creates (or reuses) the
 * value for a key and increments its user count; {@link #releaseKey(Object)} decrements it and
 * discards the entry once nobody uses it. All access is synchronized on this segment.
 */
final class ConcurrencySegment<K, V> {

	private final Map<K, Entry> store = new HashMap<>();
	private final Supplier<V> valuesSupplier;

	ConcurrencySegment(Supplier<V> valuesSupplier) {
		this.valuesSupplier = valuesSupplier;
	}

	/** Returns the value for {@code key}, creating it on first use; pair with releaseKey(). */
	synchronized V getValue(K key) {
		Entry entry = store.get(key);
		if (entry != null) {
			entry.users++;
		} else {
			entry = new Entry();
			store.put(key, entry);
		}
		return entry.value;
	}

	/** Releases one use of {@code key}; the entry is removed when its count reaches zero. */
	synchronized void releaseKey(K key) {
		Entry entry = store.get(key);
		if (--entry.users == 0) {
			store.remove(key);
		}
	}

	// Value plus its reference count; non-static because it captures valuesSupplier.
	private class Entry {

		private int users = 1;
		private V value = valuesSupplier.get();
	}
}

View File

@ -1,30 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.Executor;
import org.jetbrains.annotations.NotNull;
/**
 * Base decorator for {@link Executor}: forwards {@link #execute(Runnable)} to the wrapped
 * executor and can report the decorator classes present in the wrapping chain.
 */
public abstract class ExecutorDecorator implements Executor {

	private final Executor executor;

	public ExecutorDecorator(Executor executor) {
		this.executor = Objects.requireNonNull(executor);
	}

	/**
	 * Returns the classes of all decorators in this chain, including this one.
	 * Fix: the innermost decorator previously omitted its own class (the non-decorator branch
	 * returned an empty set without adding {@code this.getClass()}), so a chain of depth one was
	 * reported as undecorated.
	 */
	public final Set<Class<? extends ExecutorDecorator>> getExecutorDecorators() {
		Set<Class<? extends ExecutorDecorator>> decorators;
		if (executor instanceof ExecutorDecorator) {
			decorators = ((ExecutorDecorator) executor).getExecutorDecorators();
		} else {
			decorators = new HashSet<>();
		}
		decorators.add(this.getClass());
		return decorators;
	}

	@Override
	public void execute(Runnable runnable) {
		executor.execute(runnable);
	}
}

View File

@ -1,119 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.jetbrains.annotations.NotNull;
/**
 * Base decorator for {@link ExecutorService}: every method delegates to the wrapped service, and
 * the decorator classes present in the wrapping chain can be queried.
 */
public abstract class ExecutorServiceDecorator implements ExecutorService {

	// Fix: made final — the delegate is assigned once in the constructor and never changes.
	private final ExecutorService executorService;

	public ExecutorServiceDecorator(ExecutorService executorService) {
		this.executorService = Objects.requireNonNull(executorService);
	}

	/** Returns true if {@code executor} has {@code decoratorClass} anywhere in its decorator chain. */
	protected static boolean hasDecorator(ExecutorService executor,
			Class<? extends ExecutorServiceDecorator> decoratorClass) {
		if (executor instanceof ExecutorServiceDecorator) {
			var executorServiceDecoratorImpl = (ExecutorServiceDecorator) executor;
			var executorServiceDecorators = executorServiceDecoratorImpl.getExecutorServiceDecorators();
			return executorServiceDecorators.contains(decoratorClass);
		}
		return false;
	}

	/**
	 * Returns the classes of all decorators in this chain, including this one.
	 * Fix: the innermost decorator previously omitted its own class (the non-decorator branch
	 * returned an empty set without adding {@code this.getClass()}), which made
	 * {@link #hasDecorator} fail for a chain of depth one.
	 */
	public final Set<Class<? extends ExecutorServiceDecorator>> getExecutorServiceDecorators() {
		Set<Class<? extends ExecutorServiceDecorator>> decorators;
		if (executorService instanceof ExecutorServiceDecorator) {
			decorators = ((ExecutorServiceDecorator) executorService).getExecutorServiceDecorators();
		} else {
			decorators = new HashSet<>();
		}
		decorators.add(this.getClass());
		return decorators;
	}

	@Override
	public void shutdown() {
		executorService.shutdown();
	}

	@Override
	public List<Runnable> shutdownNow() {
		return executorService.shutdownNow();
	}

	@Override
	public boolean isShutdown() {
		return executorService.isShutdown();
	}

	@Override
	public boolean isTerminated() {
		return executorService.isTerminated();
	}

	@Override
	public boolean awaitTermination(long l, TimeUnit timeUnit) throws InterruptedException {
		return executorService.awaitTermination(l, timeUnit);
	}

	@Override
	public <T> Future<T> submit(Callable<T> callable) {
		return executorService.submit(callable);
	}

	@Override
	public <T> Future<T> submit(Runnable runnable, T t) {
		return executorService.submit(runnable, t);
	}

	@Override
	public Future<?> submit(Runnable runnable) {
		return executorService.submit(runnable);
	}

	@Override
	public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> collection)
			throws InterruptedException {
		return executorService.invokeAll(collection);
	}

	@Override
	public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> collection,
			long l,
			TimeUnit timeUnit) throws InterruptedException {
		return executorService.invokeAll(collection, l, timeUnit);
	}

	@Override
	public <T> T invokeAny(Collection<? extends Callable<T>> collection)
			throws InterruptedException, ExecutionException {
		return executorService.invokeAny(collection);
	}

	@Override
	public <T> T invokeAny(Collection<? extends Callable<T>> collection, long l, TimeUnit timeUnit)
			throws InterruptedException, ExecutionException, TimeoutException {
		return executorService.invokeAny(collection, l, timeUnit);
	}

	@Override
	public void execute(Runnable runnable) {
		executorService.execute(runnable);
	}
}

View File

@ -1,108 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.locks.Lock;
import java.util.function.Supplier;
import org.warp.commonutils.functional.IORunnable;
import org.warp.commonutils.functional.IOSupplier;
import org.warp.commonutils.random.HashUtil;
/**
* An Executor which executes tasks on the caller thread.
* The tasks will be executed synchronously on a <b>per-key basis</b>.
* By saying <b>per-key</b>, we mean that thread safety is guaranteed for threads calling it with equals keys.
* When two threads calling the executor with equals keys, the executions will never overlap each other.
* On the other hand, the executor is implemented so calls from different threads, with keys that are not equals, will be executed concurrently with minimal contention between the calls.
* Calling threads might be suspended.
* Calling execute from different threads with equals keys has the same memory semantics as locking and releasing a java.util.concurrent.locks.{@link Lock}.
*/
public final class PerKeyReadWriteExecutor<KEY_TYPE> extends ReadWriteExecutor implements Closeable {

	private static final int BASE_CONCURRENCY_LEVEL = 32;

	// Number of independent segments; more segments mean less contention between distinct keys.
	private final int concurrencyLevel;
	// Each segment maps keys (selected by bounded hash) to a reference-counted per-key executor.
	private final ConcurrencySegment<KEY_TYPE, ReadWriteExecutor>[] segments;
	// Guarded by the outer lock inherited from ReadWriteExecutor:
	// tasks run under READ, close() flips this under WRITE.
	private boolean closed = false;

	public PerKeyReadWriteExecutor() {
		this(BASE_CONCURRENCY_LEVEL);
	}

	@SuppressWarnings({"unchecked"})
	public PerKeyReadWriteExecutor(int concurrencyLevel) {
		super();
		this.concurrencyLevel = concurrencyLevel;
		// Unchecked generic-array creation: safe, only ConcurrencySegment instances are stored.
		segments = (ConcurrencySegment<KEY_TYPE, ReadWriteExecutor>[]) new ConcurrencySegment[concurrencyLevel];
		for (int i = 0; i < concurrencyLevel; i++) {
			segments[i] = new ConcurrencySegment<>(ReadWriteExecutor::new);
		}
	}

	/**
	 * Runs {@code task} with the given lock mode on the executor dedicated to {@code key}.
	 * The outer READ lock only guards against a concurrent {@link #close()}; distinct keys
	 * still run concurrently.
	 *
	 * @throws IllegalStateException if this executor has been closed
	 */
	public void execute(KEY_TYPE key, ReadWriteExecutor.LockMode lockMode, Runnable task) {
		super.execute(LockMode.READ, () -> {
			if (closed) throw new IllegalStateException(PerKeyReadWriteExecutor.class.getSimpleName() + " is closed");
			int segmentIndex = HashUtil.boundedHash(key, concurrencyLevel);
			ConcurrencySegment<KEY_TYPE, ReadWriteExecutor> s = segments[segmentIndex];
			ReadWriteExecutor executor = s.getValue(key);
			try {
				executor.execute(lockMode, task);
			} finally {
				// Always drop the reference so idle per-key executors can be reclaimed.
				s.releaseKey(key);
			}
		});
	}

	/** Same as {@link #execute(Object, LockMode, Runnable)} but for IO tasks that may throw. */
	public void executeIO(KEY_TYPE key, ReadWriteExecutor.LockMode lockMode, IORunnable task) throws IOException {
		super.executeIO(LockMode.READ, () -> {
			if (closed) throw new IllegalStateException(PerKeyReadWriteExecutor.class.getSimpleName() + " is closed");
			int segmentIndex = HashUtil.boundedHash(key, concurrencyLevel);
			ConcurrencySegment<KEY_TYPE, ReadWriteExecutor> s = segments[segmentIndex];
			ReadWriteExecutor executor = s.getValue(key);
			try {
				executor.executeIO(lockMode, task);
			} finally {
				s.releaseKey(key);
			}
		});
	}

	/** Value-returning variant of {@link #execute(Object, LockMode, Runnable)}. */
	public <R> R execute(KEY_TYPE key, ReadWriteExecutor.LockMode lockMode, Supplier<R> task) {
		return super.execute(LockMode.READ, () -> {
			if (closed) throw new IllegalStateException(PerKeyReadWriteExecutor.class.getSimpleName() + " is closed");
			int segmentIndex = HashUtil.boundedHash(key, concurrencyLevel);
			ConcurrencySegment<KEY_TYPE, ReadWriteExecutor> s = segments[segmentIndex];
			ReadWriteExecutor executor = s.getValue(key);
			try {
				return executor.execute(lockMode, task);
			} finally {
				s.releaseKey(key);
			}
		});
	}

	/** Value-returning IO variant; the task may throw {@link IOException}. */
	public <R> R executeIO(KEY_TYPE key, ReadWriteExecutor.LockMode lockMode, IOSupplier<R> task) throws IOException {
		return super.executeIO(LockMode.READ, () -> {
			if (closed)
				throw new IllegalStateException(PerKeyReadWriteExecutor.class.getSimpleName() + " is closed");
			int segmentIndex = HashUtil.boundedHash(key, concurrencyLevel);
			ConcurrencySegment<KEY_TYPE, ReadWriteExecutor> s = segments[segmentIndex];
			ReadWriteExecutor executor = s.getValue(key);
			try {
				return executor.executeIO(lockMode, task);
			} finally {
				s.releaseKey(key);
			}
		});
	}

	/** Marks the executor closed; the WRITE lock waits for all in-flight tasks to finish. */
	@Override
	public void close() {
		super.execute(LockMode.WRITE, () -> {
			closed = true;
		});
	}
}

View File

@ -1,106 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.locks.Lock;
import java.util.function.Supplier;
import org.warp.commonutils.functional.IORunnable;
import org.warp.commonutils.functional.IOSupplier;
import org.warp.commonutils.random.HashUtil;
/**
* An Executor which executes tasks on the caller thread.
* The tasks will be executed synchronously on a <b>per-key basis</b>.
* By saying <b>per-key</b>, we mean that thread safety is guaranteed for threads calling it with equals keys.
* When two threads calling the executor with equals keys, the executions will never overlap each other.
* On the other hand, the executor is implemented so calls from different threads, with keys that are not equals, will be executed concurrently with minimal contention between the calls.
* Calling threads might be suspended.
* Calling execute from different threads with equals keys has the same memory semantics as locking and releasing a java.util.concurrent.locks.{@link Lock}.
*/
public final class PerKeySynchronizedExecutor<KEY_TYPE> extends ReadWriteExecutor implements Closeable {

	private static final int BASE_CONCURRENCY_LEVEL = 32;

	// Number of independent segments; more segments mean less contention between distinct keys.
	private final int concurrencyLevel;
	// Each segment maps keys (selected by bounded hash) to a reference-counted per-key executor.
	private final ConcurrencySegment<KEY_TYPE, SynchronizedExecutor>[] segments;
	// Guarded by the outer lock inherited from ReadWriteExecutor:
	// tasks run under READ, close() flips this under WRITE.
	private boolean closed = false;

	public PerKeySynchronizedExecutor() {
		this(BASE_CONCURRENCY_LEVEL);
	}

	@SuppressWarnings({"unchecked"})
	public PerKeySynchronizedExecutor(int concurrencyLevel) {
		this.concurrencyLevel = concurrencyLevel;
		// Unchecked generic-array creation: safe, only ConcurrencySegment instances are stored.
		segments = (ConcurrencySegment<KEY_TYPE, SynchronizedExecutor>[]) new ConcurrencySegment[concurrencyLevel];
		for (int i = 0; i < concurrencyLevel; i++) {
			segments[i] = new ConcurrencySegment<>(SynchronizedExecutor::new);
		}
	}

	/**
	 * Runs {@code task} exclusively with respect to other tasks submitted with an equal key.
	 * The outer READ lock only guards against a concurrent {@link #close()}.
	 *
	 * @throws IllegalStateException if this executor has been closed
	 */
	public void execute(KEY_TYPE key, Runnable task) {
		super.execute(LockMode.READ, () -> {
			if (closed) throw new IllegalStateException(PerKeySynchronizedExecutor.class.getSimpleName() + " is closed");
			int segmentIndex = HashUtil.boundedHash(key, concurrencyLevel);
			ConcurrencySegment<KEY_TYPE, SynchronizedExecutor> s = segments[segmentIndex];
			SynchronizedExecutor executor = s.getValue(key);
			try {
				executor.execute(task);
			} finally {
				// Always drop the reference so idle per-key executors can be reclaimed.
				s.releaseKey(key);
			}
		});
	}

	/** Same as {@link #execute(Object, Runnable)} but for IO tasks that may throw. */
	public void executeIO(KEY_TYPE key, IORunnable task) throws IOException {
		super.executeIO(LockMode.READ, () -> {
			if (closed) throw new IllegalStateException(PerKeySynchronizedExecutor.class.getSimpleName() + " is closed");
			int segmentIndex = HashUtil.boundedHash(key, concurrencyLevel);
			ConcurrencySegment<KEY_TYPE, SynchronizedExecutor> s = segments[segmentIndex];
			SynchronizedExecutor executor = s.getValue(key);
			try {
				executor.executeIO(task);
			} finally {
				s.releaseKey(key);
			}
		});
	}

	/** Value-returning variant of {@link #execute(Object, Runnable)}. */
	public <R> R execute(KEY_TYPE key, Supplier<R> task) {
		return super.execute(LockMode.READ, () -> {
			if (closed) throw new IllegalStateException(PerKeySynchronizedExecutor.class.getSimpleName() + " is closed");
			int segmentIndex = HashUtil.boundedHash(key, concurrencyLevel);
			ConcurrencySegment<KEY_TYPE, SynchronizedExecutor> s = segments[segmentIndex];
			SynchronizedExecutor executor = s.getValue(key);
			try {
				return executor.execute(task);
			} finally {
				s.releaseKey(key);
			}
		});
	}

	/** Value-returning IO variant; the task may throw {@link IOException}. */
	public <R> R executeIO(KEY_TYPE key, IOSupplier<R> task) throws IOException {
		return super.executeIO(LockMode.READ, () -> {
			if (closed) throw new IllegalStateException(PerKeySynchronizedExecutor.class.getSimpleName() + " is closed");
			int segmentIndex = HashUtil.boundedHash(key, concurrencyLevel);
			ConcurrencySegment<KEY_TYPE, SynchronizedExecutor> s = segments[segmentIndex];
			SynchronizedExecutor executor = s.getValue(key);
			try {
				return executor.executeIO(task);
			} finally {
				s.releaseKey(key);
			}
		});
	}

	/** Marks the executor closed; the WRITE lock waits for all in-flight tasks to finish. */
	@Override
	public void close() {
		super.execute(LockMode.WRITE, () -> {
			closed = true;
		});
	}
}

View File

@ -1,39 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.util.concurrent.Callable;
import java.util.concurrent.Semaphore;
import javax.annotation.Nonnull;
/**
 * A {@link CallableDecorator} that, before running the wrapped task, publishes the new queue size
 * and releases one permit of the limiting semaphore (freeing room for the next submission).
 */
public final class PermitReleasingCallableDecorator<T> extends CallableDecorator<T> {

	// Callback that republishes the current queue size to observers.
	@Nonnull
	private final Runnable queueSizeUpdater;

	// Semaphore bounding pending tasks; one permit is returned per started task.
	@Nonnull
	private final Semaphore semaphore;

	PermitReleasingCallableDecorator(@Nonnull final Callable<T> task,
			@Nonnull final Runnable queueSizeUpdater,
			@Nonnull final Semaphore semaphoreToRelease) {
		super(task);
		this.queueSizeUpdater = queueSizeUpdater;
		this.semaphore = semaphoreToRelease;
	}

	@Override
	public T call() throws Exception {
		try {
			queueSizeUpdater.run();
		} finally {
			// however execution goes, release permit for next task
			this.semaphore.release();
		}
		// Fix: the task was previously invoked via `return super.call()` INSIDE the finally
		// block, which silently discarded any exception thrown by queueSizeUpdater
		// (return-in-finally swallows in-flight throwables). The permit is still always
		// released above; updater failures now propagate to the caller.
		return super.call();
	}

	@Override
	public final String toString() {
		return String.format("%s[delegate='%s']", getClass().getSimpleName(), super.toString());
	}
}

View File

@ -1,38 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.util.concurrent.Semaphore;
import javax.annotation.Nonnull;
/**
 * A {@link RunnableDecorator} that, before running the wrapped task, publishes the new queue size
 * and releases one permit of the limiting semaphore (freeing room for the next submission).
 */
public final class PermitReleasingRunnableDecorator extends RunnableDecorator {

	// Callback that republishes the current queue size to observers.
	@Nonnull
	private final Runnable queueSizeUpdater;

	// Semaphore bounding pending tasks; one permit is returned per started task.
	@Nonnull
	private final Semaphore semaphore;

	PermitReleasingRunnableDecorator(@Nonnull final Runnable task,
			@Nonnull final Runnable queueSizeUpdater,
			@Nonnull final Semaphore semaphoreToRelease) {
		super(task);
		this.queueSizeUpdater = queueSizeUpdater;
		this.semaphore = semaphoreToRelease;
	}

	@Override
	public void run() {
		try {
			queueSizeUpdater.run();
		} finally {
			// however execution goes, release permit for next task
			this.semaphore.release();
		}
		// Fix: super.run() previously executed INSIDE the finally block, so the task ran even
		// when queueSizeUpdater failed, and any exception it threw replaced the updater's
		// exception. The permit is still always released above; failures now propagate cleanly.
		super.run();
	}

	@Override
	public final String toString() {
		return String.format("%s[delegate='%s']", getClass().getSimpleName(), super.toString());
	}
}

View File

@ -1,45 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.io.IOException;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;
import org.warp.commonutils.functional.IORunnable;
import org.warp.commonutils.functional.IOSupplier;
import org.warp.commonutils.locks.LockUtils;
/**
* An Executor which executes tasks on the caller thread.
* The tasks will be executed synchronously, so no overlapping between two tasks running on different threads will ever occur.
* Calling threads might be suspended.
* Executing a task has the same memory semantics as locking and releasing a java.util.concurrent.locks.{@link Lock}.
*/
/**
 * An executor which runs tasks on the caller thread under a shared
 * {@link ReentrantReadWriteLock}: READ tasks may overlap each other, WRITE tasks run exclusively.
 * Executing a task has the same memory semantics as locking and releasing a {@link Lock}.
 */
public class ReadWriteExecutor {

	private final ReentrantReadWriteLock lock;

	public ReadWriteExecutor() {
		this.lock = new ReentrantReadWriteLock();
	}

	/** Selects the read or write half of the lock for the requested mode. */
	private Lock lockFor(LockMode lockMode) {
		return lockMode == LockMode.READ ? lock.readLock() : lock.writeLock();
	}

	public void execute(LockMode lockMode, Runnable task) {
		LockUtils.lock(lockFor(lockMode), task);
	}

	public void executeIO(LockMode lockMode, IORunnable task) throws IOException {
		LockUtils.lockIO(lockFor(lockMode), task);
	}

	public <R> R execute(LockMode lockMode, Supplier<R> task) {
		return LockUtils.lock(lockFor(lockMode), task);
	}

	public <R> R executeIO(LockMode lockMode, IOSupplier<R> task) throws IOException {
		return LockUtils.lockIO(lockFor(lockMode), task);
	}

	/** Lock mode: READ allows concurrent readers, WRITE is exclusive. */
	public enum LockMode {
		READ,
		WRITE
	}
}

View File

@ -1,17 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.util.Objects;
/**
 * Base decorator for {@link Runnable}: forwards {@link #run()} to the wrapped instance.
 * Subclasses override {@code run()} to add behavior around the delegate.
 */
public abstract class RunnableDecorator implements Runnable {

	private final Runnable delegate;

	public RunnableDecorator(Runnable runnable) {
		this.delegate = Objects.requireNonNull(runnable);
	}

	/** Delegates directly to the wrapped runnable. */
	@Override
	public void run() {
		delegate.run();
	}
}

View File

@ -1,101 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import org.jetbrains.annotations.NotNull;
public abstract class SimplerExecutorServiceDecorator extends ExecutorServiceDecorator {
private final Executor executorDecorator;
public SimplerExecutorServiceDecorator(ExecutorService executorService,
Function<Executor, Executor> executorDecoratorInitializer) {
super(executorService);
this.executorDecorator = executorDecoratorInitializer.apply(executorService);
}
@Override
public void shutdown() {
super.shutdown();
}
@NotNull
@Override
public List<Runnable> shutdownNow() {
return super.shutdownNow();
}
@Override
public boolean isShutdown() {
return super.isShutdown();
}
@Override
public boolean isTerminated() {
return super.isTerminated();
}
@Override
public boolean awaitTermination(long l, @NotNull TimeUnit timeUnit) throws InterruptedException {
return super.awaitTermination(l, timeUnit);
}
@NotNull
@Override
public <T> Future<T> submit(@NotNull Callable<T> callable) {
return super.submit(callable);
}
@NotNull
@Override
public <T> Future<T> submit(@NotNull Runnable runnable, T t) {
return super.submit(runnable, t);
}
@NotNull
@Override
public Future<?> submit(@NotNull Runnable runnable) {
return super.submit(runnable);
}
@NotNull
@Override
public <T> List<Future<T>> invokeAll(@NotNull Collection<? extends Callable<T>> collection)
throws InterruptedException {
return super.invokeAll(collection);
}
@NotNull
@Override
public <T> List<Future<T>> invokeAll(@NotNull Collection<? extends Callable<T>> collection,
long l,
@NotNull TimeUnit timeUnit) throws InterruptedException {
return super.invokeAll(collection, l, timeUnit);
}
@NotNull
@Override
public <T> T invokeAny(@NotNull Collection<? extends Callable<T>> collection)
throws InterruptedException, ExecutionException {
return super.invokeAny(collection);
}
@Override
public <T> T invokeAny(@NotNull Collection<? extends Callable<T>> collection, long l, @NotNull TimeUnit timeUnit)
throws InterruptedException, ExecutionException, TimeoutException {
return super.invokeAny(collection, l, timeUnit);
}
@Override
public void execute(@NotNull Runnable runnable) {
executorDecorator.execute(runnable);
}
}

View File

@ -1,63 +0,0 @@
package org.warp.commonutils.concurrency.executor;
import java.io.IOException;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;
import org.warp.commonutils.functional.IORunnable;
import org.warp.commonutils.functional.IOSupplier;
/**
 * An Executor which executes tasks on the caller thread.
 * The tasks will be executed synchronously, so no overlapping between two tasks running on
 * different threads will ever occur.
 * Calling threads might be suspended.
 * Executing a task has the same memory semantics as locking and releasing a
 * java.util.concurrent.locks.{@link Lock}.
 */
public final class SynchronizedExecutor {

	// Guards every task execution; fairness/type depends on the injected lock.
	private final Lock mutex;

	public SynchronizedExecutor() {
		this(new ReentrantLock());
	}

	// Package-private: lets tests or siblings share an external lock.
	SynchronizedExecutor(Lock lock) {
		this.mutex = lock;
	}

	/** Runs the task while holding the lock. */
	public void execute(Runnable task) {
		mutex.lock();
		try {
			task.run();
		} finally {
			mutex.unlock();
		}
	}

	/** Runs the task while holding the lock and returns its result. */
	public <R> R execute(Supplier<R> task) {
		mutex.lock();
		try {
			return task.get();
		} finally {
			mutex.unlock();
		}
	}

	/** Runs the IO task while holding the lock; IOExceptions propagate to the caller. */
	public void executeIO(IORunnable task) throws IOException {
		mutex.lock();
		try {
			task.run();
		} finally {
			mutex.unlock();
		}
	}

	/** Runs the IO task while holding the lock and returns its result. */
	public <R> R executeIO(IOSupplier<R> task) throws IOException {
		mutex.lock();
		try {
			return task.get();
		} finally {
			mutex.unlock();
		}
	}
}

View File

@ -1,413 +0,0 @@
package org.warp.commonutils.concurrency.future;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Optional;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.warp.commonutils.functional.BiCompletableFunction;
import org.warp.commonutils.functional.CompletableFunction;
import org.warp.commonutils.functional.IOCompletableFunction;
import org.warp.commonutils.functional.IOSupplier;
import org.warp.commonutils.functional.TriCompletableFunction;
import org.warp.commonutils.type.FloatPriorityQueue;
import org.warp.commonutils.type.ScoredValue;
/**
 * Static helpers for composing, aggregating and draining {@link CompletableFuture}s.
 */
public class CompletableFutureUtils {

	/**
	 * Safely get a CompletableFuture or a FailedFuture
	 */
	public static <T> CompletableFuture<T> getCompletableFuture(Supplier<CompletableFuture<T>> completableFutureSupplier) {
		CompletableFuture<T> cf;
		try {
			cf = completableFutureSupplier.get();
		} catch (Exception ex) {
			// A supplier that throws synchronously is converted into a failed future.
			cf = CompletableFuture.failedFuture(ex);
		}
		return cf;
	}

	/**
	 * Safely get a CompletableFuture or a FailedFuture
	 */
	public static <F, T> CompletableFuture<T> getCompletableFuture(CompletableFunction<F, T> completableFutureFunction, F value) {
		return getCompletableFuture(() -> completableFutureFunction.apply(value));
	}

	/**
	 * Safely get a CompletableFuture or a FailedFuture
	 */
	public static <F, T> CompletableFuture<T> getCompletableFutureSupply(CompletableFunction<F, T> completableFutureFunction, Supplier<F> valueSupplier) {
		return getCompletableFuture(() -> completableFutureFunction.apply(valueSupplier.get()));
	}

	/**
	 * Safely get a CompletableFuture or a FailedFuture
	 */
	public static <F1, F2, T> CompletableFuture<T> getCompletableFuture(BiCompletableFunction<F1, F2, T> completableFutureFunction, F1 value1, F2 value2) {
		return getCompletableFuture(() -> completableFutureFunction.apply(value1, value2));
	}

	/**
	 * Safely get a CompletableFuture or a FailedFuture
	 */
	public static <F1, F2, T> CompletableFuture<T> getCompletableFutureSupply(BiCompletableFunction<F1, F2, T> completableFutureFunction, Supplier<F1> value1Supplier, Supplier<F2> value2Supplier) {
		return getCompletableFuture(() -> completableFutureFunction.apply(value1Supplier.get(), value2Supplier.get()));
	}

	/**
	 * Safely get a CompletableFuture or a FailedFuture
	 */
	public static <F1, F2, F3, T> CompletableFuture<T> getCompletableFuture(TriCompletableFunction<F1, F2, F3, T> completableFutureFunction, F1 value1, F2 value2, F3 value3) {
		return getCompletableFuture(() -> completableFutureFunction.apply(value1, value2, value3));
	}

	/**
	 * Safely get a CompletableFuture or a FailedFuture
	 */
	public static <F1, F2, F3, T> CompletableFuture<T> getCompletableFutureSupply(TriCompletableFunction<F1, F2, F3, T> completableFutureFunction, Supplier<F1> value1Supplier, Supplier<F2> value2Supplier, Supplier<F3> value3Supplier) {
		return getCompletableFuture(() -> completableFutureFunction.apply(value1Supplier.get(), value2Supplier.get(), value3Supplier.get()));
	}

	////

	/**
	 * Safely get a CompletableFuture or a FailedFuture
	 */
	public static <T> CompletableFuture<T> getCompletableFutureIO(IOSupplier<CompletableFuture<T>> completableFutureSupplier) {
		CompletableFuture<T> cf;
		try {
			cf = completableFutureSupplier.get();
		} catch (Exception ex) {
			cf = CompletableFuture.failedFuture(ex);
		}
		return cf;
	}

	/**
	 * Safely get a CompletableFuture or a FailedFuture
	 */
	public static <F, T> CompletableFuture<T> getCompletableFutureIO(IOCompletableFunction<F, T> completableFutureFunction, F value) {
		return getCompletableFutureIO(() -> completableFutureFunction.apply(value));
	}

	/**
	 * Safely get a CompletableFuture or a FailedFuture
	 */
	public static <F, T> CompletableFuture<T> getCompletableFutureIOSupply(IOCompletableFunction<F, T> completableFutureFunction, IOSupplier<F> valueSupplier) {
		return getCompletableFutureIO(() -> completableFutureFunction.apply(valueSupplier.get()));
	}

	/**
	 * Aggregate multiple {@link CompletableFuture} lists into a single {@link CompletableFuture} list
	 *
	 * @param futureLists A collection of {@link CompletableFuture} lists.
	 * @param <T> List elements type
	 * @return {@link CompletableFuture} list
	 */
	public static <T> CompletableFuture<List<T>> aggregateList(Collection<CompletableFuture<List<T>>> futureLists) {
		// NOTE(review): the reduce identity wraps a single mutable ArrayList that every
		// accumulation step appends to; with a parallel stream the identity may be reused
		// concurrently — confirm whether sequential reduction was intended here.
		final CompletableFuture<List<T>> identityAggregatedResult = CompletableFuture.completedFuture(new ArrayList<T>());
		return futureLists.parallelStream().reduce(identityAggregatedResult, (currentAggregatedResult, futureList) -> {
			return currentAggregatedResult.thenApplyAsync((aggregatedList) -> {
				aggregatedList.addAll(futureList.join());
				return aggregatedList;
			});
		});
	}

	/**
	 * Creates a new empty collection of disaggregated future results future lists
	 */
	public static <T> Collection<CompletableFuture<List<CompletableFuture<T>>>> createDisaggregatedResultsList() {
		return new ArrayList<>(10);
	}

	/**
	 * Adds a disaggregated future result list to the collection.
	 *
	 * @param disaggregatedResults destination collection
	 * @param result the future list to add
	 * @param <T> list elements type
	 */
	public static <T> void addDisaggregatedList(
			Collection<CompletableFuture<List<CompletableFuture<T>>>> disaggregatedResults,
			CompletableFuture<List<CompletableFuture<T>>> result) {
		disaggregatedResults.add(result);
	}

	/**
	 * Add a result, upcasting its element type to {@code T}.
	 */
	public static <T, U extends T> void addDisaggregatedListCast(
			Collection<CompletableFuture<List<CompletableFuture<T>>>> disaggregatedResults,
			CompletableFuture<List<CompletableFuture<U>>> result) {
		addDisaggregatedListCastForced(disaggregatedResults, result);
	}

	public static <T, U> void addDisaggregatedListCastForced(
			Collection<CompletableFuture<List<CompletableFuture<T>>>> disaggregatedResults,
			CompletableFuture<List<CompletableFuture<U>>> result) {
		disaggregatedResults.add(result.thenApply((originalList) -> {
			List<CompletableFuture<T>> resultList = new ArrayList<>();
			for (CompletableFuture<U> originalFuture : originalList) {
				resultList.add(originalFuture.thenApply((originalValue) -> {
					//noinspection unchecked
					return (T) originalValue;
				}));
			}
			return resultList;
		}));
	}

	/**
	 * Aggregate multiple {@link CompletableFuture} lists into a single {@link CompletableFuture} list
	 *
	 * @param futureFloatPriorityQueues A collection of {@link CompletableFuture} lists.
	 * @param <T> List elements type
	 * @return {@link CompletableFuture} list
	 */
	public static <T> CompletableFuture<FloatPriorityQueue<T>> aggregatePq(Collection<CompletableFuture<FloatPriorityQueue<T>>> futureFloatPriorityQueues) {
		final CompletableFuture<FloatPriorityQueue<T>> identityAggregatedResult = CompletableFuture.completedFuture(new FloatPriorityQueue<>());
		return futureFloatPriorityQueues.parallelStream().reduce(identityAggregatedResult, (currentAggregatedResult, futureFloatPriorityQueue) -> {
			return currentAggregatedResult.thenApplyAsync((aggregatedFloatPriorityQueue) -> {
				var futureFloatPriorityQueueValues = futureFloatPriorityQueue.join();
				// Guard against offering a queue's items into itself (identity reuse).
				if (futureFloatPriorityQueueValues == aggregatedFloatPriorityQueue) {
					return aggregatedFloatPriorityQueue;
				}
				futureFloatPriorityQueueValues.forEachItem(aggregatedFloatPriorityQueue::offer);
				return aggregatedFloatPriorityQueue;
			});
		});
	}

	/**
	 * Creates a new empty collection of disaggregated future results future lists
	 */
	public static <T> Collection<CompletableFuture<FloatPriorityQueue<CompletableFuture<T>>>> createDisaggregatedResultsPq() {
		return FloatPriorityQueue.synchronizedPq(10);
	}

	/**
	 * Adds a disaggregated future result queue to the collection.
	 *
	 * @param disaggregatedResults destination collection
	 * @param result the future queue to add
	 * @param <T> queue elements type
	 */
	public static <T> void addDisaggregatedPq(
			Collection<CompletableFuture<FloatPriorityQueue<CompletableFuture<T>>>> disaggregatedResults,
			CompletableFuture<FloatPriorityQueue<CompletableFuture<T>>> result) {
		disaggregatedResults.add(result);
	}

	/**
	 * Add a result, upcasting its element type to {@code T}.
	 */
	public static <T, U extends T> void addDisaggregatedPqCast(
			Collection<CompletableFuture<FloatPriorityQueue<CompletableFuture<T>>>> disaggregatedResults,
			CompletableFuture<FloatPriorityQueue<CompletableFuture<U>>> result) {
		addDisaggregatedPqCastForced(disaggregatedResults, result);
	}

	public static <T, U> void addDisaggregatedPqCastForced(
			Collection<CompletableFuture<FloatPriorityQueue<CompletableFuture<T>>>> disaggregatedResults,
			CompletableFuture<FloatPriorityQueue<CompletableFuture<U>>> result) {
		disaggregatedResults.add(result.thenApply((originalFloatPriorityQueue) -> {
			FloatPriorityQueue<CompletableFuture<T>> resultFloatPriorityQueue = new FloatPriorityQueue<>();
			originalFloatPriorityQueue.forEachItem((originalFuture) -> {
				resultFloatPriorityQueue.offer(ScoredValue.of(originalFuture.getScore(),
						originalFuture.getValue().thenApply((originalValue) -> {
							//noinspection unchecked
							return (T) originalValue;
						})
				));
			});
			return resultFloatPriorityQueue;
		}));
	}

	/** Joins every future and collects the results into a {@link Set}. */
	public static <T> Set<T> collectToSet(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList) {
		return futureList.join().parallelStream().map(CompletableFuture::join).collect(Collectors.toSet());
	}

	/** Joins every future and collects at most {@code limit} results into a {@link Set}. */
	public static <T> Set<T> collectToSet(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList, int limit) {
		// FIX: previously hard-coded .limit(10), silently ignoring the limit parameter.
		return futureList.join().parallelStream().map(CompletableFuture::join).limit(limit).collect(Collectors.toSet());
	}

	public static <T> List<T> collectToList(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList) {
		return futureList.join().stream().map(CompletableFuture::join).collect(Collectors.toList());
	}

	public static <T> List<T> collectToList(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList, int limit) {
		return futureList.join().stream().map(CompletableFuture::join).limit(limit).collect(Collectors.toList());
	}

	public static <T> LinkedHashSet<T> collectToLinkedSet(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList) {
		return futureList.join().stream().map(CompletableFuture::join).collect(Collectors.toCollection(LinkedHashSet::new));
	}

	public static <T> LinkedHashSet<T> collectToLinkedSet(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList,
			int limit) {
		return futureList.join().stream().map(CompletableFuture::join).limit(limit)
				.collect(Collectors.toCollection(LinkedHashSet::new));
	}

	public static <T> FloatPriorityQueue<T> collectToPq(CompletableFuture<? extends FloatPriorityQueue<CompletableFuture<T>>> futureList) {
		var internalPq = futureList.join().streamItems().map(t -> {
			if (t.getValue() != null) {
				return ScoredValue.of(t.getScore(), t.getValue().join());
			} else {
				return ScoredValue.of(t.getScore(), (T) null);
			}
		}).collect(Collectors.toCollection(PriorityQueue::new));
		return new FloatPriorityQueue<>(internalPq);
	}

	public static <T> FloatPriorityQueue<T> collectToPq(CompletableFuture<? extends FloatPriorityQueue<CompletableFuture<T>>> futureList,
			int limit) {
		var internalPq = futureList.join().streamItems().map(t -> {
			if (t.getValue() != null) {
				return ScoredValue.of(t.getScore(), t.getValue().join());
			} else {
				return ScoredValue.of(t.getScore(), (T) null);
			}
		}).limit(limit).collect(Collectors.toCollection(PriorityQueue::new));
		return new FloatPriorityQueue<>(internalPq);
	}

	public static <T> TreeSet<T> collectToTreeSet(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList) {
		return futureList.join().stream().map(CompletableFuture::join).collect(Collectors.toCollection(TreeSet::new));
	}

	public static <T> TreeSet<T> collectToTreeSet(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList, int limit) {
		return futureList.join().stream().map(CompletableFuture::join).limit(limit)
				.collect(Collectors.toCollection(TreeSet::new));
	}

	public static <T> TreeSet<T> collectToTreeSet(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList, Comparator<T> comparator) {
		return futureList.join().stream().map(CompletableFuture::join).collect(Collectors.toCollection(() -> new TreeSet<>(comparator)));
	}

	public static <T> TreeSet<T> collectToTreeSet(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList, Comparator<T> comparator, int limit) {
		return futureList.join().stream().map(CompletableFuture::join).limit(limit)
				.collect(Collectors.toCollection(() -> new TreeSet<>(comparator)));
	}

	/** Returns any joined result, or an empty Optional if the collection is empty. */
	public static <T> Optional<T> anyOrNull(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList) {
		return futureList.join().parallelStream().map(CompletableFuture::join).findAny();
	}

	/** Returns the first joined result, or an empty Optional if the collection is empty. */
	public static <T> Optional<T> firstOrNull(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList) {
		return futureList.join().stream().map(CompletableFuture::join).findFirst();
	}

	public static <T> void forEachOrdered(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList,
			Consumer<T> consumer) {
		var futures = futureList.join();
		futures.stream().map(CompletableFuture::join).forEachOrdered(consumer);
	}

	public static <T> void forEachOrdered(CompletableFuture<List<CompletableFuture<T>>> futureList,
			Consumer<T> consumer, boolean reverse) {
		var futures = futureList.join();
		if (reverse) {
			// FIX: reverse a defensive copy. The joined list may be unmodifiable
			// (Collections.reverse would throw), and the caller's list must not be
			// mutated as a side effect of iteration.
			futures = new ArrayList<>(futures);
			Collections.reverse(futures);
		}
		futures.stream().map(CompletableFuture::join).forEachOrdered(consumer);
	}

	public static <T> void forEach(CompletableFuture<? extends Collection<CompletableFuture<T>>> futureList, Consumer<T> consumer) {
		futureList.join().parallelStream().map(CompletableFuture::join).forEach(consumer);
	}

	/**
	 * Use CompletableFutureUtils.getCompletableFuture(supplier);
	 */
	@Deprecated
	public static <T> CompletableFuture<T> catchUncheckedExceptions(Supplier<CompletableFuture<T>> supplier) {
		return getCompletableFuture(supplier);
	}

	/** Chains the given futures one after another; completes when the last one completes. */
	public static CompletableFuture<Void> runSequence(Collection<CompletableFuture<?>> collection) {
		if (collection.isEmpty()) {
			return CompletableFuture.completedFuture(null);
		} else {
			// FIX: start from an already-completed future. The previous implementation
			// chained onto "new CompletableFuture<Void>()", which is never completed, so
			// the returned future could never complete.
			CompletableFuture<Void> result = CompletableFuture.completedFuture(null);
			for (CompletableFuture<?> completableFuture : collection) {
				result = result.thenCompose(x -> completableFuture.thenRun(() -> {}));
			}
			return result;
		}
	}

	public static CompletableFuture<Void> runSequenceAsync(Collection<CompletableFuture<?>> collection, ExecutorService executorService) {
		var result = CompletableFuture.<Void>completedFuture(null);
		for (CompletableFuture<?> completableFuture : collection) {
			result = result.thenComposeAsync(x -> completableFuture.thenRun(() -> {}), executorService);
		}
		return result;
	}

	/**
	 * Accept values synchronously from an async sequence
	 */
	public static <T> CompletableFuture<?> acceptSequenceAsync(Collection<CompletableFuture<T>> collection,
			Function<T, CompletionStage<?>> runner,
			ExecutorService executorService) {
		CompletableFuture<?> result = CompletableFuture.completedFuture(null);
		for (CompletableFuture<T> completableFuture : collection) {
			result = result.thenComposeAsync(x -> completableFuture.thenComposeAsync(runner::apply, executorService),
					executorService
			);
		}
		return result;
	}

	/**
	 * Accept values synchronously from an async sequence
	 */
	public static <T> CompletableFuture<?> acceptSequenceAsync(Collection<CompletableFuture<T>> collection,
			Consumer<T> runner,
			ExecutorService executorService) {
		CompletableFuture<?> result = CompletableFuture.completedFuture(null);
		for (CompletableFuture<T> completableFuture : collection) {
			result = result.thenComposeAsync(x -> completableFuture.thenAcceptAsync(runner, executorService), executorService);
		}
		return result;
	}

	/** Applies each async transformation to the previous result, starting from initialValue. */
	public static <T> CompletableFuture<T> applySequenceAsync(T initialValue, Collection<Function<T, CompletableFuture<T>>> collection, ExecutorService executorService) {
		var result = CompletableFuture.completedFuture(initialValue);
		for (Function<T, CompletableFuture<T>> item : collection) {
			result = result.thenComposeAsync(item, executorService);
		}
		return result;
	}

	/** Defers the supplier onto the given executor. */
	public static <U> CompletableFuture<U> composeAsync(
			Supplier<? extends CompletionStage<U>> supp,
			Executor executor) {
		return CompletableFuture.completedFuture(null).thenComposeAsync((_x) -> supp.get(), executor);
	}

	/** Defers the IO supplier onto the given executor; IOExceptions become failed futures. */
	public static <U> CompletableFuture<U> composeAsyncIO(
			IOSupplier<CompletableFuture<U>> supp,
			Executor executor) {
		return CompletableFuture.completedFuture(null).thenComposeAsync((_x) -> getCompletableFutureIO(supp), executor);
	}
}

View File

@ -1,67 +0,0 @@
package org.warp.commonutils.concurrency.future;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.StampedLock;
import java.util.function.Supplier;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.functional.IOSupplier;
/**
 * Helpers that run a future-producing supplier while holding a {@link StampedLock},
 * releasing the lock only when the produced future completes (normally or exceptionally).
 * A null lock means "no locking".
 */
public class FutureLockUtils {

	/** Runs {@code r} under the read lock; unlocks when the returned future completes. */
	public static <T> CompletableFuture<T> readLock(@Nullable StampedLock lock, @NotNull Supplier<CompletableFuture<T>> r) {
		final long stamp = (lock == null) ? 0 : lock.readLock();
		return CompletableFutureUtils.getCompletableFuture(r).whenComplete((result, err) -> {
			if (lock != null) {
				lock.unlockRead(stamp);
			}
		});
	}

	/** Runs {@code r} under the write lock; unlocks when the returned future completes. */
	public static <T> CompletableFuture<T> writeLock(@Nullable StampedLock lock, @NotNull Supplier<CompletableFuture<T>> r) {
		final long stamp = (lock == null) ? 0 : lock.writeLock();
		return CompletableFutureUtils.getCompletableFuture(r).whenComplete((result, err) -> {
			if (lock != null) {
				lock.unlockWrite(stamp);
			}
		});
	}

	/** IO variant of {@link #readLock}: a thrown IOException becomes a failed future. */
	public static <T> CompletableFuture<T> readLockIO(@Nullable StampedLock lock, @NotNull IOSupplier<CompletableFuture<T>> r) {
		final long stamp = (lock == null) ? 0 : lock.readLock();
		return CompletableFutureUtils.getCompletableFutureIO(r).whenComplete((result, err) -> {
			if (lock != null) {
				lock.unlockRead(stamp);
			}
		});
	}

	/** IO variant of {@link #writeLock}: a thrown IOException becomes a failed future. */
	public static <T> CompletableFuture<T> writeLockIO(@Nullable StampedLock lock, @NotNull IOSupplier<CompletableFuture<T>> r) {
		final long stamp = (lock == null) ? 0 : lock.writeLock();
		return CompletableFutureUtils.getCompletableFutureIO(r).whenComplete((result, err) -> {
			if (lock != null) {
				lock.unlockWrite(stamp);
			}
		});
	}
}

View File

@ -1,23 +0,0 @@
package org.warp.commonutils.concurrency.future;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
public class FutureUtils {

	/**
	 * Waits for *all* futures to complete and returns a list of results. If *any* future completes exceptionally then the
	 * resulting future will also complete exceptionally.
	 *
	 * @param futures futures to wait for
	 * @param <T> element type
	 * @return a future completing with the joined results, in the input order
	 */
	public static <T> CompletableFuture<List<T>> all(List<CompletableFuture<T>> futures) {
		// Wildcard element type instead of the previous raw CompletableFuture[] array
		// (removes the raw-type/unchecked warning without changing behavior).
		CompletableFuture<?>[] cfs = futures.toArray(new CompletableFuture<?>[0]);
		return CompletableFuture.allOf(cfs)
				.thenApply(ignored -> futures.stream().map(CompletableFuture::join).collect(Collectors.toList()));
	}
}

View File

@ -1,50 +0,0 @@
package org.warp.commonutils.concurrency.future;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
public class SizedFutureList<T> {
private final CompletableFuture<List<CompletableFuture<T>>> data;
private final CompletableFuture<Integer> size;
public SizedFutureList(CompletableFuture<List<CompletableFuture<T>>> data, CompletableFuture<Integer> size) {
this.data = data;
this.size = size;
}
public static <T> SizedFutureList<T> empty() {
return new SizedFutureList<>(CompletableFuture.completedFuture(List.of()), CompletableFuture.completedFuture(0));
}
public CompletableFuture<List<CompletableFuture<T>>> getData() {
return data;
}
public CompletableFuture<Integer> getSize() {
return size;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
SizedFutureList<?> that = (SizedFutureList<?>) o;
return Objects.equals(data, that.data) && Objects.equals(size, that.size);
}
@Override
public int hashCode() {
return Objects.hash(data, size);
}
@Override
public String toString() {
return "SizedFutureList{" + "data=" + data + ", size=" + size + '}';
}
}

View File

@ -1,65 +0,0 @@
package org.warp.commonutils.concurrency.future;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
/**
 * Pairs a future list of future elements (interpreted as a set) with a separately-computed
 * future size. Equality compares the future instances themselves, not their results.
 */
public class SizedFutureSet<T> {

	private final CompletableFuture<List<CompletableFuture<T>>> data;
	private final CompletableFuture<Integer> size;

	public SizedFutureSet(CompletableFuture<List<CompletableFuture<T>>> data, CompletableFuture<Integer> size) {
		this.data = data;
		this.size = size;
	}

	/** An already-completed set with no elements and size 0. */
	public static <T> SizedFutureSet<T> empty() {
		return new SizedFutureSet<>(CompletableFuture.completedFuture(List.of()), CompletableFuture.completedFuture(0));
	}

	/** The element futures, preserving insertion order. */
	public CompletableFuture<LinkedHashSet<CompletableFuture<T>>> getFutureDataOrdered() {
		return data.thenApply(LinkedHashSet::new);
	}

	/** The element futures without ordering guarantees. */
	public CompletableFuture<Set<CompletableFuture<T>>> getFutureDataUnordered() {
		return data.thenApply(HashSet::new);
	}

	/** Joins every element future, preserving insertion order. Blocks. */
	public LinkedHashSet<T> getDataOrdered() {
		return CompletableFutureUtils.collectToLinkedSet(data);
	}

	/** Joins every element future without ordering guarantees. Blocks. */
	public Set<T> getDataUnordered() {
		return CompletableFutureUtils.collectToSet(data);
	}

	public CompletableFuture<Integer> getSize() {
		return size;
	}

	@Override
	public boolean equals(Object o) {
		if (this == o) {
			return true;
		}
		if (o == null || getClass() != o.getClass()) {
			return false;
		}
		SizedFutureSet<?> that = (SizedFutureSet<?>) o;
		return Objects.equals(data, that.data) && Objects.equals(size, that.size);
	}

	@Override
	public int hashCode() {
		return Objects.hash(data, size);
	}

	@Override
	public String toString() {
		// FIX: was "SizedFutureList{...}", copy-pasted from SizedFutureList.
		return "SizedFutureSet{" + "data=" + data + ", size=" + size + '}';
	}
}

View File

@ -1,19 +0,0 @@
package org.warp.commonutils.error;
/**
 * An unchecked index-range exception that accepts {@code long} indices
 * (unlike {@code java.lang.IndexOutOfBoundsException}, which this class intentionally
 * shadows within its own package).
 */
public class IndexOutOfBoundsException extends RuntimeException {

	/** No detail message. */
	public IndexOutOfBoundsException() {
	}

	/** Uses {@code s} as the detail message. */
	public IndexOutOfBoundsException(String s) {
		super(s);
	}

	/** Reports a single out-of-range index. */
	public IndexOutOfBoundsException(long index) {
		this("Index out of range: " + index);
	}

	/** Reports an index together with the valid inclusive range. */
	public IndexOutOfBoundsException(long index, long min, long max) {
		this("Index " + index + " out of range (from " + min + " to " + max + ")");
	}
}

View File

@ -1,21 +0,0 @@
package org.warp.commonutils.error;
import java.io.IOException;
/**
 * Signals that a component failed to initialize. Extends {@link IOException},
 * so it is checked and can be handled alongside other IO failures.
 */
public class InitializationException extends IOException {

	/** No detail message and no cause. */
	public InitializationException() {
		super();
	}

	/** With a detail message. */
	public InitializationException(String text) {
		super(text);
	}

	/** With a cause; the message defaults to {@code cause.toString()}. */
	public InitializationException(Throwable cause) {
		super(cause);
	}

	/** With both a detail message and a cause. */
	public InitializationException(String message, Throwable cause) {
		super(message, cause);
	}
}

View File

@ -1,7 +0,0 @@
package org.warp.commonutils.functional;
import java.util.concurrent.CompletableFuture;
public interface BiCompletableFunction<T1, T2, U> {
CompletableFuture<U> apply(T1 value1, T2 value2);
}

View File

@ -1,7 +0,0 @@
package org.warp.commonutils.functional;
import java.util.concurrent.CompletableFuture;
/**
 * A one-argument function returning a {@link CompletableFuture}.
 *
 * @param <T> argument type
 * @param <U> result type
 */
@FunctionalInterface
public interface CompletableFunction<T, U> {

	CompletableFuture<U> apply(T value);
}

View File

@ -1,19 +0,0 @@
package org.warp.commonutils.functional;
import java.util.function.Consumer;
import java.util.function.Function;
import org.warp.commonutils.functional.Unchecked.UncheckedConsumer;
/**
 * Casts functional values accepting {@code Object} to narrower argument types.
 * These casts are safe by contravariance: a function that accepts any Object
 * also accepts any T. The {@code @SuppressWarnings} annotations document this
 * (previously the unchecked casts were left unannotated).
 */
public class Generic {

	/** Narrows a {@code Function<Object, U>} to {@code Function<T, U>}; safe by contravariance. */
	@SuppressWarnings("unchecked")
	public static <T, U> Function<T, U> function(Function<Object, U> fnc) {
		return (Function<T, U>) fnc;
	}

	/** Narrows a {@code Consumer<Object>} to {@code Consumer<T>}; safe by contravariance. */
	@SuppressWarnings("unchecked")
	public static <T> Consumer<T> consumer(Consumer<Object> fnc) {
		return (Consumer<T>) fnc;
	}

	/** Narrows an {@code UncheckedConsumer<Object>} to {@code UncheckedConsumer<T>}; safe by contravariance. */
	@SuppressWarnings("unchecked")
	public static <T> UncheckedConsumer<T> consumerExc(UncheckedConsumer<Object> fnc) {
		return (UncheckedConsumer<T>) fnc;
	}
}

View File

@ -1,8 +0,0 @@
package org.warp.commonutils.functional;
import java.io.IOException;
/**
 * A boolean supplier that may throw an {@link IOException}.
 */
@FunctionalInterface
public interface IOBooleanSupplier {

	boolean get() throws IOException;
}

View File

@ -1,8 +0,0 @@
package org.warp.commonutils.functional;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
/**
 * A one-argument function returning a {@link CompletableFuture}, allowed to
 * throw an {@link IOException} synchronously.
 *
 * @param <T> argument type
 * @param <U> result type
 */
@FunctionalInterface
public interface IOCompletableFunction<T, U> {

	CompletableFuture<U> apply(T value) throws IOException;
}

View File

@ -1,8 +0,0 @@
package org.warp.commonutils.functional;
import java.io.IOException;
/**
 * A consumer that may throw an {@link IOException}.
 *
 * @param <T> consumed value type
 */
@FunctionalInterface
public interface IOConsumer<T> {

	void consume(T value) throws IOException;
}

View File

@ -1,8 +0,0 @@
package org.warp.commonutils.functional;
import java.io.IOException;
/**
 * A function that may throw an {@link IOException}.
 *
 * @param <T> argument type
 * @param <U> result type
 */
@FunctionalInterface
public interface IOFunction<T, U> {

	U apply(T data) throws IOException;
}

View File

@ -1,8 +0,0 @@
package org.warp.commonutils.functional;
import java.io.IOException;
/**
 * An {@code int} supplier that may throw an {@link IOException}.
 */
@FunctionalInterface
public interface IOIntegerSupplier {

	int get() throws IOException;
}

View File

@ -1,8 +0,0 @@
package org.warp.commonutils.functional;
import java.io.IOException;
/**
 * A {@code long} supplier that may throw an {@link IOException}.
 */
@FunctionalInterface
public interface IOLongSupplier {

	long get() throws IOException;
}

View File

@ -1,8 +0,0 @@
package org.warp.commonutils.functional;
import java.io.IOException;
/**
 * A runnable that may throw an {@link IOException}.
 */
@FunctionalInterface
public interface IORunnable {

	void run() throws IOException;
}

View File

@ -1,8 +0,0 @@
package org.warp.commonutils.functional;
import java.io.IOException;
/**
 * A supplier that may throw an {@link IOException}.
 *
 * @param <T> supplied value type
 */
@FunctionalInterface
public interface IOSupplier<T> {

	T get() throws IOException;
}

View File

@ -1,43 +0,0 @@
package org.warp.commonutils.functional;
import java.util.Iterator;
import java.util.function.Consumer;
import java.util.function.Function;
import org.jetbrains.annotations.Nullable;
/**
 * An {@link Iterator} view that applies a mapping function to every element of a
 * source iterator on the fly. {@link #remove()} is forwarded to the source.
 *
 * @param <A> source element type
 * @param <B> mapped element type
 */
public class MappedIterator<A, B> implements Iterator<B> {

	private final Iterator<A> source;
	private final Function<A, B> mapper;

	private MappedIterator(Iterator<A> iterator, Function<A, B> mappingFunction) {
		this.source = iterator;
		this.mapper = mappingFunction;
	}

	/** Wraps {@code originalIterator} so each element is passed through {@code mappingFunction}. */
	public static <T, U> Iterator<U> of(Iterator<T> originalIterator, Function<@Nullable T, @Nullable U> mappingFunction) {
		return new MappedIterator<>(originalIterator, mappingFunction);
	}

	@Override
	public boolean hasNext() {
		return source.hasNext();
	}

	@Override
	public B next() {
		return mapper.apply(source.next());
	}

	@Override
	public void remove() {
		source.remove();
	}

	@Override
	public void forEachRemaining(Consumer<? super B> action) {
		source.forEachRemaining(item -> action.accept(mapper.apply(item)));
	}
}

View File

@ -1,7 +0,0 @@
package org.warp.commonutils.functional;
import java.util.concurrent.CompletableFuture;
public interface TriCompletableFunction<T1, T2, T3, U> {
CompletableFuture<U> apply(T1 value1, T2 value2, T3 value3);
}

View File

@ -1,54 +0,0 @@
package org.warp.commonutils.functional;
import java.util.Objects;
import java.util.function.Consumer;
/**
* Represents an operation that accepts three input arguments and returns no
* result. This is the three-arity specialization of {@link Consumer}.
* Unlike most other functional interfaces, {@code TriConsumer} is expected
* to operate via side-effects.
*
* <p>This is a <a href="package-summary.html">functional interface</a>
* whose functional method is {@link #accept(Object, Object, Object)}.
*
* @param <T> the type of the first argument to the operation
* @param <U> the type of the second argument to the operation
* @param <U> the type of the thord argument to the operation
*
* @see Consumer
* @since 1.8
*/
@FunctionalInterface
public interface TriConsumer<T, U, V> {
/**
* Performs this operation on the given arguments.
*
* @param t the first input argument
* @param u the second input argument
* @param v the third input argument
*/
void accept(T t, U u, V v);
/**
* Returns a composed {@code TriConsumer} that performs, in sequence, this
* operation followed by the {@code after} operation. If performing either
* operation throws an exception, it is relayed to the caller of the
* composed operation. If performing this operation throws an exception,
* the {@code after} operation will not be performed.
*
* @param after the operation to perform after this operation
* @return a composed {@code TriConsumer} that performs in sequence this
* operation followed by the {@code after} operation
* @throws NullPointerException if {@code after} is null
*/
default org.warp.commonutils.functional.TriConsumer<T, U, V> andThen(org.warp.commonutils.functional.TriConsumer<? super T, ? super U, ? super V> after) {
Objects.requireNonNull(after);
return (l, r, u) -> {
accept(l, r, u);
after.accept(l, r, u);
};
}
}

View File

@ -1,51 +0,0 @@
package org.warp.commonutils.functional;
import java.util.Objects;
import java.util.function.Function;
/**
* Represents a function that accepts three arguments and produces a result.
* This is the three-arity specialization of {@link Function}.
*
* <p>This is a <a href="package-summary.html">functional interface</a>
* whose functional method is {@link #apply(Object, Object, Object)}.
*
* @param <T> the type of the first argument to the function
* @param <U> the type of the second argument to the function
* @param <X> the type of the third argument to the function
* @param <R> the type of the result of the function
*
* @see Function
* @since 1.8
*/
@FunctionalInterface
public interface TriFunction<T, U, X, R> {
/**
* Applies this function to the given arguments.
*
* @param t the first function argument
* @param u the second function argument
* @param x the third function argument
* @return the function result
*/
R apply(T t, U u, X x);
/**
* Returns a composed function that first applies this function to
* its input, and then applies the {@code after} function to the result.
* If evaluation of either function throws an exception, it is relayed to
* the caller of the composed function.
*
* @param <V> the type of output of the {@code after} function, and of the
* composed function
* @param after the function to apply after this function is applied
* @return a composed function that first applies this function and then
* applies the {@code after} function
* @throws NullPointerException if after is null
*/
default <V> org.warp.commonutils.functional.TriFunction<T, U, X, V> andThen(Function<? super R, ? extends V> after) {
Objects.requireNonNull(after);
return (T t, U u, X x) -> after.apply(apply(t, u, x));
}
}

View File

@ -1,30 +0,0 @@
package org.warp.commonutils.functional;
import java.util.function.Function;
/**
 * Adapts a consumer that may throw a checked {@link Exception} into a
 * {@link Function} whose outcome is captured in an {@link UncheckedResult}
 * instead of being propagated.
 *
 * @param <T> the type of the value consumed
 */
public class Unchecked<T> implements Function<T, UncheckedResult> {

	private final UncheckedConsumer<T> uncheckedConsumer;

	public Unchecked(UncheckedConsumer<T> uncheckedConsumer) {
		this.uncheckedConsumer = uncheckedConsumer;
	}

	/**
	 * Static factory equivalent of {@link #Unchecked(UncheckedConsumer)}.
	 *
	 * @param uncheckedConsumer the consumer to wrap
	 * @return a new {@code Unchecked} wrapping the given consumer
	 */
	public static <T> Unchecked<T> wrap(UncheckedConsumer<T> uncheckedConsumer) {
		return new Unchecked<>(uncheckedConsumer);
	}

	/**
	 * Feeds {@code t} to the wrapped consumer.
	 *
	 * @return a successful {@link UncheckedResult}, or one carrying the
	 *         exception thrown by the consumer
	 */
	@Override
	public UncheckedResult apply(T t) {
		try {
			uncheckedConsumer.consume(t);
			return new UncheckedResult();
		} catch (Exception e) {
			return new UncheckedResult(e);
		}
	}

	/**
	 * A consumer whose {@link #consume(Object)} may throw a checked exception.
	 *
	 * @param <T> the type of the value consumed
	 */
	@FunctionalInterface
	public interface UncheckedConsumer<T> {

		// "public" is implicit on interface members; the redundant modifier was removed.
		void consume(T value) throws Exception;
	}
}

View File

@ -1,33 +0,0 @@
package org.warp.commonutils.functional;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
 * Holds the outcome of an "unchecked" operation: either success (no captured
 * exception) or failure (the exception that was thrown).
 */
public class UncheckedResult {

	@Nullable
	private final Exception e;

	public UncheckedResult(@NotNull Exception e) {
		this.e = e;
	}

	public UncheckedResult() {
		this.e = null;
	}

	/**
	 * Rethrows the captured exception if it is an instance of the given class.
	 *
	 * @param exceptionClass the exception type to rethrow as
	 * @return this result, for chaining, when nothing was thrown
	 * @throws T if the captured exception is an instance of {@code exceptionClass}
	 */
	public <T extends Exception> UncheckedResult throwException(@NotNull Class<T> exceptionClass) throws T {
		// Class.isInstance(null) is false, so the separate null check is unnecessary,
		// and Class.cast replaces the original unchecked (T) cast with a checked one.
		if (exceptionClass.isInstance(e)) {
			throw exceptionClass.cast(e);
		}
		return this;
	}

	/**
	 * Terminal operation: if an exception is still pending, wraps it in a
	 * {@link RuntimeException} and throws it.
	 */
	public void done() {
		if (e != null) {
			throw new RuntimeException(e);
		}
	}
}

View File

@ -1,52 +0,0 @@
package org.warp.commonutils.functional;
import java.io.IOError;
import java.io.IOException;
import org.warp.commonutils.functional.IOBooleanSupplier;
import org.warp.commonutils.functional.IOIntegerSupplier;
import org.warp.commonutils.functional.IOLongSupplier;
import org.warp.commonutils.functional.IORunnable;
import org.warp.commonutils.functional.IOSupplier;
/**
 * Helpers that evaluate I/O expressions and rethrow any {@link IOException}
 * as an unchecked {@link IOError}, for contexts that cannot declare checked
 * exceptions.
 */
public final class UnsafeIOUtils {

	private UnsafeIOUtils() {
		// static utility class: not instantiable
	}

	/** Evaluates {@code expression}, wrapping {@link IOException} in {@link IOError}. */
	public static <T> T unsafe(IOSupplier<T> expression) {
		try {
			return expression.get();
		} catch (IOException e) {
			throw new IOError(e);
		}
	}

	/** Evaluates {@code expression}, wrapping {@link IOException} in {@link IOError}. */
	public static int unsafe(IOIntegerSupplier expression) {
		try {
			return expression.get();
		} catch (IOException e) {
			throw new IOError(e);
		}
	}

	/** Evaluates {@code expression}, wrapping {@link IOException} in {@link IOError}. */
	public static boolean unsafe(IOBooleanSupplier expression) {
		try {
			return expression.get();
		} catch (IOException e) {
			throw new IOError(e);
		}
	}

	/** Evaluates {@code expression}, wrapping {@link IOException} in {@link IOError}. */
	public static long unsafe(IOLongSupplier expression) {
		try {
			return expression.get();
		} catch (IOException e) {
			throw new IOError(e);
		}
	}

	/** Runs {@code expression}, wrapping {@link IOException} in {@link IOError}. */
	public static void unsafe(IORunnable expression) {
		try {
			expression.run();
		} catch (IOException e) {
			throw new IOError(e);
		}
	}
}

View File

@ -1,123 +0,0 @@
package org.warp.commonutils.locks;
import java.util.concurrent.locks.AbstractQueuedSynchronizer;
import java.util.concurrent.locks.Lock;
/**
 * A binary mutex with the following properties:
 *
 * Exposes two different {@link Lock}s: LEFT, RIGHT.
 *
 * When LEFT is held other threads can acquire LEFT but thread trying to acquire RIGHT will be
 * blocked. When RIGHT is held other threads can acquire RIGHT but thread trying to acquire LEFT
 * will be blocked.
 */
public class LeftRightLock {

	/** Value returned by {@code tryAcquireShared} when the opposite side is held. */
	public static final int ACQUISITION_FAILED = -1;
	/** Value returned by {@code tryAcquireShared} on success. */
	public static final int ACQUISITION_SUCCEEDED = 1;

	private final LeftRightSync sync = new LeftRightSync();

	/** Acquires the LEFT side, blocking while any thread holds RIGHT. */
	public void lockLeft() {
		sync.acquireShared(LockSide.LEFT.getV());
	}

	/** Acquires the RIGHT side, blocking while any thread holds LEFT. */
	public void lockRight() {
		sync.acquireShared(LockSide.RIGHT.getV());
	}

	/** Releases one LEFT hold. */
	public void releaseLeft() {
		sync.releaseShared(LockSide.LEFT.getV());
	}

	/** Releases one RIGHT hold. */
	public void releaseRight() {
		sync.releaseShared(LockSide.RIGHT.getV());
	}

	/** Attempts to acquire LEFT without blocking; returns whether it succeeded. */
	public boolean tryLockLeft() {
		return sync.tryAcquireShared(LockSide.LEFT) == ACQUISITION_SUCCEEDED;
	}

	/** Attempts to acquire RIGHT without blocking; returns whether it succeeded. */
	public boolean tryLockRight() {
		return sync.tryAcquireShared(LockSide.RIGHT) == ACQUISITION_SUCCEEDED;
	}

	// Side encoding: each LEFT holder subtracts 1 from the AQS state, each RIGHT holder adds 1.
	private enum LockSide {
		LEFT(-1), NONE(0), RIGHT(1);

		private final int v;

		LockSide(int v) {
			this.v = v;
		}

		public int getV() {
			return v;
		}
	}

	/**
	 * <p>
	 * Keep count the count of threads holding either the LEFT or the RIGHT lock.
	 * </p>
	 *
	 * <li>A state ({@link AbstractQueuedSynchronizer#getState()}) greater than 0 means one or more threads are holding RIGHT lock. </li>
	 * <li>A state ({@link AbstractQueuedSynchronizer#getState()}) lower than 0 means one or more threads are holding LEFT lock.</li>
	 * <li>A state ({@link AbstractQueuedSynchronizer#getState()}) equal to zero means no thread is holding any lock.</li>
	 */
	private static final class LeftRightSync extends AbstractQueuedSynchronizer {

		@Override
		protected int tryAcquireShared(int requiredSide) {
			return (tryChangeThreadCountHoldingCurrentLock(requiredSide, ChangeType.ADD) ? ACQUISITION_SUCCEEDED : ACQUISITION_FAILED);
		}

		@Override
		protected boolean tryReleaseShared(int requiredSide) {
			return tryChangeThreadCountHoldingCurrentLock(requiredSide, ChangeType.REMOVE);
		}

		/**
		 * CAS loop that adds (acquire) or removes (release) one holder of the given side.
		 * Fails only when the opposite side currently holds the lock.
		 *
		 * @param requiredSide -1 for LEFT, +1 for RIGHT
		 * @param changeType ADD on acquire, REMOVE on release
		 * @return true if the state was updated, false if the opposite side is held
		 */
		public boolean tryChangeThreadCountHoldingCurrentLock(int requiredSide, ChangeType changeType) {
			if (requiredSide != 1 && requiredSide != -1)
				throw new AssertionError("You can either lock LEFT or RIGHT (-1 or +1)");
			int curState;
			int newState;
			do {
				curState = this.getState();
				if (!sameSide(curState, requiredSide)) {
					return false;
				}
				if (changeType == ChangeType.ADD) {
					newState = curState + requiredSide;
				} else {
					newState = curState - requiredSide;
				}
				//TODO: protect against int overflow (hopefully you won't have so many threads)
				// NOTE(review): a REMOVE while state == 0 (releasing a lock that is not held)
				// passes the sameSide check and flips the state to the opposite side's sign —
				// callers must pair acquires and releases correctly; confirm this invariant.
			} while (!this.compareAndSetState(curState, newState));
			return true;
		}

		final int tryAcquireShared(LockSide lockSide) {
			return this.tryAcquireShared(lockSide.getV());
		}

		final boolean tryReleaseShared(LockSide lockSide) {
			return this.tryReleaseShared(lockSide.getV());
		}

		// Same side iff the lock is free (state 0) or the state's sign matches the requested side.
		private boolean sameSide(int curState, int requiredSide) {
			return curState == 0 || sameSign(curState, requiredSide);
		}

		// True when a and b are both non-negative or both negative.
		private boolean sameSign(int a, int b) {
			return (a >= 0) ^ (b < 0);
		}

		public enum ChangeType {
			ADD, REMOVE
		}
	}
}

View File

@ -1,277 +0,0 @@
package org.warp.commonutils.locks;
import java.io.IOException;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.StampedLock;
import java.util.function.Supplier;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.functional.IORunnable;
import org.warp.commonutils.functional.IOSupplier;
/**
 * Helpers that run a task while holding an optional lock: every {@code lock}
 * parameter may be {@code null}, in which case the task simply runs unguarded.
 * Variants exist for {@link Runnable}/{@link Supplier} tasks and for their I/O
 * counterparts ({@link IORunnable}/{@link IOSupplier}) that may throw
 * {@link IOException}.
 *
 * <p>The lock acquisition/release logic lives in the value-returning
 * (Supplier/IOSupplier) variants; the void variants delegate to them, so each
 * lock kind is handled in exactly one place.
 */
public class LockUtils {

	/** Runs {@code r} while holding {@code lock} (if non-null). */
	public static void lock(@Nullable Lock lock, @NotNull Runnable r) {
		// The value-returning lambda selects the Supplier overload.
		lock(lock, () -> {
			r.run();
			return null;
		});
	}

	/** Runs {@code r} while holding a read stamp of {@code lock} (if non-null). */
	public static void readLock(@Nullable StampedLock lock, @NotNull Runnable r) {
		readLock(lock, () -> {
			r.run();
			return null;
		});
	}

	/** Runs {@code r} while holding a write stamp of {@code lock} (if non-null). */
	public static void writeLock(@Nullable StampedLock lock, @NotNull Runnable r) {
		writeLock(lock, () -> {
			r.run();
			return null;
		});
	}

	/** Runs {@code r} while holding the given side of {@code lock} (if non-null). */
	public static void lock(@Nullable LeftRightLock lock, boolean right, @NotNull Runnable r) {
		lock(lock, right, () -> {
			r.run();
			return null;
		});
	}

	/** I/O variant of {@link #lock(Lock, Runnable)}. */
	public static void lockIO(@Nullable Lock lock, @NotNull IORunnable r) throws IOException {
		lockIO(lock, () -> {
			r.run();
			return null;
		});
	}

	/** I/O variant of {@link #readLock(StampedLock, Runnable)}. */
	public static void readLockIO(@Nullable StampedLock lock, @NotNull IORunnable r) throws IOException {
		readLockIO(lock, () -> {
			r.run();
			return null;
		});
	}

	/** I/O variant of {@link #writeLock(StampedLock, Runnable)}. */
	public static void writeLockIO(@Nullable StampedLock lock, @NotNull IORunnable r) throws IOException {
		writeLockIO(lock, () -> {
			r.run();
			return null;
		});
	}

	/** I/O variant of {@link #lock(LeftRightLock, boolean, Runnable)}. */
	public static void lockIO(@Nullable LeftRightLock lock, boolean right, @NotNull IORunnable r) throws IOException {
		lockIO(lock, right, () -> {
			r.run();
			return null;
		});
	}

	/**
	 * Runs {@code r} while holding {@code lock} (if non-null) and returns its result.
	 */
	public static <T> T lock(@Nullable Lock lock, @NotNull Supplier<T> r) {
		if (lock != null) {
			lock.lock();
		}
		try {
			return r.get();
		} finally {
			if (lock != null) {
				lock.unlock();
			}
		}
	}

	/**
	 * Runs {@code r} while holding a read stamp of {@code lock} (if non-null) and returns its result.
	 */
	public static <T> T readLock(@Nullable StampedLock lock, @NotNull Supplier<T> r) {
		long lockValue;
		if (lock != null) {
			lockValue = lock.readLock();
		} else {
			lockValue = 0;
		}
		try {
			return r.get();
		} finally {
			if (lock != null) {
				lock.unlockRead(lockValue);
			}
		}
	}

	/**
	 * Runs {@code r} while holding a write stamp of {@code lock} (if non-null) and returns its result.
	 */
	public static <T> T writeLock(@Nullable StampedLock lock, @NotNull Supplier<T> r) {
		long lockValue;
		if (lock != null) {
			lockValue = lock.writeLock();
		} else {
			lockValue = 0;
		}
		try {
			return r.get();
		} finally {
			if (lock != null) {
				lock.unlockWrite(lockValue);
			}
		}
	}

	/**
	 * Runs {@code r} while holding the given side of {@code lock} (if non-null) and returns its result.
	 *
	 * @param right true to take the RIGHT side, false for LEFT
	 */
	public static <T> T lock(@Nullable LeftRightLock lock, boolean right, @NotNull Supplier<T> r) {
		if (lock != null) {
			if (right) {
				lock.lockRight();
			} else {
				lock.lockLeft();
			}
		}
		try {
			return r.get();
		} finally {
			if (lock != null) {
				if (right) {
					lock.releaseRight();
				} else {
					lock.releaseLeft();
				}
			}
		}
	}

	/** I/O variant of {@link #lock(Lock, Supplier)}. */
	public static <T> T lockIO(@Nullable Lock lock, @NotNull IOSupplier<T> r) throws IOException {
		if (lock != null) {
			lock.lock();
		}
		try {
			return r.get();
		} finally {
			if (lock != null) {
				lock.unlock();
			}
		}
	}

	/** I/O variant of {@link #readLock(StampedLock, Supplier)}. */
	public static <T> T readLockIO(@Nullable StampedLock lock, @NotNull IOSupplier<T> r) throws IOException {
		long lockValue;
		if (lock != null) {
			lockValue = lock.readLock();
		} else {
			lockValue = 0;
		}
		try {
			return r.get();
		} finally {
			if (lock != null) {
				lock.unlockRead(lockValue);
			}
		}
	}

	/** I/O variant of {@link #writeLock(StampedLock, Supplier)}. */
	public static <T> T writeLockIO(@Nullable StampedLock lock, @NotNull IOSupplier<T> r) throws IOException {
		long lockValue;
		if (lock != null) {
			lockValue = lock.writeLock();
		} else {
			lockValue = 0;
		}
		try {
			return r.get();
		} finally {
			if (lock != null) {
				lock.unlockWrite(lockValue);
			}
		}
	}

	/** I/O variant of {@link #lock(LeftRightLock, boolean, Supplier)}. */
	public static <T> T lockIO(@Nullable LeftRightLock lock, boolean right, @NotNull IOSupplier<T> r) throws IOException {
		if (lock != null) {
			if (right) {
				lock.lockRight();
			} else {
				lock.lockLeft();
			}
		}
		try {
			return r.get();
		} finally {
			if (lock != null) {
				if (right) {
					lock.releaseRight();
				} else {
					lock.releaseLeft();
				}
			}
		}
	}
}

View File

@ -1,519 +0,0 @@
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.warp.commonutils.locks;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.MapMaker;
import com.google.common.math.IntMath;
import com.google.common.primitives.Ints;
import com.googlecode.concurentlocks.ReadWriteUpdateLock;
import com.googlecode.concurentlocks.ReentrantReadWriteUpdateLock;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.math.RoundingMode;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* A striped {@code Lock/Semaphore/ReadWriteLock}. This offers the underlying lock striping similar to that of {@code
* ConcurrentHashMap} in a reusable form, and extends it for semaphores and read-write locks. Conceptually, lock
* striping is the technique of dividing a lock into many
* <i>stripes</i>, increasing the granularity of a single lock and allowing independent operations
* to lock different stripes and proceed concurrently, instead of creating contention for a single lock.
*
* <p>The guarantee provided by this class is that equal keys lead to the same lock (or semaphore),
* i.e. {@code if (key1.equals(key2))} then {@code striped.get(key1) == striped.get(key2)} (assuming {@link
* Object#hashCode()} is correctly implemented for the keys). Note that if {@code key1} is
* <strong>not</strong> equal to {@code key2}, it is <strong>not</strong> guaranteed that
* {@code striped.get(key1) != striped.get(key2)}; the elements might nevertheless be mapped to the same lock. The lower
* the number of stripes, the higher the probability of this happening.
*
* <p>There are three flavors of this class: {@code Striped<Lock>}, {@code Striped<Semaphore>}, and
* {@code Striped<ReadWriteLock>}. For each type, two implementations are offered: {@linkplain #lock(int) strong} and
* {@linkplain #lazyWeakLock(int) weak} {@code Striped<Lock>}, {@linkplain #semaphore(int, int) strong} and {@linkplain
* #lazyWeakSemaphore(int, int) weak} {@code Striped<Semaphore>}, and {@linkplain #readWriteLock(int) strong} and
* {@linkplain #lazyWeakReadWriteLock(int) weak} {@code Striped<ReadWriteLock>}. <i>Strong</i> means that all stripes
* (locks/semaphores) are initialized eagerly, and are not reclaimed unless {@code Striped} itself is reclaimable.
* <i>Weak</i> means that locks/semaphores are created lazily, and they are allowed to be reclaimed if nobody is
* holding on to them. This is useful, for example, if one wants to create a {@code Striped<Lock>} of many locks, but
* worries that in most cases only a small portion of these would be in use.
*
* <p>Prior to this class, one might be tempted to use {@code Map<K, Lock>}, where {@code K}
* represents the task. This maximizes concurrency by having each unique key mapped to a unique lock, but also maximizes
* memory footprint. On the other extreme, one could use a single lock for all tasks, which minimizes memory footprint
* but also minimizes concurrency. Instead of choosing either of these extremes, {@code Striped} allows the user to
* trade between required concurrency and memory footprint. For example, if a set of tasks are CPU-bound, one could
* easily create a very compact {@code Striped<Lock>} of {@code availableProcessors() * 4} stripes, instead of possibly
* thousands of locks which could be created in a {@code Map<K, Lock>} structure.
*
* @author Dimitris Andreou
* @since 13.0
*/
@Beta
@GwtIncompatible
public abstract class Striped<L> {

	/**
	 * If there are at least this many stripes, we assume the memory usage of a ConcurrentMap will be smaller than a large
	 * array. (This assumes that in the lazy case, most stripes are unused. As always, if many stripes are in use, a
	 * non-lazy striped makes more sense.)
	 */
	private static final int LARGE_LAZY_CUTOFF = 1024;

	// Instances are created only through the static factories below.
	private Striped() {
	}

	/**
	 * Returns the stripe that corresponds to the passed key. It is always guaranteed that if {@code key1.equals(key2)},
	 * then {@code get(key1) == get(key2)}.
	 *
	 * @param key an arbitrary, non-null key
	 * @return the stripe that the passed key corresponds to
	 */
	public abstract L get(Object key);

	/**
	 * Returns the stripe at the specified index. Valid indexes are 0, inclusively, to {@code size()}, exclusively.
	 *
	 * @param index the index of the stripe to return; must be in {@code [0...size())}
	 * @return the stripe at the specified index
	 */
	public abstract L getAt(int index);

	/**
	 * Returns the index to which the given key is mapped, so that getAt(indexFor(key)) == get(key).
	 */
	abstract int indexFor(Object key);

	/**
	 * Returns the total number of stripes in this instance.
	 */
	public abstract int size();

	/**
	 * Returns the stripes that correspond to the passed objects, in ascending (as per {@link #getAt(int)}) order. Thus,
	 * threads that use the stripes in the order returned by this method are guaranteed to not deadlock each other.
	 *
	 * <p>It should be noted that using a {@code Striped<L>} with relatively few stripes, and
	 * {@code bulkGet(keys)} with a relative large number of keys can cause an excessive number of shared stripes (much
	 * like the birthday paradox, where much fewer than anticipated birthdays are needed for a pair of them to match).
	 * Please consider carefully the implications of the number of stripes, the intended concurrency level, and the
	 * typical number of keys used in a {@code bulkGet(keys)} operation. See <a href="http://www.mathpages.com/home/kmath199.htm">Balls
	 * in Bins model</a> for mathematical formulas that can be used to estimate the probability of collisions.
	 *
	 * @param keys arbitrary non-null keys
	 * @return the stripes corresponding to the objects (one per each object, derived by delegating to {@link
	 * #get(Object)}; may contain duplicates), in an increasing index order.
	 */
	public Iterable<L> bulkGet(Iterable<?> keys) {
		// Initially using the array to store the keys, then reusing it to store the respective L's
		final Object[] array = Iterables.toArray(keys, Object.class);
		if (array.length == 0) {
			return ImmutableList.of();
		}
		int[] stripes = new int[array.length];
		for (int i = 0; i < array.length; i++) {
			stripes[i] = indexFor(array[i]);
		}
		// Sorting the stripe indexes yields the deadlock-avoiding acquisition order.
		Arrays.sort(stripes);
		// optimize for runs of identical stripes
		int previousStripe = stripes[0];
		array[0] = getAt(previousStripe);
		for (int i = 1; i < array.length; i++) {
			int currentStripe = stripes[i];
			if (currentStripe == previousStripe) {
				array[i] = array[i - 1];
			} else {
				array[i] = getAt(currentStripe);
				previousStripe = currentStripe;
			}
		}
		/*
		 * Note that the returned Iterable holds references to the returned stripes, to avoid
		 * error-prone code like:
		 *
		 * Striped<Lock> stripedLock = Striped.lazyWeakXXX(...)'
		 * Iterable<Lock> locks = stripedLock.bulkGet(keys);
		 * for (Lock lock : locks) {
		 *   lock.lock();
		 * }
		 * operation();
		 * for (Lock lock : locks) {
		 *   lock.unlock();
		 * }
		 *
		 * If we only held the int[] stripes, translating it on the fly to L's, the original locks might
		 * be garbage collected after locking them, ending up in a huge mess.
		 */
		@SuppressWarnings("unchecked") // we carefully replaced all keys with their respective L's
		List<L> asList = (List<L>) Arrays.asList(array);
		return Collections.unmodifiableList(asList);
	}

	// Static factories

	/**
	 * Creates a {@code Striped<Lock>} with eagerly initialized, strongly referenced locks. Every lock is reentrant.
	 *
	 * @param stripes the minimum number of stripes (locks) required
	 * @return a new {@code Striped<Lock>}
	 */
	public static Striped<Lock> lock(int stripes) {
		return new CompactStriped<Lock>(stripes, new Supplier<Lock>() {
			@Override
			public Lock get() {
				return new PaddedLock();
			}
		});
	}

	/**
	 * Creates a {@code Striped<Lock>} with lazily initialized, weakly referenced locks. Every lock is reentrant.
	 *
	 * @param stripes the minimum number of stripes (locks) required
	 * @return a new {@code Striped<Lock>}
	 */
	public static Striped<Lock> lazyWeakLock(int stripes) {
		return lazy(stripes, new Supplier<Lock>() {
			@Override
			public Lock get() {
				return new ReentrantLock(false);
			}
		});
	}

	// Chooses the lazy implementation by stripe count; see LARGE_LAZY_CUTOFF.
	private static <L> Striped<L> lazy(int stripes, Supplier<L> supplier) {
		return stripes < LARGE_LAZY_CUTOFF ? new SmallLazyStriped<L>(stripes, supplier)
				: new LargeLazyStriped<L>(stripes, supplier);
	}

	/**
	 * Creates a {@code Striped<Semaphore>} with eagerly initialized, strongly referenced semaphores, with the specified
	 * number of permits.
	 *
	 * @param stripes the minimum number of stripes (semaphores) required
	 * @param permits the number of permits in each semaphore
	 * @return a new {@code Striped<Semaphore>}
	 */
	public static Striped<Semaphore> semaphore(int stripes, final int permits) {
		return new CompactStriped<Semaphore>(stripes, new Supplier<Semaphore>() {
			@Override
			public Semaphore get() {
				return new PaddedSemaphore(permits);
			}
		});
	}

	/**
	 * Creates a {@code Striped<Semaphore>} with lazily initialized, weakly referenced semaphores, with the specified
	 * number of permits.
	 *
	 * @param stripes the minimum number of stripes (semaphores) required
	 * @param permits the number of permits in each semaphore
	 * @return a new {@code Striped<Semaphore>}
	 */
	public static Striped<Semaphore> lazyWeakSemaphore(int stripes, final int permits) {
		return lazy(stripes, new Supplier<Semaphore>() {
			@Override
			public Semaphore get() {
				return new Semaphore(permits, false);
			}
		});
	}

	/**
	 * Creates a {@code Striped<ReadWriteLock>} with eagerly initialized, strongly referenced read-write locks. Every lock
	 * is reentrant.
	 *
	 * @param stripes the minimum number of stripes (locks) required
	 * @return a new {@code Striped<ReadWriteLock>}
	 */
	public static Striped<ReadWriteLock> readWriteLock(int stripes) {
		return new CompactStriped<ReadWriteLock>(stripes, READ_WRITE_LOCK_SUPPLIER);
	}

	/**
	 * Creates a {@code Striped<ReadWriteLock>} with eagerly initialized, strongly referenced read-write-update locks.
	 * Every lock is reentrant.
	 *
	 * @param stripes the minimum number of stripes (locks) required
	 * @return a new {@code Striped<ReadWriteUpdateLock>}
	 */
	public static Striped<ReadWriteUpdateLock> readWriteUpdateLock(int stripes) {
		return new CompactStriped<ReadWriteUpdateLock>(stripes, READ_WRITE_UPDATE_LOCK_SUPPLIER);
	}

	/**
	 * Creates a {@code Striped<ReadWriteLock>} with lazily initialized, weakly referenced read-write locks. Every lock is
	 * reentrant.
	 *
	 * @param stripes the minimum number of stripes (locks) required
	 * @return a new {@code Striped<ReadWriteLock>}
	 */
	public static Striped<ReadWriteLock> lazyWeakReadWriteLock(int stripes) {
		return lazy(stripes, READ_WRITE_LOCK_SUPPLIER);
	}

	// ReentrantReadWriteLock is large enough to make padding probably unnecessary
	private static final Supplier<ReadWriteLock> READ_WRITE_LOCK_SUPPLIER = new Supplier<ReadWriteLock>() {
		@Override
		public ReadWriteLock get() {
			return new ReentrantReadWriteLock();
		}
	};

	// ReentrantReadWriteUpdateLock is large enough to make padding probably unnecessary
	private static final Supplier<ReadWriteUpdateLock> READ_WRITE_UPDATE_LOCK_SUPPLIER = new Supplier<ReadWriteUpdateLock>() {
		@Override
		public ReadWriteUpdateLock get() {
			return new ReentrantReadWriteUpdateLock();
		}
	};

	private abstract static class PowerOfTwoStriped<L> extends Striped<L> {

		/**
		 * Capacity (power of two) minus one, for fast mod evaluation
		 */
		final int mask;

		PowerOfTwoStriped(int stripes) {
			Preconditions.checkArgument(stripes > 0, "Stripes must be positive");
			this.mask = stripes > Ints.MAX_POWER_OF_TWO ? ALL_SET : ceilToPowerOfTwo(stripes) - 1;
		}

		@Override
		final int indexFor(Object key) {
			// Smearing defends against poor user hashCode() implementations before masking.
			int hash = smear(key.hashCode());
			return hash & mask;
		}

		@Override
		public final L get(Object key) {
			return getAt(indexFor(key));
		}
	}

	/**
	 * Implementation of Striped where 2^k stripes are represented as an array of the same length, eagerly initialized.
	 */
	private static class CompactStriped<L> extends PowerOfTwoStriped<L> {

		/**
		 * Size is a power of two.
		 */
		private final Object[] array;

		private CompactStriped(int stripes, Supplier<L> supplier) {
			super(stripes);
			Preconditions.checkArgument(stripes <= Ints.MAX_POWER_OF_TWO, "Stripes must be <= 2^30)");
			this.array = new Object[mask + 1];
			for (int i = 0; i < array.length; i++) {
				array[i] = supplier.get();
			}
		}

		@SuppressWarnings("unchecked") // we only put L's in the array
		@Override
		public L getAt(int index) {
			return (L) array[index];
		}

		@Override
		public int size() {
			return array.length;
		}
	}

	/**
	 * Implementation of Striped where up to 2^k stripes can be represented, using an AtomicReferenceArray of size 2^k. To
	 * map a user key into a stripe, we take a k-bit slice of the user key's (smeared) hashCode(). The stripes are lazily
	 * initialized and are weakly referenced.
	 */
	@VisibleForTesting
	static class SmallLazyStriped<L> extends PowerOfTwoStriped<L> {

		final AtomicReferenceArray<ArrayReference<? extends L>> locks;
		final Supplier<L> supplier;
		final int size;
		final ReferenceQueue<L> queue = new ReferenceQueue<L>();

		SmallLazyStriped(int stripes, Supplier<L> supplier) {
			super(stripes);
			this.size = (mask == ALL_SET) ? Integer.MAX_VALUE : mask + 1;
			this.locks = new AtomicReferenceArray<ArrayReference<? extends L>>(size);
			this.supplier = supplier;
		}

		@Override
		public L getAt(int index) {
			if (size != Integer.MAX_VALUE) {
				Preconditions.checkElementIndex(index, size());
			} // else no check necessary, all index values are valid
			ArrayReference<? extends L> existingRef = locks.get(index);
			L existing = existingRef == null ? null : existingRef.get();
			if (existing != null) {
				return existing;
			}
			L created = supplier.get();
			ArrayReference<L> newRef = new ArrayReference<L>(created, index, queue);
			while (!locks.compareAndSet(index, existingRef, newRef)) {
				// we raced, we need to re-read and try again
				existingRef = locks.get(index);
				existing = existingRef == null ? null : existingRef.get();
				if (existing != null) {
					return existing;
				}
			}
			drainQueue();
			return created;
		}

		// N.B. Draining the queue is only necessary to ensure that we don't accumulate empty references
		// in the array. We could skip this if we decide we don't care about holding on to Reference
		// objects indefinitely.
		private void drainQueue() {
			Reference<? extends L> ref;
			while ((ref = queue.poll()) != null) {
				// We only ever register ArrayReferences with the queue so this is always safe.
				ArrayReference<? extends L> arrayRef = (ArrayReference<? extends L>) ref;
				// Try to clear out the array slot, n.b. if we fail that is fine, in either case the
				// arrayRef will be out of the array after this step.
				locks.compareAndSet(arrayRef.index, arrayRef, null);
			}
		}

		@Override
		public int size() {
			return size;
		}

		// Weak reference that remembers its own slot so drainQueue can clear it.
		private static final class ArrayReference<L> extends WeakReference<L> {

			final int index;

			ArrayReference(L referent, int index, ReferenceQueue<L> queue) {
				super(referent, queue);
				this.index = index;
			}
		}
	}

	/**
	 * Implementation of Striped where up to 2^k stripes can be represented, using a ConcurrentMap where the key domain is
	 * [0..2^k). To map a user key into a stripe, we take a k-bit slice of the user key's (smeared) hashCode(). The
	 * stripes are lazily initialized and are weakly referenced.
	 */
	@VisibleForTesting
	static class LargeLazyStriped<L> extends PowerOfTwoStriped<L> {

		final ConcurrentMap<Integer, L> locks;
		final Supplier<L> supplier;
		final int size;

		LargeLazyStriped(int stripes, Supplier<L> supplier) {
			super(stripes);
			this.size = (mask == ALL_SET) ? Integer.MAX_VALUE : mask + 1;
			this.supplier = supplier;
			this.locks = new MapMaker().weakValues().makeMap();
		}

		@Override
		public L getAt(int index) {
			if (size != Integer.MAX_VALUE) {
				Preconditions.checkElementIndex(index, size());
			} // else no check necessary, all index values are valid
			L existing = locks.get(index);
			if (existing != null) {
				return existing;
			}
			L created = supplier.get();
			// putIfAbsent decides the race; the loser's instance is discarded.
			existing = locks.putIfAbsent(index, created);
			return MoreObjects.firstNonNull(existing, created);
		}

		@Override
		public int size() {
			return size;
		}
	}

	/**
	 * A bit mask where all bits are set.
	 */
	private static final int ALL_SET = ~0;

	private static int ceilToPowerOfTwo(int x) {
		return 1 << IntMath.log2(x, RoundingMode.CEILING);
	}

	/*
	 * This method was written by Doug Lea with assistance from members of JCP JSR-166 Expert Group
	 * and released to the public domain, as explained at
	 * http://creativecommons.org/licenses/publicdomain
	 *
	 * As of 2010/06/11, this method is identical to the (package private) hash method in OpenJDK 7's
	 * java.util.HashMap class.
	 */
	// Copied from java/com/google/common/collect/Hashing.java
	private static int smear(int hashCode) {
		hashCode ^= (hashCode >>> 20) ^ (hashCode >>> 12);
		return hashCode ^ (hashCode >>> 7) ^ (hashCode >>> 4);
	}

	private static class PaddedLock extends ReentrantLock {

		/*
		 * Padding from 40 into 64 bytes, same size as cache line. Might be beneficial to add a fourth
		 * long here, to minimize chance of interference between consecutive locks, but I couldn't
		 * observe any benefit from that.
		 */
		long unused1;
		long unused2;
		long unused3;

		PaddedLock() {
			super(false);
		}
	}

	private static class PaddedSemaphore extends Semaphore {

		// See PaddedReentrantLock comment
		long unused1;
		long unused2;
		long unused3;

		PaddedSemaphore(int permits) {
			super(permits, false);
		}
	}
}

View File

@ -1,107 +0,0 @@
package org.warp.commonutils.metrics;
import it.unimi.dsi.fastutil.objects.Object2ObjectMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectOpenHashMap;
import it.unimi.dsi.fastutil.objects.ObjectOpenHashSet;
import java.util.Collections;
import java.util.HashMap;
import java.util.Set;
/**
 * Absolute (gauge-style) time samples partitioned by a generic "detail" key.
 * Aggregate accessors sum over every known detail; all public methods are
 * synchronized on this instance.
 */
public class AtomicDetailedTimeAbsoluteSamples<T> implements AtomicDetailedTimeAbsoluteSamplesSnapshot<T> {

	private final boolean isSnapshot;
	private final int sampleTime;
	private final int samplesCount;
	private Object2ObjectMap<T, AtomicTimeAbsoluteSamples> detailedAtomicTimeSamples = new Object2ObjectOpenHashMap<>();

	/**
	 * @param sampleTime in milliseconds
	 * @param samplesCount number of samples kept per detail
	 */
	public AtomicDetailedTimeAbsoluteSamples(int sampleTime, int samplesCount) {
		this.sampleTime = sampleTime;
		this.samplesCount = samplesCount;
		this.isSnapshot = false;
	}

	/**
	 * Builds an instance around already-snapshotted per-detail samples.
	 */
	public AtomicDetailedTimeAbsoluteSamples(int sampleTime, int samplesCount, HashMap<T, AtomicTimeAbsoluteSamplesSnapshot> detailedAtomicTimeSamples, boolean isSnapshot) {
		this.sampleTime = sampleTime;
		this.samplesCount = samplesCount;
		this.detailedAtomicTimeSamples = new Object2ObjectOpenHashMap<>();
		detailedAtomicTimeSamples.forEach((detail, sample) -> this.detailedAtomicTimeSamples.put(detail, (AtomicTimeAbsoluteSamples) sample));
		this.isSnapshot = isSnapshot;
	}

	// Hook kept for symmetry with the other metric classes; nothing to refresh here.
	private synchronized void updateSamples() {
	}

	// Returns the per-detail holder, creating it lazily on first use.
	private synchronized AtomicTimeAbsoluteSamples getDetailed(T detail) {
		AtomicTimeAbsoluteSamples samples = detailedAtomicTimeSamples.get(detail);
		if (samples != null) {
			return samples;
		}
		samples = new AtomicTimeAbsoluteSamples(sampleTime, samplesCount);
		detailedAtomicTimeSamples.put(detail, samples);
		return samples;
	}

	/** Sets the current absolute value for the given detail. */
	public synchronized void set(T detail, long count) {
		updateSamples();
		getDetailed(detail).set(count);
	}

	@Override
	public synchronized Set<T> getDetails() {
		return Collections.unmodifiableSet(new ObjectOpenHashSet<>(detailedAtomicTimeSamples.keySet()));
	}

	@Override
	public synchronized double getAveragePerSecond(T detail, long timeRange) {
		updateSamples();
		return getDetailed(detail).getAveragePerSecond(timeRange);
	}

	@Override
	public synchronized double getAveragePerSecond(long timeRange) {
		updateSamples();
		// Streams are kept here: DoubleStream.sum's summation behavior is preserved exactly.
		return detailedAtomicTimeSamples.values().stream().mapToDouble(samples -> samples.getAveragePerSecond(timeRange)).sum();
	}

	@Override
	public synchronized long getCurrentCount(T detail) {
		updateSamples();
		return getDetailed(detail).getCurrentCount();
	}

	@Override
	public synchronized long getCurrentCount() {
		updateSamples();
		return detailedAtomicTimeSamples.values().stream().mapToLong(AtomicTimeAbsoluteSamples::getCurrentCount).sum();
	}

	@Override
	public synchronized double getTotalAveragePerSecond() {
		updateSamples();
		return detailedAtomicTimeSamples.values().stream().mapToDouble(AtomicTimeAbsoluteSamples::getTotalAveragePerSecond).sum();
	}

	@Override
	public synchronized double getTotalAveragePerSecond(T detail) {
		updateSamples();
		return getDetailed(detail).getTotalAveragePerSecond();
	}

	/** Snapshot of a single detail's samples. */
	public synchronized AtomicTimeAbsoluteSamplesSnapshot snapshot(T detail) {
		return getDetailed(detail).snapshot();
	}

	/** Snapshot of every detail; a snapshot returns itself. */
	public synchronized AtomicDetailedTimeAbsoluteSamples<T> snapshot() {
		if (isSnapshot) {
			return this;
		}
		HashMap<T, AtomicTimeAbsoluteSamplesSnapshot> copies = new HashMap<>();
		detailedAtomicTimeSamples.forEach((detail, samples) -> copies.put(detail, samples.snapshot()));
		return new AtomicDetailedTimeAbsoluteSamples<>(sampleTime, samplesCount, copies, true);
	}
}

View File

@ -1,14 +0,0 @@
package org.warp.commonutils.metrics;
import java.util.Set;
/**
 * Read-only view of absolute time samples broken down by a "detail" key of
 * type {@code T}, in addition to the aggregate methods inherited from
 * {@link AtomicTimeAbsoluteSamplesSnapshot}.
 */
public interface AtomicDetailedTimeAbsoluteSamplesSnapshot<T> extends AtomicTimeAbsoluteSamplesSnapshot {

	/** All detail keys currently tracked. */
	Set<T> getDetails();

	/** Average for one detail over the given time range. */
	double getAveragePerSecond(T detail, long timeRange);

	/** Current absolute value for one detail. */
	long getCurrentCount(T detail);

	/** Lifetime average for one detail. */
	double getTotalAveragePerSecond(T detail);
}

View File

@ -1,94 +0,0 @@
package org.warp.commonutils.metrics;
import it.unimi.dsi.fastutil.objects.Object2ObjectMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectOpenHashMap;
import it.unimi.dsi.fastutil.objects.ObjectOpenHashSet;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Set;
/**
 * Incremental (event-counting) time samples broken down by a "detail" key of
 * type {@code T}. Aggregate counts are kept by the superclass; per-detail
 * counts are tracked by one {@link AtomicTimeIncrementalSamples} each,
 * created lazily.
 */
public class AtomicDetailedTimeIncrementalSamples<T> extends AtomicTimeIncrementalSamples implements
		AtomicDetailedTimeIncrementalSamplesSnapshot<T> {

	// Per-detail trackers, lazily created on first use.
	private Object2ObjectMap<T, AtomicTimeIncrementalSamples> detailedAtomicTimeSamples = new Object2ObjectOpenHashMap<>();

	/**
	 * @param sampleTime   length of one sample window, in milliseconds
	 * @param samplesCount number of sample windows kept
	 */
	public AtomicDetailedTimeIncrementalSamples(int sampleTime, int samplesCount) {
		super(sampleTime, samplesCount);
	}

	/**
	 * Reconstructs an instance (typically a snapshot) from raw state.
	 * The snapshots in {@code detailedAtomicTimeSamples} must be
	 * {@link AtomicTimeIncrementalSamples} instances (they are cast).
	 */
	public AtomicDetailedTimeIncrementalSamples(long startTime, long[] samples, int sampleTime, long currentSampleStartTime, long totalEvents,
			HashMap<T, AtomicTimeIncrementalSamplesSnapshot> detailedAtomicTimeSamples, boolean isSnapshot) {
		super(startTime, samples, sampleTime, currentSampleStartTime, totalEvents, isSnapshot);
		this.detailedAtomicTimeSamples = new Object2ObjectOpenHashMap<>();
		detailedAtomicTimeSamples.forEach((detail, sample) -> this.detailedAtomicTimeSamples.put(detail, (AtomicTimeIncrementalSamples) sample));
	}

	/** Returns the tracker for {@code detail}, creating it on first use. */
	private synchronized AtomicTimeIncrementalSamples getDetailed(T detail) {
		// computeIfAbsent replaces the explicit get / null-check / put sequence.
		return detailedAtomicTimeSamples.computeIfAbsent(detail,
				key -> new AtomicTimeIncrementalSamples(sampleTime, samples.length));
	}

	/** Adds {@code count} events both to the detail tracker and to the aggregate. */
	public synchronized void increment(T detail, long count) {
		updateSamples();
		getDetailed(detail).increment(count);
		increment(count);
	}

	/** Returns an unmodifiable copy of the currently known detail keys. */
	@Override
	public synchronized Set<T> getDetails() {
		return Collections.unmodifiableSet(new ObjectOpenHashSet<>(detailedAtomicTimeSamples.keySet()));
	}

	@Override
	public synchronized double getAveragePerSecond(T detail, long timeRange) {
		updateSamples();
		return getDetailed(detail).getAveragePerSecond(timeRange);
	}

	@Override
	public synchronized long getApproximateCount(T detail, long timeRange) {
		updateSamples();
		return getDetailed(detail).getApproximateCount(timeRange);
	}

	@Override
	public synchronized long getTotalCount(T detail) {
		updateSamples();
		return getDetailed(detail).getTotalCount();
	}

	// NOTE(review): despite the name, this delegates to getTotalAveragePerSecond()
	// of the detail tracker — kept for interface compatibility.
	@Override
	public synchronized double getTotalAverage(T detail) {
		updateSamples();
		return getDetailed(detail).getTotalAveragePerSecond();
	}

	/** Immutable snapshot of a single detail's tracker. */
	public synchronized AtomicTimeIncrementalSamplesSnapshot snapshot(T detail) {
		return getDetailed(detail).snapshot();
	}

	@Override
	protected synchronized void shiftSamples(int shiftCount) {
		// Per-detail trackers shift themselves lazily; only the aggregate shifts here.
		super.shiftSamples(shiftCount);
	}

	/**
	 * Creates an immutable copy of this object and of every per-detail tracker.
	 */
	public synchronized AtomicDetailedTimeIncrementalSamples<T> snapshot() {
		if (isSnapshot) {
			return this;
		}
		var clonedDetailedAtomicTimeSamples = new HashMap<T, AtomicTimeIncrementalSamplesSnapshot>(detailedAtomicTimeSamples);
		clonedDetailedAtomicTimeSamples.replaceAll((key, value) -> ((AtomicTimeIncrementalSamples) value).snapshot());
		// Bug fix: the copy must be flagged as a snapshot. The previous code passed
		// 'isSnapshot', which is always false past the guard above, so the returned
		// "snapshot" kept mutating (compare the absolute variant, which passes true).
		return new AtomicDetailedTimeIncrementalSamples<>(startTime, Arrays.copyOf(this.samples, this.samples.length), sampleTime,
				currentSampleStartTime, totalEvents, clonedDetailedAtomicTimeSamples, true);
	}
}

View File

@ -1,16 +0,0 @@
package org.warp.commonutils.metrics;
import java.util.Set;
/**
 * Read-only view of incremental (event-counting) time samples broken down by
 * a "detail" key of type {@code T}, in addition to the aggregate methods
 * inherited from {@link AtomicTimeIncrementalSamplesSnapshot}.
 */
public interface AtomicDetailedTimeIncrementalSamplesSnapshot<T> extends AtomicTimeIncrementalSamplesSnapshot {

	/** All detail keys currently tracked. */
	Set<T> getDetails();

	/** Average event rate for one detail over the given time range. */
	double getAveragePerSecond(T detail, long timeRange);

	/** Approximate event count for one detail within the given time range. */
	long getApproximateCount(T detail, long timeRange);

	/** Lifetime event count for one detail. */
	long getTotalCount(T detail);

	/** Lifetime average rate for one detail. */
	double getTotalAverage(T detail);
}

View File

@ -1,123 +0,0 @@
package org.warp.commonutils.metrics;
import java.util.Arrays;
/**
 * Sliding window of absolute (gauge-style) values sampled over time.
 * {@code samples[0]} is the current window; older windows follow. All public
 * methods are synchronized; a snapshot instance is frozen and never updates.
 */
public class AtomicTimeAbsoluteSamples implements AtomicTimeAbsoluteSamplesSnapshot {

	protected final boolean isSnapshot;
	// -1 until the first access; then the epoch (ms, from System.nanoTime) of the first sample.
	protected long startTime;
	protected final long[] samples;
	// Length of one sample window, in milliseconds.
	protected final int sampleTime;
	protected long currentSampleStartTime;
	protected long totalSamplesSum = 0;
	// Starts at 1 — presumably so getTotalAveragePerSecond never divides by zero. TODO confirm.
	protected long totalSamplesCount = 1;

	/**
	 * @param sampleTime   length of one sample window, in milliseconds (must be >= 1)
	 * @param samplesCount number of sample windows kept (must be >= 1)
	 */
	public AtomicTimeAbsoluteSamples(int sampleTime, int samplesCount) {
		// Validate before allocating: previously a negative samplesCount surfaced as
		// NegativeArraySizeException instead of the intended IndexOutOfBoundsException.
		if (samplesCount < 1) throw new IndexOutOfBoundsException();
		if (sampleTime < 1) throw new IndexOutOfBoundsException();
		this.samples = new long[samplesCount];
		this.sampleTime = sampleTime;
		startTime = -1;
		this.isSnapshot = false;
	}

	/** Reconstructs an instance (typically a snapshot) from raw state. */
	public AtomicTimeAbsoluteSamples(long startTime, long[] samples, int sampleTime, long currentSampleStartTime, long totalSamplesSum, long totalSamplesCount, boolean isSnapshot) {
		this.startTime = startTime;
		this.samples = samples;
		this.sampleTime = sampleTime;
		this.currentSampleStartTime = currentSampleStartTime;
		this.totalSamplesSum = totalSamplesSum;
		this.totalSamplesCount = totalSamplesCount;
		this.isSnapshot = isSnapshot;
	}

	/**
	 * Rolls the window forward to the current time, carrying the last value into
	 * the windows that elapsed (absolute semantics: a gauge keeps its value).
	 * No-op on snapshots.
	 */
	protected synchronized void updateSamples() {
		checkStarted();
		if (isSnapshot) {
			return;
		}
		long currentTime = System.nanoTime() / 1000000L;
		long timeDiff = currentTime - currentSampleStartTime;
		// Round the elapsed time down to a whole number of windows.
		long timeToShift = timeDiff - (timeDiff % sampleTime);
		int shiftCount = (int) (timeToShift / sampleTime);
		if (currentTime - (currentSampleStartTime + timeToShift) > sampleTime) {
			throw new IndexOutOfBoundsException("Time sample bigger than " + sampleTime + "! It's " + (currentTime - (currentSampleStartTime + timeToShift)));
		}
		if (shiftCount > 0) {
			shiftSamples(shiftCount);
			currentSampleStartTime += timeToShift;
			totalSamplesCount += shiftCount;
			// The gauge held its last value for each elapsed window.
			long lastSample = samples[0];
			totalSamplesSum += lastSample * shiftCount;
		}
	}

	/** Lazily initializes the clock on first use. */
	protected synchronized void checkStarted() {
		if (startTime == -1) {
			this.startTime = System.nanoTime() / 1000000L;
			this.currentSampleStartTime = startTime;
		}
	}

	/** Shifts the window right by {@code shiftCount}, back-filling with the last value. */
	protected void shiftSamples(int shiftCount) {
		checkStarted();
		long lastSample = samples[0];
		if (samples.length - shiftCount > 0) {
			System.arraycopy(samples, 0, samples, shiftCount, samples.length - shiftCount);
			Arrays.fill(samples, 0, shiftCount, lastSample);
		} else {
			Arrays.fill(samples, lastSample);
		}
	}

	/** Sets the current absolute value, adjusting the running sum by the delta. */
	public synchronized void set(long count) {
		updateSamples();
		long oldValue = samples[0];
		samples[0] = count;
		totalSamplesSum += count - oldValue;
	}

	/**
	 * Average of the completed windows covering roughly {@code timeRange}
	 * milliseconds (the in-progress window samples[0] is excluded).
	 */
	@Override
	public synchronized double getAveragePerSecond(long timeRange) {
		updateSamples();
		double preciseTimeRange = timeRange;
		// Clamp if the requested range is bigger than the data collected since start.
		if (currentSampleStartTime - preciseTimeRange < startTime) {
			preciseTimeRange = currentSampleStartTime - startTime;
		}
		double samplesCount = Math.min(Math.max(preciseTimeRange / sampleTime, 1d), samples.length - 1);
		// Bug fix: with a single-window buffer samplesCount is 0 and the division
		// below was 0/0 -> NaN; the old "< 0" guard could never fire.
		if (samplesCount <= 0) {
			return 0;
		}
		double value = 0;
		for (int i = 1; i <= samplesCount; i++) {
			value += samples[i];
		}
		return value / samplesCount;
	}

	/** The value of the in-progress window. */
	@Override
	public synchronized long getCurrentCount() {
		updateSamples();
		return samples[0];
	}

	/** Lifetime average value per window. */
	@Override
	public synchronized double getTotalAveragePerSecond() {
		updateSamples();
		return (double) totalSamplesSum / (double) totalSamplesCount;
	}

	/** Immutable copy of the current state (returns itself if already frozen). */
	public synchronized AtomicTimeAbsoluteSamplesSnapshot snapshot() {
		// Consistent with AtomicTimeIncrementalSamples: a snapshot is already frozen.
		if (isSnapshot) {
			return this;
		}
		return new AtomicTimeAbsoluteSamples(startTime, Arrays.copyOf(this.samples, this.samples.length), sampleTime, currentSampleStartTime, totalSamplesSum, totalSamplesCount, true);
	}
}

View File

@ -1,10 +0,0 @@
package org.warp.commonutils.metrics;
/**
 * Read-only view of a sliding window of absolute (gauge-style) time samples.
 */
public interface AtomicTimeAbsoluteSamplesSnapshot {

	/** Average over the given time range. */
	double getAveragePerSecond(long timeRange);

	/** Value of the in-progress sample window. */
	long getCurrentCount();

	/** Lifetime average. */
	double getTotalAveragePerSecond();
}

View File

@ -1,135 +0,0 @@
package org.warp.commonutils.metrics;
import java.util.Arrays;
/**
 * Sliding window of incremental (counter-style) event samples over time.
 * {@code samples[0]} is the current window; elapsed windows start back at 0.
 * All public methods are synchronized; a snapshot instance never updates.
 */
public class AtomicTimeIncrementalSamples implements AtomicTimeIncrementalSamplesSnapshot {

	protected final boolean isSnapshot;
	// -1 until the first access; then the epoch (ms, from System.nanoTime) of the first sample.
	protected long startTime;
	protected final long[] samples;
	// Length of one sample window, in milliseconds.
	protected final int sampleTime;
	protected long currentSampleStartTime;
	protected long totalEvents;

	/**
	 * @param sampleTime   length of one sample window, in milliseconds (must be >= 1)
	 * @param samplesCount number of sample windows kept (must be >= 1)
	 */
	public AtomicTimeIncrementalSamples(int sampleTime, int samplesCount) {
		// Validate before allocating: previously a negative samplesCount surfaced as
		// NegativeArraySizeException instead of the intended IndexOutOfBoundsException.
		if (samplesCount < 1) throw new IndexOutOfBoundsException();
		if (sampleTime < 1) throw new IndexOutOfBoundsException();
		this.samples = new long[samplesCount];
		this.sampleTime = sampleTime;
		startTime = -1;
		this.isSnapshot = false;
	}

	/** Reconstructs an instance (typically a snapshot) from raw state. */
	public AtomicTimeIncrementalSamples(long startTime, long[] samples, int sampleTime, long currentSampleStartTime, long totalEvents, boolean isSnapshot) {
		this.startTime = startTime;
		this.samples = samples;
		this.sampleTime = sampleTime;
		this.currentSampleStartTime = currentSampleStartTime;
		this.totalEvents = totalEvents;
		this.isSnapshot = isSnapshot;
	}

	/**
	 * Rolls the window forward to the current time; elapsed windows are zeroed
	 * (incremental semantics: each window counts only its own events).
	 * No-op on snapshots.
	 */
	protected synchronized void updateSamples() {
		checkStarted();
		if (isSnapshot) {
			return;
		}
		long currentTime = System.nanoTime() / 1000000L;
		long timeDiff = currentTime - currentSampleStartTime;
		// Round the elapsed time down to a whole number of windows.
		long timeToShift = timeDiff - (timeDiff % sampleTime);
		int shiftCount = (int) (timeToShift / sampleTime);
		if (currentTime - (currentSampleStartTime + timeToShift) > sampleTime) {
			throw new IndexOutOfBoundsException("Time sample bigger than " + sampleTime + "! It's " + (currentTime - (currentSampleStartTime + timeToShift)));
		}
		if (shiftCount > 0) {
			shiftSamples(shiftCount);
			currentSampleStartTime += timeToShift;
		}
	}

	/** Lazily initializes the clock on first use. */
	protected synchronized void checkStarted() {
		if (startTime == -1) {
			this.startTime = System.nanoTime() / 1000000L;
			this.currentSampleStartTime = startTime;
		}
	}

	/** Shifts the window right by {@code shiftCount}, zero-filling new windows. */
	protected synchronized void shiftSamples(int shiftCount) {
		checkStarted();
		if (samples.length - shiftCount > 0) {
			System.arraycopy(samples, 0, samples, shiftCount, samples.length - shiftCount);
			Arrays.fill(samples, 0, shiftCount, 0);
		} else {
			Arrays.fill(samples, 0);
		}
	}

	/** Records {@code count} events in the current window and the lifetime total. */
	public synchronized void increment(long count) {
		updateSamples();
		samples[0] += count;
		totalEvents += count;
	}

	/**
	 * Average events per second over the completed windows covering roughly
	 * {@code timeRange} milliseconds (the in-progress window is excluded).
	 */
	@Override
	public synchronized double getAveragePerSecond(long timeRange) {
		updateSamples();
		double preciseTimeRange = timeRange;
		// Clamp if the requested range is bigger than the data collected since start.
		if (currentSampleStartTime - preciseTimeRange < startTime) {
			preciseTimeRange = currentSampleStartTime - startTime;
		}
		double samplesCount = Math.min(Math.max(preciseTimeRange / sampleTime, 1d), samples.length - 1);
		// Bug fix: with a single-window buffer samplesCount is 0, roundedTimeRange
		// was 0, and the return was 0/0 -> NaN; the old "< 0" guard could never fire.
		if (samplesCount <= 0) {
			return 0;
		}
		double roundedTimeRange = samplesCount * sampleTime;
		double value = 0;
		for (int i = 1; i <= samplesCount; i++) {
			value += samples[i];
		}
		return (value / roundedTimeRange) * 1000d;
	}

	/** Total events in the windows covering roughly {@code timeRange} milliseconds. */
	@Override
	public synchronized long getApproximateCount(long timeRange) {
		updateSamples();
		long samplesCount = Math.min(Math.max(timeRange / sampleTime, 1L), samples.length);
		long value = 0;
		for (int i = 0; i < samplesCount; i++) {
			value += samples[i];
		}
		return value;
	}

	/** Lifetime event count. */
	@Override
	public synchronized long getTotalCount() {
		updateSamples();
		return totalEvents;
	}

	/** Lifetime events per second (0 before any window has completed). */
	@Override
	public synchronized double getTotalAveragePerSecond() {
		updateSamples();
		if (currentSampleStartTime == startTime) {
			return 0;
		}
		return ((double) totalEvents) / (double) ((currentSampleStartTime - startTime) / 1000D);
	}

	/** Immutable copy of the current state (returns itself if already frozen). */
	public synchronized AtomicTimeIncrementalSamplesSnapshot snapshot() {
		if (isSnapshot) {
			return this;
		}
		return new AtomicTimeIncrementalSamples(startTime, Arrays.copyOf(this.samples, this.samples.length), sampleTime, currentSampleStartTime, totalEvents, true);
	}
}

View File

@ -1,12 +0,0 @@
package org.warp.commonutils.metrics;
/**
 * Read-only view of a sliding window of incremental (counter-style) time samples.
 */
public interface AtomicTimeIncrementalSamplesSnapshot {

	/** Average event rate over the given time range. */
	double getAveragePerSecond(long timeRange);

	/** Approximate event count within the given time range. */
	long getApproximateCount(long timeRange);

	/** Lifetime event count. */
	long getTotalCount();

	/** Lifetime average event rate. */
	double getTotalAveragePerSecond();
}

View File

@ -1,24 +0,0 @@
package org.warp.commonutils.random;
/**
 * Hashing helpers.
 */
public final class HashUtil {

	private HashUtil() {
	}

	/**
	 * Maps an object's hash code to the range {@code [0, upperBoundExclusive)}.
	 *
	 * @param o                   object whose {@code hashCode()} is used
	 * @param upperBoundExclusive positive exclusive upper bound
	 */
	public static int boundedHash(Object o, int upperBoundExclusive) {
		int mixed = spread(o.hashCode());
		return Math.abs(mixed % upperBoundExclusive);
	}

	/**
	 * Protection against poor hash functions: spreads the bits to regularize both
	 * segment and index locations, using a variant of the single-word Wang/Jenkins
	 * hash (as used by java.util.concurrent.ConcurrentHashMap).
	 */
	private static int spread(int h) {
		h += (h << 15) ^ 0xffffcd7d;
		h ^= (h >>> 10);
		h += (h << 3);
		h ^= (h >>> 6);
		h += (h << 2) + (h << 14);
		h ^= (h >>> 16);
		return h;
	}
}

View File

@ -1,137 +0,0 @@
package org.warp.commonutils.random;
import java.math.BigInteger;
import java.util.Iterator;
import java.util.Random;
import org.jetbrains.annotations.NotNull;
/**
* Linear feedback shift register
* <p>
* Taps can be found at: See http://www.xilinx.com/support/documentation/application_notes/xapp052.pdf See
* http://mathoverflow.net/questions/46961/how-are-taps-proven-to-work-for-lfsrs/46983#46983 See
* http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm See
* http://www.yikes.com/~ptolemy/lfsr_web/index.htm See http://seanerikoconnor.freeservers.com/Mathematics/AbstractAlgebra/PrimitivePolynomials/overview.html
*
* @author OldCurmudgeon
*/
/**
 * Linear feedback shift register (Galois form).
 * <p>
 * Taps can be found at: See http://www.xilinx.com/support/documentation/application_notes/xapp052.pdf See
 * http://mathoverflow.net/questions/46961/how-are-taps-proven-to-work-for-lfsrs/46983#46983 See
 * http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm See
 * http://www.yikes.com/~ptolemy/lfsr_web/index.htm See http://seanerikoconnor.freeservers.com/Mathematics/AbstractAlgebra/PrimitivePolynomials/overview.html
 *
 * @author OldCurmudgeon
 */
public class LFSR implements Iterable<BigInteger> {

	private static final Random random = new Random();

	// Bit pattern for taps.
	private final BigInteger taps;
	// Where to start (and end).
	private final BigInteger start;

	/** Random 32-bit register with a random starting state. */
	public static LFSR randomInt() {
		return random(32, random.nextInt());
	}

	/** Random 64-bit register with a random starting state. */
	public static LFSR randomLong() {
		return random(64, random.nextLong());
	}

	/**
	 * Random 50-bit register with a non-negative starting state.
	 * NOTE(review): despite the name this seeds from an int, like the original —
	 * kept for compatibility.
	 */
	public static LFSR randomPositiveLong() {
		// Bug fix: Math.abs(random.nextInt()) is still negative when nextInt()
		// returns Integer.MIN_VALUE; masking the sign bit guarantees >= 0.
		return random(50, random.nextInt() & Integer.MAX_VALUE);
	}

	/**
	 * Builds a random polynomial of degree {@code bitsSize} with the 2^0 and
	 * 2^bitsSize coefficients always set (NOTE(review): randomness does not
	 * guarantee primitivity — TODO confirm intended).
	 */
	public static BigInteger randomPrimitive(int bitsSize) {
		// Build the BigInteger.
		BigInteger primitive = BigInteger.ZERO;
		for (int bitNumber = 0; bitNumber <= bitsSize; bitNumber++) {
			if (random.nextBoolean() || bitNumber == 0 || bitNumber == bitsSize) {
				primitive = primitive.or(BigInteger.ONE.shiftLeft(bitNumber));
			}
		}
		return primitive;
	}

	public static LFSR random(int bitsSize, long startNumber) {
		return new LFSR(randomPrimitive(bitsSize), BigInteger.valueOf(startNumber));
	}

	// The poly must be primitive to span the full sequence.
	public LFSR(BigInteger primitivePoly, BigInteger start) {
		// Where to start from (and stop). Zero would trap the register, so use ONE.
		this.start = start.equals(BigInteger.ZERO) ? BigInteger.ONE : start;
		// Knock off the 2^0 coefficient of the polynomial for the TAP.
		this.taps = primitivePoly.shiftRight(1);
	}

	@NotNull
	@Override
	public LFSRIterator iterator() {
		return new LFSRIterator(start);
	}

	/** Iterates the register states; stops just before cycling back to the seed. */
	public class LFSRIterator implements Iterator<BigInteger> {

		// The last one we returned.
		private BigInteger last = null;
		// The next one to return.
		private BigInteger next = null;

		public LFSRIterator(BigInteger start) {
			// Do not return the seed.
			last = start;
		}

		@Override
		public boolean hasNext() {
			if (next == null) {
				/*
				 * Uses the Galois form.
				 *
				 * Shift last right one.
				 *
				 * If the bit shifted out was a 1 - xor with the tap mask.
				 */
				boolean shiftedOutA1 = last.testBit(0);
				// Shift right.
				next = last.shiftRight(1);
				if (shiftedOutA1) {
					// Tap!
					next = next.xor(taps);
				}
				// Never give them `start` again.
				if (next.equals(start)) {
					// Could set a finished flag here too.
					next = null;
				}
			}
			return next != null;
		}

		@Override
		public BigInteger next() {
			// Remember this one.
			last = hasNext() ? next : null;
			// Don't deliver it again.
			next = null;
			return last;
		}

		/** Resumes iteration from an arbitrary register state. */
		public BigInteger next(BigInteger last) {
			this.last = last;
			next = null;
			return next();
		}

		@Override
		public void remove() {
			throw new UnsupportedOperationException("Not supported.");
		}

		@Override
		public String toString() {
			return LFSR.this.toString() + "[" + (last != null ? last.toString(16) : "") + "-" + (next != null ? next
					.toString(16) : "") + "]";
		}
	}

	@Override
	public String toString() {
		return "(" + taps.toString(32) + ")-" + start.toString(32);
	}
}

View File

@ -1,121 +0,0 @@
package org.warp.commonutils.range;
import it.unimi.dsi.fastutil.objects.Object2ObjectMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectMaps;
import it.unimi.dsi.fastutil.objects.Object2ObjectOpenHashMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectRBTreeMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import it.unimi.dsi.fastutil.objects.ObjectOpenHashSet;
import java.util.Comparator;
import java.util.function.Function;
/**
 * A set of non-overlapping integer ranges, each carrying a value of type {@code T},
 * supporting range deletion and in-place transformation with value splitting.
 * NOTE(review): not thread-safe; ranges are treated as [start, end) style
 * half-open spans (zero-width ranges are dropped) — TODO confirm.
 */
public class MappedRanges<T> {

	private final Object2ObjectMap<Range, T> ranges;

	/** Starts with one range [start, end] mapped to {@code value}. */
	public MappedRanges(int start, int end, T value) {
		if (start > end) {
			throw new IndexOutOfBoundsException();
		}
		this.ranges = new Object2ObjectOpenHashMap<>();
		ranges.put(new Range(start, end), value);
	}

	/**
	 * Removes [start, end] from every stored range, splitting/shrinking overlapping
	 * ranges as needed.
	 *
	 * @param replaceWhenSplitting applied to the value of every fragment kept after a split
	 * @param cloneWhenSplitting   produces a copy of the value when one range splits in two
	 */
	public void deleteRange(final int start, final int end, Function<T, T> replaceWhenSplitting, Function<T, T> cloneWhenSplitting) {
		if (start > end) {
			throw new IndexOutOfBoundsException();
		}
		// Collect modifications first: the map cannot be mutated inside forEach.
		Object2ObjectOpenHashMap<Range, T> rangesToAdd = new Object2ObjectOpenHashMap<>();
		ObjectOpenHashSet<Range> rangesToDelete = new ObjectOpenHashSet<>();
		ranges.forEach((range, value) -> {
			if (range.start <= end && range.end >= start) {
				if (range.start >= start && range.end <= end) {
					// delete the range
					rangesToDelete.add(range);
				} else if (range.start <= start && range.end >= end) {
					// cut the hole
					rangesToDelete.add(range);
					rangesToAdd.put(new Range(range.start, start), value);
					rangesToAdd.put(new Range(end, range.end), cloneWhenSplitting.apply(value));
				} else if (range.start <= start && range.end <= end && range.end > start) {
					// shrink the right border
					rangesToDelete.add(range);
					rangesToAdd.put(new Range(range.start, start), value);
				} else if (range.start >= start && range.end >= end && range.start < end) {
					// shrink the left border
					rangesToDelete.add(range);
					rangesToAdd.put(new Range(end, range.end), value);
				}
			}
		});
		for (Range range : rangesToDelete) {
			ranges.remove(range);
		}
		rangesToAdd.forEach((range, value) -> {
			// Zero-width fragments are silently dropped.
			if (canAddRange(range)) {
				ranges.put(range, replaceWhenSplitting.apply(value));
			}
		});
	}

	/**
	 * Applies {@code replaceWhenOverlapping} to the part of every stored range that
	 * overlaps [start, end], splitting ranges at the boundaries so untouched parts
	 * keep their original value.
	 */
	public void transformRange(int start, int end, Function<T, T> replaceWhenOverlapping, Function<T, T> cloneWhenSplitting) {
		if (start > end) {
			throw new IndexOutOfBoundsException();
		}
		// Collect modifications first: the map cannot be mutated inside forEach.
		Object2ObjectOpenHashMap<Range, T> rangesToTransform = new Object2ObjectOpenHashMap<>();
		Object2ObjectOpenHashMap<Range, T> rangesToAdd = new Object2ObjectOpenHashMap<>();
		ObjectOpenHashSet<Range> rangesToRemove = new ObjectOpenHashSet<>();
		ranges.forEach((range, value) -> {
			if (range.start <= end && range.end >= start) {
				if (range.start >= start && range.end <= end) {
					// transform the range
					rangesToTransform.put(range, replaceWhenOverlapping.apply(value));
				} else if (range.start <= start && range.end >= end) {
					// transform the middle
					rangesToRemove.add(range);
					rangesToAdd.put(new Range(range.start, start), value);
					rangesToTransform.put(new Range(start, end), replaceWhenOverlapping.apply(cloneWhenSplitting.apply(value)));
					rangesToAdd.put(new Range(end, range.end), cloneWhenSplitting.apply(value));
				} else if (range.start <= start && range.end <= end && range.end > start) {
					// transform the right
					rangesToRemove.add(range);
					rangesToAdd.put(new Range(range.start, start), value);
					rangesToTransform.put(new Range(start, range.end), replaceWhenOverlapping.apply(cloneWhenSplitting.apply(value)));
				} else if (range.start >= start && range.end >= end && range.start < end) {
					// transform the left
					rangesToRemove.add(range);
					rangesToTransform.put(new Range(range.start, end), replaceWhenOverlapping.apply(cloneWhenSplitting.apply(value)));
					rangesToAdd.put(new Range(end, range.end), value);
				} else {
					// do not transform
				}
			}
		});
		rangesToRemove.forEach((range) -> {
			ranges.remove(range);
		});
		rangesToAdd.forEach((range, value) -> {
			// Zero-width fragments are silently dropped.
			if (canAddRange(range)) {
				ranges.put(range, value);
			}
		});
		// Transformed fragments are always stored, even when zero-width.
		rangesToTransform.forEach((range, value) -> {
			ranges.put(range, value);
		});
	}

	// NOTE(review): this overload appears unused within this class — TODO confirm callers.
	private boolean canAddRange(UnmodifiableRange range) {
		return range.getStart() != range.getEnd();
	}

	/** A range is storable only if it is non-empty. */
	private boolean canAddRange(Range range) {
		return range.getStart() != range.getEnd();
	}

	/** Returns an unmodifiable view of the ranges, sorted by start. */
	public Object2ObjectMap<UnmodifiableRange, T> getRanges() {
		Object2ObjectSortedMap<UnmodifiableRange, T> a = new Object2ObjectRBTreeMap<>(Comparator.comparingLong(UnmodifiableRange::getStart));
		ranges.forEach((range, value) -> a.put(range.unmodifiableClone(), value));
		return Object2ObjectMaps.unmodifiable(a);
	}
}

View File

@ -1,60 +0,0 @@
package org.warp.commonutils.range;
import java.util.Objects;
import java.util.StringJoiner;
import org.warp.commonutils.error.IndexOutOfBoundsException;
/**
 * A mutable pair of long bounds with {@code start <= end}.
 * NOTE(review): fields are public and mutable, but equals/hashCode depend on
 * them — instances used as hash-map keys must not be mutated.
 */
public class Range {

	public long start;
	public long end;

	/**
	 * @throws IndexOutOfBoundsException (project variant) if {@code start > end}
	 */
	public Range(long start, long end) {
		if (start > end) {
			throw new IndexOutOfBoundsException(start, 0, end);
		}
		this.start = start;
		this.end = end;
	}

	public long getStart() {
		return start;
	}

	public long getEnd() {
		return end;
	}

	@Override
	public boolean equals(Object o) {
		if (this == o) {
			return true;
		}
		if (o == null || getClass() != o.getClass()) {
			return false;
		}
		Range range = (Range) o;
		return start == range.start && end == range.end;
	}

	@Override
	public int hashCode() {
		return Objects.hash(start, end);
	}

	@Override
	public String toString() {
		return new StringJoiner(", ", Range.class.getSimpleName() + "[", "]").add("start=" + start).add("end=" + end)
				.toString();
	}

	// Copy constructor semantics: does not call super.clone() and the class does
	// not implement Cloneable — this is a plain copy, hence the suppression.
	@SuppressWarnings("MethodDoesntCallSuperMethod")
	@Override
	public Range clone() {
		return new Range(start, end);
	}

	/** Immutable copy of this range. */
	public UnmodifiableRange unmodifiableClone() {
		return new UnmodifiableRange(start, end);
	}
}

View File

@ -1,104 +0,0 @@
package org.warp.commonutils.range;
import it.unimi.dsi.fastutil.objects.ObjectArrayList;
import it.unimi.dsi.fastutil.objects.ObjectRBTreeSet;
import it.unimi.dsi.fastutil.objects.ObjectSortedSet;
import it.unimi.dsi.fastutil.objects.ObjectSortedSets;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import org.warp.commonutils.error.IndexOutOfBoundsException;
/**
 * A set of non-overlapping long ranges supporting addition (with merging of
 * overlapping ranges) and deletion (with splitting).
 * NOTE(review): not thread-safe; zero-width ranges are silently dropped.
 */
public class Ranges {

	private final ObjectArrayList<Range> ranges;

	/** Starts with the single range [start, end]. */
	public Ranges(long start, long end) {
		if (start > end) {
			throw new IndexOutOfBoundsException(start, 0, end);
		}
		this.ranges = new ObjectArrayList<>();
		ranges.add(new Range(start, end));
	}

	public void addRange(Range range) {
		addRange(range.start, range.end);
	}

	/**
	 * Adds [start, end], absorbing every existing range it touches into one
	 * merged range.
	 */
	public void addRange(long start, long end) {
		if (start > end) {
			throw new IndexOutOfBoundsException(start, 0, end);
		}
		// Grow the new range to cover every overlapping range, removing the absorbed ones.
		long rangeStart = start;
		long rangeEnd = end;
		var it = ranges.iterator();
		while (it.hasNext()) {
			Range range = it.next();
			if (range.start <= end && range.end >= start) {
				boolean remove = false;
				if (range.start < rangeStart && range.end >= rangeStart) {
					rangeStart = range.start;
					remove = true;
				}
				if (range.end > rangeEnd && range.start <= rangeEnd) {
					rangeEnd = range.end;
					remove = true;
				}
				// Iterator.remove keeps the traversal valid while mutating the list.
				if (remove) {
					it.remove();
				}
			}
		}
		addRangeIfNotZero(new Range(rangeStart, rangeEnd));
	}

	/**
	 * Removes [start, end] from every stored range, splitting or shrinking
	 * overlapping ranges as needed.
	 */
	public void deleteRange(final long start, final long end) {
		if (start > end) {
			throw new IndexOutOfBoundsException(start);
		}
		// Fragments created by splits are added after the iteration completes.
		List<Range> rangesToAdd = new ArrayList<>(ranges.size());
		var it = ranges.iterator();
		while (it.hasNext()) {
			Range range = it.next();
			if (range.start <= end && range.end >= start) {
				if (range.start >= start && range.end <= end) {
					// delete the range
					it.remove();
				} else if (range.start <= start && range.end >= end) {
					// cut the hole
					it.remove();
					rangesToAdd.add(new Range(range.start, start));
					rangesToAdd.add(new Range(end, range.end));
				} else if (range.start <= start && range.end <= end && range.end > start) {
					// shrink the right border
					it.remove();
					rangesToAdd.add(new Range(range.start, start));
				} else if (range.start >= start && range.end >= end && range.start < end) {
					// shrink the left border
					it.remove();
					rangesToAdd.add(new Range(end, range.end));
				}
			}
		}
		for (Range rangeToAdd : rangesToAdd) {
			addRangeIfNotZero(rangeToAdd);
		}
	}

	/**
	 * This methods does not check overlapping ranges! It's used only internally to skip empty ranges
	 *
	 * @param range
	 */
	private void addRangeIfNotZero(Range range) {
		if (range.start != range.end) {
			ranges.add(range);
		}
	}

	/** Returns an unmodifiable view of the ranges, sorted by start. */
	public ObjectSortedSet<UnmodifiableRange> getRanges() {
		ObjectSortedSet<UnmodifiableRange> a = new ObjectRBTreeSet<>(Comparator.comparingLong(UnmodifiableRange::getStart));
		ranges.forEach((range) -> a.add(range.unmodifiableClone()));
		return ObjectSortedSets.unmodifiable(a);
	}
}

View File

@ -1,54 +0,0 @@
package org.warp.commonutils.range;
import java.util.Objects;
import java.util.StringJoiner;
import org.warp.commonutils.error.IndexOutOfBoundsException;
/**
 * Immutable pair of long bounds with {@code start <= end}.
 * The mutable counterpart is {@link Range}.
 */
public class UnmodifiableRange {

	private final long start;
	private final long end;

	/**
	 * @throws IndexOutOfBoundsException (project variant) if {@code start > end}
	 */
	public UnmodifiableRange(long start, long end) {
		if (start > end) {
			throw new IndexOutOfBoundsException(start, 0, end);
		}
		this.start = start;
		this.end = end;
	}

	public long getStart() {
		return start;
	}

	public long getEnd() {
		return end;
	}

	@Override
	public boolean equals(Object o) {
		if (o == this) {
			return true;
		}
		if (o == null) {
			return false;
		}
		if (getClass() != o.getClass()) {
			return false;
		}
		UnmodifiableRange other = (UnmodifiableRange) o;
		return this.start == other.start && this.end == other.end;
	}

	@Override
	public int hashCode() {
		return Objects.hash(start, end);
	}

	@Override
	public String toString() {
		StringJoiner joiner = new StringJoiner(", ", UnmodifiableRange.class.getSimpleName() + "[", "]");
		joiner.add("start=" + start);
		joiner.add("end=" + end);
		return joiner.toString();
	}

	/** Mutable copy of this range. */
	public Range toRange() {
		return new Range(start, end);
	}
}

View File

@ -1,19 +0,0 @@
package org.warp.commonutils.serialization;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
/**
 * Length-prefixed UTF-8 string serialization helpers. Unlike
 * {@link DataOutputStream#writeUTF}, these use a 4-byte length prefix and true
 * UTF-8 (not modified UTF-8), so strings longer than 65535 encoded bytes work.
 */
public class UTFUtils {

	/** Writes {@code utf} as a 4-byte big-endian length followed by UTF-8 bytes. */
	public static final void writeUTF(DataOutputStream out, String utf) throws IOException {
		byte[] bytes = utf.getBytes(StandardCharsets.UTF_8);
		out.writeInt(bytes.length);
		out.write(bytes);
	}

	/**
	 * Reads a string previously written by {@link #writeUTF}.
	 *
	 * @throws java.io.EOFException if the stream ends before {@code len} bytes are read
	 */
	public static final String readUTF(DataInputStream in) throws IOException {
		int len = in.readInt();
		// readFully instead of readNBytes: readNBytes is Java 11+ (this project
		// targets Java 8) and silently returns short on EOF, producing a truncated
		// string; readFully throws EOFException instead.
		byte[] bytes = new byte[len];
		in.readFully(bytes);
		return new String(bytes, StandardCharsets.UTF_8);
	}
}

View File

@ -1,11 +0,0 @@
package org.warp.commonutils.stream;
import java.io.DataInput;
import java.io.DataOutput;
/**
 * A bidirectional data stream: both a {@link DataInput} and a {@link DataOutput}.
 */
public interface DataInputOutput extends DataInput, DataOutput {

	/** The read side of this stream. */
	DataInput getIn();

	/** The write side of this stream. */
	DataOutput getOut();
}

View File

@ -1,173 +0,0 @@
package org.warp.commonutils.stream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.jetbrains.annotations.NotNull;
/**
 * {@link DataInputOutput} backed by a separate {@link DataInput} and
 * {@link DataOutput}, delegating every read to {@code in} and every write to
 * {@code out}. Not thread-safe beyond what the delegates provide.
 */
public class DataInputOutputImpl implements DataInputOutput {

	private final DataInput in;
	private final DataOutput out;

	public DataInputOutputImpl(DataInput in, DataOutput out) {
		this.in = in;
		this.out = out;
	}

	// Returns this wrapper rather than the raw delegate, so callers of getIn()
	// still go through this object (contrast DataInputOutputStream.getIn()).
	@Override
	public DataInput getIn() {
		return this;
	}

	@Override
	public DataOutput getOut() {
		return this;
	}

	// ----- DataInput delegation -----

	@Override
	public void readFully(byte[] bytes) throws IOException {
		in.readFully(bytes);
	}

	@Override
	public void readFully(byte[] bytes, int i, int i1) throws IOException {
		in.readFully(bytes, i, i1);
	}

	@Override
	public int skipBytes(int i) throws IOException {
		return in.skipBytes(i);
	}

	@Override
	public boolean readBoolean() throws IOException {
		return in.readBoolean();
	}

	@Override
	public byte readByte() throws IOException {
		return in.readByte();
	}

	@Override
	public int readUnsignedByte() throws IOException {
		return in.readUnsignedByte();
	}

	@Override
	public short readShort() throws IOException {
		return in.readShort();
	}

	@Override
	public int readUnsignedShort() throws IOException {
		return in.readUnsignedShort();
	}

	@Override
	public char readChar() throws IOException {
		return in.readChar();
	}

	@Override
	public int readInt() throws IOException {
		return in.readInt();
	}

	@Override
	public long readLong() throws IOException {
		return in.readLong();
	}

	@Override
	public float readFloat() throws IOException {
		return in.readFloat();
	}

	@Override
	public double readDouble() throws IOException {
		return in.readDouble();
	}

	@Override
	public String readLine() throws IOException {
		return in.readLine();
	}

	@NotNull
	@Override
	public String readUTF() throws IOException {
		return in.readUTF();
	}

	// ----- DataOutput delegation -----

	@Override
	public void write(int i) throws IOException {
		out.write(i);
	}

	@Override
	public void write(byte[] bytes) throws IOException {
		out.write(bytes);
	}

	@Override
	public void write(byte[] bytes, int i, int i1) throws IOException {
		out.write(bytes, i, i1);
	}

	@Override
	public void writeBoolean(boolean b) throws IOException {
		out.writeBoolean(b);
	}

	@Override
	public void writeByte(int i) throws IOException {
		out.writeByte(i);
	}

	@Override
	public void writeShort(int i) throws IOException {
		out.writeShort(i);
	}

	@Override
	public void writeChar(int i) throws IOException {
		out.writeChar(i);
	}

	@Override
	public void writeInt(int i) throws IOException {
		out.writeInt(i);
	}

	@Override
	public void writeLong(long l) throws IOException {
		out.writeLong(l);
	}

	@Override
	public void writeFloat(float v) throws IOException {
		out.writeFloat(v);
	}

	@Override
	public void writeDouble(double v) throws IOException {
		out.writeDouble(v);
	}

	@Override
	public void writeBytes(@NotNull String s) throws IOException {
		out.writeBytes(s);
	}

	@Override
	public void writeChars(@NotNull String s) throws IOException {
		out.writeChars(s);
	}

	@Override
	public void writeUTF(@NotNull String s) throws IOException {
		out.writeUTF(s);
	}
}

View File

@ -1,103 +0,0 @@
package org.warp.commonutils.stream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.jetbrains.annotations.NotNull;
/**
 * {@link DataInputOutput} over stream types: extends {@link DataOutputStream}
 * for the write side (wrapping {@code out}) and delegates every read to the
 * wrapped {@link DataInputStream}.
 */
public class DataInputOutputStream extends DataOutputStream implements DataInputOutput {

	private final DataInputStream in;

	public DataInputOutputStream(DataInputStream in, DataOutputStream out) {
		super(out);
		this.in = in;
	}

	// Returns the raw input delegate (contrast DataInputOutputImpl.getIn(),
	// which returns the wrapper itself).
	@Override
	public DataInputStream getIn() {
		return in;
	}

	// The write side is this object itself (it extends DataOutputStream).
	@Override
	public DataOutputStream getOut() {
		return this;
	}

	@Override
	public void readFully(byte[] bytes) throws IOException {
		in.readFully(bytes);
	}

	@Override
	public void readFully(byte[] bytes, int i, int i1) throws IOException {
		in.readFully(bytes, i, i1);
	}

	@Override
	public int skipBytes(int i) throws IOException {
		return in.skipBytes(i);
	}

	@Override
	public boolean readBoolean() throws IOException {
		return in.readBoolean();
	}

	@Override
	public byte readByte() throws IOException {
		return in.readByte();
	}

	@Override
	public int readUnsignedByte() throws IOException {
		return in.readUnsignedByte();
	}

	@Override
	public short readShort() throws IOException {
		return in.readShort();
	}

	@Override
	public int readUnsignedShort() throws IOException {
		return in.readUnsignedShort();
	}

	@Override
	public char readChar() throws IOException {
		return in.readChar();
	}

	@Override
	public int readInt() throws IOException {
		return in.readInt();
	}

	@Override
	public long readLong() throws IOException {
		return in.readLong();
	}

	@Override
	public float readFloat() throws IOException {
		return in.readFloat();
	}

	@Override
	public double readDouble() throws IOException {
		return in.readDouble();
	}

	@Deprecated
	@Override
	public String readLine() throws IOException {
		return in.readLine();
	}

	@NotNull
	@Override
	public String readUTF() throws IOException {
		return in.readUTF();
	}
}

View File

@ -1,93 +0,0 @@
package org.warp.commonutils.type;
import it.unimi.dsi.fastutil.objects.ObjectOpenHashSet;
import it.unimi.dsi.fastutil.objects.ObjectSets.UnmodifiableSet;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.jetbrains.annotations.NotNull;
/**
 * Immutable-by-convention wrapper around a {@code byte[]} providing value-based
 * {@code equals}/{@code hashCode}, plus converters between byte-array and
 * Bytes-based collections.
 * NOTE(review): the wrapped array itself is exposed and not defensively copied;
 * callers must not mutate it after wrapping.
 */
public class Bytes {

	public final byte[] data;

	public Bytes(@NotNull byte[] data) {
		this.data = data;
	}

	/** Wraps every key and value of {@code oldMap} into {@link Bytes}. */
	public static Map<? extends Bytes, ? extends Bytes> ofMap(Map<byte[], byte[]> oldMap) {
		var newMap = new HashMap<Bytes, Bytes>(oldMap.size());
		oldMap.forEach((key, value) -> newMap.put(new Bytes(key), new Bytes(value)));
		return newMap;
	}

	/** Wraps every key and value of {@code oldMap} into {@link Bytes}. */
	public static UnmodifiableMap<? extends Bytes, ? extends Bytes> ofMap(UnmodifiableIterableMap<byte[], byte[]> oldMap) {
		Bytes[] keys = new Bytes[oldMap.size()];
		Bytes[] values = new Bytes[oldMap.size()];
		// IntWrapper works around the effectively-final requirement inside the lambda.
		IntWrapper i = new IntWrapper(0);
		oldMap.forEach((key, value) -> {
			keys[i.var] = new Bytes(key);
			values[i.var] = new Bytes(value);
			i.var++;
		});
		return UnmodifiableMap.of(keys, values);
	}

	/** Wraps every element of {@code oldList} into {@link Bytes}. */
	public static List<? extends Bytes> ofList(List<byte[]> oldList) {
		var newList = new ArrayList<Bytes>(oldList.size());
		oldList.forEach((item) -> newList.add(new Bytes(item)));
		return newList;
	}

	/** Wraps every element of {@code oldSet} into {@link Bytes} (deduplicating by content). */
	public static Set<? extends Bytes> ofSet(Set<byte[]> oldSet) {
		var newSet = new ObjectOpenHashSet<Bytes>(oldSet.size());
		oldSet.forEach((item) -> newSet.add(new Bytes(item)));
		return newSet;
	}

	/** Unwraps a set of {@link Bytes} back into raw byte arrays. */
	public static UnmodifiableIterableSet<byte[]> toIterableSet(UnmodifiableSet<Bytes> set) {
		byte[][] resultItems = new byte[set.size()][];
		var it = set.iterator();
		int i = 0;
		while (it.hasNext()) {
			var item = it.next();
			resultItems[i] = item.data;
			i++;
		}
		return UnmodifiableIterableSet.of(resultItems);
	}

	/** Unwraps a collection of {@link Bytes} into a byte-array matrix. */
	public static byte[][] toByteArray(Collection<Bytes> value) {
		// toArray(new Bytes[0]) instead of toArray(Bytes[]::new): the IntFunction
		// overload is Collection.toArray(IntFunction), added in Java 11, and this
		// project targets Java 8.
		Bytes[] valueBytesArray = value.toArray(new Bytes[0]);
		byte[][] convertedResult = new byte[valueBytesArray.length][];
		for (int i = 0; i < valueBytesArray.length; i++) {
			convertedResult[i] = valueBytesArray[i].data;
		}
		return convertedResult;
	}

	@Override
	public boolean equals(Object o) {
		if (this == o) {
			return true;
		}
		if (o == null || getClass() != o.getClass()) {
			return false;
		}
		Bytes that = (Bytes) o;
		return Arrays.equals(data, that.data);
	}

	@Override
	public int hashCode() {
		return Arrays.hashCode(data);
	}

	@Override
	public String toString() {
		return Arrays.toString(data);
	}
}

View File

@ -1,205 +0,0 @@
package org.warp.commonutils.type;
import com.google.common.collect.Queues;
import java.lang.reflect.Array;
import java.util.Collection;
import java.util.Iterator;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.jetbrains.annotations.NotNull;
/**
 * A {@link Queue} whose elements are ordered by an associated float score
 * (smallest score dequeued first, per {@link ScoredValue#compareTo(ScoredValue)}).
 *
 * <p>Elements added through the plain {@link Queue} methods receive score 0; use
 * {@link #add(Object, float)} or {@link #offer(Object, float)} to supply a score.
 *
 * <p>Lookup-style operations ({@link #contains(Object)}, {@link #remove(Object)},
 * {@link #containsAll(Collection)}, ...) wrap their argument with score 0 and rely on
 * {@link ScoredValue#equals(Object)} comparing only the value, never the score.
 */
public class FloatPriorityQueue<T> implements Queue<T> {

	private final Queue<ScoredValue<T>> internalQueue;

	/** Returns a new empty queue. */
	public static <T> FloatPriorityQueue<T> of() {
		return new FloatPriorityQueue<T>(0);
	}

	/** Returns a queue containing {@code value} with the given {@code score}. */
	public static <T> FloatPriorityQueue<T> of(T value, float score) {
		var pq = new FloatPriorityQueue<T>(1);
		// Bug fix: this previously called pq.offer(value), silently discarding the score
		// (the element was enqueued with score 0 instead of the requested one).
		pq.offer(value, score);
		return pq;
	}

	/** Returns a queue containing the given scored value. */
	public static <T> FloatPriorityQueue<T> of(ScoredValue<T> value) {
		var pq = new FloatPriorityQueue<T>(1);
		pq.offer(value);
		return pq;
	}

	/** Returns a queue containing all the given scored values. */
	@SafeVarargs // the varargs array is only read, never stored or exposed
	public static <T> FloatPriorityQueue<T> of(ScoredValue<T>... values) {
		var pq = new FloatPriorityQueue<T>(values.length);
		for (ScoredValue<T> value : values) {
			pq.offer(value);
		}
		return pq;
	}

	/** Wraps an existing {@link PriorityQueue} of scored values (no copy is taken). */
	public FloatPriorityQueue(PriorityQueue<ScoredValue<T>> internalQueue) {
		this.internalQueue = internalQueue;
	}

	// Private: used by the synchronize() factories to wrap non-PriorityQueue backends.
	private FloatPriorityQueue(Queue<ScoredValue<T>> internalQueue) {
		this.internalQueue = internalQueue;
	}

	/** Creates an empty queue with the default initial capacity. */
	public FloatPriorityQueue() {
		internalQueue = new PriorityQueue<>();
	}

	/** Creates an empty queue; capacities below 1 are clamped to 1. */
	public FloatPriorityQueue(int initialCapacity) {
		internalQueue = new PriorityQueue<>(Math.max(1, initialCapacity));
	}

	/** Returns a view of {@code queue} whose operations are synchronized. */
	public static <T> FloatPriorityQueue<T> synchronize(FloatPriorityQueue<T> queue) {
		return new FloatPriorityQueue<T>(Queues.synchronizedQueue(queue.internalQueue));
	}

	/** Creates a new empty synchronized queue. */
	public static <T> FloatPriorityQueue<T> synchronizedPq(int initialCapacity) {
		return new FloatPriorityQueue<T>(Queues.synchronizedQueue(new PriorityQueue<>(Math.max(1, initialCapacity))));
	}

	@Override
	public int size() {
		return internalQueue.size();
	}

	@Override
	public boolean isEmpty() {
		return internalQueue.isEmpty();
	}

	/** Membership is by value only: the probe's score (0) is ignored by equals. */
	@Override
	public boolean contains(Object o) {
		return internalQueue.contains(ScoredValue.of(0, o));
	}

	/**
	 * Iterates the backing queue, unwrapping each scored value.
	 * Note: order is the backing heap's internal order, not score order,
	 * and {@code remove()} is unsupported.
	 */
	@NotNull
	@Override
	public Iterator<T> iterator() {
		var it = internalQueue.iterator();
		return new Iterator<T>() {
			@Override
			public boolean hasNext() {
				return it.hasNext();
			}

			@Override
			public T next() {
				return getValueOrNull(it.next());
			}
		};
	}

	// Unwraps a scored value, mapping a null wrapper to null.
	private T getValueOrNull(ScoredValue<T> scoredValue) {
		if (scoredValue == null) {
			return null;
		} else {
			return scoredValue.getValue();
		}
	}

	@NotNull
	@Override
	public Object[] toArray() {
		return internalQueue.stream().map(this::getValueOrNull).toArray(Object[]::new);
	}

	@NotNull
	@Override
	public <T1> T1[] toArray(@NotNull T1[] a) {
		return internalQueue
				.stream()
				.map(this::getValueOrNull)
				.toArray(i -> (T1[]) Array.newInstance(a.getClass().getComponentType(), i));
	}

	/**
	 * Adds with score 0.
	 *
	 * @deprecated prefer {@link #add(Object, float)} so the score is explicit.
	 */
	@Deprecated
	@Override
	public boolean add(T t) {
		return internalQueue.add(ScoredValue.of(0, t));
	}

	/**
	 * Adds with score {@code Integer.MAX_VALUE}.
	 * NOTE(review): the head of this queue is the LOWEST score, so this element is
	 * dequeued last — confirm "top" is meant as "highest score", not "head of queue".
	 */
	public boolean addTop(T t) {
		return internalQueue.add(ScoredValue.of(Integer.MAX_VALUE, t));
	}

	/** Adds {@code t} with the given score. */
	public boolean add(T t, float score) {
		return internalQueue.add(ScoredValue.of(score, t));
	}

	/** Removal is by value only: the probe's score (0) is ignored by equals. */
	@Override
	public boolean remove(Object o) {
		return internalQueue.remove(ScoredValue.of(0, o));
	}

	@Override
	public boolean containsAll(@NotNull Collection<?> c) {
		return internalQueue.containsAll(c.stream().map(val -> ScoredValue.of(0, val)).collect(Collectors.toList()));
	}

	/** Bulk add; every element receives score 0. */
	@Override
	public boolean addAll(@NotNull Collection<? extends T> c) {
		return internalQueue.addAll(c.stream().map(val -> ScoredValue.of(0, (T) val)).collect(Collectors.toList()));
	}

	@Override
	public boolean removeAll(@NotNull Collection<?> c) {
		return internalQueue.removeAll(c.stream().map(val -> ScoredValue.of(0, val)).collect(Collectors.toList()));
	}

	@Override
	public boolean retainAll(@NotNull Collection<?> c) {
		return internalQueue.retainAll(c.stream().map(val -> ScoredValue.of(0, val)).collect(Collectors.toList()));
	}

	@Override
	public void clear() {
		internalQueue.clear();
	}

	/** Offers with score 0. */
	@Override
	public boolean offer(T t) {
		return offer(ScoredValue.of(0, t));
	}

	/** Offers {@code t} with the given score. */
	public boolean offer(T t, float score) {
		return offer(ScoredValue.of(score, t));
	}

	/** Offers an already-scored value. */
	public boolean offer(ScoredValue<T> value) {
		return this.internalQueue.offer(value);
	}

	@Override
	public T remove() {
		return getValueOrNull(internalQueue.remove());
	}

	@Override
	public T poll() {
		return getValueOrNull(internalQueue.poll());
	}

	@Override
	public T element() {
		return getValueOrNull(internalQueue.element());
	}

	@Override
	public T peek() {
		return getValueOrNull(internalQueue.peek());
	}

	/** Iterates the scored entries themselves (score + value). */
	public void forEachItem(Consumer<ScoredValue<T>> action) {
		internalQueue.forEach(action);
	}

	/** Streams the scored entries themselves (score + value). */
	public Stream<ScoredValue<T>> streamItems() {
		return internalQueue.stream();
	}
}

View File

@ -1,10 +0,0 @@
package org.warp.commonutils.type;
/**
 * Mutable int holder, mainly useful for mutating a counter from inside a lambda
 * (where captured locals must be effectively final). Not thread-safe.
 */
public class IntWrapper {

	// The wrapped value; public and mutable by design.
	public int var;

	public IntWrapper(int value) {
		this.var = value;
	}
}

View File

@ -1,52 +0,0 @@
package org.warp.commonutils.type;
import java.util.Objects;
import org.jetbrains.annotations.NotNull;
/**
 * An immutable value paired with a float score.
 *
 * <p>Ordering ({@link #compareTo(ScoredValue)}) considers only the score, while
 * {@link #equals(Object)} and {@link #hashCode()} consider only the value. This
 * asymmetry is deliberate — it lets score-based queues find and remove an element
 * by value alone — but it means the natural ordering is NOT consistent with equals
 * (as permitted, with a caveat, by {@link Comparable}).
 */
public final class ScoredValue<T> implements Comparable<ScoredValue<T>> {

    private final float score;
    private final T value;

    private ScoredValue(float score, T value) {
        this.score = score;
        this.value = value;
    }

    /** Static factory: pairs {@code value} with {@code score}. */
    public static <T> ScoredValue<T> of(float score, T value) {
        return new ScoredValue<>(score, value);
    }

    /** Orders by score only; ties between distinct values compare as equal. */
    @Override
    public int compareTo(@NotNull ScoredValue<T> o) {
        return Float.compare(score, o.score);
    }

    public float getScore() {
        return score;
    }

    public T getValue() {
        return value;
    }

    /** Equality is by value only; the score is intentionally ignored. */
    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        // The class is final, so an instanceof check is equivalent to a getClass()
        // comparison (and handles null for free).
        if (!(o instanceof ScoredValue)) {
            return false;
        }
        ScoredValue<?> other = (ScoredValue<?>) o;
        return Objects.equals(value, other.value);
    }

    /** Hash is by value only, matching {@link #equals(Object)}. */
    @Override
    public int hashCode() {
        return Objects.hash(value);
    }

    @Override
    public String toString() {
        return "ScoredValue(score=" + score + ", value=" + value + ")";
    }
}

View File

@ -1,76 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.warp.commonutils.type;
import java.util.Locale;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A default {@link ThreadFactory} implementation that accepts the name prefix
* of the created threads as a constructor argument. Otherwise, this factory
* yields the same semantics as the thread factory returned by
* {@link Executors#defaultThreadFactory()}.
*/
/**
 * A default {@link ThreadFactory} implementation that accepts the name prefix
 * of the created threads as a constructor argument. Otherwise, this factory
 * yields the same semantics as the thread factory returned by
 * {@link Executors#defaultThreadFactory()}.
 */
public class ShortNamedThreadFactory implements ThreadFactory {

	// Number of shared pool-number counters; each prefix hashes onto one of them,
	// keeping names short instead of using one global ever-growing counter.
	private static final int POOL_NUMBERS_COUNT = 50;

	private static final AtomicInteger[] threadPoolNumber = new AtomicInteger[POOL_NUMBERS_COUNT];

	static {
		for (int i = 0; i < threadPoolNumber.length; i++) {
			threadPoolNumber[i] = new AtomicInteger(1);
		}
	}

	private final ThreadGroup group;
	private final AtomicInteger threadNumber = new AtomicInteger(1);
	private static final String NAME_PATTERN = "%s-%d";
	private final String threadNamePrefix;

	/**
	 * Creates a new {@link ShortNamedThreadFactory} instance
	 *
	 * @param threadNamePrefix the name prefix assigned to each thread created;
	 *     a null or empty prefix is replaced with {@code "Unnamed"}.
	 */
	public ShortNamedThreadFactory(String threadNamePrefix) {
		final SecurityManager s = System.getSecurityManager();
		group = (s != null) ? s.getThreadGroup() : Thread.currentThread()
				.getThreadGroup();
		// Bug fix: hash the sanitized prefix. The raw parameter was hashed before,
		// which threw NullPointerException for a null prefix even though
		// checkPrefix() explicitly supports null/empty prefixes.
		final String prefix = checkPrefix(threadNamePrefix);
		// (hash % COUNT) lies in [-(COUNT-1), COUNT-1]; dividing by 2 and adding
		// COUNT/2 maps it into [1, COUNT-1], always a valid array index.
		final int poolNumberIndex = (prefix.hashCode() % POOL_NUMBERS_COUNT / 2) + POOL_NUMBERS_COUNT / 2;
		this.threadNamePrefix = String.format(Locale.ROOT, NAME_PATTERN,
				prefix, threadPoolNumber[poolNumberIndex].getAndIncrement());
	}

	// Replaces null/empty prefixes with a readable placeholder.
	private static String checkPrefix(String prefix) {
		return prefix == null || prefix.length() == 0 ? "Unnamed" : prefix;
	}

	/**
	 * Creates a new {@link Thread}
	 *
	 * @see java.util.concurrent.ThreadFactory#newThread(java.lang.Runnable)
	 */
	@Override
	public Thread newThread(Runnable r) {
		final Thread t = new Thread(group, r, String.format(Locale.ROOT, "%s-%d",
				this.threadNamePrefix, threadNumber.getAndIncrement()), 0);
		// Mirror Executors.defaultThreadFactory(): non-daemon, normal priority.
		t.setDaemon(false);
		t.setPriority(Thread.NORM_PRIORITY);
		return t;
	}
}

View File

@ -1,202 +0,0 @@
package org.warp.commonutils.type;
import com.google.common.collect.Streams;
import it.unimi.dsi.fastutil.objects.Object2ObjectMaps;
import it.unimi.dsi.fastutil.objects.Object2ObjectOpenHashMap;
import java.lang.reflect.Array;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import java.util.function.BiConsumer;
import java.util.function.IntFunction;
import java.util.stream.Stream;
import org.jetbrains.annotations.NotNull;
/**
 * A read-only, iterable key/value mapping. Unlike {@link Map} it exposes no mutators
 * and no key-based lookup; it supports only size queries, iteration, streaming and
 * conversion to other unmodifiable views. Implementations are backed either by a pair
 * of parallel arrays or by nothing at all (the empty case).
 */
public interface UnmodifiableIterableMap<K, V> extends Iterable<Entry<K, V>> {

	/**
	 * Returns the number of key-value mappings in this map. If the
	 * map contains more than {@code Integer.MAX_VALUE} elements, returns
	 * {@code Integer.MAX_VALUE}.
	 *
	 * @return the number of key-value mappings in this map
	 */
	int size();

	/**
	 * Returns {@code true} if this map contains no key-value mappings.
	 *
	 * @return {@code true} if this map contains no key-value mappings
	 */
	boolean isEmpty();

	/**
	 * Performs the given action for each entry in this map until all entries
	 * have been processed or the action throws an exception. Unless
	 * otherwise specified by the implementing class, actions are performed in
	 * the order of entry set iteration (if an iteration order is specified.)
	 * Exceptions thrown by the action are relayed to the caller.
	 *
	 * @implSpec
	 * The default implementation is equivalent to, for this {@code map}:
	 * <pre> {@code
	 * for (Map.Entry<K, V> entry : map.entrySet())
	 *     action.accept(entry.getKey(), entry.getValue());
	 * }</pre>
	 *
	 * The default implementation makes no guarantees about synchronization
	 * or atomicity properties of this method. Any implementation providing
	 * atomicity guarantees must override this method and document its
	 * concurrency properties.
	 *
	 * @param action The action to be performed for each entry
	 * @throws NullPointerException if the specified action is null
	 * @throws ConcurrentModificationException if an entry is found to be
	 * removed during iteration
	 * @since 1.8
	 */
	void forEach(BiConsumer<? super K, ? super V> action);

	/** Returns this mapping as an unmodifiable {@link Map}. */
	Map<K, V> toUnmodifiableMap();

	/** Streams the entries of this mapping. */
	Stream<Entry<K, V>> stream();

	/**
	 * Returns the keys of this map as an unmodifiable iterable set.
	 * NOTE(review): the array-backed implementation ignores {@code generator};
	 * presumably it exists for implementations that must materialize a key array.
	 */
	UnmodifiableIterableSet<K> toUnmodifiableIterableKeysSet(IntFunction<K[]> generator);

	/**
	 * Builds a map from two untyped parallel arrays by copying them into typed arrays
	 * whose runtime component type is taken from the first element of each input.
	 *
	 * @throws IllegalArgumentException if the arrays have different lengths
	 */
	@SuppressWarnings("SuspiciousSystemArraycopy")
	static <K, V> UnmodifiableIterableMap<K, V> ofObjects(Object[] keys, Object[] values) {
		if (keys == null || values == null || (keys.length == 0 && values.length == 0)) {
			// Either array missing or both empty: delegate to the empty-map path.
			return UnmodifiableIterableMap.of(null, null);
		} else if (keys.length == values.length) {
			//noinspection unchecked
			K[] keysArray = (K[]) Array.newInstance(keys[0].getClass(), keys.length);
			System.arraycopy(keys, 0, keysArray, 0, keys.length);
			//noinspection unchecked
			V[] valuesArray = (V[]) Array.newInstance(values[0].getClass(), keys.length);
			System.arraycopy(values, 0, valuesArray, 0, values.length);
			return UnmodifiableIterableMap.of(keysArray, valuesArray);
		} else {
			throw new IllegalArgumentException("The number of keys doesn't match the number of values.");
		}
	}

	/**
	 * Builds a map view over the given parallel arrays (the arrays are NOT copied).
	 *
	 * @throws IllegalArgumentException if the arrays have different lengths
	 */
	static <K, V> UnmodifiableIterableMap<K, V> of(K[] keys, V[] values) {
		int keysSize = (keys != null) ? keys.length : 0;
		int valuesSize = (values != null) ? values.length : 0;

		if (keysSize == 0 && valuesSize == 0) {
			// Both empty: return the dedicated stateless empty implementation.
			return new EmptyUnmodifiableIterableMap<>();
		}

		if (keysSize != valuesSize) {
			throw new IllegalArgumentException("The number of keys doesn't match the number of values.");
		}

		return new ArrayUnmodifiableIterableMap<>(keys, values, keysSize);
	}

	/** Stateless implementation used when there are no entries at all. */
	class EmptyUnmodifiableIterableMap<K, V> implements UnmodifiableIterableMap<K, V> {

		private EmptyUnmodifiableIterableMap() {}

		@NotNull
		@Override
		public Iterator<Entry<K, V>> iterator() {
			return new Iterator<>() {
				@Override
				public boolean hasNext() {
					return false;
				}

				@Override
				public Entry<K, V> next() {
					throw new NoSuchElementException();
				}
			};
		}

		@Override
		public int size() {
			return 0;
		}

		@Override
		public boolean isEmpty() {
			return true;
		}

		@Override
		public void forEach(BiConsumer<? super K, ? super V> action) {}

		@Override
		public Map<K, V> toUnmodifiableMap() {
			//noinspection unchecked
			return Object2ObjectMaps.EMPTY_MAP;
		}

		@Override
		public Stream<Entry<K, V>> stream() {
			return Stream.empty();
		}

		@Override
		public UnmodifiableIterableSet<K> toUnmodifiableIterableKeysSet(IntFunction<K[]> generator) {
			return UnmodifiableIterableSet.of(null);
		}
	}

	/**
	 * Implementation backed by two parallel arrays: {@code keys[i]} maps to
	 * {@code values[i]}. The arrays are shared with the caller, not copied.
	 */
	class ArrayUnmodifiableIterableMap<K, V> implements UnmodifiableIterableMap<K, V> {

		private final K[] keys;
		private final V[] values;
		private final int keysSize;

		private ArrayUnmodifiableIterableMap(K[] keys, V[] values, int keysSize) {
			this.keys = keys;
			this.values = values;
			this.keysSize = keysSize;
		}

		@NotNull
		@Override
		public Iterator<Entry<K, V>> iterator() {
			// Builds a temporary hash map per call just to obtain an entry iterator.
			return new Object2ObjectOpenHashMap<K, V>(keys, values, 1.0f).entrySet().iterator();
		}

		@Override
		public int size() {
			return keysSize;
		}

		@Override
		public boolean isEmpty() {
			// of() never constructs this class with zero entries.
			return false;
		}

		@Override
		public void forEach(BiConsumer<? super K, ? super V> action) {
			for (int i = 0; i < keys.length; i++) {
				action.accept(keys[i], values[i]);
			}
		}

		@Override
		public Map<K, V> toUnmodifiableMap() {
			return Object2ObjectMaps.unmodifiable(new Object2ObjectOpenHashMap<>(keys, values, 1.0f));
		}

		@Override
		public Stream<Entry<K, V>> stream() {
			// Pairs keys[i] with values[i] positionally.
			//noinspection UnstableApiUsage
			return Streams.zip(Stream.of(keys), Stream.of(values), Map::entry);
		}

		@Override
		public UnmodifiableIterableSet<K> toUnmodifiableIterableKeysSet(IntFunction<K[]> generator) {
			// The generator is not needed here: the key array already exists.
			return UnmodifiableIterableSet.of(keys);
		}
	}
}

View File

@ -1,195 +0,0 @@
package org.warp.commonutils.type;
import it.unimi.dsi.fastutil.objects.ObjectOpenHashSet;
import it.unimi.dsi.fastutil.objects.ObjectSets;
import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.function.Consumer;
import java.util.function.IntFunction;
import org.jetbrains.annotations.NotNull;
/**
 * A read-only, iterable set of elements. Exposes no mutators and no membership test;
 * it supports only size queries, iteration and conversion to other unmodifiable views.
 */
public interface UnmodifiableIterableSet<K> extends Iterable<K> {

	/** Returns the number of elements in this set. */
	int size();

	/** Returns {@code true} if this set contains no elements. */
	boolean isEmpty();

	/** Performs the given action for each element. */
	void forEach(Consumer<? super K> action);

	/** Returns this set as an unmodifiable {@link Set}. */
	Set<K> toUnmodifiableSet();

	/** Pairs this set's elements (as keys) with {@code values}, positionally. */
	<V> UnmodifiableIterableMap<K,V> toUnmodifiableIterableMapSetValues(V[] values);

	/** Pairs {@code keys} with this set's elements (as values), positionally. */
	<K2> UnmodifiableIterableMap<K2,K> toUnmodifiableIterableMapSetKeys(K2[] keys);

	/** Like {@link #toUnmodifiableIterableMapSetValues(Object[])} but returns an {@link UnmodifiableMap}. */
	<V> UnmodifiableMap<K,V> toUnmodifiableMapSetValues(V[] values);

	/** Like {@link #toUnmodifiableIterableMapSetKeys(Object[])} but returns an {@link UnmodifiableMap}. */
	<K2> UnmodifiableMap<K2,K> toUnmodifiableMapSetKeys(K2[] keys);

	/**
	 * Builds a set view over the given array (the array is NOT copied and is
	 * assumed to contain no duplicates). A null or empty array yields the empty set.
	 */
	static <K> UnmodifiableIterableSet<K> of(K[] items) {
		int keysSize = (items != null) ? items.length : 0;

		if (keysSize == 0) {
			// No elements: return a stateless empty implementation.
			return new UnmodifiableIterableSet<K>() {

				@NotNull
				@Override
				public Iterator<K> iterator() {
					return new Iterator<>() {
						@Override
						public boolean hasNext() {
							return false;
						}

						@Override
						public K next() {
							throw new NoSuchElementException();
						}
					};
				}

				@Override
				public int size() {
					return 0;
				}

				@Override
				public boolean isEmpty() {
					return true;
				}

				@Override
				public void forEach(Consumer<? super K> action) {}

				@Override
				public Set<K> toUnmodifiableSet() {
					//noinspection unchecked
					return ObjectSets.EMPTY_SET;
				}

				@Override
				public <V> UnmodifiableIterableMap<K, V> toUnmodifiableIterableMapSetValues(V[] values) {
					return UnmodifiableIterableMap.of(null, values);
				}

				@Override
				public <K2> UnmodifiableIterableMap<K2, K> toUnmodifiableIterableMapSetKeys(K2[] keys) {
					return UnmodifiableIterableMap.of(keys, null);
				}

				@Override
				public <V> UnmodifiableMap<K, V> toUnmodifiableMapSetValues(V[] values) {
					return UnmodifiableMap.of(null, values);
				}

				@Override
				public <K2> UnmodifiableMap<K2, K> toUnmodifiableMapSetKeys(K2[] keys) {
					return UnmodifiableMap.of(keys, null);
				}
			};
		}

		// Non-empty: back the view directly by the caller's array.
		return new UnmodifiableIterableSet<K>() {
			@Override
			public int size() {
				return keysSize;
			}

			@Override
			public boolean isEmpty() {
				return false;
			}

			@Override
			public void forEach(Consumer<? super K> action) {
				for (int i = 0; i < items.length; i++) {
					action.accept(items[i]);
				}
			}

			@Override
			public Set<K> toUnmodifiableSet() {
				return ObjectSets.unmodifiable(new ObjectOpenHashSet<>(items, 1.0f));
			}

			@Override
			public <V> UnmodifiableIterableMap<K, V> toUnmodifiableIterableMapSetValues(V[] values) {
				return UnmodifiableIterableMap.of(items, values);
			}

			@Override
			public <K2> UnmodifiableIterableMap<K2, K> toUnmodifiableIterableMapSetKeys(K2[] keys) {
				return UnmodifiableIterableMap.of(keys, items);
			}

			@Override
			public <V> UnmodifiableMap<K, V> toUnmodifiableMapSetValues(V[] values) {
				return UnmodifiableMap.of(items, values);
			}

			@Override
			public <K2> UnmodifiableMap<K2, K> toUnmodifiableMapSetKeys(K2[] keys) {
				return UnmodifiableMap.of(keys, items);
			}

			@NotNull
			@Override
			public Iterator<K> iterator() {
				// Builds a temporary hash set per call just to obtain an iterator.
				return new ObjectOpenHashSet<K>(items, 1.0f).iterator();
			}
		};
	}

	/**
	 * Wraps an existing {@link Set} (NOT copied: later mutations of {@code items}
	 * are visible through this view). {@code generator} is used to materialize an
	 * array when converting to array-backed maps.
	 */
	static <K> UnmodifiableIterableSet<K> of(Set<K> items, IntFunction<K[]> generator) {
		return new UnmodifiableIterableSet<K>() {
			@Override
			public int size() {
				return items.size();
			}

			@Override
			public boolean isEmpty() {
				return items.isEmpty();
			}

			@Override
			public void forEach(Consumer<? super K> action) {
				items.forEach(action);
			}

			@Override
			public Set<K> toUnmodifiableSet() {
				return Collections.unmodifiableSet(items);
			}

			@Override
			public <V> UnmodifiableIterableMap<K, V> toUnmodifiableIterableMapSetValues(V[] values) {
				return UnmodifiableIterableMap.of(items.toArray(generator), values);
			}

			@Override
			public <K2> UnmodifiableIterableMap<K2, K> toUnmodifiableIterableMapSetKeys(K2[] keys) {
				return UnmodifiableIterableMap.of(keys, items.toArray(generator));
			}

			@Override
			public <V> UnmodifiableMap<K, V> toUnmodifiableMapSetValues(V[] values) {
				return UnmodifiableMap.of(items.toArray(generator), values);
			}

			@Override
			public <K2> UnmodifiableMap<K2, K> toUnmodifiableMapSetKeys(K2[] keys) {
				return UnmodifiableMap.of(keys, items.toArray(generator));
			}

			@NotNull
			@Override
			public Iterator<K> iterator() {
				return items.iterator();
			}
		};
	}
}

View File

@ -1,317 +0,0 @@
package org.warp.commonutils.type;
import it.unimi.dsi.fastutil.objects.Object2ObjectMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectMaps;
import it.unimi.dsi.fastutil.objects.Object2ObjectOpenHashMap;
import it.unimi.dsi.fastutil.objects.ObjectIterator;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import java.util.function.BiConsumer;
import java.util.function.IntFunction;
import java.util.stream.Stream;
import org.jetbrains.annotations.NotNull;
/**
 * An {@link UnmodifiableIterableMap} that additionally supports key-based lookup
 * ({@link #containsKey(Object)}, {@link #get(Object)}) and a fastutil-style
 * allocation-free entry iterator ({@link #fastIterator()}).
 */
public interface UnmodifiableMap<K, V> extends UnmodifiableIterableMap<K, V> {

	/**
	 * Returns {@code true} if this map contains a mapping for the specified
	 * key.  More formally, returns {@code true} if and only if
	 * this map contains a mapping for a key {@code k} such that
	 * {@code Objects.equals(key, k)}.  (There can be
	 * at most one such mapping.)
	 *
	 * @param key key whose presence in this map is to be tested
	 * @return {@code true} if this map contains a mapping for the specified
	 * key
	 * @throws ClassCastException if the key is of an inappropriate type for
	 * this map
	 * (<a href="{@docRoot}/java.base/java/util/Collection.html#optional-restrictions">optional</a>)
	 * @throws NullPointerException if the specified key is null and this map
	 * does not permit null keys
	 * (<a href="{@docRoot}/java.base/java/util/Collection.html#optional-restrictions">optional</a>)
	 */
	boolean containsKey(Object key);

	/**
	 * Returns the value to which the specified key is mapped,
	 * or {@code null} if this map contains no mapping for the key.
	 *
	 * <p>More formally, if this map contains a mapping from a key
	 * {@code k} to a value {@code v} such that
	 * {@code Objects.equals(key, k)},
	 * then this method returns {@code v}; otherwise
	 * it returns {@code null}.  (There can be at most one such mapping.)
	 *
	 * <p>If this map permits null values, then a return value of
	 * {@code null} does not <i>necessarily</i> indicate that the map
	 * contains no mapping for the key; it's also possible that the map
	 * explicitly maps the key to {@code null}.  The {@link #containsKey
	 * containsKey} operation may be used to distinguish these two cases.
	 *
	 * @param key the key whose associated value is to be returned
	 * @return the value to which the specified key is mapped, or
	 * {@code null} if this map contains no mapping for the key
	 * @throws ClassCastException if the key is of an inappropriate type for
	 * this map
	 * (<a href="{@docRoot}/java.base/java/util/Collection.html#optional-restrictions">optional</a>)
	 * @throws NullPointerException if the specified key is null and this map
	 * does not permit null keys
	 * (<a href="{@docRoot}/java.base/java/util/Collection.html#optional-restrictions">optional</a>)
	 */
	V get(Object key);

	/**
	 * Returns the value to which the specified key is mapped, or
	 * {@code defaultValue} if this map contains no mapping for the key.
	 *
	 * @implSpec
	 * The default implementation makes no guarantees about synchronization
	 * or atomicity properties of this method. Any implementation providing
	 * atomicity guarantees must override this method and document its
	 * concurrency properties.
	 *
	 * @param key the key whose associated value is to be returned
	 * @param defaultValue the default mapping of the key
	 * @return the value to which the specified key is mapped, or
	 * {@code defaultValue} if this map contains no mapping for the key
	 * @throws ClassCastException if the key is of an inappropriate type for
	 * this map
	 * (<a href="{@docRoot}/java.base/java/util/Collection.html#optional-restrictions">optional</a>)
	 * @throws NullPointerException if the specified key is null and this map
	 * does not permit null keys
	 * (<a href="{@docRoot}/java.base/java/util/Collection.html#optional-restrictions">optional</a>)
	 * @since 1.8
	 */
	default V getOrDefault(Object key, V defaultValue) {
		V v;
		// Second containsKey check distinguishes "absent" from "mapped to null".
		return (((v = get(key)) != null) || containsKey(key))
				? v
				: defaultValue;
	}

	/**
	 * Returns a fastutil-style entry iterator. Implementations may return a single
	 * mutable entry object that is reused across {@code next()} calls — do not retain
	 * entries after advancing the iterator.
	 */
	@NotNull
	ObjectIterator<Object2ObjectMap.Entry<K, V>> fastIterator();

	/**
	 * Performs the given action for each entry in this map until all entries
	 * have been processed or the action throws an exception. Unless
	 * otherwise specified by the implementing class, actions are performed in
	 * the order of entry set iteration (if an iteration order is specified.)
	 * Exceptions thrown by the action are relayed to the caller.
	 *
	 * @implSpec
	 * The default implementation is equivalent to, for this {@code map}:
	 * <pre> {@code
	 * for (Map.Entry<K, V> entry : map.entrySet())
	 *     action.accept(entry.getKey(), entry.getValue());
	 * }</pre>
	 *
	 * The default implementation makes no guarantees about synchronization
	 * or atomicity properties of this method. Any implementation providing
	 * atomicity guarantees must override this method and document its
	 * concurrency properties.
	 *
	 * @param action The action to be performed for each entry
	 * @throws NullPointerException if the specified action is null
	 * @throws ConcurrentModificationException if an entry is found to be
	 * removed during iteration
	 * @since 1.8
	 */
	void forEach(BiConsumer<? super K, ? super V> action);

	/**
	 * Builds a map from the given parallel arrays ({@code keys[i]} maps to
	 * {@code values[i]}); empty input yields a stateless empty map.
	 */
	static <K, V> UnmodifiableMap<K, V> of(K[] keys, V[] values) {
		int keysSize = (keys != null) ? keys.length : 0;
		int valuesSize = (values != null) ? values.length : 0;

		if (keysSize == 0 && valuesSize == 0) {
			// Both empty: return the dedicated stateless empty implementation.
			return new EmptyUnmodifiableMap<>();
		}

		// Length mismatch is rejected by the backing fastutil map constructor.
		return new MappedUnmodifiableMap<>(new Object2ObjectOpenHashMap<>(keys, values, 1.0f));
	}

	/** Wraps an existing {@link Map} (NOT copied: later mutations remain visible). */
	static <K, V> UnmodifiableMap<K, V> of(Map<K, V> map) {
		return new MappedUnmodifiableMap<K, V>(map);
	}

	/** Stateless implementation used when there are no entries at all. */
	class EmptyUnmodifiableMap<K, V> implements UnmodifiableMap<K, V> {

		private EmptyUnmodifiableMap() {}

		@Override
		public int size() {
			return 0;
		}

		@Override
		public boolean isEmpty() {
			return true;
		}

		@Override
		public boolean containsKey(Object key) {
			return false;
		}

		@Override
		public V get(Object key) {
			return null;
		}

		@Override
		public void forEach(BiConsumer<? super K, ? super V> action) {

		}

		@NotNull
		@Override
		public Iterator<Entry<K, V>> iterator() {
			return new Iterator<>() {
				@Override
				public boolean hasNext() {
					return false;
				}

				@Override
				public Entry<K, V> next() {
					throw new NoSuchElementException();
				}
			};
		}

		@NotNull
		@Override
		public ObjectIterator<Object2ObjectMap.Entry<K, V>> fastIterator() {
			return new ObjectIterator<>() {
				@Override
				public boolean hasNext() {
					return false;
				}

				@Override
				public Object2ObjectMap.Entry<K, V> next() {
					throw new NoSuchElementException();
				}
			};
		}

		@Override
		public Map<K, V> toUnmodifiableMap() {
			//noinspection unchecked
			return Object2ObjectMaps.EMPTY_MAP;
		}

		@Override
		public Stream<Entry<K, V>> stream() {
			return Stream.empty();
		}

		@Override
		public UnmodifiableIterableSet<K> toUnmodifiableIterableKeysSet(IntFunction<K[]> generator) {
			return UnmodifiableIterableSet.of(null);
		}
	}

	/** Implementation that delegates every operation to a backing {@link Map}. */
	class MappedUnmodifiableMap<K, V> implements UnmodifiableMap<K, V> {

		private final Map<K,V> map;

		private MappedUnmodifiableMap(@NotNull Map<K, V> map) {
			this.map = map;
		}

		@Override
		public int size() {
			return map.size();
		}

		@Override
		public boolean isEmpty() {
			return map.isEmpty();
		}

		@Override
		public boolean containsKey(Object key) {
			return map.containsKey(key);
		}

		@Override
		public V get(Object key) {
			return map.get(key);
		}

		@Override
		public void forEach(BiConsumer<? super K, ? super V> action) {
			map.forEach(action);
		}

		@NotNull
		@Override
		public Iterator<Entry<K, V>> iterator() {
			return map.entrySet().iterator();
		}

		@NotNull
		@Override
		public ObjectIterator<Object2ObjectMap.Entry<K, V>> fastIterator() {
			if (map instanceof Object2ObjectMap) {
				// Backing map is a fastutil map: use its native allocation-free iterator.
				return Object2ObjectMaps.fastIterator((Object2ObjectMap<K, V>) map);
			} else {
				// Emulate a "fast" iterator over a plain Map: a single mutable entry is
				// reused for every next() call, so callers must not retain entries.
				var iterator = map.entrySet().iterator();
				var reusableEntry = new Object2ObjectMap.Entry<K, V>() {
					private K key;
					private V val;

					@Override
					public K getKey() {
						return key;
					}

					@Override
					public V getValue() {
						return val;
					}

					@Override
					public V setValue(V value) {
						// This view is unmodifiable.
						throw new UnsupportedOperationException();
					}
				};
				return new ObjectIterator<>() {
					@Override
					public boolean hasNext() {
						return iterator.hasNext();
					}

					@Override
					public Object2ObjectMap.Entry<K, V> next() {
						var next = iterator.next();
						reusableEntry.key = next.getKey();
						reusableEntry.val = next.getValue();
						return reusableEntry;
					}
				};
			}
		}

		@Override
		public Map<K, V> toUnmodifiableMap() {
			return Collections.unmodifiableMap(map);
		}

		@Override
		public Stream<Entry<K, V>> stream() {
			return map.entrySet().stream();
		}

		@Override
		public UnmodifiableIterableSet<K> toUnmodifiableIterableKeysSet(IntFunction<K[]> generator) {
			return UnmodifiableIterableSet.of(map.keySet().toArray(generator));
		}
	}
}

View File

@ -1,10 +0,0 @@
package org.warp.commonutils.type;
/**
 * Mutable holder for a single reference, mainly useful for assigning a value from
 * inside a lambda (where captured locals must be effectively final). The field is
 * volatile, so writes are visible across threads; compound updates are not atomic.
 */
public class VariableWrapper<T> {

	// The wrapped value; public and mutable by design.
	public volatile T var;

	public VariableWrapper(T value) {
		this.var = value;
	}
}

View File

@ -1,57 +0,0 @@
package org.warp.commonutils;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.opentest4j.AssertionFailedError;
import org.warp.commonutils.concurrency.executor.BoundedExecutorService;
import org.warp.commonutils.type.ShortNamedThreadFactory;
/**
 * Verifies that {@link BoundedExecutorService} reports queue-full status correctly:
 * the status callback must be told the queue is full exactly when the observed
 * queue size has reached the configured maximum.
 */
public class BoundedQueueTest {

	@Test
	public void testBoundedQueue() throws InterruptedException {
		// Exercise several core/max pool size combinations.
		testBoundedQueue(1, 1);
		testBoundedQueue(1, 10);
		testBoundedQueue(4, 10);
	}

	public void testBoundedQueue(int corePoolSize, int maxPoolSize) throws InterruptedException {
		int maxQueueSize = 2;
		AtomicInteger queueSize = new AtomicInteger();
		// Assertion failures occur on executor threads; capture the first one here
		// and re-throw it (via assertNull) on the test thread at the end.
		AtomicReference<AssertionFailedError> failedError = new AtomicReference<>();

		var executor = BoundedExecutorService.create(maxQueueSize,
				corePoolSize,
				maxPoolSize,
				0L,
				TimeUnit.MILLISECONDS,
				new ShortNamedThreadFactory("test"),
				(isQueueFull, currentQueueSize) -> {
					try {
						if (currentQueueSize >= maxQueueSize) {
							Assertions.assertTrue(isQueueFull);
						} else {
							Assertions.assertFalse(isQueueFull);
						}
					} catch (AssertionFailedError ex) {
						// Keep only the first failure; later ones would overwrite it.
						if (failedError.get() == null) {
							failedError.set(ex);
						}
						ex.printStackTrace();
					}
				}
		);

		for (int i = 0; i < 10000; i++) {
			queueSize.incrementAndGet();
			executor.execute(queueSize::decrementAndGet);
		}

		executor.shutdown();
		executor.awaitTermination(10, TimeUnit.SECONDS);

		Assertions.assertNull(failedError.get());
	}
}

View File

@ -1,40 +0,0 @@
package org.warp.commonutils.functional;
import java.io.IOException;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.warp.commonutils.functional.Unchecked.UncheckedConsumer;
/**
 * Verifies that {@code Unchecked.wrap(...)} both completes normally for a
 * non-throwing consumer and correctly re-surfaces a checked exception when asked
 * for either its exact type or one of its supertypes.
 */
public class TestGenericExceptions {

	@Test
	public void testGenericExceptions() {
		// A consumer that does not throw must complete normally via done().
		testFunction((number) -> Assertions.assertEquals(number, 1)).done();

		// A thrown IOException must surface when requested by its exact type.
		boolean ioExceptionThrown = false;
		try {
			testFunction((number) -> {
				throw new IOException("Test");
			}).throwException(IOException.class);
		} catch (IOException e) {
			ioExceptionThrown = true;
		}
		Assertions.assertEquals(true, ioExceptionThrown, "IOException not thrown");

		// A thrown IOException must also surface when requested by a supertype.
		boolean exceptionThrown = false;
		try {
			testFunction((number) -> {
				throw new IOException("Test");
			}).throwException(Exception.class);
		} catch (Exception e) {
			exceptionThrown = true;
		}
		Assertions.assertEquals(true, exceptionThrown, "Exception not thrown");
	}

	// Wraps the consumer and applies it to the fixed input value 1.
	private UncheckedResult testFunction(UncheckedConsumer<Integer> uncheckedConsumer) {
		return Unchecked.wrap(uncheckedConsumer).apply(1);
	}
}

View File

@ -1,185 +0,0 @@
package org.warp.commonutils.functional.org.warp.commonutils.locks;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.warp.commonutils.locks.LeftRightLock;
/**
 * Concurrency tests for {@link LeftRightLock}: a two-sided lock where the LEFT and RIGHT
 * sides are mutually exclusive, but a single side is reentrant and may be held by several
 * threads at once.
 *
 * <p>NOTE: the timeouts were originally written as {@code @Timeout(2000)}. JUnit 5's
 * {@code @Timeout} defaults to SECONDS, so that meant ~33 minutes; {@code @Timeout(2)}
 * restores the intended 2-second budget.
 */
public class LeftRightLockTest {

	// Monotonic counter used to order interleaved log lines from multiple threads.
	int logLineSequenceNumber = 0;

	private final LeftRightLock sut = new LeftRightLock();

	@Timeout(2)
	@Test
	public void acquiringLeftLockExcludeAcquiringRightLock() throws Exception {
		sut.lockLeft();

		assertFalse(callOnOtherThread(sut::tryLockRight), "I shouldn't be able to acquire the RIGHT lock!");
	}

	@Timeout(2)
	@Test
	public void acquiringRightLockExcludeAcquiringLeftLock() throws Exception {
		sut.lockRight();

		assertFalse(callOnOtherThread(sut::tryLockLeft), "I shouldn't be able to acquire the LEFT lock!");
	}

	@Timeout(2)
	@Test
	public void theLockShouldBeReentrant() throws Exception {
		sut.lockLeft();

		// Same thread, same side: must succeed.
		assertTrue(sut.tryLockLeft());
	}

	@Timeout(2)
	@Test
	public void multipleThreadShouldBeAbleToAcquireTheSameLock_Right() throws Exception {
		sut.lockRight();

		// A different thread may share the side that is already held.
		assertTrue(callOnOtherThread(sut::tryLockRight));
	}

	@Timeout(2)
	@Test
	public void multipleThreadShouldBeAbleToAcquireTheSameLock_left() throws Exception {
		sut.lockLeft();

		assertTrue(callOnOtherThread(sut::tryLockLeft));
	}

	@Timeout(2)
	@Test
	public void shouldKeepCountOfAllTheThreadsHoldingTheSide() throws Exception {
		CountDownLatch latchA = new CountDownLatch(1);
		CountDownLatch latchB = new CountDownLatch(1);

		Thread threadA = spawnThreadToAcquireLeftLock(latchA, sut);
		Thread threadB = spawnThreadToAcquireLeftLock(latchB, sut);

		System.out.println("Both threads have acquired the left lock.");

		try {
			// Release only thread A: thread B still holds LEFT, so RIGHT must stay blocked.
			latchA.countDown();
			threadA.join();
			boolean acqStatus = sut.tryLockRight();
			System.out.println("The right lock was " + (acqStatus ? "" : "not") + " acquired");
			assertFalse(acqStatus, "There is still a thread holding the left lock. This shouldn't succeed.");
		} finally {
			latchB.countDown();
			threadB.join();
		}
	}

	@Timeout(2)
	@Test
	public void shouldBlockThreadsTryingToAcquireLeftIfRightIsHeld() throws Exception {
		sut.lockLeft();

		CountDownLatch taskStartedLatch = new CountDownLatch(1);
		ExecutorService executor = Executors.newSingleThreadExecutor();
		try {
			final Future<Boolean> task = executor.submit(() -> {
				taskStartedLatch.countDown();
				sut.lockRight();
				return false;
			});

			// Wait until the task is definitely running, then give it time to block.
			taskStartedLatch.await();
			Thread.sleep(100);
			assertFalse(task.isDone());
		} finally {
			// Best-effort cleanup: interrupt the worker that is still blocked on lockRight().
			executor.shutdownNow();
		}
	}

	@Test
	public void shouldBeFreeAfterRelease() throws Exception {
		sut.lockLeft();
		sut.releaseLeft();

		assertTrue(sut.tryLockRight());
	}

	@Test
	public void shouldBeFreeAfterMultipleThreadsReleaseIt() throws Exception {
		CountDownLatch latch = new CountDownLatch(1);

		final Thread thread1 = spawnThreadToAcquireLeftLock(latch, sut);
		final Thread thread2 = spawnThreadToAcquireLeftLock(latch, sut);

		latch.countDown();
		thread1.join();
		thread2.join();

		assertTrue(sut.tryLockRight());
	}

	@Timeout(2)
	@Test
	public void lockShouldBeReleasedIfNoThreadIsHoldingIt() throws Exception {
		CountDownLatch releaseLeftLatch = new CountDownLatch(1);
		CountDownLatch rightLockTaskIsRunning = new CountDownLatch(1);

		Thread leftLockThread1 = spawnThreadToAcquireLeftLock(releaseLeftLatch, sut);
		Thread leftLockThread2 = spawnThreadToAcquireLeftLock(releaseLeftLatch, sut);

		ExecutorService executor = Executors.newSingleThreadExecutor();
		try {
			Future<Boolean> acquireRightLockTask = executor.submit(() -> {
				if (sut.tryLockRight()) {
					throw new AssertionError("The left lock should be still held, I shouldn't be able to acquire right at this point.");
				}
				printSynchronously("Going to be blocked on right lock");
				rightLockTaskIsRunning.countDown();
				sut.lockRight();
				printSynchronously("Lock acquired!");
				return true;
			});

			rightLockTaskIsRunning.await();

			// Release both LEFT holders; the blocked task must then acquire RIGHT.
			releaseLeftLatch.countDown();
			leftLockThread1.join();
			leftLockThread2.join();

			assertTrue(acquireRightLockTask.get());
		} finally {
			executor.shutdown();
		}
	}

	/**
	 * Runs {@code action} on a fresh single-thread executor, waits for its result and shuts
	 * the executor down (the original tests leaked one executor per call).
	 */
	private static boolean callOnOtherThread(Callable<Boolean> action) throws Exception {
		ExecutorService executor = Executors.newSingleThreadExecutor();
		try {
			return executor.submit(action).get();
		} finally {
			executor.shutdown();
		}
	}

	/** Prints a sequence-numbered line; synchronized so concurrent logs don't interleave. */
	private synchronized void printSynchronously(String str) {
		System.out.println(logLineSequenceNumber++ + ")" + str);
		System.out.flush();
	}

	/**
	 * Starts a thread that acquires the LEFT side of {@code lock} and returns only after the
	 * acquisition has happened; the thread releases the lock when {@code releaseLockLatch}
	 * is counted down.
	 */
	private Thread spawnThreadToAcquireLeftLock(CountDownLatch releaseLockLatch, LeftRightLock lock) throws InterruptedException {
		CountDownLatch lockAcquiredLatch = new CountDownLatch(1);
		final Thread thread = spawnThreadToAcquireLeftLock(releaseLockLatch, lockAcquiredLatch, lock);
		lockAcquiredLatch.await();
		return thread;
	}

	/**
	 * Starts (and returns) a thread that acquires the LEFT side of {@code lock}, signals
	 * {@code lockAcquiredLatch}, then holds the lock until {@code releaseLockLatch} opens.
	 */
	private Thread spawnThreadToAcquireLeftLock(CountDownLatch releaseLockLatch, CountDownLatch lockAcquiredLatch, LeftRightLock lock) {
		final Thread thread = new Thread(() -> {
			lock.lockLeft();
			printSynchronously("Thread " + Thread.currentThread() + " Acquired left lock");
			try {
				lockAcquiredLatch.countDown();
				releaseLockLatch.await();
			} catch (InterruptedException ignore) {
				// Shutting down: fall through and release the lock.
			} finally {
				lock.releaseLeft();
			}
			printSynchronously("Thread " + Thread.currentThread() + " RELEASED left lock");
		});
		thread.start();
		return thread;
	}
}