Compare commits

...

493 Commits

Author SHA1 Message Date
Andrea Cavalli
a14c823941 Update dbengine 2024-12-05 18:12:11 +01:00
Andrea Cavalli
12669e4eb5 Remove codec 2024-10-23 19:01:28 +02:00
Andrea Cavalli
6af06ca90e Low-level sst entry 2024-10-02 12:18:33 +02:00
Andrea Cavalli
86377a4e65 Return list 2024-10-02 00:42:15 +02:00
Andrea Cavalli
591963f630 Check status unsafe 2024-10-02 00:15:07 +02:00
Andrea Cavalli
6a68c8452b fix parallelism when listing live files 2024-10-01 23:53:31 +02:00
Andrea Cavalli
c859b8238f Fix parallelism bug 2024-10-01 23:44:16 +02:00
Andrea Cavalli
e1e6065036 Update data generator 2024-10-01 23:30:02 +02:00
Andrea Cavalli
ceff8f5022 Handle errors 2024-09-28 15:14:18 +02:00
Andrea Cavalli
06e754d437 Sort files metadata 2024-09-28 10:50:32 +02:00
Andrea Cavalli
8dbfe7a488 Start from low levels 2024-09-27 00:32:34 +02:00
Andrea Cavalli
ce356e3c33 Fix statistics 2024-09-19 22:14:44 +02:00
Andrea Cavalli
52863bf498 Add javadocs and sources 2024-09-19 11:19:55 +02:00
Andrea Cavalli
fe6ec9afe3 Add cache capacity 2024-09-19 11:10:45 +02:00
Andrea Cavalli
8bd0e7cf12 Update rocksdb, set optimize filters for memory, add cache tests 2024-09-19 00:45:35 +02:00
Andrea Cavalli
852a5e1d51 Fix null issues 2024-09-16 12:55:51 +02:00
Andrea Cavalli
bc9d274c58 Add unsafe fast iterator 2024-09-14 03:38:14 +02:00
Andrea Cavalli
e8342b783a Update version number 2024-09-12 15:47:33 +02:00
Andrea Cavalli
18191ef2fd Remove lucene 2024-09-11 18:32:52 +02:00
Andrea Cavalli
6564db6c4f Update lucene 2024-06-10 23:55:43 +02:00
Andrea Cavalli
fe9370294b Update dbengine 2024-05-27 22:42:11 +02:00
Andrea Cavalli
632dd41e9e Update dbengine 2024-04-23 20:21:32 +02:00
Andrea Cavalli
4695f29a76 Update dbengine 2024-04-22 16:28:44 +02:00
Andrea Cavalli
5888bc96b4 Performance optimization and code cleanup
- Refactor options
- Update dependencies
- Separate Read-Write pool
2024-04-18 14:48:16 +02:00
Andrea Cavalli
ff3cbc11b6 Update data generator 2024-02-20 16:45:03 +01:00
Andrea Cavalli
ae8e8441ac Add two query types 2024-02-12 21:44:26 +01:00
Andrea Cavalli
2792add2b9 Disable key empty fix in keymayexist 2024-01-22 21:15:09 +01:00
Andrea Cavalli
89a0fa9408 Update rocksdb 2024-01-22 01:31:28 +01:00
Andrea Cavalli
251240996e Optimize parallelism 2024-01-22 01:26:57 +01:00
Andrea Cavalli
024db43de8 Enable hyperclockcache 2023-12-28 11:22:28 +01:00
Andrea Cavalli
a5502af24a Update rocksdb and lucene, add serialization errors details 2023-12-21 02:10:49 +01:00
Andrea Cavalli
fa865a654d Update data generator 2023-11-29 16:39:24 +01:00
Andrea Cavalli
361e2f04f6 Use rocksdb .keyExists 2023-11-27 22:20:39 +01:00
Andrea Cavalli
fe0256dbf9 Update rocksdb, more verbose logs 2023-11-24 01:54:17 +01:00
Andrea Cavalli
495cbdea64 Update data generator 2023-11-11 23:24:15 +01:00
Andrea Cavalli
117df4cb30 Update data generator 2023-11-11 22:05:32 +01:00
Andrea Cavalli
d0c79a57d9 Update data generator 2023-11-09 02:11:39 +01:00
Andrea Cavalli
50ce5984ac Update rocksdb 2023-11-08 11:49:16 +01:00
Andrea Cavalli
b3631c5513 Update data generator 2023-11-07 01:38:17 +01:00
Andrea Cavalli
0ab75623ba Update rocksdb, lucene, Add SST Reader/Writer, update blocks verification, live files, refactor iterator 2023-10-10 00:39:41 +02:00
Andrea Cavalli
7cffc853b3 Db repair features 2023-10-03 12:08:51 +02:00
Andrea Cavalli
6fd7d249de A lot of db repair features, rollback SearchEntityHandler, update log4j 2023-10-02 19:51:28 +02:00
Andrea Cavalli
a6e73b83d8 Fix repair, add dynamic log level 2023-09-26 02:43:06 +02:00
Andrea Cavalli
8b747db386 Customizable checks 2023-09-26 01:45:28 +02:00
Andrea Cavalli
3df0dcf36a Rename badBlocks, check failures in rocksdb iterator 2023-09-26 01:18:24 +02:00
Andrea Cavalli
0cb1ad55a8 Generate a correct name for the repair fat jar 2023-09-26 00:46:40 +02:00
Andrea Cavalli
065281a4e4 Update dependencies 2023-09-26 00:36:31 +02:00
Andrea Cavalli
85bfdc33e9 Implement repair module, improve badBlocks function, read-only mode 2023-09-26 00:34:44 +02:00
Andrea Cavalli
3b35c18517 Update maven 2023-09-25 02:11:27 +02:00
Andrea Cavalli
e4ec49e9aa More checks 2023-09-20 00:56:21 +02:00
Andrea Cavalli
0caffbfa79 Update rocksdb 2023-09-11 23:56:41 +02:00
Andrea Cavalli
ce5516bd28 Faster startup 2023-09-03 01:33:58 +02:00
Andrea Cavalli
7692b21e3d Faster startup 2023-09-03 01:14:18 +02:00
Andrea Cavalli
40cd756f35 Merge remote-tracking branch 'origin/master' 2023-07-28 19:25:12 +02:00
Andrea Cavalli
a4c322e96a Avoid SIGSEGV when using/closing rocksIterator on different threads 2023-07-28 19:22:42 +02:00
Andrea Cavalli
fdb504d9cd Enable manual flush 2023-07-25 17:29:36 +02:00
Andrea Cavalli
e83270906e Remove unnecessary arguments 2023-07-22 00:56:43 +02:00
Andrea Cavalli
0e2b3677c4 Update rocksdb 2023-06-30 23:15:09 +02:00
Andrea Cavalli
faa15d64ce Update lucene 2023-06-28 00:07:45 +02:00
Andrea Cavalli
7f7f13d7f3 Predicate utils 2023-06-21 02:43:19 +02:00
Andrea Cavalli
e9c765f7da Fix xml escape 2023-06-11 00:10:46 +02:00
Andrea Cavalli
dec229ac78 Fix query parser 2023-05-28 23:26:08 +02:00
Andrea Cavalli
e66bc6ce53 Add solr query 2023-05-28 16:44:54 +02:00
Andrea Cavalli
3a6883c274 Add versions leak test utility 2023-05-24 01:44:16 +02:00
Andrea Cavalli
2a817cbc58 StreamUtils Try-Catch for streams, Remove old unused flags 2023-05-24 01:44:06 +02:00
Andrea Cavalli
bee2fe1bf5 Close iterators 2023-05-23 01:05:03 +02:00
Andrea Cavalli
0062a36ed5 Change defaults 2023-05-23 00:20:14 +02:00
Andrea Cavalli
3e90ba3704 Optimization 2023-05-23 00:20:07 +02:00
Andrea Cavalli
7e7e1c410b Add debug option to disable snapshots, optimize iterate bounds 2023-05-23 00:19:47 +02:00
Andrea Cavalli
dc03d25fdc Require exact binary length 2023-05-22 23:34:08 +02:00
Andrea Cavalli
26961125c0 Fix major bug 2023-05-22 23:33:58 +02:00
Andrea Cavalli
df946146a1 Major bugfix 2023-05-22 23:08:37 +02:00
Andrea Cavalli
93fc28101a set setMaxWriteBufferNumberToMaintain 2 2023-05-22 20:25:57 +02:00
Andrea Cavalli
2f5c8b618f Clean unused iterators 2023-05-22 19:12:05 +02:00
Andrea Cavalli
8499dcf89c Update dependencies 2023-05-11 13:53:05 +02:00
Andrea Cavalli
0c3afa5839 Update micrometer 2023-04-22 20:11:55 +02:00
Andrea Cavalli
14e00a1857 Optimizations 2023-04-20 20:40:04 +02:00
Andrea Cavalli
7c67676a25 Update serializer 2023-04-20 15:12:59 +02:00
Andrea Cavalli
2b6b447e0c Change module path 2023-04-20 11:29:07 +02:00
Andrea Cavalli
2810571d7f Update library package 2023-04-20 10:20:17 +02:00
Andrea Cavalli
249403016a data generator 2023-04-20 01:34:27 +02:00
Andrea Cavalli
7ac452d52a fixes 2023-04-18 19:30:18 +02:00
Andrea Cavalli
977dd472c1 Update data generator 2023-04-18 19:16:16 +02:00
Andrea Cavalli
cce49a50ee Optimize serializer 2023-04-17 23:31:16 +02:00
Andrea Cavalli
97df3bf725 Downgrade snakeyaml 2023-04-14 00:59:04 +02:00
Andrea Cavalli
09bdfed0aa Fix RecordBuilder dependency 2023-04-13 17:05:51 +02:00
Andrea Cavalli
0a325c6ef6 Add no-op analyzer instance 2023-04-07 11:49:00 +02:00
Andrea Cavalli
161876c1ee Update snakeyaml 2023-03-31 12:58:16 +02:00
Andrea Cavalli
f54388efa8 Fix name 2023-03-29 16:35:51 +02:00
Andrea Cavalli
3180b751ef Fifo workers 2023-03-29 16:33:33 +02:00
Andrea Cavalli
003799b468 Change names 2023-03-29 00:47:53 +02:00
Andrea Cavalli
8ac067b639 Implement named fork join worker thread factory, count-exec collector 2023-03-28 15:54:27 +02:00
Andrea Cavalli
09113207ed Depend on different exception type 2023-03-28 01:21:10 +02:00
Andrea Cavalli
dfa1fc3ecc Important serialization update 2023-03-28 01:20:58 +02:00
Andrea Cavalli
ed981581ec Important serialization bugfixes 2023-03-28 00:47:47 +02:00
Andrea Cavalli
a83f1ff1a6 Optimize database 2023-03-27 22:00:32 +02:00
Andrea Cavalli
468886d154 Remove compressed cache, rocksdb 8.0.0 2023-03-25 13:42:49 +01:00
Andrea Cavalli
1aeb0c99d3 Code cleanup 2023-03-20 00:36:27 +01:00
Andrea Cavalli
af7c3dfd65 Faster mapList, bugfixes, remove composite buffers 2023-03-09 00:16:57 +01:00
Andrea Cavalli
81f1c5643d Fix some adapters 2023-03-07 12:20:48 +01:00
Andrea Cavalli
404092106b Use new buffers 2023-03-06 12:19:08 +01:00
Andrea Cavalli
6037a906dc Code cleanup 2023-03-02 23:27:30 +01:00
Andrea Cavalli
a21c1f3cf4 Important bugfixes 2023-03-02 23:13:54 +01:00
Andrea Cavalli
ea1b464ddf Implement writeUTF 2023-03-02 18:23:03 +01:00
Andrea Cavalli
011c8f839c Remove netty 5 and unused code 2023-02-28 23:10:31 +01:00
Andrea Cavalli
024c4ee226 pool naming 2023-02-26 23:48:47 +01:00
Andrea Cavalli
0e21c72e0a Finalize and test the new implementation 2023-02-26 21:41:20 +01:00
Andrea Cavalli
daa7047614 Partially replace foreach with collection 2023-02-24 17:19:25 +01:00
Andrea Cavalli
e0d929dbaa Bugfixes 2023-02-24 11:07:33 +01:00
Andrea Cavalli
1b83c95856 Fix tests 2023-02-24 00:18:02 +01:00
Andrea Cavalli
3f88ff8f83 Fix tests 2023-02-22 23:31:05 +01:00
Andrea Cavalli
59f9f01268 Fix sigsegv 2023-02-22 22:31:36 +01:00
Andrea Cavalli
cd15f8d23d Fix compilation errors 2023-02-22 16:59:35 +01:00
Andrea Cavalli
a9857f7553 Remove netty buffers, remove reactive streams 2023-02-22 16:21:13 +01:00
Andrea Cavalli
5c112484bd Update data generator 2023-02-09 18:02:29 +01:00
Andrea Cavalli
df420d4193 Remove exception 2023-02-08 08:52:37 +01:00
Andrea Cavalli
ff6922449c Update rocksdb 2023-01-31 02:41:35 +01:00
Andrea Cavalli
694c2d811d Ignore mono elements faster 2023-01-27 15:33:32 +01:00
Andrea Cavalli
b9b420afba Update reactor 2023-01-26 01:56:54 +01:00
Andrea Cavalli
cc87c1caeb Update data generator 2023-01-24 18:17:13 +01:00
Andrea Cavalli
80710bf0c5 Update data generator 2023-01-22 16:53:24 +01:00
Andrea Cavalli
ec579d61dc Update serializer 2023-01-22 01:48:35 +01:00
Andrea Cavalli
1c8a41d133 Update data generator 2023-01-18 03:10:13 +01:00
Andrea Cavalli
447edbe103 Update data builder syntax 2023-01-18 02:17:48 +01:00
Andrea Cavalli
855e4e50e9 Update data-generator 2023-01-17 17:57:50 +01:00
Andrea Cavalli
547eb1ab13 Update byte buddy 2023-01-05 12:48:52 +01:00
Andrea Cavalli
10d4ac600d More precise statistics 2023-01-05 02:58:38 +01:00
Andrea Cavalli
992df9b15f Update fastutil 2023-01-02 17:15:17 +01:00
Andrea Cavalli
410d3563de Update database 2022-12-21 01:05:08 +01:00
Andrea Cavalli
17ae98750e Update lucene 2022-12-13 02:25:12 +01:00
Andrea Cavalli
204d8fa990 Bugfix 2022-11-23 15:34:45 +01:00
Andrea Cavalli
36b76d81ed flag 2022-11-22 18:44:45 +01:00
Andrea Cavalli
6aa7bb6040 Implement db maintenance operations 2022-11-22 17:36:31 +01:00
Andrea Cavalli
889f59772c Update lucene 2022-10-31 14:06:07 +01:00
Andrea Cavalli
548016c66e Fix drops 2022-10-24 01:17:08 +02:00
Andrea Cavalli
d28ff73ee6 Don't dump queueSubscription 2022-10-18 18:01:45 +02:00
Andrea Cavalli
abe18d0b66 Update slf4j 2022-10-17 15:30:50 +02:00
Andrea Cavalli
d6f9398eff Remove publishOn on lucene functions 2022-10-13 00:15:57 +02:00
Andrea Cavalli
1f672a63ef Default to non-shared schedulers 2022-10-13 00:07:24 +02:00
Andrea Cavalli
c01485158d Update reactor 2022-10-07 00:48:32 +02:00
Andrea Cavalli
78ba98c8c4 Update lucene and netty 5 2022-10-02 03:09:50 +02:00
Andrea Cavalli
b33bafec21 Update rocksdb 2022-09-26 21:56:56 +02:00
Andrea Cavalli
5360a7dd7c Update log4j2 2022-09-22 01:12:00 +02:00
Andrea Cavalli
d004f750a0 Do not collect iterables 2022-09-18 23:38:21 +02:00
Andrea Cavalli
32e5a100f5 Speedup 2022-09-13 22:15:34 +02:00
Andrea Cavalli
f739f4f9f4 UpdateMode is now a "blocking" method 2022-09-12 20:14:56 +02:00
Andrea Cavalli
f9c2f7ca31 Update log4j 2022-09-10 23:13:11 +02:00
Andrea Cavalli
0302ad9458 Update dependencies 2022-09-07 20:22:03 +02:00
Andrea Cavalli
67d3797df8 fix crash 2022-08-24 14:31:10 +02:00
Andrea Cavalli
1c02a601bc Add NoFilter 2022-08-16 19:50:53 +02:00
Andrea Cavalli
ddd71d3b72 Implement backuppable class 2022-08-15 23:07:17 +02:00
Andrea Cavalli
8b6f1dfe87 Fix pq bug 2022-08-12 02:08:15 +02:00
Andrea Cavalli
69c28b92d0 Update lucene, junit, rocksdb 2022-08-11 23:59:29 +02:00
Andrea Cavalli
907a30dc2b Bugfixes 2022-08-03 18:23:26 +02:00
Andrea Cavalli
faa2a71597 Fix compilation errors 2022-07-29 00:33:23 +02:00
Andrea Cavalli
25a702015e Update dependencies 2022-07-29 00:32:08 +02:00
Andrea Cavalli
b8835dca5b Update rocksdb 2022-07-28 23:59:12 +02:00
Andrea Cavalli
98444af89e Avoid race conditions 2022-07-28 23:48:45 +02:00
Andrea Cavalli
a3ed443bad Fix disposable 2022-07-28 23:44:23 +02:00
Andrea Cavalli
b1fbf39c87 Reduce refresh overhead 2022-07-28 23:41:10 +02:00
Andrea Cavalli
8ff3381c72 Fix startup 2022-07-25 01:57:34 +02:00
Andrea Cavalli
200d7b5eb1 Code cleanup 2022-07-23 22:13:17 +02:00
Andrea Cavalli
fd202d8dfa Handle discards 2022-07-23 15:12:44 +02:00
Andrea Cavalli
d8419a4c1b Code cleanup 2022-07-23 15:03:59 +02:00
Andrea Cavalli
bca768a4b8 Bugfix 2022-07-23 14:36:40 +02:00
Andrea Cavalli
b9ffa1dd49 Schedule correctly lucene closeables 2022-07-23 14:25:59 +02:00
Andrea Cavalli
a4a8926e02 Close lucene objects in the correct thread 2022-07-23 02:42:48 +02:00
Andrea Cavalli
d896780611 Add RAF directory 2022-07-22 13:49:03 +02:00
Andrea Cavalli
f72a9c231b Postpone cors handler 2022-07-21 01:14:46 +02:00
Andrea Cavalli
7b2a1677e7 Bugfix 2022-07-21 01:07:16 +02:00
Andrea Cavalli
c27f7d4e2b Bugfixes 2022-07-20 02:00:08 +02:00
Andrea Cavalli
a976a4baa4 Use SimpleResources everywhere 2022-07-19 23:45:39 +02:00
Andrea Cavalli
8d8442d55d Rename file 2022-07-19 04:13:21 +02:00
Andrea Cavalli
ad584ff8ad Update data generator 2022-07-19 03:55:00 +02:00
Andrea Cavalli
015e6797c7 Update to netty 5 2022-07-15 02:44:50 +02:00
Andrea Cavalli
f2358e9bee Disable reactor agent by default 2022-07-05 00:46:54 +02:00
Andrea Cavalli
0bc7b6d659 Skip wrong keys 2022-07-04 20:03:10 +02:00
Andrea Cavalli
4ed382197b Set the max total wal size to 10gb 2022-07-03 22:49:07 +02:00
Andrea Cavalli
8f47adfc44 Fix searchers leak, change method references
Replace most method references with lambdas to ease debugging
2022-07-03 01:32:13 +02:00
Andrea Cavalli
409b2985ca Bugfix 2022-07-02 13:32:38 +02:00
Andrea Cavalli
96d19c3e09 Code cleanup 2022-07-02 12:22:16 +02:00
Andrea Cavalli
2a9427f0e4 Reduce the possibility of leaks 2022-07-02 11:44:13 +02:00
Andrea Cavalli
caf55a633e Code cleanup 2022-06-30 17:05:32 +02:00
Andrea Cavalli
8e50976d27 Add discarding closeable 2022-06-30 15:06:10 +02:00
Andrea Cavalli
ab93ede348 Use resource 2022-06-30 13:54:55 +02:00
Andrea Cavalli
831af1ef81 Leak detection 2022-06-29 01:14:05 +02:00
Andrea Cavalli
aee08f3e48 Code cleanup 2022-06-28 13:52:21 +02:00
Andrea Cavalli
4a0710ed9a Force delete unused files 2022-06-23 00:14:36 +02:00
Andrea Cavalli
f023fc3b83 Update data generator 2022-06-22 23:44:36 +02:00
Andrea Cavalli
5483a1551d Update CI, update dependencies 2022-06-22 17:36:16 +02:00
Andrea Cavalli
9e97d24955 Use fork of rocksdbjni 2022-06-22 12:56:52 +02:00
Andrea Cavalli
8c9aca21b3 Implement preClose 2022-06-21 22:52:42 +02:00
Andrea Cavalli
8083364ebf Add waitForMerges, waitForLastMerges, flush, fix #210, fix #209 2022-06-21 14:35:07 +02:00
Andrea Cavalli
ea2065302a Code cleanup 2022-06-20 23:31:42 +02:00
Andrea Cavalli
d2e7c56f06 Use more method references 2022-06-20 12:30:33 +02:00
Andrea Cavalli
a3d1207d76 Implement configurable merge policies 2022-06-20 11:55:41 +02:00
Andrea Cavalli
73b5092785 Fixes 2022-06-20 00:32:56 +02:00
Andrea Cavalli
d2650161fb clockcache is still blocked 2022-06-18 00:27:02 +02:00
Andrea Cavalli
367226480b Use clock cache 2022-06-17 01:00:45 +02:00
Andrea Cavalli
afa159de67 Reduce executors count 2022-06-16 18:40:17 +02:00
Andrea Cavalli
33d8f83933 Add new query 2022-06-16 00:04:50 +02:00
Andrea Cavalli
eb02e0f18d Test huge queries 2022-06-15 18:36:22 +02:00
Andrea Cavalli
4a08a876ca Force huge pq with an option 2022-06-15 13:09:45 +02:00
Andrea Cavalli
86e48eab92 safer count 2022-06-15 10:39:32 +02:00
Andrea Cavalli
62692a1f9a Fix wrong query format 2022-06-15 00:23:55 +02:00
Andrea Cavalli
17c40757ba Share term statistics across shards 2022-06-14 21:58:26 +02:00
Andrea Cavalli
0d830fbd21 Use SimpleResource when possible 2022-06-14 18:05:26 +02:00
Andrea Cavalli
fb0bd092a4 Fix searcher leak 2022-06-14 17:46:49 +02:00
Andrea Cavalli
8e47c15809 Avoid indexsearcher leaks 2022-06-14 13:10:38 +02:00
Andrea Cavalli
cc6071a4de Add statistics about searchers 2022-06-13 23:25:43 +02:00
Andrea Cavalli
9db32dacae Fix write buffer manager null 2022-06-10 16:26:03 +02:00
Andrea Cavalli
d4de13c2ab Create a class for KeyMayExist code 2022-06-09 19:45:03 +02:00
Andrea Cavalli
8578facfe7 Check persistent cache directory 2022-06-09 16:47:42 +02:00
Andrea Cavalli
957866ec99 Add a property to enable mmap writes 2022-06-09 00:49:08 +02:00
Andrea Cavalli
8c2f3b89b6 Use LZ4 compression as fallback 2022-06-09 00:13:44 +02:00
Andrea Cavalli
563defb2ff Implement closeRequested 2022-06-08 18:52:15 +02:00
Andrea Cavalli
3be0d3710c Enable lucene auto io throttle 2022-06-08 16:45:54 +02:00
Andrea Cavalli
cc9306fbde Check if accessible 2022-06-05 16:38:39 +02:00
Andrea Cavalli
956f33fb6c Improve string field 2022-06-04 19:18:51 +02:00
Andrea Cavalli
0e9b45ebf9 Update hitkey 2022-06-04 16:33:45 +02:00
Andrea Cavalli
62c2dabd8c Workaround again fake-zero elements 2022-06-01 23:18:11 +02:00
Andrea Cavalli
a1a509a6cf BlobDB 2022-06-01 17:36:21 +02:00
Andrea Cavalli
6dfc10859f Implement term fields 2022-05-30 01:08:46 +02:00
Andrea Cavalli
e1bd57c837 Fix default options 2022-05-29 23:48:40 +02:00
Andrea Cavalli
23d70e55bd Add more netty stats 2022-05-28 18:55:05 +02:00
Andrea Cavalli
65295dbf03 Implement NettyMetrics 2022-05-28 14:34:35 +02:00
Andrea Cavalli
6056eedd75 Fix some possible leaks 2022-05-26 13:13:14 +02:00
Andrea Cavalli
96de3023a0 Fix double-free, close all properties 2022-05-22 16:48:08 +02:00
Andrea Cavalli
fe31f9b1c7 Code cleanup 2022-05-22 00:56:32 +02:00
Andrea Cavalli
bff4d87164 Code cleanup 2022-05-22 00:32:08 +02:00
Andrea Cavalli
2e58189015 Code cleanup 2022-05-21 23:49:06 +02:00
Andrea Cavalli
52c216c0df Handle discards and drops 2022-05-21 22:41:48 +02:00
Andrea Cavalli
7f52339a6a Code cleanup 2022-05-21 15:28:52 +02:00
Andrea Cavalli
5c4519552d Fix more possible leaks 2022-05-21 01:06:55 +02:00
Andrea Cavalli
18d5ddf6e1 Remove some leaks 2022-05-20 23:59:56 +02:00
Andrea Cavalli
d253111233 Fix some memory leaks 2022-05-20 18:31:05 +02:00
Andrea Cavalli
82f8e91e99 Fix double-free 2022-05-20 13:16:26 +02:00
Andrea Cavalli
a720a12701 Bugfix 2022-05-20 10:44:00 +02:00
Andrea Cavalli
0e7df84c38 Remove RocksObj, add BufSupplier, remove sends 2022-05-20 10:20:03 +02:00
Andrea Cavalli
1c36ab040b Support m2e 2022-05-18 13:10:12 +02:00
Andrea Cavalli
84c9a2c3cc Update netty 2022-05-18 01:38:04 +02:00
Andrea Cavalli
c9a12760bc Trace leaks 2022-05-12 19:14:27 +02:00
Andrea Cavalli
6a5a9a3e94 Fix more leaks 2022-05-11 20:32:56 +02:00
Andrea Cavalli
bfc78f1465 Update modules 2022-05-11 10:24:23 +02:00
Andrea Cavalli
fab06db239 Fixed more leaks 2022-05-11 00:29:42 +02:00
Andrea Cavalli
99e101914d Fix some possible leaks 2022-05-10 16:57:41 +02:00
Andrea Cavalli
b5b9fc9d0d Fix module 2022-05-10 00:53:07 +02:00
Andrea Cavalli
f89c3ff707 Update dependencies 2022-05-10 00:31:16 +02:00
Andrea Cavalli
2c308f0f63 Update log4j 2022-05-09 22:09:07 +02:00
Andrea Cavalli
cc69ec3063 Add statistics 2022-05-09 22:08:54 +02:00
Andrea Cavalli
e68a4e5121 Reformat code 2022-05-09 11:18:09 +02:00
Andrea Cavalli
b30ce665a0 Require okio 2022-05-09 10:23:48 +02:00
Andrea Cavalli
4cf9617703 Clarifications 2022-05-07 14:20:52 +02:00
Andrea Cavalli
4fc1a66e4f Update runtime 2022-05-07 00:37:22 +02:00
Andrea Cavalli
ca9a2b0794 Update runtime 2022-05-05 16:07:11 +02:00
Andrea Cavalli
02f1276181 Bugfixes 2022-05-04 12:36:32 +02:00
Andrea Cavalli
a1c0e19adc Add readable rocksdb stats 2022-05-04 01:21:56 +02:00
Andrea Cavalli
549a3bd178 Fix some defaults 2022-05-03 19:47:32 +02:00
Andrea Cavalli
614d24ac8d Code cleanup 2022-05-02 19:05:40 +02:00
Andrea Cavalli
19bb638f20 Follow rocksdb optimizations 2022-05-02 18:48:44 +02:00
Andrea Cavalli
404fe03c14 Reduce L0 files 2022-05-02 01:16:46 +02:00
Andrea Cavalli
22f0711ab8 Configurable log path, configurable wal path 2022-05-02 00:42:38 +02:00
Andrea Cavalli
3a7b1498ff Change log levels 2022-05-01 17:36:29 +02:00
Andrea Cavalli
87d000968a Automatic wal ttls and sync sizes 2022-05-01 17:25:22 +02:00
Andrea Cavalli
f63b70ab9d Set target file size multiplier to 2, to reduce write amplification 2022-05-01 15:48:08 +02:00
Andrea Cavalli
d9c2e8a5f9 Code cleanup, bigger compaction trigger l0 2022-05-01 15:42:51 +02:00
Andrea Cavalli
c72e4d5a83 Code cleanup 2022-05-01 15:35:12 +02:00
Andrea Cavalli
59e980f356 Fix closed status 2022-04-30 23:50:22 +02:00
Andrea Cavalli
04623b754c Update lucene, update rocksdb 2022-04-30 21:56:42 +02:00
Andrea Cavalli
e03afafcee Safer access to database elements 2022-04-30 14:21:20 +02:00
Andrea Cavalli
88a1add102 Bugfixes 2022-04-30 02:14:44 +02:00
Andrea Cavalli
654a62d7b8 Bugfix 2022-04-30 01:50:24 +02:00
Andrea Cavalli
e962ae6336 Secure database shutdown, deduplicate compaction script 2022-04-30 01:49:44 +02:00
Andrea Cavalli
9d16ccdd9e Flush API, accessibility lock, better manual compaction 2022-04-28 23:23:26 +02:00
Andrea Cavalli
e7718a8370 Fix crash 2022-04-28 11:35:01 +02:00
Andrea Cavalli
c0d4dd0c22 Update gitignore 2022-04-27 10:40:29 +02:00
Andrea Cavalli
f854ce1253 Temporarily disable compactfiles 2022-04-27 09:56:07 +02:00
Andrea Cavalli
1bd3b8a945 Implement compact API 2022-04-26 17:30:31 +02:00
Andrea Cavalli
ec5bf1c5cc Compaction API and configurable write buffer size 2022-04-26 17:12:22 +02:00
Andrea Cavalli
116fc88311 Bugfix 2022-04-20 23:29:39 +02:00
Andrea Cavalli
f5cb2d8895 Improve point lookup speed 2022-04-19 23:23:32 +02:00
Andrea Cavalli
735be8ecb8 Add mayExist 2022-04-19 23:23:12 +02:00
Andrea Cavalli
94cdaaedef Bugfix 2022-04-15 22:40:47 +02:00
Andrea Cavalli
4b627664aa Bugfixes 2022-04-15 16:49:01 +02:00
Andrea Cavalli
80ef0394b1 Use crc32c, it seems that there are problems with checksums 2022-04-15 02:56:03 +02:00
Andrea Cavalli
046c08e5bf Support persistent cache, ensure that all snapshots are closed in time 2022-04-15 02:41:06 +02:00
Andrea Cavalli
798262d72b Update dependencies 2022-04-13 21:02:19 +02:00
Andrea Cavalli
e038b41d17 Adjust parameter for spinning disk
Based on the default document storage of arangodb
2022-04-13 19:44:44 +02:00
Andrea Cavalli
632d575a47 Update dependencies 2022-04-13 19:44:19 +02:00
Andrea Cavalli
31ea2d0e10 Merge remote-tracking branch 'origin/master' 2022-04-13 19:43:57 +02:00
Andrea Cavalli
be929932aa Update dbengine 2022-04-13 17:52:15 +02:00
Andrea Cavalli
7e71599893 Update data generator 2022-04-13 15:01:08 +02:00
Andrea Cavalli
0ad5a15792 Fix putmulti 2022-04-12 00:25:18 +02:00
Andrea Cavalli
1b150dcbaf configurable partition filters 2022-04-11 20:04:27 +02:00
Andrea Cavalli
d35840ec03 Configurable write buffer manager 2022-04-11 16:53:17 +02:00
Andrea Cavalli
4448947cfd Fix shared caching size 2022-04-11 16:41:13 +02:00
Andrea Cavalli
0ca6f4c2c5 Re-create allocations tester 2022-04-11 16:40:55 +02:00
Andrea Cavalli
851f73481a Bugfixes 2022-04-11 12:42:01 +02:00
Andrea Cavalli
a7329925d0 Update dependency 2022-04-11 11:44:56 +02:00
Andrea Cavalli
1cac7cb0c9 Custom block size, fix cache stats, fix db close, disable clock cache 2022-04-11 01:27:09 +02:00
Andrea Cavalli
eb5792bbe0 bugfixes 2022-04-10 20:15:05 +02:00
Andrea Cavalli
fb44c182fa Update data generator 2022-04-09 22:56:38 +02:00
Andrea Cavalli
411bf196af Remove unneeded plugin 2022-04-09 20:20:54 +02:00
Andrea Cavalli
e86965efa7 Avoid closing things that have already been closed 2022-04-09 16:31:32 +02:00
Andrea Cavalli
6315175dc4 Java modules 2022-04-09 02:45:42 +02:00
Andrea Cavalli
95d436860f Update options 2022-04-08 14:32:47 +02:00
Andrea Cavalli
29210cca80 More stats 2022-04-07 22:19:11 +02:00
Andrea Cavalli
f9fb679f9b Bloom stats, fix thread caps 2022-04-07 20:03:29 +02:00
Andrea Cavalli
5c6e6411f5 Experiment with different options 2022-04-06 14:53:08 +02:00
Andrea Cavalli
6c6263e1d0 Fix lucene hugepq searcher 2022-04-06 14:25:53 +02:00
Andrea Cavalli
dc69bf8e25 Reimplement LMDB PriorityQueue using RocksDB 2022-04-06 02:41:32 +02:00
Andrea Cavalli
6ac9505653 Separate write and read schedulers 2022-04-05 13:58:12 +02:00
Andrea Cavalli
02cd99a963 Reduce bloom filter size 2022-04-05 01:07:50 +02:00
Andrea Cavalli
24cf7ea58d Add dedicated scheduler 2022-04-05 00:37:44 +02:00
Andrea Cavalli
1cd5fc8eed Adjust default compaction size 2022-04-04 22:55:28 +02:00
Andrea Cavalli
1dfe0d5a77 Use the right scheduler 2022-04-04 20:12:29 +02:00
Andrea Cavalli
a45f357bca Group threads 2022-04-04 17:52:49 +02:00
Andrea Cavalli
c711bbc5ad Explain the hard-coded rocksdb settings 2022-04-04 11:16:20 +02:00
Andrea Cavalli
32dc615f88 Default to 256KiB block size 2022-04-04 10:27:38 +02:00
Andrea Cavalli
c4e696f359 Subscribe correctly 2022-04-01 20:06:06 +02:00
Andrea Cavalli
c2d3f79a08 Subscribe to the correct scheduler 2022-04-01 15:15:06 +02:00
Andrea Cavalli
cd26cf61b7 More update statistics, avoid send in updates 2022-04-01 01:30:56 +02:00
Andrea Cavalli
7891b0b9e0 Optimizations 2022-03-30 23:44:57 +02:00
Andrea Cavalli
cb83c17811 Bugfixes 2022-03-30 18:36:07 +02:00
Andrea Cavalli
7d0951956d Add more metrics about payload sizes, and iteration times 2022-03-30 15:15:53 +02:00
Andrea Cavalli
6bd3fdb677 Add utility method to lazyhitentry 2022-03-29 21:27:56 +02:00
Andrea Cavalli
87c031fe71 Update to rocksdb 7 2022-03-27 01:22:20 +01:00
Andrea Cavalli
39811dc3f3 Add the possibility to iterate only a slice of the database 2022-03-25 00:27:44 +01:00
Andrea Cavalli
81b26eed82 Add smallRange parameter 2022-03-24 23:56:23 +01:00
Andrea Cavalli
388b79c6d1 Allow reverse iteration 2022-03-24 21:14:17 +01:00
Andrea Cavalli
8e88c78ce7 Improve query parsing 2022-03-23 23:58:30 +01:00
Andrea Cavalli
bafe5a1fe8 Add more term queries 2022-03-23 23:05:11 +01:00
Andrea Cavalli
2bed1d4d51 Remove unsupported code 2022-03-22 19:40:15 +01:00
Andrea Cavalli
6443e75ebd Allow reading the column families 2022-03-22 12:59:22 +01:00
Andrea Cavalli
8e6ea58823 Update options 2022-03-22 11:50:30 +01:00
Andrea Cavalli
372c45220c Update rocksdb options, don't delete logs manually 2022-03-22 00:23:32 +01:00
Andrea Cavalli
5f6dfac1da Respect limit 2022-03-21 15:25:26 +01:00
Andrea Cavalli
da3e7fdf33 Restore default scheduler 2022-03-21 15:22:55 +01:00
Andrea Cavalli
b163260702 Fix db options 2022-03-21 15:19:17 +01:00
Andrea Cavalli
b1d8e3e48e Re-enable dedicated schedulers 2022-03-20 16:14:31 +01:00
Andrea Cavalli
e34e1e5852 Optimize singleton 2022-03-20 14:45:48 +01:00
Andrea Cavalli
de5be6564e Implement singletons 2022-03-20 14:33:27 +01:00
Andrea Cavalli
e866241ff1 Unify read options 2022-03-19 16:36:59 +01:00
Andrea Cavalli
bbc77df56b Update dependencies 2022-03-19 00:08:23 +01:00
Andrea Cavalli
aad7195acb Don't use arrays 2022-03-18 19:16:06 +01:00
Andrea Cavalli
46ac6ca481 Update reactor 2022-03-18 16:20:34 +01:00
Andrea Cavalli
28b4fdee50 Better errors logging, avoid zero-bytes bug in keyMayExist 2022-03-18 15:33:54 +01:00
Andrea Cavalli
59c37c0fc9 Improve suffix performance 2022-03-16 22:41:51 +01:00
Andrea Cavalli
ba3765eece Improve direct buffer support 2022-03-16 19:19:26 +01:00
Andrea Cavalli
0a6a0657a3 Use official netty 5 package 2022-03-16 13:47:56 +01:00
Andrea Cavalli
8999102038 add fast path 2022-03-16 00:32:00 +01:00
Andrea Cavalli
a20bb9c423 Use standard searcher when possible 2022-03-15 12:36:33 +01:00
Andrea Cavalli
4cc8d44fd8 Fix terms 2022-03-15 11:46:00 +01:00
Andrea Cavalli
93c5251392 Fix problematic uninterruptible scheduler 2022-03-14 00:59:16 +01:00
Andrea Cavalli
2b81006d56 Bypass groupBy bug and other small improvements 2022-03-13 11:01:51 +01:00
Andrea Cavalli
a4df72fe46 Improve performance 2022-03-12 02:55:18 +01:00
Andrea Cavalli
9b5071c45e Optional atomicity of addDocuments 2022-03-12 00:22:41 +01:00
Andrea Cavalli
4a2d143135 Various bugfixes 2022-03-11 17:59:46 +01:00
Andrea Cavalli
16f6025b30 Fix options 2022-03-10 02:38:57 +01:00
Andrea Cavalli
325457dd44 Bugfix 2022-03-10 01:43:37 +01:00
Andrea Cavalli
faa7118b8e Use buffer api 2022-03-09 02:29:38 +01:00
Andrea Cavalli
35a70efec5 Close db optionally 2022-03-08 02:12:13 +01:00
Andrea Cavalli
f0533a17c9 Use LuceneUtils to create simple shards 2022-03-07 01:42:33 +01:00
Andrea Cavalli
48f3a54e72 Partial RPC implementation 2022-03-05 15:46:40 +01:00
Andrea Cavalli
26b9de5eb0 Code cleanup 2022-03-04 01:28:18 +01:00
Andrea Cavalli
090a47ae86 Implement and test some utilities 2022-03-04 01:26:18 +01:00
Andrea Cavalli
77af845a8a Remove unneeded parameter 2022-03-02 18:33:58 +01:00
Andrea Cavalli
ed37a769e2 Partial server implementation 2022-03-02 12:34:30 +01:00
Andrea Cavalli
2022495dda Remove unused options 2022-02-28 03:50:09 +01:00
Andrea Cavalli
eec46d6c50 Use byte buffers in streams 2022-02-28 03:20:24 +01:00
Andrea Cavalli
53519fbc4e Use WriteBatch 2022-02-28 00:47:44 +01:00
Andrea Cavalli
86263af6f7 Create rocksdb directory 2022-02-26 22:51:22 +01:00
Andrea Cavalli
743919b831 Code cleanup 2022-02-26 03:28:20 +01:00
Andrea Cavalli
85642621df Use BytesRef when possible 2022-02-25 15:46:32 +01:00
Andrea Cavalli
f5729f0b65 Add knn field 2022-02-23 00:49:29 +01:00
Andrea Cavalli
ad67aa003c Add more queries 2022-02-22 02:10:36 +01:00
Andrea Cavalli
d4bff85dc6 Update utils 2022-02-21 01:01:22 +01:00
Andrea Cavalli
484b123199 Update data generator 2022-02-20 18:22:38 +01:00
Andrea Cavalli
6805bbd907 Remove dependencies 2022-02-20 03:24:08 +01:00
Andrea Cavalli
100301a645 Update netty and snakeyaml 2022-02-20 01:34:57 +01:00
Andrea Cavalli
8f40fd6a3f Fix NPE 2022-02-14 00:31:51 +01:00
Andrea Cavalli
cf0db4be31 Fix unit tests 2022-02-12 00:10:56 +01:00
Andrea Cavalli
e4fa423aa3 Code cleanup 2022-02-11 22:14:54 +01:00
Andrea Cavalli
6752fc8df4 Code cleanup 2022-02-11 22:05:08 +01:00
Andrea Cavalli
4f52b3d542 Update tests 2022-02-11 21:46:05 +01:00
Andrea Cavalli
f478ea97cd Test lucene generator 2022-02-11 21:08:23 +01:00
Andrea Cavalli
2eb4a84afa Code cleanup 2022-02-11 15:29:30 +01:00
Andrea Cavalli
5155fc6c10 Code cleanup 2022-02-11 15:27:12 +01:00
Andrea Cavalli
8a657b4f1d Remove min competitive score 2022-02-11 15:10:13 +01:00
Andrea Cavalli
06613ca9e6 Optimize lucene generator, adding global min competitive score and limit 2022-02-11 13:33:07 +01:00
Andrea Cavalli
8e7f7eaf18 Code cleanup 2022-02-11 13:32:50 +01:00
Andrea Cavalli
cf61636141 Update rocksdb 2022-02-11 12:31:03 +01:00
Andrea Cavalli
f9335d890f Fix kExists 2022-02-11 12:19:32 +01:00
Andrea Cavalli
5ebc9abe43 Update removeDirect 2022-02-11 12:16:46 +01:00
Andrea Cavalli
83b1d29047 Update fastutil and rocksdb 2022-02-11 12:12:44 +01:00
Andrea Cavalli
5c16a65bd3 Deoptimize method 2022-02-10 00:35:07 +01:00
Andrea Cavalli
79a6c3140f Convert anonymous class to static class 2022-02-09 20:22:32 +01:00
Andrea Cavalli
2c11b13b7a Avoid allocating a byte array in writeUTF and readUTF 2022-02-09 20:02:23 +01:00
Andrea Cavalli
46e7abfd8c Highly optimize LLUtils.toTerm(term) 2022-02-09 20:01:26 +01:00
Andrea Cavalli
89200c2ed5 Support more query types 2022-02-06 19:29:23 +01:00
Andrea Cavalli
8e0d806d2b Update defaults 2022-02-04 01:55:36 +01:00
Andrea Cavalli
7b66259da6 Bugfix 2022-02-02 23:30:21 +01:00
Andrea Cavalli
dfe8361e19 Rename query rewrite class 2022-01-28 21:12:10 +01:00
Andrea Cavalli
58943b5e08 Simplify query transformations 2022-01-28 19:31:25 +01:00
Andrea Cavalli
5c0434c73f Fix compilation issue 2022-01-26 21:45:41 +01:00
Andrea Cavalli
5615c8019a Update containsKey 2022-01-26 21:30:08 +01:00
Andrea Cavalli
574a35907d Further optimizations 2022-01-26 21:18:43 +01:00
Andrea Cavalli
fb19a7a9f3 Optimize some methods 2022-01-26 19:56:51 +01:00
Andrea Cavalli
95afa6f9dd Clean database code 2022-01-26 19:03:51 +01:00
Andrea Cavalli
cdb65b31f3 Various local dict optimizations, customize fillCache in containsRange 2022-01-26 16:06:15 +01:00
Andrea Cavalli
cf53eb4f5a Use global hooks 2022-01-26 15:03:23 +01:00
Andrea Cavalli
f837a1d1b2 Add containsKey 2022-01-22 23:21:40 +01:00
Andrea Cavalli
ca8718780e Update net5 2022-01-20 18:39:41 +01:00
Andrea Cavalli
adb490371f Optimization 2022-01-18 15:56:27 +01:00
Andrea Cavalli
dfce6cd725 Use a single facet collector 2022-01-18 15:29:14 +01:00
Andrea Cavalli
6baa05de51 Facets optimizations 2022-01-18 14:16:32 +01:00
Andrea Cavalli
b2216c1b2c Improve bucket collector performance 2022-01-18 00:03:03 +01:00
Andrea Cavalli
119a4ffe85 Add legacy ICU 2022-01-18 00:02:55 +01:00
Andrea Cavalli
08ef14fb76 Typo 2022-01-17 18:12:12 +01:00
Andrea Cavalli
4f19d2fff3 Add memory stats method 2022-01-15 20:00:10 +01:00
Andrea Cavalli
d2abe044cc Rethink rocksdb settings 2022-01-12 16:18:31 +01:00
Andrea Cavalli
ce506f22dd Bugfix 2022-01-11 22:28:42 +01:00
Andrea Cavalli
4e5e4423ff Rewrite the custom analyzers 2022-01-11 22:23:07 +01:00
Andrea Cavalli
ecde6724e5 Update pom 2022-01-11 19:59:07 +01:00
Andrea Cavalli
74121cf462 Update pom 2022-01-11 16:52:03 +01:00
Andrea Cavalli
2cf743ddec Update pom 2022-01-11 16:01:50 +01:00
Andrea Cavalli
aad377acb0 Change default values 2022-01-10 22:59:36 +01:00
Andrea Cavalli
84dea46a35 Update logging 2022-01-04 12:55:33 +01:00
Andrea Cavalli
e6d85d226e Update snakeyaml 2022-01-04 00:03:38 +01:00
Andrea Cavalli
ce97f33873 Fix infostream 2021-12-31 01:00:32 +01:00
Andrea Cavalli
57bcc92512 Fix statistics 2021-12-31 00:58:47 +01:00
Andrea Cavalli
d6244ab18d Rewind and clear the buffers 2021-12-30 22:29:17 +01:00
Andrea Cavalli
64f21409f1 Performance optimization 2021-12-30 22:29:06 +01:00
Andrea Cavalli
b493decd64 Fix metric names 2021-12-30 18:20:56 +01:00
Andrea Cavalli
68d8b5240c Implement more micrometer metrics 2021-12-30 17:28:06 +01:00
Andrea Cavalli
5769bc7076 Print errored key 2021-12-29 00:31:35 +01:00
Andrea Cavalli
24493eb4ff Change default timeout 2021-12-27 23:07:11 +01:00
Andrea Cavalli
7993a6210b Fix pessimistic db 2021-12-27 18:44:54 +01:00
Andrea Cavalli
dffb8eb3eb Set default compression types 2021-12-27 17:49:22 +01:00
Andrea Cavalli
9309692062 Configurable optimistic transactions 2021-12-27 17:45:52 +01:00
Andrea Cavalli
aa04a64c34 Fix logging, configurable compression 2021-12-27 17:34:44 +01:00
Andrea Cavalli
582813b6c7 Customizable volumes 2021-12-27 16:33:31 +01:00
Andrea Cavalli
3ae3bd7944 Fix bound 2021-12-26 20:24:02 +01:00
Andrea Cavalli
2b09f5a738 Update retry algorithm 2021-12-26 20:20:27 +01:00
Andrea Cavalli
83bc3ebb04 Update retry algorithm 2021-12-26 12:47:00 +01:00
Andrea Cavalli
03bbf3baa9 Bugfix 2021-12-23 02:20:40 +01:00
Andrea Cavalli
a1eec93c64 Rewrite unsorted lucene queries 2021-12-23 02:13:51 +01:00
Andrea Cavalli
a5666dd5b4 Increase default timeout 2021-12-20 16:38:58 +01:00
Andrea Cavalli
eaef75a304 Close LMDB databases after each full search 2021-12-18 21:01:14 +01:00
Andrea Cavalli
480ab77db8 Use sorted maps 2021-12-18 18:16:56 +01:00
Andrea Cavalli
1de5e52ffd Use linkedhashmap 2021-12-18 15:15:19 +01:00
Andrea Cavalli
9952eaffc0 Using search instead of count is too heavy 2021-12-18 00:18:24 +01:00
Andrea Cavalli
0f62362d21 Disable reader pooling 2021-12-17 23:51:10 +01:00
Andrea Cavalli
638595f518 Pool LMDB databases 2021-12-17 23:12:35 +01:00
Andrea Cavalli
0e9c8c089e Bugfixes 2021-12-17 16:24:18 +01:00
Andrea Cavalli
6d92ba8a68 Bugfixes 2021-12-17 03:04:01 +01:00
Andrea Cavalli
1a35930909 use a reentrantlock to avoid multiple merges at the same time 2021-12-17 02:19:51 +01:00
Andrea Cavalli
1dffb55572 Use log4j for logging, rewrite some local dictionary parts 2021-12-17 01:48:49 +01:00
Andrea Cavalli
7a712722d7 Code cleanup 2021-12-16 16:34:20 +01:00
Andrea Cavalli
6e312fe102 Use uninterruptible scheduler for lucene searches 2021-12-16 16:14:44 +01:00
Andrea Cavalli
2e1678373c Bugfix 2021-12-16 03:00:51 +01:00
Andrea Cavalli
01099cc4d1 Fix unscored searcher 2021-12-16 02:38:56 +01:00
Andrea Cavalli
b7ca57a215 Schedule commits and merges 2021-12-15 16:47:59 +01:00
Andrea Cavalli
8ad622db0a Reduce the number of threads 2021-12-15 16:04:33 +01:00
Andrea Cavalli
c59655e844 Bugfix 2021-12-14 20:23:22 +01:00
Andrea Cavalli
d58d696ca4 Bugfixes 2021-12-13 01:57:37 +01:00
Andrea Cavalli
907561d93c Add requests timeout 2021-12-12 23:40:30 +01:00
Andrea Cavalli
5157656a2c Don't use official searcher for production queries 2021-12-12 18:52:50 +01:00
Andrea Cavalli
3cdafd748e Optionally disable LMDB 2021-12-12 16:43:22 +01:00
Andrea Cavalli
297c249243 Code cleanup 2021-12-12 16:19:50 +01:00
Andrea Cavalli
1a64d98697 Bugfixes 2021-12-12 02:40:26 +01:00
Andrea Cavalli
2a5e90d667 Update rocksdb to 6.26.1, Update netty 2021-12-12 02:17:36 +01:00
Andrea Cavalli
18b242d746 Update log4j and lucene 2021-12-11 13:20:28 +01:00
Andrea Cavalli
6644e040dd Fix unsorted unscored streaming multi searcher 2021-12-08 11:58:06 +01:00
Andrea Cavalli
b5aa8b4baa Use a LongSemaphore to avoid using a buffer 2021-11-29 23:27:55 +01:00
Andrea Cavalli
cc368aecc8 Fix deadlock 2021-11-29 14:15:31 +01:00
Andrea Cavalli
d8de969bee Fix deadlock 2021-11-29 14:01:57 +01:00
Andrea Cavalli
3d9247c969 Optimize unscored streaming searcher 2021-11-24 16:39:22 +01:00
Andrea Cavalli
044d189600 Optimize single-shard indices 2021-11-21 12:31:23 +01:00
Andrea Cavalli
7047b512fc Add numeric field 2021-11-20 16:09:00 +01:00
Andrea Cavalli
798b8a5288 Update policies 2021-11-20 01:30:06 +01:00
Andrea Cavalli
06d98040b1 Allow to use absolute values 2021-11-20 01:12:17 +01:00
Andrea Cavalli
3810c49fa1 Allow random sampling 2021-11-19 22:15:31 +01:00
Andrea Cavalli
29d9aad8bf Update buckets 2021-11-19 19:03:31 +01:00
Andrea Cavalli
ed00d474d6 Implement numeric buckets collector 2021-11-18 17:13:53 +01:00
Andrea Cavalli
e014266b8d Rename unclear parameters 2021-11-16 23:54:23 +01:00
Andrea Cavalli
c1c57388e5 Add standard lucene queries 2021-11-16 23:19:23 +01:00
Andrea Cavalli
3d7e80b4ec Update comparators 2021-11-16 23:19:13 +01:00
Andrea Cavalli
891255e18e Add more methods 2021-11-14 22:21:32 +01:00
Andrea Cavalli
f30ad372ed Bugfix 2021-11-12 02:52:42 +01:00
Andrea Cavalli
23d5f700fb Add update method to single values 2021-11-12 02:05:44 +01:00
Andrea Cavalli
42c4b6e651 Add a discard hook 2021-11-09 15:57:16 +01:00
Andrea Cavalli
d4dae4667d Remove stream executor 2021-11-09 02:14:21 +01:00
387 changed files with 16872 additions and 28394 deletions

CI workflow file

@@ -14,7 +14,7 @@ jobs:
     strategy:
       matrix:
         include:
-          - { os: ubuntu-20.04, arch: "linux/amd64" }
+          - { os: ubuntu-21.04, arch: "linux/amd64" }
     runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v2
@@ -27,11 +27,11 @@ jobs:
           export REVISION=${{ github.run_number }}
           echo "REVISION=$REVISION" >> $GITHUB_ENV
-      - name: Set up JDK 17
+      - name: Set up JDK 18
         if: github.ref == 'refs/heads/master'
         uses: actions/setup-java@v1
         with:
-          java-version: 17
+          java-version: 18
           server-id: mchv-release-distribution
           server-username: MAVEN_USERNAME
           server-password: MAVEN_PASSWORD

.gitignore (vendored)

@@ -181,3 +181,10 @@ fabric.properties
 dbengine.iml
 /.idea/
 .ci-friendly-pom.xml
+/.classpath
+/.project
+/.settings/
+.flattened-pom.xml

README.md

@@ -4,6 +4,8 @@
[Reactive](https://www.reactive-streams.org/) database engine written in Java (17+) using [Project Reactor](https://github.com/reactor/reactor-core).
## DO NOT USE THIS PROJECT: THIS IS A PERSONAL PROJECT, THE API IS NOT STABLE, THE CODE IS NOT TESTED.
This library provides a basic reactive abstraction and implementation of a **key-value store** and a **search engine**.
Four implementations exist out of the box: two for the key-value store and two for the search engine. It is also possible to add more implementations.
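
Based on the coordinates and repository URL that appear in the pom.xml diff below, a consumer project would declare this library roughly as follows. This is a minimal sketch: the repository id is arbitrary and the version shown is illustrative, since actual releases use 4.3.${revision} with ${revision} injected at build time.

    <repositories>
        <repository>
            <id>mchv</id>
            <name>MCHV Release Apache Maven Packages</name>
            <url>https://mvn.mchv.eu/repository/mchv</url>
        </repository>
    </repositories>
    <dependencies>
        <dependency>
            <!-- coordinates taken from the pom.xml shown below; version is illustrative -->
            <groupId>it.cavallium</groupId>
            <artifactId>dbengine</artifactId>
            <version>4.3.0</version>
        </dependency>
    </dependencies>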

pom.xml

@@ -1,20 +1,21 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<name>CavalliumDBEngine</name>
<groupId>it.cavallium</groupId>
<artifactId>dbengine</artifactId>
<version>3.0.${revision}</version>
<version>4.3.${revision}</version>
<packaging>jar</packaging>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<revision>0-SNAPSHOT</revision>
<dbengine.ci>false</dbengine.ci>
<micrometer.version>1.7.4</micrometer.version>
<micrometer.version>1.10.4</micrometer.version>
<rocksdb.version>9.7.3</rocksdb.version>
<junit.jupiter.version>5.9.0</junit.jupiter.version>
<data.generator.version>1.1.18</data.generator.version>
</properties>
<repositories>
<repository>
@@ -27,28 +28,29 @@
<name>MCHV Snapshot Apache Maven Packages</name>
<url>https://mvn.mchv.eu/repository/mchv-snapshot</url>
</repository>
<repository>
<id>lucene-repository</id>
<name>Lucene Maven</name>
<url>https://repository.apache.org/snapshots/</url>
<snapshots>
<enabled>true</enabled>
<updatePolicy>always</updatePolicy>
</snapshots>
</repository>
<repository>
<id>mulesoft-public-snapshots</id>
<name>MuleSoft public snapshots</name>
<url>https://repository.mulesoft.org/nexus/content/repositories/public</url>
<releases><enabled>false</enabled></releases>
<snapshots><enabled>true</enabled></snapshots>
<releases>
<enabled>false</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
</snapshots>
</repository>
<repository>
<id>netty5-snapshots</id>
<name>Netty 5 snapshots</name>
<url>https://oss.sonatype.org/content/repositories/snapshots</url>
<releases><enabled>true</enabled></releases>
<snapshots><enabled>true</enabled></snapshots>
<id>apache.snapshots</id>
<name>Apache Snapshot Repository</name>
<url>https://repository.apache.org/snapshots</url>
<releases>
<enabled>false</enabled>
</releases>
</repository>
<repository>
<id>maven_central</id>
<name>Maven Central</name>
<url>https://repo.maven.apache.org/maven2/</url>
</repository>
</repositories>
<pluginRepositories>
@@ -68,11 +70,23 @@
<id>mchv-release-distribution</id>
<name>MCHV Release Apache Maven Packages Distribution</name>
<url>https://mvn.mchv.eu/repository/mchv</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
<snapshotRepository>
<id>mchv-snapshot-distribution</id>
<name>MCHV Snapshot Apache Maven Packages Distribution</name>
<url>https://mvn.mchv.eu/repository/mchv-snapshot</url>
<releases>
<enabled>false</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
</snapshots>
</snapshotRepository>
</distributionManagement>
<scm>
@@ -80,397 +94,220 @@
<developerConnection>scm:git:https://git.ignuranza.net/andreacavalli/CavalliumDBEngine.git</developerConnection>
<tag>HEAD</tag>
</scm>
<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>uk.org.lidalia</groupId>
<artifactId>lidalia-slf4j-ext</artifactId>
<version>1.0.0</version>
<exclusions>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
<dependency>
<groupId>org.warp</groupId>
<artifactId>common-utils</artifactId>
</dependency>
<dependency>
<groupId>io.net5</groupId>
<artifactId>netty-buffer</artifactId>
</dependency>
<dependency>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</dependency>
<dependency>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-core</artifactId>
</dependency>
<dependency>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-impl</artifactId>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
</dependency>
<dependency>
<groupId>it.cavallium</groupId>
<artifactId>concurrent-locks</artifactId>
</dependency>
<dependency>
<groupId>org.yaml</groupId>
<artifactId>snakeyaml</artifactId>
</dependency>
<dependency>
<groupId>javax.annotation</groupId>
<artifactId>javax.annotation-api</artifactId>
</dependency>
<dependency>
<groupId>it.unimi.dsi</groupId>
<artifactId>fastutil</artifactId>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-params</artifactId>
<scope>test</scope>
</dependency>
<!-- This will get hamcrest-core automatically -->
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-library</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-slf4j-impl</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.lmax</groupId>
<artifactId>disruptor</artifactId>
<version>3.4.4</version>
</dependency>
<dependency>
<groupId>org.rocksdb</groupId>
<artifactId>rocksdbjni</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-core</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-join</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-analysis-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-analysis-icu</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-codecs</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-backward-codecs</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-queries</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-misc</artifactId>
</dependency>
<dependency>
<groupId>org.jetbrains</groupId>
<artifactId>annotations</artifactId>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-core</artifactId>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-tools</artifactId>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-test</artifactId>
</dependency>
<dependency>
<groupId>org.novasearch</groupId>
<artifactId>lucene-relevance</artifactId>
</dependency>
<dependency>
<groupId>io.soabase.record-builder</groupId>
<artifactId>record-builder-core</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>it.cavallium</groupId>
<artifactId>data-generator-runtime</artifactId>
</dependency>
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-core</artifactId>
</dependency>
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-registry-jmx</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.lmdbjava</groupId>
<artifactId>lmdbjava</artifactId>
<version>0.8.2</version>
</dependency>
</dependencies>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.30</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>30.1.1-jre</version>
</dependency>
<dependency>
<groupId>org.warp</groupId>
<artifactId>common-utils</artifactId>
<version>1.1.5</version>
</dependency>
<dependency>
<groupId>io.net5</groupId>
<artifactId>netty-buffer</artifactId>
<version>5.0.0.Final-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
<version>2.3.1</version>
</dependency>
<dependency>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-core</artifactId>
<version>3.0.2</version>
</dependency>
<dependency>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-impl</artifactId>
<version>3.0.2</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>1.1.1</version>
</dependency>
<dependency>
<groupId>it.cavallium</groupId>
<artifactId>concurrent-locks</artifactId>
<version>1.0.8</version>
</dependency>
<dependency>
<groupId>org.yaml</groupId>
<artifactId>snakeyaml</artifactId>
<version>1.28</version>
</dependency>
<dependency>
<groupId>javax.annotation</groupId>
<artifactId>javax.annotation-api</artifactId>
<version>1.3.2</version>
</dependency>
<dependency>
<groupId>it.unimi.dsi</groupId>
<artifactId>fastutil</artifactId>
<version>8.5.6</version>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<version>5.8.0-M1</version>
<exclusions>
<exclusion>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<version>5.8.0-M1</version>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-params</artifactId>
<version>5.8.0-M1</version>
</dependency>
<!-- This will get hamcrest-core automatically -->
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-library</artifactId>
<version>2.2</version>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-core</artifactId>
<version>2.14.1</version>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>33.0.0-jre</version>
</dependency>
<dependency>
<groupId>org.yaml</groupId>
<artifactId>snakeyaml</artifactId>
<version>1.33</version>
</dependency>
<dependency>
<groupId>it.unimi.dsi</groupId>
<artifactId>fastutil</artifactId>
<version>8.5.11</version>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<scope>test</scope>
<version>${junit.jupiter.version}</version>
<exclusions>
<exclusion>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<version>${junit.jupiter.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-params</artifactId>
<version>${junit.jupiter.version}</version>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<version>3.23.1</version>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
<exclusion>
<groupId>net.bytebuddy</groupId>
<artifactId>byte-buddy</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- This will get hamcrest-core automatically -->
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-library</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-slf4j-impl</artifactId>
<version>2.14.1</version>
<artifactId>log4j-slf4j2-impl</artifactId>
<version>2.23.1</version>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>2.0.12</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-api</artifactId>
<version>2.23.1</version>
</dependency>
<dependency>
<groupId>com.lmax</groupId>
<artifactId>disruptor</artifactId>
<version>4.0.0</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.rocksdb</groupId>
<artifactId>rocksdbjni</artifactId>
<version>6.25.3</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-core</artifactId>
<version>9.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-join</artifactId>
<version>9.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-analysis-common</artifactId>
<version>9.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-analysis-icu</artifactId>
<version>9.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-codecs</artifactId>
<version>9.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-backward-codecs</artifactId>
<version>9.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-queries</artifactId>
<version>9.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-misc</artifactId>
<version>9.0.0-SNAPSHOT</version>
<version>${rocksdb.version}</version>
</dependency>
<dependency>
<groupId>org.jetbrains</groupId>
<artifactId>annotations</artifactId>
<version>20.1.0</version>
<version>24.0.1</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-core</artifactId>
<version>3.4.11</version>
<groupId>com.squareup.moshi</groupId>
<artifactId>moshi</artifactId>
<version>1.14.0</version>
<exclusions>
<exclusion>
<groupId>org.jetbrains.kotlin</groupId>
<artifactId>kotlin-stdlib</artifactId>
</exclusion>
<exclusion>
<groupId>org.jetbrains.kotlin</groupId>
<artifactId>kotlin-stdlib-common</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-tools</artifactId>
<version>3.4.11</version>
<groupId>dev.zacsweers.moshix</groupId>
<artifactId>moshi-records-reflect</artifactId>
<version>0.14.1</version>
<exclusions>
<exclusion>
<groupId>com.squareup.moshi</groupId>
<artifactId>moshi</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-test</artifactId>
<version>3.4.11</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.novasearch</groupId>
<artifactId>lucene-relevance</artifactId>
<version>9.0.0.0.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.soabase.record-builder</groupId>
<artifactId>record-builder-core</artifactId>
<version>26</version>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk15on</artifactId>
<version>1.70</version>
</dependency>
<dependency>
<groupId>it.cavallium</groupId>
<artifactId>data-generator-runtime</artifactId>
<version>1.0.32</version>
<artifactId>datagen</artifactId>
<version>${data.generator.version}</version>
<exclusions>
<exclusion>
<groupId>org.jetbrains</groupId>
<artifactId>annotations</artifactId>
</exclusion>
<exclusion>
<groupId>it.unimi.dsi</groupId>
<artifactId>fastutil</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-core</artifactId>
<version>${micrometer.version}</version>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-registry-jmx</artifactId>
<version>${micrometer.version}</version>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.soabase.record-builder</groupId>
<artifactId>record-builder-core</artifactId>
<version>36</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.12.0</version>
<scope>compile</scope>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<testSourceDirectory>src/test/java</testSourceDirectory>
<resources>
<resource>
<directory>../src/main/libs</directory>
<excludes>
<exclude>**/*.jar</exclude>
</excludes>
</resource>
</resources>
<extensions>
<extension>
<groupId>kr.motd.maven</groupId>
@@ -484,53 +321,30 @@
<artifactId>maven-install-plugin</artifactId>
<version>3.0.0-M1</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>3.1.2</version>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
<overWriteReleases>false</overWriteReleases>
<overWriteSnapshots>false</overWriteSnapshots>
<overWriteIfNewer>true</overWriteIfNewer>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.1</version>
<version>3.11.0</version>
<configuration>
<release>17</release>
<release>21</release>
<annotationProcessorPaths>
<annotationProcessorPath>
<groupId>io.soabase.record-builder</groupId>
<artifactId>record-builder-processor</artifactId>
<version>1.19</version>
<version>33</version>
</annotationProcessorPath>
</annotationProcessorPaths>
<annotationProcessors>
<annotationProcessor>io.soabase.recordbuilder.processor.RecordBuilderProcessor</annotationProcessor>
</annotationProcessors>
<useIncrementalCompilation>false</useIncrementalCompilation>
<compilerArgs>--enable-preview
<arg>--add-opens=java.base/jdk.internal.misc=ALL-UNNAMED</arg>
</compilerArgs>
<source>17</source>
<target>17</target>
<source>21</source>
<target>21</target>
</configuration>
</plugin>
<plugin>
<groupId>it.cavallium</groupId>
<artifactId>data-generator</artifactId>
<version>0.9.71</version>
<artifactId>datagen-plugin</artifactId>
<version>${data.generator.version}</version>
<executions>
<execution>
<id>generate-lucene-query-sources</id>
@ -541,6 +355,19 @@
<configuration>
<basePackageName>it.cavallium.dbengine.client.query</basePackageName>
<configPath>${basedir}/src/main/data-generator/lucene-query.yaml</configPath>
<useRecordBuilder>true</useRecordBuilder>
</configuration>
</execution>
<execution>
<id>generate-rpc-sources</id>
<phase>generate-sources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<basePackageName>it.cavallium.dbengine.rpc</basePackageName>
<configPath>${basedir}/src/main/data-generator/quic-rpc.yaml</configPath>
<useRecordBuilder>true</useRecordBuilder>
</configuration>
</execution>
</executions>
@ -548,16 +375,16 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>3.0.0-M5</version>
<version>3.0.0-M6</version>
<dependencies>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<version>5.8.0-M1</version>
<version>${junit.jupiter.version}</version>
</dependency>
</dependencies>
<configuration>
<argLine>--enable-preview --add-modules jdk.incubator.foreign -Dforeign.restricted=permit --add-opens=java.base/jdk.internal.misc=ALL-UNNAMED --enable-native-access=ALL-UNNAMED</argLine>
<useModulePath>false</useModulePath>
<systemProperties>
<property>
<name>ci</name>
@ -566,6 +393,165 @@
</systemProperties>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<version>3.0.0-M3</version>
<executions>
<execution>
<id>enforce</id>
<configuration>
<rules>
<dependencyConvergence />
</rules>
</configuration>
<goals>
<goal>enforce</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>flatten-maven-plugin</artifactId>
<version>1.1.0</version>
<configuration>
<updatePomFile>true</updatePomFile>
<flattenMode>oss</flattenMode>
</configuration>
<executions>
<execution>
<id>flatten</id>
<phase>process-resources</phase>
<goals>
<goal>flatten</goal>
</goals>
</execution>
<execution>
<id>flatten.clean</id>
<phase>clean</phase>
<goals>
<goal>clean</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
<pluginManagement>
<plugins>
<!--This plugin's configuration is used to store Eclipse m2e settings only. It has no influence on the Maven build itself.-->
<plugin>
<groupId>org.eclipse.m2e</groupId>
<artifactId>lifecycle-mapping</artifactId>
<version>1.0.0</version>
<configuration>
<lifecycleMappingMetadata>
<pluginExecutions>
<pluginExecution>
<pluginExecutionFilter>
<groupId>
org.codehaus.mojo
</groupId>
<artifactId>
flatten-maven-plugin
</artifactId>
<versionRange>
[1.1.0,)
</versionRange>
<goals>
<goal>flatten</goal>
</goals>
</pluginExecutionFilter>
<action>
<ignore></ignore>
</action>
</pluginExecution>
</pluginExecutions>
</lifecycleMappingMetadata>
</configuration>
</plugin>
</plugins>
</pluginManagement>
</build>
<profiles>
<profile>
<id>repair</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>dbengine.build</name>
<value>repair</value>
</property>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<version>3.4.0</version>
<executions>
<execution>
<id>add-source</id>
<phase>generate-sources</phase>
<goals>
<goal>add-source</goal>
</goals>
<configuration>
<sources>
<source>src/repair/java</source>
</sources>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.6.0</version>
<configuration>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
<appendAssemblyId>false</appendAssemblyId>
<finalName>dbengine-repair</finalName>
<archive>
<manifest>
<mainClass>it.cavallium.dbengine.repair.Repair</mainClass>
</manifest>
<manifestEntries>
<Multi-Release>true</Multi-Release>
</manifestEntries>
</archive>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
<resources>
<resource>
<directory>src/repair/resources</directory>
</resource>
</resources>
</build>
<dependencies>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-slf4j2-impl</artifactId>
<version>2.20.0</version>
</dependency>
<dependency>
<groupId>com.lmax</groupId>
<artifactId>disruptor</artifactId>
<version>3.4.4</version>
</dependency>
</dependencies>
</profile>
</profiles>
</project>


@ -1,191 +0,0 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.client.query.QueryUtils;
import it.cavallium.dbengine.client.query.current.data.QueryParams;
import it.cavallium.dbengine.client.query.current.data.ScoreMode;
import it.cavallium.dbengine.client.query.current.data.ScoreSort;
import it.cavallium.dbengine.database.LLDocument;
import it.cavallium.dbengine.database.LLItem;
import it.cavallium.dbengine.database.LLLuceneIndex;
import it.cavallium.dbengine.database.LLSignal;
import it.cavallium.dbengine.database.LLTerm;
import it.cavallium.dbengine.database.disk.LLLocalDatabaseConnection;
import it.cavallium.dbengine.lucene.LuceneUtils;
import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer;
import it.cavallium.dbengine.lucene.analyzer.TextFieldsSimilarity;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.util.Comparator;
import java.util.StringJoiner;
import java.util.concurrent.CompletionException;
import org.apache.lucene.document.Field.Store;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
public class IndicizationExample {
public static void main(String[] args) {
tempIndex(true)
.flatMap(index -> index
.addDocument(new LLTerm("id", "123"),
new LLDocument(new LLItem[]{
LLItem.newStringField("id", "123", Store.YES),
LLItem.newTextField("name", "Mario", Store.NO),
LLItem.newStringField("surname", "Rossi", Store.NO)
})
)
.then(index.refresh())
.then(index.search(null,
QueryParams
.builder()
.query(QueryUtils.exactSearch(TextFieldsAnalyzer.N4GramPartialString, "name", "Mario"))
.limit(1)
.sort(ScoreSort.of())
.scoreMode(ScoreMode.of(false, true))
.build(),
"id"
))
.flatMap(results -> Mono.from(results
.results()
.flatMap(r -> r)
.doOnNext(signal -> {
if (signal.isValue()) {
System.out.println("Value: " + signal.getValue());
}
})
.filter(LLSignal::isTotalHitsCount))
)
.doOnNext(count -> System.out.println("Total hits: " + count))
.doOnTerminate(() -> System.out.println("Completed"))
.then(index.close())
)
.subscribeOn(Schedulers.parallel())
.block();
tempIndex(true)
.flatMap(index ->
index
.addDocument(new LLTerm("id", "126"),
new LLDocument(new LLItem[]{
LLItem.newStringField("id", "126", Store.YES),
LLItem.newTextField("name", "Marioxq", Store.NO),
LLItem.newStringField("surname", "Rossi", Store.NO)
})
)
.then(index
.addDocument(new LLTerm("id", "123"),
new LLDocument(new LLItem[]{
LLItem.newStringField("id", "123", Store.YES),
LLItem.newTextField("name", "Mario", Store.NO),
LLItem.newStringField("surname", "Rossi", Store.NO)
})
))
.then(index
.addDocument(new LLTerm("id", "124"),
new LLDocument(new LLItem[]{
LLItem.newStringField("id", "124", Store.YES),
LLItem.newTextField("name", "Mariossi", Store.NO),
LLItem.newStringField("surname", "Rossi", Store.NO)
})
))
.then(index
.addDocument(new LLTerm("id", "125"),
new LLDocument(new LLItem[]{
LLItem.newStringField("id", "125", Store.YES),
LLItem.newTextField("name", "Mario marios", Store.NO),
LLItem.newStringField("surname", "Rossi", Store.NO)
})
))
.then(index
.addDocument(new LLTerm("id", "128"),
new LLDocument(new LLItem[]{
LLItem.newStringField("id", "128", Store.YES),
LLItem.newTextField("name", "Marion", Store.NO),
LLItem.newStringField("surname", "Rossi", Store.NO)
})
))
.then(index
.addDocument(new LLTerm("id", "127"),
new LLDocument(new LLItem[]{
LLItem.newStringField("id", "127", Store.YES),
LLItem.newTextField("name", "Mariotto", Store.NO),
LLItem.newStringField("surname", "Rossi", Store.NO)
})
))
.then(index.refresh())
.then(index.search(null,
QueryParams
.builder()
.query(QueryUtils.exactSearch(TextFieldsAnalyzer.N4GramPartialString, "name", "Mario"))
.limit(10)
.sort(MultiSort.topScore().getQuerySort())
.scoreMode(ScoreMode.of(false, true))
.build(),
"id"
))
.flatMap(results -> LuceneUtils.mergeSignalStreamRaw(results
.results(), MultiSort.topScoreRaw(), 10L)
.doOnNext(value -> System.out.println("Value: " + value))
.then(Mono.from(results
.results()
.flatMap(part -> part)
.filter(LLSignal::isTotalHitsCount)
.map(LLSignal::getTotalHitsCount)))
)
.doOnNext(count -> System.out.println("Total hits: " + count))
.doOnTerminate(() -> System.out.println("Completed"))
.then(index.close())
)
.subscribeOn(Schedulers.parallel())
.block();
}
public static final class CurrentCustomType {
private final int number;
public CurrentCustomType(int number) {
this.number = number;
}
public int getNumber() {
return number;
}
@Override
public String toString() {
return new StringJoiner(", ", CurrentCustomType.class.getSimpleName() + "[", "]")
.add("number=" + number)
.toString();
}
}
private static <U> Mono<? extends LLLuceneIndex> tempIndex(boolean delete) {
var wrkspcPath = Path.of("/tmp/tempdb/");
return Mono
.fromCallable(() -> {
if (delete && Files.exists(wrkspcPath)) {
Files.walk(wrkspcPath).sorted(Comparator.reverseOrder()).forEach(file -> {
try {
Files.delete(file);
} catch (IOException ex) {
throw new CompletionException(ex);
}
});
}
Files.createDirectories(wrkspcPath);
return null;
})
.subscribeOn(Schedulers.boundedElastic())
.then(new LLLocalDatabaseConnection(wrkspcPath, true).connect())
.flatMap(conn -> conn.getLuceneIndex("testindices",
10,
TextFieldsAnalyzer.N4GramPartialString,
TextFieldsSimilarity.NGramBM25Plus,
Duration.ofSeconds(5),
Duration.ofSeconds(5),
false
));
}
}


@ -2,22 +2,25 @@
currentVersion: "0.0.0"
interfacesData:
Query: []
# Versions must contain only numbers, lowercase letters, dots, and dashes. Maximum: 99.999.9999
versions:
0.0.0:
details:
changelog: "First version"
superTypes:
superTypesData:
Query: [
BoxedQuery, TermQuery, PhraseQuery, WildcardQuery, SynonymQuery, FuzzyQuery, MatchAllDocsQuery,
MatchNoDocsQuery, BooleanQuery, SortedNumericDocValuesFieldSlowRangeQuery, SortedDocFieldExistsQuery,
ConstantScoreQuery, BoostQuery, IntPointRangeQuery, LongPointRangeQuery, IntPointExactQuery,
LongPointExactQuery
BoxedQuery, TermQuery, IntTermQuery, IntNDTermQuery, LongTermQuery, LongNDTermQuery, FloatTermQuery,
FloatNDTermQuery, DoubleTermQuery, DoubleNDTermQuery,
PhraseQuery, SolrTextQuery, WildcardQuery, SynonymQuery, FuzzyQuery, MatchAllDocsQuery, MatchNoDocsQuery,
BooleanQuery, SortedNumericDocValuesFieldSlowRangeQuery, SortedDocFieldExistsQuery,
ConstantScoreQuery, BoostQuery, IntPointRangeQuery, IntNDPointRangeQuery, LongPointRangeQuery,
FloatPointRangeQuery, DoublePointRangeQuery, LongNDPointRangeQuery, FloatNDPointRangeQuery,
DoubleNDPointRangeQuery, IntPointExactQuery, IntNDPointExactQuery, LongPointExactQuery,
FloatPointExactQuery, DoublePointExactQuery, LongNDPointExactQuery, FloatNDPointExactQuery,
DoubleNDPointExactQuery, IntPointSetQuery, LongPointSetQuery, FloatPointSetQuery, DoublePointSetQuery,
StandardQuery, FieldExistsQuery, FilterConfigQuery, SolrFunctionQuery, MoreLikeThisQuery
]
Occur: [OccurMust, OccurMustNot, OccurShould, OccurFilter]
Sort: [NoSort, NumericSort, ScoreSort, DocSort, RandomSort]
customTypes: {}
classes:
NumberFormat: [NumberFormatDecimal]
PointType: [PointTypeInt, PointTypeLong, PointTypeFloat, PointTypeDouble]
customTypesData: {}
baseTypesData:
# Basic data
# ==========
@ -51,6 +54,31 @@ versions:
OccurFilter:
data: { }
# Special queries
# ===============
# Raw Lucene string query, parsable by Lucene's StandardQueryParser
StandardQuery:
data:
query: String
# There must be at least one field!
defaultFields: String[]
pointsConfig: PointConfig[]
termFields: String[]
PointConfig:
data:
field: String
data: PointConfigData
PointConfigData:
data:
numberFormat: NumberFormat
type: PointType
NumberFormatDecimal: { data: { } }
PointTypeInt: { data: { } }
PointTypeLong: { data: { } }
PointTypeFloat: { data: { } }
PointTypeDouble: { data: { } }
# Text queries
# ============
@ -58,6 +86,73 @@ versions:
TermQuery:
data:
term: Term
# Query that matches a term.
LongTermQuery:
data:
field: String
value: long
LongNDTermQuery:
data:
field: String
value: long[]
# Query that matches a term.
IntTermQuery:
data:
field: String
value: int
# Query that matches a term.
IntNDTermQuery:
data:
field: String
value: int[]
# Query that matches a term.
FloatTermQuery:
data:
field: String
value: float
# Query that matches a term.
FloatNDTermQuery:
data:
field: String
value: float[]
# Query that matches a term.
DoubleTermQuery:
data:
field: String
value: double
# Query that matches a term.
DoubleNDTermQuery:
data:
field: String
value: double[]
# Query that matches the existence of a field.
FieldExistsQuery:
data:
field: String
# Query used to configure the Solr cache.
FilterConfigQuery:
data:
query: Query
cached: boolean
# Query that represents a Solr Function Query (https://solr.apache.org/guide/solr/latest/query-guide/function-queries.html)
SolrFunctionQuery:
data:
query: String
boost: double
MoreLikeThisQuery:
data:
id: String
fieldList: String[]
minTf: -int
minDf: -int
maxDf: -int
maxDfPct: -int
minWl: -int
maxWl: -int
maxQt: -int
maxNtp: -int
boost: -boolean
qf: String[]
# Query that matches a phrase.
PhraseQuery:
data:
@ -65,6 +160,14 @@ versions:
# counted as characters from the beginning of the phrase.
phrase: TermPosition[]
slop: int
# Query that matches a phrase. (Solr)
SolrTextQuery:
data:
# Field name
field: String
# Text query
phrase: String
slop: int
# Advanced query that matches text allowing asterisks in the query
WildcardQuery:
data:
@ -150,22 +253,108 @@ versions:
field: String
min: int
max: int
# Query that matches a multi-dimensional int point field, from "min" to "max" (one bound per dimension)
IntNDPointRangeQuery:
data:
field: String
min: int[]
max: int[]
# Query that matches a long point field, from "min" to "max"
LongPointRangeQuery:
data:
field: String
min: long
max: long
# Query that matches a float point field, from "min" to "max"
FloatPointRangeQuery:
data:
field: String
min: float
max: float
# Query that matches a double point field, from "min" to "max"
DoublePointRangeQuery:
data:
field: String
min: double
max: double
# Query that matches a multi-dimensional long point field, from "min" to "max" (one bound per dimension)
LongNDPointRangeQuery:
data:
field: String
min: long[]
max: long[]
# Query that matches a multi-dimensional float point field, from "min" to "max" (one bound per dimension)
FloatNDPointRangeQuery:
data:
field: String
min: float[]
max: float[]
# Query that matches a multi-dimensional double point field, from "min" to "max" (one bound per dimension)
DoubleNDPointRangeQuery:
data:
field: String
min: double[]
max: double[]
# Query that matches an int point field
IntPointExactQuery:
data:
field: String
value: int
# Query that matches a multi-dimensional int point field
IntNDPointExactQuery:
data:
field: String
value: int[]
# Query that matches a long point field
LongPointExactQuery:
data:
field: String
value: long
# Query that matches a float point field
FloatPointExactQuery:
data:
field: String
value: float
# Query that matches a double point field
DoublePointExactQuery:
data:
field: String
value: double
# Query that matches a multi-dimensional long point field
LongNDPointExactQuery:
data:
field: String
value: long[]
# Query that matches a multi-dimensional float point field
FloatNDPointExactQuery:
data:
field: String
value: float[]
# Query that matches a multi-dimensional double point field
DoubleNDPointExactQuery:
data:
field: String
value: double[]
# Query that matches a set of int point values
IntPointSetQuery:
data:
field: String
values: int[]
# Query that matches a set of long point values
LongPointSetQuery:
data:
field: String
values: long[]
# Query that matches a set of float point values
FloatPointSetQuery:
data:
field: String
values: float[]
# Query that matches a set of double point values
DoublePointSetQuery:
data:
field: String
values: double[]
# Extra data used for parameters and the client
@ -177,9 +366,9 @@ versions:
query: Query
offset: long
limit: long
minCompetitiveScore: -float
sort: Sort
complete: boolean
computePreciseHitsCount: boolean
timeoutMilliseconds: long
NoSort:
data: { }
NumericSort:
@ -193,7 +382,11 @@ versions:
DocSort:
data: { }
TotalHitsCount:
stringRepresenter: "it.cavallium.dbengine.lucene.LuceneUtils.toHumanReadableString"
stringRepresenter: "it.cavallium.dbengine.client.query.QueryUtil.toHumanReadableString"
data:
value: long
exact: boolean
versions:
0.0.0:
details:
changelog: "First version"


@ -0,0 +1,220 @@
# A type that starts with "-" is an optional type, otherwise it can't be null
currentVersion: "0.0.0"
interfacesData:
ClientBoundRequest:
extendInterfaces: [RPCEvent]
ClientBoundResponse:
extendInterfaces: [RPCEvent]
ServerBoundRequest:
extendInterfaces: [RPCEvent]
ServerBoundResponse:
extendInterfaces: [RPCEvent]
superTypesData:
RPCEvent: [
Empty,
Binary,
BinaryOptional,
SingletonUpdateOldData,
GeneratedEntityId,
GetDatabase,
Disconnect,
GetSingleton,
SingletonGet,
SingletonSet,
SingletonUpdateInit,
SingletonUpdateEnd,
RPCCrash,
CloseDatabase
]
ServerBoundRequest: [
GetDatabase,
Disconnect,
GetSingleton,
SingletonGet,
SingletonSet,
SingletonUpdateInit,
CloseDatabase
]
ClientBoundResponse: [
Empty,
GeneratedEntityId,
Binary,
BinaryOptional,
RPCCrash
]
ClientBoundRequest: [
SingletonUpdateOldData
]
ServerBoundResponse: [
Empty,
SingletonUpdateEnd
]
Filter: [
NoFilter,
BloomFilter
]
customTypesData:
Path:
javaClass: java.nio.file.Path
serializer: it.cavallium.dbengine.database.remote.PathSerializer
Compression:
javaClass: it.cavallium.dbengine.client.Compression
serializer: it.cavallium.dbengine.database.remote.CompressionSerializer
Duration:
javaClass: java.time.Duration
serializer: it.cavallium.dbengine.database.remote.DurationSerializer
RocksDB:
javaClass: org.rocksdb.RocksDB
serializer: it.cavallium.dbengine.database.remote.RocksDBSerializer
ColumnFamilyHandle:
javaClass: org.rocksdb.ColumnFamilyHandle
serializer: it.cavallium.dbengine.database.remote.ColumnFamilyHandleSerializer
UpdateReturnMode:
javaClass: it.cavallium.dbengine.database.UpdateReturnMode
serializer: it.cavallium.dbengine.database.remote.UpdateReturnModeSerializer
LLSnapshot:
javaClass: it.cavallium.dbengine.database.LLSnapshot
serializer: it.cavallium.dbengine.database.remote.LLSnapshotSerializer
Bytes:
javaClass: it.cavallium.buffer.Buf
serializer: it.cavallium.dbengine.database.remote.BufSerializer
StringMap:
javaClass: java.util.Map<java.lang.String, java.lang.String>
serializer: it.cavallium.dbengine.database.remote.StringMapSerializer
String2ColumnFamilyHandleMap:
javaClass: java.util.Map<java.lang.String, org.rocksdb.ColumnFamilyHandle>
serializer: it.cavallium.dbengine.database.remote.String2ColumnFamilyHandleMapSerializer
baseTypesData:
BoxedRPCEvent:
data:
val: RPCEvent
# Server-bound requests
GetDatabase:
data:
name: String
columns: Column[]
databaseOptions: DatabaseOptions
Disconnect: { data: { } }
GetSingleton:
data:
databaseId: long
singletonListColumnName: byte[]
name: byte[]
defaultValue: -Bytes
SingletonGet:
data:
singletonId: long
snapshot: -LLSnapshot
SingletonSet:
data:
singletonId: long
value: -Bytes
SingletonUpdateInit:
data:
singletonId: long
updateReturnMode: UpdateReturnMode
SingletonUpdateEnd:
data:
exist: boolean
value: byte[]
CloseDatabase:
data:
databaseId: long
# Client-bound responses
GeneratedEntityId:
data:
id: long
RPCCrash:
data:
code: int
message: -String
# Client-bound requests
SingletonUpdateOldData:
data:
exist: boolean
oldValue: byte[]
# Server-bound responses
# Data
BinaryOptional:
data:
val: -Binary
Binary:
data:
val: byte[]
Empty: { data: { } }
Column:
data:
name: String
DatabaseOptions:
data:
volumes: DatabaseVolume[]
extraFlags: StringMap
absoluteConsistency: boolean
lowMemory: boolean
useDirectIO: boolean
allowMemoryMapping: boolean
optimistic: boolean
maxOpenFiles: -int
blockCache: -long
persistentCaches: PersistentCache[]
writeBufferManager: -long
spinning: boolean
defaultColumnOptions: ColumnOptions
columnOptions: NamedColumnOptions[]
logPath: -String
walPath: -String
openAsSecondary: boolean
secondaryDirectoryName: -String
ColumnOptions:
data:
levels: DatabaseLevel[]
memtableMemoryBudgetBytes: -long
cacheIndexAndFilterBlocks: -boolean
partitionFilters: -boolean
filter: -Filter
blockSize: -int
persistentCacheId: -String
writeBufferSize: -long
blobFiles: boolean
minBlobSize: -long
blobFileSize: -long
blobCompressionType: -Compression
NamedColumnOptions:
data:
name: String
options: ColumnOptions
NoFilter:
data: {}
BloomFilter:
data:
bitsPerKey: int
optimizeForHits: -boolean
PersistentCache:
data:
id: String
path: String
size: long
optimizeForNvm: boolean
DatabaseVolume:
data:
volumePath: Path
targetSizeBytes: long
DatabaseLevel:
data:
maxDictBytes: int
compression: Compression
versions:
0.0.0:
details:
changelog: "First version"


@ -1,46 +0,0 @@
package io.net5.buffer.api.pool;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.util.List;
/**
* Netty5 hides some metrics. This utility class can read them.
*/
public class MetricUtils {
private static final MethodHandle GET_ARENA_METRICS;
static {
var lookup = MethodHandles.lookup();
// Get the method handle that returns the metrics of each pool arena
MethodHandle handle = null;
try {
// Find the class
var pooledBufferClass = Class.forName("io.net5.buffer.api.pool.PooledBufferAllocatorMetric");
// Find the handle of the method
handle = lookup.findVirtual(pooledBufferClass, "arenaMetrics", MethodType.methodType(List.class));
} catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ignored) {
}
GET_ARENA_METRICS = handle;
}
/**
* Get the metrics of each pool arena of a pooled allocator
* @param allocator Pooled allocator
* @return A list of {@link PoolArenaMetric}
*/
@SuppressWarnings("unchecked")
public static List<PoolArenaMetric> getPoolArenaMetrics(PooledBufferAllocator allocator) {
var metric = allocator.metric();
try {
// Invoke the method to get the metrics
return (List<PoolArenaMetric>) GET_ARENA_METRICS.invoke(metric);
} catch (Throwable e) {
return List.of();
}
}
}


@ -1,60 +0,0 @@
package io.net5.buffer.api.pool;
import java.util.List;
public class PooledBufferAllocatorMetricUtils implements BufferAllocatorMetric {
private final PooledBufferAllocator allocator;
@SuppressWarnings("RedundantThrows")
public PooledBufferAllocatorMetricUtils(PooledBufferAllocator allocator) throws Throwable {
this.allocator = allocator;
}
/**
* Return the number of arenas.
*/
public int numArenas() {
return allocator.numArenas();
}
/**
* Return a {@link List} of all {@link PoolArenaMetric}s that are provided by this pool.
*/
public List<PoolArenaMetric> arenaMetrics() {
return allocator.arenaMetrics();
}
/**
* Return the number of thread local caches used by this {@link PooledBufferAllocator}.
*/
public int numThreadLocalCaches() {
return allocator.numThreadLocalCaches();
}
/**
* Return the size of the small cache.
*/
public int smallCacheSize() {
return allocator.smallCacheSize();
}
/**
* Return the size of the normal cache.
*/
public int normalCacheSize() {
return allocator.normalCacheSize();
}
/**
* Return the chunk size for an arena.
*/
public int chunkSize() {
return allocator.chunkSize();
}
@Override
public long usedMemory() {
return allocator.usedMemory();
}
}


@ -1,79 +0,0 @@
package it.cavallium.dbengine;
import static java.util.Objects.requireNonNull;
import static java.util.Objects.requireNonNullElseGet;
import io.net5.buffer.api.Send;
import it.cavallium.dbengine.database.disk.LLIndexSearcher;
import it.cavallium.dbengine.database.disk.LLIndexSearchers;
import it.cavallium.dbengine.lucene.searcher.LLSearchTransformer;
import it.cavallium.dbengine.lucene.searcher.LocalQueryParams;
import it.cavallium.dbengine.lucene.searcher.LocalSearcher;
import it.cavallium.dbengine.lucene.searcher.MultiSearcher;
import it.cavallium.dbengine.lucene.searcher.LuceneSearchResult;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;
import reactor.core.publisher.Mono;
public class SwappableLuceneSearcher implements LocalSearcher, MultiSearcher, Closeable {
private final AtomicReference<LocalSearcher> single = new AtomicReference<>(null);
private final AtomicReference<MultiSearcher> multi = new AtomicReference<>(null);
public SwappableLuceneSearcher() {
}
@Override
public Mono<LuceneSearchResult> collect(Mono<Send<LLIndexSearcher>> indexSearcherMono,
LocalQueryParams queryParams,
String keyFieldName,
LLSearchTransformer transformer) {
var single = requireNonNullElseGet(this.single.get(), this.multi::get);
requireNonNull(single, "LuceneLocalSearcher not set");
return single.collect(indexSearcherMono, queryParams, keyFieldName, transformer);
}
@Override
public String getName() {
var single = this.single.get();
var multi = this.multi.get();
if (single == multi) {
if (single == null) {
return "swappable";
} else {
return single.getName();
}
} else {
return "swappable[single=" + single.getName() + ",multi=" + multi.getName() + "]";
}
}
@Override
public Mono<LuceneSearchResult> collectMulti(Mono<Send<LLIndexSearchers>> indexSearchersMono,
LocalQueryParams queryParams,
String keyFieldName,
LLSearchTransformer transformer) {
var multi = requireNonNull(this.multi.get(), "LuceneMultiSearcher not set");
return multi.collectMulti(indexSearchersMono, queryParams, keyFieldName, transformer);
}
public void setSingle(LocalSearcher single) {
this.single.set(single);
}
public void setMulti(MultiSearcher multi) {
this.multi.set(multi);
}
@Override
public void close() throws IOException {
if (this.single.get() instanceof Closeable closeable) {
closeable.close();
}
if (this.multi.get() instanceof Closeable closeable) {
closeable.close();
}
}
}


@ -0,0 +1,56 @@
package it.cavallium.dbengine.client;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
public abstract class Backuppable implements IBackuppable {
public enum State {
RUNNING, PAUSING, PAUSED, RESUMING, STOPPED
}
private final AtomicInteger state = new AtomicInteger();
@Override
public final void pauseForBackup() {
if (state.compareAndSet(State.RUNNING.ordinal(), State.PAUSING.ordinal())) {
try {
onPauseForBackup();
state.compareAndSet(State.PAUSING.ordinal(), State.PAUSED.ordinal());
} catch (Throwable ex) {
state.compareAndSet(State.PAUSING.ordinal(), State.RUNNING.ordinal());
throw ex;
}
}
}
@Override
public final void resumeAfterBackup() {
if (state.compareAndSet(State.PAUSED.ordinal(), State.RESUMING.ordinal())) {
try {
onResumeAfterBackup();
state.compareAndSet(State.RESUMING.ordinal(), State.RUNNING.ordinal());
} catch (Throwable ex) {
state.compareAndSet(State.RESUMING.ordinal(), State.PAUSED.ordinal());
throw ex;
}
}
}
@Override
public final boolean isPaused() {
return state.get() == State.PAUSED.ordinal();
}
public final State getState() {
return State.values()[state.get()];
}
protected abstract void onPauseForBackup();
protected abstract void onResumeAfterBackup();
public final void setStopped() {
state.set(State.STOPPED.ordinal());
}
}
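For reference, a minimal usage sketch of the state machine above. Only Backuppable and IBackuppable come from this changeset; the subclass, its name, and the printed messages are hypothetical.

import it.cavallium.dbengine.client.Backuppable;

// Hypothetical component that suspends writes while a backup runs.
public class BackuppableStore extends Backuppable {

	@Override
	protected void onPauseForBackup() {
		// Assumed behavior: flush pending writes, then stop accepting new ones.
		System.out.println("paused for backup");
	}

	@Override
	protected void onResumeAfterBackup() {
		// Assumed behavior: start accepting writes again.
		System.out.println("resumed after backup");
	}

	public static void main(String[] args) {
		BackuppableStore store = new BackuppableStore();
		store.pauseForBackup();    // RUNNING -> PAUSING -> PAUSED
		System.out.println(store.isPaused()); // true
		store.resumeAfterBackup(); // PAUSED -> RESUMING -> RUNNING
		store.setStopped();        // terminal state
	}
}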


@ -1,8 +0,0 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.database.Column;
import it.unimi.dsi.fastutil.bytes.ByteList;
import org.jetbrains.annotations.Nullable;
public record BadBlock(String databaseName, @Nullable Column column, @Nullable ByteList rawKey,
@Nullable Throwable ex) {}


@ -1,14 +1,14 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.client.Mapper;
public class CastMapper<T, U> implements Mapper<T, U> {
@SuppressWarnings("unchecked")
@Override
public U map(T key) {
return (U) key;
}
@SuppressWarnings("unchecked")
@Override
public T unmap(U key) {
return (T) key;


@ -1,32 +1,32 @@
package it.cavallium.dbengine.client;
import io.micrometer.core.instrument.MeterRegistry;
import io.net5.buffer.api.BufferAllocator;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import it.cavallium.dbengine.database.DatabaseOperations;
import it.cavallium.dbengine.database.DatabaseProperties;
import java.util.stream.Stream;
public interface CompositeDatabase {
public interface CompositeDatabase extends DatabaseProperties, DatabaseOperations {
Mono<Void> close();
void preClose();
void close();
/**
* Can return SnapshotException
*/
Mono<CompositeSnapshot> takeSnapshot();
CompositeSnapshot takeSnapshot();
/**
* Can return SnapshotException
*/
Mono<Void> releaseSnapshot(CompositeSnapshot snapshot);
BufferAllocator getAllocator();
void releaseSnapshot(CompositeSnapshot snapshot);
MeterRegistry getMeterRegistry();
/**
* Find corrupted items
*/
Flux<BadBlock> badBlocks();
Stream<DbProgress<SSTVerificationProgress>> verify();
Mono<Void> verifyChecksum();
void verifyChecksum();
}
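A minimal sketch of the snapshot lifecycle under the new blocking API shown above; the helper class is hypothetical and assumes an already-opened database.

import it.cavallium.dbengine.client.CompositeDatabase;
import it.cavallium.dbengine.client.CompositeSnapshot;

// Hypothetical helper: take a snapshot, read from it, always release it.
public class SnapshotExample {
	public static void readConsistently(CompositeDatabase database) {
		CompositeSnapshot snapshot = database.takeSnapshot();
		try {
			// Perform reads against the snapshot here for a consistent view.
		} finally {
			// Always release the snapshot so its resources are freed.
			database.releaseSnapshot(snapshot);
		}
	}
}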


@ -18,8 +18,7 @@ public class CompositeDatabasePartLocation {
}
public enum CompositeDatabasePartType {
KV_DATABASE,
LUCENE_INDEX
KV_DATABASE
}
public CompositeDatabasePartType getPartType() {


@ -2,7 +2,6 @@ package it.cavallium.dbengine.client;
import it.cavallium.dbengine.client.CompositeDatabasePartLocation.CompositeDatabasePartType;
import it.cavallium.dbengine.database.LLKeyValueDatabaseStructure;
import it.cavallium.dbengine.database.LLLuceneIndex;
import it.cavallium.dbengine.database.LLSnapshot;
import java.util.Map;
import java.util.Objects;
@ -20,12 +19,6 @@ public class CompositeSnapshot {
)), () -> "No snapshot for database with name \"" + database.getDatabaseName() + "\"");
}
public LLSnapshot getSnapshot(LLLuceneIndex luceneIndex) {
return Objects.requireNonNull(snapshots.get(CompositeDatabasePartLocation.of(CompositeDatabasePartType.LUCENE_INDEX,
luceneIndex.getLuceneIndexName()
)), () -> "No snapshot for lucene index with name \"" + luceneIndex.getLuceneIndexName() + "\"");
}
public Map<CompositeDatabasePartLocation, LLSnapshot> getAllSnapshots() {
return snapshots;
}


@ -0,0 +1,23 @@
package it.cavallium.dbengine.client;
import org.rocksdb.CompressionType;
public enum Compression {
PLAIN(CompressionType.NO_COMPRESSION),
SNAPPY(CompressionType.SNAPPY_COMPRESSION),
LZ4(CompressionType.LZ4_COMPRESSION),
LZ4_HC(CompressionType.LZ4HC_COMPRESSION),
ZSTD(CompressionType.ZSTD_COMPRESSION),
ZLIB(CompressionType.ZLIB_COMPRESSION),
BZLIB2(CompressionType.BZLIB2_COMPRESSION);
private final CompressionType type;
Compression(CompressionType compressionType) {
this.type = compressionType;
}
public CompressionType getType() {
return type;
}
}
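A usage sketch, assuming rocksdbjni is on the classpath: the enum maps directly onto RocksDB's own CompressionType, so it can be applied to column family options as-is.

import it.cavallium.dbengine.client.Compression;
import org.rocksdb.ColumnFamilyOptions;

// Apply the engine's Compression choice to a RocksDB column family.
public class CompressionExample {
	public static void main(String[] args) {
		try (ColumnFamilyOptions options = new ColumnFamilyOptions()) {
			options.setCompressionType(Compression.ZSTD.getType());
			System.out.println(options.compressionType()); // ZSTD_COMPRESSION
		}
	}
}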


@ -0,0 +1,34 @@
package it.cavallium.dbengine.client;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import java.net.SocketAddress;
import java.nio.file.Path;
import java.util.Map;
import org.jetbrains.annotations.Nullable;
public sealed interface ConnectionSettings {
sealed interface PrimaryConnectionSettings extends ConnectionSettings {}
sealed interface SubConnectionSettings extends ConnectionSettings {}
record MemoryConnectionSettings() implements PrimaryConnectionSettings, SubConnectionSettings {}
record LocalConnectionSettings(Path dataPath) implements PrimaryConnectionSettings, SubConnectionSettings {}
record MultiConnectionSettings(Map<ConnectionPart, SubConnectionSettings> parts) implements
PrimaryConnectionSettings {
public Multimap<SubConnectionSettings, ConnectionPart> getConnections() {
Multimap<SubConnectionSettings, ConnectionPart> result = com.google.common.collect.HashMultimap.create();
parts.forEach((connectionPart, subConnectionSettings) -> result.put(subConnectionSettings, connectionPart));
return Multimaps.unmodifiableMultimap(result);
}
}
sealed interface ConnectionPart {
record ConnectionPartRocksDB(@Nullable String name) implements ConnectionPart {}
}
}
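A minimal sketch of composing these settings; the part names and path are illustrative.

import it.cavallium.dbengine.client.ConnectionSettings.ConnectionPart.ConnectionPartRocksDB;
import it.cavallium.dbengine.client.ConnectionSettings.LocalConnectionSettings;
import it.cavallium.dbengine.client.ConnectionSettings.MemoryConnectionSettings;
import it.cavallium.dbengine.client.ConnectionSettings.MultiConnectionSettings;
import java.nio.file.Path;
import java.util.Map;

// Route the "main" part to an on-disk database and the "cache" part to memory.
public class ConnectionSettingsExample {
	public static void main(String[] args) {
		MultiConnectionSettings settings = new MultiConnectionSettings(Map.of(
				new ConnectionPartRocksDB("main"), new LocalConnectionSettings(Path.of("/tmp/db")),
				new ConnectionPartRocksDB("cache"), new MemoryConnectionSettings()
		));
		// getConnections() groups the parts by their target sub-connection.
		settings.getConnections().asMap().forEach((connection, parts) ->
				System.out.println(connection + " <- " + parts));
	}
}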


@ -1,47 +0,0 @@
package it.cavallium.dbengine.client;
import java.util.Collection;
import java.util.List;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public class CountedStream<T> {
private final Flux<T> stream;
private final long count;
public CountedStream(Flux<T> stream, long count) {
this.stream = stream;
this.count = count;
}
public Flux<T> getStream() {
return stream;
}
public long getCount() {
return count;
}
@SafeVarargs
public static <T> CountedStream<T> merge(CountedStream<T>... stream) {
return merge(List.of(stream));
}
public static <T> CountedStream<T> merge(Collection<CountedStream<T>> stream) {
return stream
.stream()
.reduce((a, b) -> new CountedStream<>(Flux.merge(a.getStream(), b.getStream()), a.getCount() + b.getCount()))
.orElseGet(() -> new CountedStream<>(Flux.empty(), 0));
}
public static <T> Mono<CountedStream<T>> merge(Flux<CountedStream<T>> stream) {
return stream
.reduce((a, b) -> new CountedStream<>(Flux.merge(a.getStream(), b.getStream()), a.getCount() + b.getCount()))
.switchIfEmpty(Mono.fromSupplier(() -> new CountedStream<>(Flux.empty(), 0)));
}
public Mono<List<T>> collectList() {
return stream.collectList();
}
}


@ -1,17 +0,0 @@
package it.cavallium.dbengine.client;
import io.soabase.recordbuilder.core.RecordBuilder;
import it.cavallium.dbengine.database.Column;
import java.util.List;
import java.util.Map;
@RecordBuilder
public record DatabaseOptions(Map<String, String> extraFlags,
boolean absoluteConsistency,
boolean lowMemory,
boolean inMemory,
boolean useDirectIO,
boolean allowMemoryMapping,
boolean allowNettyDirect,
int maxOpenFiles) {
}


@ -0,0 +1,45 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.client.SSTProgress.SSTProgressReport;
import it.cavallium.dbengine.client.SSTProgress.SSTStart;
import it.cavallium.dbengine.rpc.current.data.Column;
import java.nio.file.Path;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Stream;
import org.jetbrains.annotations.Nullable;
public interface DbProgress<T extends SSTProgress> {
String databaseName();
record DbSSTProgress<T extends SSTProgress>(String databaseName, Column column, @Nullable Path file, long scanned,
long total, T sstProgress) implements DbProgress<T> {
public double getProgress() {
if (total == 0) {
return 0d;
}
return scanned / (double) total;
}
public String fileString() {
return file != null ? file.normalize().toString() : null;
}
}
static <T extends SSTProgress> Stream<DbProgress<T>> toDbProgress(String dbName,
String columnName,
LongProgressTracker totalTracker,
Stream<T> stream) {
Column column = Column.of(columnName);
AtomicReference<Path> filePath = new AtomicReference<>();
return stream.map(state -> {
switch (state) {
case SSTStart start -> filePath.set(start.metadata().filePath());
case SSTProgressReport progress -> totalTracker.incrementAndGet();
default -> {}
}
return new DbSSTProgress<>(dbName, column, filePath.get(), totalTracker.getCurrent(), totalTracker.getTotal(), state);
});
}
}
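A sketch of consuming this progress stream through CompositeDatabase.verify() (see the interface above); the logger class is hypothetical, while the accessors match the DbSSTProgress record.

import it.cavallium.dbengine.client.CompositeDatabase;
import it.cavallium.dbengine.client.DbProgress.DbSSTProgress;

// Print per-file verification progress as it is emitted.
public class VerifyProgressLogger {
	public static void logVerification(CompositeDatabase database) {
		database.verify().forEach(progress -> {
			if (progress instanceof DbSSTProgress<?> sst) {
				System.out.printf("%s %s: %.1f%%%n",
						sst.databaseName(), sst.fileString(), sst.getProgress() * 100);
			}
		});
	}
}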


@ -0,0 +1,74 @@
package it.cavallium.dbengine.client;
import it.cavallium.datagen.nativedata.NullableString;
import it.cavallium.datagen.nativedata.Nullableboolean;
import it.cavallium.datagen.nativedata.Nullableint;
import it.cavallium.datagen.nativedata.Nullablelong;
import it.cavallium.dbengine.rpc.current.data.ColumnOptions;
import it.cavallium.dbengine.rpc.current.data.ColumnOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;
import it.cavallium.dbengine.rpc.current.data.DatabaseOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.NamedColumnOptions;
import it.cavallium.dbengine.rpc.current.data.NamedColumnOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.nullables.NullableCompression;
import it.cavallium.dbengine.rpc.current.data.nullables.NullableFilter;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.rocksdb.RocksDB;
public class DefaultDatabaseOptions {
public static ColumnOptions DEFAULT_DEFAULT_COLUMN_OPTIONS = new ColumnOptions(
Collections.emptyList(),
Nullablelong.empty(),
Nullableboolean.empty(),
Nullableboolean.empty(),
NullableFilter.empty(),
Nullableint.empty(),
NullableString.empty(),
Nullablelong.empty(),
false,
Nullablelong.empty(),
Nullablelong.empty(),
NullableCompression.empty()
);
public static NamedColumnOptions DEFAULT_NAMED_COLUMN_OPTIONS = new NamedColumnOptions(
new String(RocksDB.DEFAULT_COLUMN_FAMILY, StandardCharsets.UTF_8),
DEFAULT_DEFAULT_COLUMN_OPTIONS
);
public static DatabaseOptions DEFAULT_DATABASE_OPTIONS = new DatabaseOptions(List.of(),
Map.of(),
false,
false,
false,
false,
true,
Nullableint.empty(),
Nullablelong.empty(),
Collections.emptyList(),
Nullablelong.empty(),
false,
DEFAULT_DEFAULT_COLUMN_OPTIONS,
List.of(),
NullableString.empty(),
NullableString.empty(),
false,
NullableString.empty()
);
public static DatabaseOptionsBuilder builder() {
return DatabaseOptionsBuilder.builder(DEFAULT_DATABASE_OPTIONS);
}
public static ColumnOptionsBuilder defaultColumnOptionsBuilder() {
return ColumnOptionsBuilder.builder(DEFAULT_DEFAULT_COLUMN_OPTIONS);
}
public static NamedColumnOptionsBuilder namedColumnOptionsBuilder() {
return NamedColumnOptionsBuilder.builder(DEFAULT_NAMED_COLUMN_OPTIONS);
}
}
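A hedged sketch of overriding a few defaults. The fluent setter names are assumptions based on the RecordBuilder convention this project generates builders with, and Nullableint.of is assumed to exist alongside the empty() factory used above.

import it.cavallium.datagen.nativedata.Nullableint;
import it.cavallium.dbengine.client.DefaultDatabaseOptions;
import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;

// Start from the defaults and override selected fields (setter names assumed).
public class DatabaseOptionsExample {
	public static void main(String[] args) {
		DatabaseOptions options = DefaultDatabaseOptions.builder()
				.lowMemory(true)
				.maxOpenFiles(Nullableint.of(512))
				.build();
		System.out.println(options);
	}
}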


@ -1,13 +1,26 @@
package it.cavallium.dbengine.client;
import java.util.Map;
import java.util.Map.Entry;
import org.jetbrains.annotations.Contract;
import org.jetbrains.annotations.NotNull;
import reactor.core.publisher.Mono;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.Unmodifiable;
public record HitEntry<T, U>(T key, U value, float score)
public record HitEntry<T, U>(T key, @Nullable U value, float score)
implements Comparable<HitEntry<T, U>> {
@Override
public int compareTo(@NotNull HitEntry<T, U> o) {
return Float.compare(o.score, this.score);
}
@Contract(pure = true)
public @Nullable @Unmodifiable Entry<T, U> toEntry() {
if (value != null) {
return Map.entry(key, value);
} else {
return null;
}
}
}
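A small, self-contained sketch of the record's ordering and of toEntry(); the sample data is illustrative.

import it.cavallium.dbengine.client.HitEntry;
import java.util.Map.Entry;

// HitEntry sorts by descending score; toEntry() returns null for unresolved values.
public class HitEntryExample {
	public static void main(String[] args) {
		HitEntry<String, String> a = new HitEntry<>("doc-1", "alpha", 0.5f);
		HitEntry<String, String> b = new HitEntry<>("doc-2", null, 2.0f);
		System.out.println(a.compareTo(b) > 0); // true: a ranks below b
		Entry<String, String> entry = b.toEntry();
		System.out.println(entry); // null, because b has no value
	}
}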


@ -1,14 +1,21 @@
package it.cavallium.dbengine.client;
import java.util.Comparator;
import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
import java.util.function.Function;
import org.jetbrains.annotations.NotNull;
import reactor.core.publisher.Mono;
public record HitKey<T>(T key, float score) implements Comparable<HitKey<T>> {
public <U> Mono<HitEntry<T, U>> withValue(Function<T, Mono<U>> valueGetter) {
return valueGetter.apply(key).map(value -> new HitEntry<>(key, value, score));
public <U> HitEntry<T, U> withValue(Function<T, U> valueGetter) {
return new HitEntry<>(key, valueGetter.apply(key), score);
}
public <U> HitEntry<T, U> withNullValue() {
return new HitEntry<>(key, null, score);
}
public HitEntry<T, Nothing> withNothingValue() {
return new HitEntry<>(key, Nothing.INSTANCE, score);
}
@Override


@ -1,107 +1,47 @@
package it.cavallium.dbengine.client;
import io.net5.buffer.api.Drop;
import io.net5.buffer.api.Owned;
import io.net5.buffer.api.Send;
import com.google.common.collect.Lists;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import io.net5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.database.DiscardingCloseable;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.SafeCloseable;
import it.cavallium.dbengine.database.collections.ValueGetter;
import it.cavallium.dbengine.database.collections.ValueTransformer;
import java.util.Map.Entry;
import java.util.Optional;
import it.cavallium.dbengine.utils.SimpleResource;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public final class Hits<T> extends ResourceSupport<Hits<T>, Hits<T>> {
public class Hits<T> {
private static final Drop<Hits<?>> DROP = new Drop<>() {
@Override
public void drop(Hits<?> obj) {
if (obj.onClose != null) {
obj.onClose.run();
}
}
private static final Logger LOG = LogManager.getLogger(Hits.class);
private static final Hits<?> EMPTY_HITS = new Hits<>(List.of(), TotalHitsCount.of(0, true));
private final List<T> results;
private final TotalHitsCount totalHitsCount;
@Override
public Drop<Hits<?>> fork() {
return this;
}
@Override
public void attach(Hits<?> obj) {
}
};
private Flux<T> results;
private TotalHitsCount totalHitsCount;
private Runnable onClose;
@SuppressWarnings({"unchecked", "rawtypes"})
public Hits(Flux<T> results, TotalHitsCount totalHitsCount, Runnable onClose) {
super((Drop<Hits<T>>) (Drop) DROP);
public Hits(List<T> results, TotalHitsCount totalHitsCount) {
this.results = results;
this.totalHitsCount = totalHitsCount;
this.onClose = onClose;
}
@SuppressWarnings("unchecked")
public static <T> Hits<T> empty() {
return new Hits<>(Flux.empty(), TotalHitsCount.of(0, true), null);
return (Hits<T>) EMPTY_HITS;
}
public static <K, V> Hits<LazyHitEntry<K, V>> withValuesLazy(Hits<LazyHitKey<K>> hits,
ValueGetter<K, V> valuesGetter) {
var hitsEntry = hits.results().map(hitKey -> hitKey.withValue(valuesGetter::get));
return new Hits<>(hitsEntry, hits.totalHitsCount, hits::close);
}
public static <K, V> Hits<HitEntry<K, V>> withValues(Hits<HitKey<K>> hits, ValueGetter<K, V> valuesGetter) {
var hitsEntry = hits.results().flatMap(hitKey -> hitKey.withValue(valuesGetter::get));
return new Hits<>(hitsEntry, hits.totalHitsCount, hits::close);
}
public static <T, U> Function<Hits<HitKey<T>>, Hits<LazyHitEntry<T, U>>> generateMapper(
public static <T, U> Function<Hits<HitKey<T>>, Hits<HitEntry<T, U>>> generateMapper(
ValueGetter<T, U> valueGetter) {
return result -> {
var hitsToTransform = result.results()
.map(hit -> new LazyHitEntry<>(Mono.just(hit.key()), valueGetter.get(hit.key()), hit.score()));
return new Hits<>(hitsToTransform, result.totalHitsCount(), result::close);
List<HitEntry<T, U>> hitsToTransform = LLUtils.mapList(result.results,
hit -> new HitEntry<>(hit.key(), valueGetter.get(hit.key()), hit.score())
);
return new Hits<>(hitsToTransform, result.totalHitsCount());
};
}
public static <T, U> Function<Hits<HitKey<T>>, Hits<LazyHitEntry<T, U>>> generateMapper(
ValueTransformer<T, U> valueTransformer) {
return result -> {
try {
var sharedHitsFlux = result.results().publish().refCount(3);
var scoresFlux = sharedHitsFlux.map(HitKey::score);
var keysFlux = sharedHitsFlux.map(HitKey::key);
var valuesFlux = valueTransformer.transform(keysFlux);
var transformedFlux = Flux.zip((Object[] data) -> {
//noinspection unchecked
var keyMono = Mono.just((T) data[0]);
//noinspection unchecked
var val = (Entry<T, Optional<U>>) data[1];
var valMono = Mono.justOrEmpty(val.getValue());
var score = (Float) data[2];
return new LazyHitEntry<>(keyMono, valMono, score);
}, keysFlux, valuesFlux, scoresFlux);
return new Hits<>(transformedFlux, result.totalHitsCount(), result::close);
} catch (Throwable t) {
result.close();
throw t;
}
};
}
public Flux<T> results() {
public List<T> results() {
return results;
}
@ -113,27 +53,4 @@ public final class Hits<T> extends ResourceSupport<Hits<T>, Hits<T>> {
public String toString() {
return "Hits[" + "results=" + results + ", " + "totalHitsCount=" + totalHitsCount + ']';
}
@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}
@Override
protected Owned<Hits<T>> prepareSend() {
var results = this.results;
var totalHitsCount = this.totalHitsCount;
var onClose = this.onClose;
return drop -> {
var instance = new Hits<>(results, totalHitsCount, onClose);
drop.attach(instance);
return instance;
};
}
protected void makeInaccessible() {
this.results = null;
this.totalHitsCount = null;
this.onClose = null;
}
}
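A usage sketch of the new list-based API, assuming ValueGetter<T, U> is a functional interface whose single method is U get(T key), as the call sites above suggest; the sample data is illustrative.

import it.cavallium.dbengine.client.HitEntry;
import it.cavallium.dbengine.client.HitKey;
import it.cavallium.dbengine.client.Hits;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import java.util.List;

// Map hit keys to hit entries with generateMapper.
public class HitsExample {
	public static void main(String[] args) {
		Hits<HitKey<String>> keys = new Hits<>(
				List.of(new HitKey<>("doc-1", 1.5f)),
				TotalHitsCount.of(1, true));
		Hits<HitEntry<String, String>> entries =
				Hits.<String, String>generateMapper(key -> "value-of-" + key).apply(keys);
		entries.results().forEach(e -> System.out.println(e.key() + " -> " + e.value()));
	}
}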


@ -0,0 +1,10 @@
package it.cavallium.dbengine.client;
public interface IBackuppable {
void pauseForBackup();
void resumeAfterBackup();
boolean isPaused();
}


@ -1,128 +0,0 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.client.IndexAction.Add;
import it.cavallium.dbengine.client.IndexAction.AddMulti;
import it.cavallium.dbengine.client.IndexAction.Update;
import it.cavallium.dbengine.client.IndexAction.UpdateMulti;
import it.cavallium.dbengine.client.IndexAction.Delete;
import it.cavallium.dbengine.client.IndexAction.DeleteAll;
import it.cavallium.dbengine.client.IndexAction.TakeSnapshot;
import it.cavallium.dbengine.client.IndexAction.ReleaseSnapshot;
import it.cavallium.dbengine.client.IndexAction.Flush;
import it.cavallium.dbengine.client.IndexAction.Refresh;
import it.cavallium.dbengine.client.IndexAction.Close;
import it.cavallium.dbengine.database.LLUpdateDocument;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLTerm;
import java.util.Map;
import java.util.Map.Entry;
import reactor.core.publisher.Flux;
import reactor.core.publisher.MonoSink;
sealed interface IndexAction permits Add, AddMulti, Update, UpdateMulti, Delete, DeleteAll, TakeSnapshot,
ReleaseSnapshot, Flush, Refresh, Close {
IndexActionType getType();
final record Add(LLTerm key, LLUpdateDocument doc, MonoSink<Void> addedFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.ADD;
}
}
final record AddMulti(Flux<Entry<LLTerm, LLUpdateDocument>> docsFlux, MonoSink<Void> addedMultiFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.ADD_MULTI;
}
}
final record Update(LLTerm key, LLUpdateDocument doc, MonoSink<Void> updatedFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.UPDATE;
}
}
final record UpdateMulti(Map<LLTerm, LLUpdateDocument> docs, MonoSink<Void> updatedMultiFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.UPDATE_MULTI;
}
}
final record Delete(LLTerm key, MonoSink<Void> deletedFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.DELETE;
}
}
final record DeleteAll(MonoSink<Void> deletedAllFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.DELETE_ALL;
}
}
final record TakeSnapshot(MonoSink<LLSnapshot> snapshotFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.TAKE_SNAPSHOT;
}
}
final record ReleaseSnapshot(LLSnapshot snapshot, MonoSink<Void> releasedFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.RELEASE_SNAPSHOT;
}
}
final record Flush(MonoSink<Void> flushFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.FLUSH;
}
}
final record Refresh(boolean force, MonoSink<Void> refreshFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.REFRESH;
}
}
final record Close(MonoSink<Void> closeFuture) implements IndexAction {
@Override
public IndexActionType getType() {
return IndexActionType.CLOSE;
}
}
enum IndexActionType {
ADD,
ADD_MULTI,
UPDATE,
UPDATE_MULTI,
DELETE,
DELETE_ALL,
TAKE_SNAPSHOT,
RELEASE_SNAPSHOT,
FLUSH,
REFRESH,
CLOSE
}
}


@ -1,49 +0,0 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.database.LLIndexRequest;
import it.cavallium.dbengine.database.LLSoftUpdateDocument;
import it.cavallium.dbengine.database.LLUpdateDocument;
import it.cavallium.dbengine.database.LLTerm;
import it.cavallium.dbengine.database.LLUpdateFields;
import it.cavallium.dbengine.database.LLUtils;
import java.util.Set;
import org.jetbrains.annotations.NotNull;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;
public abstract class Indicizer<T, U> {
/**
* Transform a value to an IndexRequest.
*/
public abstract @NotNull Mono<? extends LLIndexRequest> toIndexRequest(@NotNull T key, @NotNull U value);
public final @NotNull Mono<LLUpdateDocument> toDocument(@NotNull T key, @NotNull U value) {
return toIndexRequest(key, value).map(req -> {
if (req instanceof LLUpdateFields updateFields) {
return new LLUpdateDocument(updateFields.items());
} else if (req instanceof LLUpdateDocument updateDocument) {
return updateDocument;
} else if (req instanceof LLSoftUpdateDocument softUpdateDocument) {
return new LLUpdateDocument(softUpdateDocument.items());
} else {
throw new UnsupportedOperationException("Unexpected request type: " + req);
}
});
}
public abstract @NotNull LLTerm toIndex(@NotNull T key);
public abstract @NotNull String getKeyFieldName();
public abstract @NotNull T getKey(String key);
public abstract IndicizerAnalyzers getPerFieldAnalyzer();
public abstract IndicizerSimilarities getPerFieldSimilarity();
public Flux<Tuple2<String, Set<String>>> getMoreLikeThisDocumentFields(T key, U value) {
return Flux.empty();
}
}


@ -1,19 +0,0 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer;
import java.util.Map;
public record IndicizerAnalyzers(TextFieldsAnalyzer defaultAnalyzer, Map<String, TextFieldsAnalyzer> fieldAnalyzer) {
public static IndicizerAnalyzers of() {
return of(TextFieldsAnalyzer.FullText);
}
public static IndicizerAnalyzers of(TextFieldsAnalyzer defaultAnalyzer) {
return of(defaultAnalyzer, Map.of());
}
public static IndicizerAnalyzers of(TextFieldsAnalyzer defaultAnalyzer, Map<String, TextFieldsAnalyzer> fieldAnalyzer) {
return new IndicizerAnalyzers(defaultAnalyzer, fieldAnalyzer);
}
}


@ -1,20 +0,0 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer;
import it.cavallium.dbengine.lucene.analyzer.TextFieldsSimilarity;
import java.util.Map;
public record IndicizerSimilarities(TextFieldsSimilarity defaultSimilarity, Map<String, TextFieldsSimilarity> fieldSimilarity) {
public static IndicizerSimilarities of() {
return of(TextFieldsSimilarity.BM25Plus);
}
public static IndicizerSimilarities of(TextFieldsSimilarity defaultSimilarity) {
return of(defaultSimilarity, Map.of());
}
public static IndicizerSimilarities of(TextFieldsSimilarity defaultSimilarity, Map<String, TextFieldsSimilarity> fieldSimilarity) {
return new IndicizerSimilarities(defaultSimilarity, fieldSimilarity);
}
}


@ -2,7 +2,6 @@ package it.cavallium.dbengine.client;
import com.squareup.moshi.JsonReader;
import com.squareup.moshi.JsonWriter;
import it.cavallium.data.generator.nativedata.Int52;
import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
import java.io.IOException;
import org.jetbrains.annotations.NotNull;


@ -1,11 +0,0 @@
package it.cavallium.dbengine.client;
import org.jetbrains.annotations.NotNull;
import reactor.core.publisher.Mono;
public record LazyHitEntry<T, U>(Mono<T> key, Mono<U> value, float score) {
public Mono<HitEntry<T, U>> resolve() {
return Mono.zip(key, value, (k, v) -> new HitEntry<>(k, v, score));
}
}


@ -1,19 +0,0 @@
package it.cavallium.dbengine.client;
import java.util.function.Function;
import reactor.core.publisher.Mono;
public record LazyHitKey<T>(Mono<T> key, float score) {
public <U> LazyHitEntry<T, U> withValue(Function<T, Mono<U>> valueGetter) {
return new LazyHitEntry<>(key, key.flatMap(valueGetter), score);
}
public Mono<HitKey<T>> resolve() {
return key.map(k -> new HitKey<>(k, score));
}
public <U> Mono<HitEntry<T, U>> resolveWithValue(Function<T, Mono<U>> valueGetter) {
return resolve().flatMap(key -> key.withValue(valueGetter));
}
}


@ -0,0 +1,42 @@
package it.cavallium.dbengine.client;
import java.util.concurrent.atomic.AtomicLong;
public class LongProgressTracker {
private final AtomicLong current = new AtomicLong();
private final AtomicLong total = new AtomicLong();
public LongProgressTracker(long size) {
setTotal(size);
}
public LongProgressTracker() {
}
public LongProgressTracker setTotal(long estimate) {
total.set(estimate);
return this;
}
public long getCurrent() {
return current.get();
}
public long incrementAndGet() {
return current.incrementAndGet();
}
public long getAndIncrement() {
return current.getAndIncrement();
}
public long getTotal() {
return Math.max(current.get(), total.get());
}
public double progress() {
return getCurrent() / (double) Math.max(1L, getTotal());
}
}
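A self-contained usage sketch of the tracker above:

import it.cavallium.dbengine.client.LongProgressTracker;

// Track progress over a known amount of work.
public class ProgressExample {
	public static void main(String[] args) {
		LongProgressTracker tracker = new LongProgressTracker(1000);
		for (int i = 0; i < 250; i++) {
			tracker.incrementAndGet();
		}
		System.out.printf("%.0f%%%n", tracker.progress() * 100); // 25%
	}
}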


@ -1,65 +0,0 @@
package it.cavallium.dbengine.client;
import io.net5.buffer.api.Send;
import it.cavallium.dbengine.client.query.ClientQueryParams;
import it.cavallium.dbengine.client.query.current.data.Query;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLSnapshottable;
import it.cavallium.dbengine.database.collections.ValueGetter;
import it.cavallium.dbengine.database.collections.ValueTransformer;
import java.util.Map.Entry;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public interface LuceneIndex<T, U> extends LLSnapshottable {
Mono<Void> addDocument(T key, U value);
Mono<Void> addDocuments(Flux<Entry<T, U>> entries);
Mono<Void> deleteDocument(T key);
Mono<Void> updateDocument(T key, @NotNull U value);
Mono<Void> updateDocuments(Flux<Entry<T, U>> entries);
default Mono<Void> updateOrDeleteDocument(T key, @Nullable U value) {
if (value == null) {
return deleteDocument(key);
} else {
return updateDocument(key, value);
}
}
default Mono<Void> updateOrDeleteDocumentIfModified(T key, @NotNull Delta<U> delta) {
return updateOrDeleteDocumentIfModified(key, delta.current(), delta.isModified());
}
default Mono<Void> updateOrDeleteDocumentIfModified(T key, @Nullable U currentValue, boolean modified) {
if (modified) {
return updateOrDeleteDocument(key, currentValue);
} else {
return Mono.empty();
}
}
Mono<Void> deleteAll();
Mono<Hits<HitKey<T>>> moreLikeThis(ClientQueryParams queryParams, T key,
U mltDocumentValue);
Mono<Hits<HitKey<T>>> search(ClientQueryParams queryParams);
Mono<TotalHitsCount> count(@Nullable CompositeSnapshot snapshot, Query query);
boolean isLowMemoryMode();
Mono<Void> close();
Mono<Void> flush();
Mono<Void> refresh(boolean force);
}


@ -1,167 +0,0 @@
package it.cavallium.dbengine.client;
import io.net5.buffer.api.Send;
import it.cavallium.dbengine.client.query.ClientQueryParams;
import it.cavallium.dbengine.client.query.current.data.Query;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import it.cavallium.dbengine.database.LLLuceneIndex;
import it.cavallium.dbengine.database.LLSearchResultShard;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLTerm;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;
public class LuceneIndexImpl<T, U> implements LuceneIndex<T, U> {
private final LLLuceneIndex luceneIndex;
private final Indicizer<T,U> indicizer;
public LuceneIndexImpl(LLLuceneIndex luceneIndex, Indicizer<T, U> indicizer) {
this.luceneIndex = luceneIndex;
this.indicizer = indicizer;
}
private LLSnapshot resolveSnapshot(CompositeSnapshot snapshot) {
if (snapshot == null) {
return null;
} else {
return snapshot.getSnapshot(luceneIndex);
}
}
@Override
public Mono<Void> addDocument(T key, U value) {
return indicizer
.toDocument(key, value)
.flatMap(doc -> luceneIndex.addDocument(indicizer.toIndex(key), doc));
}
@Override
public Mono<Void> addDocuments(Flux<Entry<T, U>> entries) {
return luceneIndex
.addDocuments(entries
.flatMap(entry -> indicizer
.toDocument(entry.getKey(), entry.getValue())
.map(doc -> Map.entry(indicizer.toIndex(entry.getKey()), doc)))
);
}
@Override
public Mono<Void> deleteDocument(T key) {
LLTerm id = indicizer.toIndex(key);
return luceneIndex.deleteDocument(id);
}
@Override
public Mono<Void> updateDocument(T key, @NotNull U value) {
return indicizer
.toIndexRequest(key, value)
.flatMap(doc -> luceneIndex.update(indicizer.toIndex(key), doc));
}
@Override
public Mono<Void> updateDocuments(Flux<Entry<T, U>> entries) {
return luceneIndex
.updateDocuments(entries
.flatMap(entry -> indicizer
.toDocument(entry.getKey(), entry.getValue())
.map(doc -> Map.entry(indicizer.toIndex(entry.getKey()), doc)))
.collectMap(Entry::getKey, Entry::getValue)
);
}
@Override
public Mono<Void> deleteAll() {
return luceneIndex.deleteAll();
}
@Override
public Mono<Hits<HitKey<T>>> moreLikeThis(ClientQueryParams queryParams,
T key,
U mltDocumentValue) {
Flux<Tuple2<String, Set<String>>> mltDocumentFields
= indicizer.getMoreLikeThisDocumentFields(key, mltDocumentValue);
return luceneIndex
.moreLikeThis(resolveSnapshot(queryParams.snapshot()),
queryParams.toQueryParams(),
indicizer.getKeyFieldName(),
mltDocumentFields
)
.map(this::mapResults)
.single();
}
@Override
public Mono<Hits<HitKey<T>>> search(ClientQueryParams queryParams) {
return luceneIndex
.search(resolveSnapshot(queryParams.snapshot()),
queryParams.toQueryParams(),
indicizer.getKeyFieldName()
)
.map(this::mapResults)
.single();
}
private Hits<HitKey<T>> mapResults(LLSearchResultShard llSearchResult) {
var scoresWithKeysFlux = llSearchResult
.results()
.map(hit -> new HitKey<>(indicizer.getKey(hit.key()), hit.score()));
return new Hits<>(scoresWithKeysFlux, llSearchResult.totalHitsCount(), llSearchResult::close);
}
@Override
public Mono<TotalHitsCount> count(@Nullable CompositeSnapshot snapshot, Query query) {
return this
.search(ClientQueryParams.builder().snapshot(snapshot).query(query).limit(0).build())
.single()
.map(searchResultKeys -> {
try (searchResultKeys) {
return searchResultKeys.totalHitsCount();
}
});
}
@Override
public boolean isLowMemoryMode() {
return luceneIndex.isLowMemoryMode();
}
@Override
public Mono<Void> close() {
return luceneIndex.close();
}
/**
* Flush writes to disk
*/
@Override
public Mono<Void> flush() {
return luceneIndex.flush();
}
/**
* Refresh index searcher
*/
@Override
public Mono<Void> refresh(boolean force) {
return luceneIndex.refresh(force);
}
@Override
public Mono<LLSnapshot> takeSnapshot() {
return luceneIndex.takeSnapshot();
}
@Override
public Mono<Void> releaseSnapshot(LLSnapshot snapshot) {
return luceneIndex.releaseSnapshot(snapshot);
}
}

View File

@@ -1,20 +0,0 @@
package it.cavallium.dbengine.client;
import io.soabase.recordbuilder.core.RecordBuilder;
import java.time.Duration;
import java.util.Map;
import java.util.Optional;
import org.jetbrains.annotations.Nullable;
@RecordBuilder
public record LuceneOptions(Map<String, String> extraFlags,
Duration queryRefreshDebounceTime,
Duration commitDebounceTime,
boolean lowMemory,
boolean inMemory,
Optional<DirectIOOptions> directIOOptions,
boolean allowMemoryMapping,
Optional<NRTCachingOptions> nrtCachingOptions,
int indexWriterBufferSize,
boolean applyAllDeletes,
boolean writeAllDeletes) {}

View File

@@ -1,11 +1,12 @@
package it.cavallium.dbengine.client;
import io.net5.buffer.api.Buffer;
import io.net5.buffer.api.Send;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
public class MappedSerializer<A, B> implements Serializer<B> {
@@ -18,14 +19,24 @@ public class MappedSerializer<A, B> implements Serializer<B> {
this.keyMapper = keyMapper;
}
@Override
public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
return keyMapper.map(serializer.deserialize(serialized));
public static <A, B> Serializer<B> of(Serializer<A> ser,
Mapper<A, B> keyMapper) {
if (keyMapper.getClass() == NoMapper.class) {
//noinspection unchecked
return (Serializer<B>) ser;
} else {
return new MappedSerializer<>(ser, keyMapper);
}
}
@Override
public void serialize(@NotNull B deserialized, Buffer output) throws SerializationException {
serializer.serialize(keyMapper.unmap(deserialized), output);
public @NotNull B deserialize(@NotNull BufDataInput in) throws SerializationException {
return keyMapper.map(serializer.deserialize(in));
}
@Override
public void serialize(@NotNull B deserialized, BufDataOutput out) throws SerializationException {
serializer.serialize(keyMapper.unmap(deserialized), out);
}
@Override
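A minimal sketch of the new static factory, assuming a hypothetical UserId wrapper and an existing Serializer<String>; when the mapper is a NoMapper, the factory returns the original serializer unchanged instead of wrapping it:

record UserId(String value) {}

Serializer<UserId> userIdSerializer = MappedSerializer.of(stringSerializer, new Mapper<String, UserId>() {
    @Override
    public UserId map(String key) {
        return new UserId(key);
    }

    @Override
    public String unmap(UserId key) {
        return key.value();
    }
});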

View File

@@ -1,11 +1,11 @@
package it.cavallium.dbengine.client;
import io.net5.buffer.api.Buffer;
import io.net5.buffer.api.Send;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryLength<B> {
@@ -18,14 +18,24 @@ public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryLength<B> {
this.keyMapper = keyMapper;
}
@Override
public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
return keyMapper.map(fixedLengthSerializer.deserialize(serialized));
public static <A, B> SerializerFixedBinaryLength<B> of(SerializerFixedBinaryLength<A> fixedLengthSerializer,
Mapper<A, B> keyMapper) {
if (keyMapper.getClass() == NoMapper.class) {
//noinspection unchecked
return (SerializerFixedBinaryLength<B>) fixedLengthSerializer;
} else {
return new MappedSerializerFixedLength<>(fixedLengthSerializer, keyMapper);
}
}
@Override
public void serialize(@NotNull B deserialized, Buffer output) throws SerializationException {
fixedLengthSerializer.serialize(keyMapper.unmap(deserialized), output);
public @NotNull B deserialize(@NotNull BufDataInput in) throws SerializationException {
return keyMapper.map(fixedLengthSerializer.deserialize(in));
}
@Override
public void serialize(@NotNull B deserialized, BufDataOutput out) throws SerializationException {
fixedLengthSerializer.serialize(keyMapper.unmap(deserialized), out);
}
@Override

View File

@@ -0,0 +1,5 @@
package it.cavallium.dbengine.client;
public record MemoryStats(long estimateTableReadersMem, long sizeAllMemTables,
long curSizeAllMemTables, long estimateNumKeys, long blockCacheCapacity,
long blockCacheUsage, long blockCachePinnedUsage, long liveVersions) {}
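The new record is a plain value carrier; a sketch of deriving block-cache utilization from an instance (stats is assumed to come from the database's properties API):

double utilization = stats.blockCacheCapacity() == 0
        ? 0d
        : stats.blockCacheUsage() / (double) stats.blockCacheCapacity();
System.out.printf("block cache: %.1f%% of %d bytes%n", utilization * 100, stats.blockCacheCapacity());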

View File

@@ -1,6 +0,0 @@
package it.cavallium.dbengine.client;
import io.soabase.recordbuilder.core.RecordBuilder;
@RecordBuilder
public record NRTCachingOptions(double maxMergeSizeMB, double maxCachedMB) {}

View File

@@ -1,7 +1,5 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.client.Mapper;
public class NoMapper<T> implements Mapper<T, T> {
@Override

View File

@@ -0,0 +1,17 @@
package it.cavallium.dbengine.client;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.client.SSTDumpProgress.SSTBlockFail;
import it.cavallium.dbengine.client.SSTDumpProgress.SSTBlockKeyValue;
import it.cavallium.dbengine.client.SSTProgress.SSTOk;
import it.cavallium.dbengine.client.SSTProgress.SSTProgressReport;
import it.cavallium.dbengine.client.SSTProgress.SSTStart;
import org.rocksdb.RocksDBException;
public sealed interface SSTDumpProgress extends SSTProgress permits SSTBlockFail, SSTBlockKeyValue, SSTOk,
SSTProgressReport, SSTStart {
record SSTBlockKeyValue(Buf rawKey, Buf rawValue) implements SSTDumpProgress {}
record SSTBlockFail(RocksDBException ex) implements SSTDumpProgress {}
}

View File

@@ -0,0 +1,21 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.database.disk.RocksDBFile.IterationMetadata;
import it.cavallium.dbengine.rpc.current.data.Column;
import org.jetbrains.annotations.Nullable;
public interface SSTProgress {
record SSTStart(IterationMetadata metadata) implements SSTProgress, SSTVerificationProgress, SSTDumpProgress {}
record SSTOk(long scannedCount) implements SSTProgress, SSTVerificationProgress, SSTDumpProgress {}
record SSTProgressReport(long fileScanned, long fileTotal) implements SSTProgress, SSTVerificationProgress,
SSTDumpProgress {
public double getFileProgress() {
if (fileTotal == 0) return 0d;
return fileScanned / (double) fileTotal;
}
}
}
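Because the progress events are records in sealed hierarchies, consumers can switch over them exhaustively (Java 21 pattern matching); a sketch over an assumed Stream<SSTDumpProgress>, with handleEntry hypothetical:

dumpEvents.forEach(event -> {
    switch (event) {
        case SSTStart start -> System.out.println("scanning " + start.metadata());
        case SSTProgressReport report -> System.out.printf("%.1f%%%n", report.getFileProgress() * 100);
        case SSTBlockKeyValue kv -> handleEntry(kv.rawKey(), kv.rawValue());
        case SSTBlockFail fail -> fail.ex().printStackTrace();
        case SSTOk ok -> System.out.println("done, scanned " + ok.scannedCount() + " entries");
    }
});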

View File

@@ -0,0 +1,17 @@
package it.cavallium.dbengine.client;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.client.DbProgress.DbSSTProgress;
import it.cavallium.dbengine.client.SSTProgress.SSTOk;
import it.cavallium.dbengine.client.SSTProgress.SSTProgressReport;
import it.cavallium.dbengine.client.SSTProgress.SSTStart;
import it.cavallium.dbengine.client.SSTVerificationProgress.SSTBlockBad;
import it.cavallium.dbengine.rpc.current.data.Column;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Stream;
public sealed interface SSTVerificationProgress extends SSTProgress permits SSTOk, SSTProgressReport, SSTStart,
SSTBlockBad {
record SSTBlockBad(Buf rawKey, Throwable ex) implements SSTVerificationProgress {}
}

View File

@@ -1,6 +1,6 @@
package it.cavallium.dbengine.client;
public class SnapshotException extends RuntimeException {
public class SnapshotException extends IllegalStateException {
public SnapshotException(Throwable ex) {
super(ex);

View File

@@ -1,6 +1,6 @@
package it.cavallium.dbengine.client;
import it.cavallium.dbengine.client.query.BasicType;
import it.cavallium.dbengine.client.query.BaseType;
import it.cavallium.dbengine.client.query.current.data.DocSort;
import it.cavallium.dbengine.client.query.current.data.NoSort;
import it.cavallium.dbengine.client.query.current.data.NumericSort;
@@ -11,7 +11,7 @@ import org.jetbrains.annotations.NotNull;
public record Sort(@NotNull it.cavallium.dbengine.client.query.current.data.Sort querySort) {
public boolean isSorted() {
return querySort.getBasicType$() != BasicType.NoSort;
return querySort.getBaseType$() != BaseType.NoSort;
}
public static Sort random() {

View File

@@ -1,49 +0,0 @@
package it.cavallium.dbengine.client.query;
import io.soabase.recordbuilder.core.RecordBuilder;
import it.cavallium.data.generator.nativedata.Nullablefloat;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.client.Sort;
import it.cavallium.dbengine.client.query.current.data.NoSort;
import it.cavallium.dbengine.client.query.current.data.Query;
import it.cavallium.dbengine.client.query.current.data.QueryParams;
import it.cavallium.dbengine.client.query.current.data.QueryParamsBuilder;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
@RecordBuilder
public final record ClientQueryParams(@Nullable CompositeSnapshot snapshot,
@NotNull Query query,
long offset,
long limit,
@Nullable Float minCompetitiveScore,
@Nullable Sort sort,
boolean complete) {
public static ClientQueryParamsBuilder builder() {
return ClientQueryParamsBuilder
.builder()
.snapshot(null)
.offset(0)
.limit(Long.MAX_VALUE)
.minCompetitiveScore(null)
.sort(null)
.complete(true);
}
public boolean isSorted() {
return sort != null && sort.isSorted();
}
public QueryParams toQueryParams() {
return QueryParamsBuilder
.builder()
.query(query())
.sort(sort != null ? sort.querySort() : new NoSort())
.minCompetitiveScore(Nullablefloat.ofNullable(minCompetitiveScore()))
.offset(offset())
.limit(limit())
.complete(complete())
.build();
}
}
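For context, the removed record was built through its generated RecordBuilder, which pre-populates the defaults shown above; query here is assumed:

ClientQueryParams params = ClientQueryParams.builder()
        .query(query)
        .limit(10)
        .build();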

View File

@@ -1,87 +0,0 @@
package it.cavallium.dbengine.client.query;
import com.squareup.moshi.JsonAdapter;
import it.cavallium.dbengine.client.IntOpenHashSetJsonAdapter;
import it.cavallium.dbengine.client.query.current.CurrentVersion;
import it.cavallium.dbengine.client.query.current.data.IBasicType;
import it.cavallium.dbengine.client.query.current.data.IType;
import it.unimi.dsi.fastutil.booleans.BooleanList;
import it.unimi.dsi.fastutil.bytes.ByteList;
import it.unimi.dsi.fastutil.chars.CharList;
import it.unimi.dsi.fastutil.ints.IntList;
import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
import it.unimi.dsi.fastutil.longs.LongList;
import it.unimi.dsi.fastutil.shorts.ShortList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.warp.commonutils.moshi.BooleanListJsonAdapter;
import org.warp.commonutils.moshi.ByteListJsonAdapter;
import org.warp.commonutils.moshi.CharListJsonAdapter;
import org.warp.commonutils.moshi.IntListJsonAdapter;
import org.warp.commonutils.moshi.LongListJsonAdapter;
import org.warp.commonutils.moshi.MoshiPolymorphic;
import org.warp.commonutils.moshi.ShortListJsonAdapter;
public class QueryMoshi extends MoshiPolymorphic<IType> {
private final Set<Class<IType>> abstractClasses;
private final Set<Class<IType>> concreteClasses;
private final Map<Class<?>, JsonAdapter<?>> extraAdapters;
@SuppressWarnings({"unchecked", "RedundantCast", "rawtypes"})
public QueryMoshi() {
super(true, GetterStyle.RECORDS_GETTERS);
HashSet<Class<IType>> abstractClasses = new HashSet<>();
HashSet<Class<IType>> concreteClasses = new HashSet<>();
// Add all super types with their implementations
for (var superTypeClass : CurrentVersion.getSuperTypeClasses()) {
for (Class<? extends IBasicType> superTypeSubtypesClass : CurrentVersion.getSuperTypeSubtypesClasses(
superTypeClass)) {
concreteClasses.add((Class<IType>) (Class) superTypeSubtypesClass);
}
abstractClasses.add((Class<IType>) (Class) superTypeClass);
}
// Add IBasicType with all basic types
abstractClasses.add((Class<IType>) (Class) IBasicType.class);
for (BasicType basicType : BasicType.values()) {
concreteClasses.add((Class<IType>) (Class) CurrentVersion.VERSION.getClass(basicType));
}
this.abstractClasses = abstractClasses;
this.concreteClasses = concreteClasses;
Map<Class<?>, JsonAdapter<?>> extraAdapters = new HashMap<>();
extraAdapters.put(BooleanList.class, new BooleanListJsonAdapter());
extraAdapters.put(ByteList.class, new ByteListJsonAdapter());
extraAdapters.put(ShortList.class, new ShortListJsonAdapter());
extraAdapters.put(CharList.class, new CharListJsonAdapter());
extraAdapters.put(IntList.class, new IntListJsonAdapter());
extraAdapters.put(LongList.class, new LongListJsonAdapter());
extraAdapters.put(IntOpenHashSet.class, new IntOpenHashSetJsonAdapter());
this.extraAdapters = Collections.unmodifiableMap(extraAdapters);
}
@Override
public Map<Class<?>, JsonAdapter<?>> getExtraAdapters() {
return extraAdapters;
}
@Override
protected Set<Class<IType>> getAbstractClasses() {
return abstractClasses;
}
@Override
protected Set<Class<IType>> getConcreteClasses() {
return concreteClasses;
}
@Override
protected boolean shouldIgnoreField(String fieldName) {
return fieldName.contains("$");
}
}

View File

@@ -1,156 +1,454 @@
package it.cavallium.dbengine.client.query;
import com.google.common.xml.XmlEscapers;
import it.cavallium.dbengine.client.query.current.data.BooleanQuery;
import it.cavallium.dbengine.client.query.current.data.BooleanQueryPart;
import it.cavallium.dbengine.client.query.current.data.BoostQuery;
import it.cavallium.dbengine.client.query.current.data.BoxedQuery;
import it.cavallium.dbengine.client.query.current.data.ConstantScoreQuery;
import it.cavallium.dbengine.client.query.current.data.DoubleNDPointExactQuery;
import it.cavallium.dbengine.client.query.current.data.DoubleNDPointRangeQuery;
import it.cavallium.dbengine.client.query.current.data.DoubleNDTermQuery;
import it.cavallium.dbengine.client.query.current.data.DoublePointExactQuery;
import it.cavallium.dbengine.client.query.current.data.DoublePointRangeQuery;
import it.cavallium.dbengine.client.query.current.data.DoublePointSetQuery;
import it.cavallium.dbengine.client.query.current.data.DoubleTermQuery;
import it.cavallium.dbengine.client.query.current.data.FieldExistsQuery;
import it.cavallium.dbengine.client.query.current.data.FloatNDPointExactQuery;
import it.cavallium.dbengine.client.query.current.data.FloatNDPointRangeQuery;
import it.cavallium.dbengine.client.query.current.data.FloatNDTermQuery;
import it.cavallium.dbengine.client.query.current.data.FloatPointExactQuery;
import it.cavallium.dbengine.client.query.current.data.FloatPointRangeQuery;
import it.cavallium.dbengine.client.query.current.data.FloatPointSetQuery;
import it.cavallium.dbengine.client.query.current.data.FloatTermQuery;
import it.cavallium.dbengine.client.query.current.data.IntNDPointRangeQuery;
import it.cavallium.dbengine.client.query.current.data.IntNDTermQuery;
import it.cavallium.dbengine.client.query.current.data.IntPointExactQuery;
import it.cavallium.dbengine.client.query.current.data.IntPointRangeQuery;
import it.cavallium.dbengine.client.query.current.data.IntPointSetQuery;
import it.cavallium.dbengine.client.query.current.data.IntTermQuery;
import it.cavallium.dbengine.client.query.current.data.LongNDPointExactQuery;
import it.cavallium.dbengine.client.query.current.data.LongNDPointRangeQuery;
import it.cavallium.dbengine.client.query.current.data.LongNDTermQuery;
import it.cavallium.dbengine.client.query.current.data.LongPointExactQuery;
import it.cavallium.dbengine.client.query.current.data.LongPointRangeQuery;
import it.cavallium.dbengine.client.query.current.data.NumericSort;
import it.cavallium.dbengine.client.query.current.data.LongPointSetQuery;
import it.cavallium.dbengine.client.query.current.data.LongTermQuery;
import it.cavallium.dbengine.client.query.current.data.OccurShould;
import it.cavallium.dbengine.client.query.current.data.PhraseQuery;
import it.cavallium.dbengine.client.query.current.data.QueryParams;
import it.cavallium.dbengine.client.query.current.data.SolrTextQuery;
import it.cavallium.dbengine.client.query.current.data.SortedDocFieldExistsQuery;
import it.cavallium.dbengine.client.query.current.data.SortedNumericDocValuesFieldSlowRangeQuery;
import it.cavallium.dbengine.client.query.current.data.SynonymQuery;
import it.cavallium.dbengine.client.query.current.data.TermAndBoost;
import it.cavallium.dbengine.client.query.current.data.TermPosition;
import it.cavallium.dbengine.client.query.current.data.TermQuery;
import it.cavallium.dbengine.client.query.current.data.WildcardQuery;
import it.cavallium.dbengine.lucene.RandomSortField;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery.Builder;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortField.Type;
import org.apache.lucene.search.SortedNumericSortField;
import java.text.BreakIterator;
import java.util.Comparator;
import java.util.Locale;
import org.apache.commons.lang3.StringUtils;
import org.jetbrains.annotations.Nullable;
public class QueryParser {
public static Query toQuery(it.cavallium.dbengine.client.query.current.data.Query query) {
if (query == null) return null;
switch (query.getBasicType$()) {
case BooleanQuery:
private static final String[] QUERY_STRING_FIND = {"\\", "\""};
private static final String[] QUERY_STRING_REPLACE = {"\\\\", "\\\""};
public static void toQueryXML(StringBuilder out,
it.cavallium.dbengine.client.query.current.data.Query query,
@Nullable Float boost) {
if (query == null) {
return;
}
switch (query.getBaseType$()) {
case StandardQuery -> {
var standardQuery = (it.cavallium.dbengine.client.query.current.data.StandardQuery) query;
out.append("<UserQuery");
if (standardQuery.defaultFields().size() > 1) {
throw new UnsupportedOperationException("Maximum supported default fields count: 1");
}
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
if (standardQuery.defaultFields().size() == 1) {
out
.append(" fieldName=\"")
.append(XmlEscapers.xmlAttributeEscaper().escape(standardQuery.defaultFields().get(0)))
.append("\"");
}
if (!standardQuery.termFields().isEmpty()) {
throw new UnsupportedOperationException("Term fields unsupported");
}
if (!standardQuery.pointsConfig().isEmpty()) {
throw new UnsupportedOperationException("Points config unsupported");
}
out.append(">");
out.append(XmlEscapers.xmlContentEscaper().escape(standardQuery.query()));
out.append("</UserQuery>\n");
}
case BooleanQuery -> {
var booleanQuery = (it.cavallium.dbengine.client.query.current.data.BooleanQuery) query;
var bq = new Builder();
if (booleanQuery.parts().size() == 1
&& booleanQuery.parts().get(0).occur().getBaseType$() == BaseType.OccurMust) {
toQueryXML(out, booleanQuery.parts().get(0).query(), boost);
} else {
out.append("<BooleanQuery");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(" minimumNumberShouldMatch=\"").append(booleanQuery.minShouldMatch()).append("\"");
out.append(">\n");
for (BooleanQueryPart part : booleanQuery.parts()) {
Occur occur = switch (part.occur().getBasicType$()) {
case OccurFilter -> Occur.FILTER;
case OccurMust -> Occur.MUST;
case OccurShould -> Occur.SHOULD;
case OccurMustNot -> Occur.MUST_NOT;
default -> throw new IllegalStateException("Unexpected value: " + part.occur().getBasicType$());
};
bq.add(toQuery(part.query()), occur);
out.append("<Clause");
out.append(" occurs=\"").append(switch (part.occur().getBaseType$()) {
case OccurFilter -> "filter";
case OccurMust -> "must";
case OccurShould -> "should";
case OccurMustNot -> "mustNot";
default -> throw new IllegalStateException("Unexpected value: " + part.occur().getBaseType$());
}).append("\"");
out.append(">\n");
toQueryXML(out, part.query(), null);
out.append("</Clause>\n");
}
bq.setMinimumNumberShouldMatch(booleanQuery.minShouldMatch());
return bq.build();
case IntPointExactQuery:
out.append("</BooleanQuery>\n");
}
}
case IntPointExactQuery -> {
var intPointExactQuery = (IntPointExactQuery) query;
return IntPoint.newExactQuery(intPointExactQuery.field(), intPointExactQuery.value());
case LongPointExactQuery:
out.append("<PointRangeQuery type=\"int\"");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(intPointExactQuery.field())).append("\"");
out.append(" lowerTerm=\"").append(intPointExactQuery.value()).append("\"");
out.append(" upperTerm=\"").append(intPointExactQuery.value()).append("\"");
out.append(" />\n");
}
case IntNDPointExactQuery -> {
var intPointExactQuery = (IntPointExactQuery) query;
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
}
case LongPointExactQuery -> {
var longPointExactQuery = (LongPointExactQuery) query;
return LongPoint.newExactQuery(longPointExactQuery.field(), longPointExactQuery.value());
case TermQuery:
out.append("<PointRangeQuery type=\"long\"");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(longPointExactQuery.field())).append("\"");
out.append(" lowerTerm=\"").append(longPointExactQuery.value()).append("\"");
out.append(" upperTerm=\"").append(longPointExactQuery.value()).append("\"");
out.append(" />\n");
}
case FloatPointExactQuery -> {
var floatPointExactQuery = (FloatPointExactQuery) query;
out.append("<PointRangeQuery type=\"float\"");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(floatPointExactQuery.field())).append("\"");
out.append(" lowerTerm=\"").append(floatPointExactQuery.value()).append("\"");
out.append(" upperTerm=\"").append(floatPointExactQuery.value()).append("\"");
out.append(" />\n");
}
case DoublePointExactQuery -> {
var doublePointExactQuery = (DoublePointExactQuery) query;
out.append("<PointRangeQuery type=\"double\"");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(doublePointExactQuery.field())).append("\"");
out.append(" lowerTerm=\"").append(doublePointExactQuery.value()).append("\"");
out.append(" upperTerm=\"").append(doublePointExactQuery.value()).append("\"");
out.append(" />\n");
}
case LongNDPointExactQuery -> {
var longndPointExactQuery = (LongNDPointExactQuery) query;
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
}
case FloatNDPointExactQuery -> {
var floatndPointExactQuery = (FloatNDPointExactQuery) query;
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
}
case DoubleNDPointExactQuery -> {
var doublendPointExactQuery = (DoubleNDPointExactQuery) query;
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
}
case IntPointSetQuery -> {
var intPointSetQuery = (IntPointSetQuery) query;
// Polyfill
toQueryXML(out, BooleanQuery.of(intPointSetQuery.values().intStream()
.mapToObj(val -> IntPointExactQuery.of(intPointSetQuery.field(), val))
.map(q -> BooleanQueryPart.of(q, OccurShould.of()))
.toList(), 1), boost);
}
case LongPointSetQuery -> {
var longPointSetQuery = (LongPointSetQuery) query;
// Polyfill
toQueryXML(out, BooleanQuery.of(longPointSetQuery.values().longStream()
.mapToObj(val -> LongPointExactQuery.of(longPointSetQuery.field(), val))
.map(q -> BooleanQueryPart.of(q, OccurShould.of()))
.toList(), 1), boost);
}
case FloatPointSetQuery -> {
var floatPointSetQuery = (FloatPointSetQuery) query;
// Polyfill
toQueryXML(out, BooleanQuery.of(floatPointSetQuery.values().stream()
.map(val -> FloatPointExactQuery.of(floatPointSetQuery.field(), val))
.map(q -> BooleanQueryPart.of(q, OccurShould.of()))
.toList(), 1), boost);
}
case DoublePointSetQuery -> {
var doublePointSetQuery = (DoublePointSetQuery) query;
// Polyfill
toQueryXML(out, BooleanQuery.of(doublePointSetQuery.values().doubleStream()
.mapToObj(val -> DoublePointExactQuery.of(doublePointSetQuery.field(), val))
.map(q -> BooleanQueryPart.of(q, OccurShould.of()))
.toList(), 1), boost);
}
case TermQuery -> {
var termQuery = (TermQuery) query;
return new org.apache.lucene.search.TermQuery(toTerm(termQuery.term()));
case BoostQuery:
out
.append("<TermQuery");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out
.append(" fieldName=\"")
.append(XmlEscapers.xmlAttributeEscaper().escape(termQuery.term().field()))
.append("\"");
out.append(">");
out.append(XmlEscapers.xmlContentEscaper().escape(termQuery.term().value()));
out.append("</TermQuery>\n");
}
case IntTermQuery -> {
var intTermQuery = (IntTermQuery) query;
throw new UnsupportedOperationException("Non-string term fields are not supported");
}
case IntNDTermQuery -> {
var intNDTermQuery = (IntNDTermQuery) query;
throw new UnsupportedOperationException("Non-string term fields are not supported");
}
case LongTermQuery -> {
var longTermQuery = (LongTermQuery) query;
throw new UnsupportedOperationException("Non-string term fields are not supported");
}
case LongNDTermQuery -> {
var longNDTermQuery = (LongNDTermQuery) query;
throw new UnsupportedOperationException("Non-string term fields are not supported");
}
case FloatTermQuery -> {
var floatTermQuery = (FloatTermQuery) query;
throw new UnsupportedOperationException("Non-string term fields are not supported");
}
case FloatNDTermQuery -> {
var floatNDTermQuery = (FloatNDTermQuery) query;
throw new UnsupportedOperationException("Non-string term fields are not supported");
}
case DoubleTermQuery -> {
var doubleTermQuery = (DoubleTermQuery) query;
throw new UnsupportedOperationException("Non-string term fields are not supported");
}
case DoubleNDTermQuery -> {
var doubleNDTermQuery = (DoubleNDTermQuery) query;
throw new UnsupportedOperationException("Non-string term fields are not supported");
}
case FieldExistsQuery -> {
var fieldExistQuery = (FieldExistsQuery) query;
out.append("<UserQuery");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(">");
ensureValidField(fieldExistQuery.field());
out.append(fieldExistQuery.field());
out.append(":[* TO *]");
out.append("</UserQuery>\n");
}
case SolrTextQuery -> {
var solrTextQuery = (SolrTextQuery) query;
out.append("<UserQuery");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(">");
ensureValidField(solrTextQuery.field());
out.append(solrTextQuery.field());
out.append(":");
out.append("\"").append(XmlEscapers.xmlContentEscaper().escape(escapeQueryStringValue(solrTextQuery.phrase()))).append("\"");
if (solrTextQuery.slop() > 0 && hasMoreThanOneWord(solrTextQuery.phrase())) {
out.append("~").append(solrTextQuery.slop());
}
out.append("</UserQuery>\n");
}
case BoostQuery -> {
var boostQuery = (BoostQuery) query;
return new org.apache.lucene.search.BoostQuery(toQuery(boostQuery.query()), boostQuery.scoreBoost());
case ConstantScoreQuery:
toQueryXML(out, boostQuery.query(), boostQuery.scoreBoost());
}
case ConstantScoreQuery -> {
var constantScoreQuery = (ConstantScoreQuery) query;
return new org.apache.lucene.search.ConstantScoreQuery(toQuery(constantScoreQuery.query()));
case BoxedQuery:
return toQuery(((BoxedQuery) query).query());
case FuzzyQuery:
var fuzzyQuery = (it.cavallium.dbengine.client.query.current.data.FuzzyQuery) query;
return new FuzzyQuery(toTerm(fuzzyQuery.term()),
fuzzyQuery.maxEdits(),
fuzzyQuery.prefixLength(),
fuzzyQuery.maxExpansions(),
fuzzyQuery.transpositions()
);
case IntPointRangeQuery:
out.append("<ConstantScoreQuery");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(">\n");
toQueryXML(out, constantScoreQuery.query(), null);
out.append("</ConstantScoreQuery>\n");
}
case BoxedQuery -> {
toQueryXML(out, ((BoxedQuery) query).query(), boost);
}
case FuzzyQuery -> {
throw new UnsupportedOperationException("Fuzzy query is not supported, use span queries");
}
case IntPointRangeQuery -> {
var intPointRangeQuery = (IntPointRangeQuery) query;
return IntPoint.newRangeQuery(intPointRangeQuery.field(),
intPointRangeQuery.min(),
intPointRangeQuery.max()
);
case LongPointRangeQuery:
out.append("<PointRangeQuery type=\"int\"");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(intPointRangeQuery.field())).append("\"");
out.append(" lowerTerm=\"").append(intPointRangeQuery.min()).append("\"");
out.append(" upperTerm=\"").append(intPointRangeQuery.max()).append("\"");
out.append(" />\n");
}
case IntNDPointRangeQuery -> {
var intndPointRangeQuery = (IntNDPointRangeQuery) query;
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
}
case LongPointRangeQuery -> {
var longPointRangeQuery = (LongPointRangeQuery) query;
return LongPoint.newRangeQuery(longPointRangeQuery.field(),
longPointRangeQuery.min(),
longPointRangeQuery.max()
);
case MatchAllDocsQuery:
return new MatchAllDocsQuery();
case MatchNoDocsQuery:
return new MatchNoDocsQuery();
case PhraseQuery:
out.append("<PointRangeQuery type=\"long\"");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(longPointRangeQuery.field())).append("\"");
out.append(" lowerTerm=\"").append(longPointRangeQuery.min()).append("\"");
out.append(" upperTerm=\"").append(longPointRangeQuery.max()).append("\"");
out.append(" />\n");
}
case FloatPointRangeQuery -> {
var floatPointRangeQuery = (FloatPointRangeQuery) query;
out.append("<PointRangeQuery type=\"float\"");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(floatPointRangeQuery.field())).append("\"");
out.append(" lowerTerm=\"").append(floatPointRangeQuery.min()).append("\"");
out.append(" upperTerm=\"").append(floatPointRangeQuery.max()).append("\"");
out.append(" />\n");
}
case DoublePointRangeQuery -> {
var doublePointRangeQuery = (DoublePointRangeQuery) query;
out.append("<PointRangeQuery type=\"double\"");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(doublePointRangeQuery.field())).append("\"");
out.append(" lowerTerm=\"").append(doublePointRangeQuery.min()).append("\"");
out.append(" upperTerm=\"").append(doublePointRangeQuery.max()).append("\"");
out.append(" />\n");
}
case LongNDPointRangeQuery -> {
var longndPointRangeQuery = (LongNDPointRangeQuery) query;
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
}
case FloatNDPointRangeQuery -> {
var floatndPointRangeQuery = (FloatNDPointRangeQuery) query;
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
}
case DoubleNDPointRangeQuery -> {
var doublendPointRangeQuery = (DoubleNDPointRangeQuery) query;
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
}
case MatchAllDocsQuery -> {
out.append("<UserQuery");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(">");
out.append("*:*");
out.append("</UserQuery>\n");
}
case MatchNoDocsQuery -> {
out.append("<UserQuery");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
out.append(">");
//todo: check if it's correct
out.append("!*:*");
out.append("</UserQuery>\n");
}
case PhraseQuery -> {
//todo: check if it's correct
var phraseQuery = (PhraseQuery) query;
var pqb = new org.apache.lucene.search.PhraseQuery.Builder();
for (TermPosition phrase : phraseQuery.phrase()) {
pqb.add(toTerm(phrase.term()), phrase.position());
out.append("<SpanNear");
if (boost != null) {
out.append(" boost=\"").append(boost).append("\"");
}
pqb.setSlop(phraseQuery.slop());
return pqb.build();
case SortedDocFieldExistsQuery:
out.append(" inOrder=\"true\"");
out.append(">\n");
phraseQuery.phrase().stream().sorted(Comparator.comparingInt(TermPosition::position)).forEach(term -> {
out
.append("<SpanTerm fieldName=\"")
.append(XmlEscapers.xmlAttributeEscaper().escape(term.term().field()))
.append("\">")
.append(XmlEscapers.xmlContentEscaper().escape(term.term().value()))
.append("</SpanTerm>\n");
});
out.append("</SpanNear>\n");
}
case SortedDocFieldExistsQuery -> {
var sortedDocFieldExistsQuery = (SortedDocFieldExistsQuery) query;
return new DocValuesFieldExistsQuery(sortedDocFieldExistsQuery.field());
case SynonymQuery:
throw new UnsupportedOperationException("Field existence query is not supported");
}
case SynonymQuery -> {
var synonymQuery = (SynonymQuery) query;
var sqb = new org.apache.lucene.search.SynonymQuery.Builder(synonymQuery.field());
for (TermAndBoost part : synonymQuery.parts()) {
sqb.addTerm(toTerm(part.term()), part.boost());
throw new UnsupportedOperationException("Synonym query is not supported");
}
return sqb.build();
case SortedNumericDocValuesFieldSlowRangeQuery:
var sortedNumericDocValuesFieldSlowRangeQuery = (SortedNumericDocValuesFieldSlowRangeQuery) query;
return SortedNumericDocValuesField.newSlowRangeQuery(sortedNumericDocValuesFieldSlowRangeQuery.field(),
sortedNumericDocValuesFieldSlowRangeQuery.min(),
sortedNumericDocValuesFieldSlowRangeQuery.max()
);
case WildcardQuery:
case SortedNumericDocValuesFieldSlowRangeQuery -> {
throw new UnsupportedOperationException("Slow range query is not supported");
}
case WildcardQuery -> {
var wildcardQuery = (WildcardQuery) query;
return new org.apache.lucene.search.WildcardQuery(new Term(wildcardQuery.field(), wildcardQuery.pattern()));
default:
throw new IllegalStateException("Unexpected value: " + query.getBasicType$());
throw new UnsupportedOperationException("Wildcard query is not supported");
}
default -> throw new IllegalStateException("Unexpected value: " + query.getBaseType$());
}
}
private static Term toTerm(it.cavallium.dbengine.client.query.current.data.Term term) {
return new Term(term.field(), term.value());
private static boolean hasMoreThanOneWord(String sentence) {
BreakIterator iterator = BreakIterator.getWordInstance(Locale.ENGLISH);
iterator.setText(sentence);
boolean firstWord = false;
iterator.first();
int end = iterator.next();
while (end != BreakIterator.DONE) {
if (!firstWord) {
firstWord = true;
} else {
return true;
}
end = iterator.next();
}
return false;
}
public static Sort toSort(it.cavallium.dbengine.client.query.current.data.Sort sort) {
switch (sort.getBasicType$()) {
case NoSort:
return null;
case ScoreSort:
return new Sort(SortField.FIELD_SCORE);
case DocSort:
return new Sort(SortField.FIELD_DOC);
case NumericSort:
NumericSort numericSort = (NumericSort) sort;
return new Sort(new SortedNumericSortField(numericSort.field(), Type.LONG, numericSort.reverse()));
case RandomSort:
return new Sort(new RandomSortField());
default:
throw new IllegalStateException("Unexpected value: " + sort.getBasicType$());
}
private static String escapeQueryStringValue(String text) {
return StringUtils.replaceEach(text, QUERY_STRING_FIND, QUERY_STRING_REPLACE);
}
public static it.cavallium.dbengine.client.query.current.data.Term toQueryTerm(Term term) {
return it.cavallium.dbengine.client.query.current.data.Term.of(term.field(), term.text());
private static void ensureValidField(String field) {
field.codePoints().forEach(codePoint -> {
if (!Character.isLetterOrDigit(codePoint) && codePoint != '_') {
throw new UnsupportedOperationException(
"Invalid character \"" + codePoint + "\" in field name \"" + field + "\"");
}
});
}
}
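A sketch of driving the rewritten XML serializer; the Term and TermQuery factory methods mirror the data classes used elsewhere in this diff:

var xml = new StringBuilder();
QueryParser.toQueryXML(xml, TermQuery.of(Term.of("title", "hello")), 2.0f);
// xml now holds something like: <TermQuery boost="2.0" fieldName="title">hello</TermQuery>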

View File

@@ -0,0 +1,16 @@
package it.cavallium.dbengine.client.query;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
public class QueryUtil {
@SuppressWarnings("unused")
public static String toHumanReadableString(TotalHitsCount totalHitsCount) {
if (totalHitsCount.exact()) {
return Long.toString(totalHitsCount.value());
} else {
return totalHitsCount.value() + "+";
}
}
}

View File

@@ -1,97 +0,0 @@
package it.cavallium.dbengine.client.query;
import it.cavallium.dbengine.client.query.current.data.BooleanQuery;
import it.cavallium.dbengine.client.query.current.data.BooleanQueryPart;
import it.cavallium.dbengine.client.query.current.data.Occur;
import it.cavallium.dbengine.client.query.current.data.OccurFilter;
import it.cavallium.dbengine.client.query.current.data.OccurMust;
import it.cavallium.dbengine.client.query.current.data.OccurMustNot;
import it.cavallium.dbengine.client.query.current.data.OccurShould;
import it.cavallium.dbengine.client.query.current.data.PhraseQuery;
import it.cavallium.dbengine.client.query.current.data.Query;
import it.cavallium.dbengine.client.query.current.data.SynonymQuery;
import it.cavallium.dbengine.client.query.current.data.TermAndBoost;
import it.cavallium.dbengine.client.query.current.data.TermPosition;
import it.cavallium.dbengine.client.query.current.data.TermQuery;
import it.cavallium.dbengine.lucene.LuceneUtils;
import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.util.QueryBuilder;
import org.jetbrains.annotations.NotNull;
@SuppressWarnings("unused")
public class QueryUtils {
public static Query approximateSearch(TextFieldsAnalyzer preferredAnalyzer, String field, String text) {
var qb = new QueryBuilder(LuceneUtils.getAnalyzer(preferredAnalyzer));
var luceneQuery = qb.createMinShouldMatchQuery(field, text, 0.75f);
return transformQuery(field, luceneQuery);
}
public static Query exactSearch(TextFieldsAnalyzer preferredAnalyzer, String field, String text) {
var qb = new QueryBuilder(LuceneUtils.getAnalyzer(preferredAnalyzer));
var luceneQuery = qb.createPhraseQuery(field, text);
return transformQuery(field, luceneQuery);
}
@NotNull
private static Query transformQuery(String field, org.apache.lucene.search.Query luceneQuery) {
if (luceneQuery == null) {
return TermQuery.of(it.cavallium.dbengine.client.query.current.data.Term.of(field, ""));
}
if (luceneQuery instanceof org.apache.lucene.search.TermQuery) {
return TermQuery.of(QueryParser.toQueryTerm(((org.apache.lucene.search.TermQuery) luceneQuery).getTerm()));
}
if (luceneQuery instanceof org.apache.lucene.search.BooleanQuery) {
var booleanQuery = (org.apache.lucene.search.BooleanQuery) luceneQuery;
var queryParts = new ArrayList<BooleanQueryPart>();
for (BooleanClause booleanClause : booleanQuery) {
org.apache.lucene.search.Query queryPartQuery = booleanClause.getQuery();
Occur occur;
switch (booleanClause.getOccur()) {
case MUST:
occur = OccurMust.of();
break;
case FILTER:
occur = OccurFilter.of();
break;
case SHOULD:
occur = OccurShould.of();
break;
case MUST_NOT:
occur = OccurMustNot.of();
break;
default:
throw new IllegalArgumentException();
}
queryParts.add(BooleanQueryPart.of(transformQuery(field, queryPartQuery), occur));
}
return BooleanQuery.of(List.copyOf(queryParts), booleanQuery.getMinimumNumberShouldMatch());
}
if (luceneQuery instanceof org.apache.lucene.search.PhraseQuery) {
var phraseQuery = (org.apache.lucene.search.PhraseQuery) luceneQuery;
int slop = phraseQuery.getSlop();
var terms = phraseQuery.getTerms();
var positions = phraseQuery.getPositions();
TermPosition[] termPositions = new TermPosition[terms.length];
for (int i = 0; i < terms.length; i++) {
var term = terms[i];
var position = positions[i];
termPositions[i] = TermPosition.of(QueryParser.toQueryTerm(term), position);
}
return PhraseQuery.of(List.of(termPositions), slop);
}
org.apache.lucene.search.SynonymQuery synonymQuery = (org.apache.lucene.search.SynonymQuery) luceneQuery;
return SynonymQuery.of(field,
synonymQuery
.getTerms()
.stream()
.map(term -> TermAndBoost.of(QueryParser.toQueryTerm(term), 1))
.toList()
);
}
}

View File

@@ -0,0 +1,3 @@
package it.cavallium.dbengine.database;
public record ColumnProperty<T>(String columnName, String propertyName, T value) {}

View File

@@ -1,10 +1,13 @@
package it.cavallium.dbengine.database;
import it.cavallium.dbengine.rpc.current.data.Column;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
import java.util.StringJoiner;
public record Column(String name) {
public class ColumnUtils {
private ColumnUtils() {
}
public static Column dictionary(String name) {
return new Column("hash_map_" + name);

View File

@@ -0,0 +1,10 @@
package it.cavallium.dbengine.database;
import it.cavallium.dbengine.rpc.current.data.Column;
import java.nio.file.Path;
import java.util.stream.Stream;
public interface DatabaseOperations {
void ingestSST(Column column, Stream<Path> files, boolean replaceExisting);
}
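A sketch of the new bulk-ingestion entry point; db is assumed to implement DatabaseOperations, and the column name and SST path are illustrative:

db.ingestSST(ColumnUtils.dictionary("users"),
        Stream.of(Path.of("/tmp/users-000001.sst")),
        true /* replaceExisting */);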

View File

@@ -0,0 +1,31 @@
package it.cavallium.dbengine.database;
import it.cavallium.dbengine.client.MemoryStats;
import it.cavallium.dbengine.rpc.current.data.Column;
import java.io.IOException;
import java.util.Map;
import java.util.stream.Stream;
import org.jetbrains.annotations.Nullable;
public interface DatabaseProperties {
MemoryStats getMemoryStats();
String getRocksDBStats();
Map<String, String> getMapProperty(@Nullable Column column, RocksDBMapProperty property);
Stream<ColumnProperty<Map<String, String>>> getMapColumnProperties(RocksDBMapProperty property);
String getStringProperty(@Nullable Column column, RocksDBStringProperty property);
Stream<ColumnProperty<String>> getStringColumnProperties(RocksDBStringProperty property);
Long getLongProperty(@Nullable Column column, RocksDBLongProperty property);
Stream<ColumnProperty<Long>> getLongColumnProperties(RocksDBLongProperty property);
Long getAggregatedLongProperty(RocksDBLongProperty property);
Stream<TableWithProperties> getTableProperties();
}
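A sketch of reading aggregate statistics through the new synchronous properties interface; properties is assumed to be a DatabaseProperties instance:

MemoryStats stats = properties.getMemoryStats();
System.out.println("estimated keys: " + stats.estimateNumKeys());
System.out.println("memtables: " + stats.sizeAllMemTables() + " bytes");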

View File

@@ -5,6 +5,7 @@ import org.jetbrains.annotations.Nullable;
public class Delta<T> {
private static final Delta<?> EMPTY = new Delta<>(null, null);
private final @Nullable T previous;
private final @Nullable T current;
@@ -25,6 +26,11 @@ public class Delta<T> {
return current;
}
public static <X> Delta<X> empty() {
//noinspection unchecked
return (Delta<X>) EMPTY;
}
@Override
public boolean equals(Object obj) {
if (obj == this)

View File

@@ -0,0 +1,6 @@
package it.cavallium.dbengine.database;
/**
* Closeable resource that can be closed if discarded
*/
public interface DiscardingCloseable extends SafeCloseable {}

View File

@@ -1,35 +1,20 @@
package it.cavallium.dbengine.database;
import io.micrometer.core.instrument.MeterRegistry;
import io.net5.buffer.api.BufferAllocator;
import it.cavallium.dbengine.client.DatabaseOptions;
import it.cavallium.dbengine.client.IndicizerAnalyzers;
import it.cavallium.dbengine.client.IndicizerSimilarities;
import it.cavallium.dbengine.client.LuceneOptions;
import it.cavallium.dbengine.lucene.LuceneHacks;
import it.cavallium.dbengine.rpc.current.data.Column;
import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;
import java.util.List;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;
@SuppressWarnings("UnusedReturnValue")
public interface LLDatabaseConnection {
BufferAllocator getAllocator();
MeterRegistry getMeterRegistry();
Mono<? extends LLDatabaseConnection> connect();
LLDatabaseConnection connect();
Mono<? extends LLKeyValueDatabase> getDatabase(String name,
LLKeyValueDatabase getDatabase(String name,
List<Column> columns,
DatabaseOptions databaseOptions);
Mono<? extends LLLuceneIndex> getLuceneIndex(String name,
int instancesCount,
IndicizerAnalyzers indicizerAnalyzers,
IndicizerSimilarities indicizerSimilarities,
LuceneOptions luceneOptions,
@Nullable LuceneHacks luceneHacks);
Mono<Void> disconnect();
void disconnect();
}

View File

@@ -1,109 +1,41 @@
package it.cavallium.dbengine.database;
import io.net5.buffer.api.Buffer;
import io.net5.buffer.api.Drop;
import io.net5.buffer.api.Owned;
import io.net5.buffer.api.Send;
import io.net5.buffer.api.internal.ResourceSupport;
import static it.cavallium.dbengine.database.LLUtils.unmodifiableBytes;
import it.cavallium.buffer.Buf;
import java.util.StringJoiner;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
public class LLDelta extends ResourceSupport<LLDelta, LLDelta> {
private static final Logger logger = LoggerFactory.getLogger(LLDelta.class);
private static final Drop<LLDelta> DROP = new Drop<>() {
@Override
public void drop(LLDelta obj) {
try {
if (obj.previous != null) {
obj.previous.close();
}
} catch (Throwable ex) {
logger.error("Failed to close previous", ex);
}
try {
if (obj.current != null) {
obj.current.close();
}
} catch (Throwable ex) {
logger.error("Failed to close current", ex);
}
try {
if (obj.onClose != null) {
obj.onClose.run();
}
} catch (Throwable ex) {
logger.error("Failed to close onDrop", ex);
}
}
@Override
public Drop<LLDelta> fork() {
return this;
}
@Override
public void attach(LLDelta obj) {
}
};
public class LLDelta {
@Nullable
private Buffer previous;
private final Buf previous;
@Nullable
private Buffer current;
@Nullable
private Runnable onClose;
private final Buf current;
private LLDelta(@Nullable Send<Buffer> previous, @Nullable Send<Buffer> current, @Nullable Runnable onClose) {
super(DROP);
assert isAllAccessible();
this.previous = previous != null ? previous.receive().makeReadOnly() : null;
this.current = current != null ? current.receive().makeReadOnly() : null;
this.onClose = onClose;
private LLDelta(@Nullable Buf previous, @Nullable Buf current) {
super();
this.previous = unmodifiableBytes(previous);
this.current = unmodifiableBytes(current);
}
private boolean isAllAccessible() {
assert previous == null || previous.isAccessible();
assert current == null || current.isAccessible();
assert this.isAccessible();
assert this.isOwned();
return true;
}
public static LLDelta of(Send<Buffer> previous, Send<Buffer> current) {
public static LLDelta of(Buf previous, Buf current) {
assert (previous == null && current == null) || (previous != current);
return new LLDelta(previous, current, null);
return new LLDelta(previous, current);
}
public Send<Buffer> previous() {
ensureOwned();
return previous != null ? previous.copy().send() : null;
public Buf previous() {
return previous;
}
public Send<Buffer> current() {
ensureOwned();
return current != null ? current.copy().send() : null;
public Buf current() {
return current;
}
public boolean isModified() {
return !LLUtils.equals(previous, current);
}
private void ensureOwned() {
assert isAllAccessible();
if (!isOwned()) {
if (!isAccessible()) {
throw this.createResourceClosedException();
} else {
throw new IllegalStateException("Resource not owned");
}
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
@@ -131,28 +63,4 @@ public class LLDelta extends ResourceSupport<LLDelta, LLDelta> {
.toString();
}
@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}
@Override
protected void makeInaccessible() {
this.current = null;
this.previous = null;
this.onClose = null;
}
@Override
protected Owned<LLDelta> prepareSend() {
Send<Buffer> minSend = this.previous != null ? this.previous.send() : null;
Send<Buffer> maxSend = this.current != null ? this.current.send() : null;
Runnable onClose = this.onClose;
return drop -> {
var instance = new LLDelta(minSend, maxSend, onClose);
drop.attach(instance);
return instance;
};
}
}

View File

@@ -1,140 +1,93 @@
package it.cavallium.dbengine.database;
import io.net5.buffer.api.Buffer;
import io.net5.buffer.api.BufferAllocator;
import io.net5.buffer.api.Send;
import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.serialization.KVSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ForkJoinPool;
import java.util.function.Function;
import java.util.stream.Stream;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.concurrency.atomicity.NotAtomic;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@SuppressWarnings("unused")
@NotAtomic
public interface LLDictionary extends LLKeyValueDatabaseStructure {
String getColumnName();
BufferAllocator getAllocator();
Buf get(@Nullable LLSnapshot snapshot, Buf key);
Mono<Send<Buffer>> get(@Nullable LLSnapshot snapshot, Mono<Send<Buffer>> key, boolean existsAlmostCertainly);
Buf put(Buf key, Buf value, LLDictionaryResultType resultType);
default Mono<Send<Buffer>> get(@Nullable LLSnapshot snapshot, Mono<Send<Buffer>> key) {
return get(snapshot, key, false);
UpdateMode getUpdateMode();
default Buf update(Buf key, SerializationFunction<@Nullable Buf, @Nullable Buf> updater, UpdateReturnMode updateReturnMode) {
LLDelta prev = this.updateAndGetDelta(key, updater);
return LLUtils.resolveLLDelta(prev, updateReturnMode);
}
Mono<Send<Buffer>> put(Mono<Send<Buffer>> key, Mono<Send<Buffer>> value, LLDictionaryResultType resultType);
LLDelta updateAndGetDelta(Buf key, SerializationFunction<@Nullable Buf, @Nullable Buf> updater);
Mono<UpdateMode> getUpdateMode();
void clear();
default Mono<Send<Buffer>> update(Mono<Send<Buffer>> key,
SerializationFunction<@Nullable Send<Buffer>, @Nullable Buffer> updater,
UpdateReturnMode updateReturnMode,
boolean existsAlmostCertainly) {
return this
.updateAndGetDelta(key, updater, existsAlmostCertainly)
.transform(prev -> LLUtils.resolveLLDelta(prev, updateReturnMode));
}
Buf remove(Buf key, LLDictionaryResultType resultType);
default Mono<Send<Buffer>> update(Mono<Send<Buffer>> key,
SerializationFunction<@Nullable Send<Buffer>, @Nullable Buffer> updater,
UpdateReturnMode returnMode) {
return update(key, updater, returnMode, false);
}
Stream<OptionalBuf> getMulti(@Nullable LLSnapshot snapshot, Stream<Buf> keys);
Mono<Send<LLDelta>> updateAndGetDelta(Mono<Send<Buffer>> key,
SerializationFunction<@Nullable Send<Buffer>, @Nullable Buffer> updater,
boolean existsAlmostCertainly);
void putMulti(Stream<LLEntry> entries);
default Mono<Send<LLDelta>> updateAndGetDelta(Mono<Send<Buffer>> key,
SerializationFunction<@Nullable Send<Buffer>, @Nullable Buffer> updater) {
return updateAndGetDelta(key, updater, false);
}
<K> Stream<Boolean> updateMulti(Stream<SerializedKey<K>> keys,
KVSerializationFunction<K, @Nullable Buf, @Nullable Buf> updateFunction);
Mono<Void> clear();
Stream<LLEntry> getRange(@Nullable LLSnapshot snapshot,
LLRange range,
boolean reverse,
boolean smallRange);
Mono<Send<Buffer>> remove(Mono<Send<Buffer>> key, LLDictionaryResultType resultType);
Flux<Optional<Buffer>> getMulti(@Nullable LLSnapshot snapshot,
Flux<Send<Buffer>> keys,
boolean existsAlmostCertainly);
default Flux<Optional<Buffer>> getMulti(@Nullable LLSnapshot snapshot,
Flux<Send<Buffer>> keys) {
return getMulti(snapshot, keys, false);
}
Flux<Send<LLEntry>> putMulti(Flux<Send<LLEntry>> entries, boolean getOldValues);
<K> Flux<Boolean> updateMulti(Flux<K> keys, Flux<Send<Buffer>> serializedKeys,
KVSerializationFunction<K, @Nullable Send<Buffer>, @Nullable Buffer> updateFunction);
Flux<Send<LLEntry>> getRange(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range, boolean existsAlmostCertainly);
default Flux<Send<LLEntry>> getRange(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range) {
return getRange(snapshot, range, false);
}
Flux<List<Send<LLEntry>>> getRangeGrouped(@Nullable LLSnapshot snapshot,
Mono<Send<LLRange>> range,
Stream<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
LLRange range,
int prefixLength,
boolean existsAlmostCertainly);
boolean smallRange);
default Flux<List<Send<LLEntry>>> getRangeGrouped(@Nullable LLSnapshot snapshot,
Mono<Send<LLRange>> range,
int prefixLength) {
return getRangeGrouped(snapshot, range, prefixLength, false);
}
Stream<Buf> getRangeKeys(@Nullable LLSnapshot snapshot,
LLRange range,
boolean reverse,
boolean smallRange);
Flux<Send<Buffer>> getRangeKeys(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range);
Stream<List<Buf>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot,
LLRange range,
int prefixLength,
boolean smallRange);
Flux<List<Send<Buffer>>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range, int prefixLength);
Stream<Buf> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot,
LLRange range,
int prefixLength,
boolean smallRange);
Flux<Send<Buffer>> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range, int prefixLength);
Stream<DbProgress<SSTVerificationProgress>> verifyChecksum(LLRange range);
Flux<BadBlock> badBlocks(Mono<Send<LLRange>> range);
void setRange(LLRange range, Stream<LLEntry> entries, boolean smallRange);
Mono<Void> setRange(Mono<Send<LLRange>> range, Flux<Send<LLEntry>> entries);
default Mono<Void> replaceRange(Mono<Send<LLRange>> range,
default void replaceRange(LLRange range,
boolean canKeysChange,
Function<Send<LLEntry>, Mono<Send<LLEntry>>> entriesReplacer,
boolean existsAlmostCertainly) {
return Mono.defer(() -> {
Function<@NotNull LLEntry, @NotNull LLEntry> entriesReplacer,
boolean smallRange) {
if (canKeysChange) {
return this
.setRange(range, this
.getRange(null, range, existsAlmostCertainly)
.flatMap(entriesReplacer)
);
this.setRange(range, this.getRange(null, range, false, smallRange).map(entriesReplacer), smallRange);
} else {
return this
.putMulti(this
.getRange(null, range, existsAlmostCertainly)
.flatMap(entriesReplacer), false)
.then();
this.putMulti(this.getRange(null, range, false, smallRange).map(entriesReplacer));
}
});
}
default Mono<Void> replaceRange(Mono<Send<LLRange>> range,
boolean canKeysChange,
Function<Send<LLEntry>, Mono<Send<LLEntry>>> entriesReplacer) {
return replaceRange(range, canKeysChange, entriesReplacer, false);
}
Mono<Boolean> isRangeEmpty(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range);
Mono<Long> sizeRange(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range, boolean fast);
Mono<Send<LLEntry>> getOne(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range);
Mono<Send<Buffer>> getOneKey(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range);
Mono<Send<LLEntry>> removeOne(Mono<Send<LLRange>> range);
boolean isRangeEmpty(@Nullable LLSnapshot snapshot, LLRange range, boolean fillCache);
long sizeRange(@Nullable LLSnapshot snapshot, LLRange range, boolean fast);
LLEntry getOne(@Nullable LLSnapshot snapshot, LLRange range);
Buf getOneKey(@Nullable LLSnapshot snapshot, LLRange range);
LLEntry removeOne(LLRange range);
}
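For illustration, a minimal sketch (not part of this changeset) of calling the new synchronous replaceRange declared above; transformValue is a hypothetical helper:
// Rewrite every value in the keyspace while keeping keys unchanged:
// with canKeysChange = false, the default method above routes through putMulti.
static void rewriteAllValues(LLDictionary dictionary) {
  dictionary.replaceRange(LLRange.all(), false,
      entry -> LLEntry.of(entry.getKey(), transformValue(entry.getValue())),
      false);
}
// Hypothetical value transformation; any Buf-to-Buf function works here.
static Buf transformValue(Buf oldValue) {
  return oldValue.copy();
}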

View File

@@ -1,120 +1,37 @@
package it.cavallium.dbengine.database;
import io.net5.buffer.api.Buffer;
import io.net5.buffer.api.Drop;
import io.net5.buffer.api.Owned;
import io.net5.buffer.api.Resource;
import io.net5.buffer.api.Send;
import io.net5.buffer.api.internal.ResourceSupport;
import it.cavallium.buffer.Buf;
import java.util.Objects;
import java.util.StringJoiner;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
public class LLEntry extends ResourceSupport<LLEntry, LLEntry> {
public class LLEntry {
private static final Logger logger = LoggerFactory.getLogger(LLEntry.class);
private static final Logger logger = LogManager.getLogger(LLEntry.class);
private final Buf key;
private final Buf value;
private static final Drop<LLEntry> DROP = new Drop<>() {
  @Override
  public void drop(LLEntry obj) {
    try {
      if (obj.key != null) {
        obj.key.close();
      }
    } catch (Throwable ex) {
      logger.error("Failed to close key", ex);
    }
    try {
      if (obj.value != null) {
        obj.value.close();
      }
    } catch (Throwable ex) {
      logger.error("Failed to close value", ex);
    }
  }
  @Override
  public Drop<LLEntry> fork() {
    return this;
  }
  @Override
  public void attach(LLEntry obj) {
  }
};
private LLEntry(@NotNull Buf key, @NotNull Buf value) {
  this.key = key;
  this.value = value;
}
@Nullable
private Buffer key;
@Nullable
private Buffer value;
private LLEntry(@NotNull Send<Buffer> key, @NotNull Send<Buffer> value) {
super(DROP);
this.key = key.receive().makeReadOnly();
this.value = value.receive().makeReadOnly();
assert isAllAccessible();
}
private LLEntry(@NotNull Buffer key, @NotNull Buffer value) {
super(DROP);
this.key = key.makeReadOnly();
this.value = value.makeReadOnly();
assert isAllAccessible();
}
private boolean isAllAccessible() {
assert key != null && key.isAccessible();
assert value != null && value.isAccessible();
assert this.isAccessible();
assert this.isOwned();
return true;
}
public static LLEntry of(@NotNull Send<Buffer> key, @NotNull Send<Buffer> value) {
public static LLEntry of(@NotNull Buf key, @NotNull Buf value) {
return new LLEntry(key, value);
}
public static LLEntry of(@NotNull Buffer key, @NotNull Buffer value) {
return new LLEntry(key, value);
public static LLEntry copyOf(Buf keyView, Buf valueView) {
return new LLEntry(keyView.copy(), valueView.copy());
}
public Send<Buffer> getKey() {
  ensureOwned();
  return Objects.requireNonNull(key).copy().send();
}
public Buffer getKeyUnsafe() {
  return key;
}
public Send<Buffer> getValue() {
  ensureOwned();
  return Objects.requireNonNull(value).copy().send();
}
public Buffer getValueUnsafe() {
  return value;
}
private void ensureOwned() {
  assert isAllAccessible();
  if (!isOwned()) {
    if (!isAccessible()) {
      throw this.createResourceClosedException();
    } else {
      throw new IllegalStateException("Resource not owned");
    }
  }
}
@Override
protected void makeInaccessible() {
  this.key = null;
  this.value = null;
}
public Buf getKey() {
  return Objects.requireNonNull(key);
}
public Buf getValue() {
  return Objects.requireNonNull(value);
}
@Override
@@ -143,22 +60,4 @@ public class LLEntry extends ResourceSupport<LLEntry, LLEntry> {
.add("value=" + LLUtils.toString(value))
.toString();
}
@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}
@Override
protected Owned<LLEntry> prepareSend() {
Send<Buffer> keySend;
Send<Buffer> valueSend;
keySend = Objects.requireNonNull(this.key).send();
valueSend = Objects.requireNonNull(this.value).send();
return drop -> {
var instance = new LLEntry(keySend, valueSend);
drop.attach(instance);
return instance;
};
}
}
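For illustration, a short sketch (not from this changeset) of the value-based replacement class; Buf.createZeroes, setLong and getLong are used as seen elsewhere in this diff:
Buf key = Buf.createZeroes(Long.BYTES);
key.setLong(0, 42L);
Buf value = Buf.createZeroes(Long.BYTES);
value.setLong(0, 7L);
LLEntry wrapped = LLEntry.of(key, value);     // wraps the given buffers directly
LLEntry copied = LLEntry.copyOf(key, value);  // stores defensive copies instead
assert copied.getKey().getLong(0) == 42L;     // plain getters, no Send/receive lifecycle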

View File

@@ -1,3 +0,0 @@
package it.cavallium.dbengine.database;
public sealed interface LLIndexRequest permits LLSoftUpdateDocument, LLUpdateDocument, LLUpdateFields {}

View File

@@ -1,127 +0,0 @@
package it.cavallium.dbengine.database;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Objects;
import java.util.StringJoiner;
import org.apache.lucene.document.Field;
public class LLItem {
private final LLType type;
private final String name;
private final byte[] data;
public LLItem(LLType type, String name, byte[] data) {
this.type = type;
this.name = name;
this.data = data;
}
private LLItem(LLType type, String name, String data) {
this.type = type;
this.name = name;
this.data = data.getBytes(StandardCharsets.UTF_8);
}
private LLItem(LLType type, String name, int data) {
this.type = type;
this.name = name;
this.data = Ints.toByteArray(data);
}
private LLItem(LLType type, String name, float data) {
this.type = type;
this.name = name;
this.data = ByteBuffer.allocate(4).putFloat(data).array();
}
private LLItem(LLType type, String name, long data) {
this.type = type;
this.name = name;
this.data = Longs.toByteArray(data);
}
public static LLItem newIntPoint(String name, int data) {
return new LLItem(LLType.IntPoint, name, data);
}
public static LLItem newLongPoint(String name, long data) {
return new LLItem(LLType.LongPoint, name, data);
}
public static LLItem newFloatPoint(String name, float data) {
return new LLItem(LLType.FloatPoint, name, data);
}
public static LLItem newTextField(String name, String data, Field.Store store) {
if (store == Field.Store.YES) {
return new LLItem(LLType.TextFieldStored, name, data);
} else {
return new LLItem(LLType.TextField, name, data);
}
}
public static LLItem newStringField(String name, String data, Field.Store store) {
if (store == Field.Store.YES) {
return new LLItem(LLType.StringFieldStored, name, data);
} else {
return new LLItem(LLType.StringField, name, data);
}
}
public static LLItem newSortedNumericDocValuesField(String name, long data) {
return new LLItem(LLType.SortedNumericDocValuesField, name, data);
}
public String getName() {
return name;
}
public LLType getType() {
return type;
}
public byte[] getData() {
return data;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
LLItem llItem = (LLItem) o;
return type == llItem.type &&
Objects.equals(name, llItem.name) &&
Arrays.equals(data, llItem.data);
}
@Override
public int hashCode() {
int result = Objects.hash(type, name);
result = 31 * result + Arrays.hashCode(data);
return result;
}
@Override
public String toString() {
var sj = new StringJoiner(", ", "[", "]")
.add("type=" + type)
.add("name='" + name + "'");
if (data != null && data.length > 0) {
sj.add("data=" + new String(data));
}
return sj.toString();
}
public String stringValue() {
return new String(data, StandardCharsets.UTF_8);
}
}

View File

@@ -1,8 +0,0 @@
package it.cavallium.dbengine.database;
import java.util.Objects;
import java.util.StringJoiner;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;
public record LLKeyScore(int docId, float score, @Nullable String key) {}

View File

@@ -3,52 +3,67 @@ package it.cavallium.dbengine.database;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import io.micrometer.core.instrument.MeterRegistry;
import io.net5.buffer.api.BufferAllocator;
import it.cavallium.dbengine.client.IBackuppable;
import it.cavallium.dbengine.database.collections.DatabaseInt;
import it.cavallium.dbengine.database.collections.DatabaseLong;
import java.io.Closeable;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import reactor.core.publisher.Mono;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.RocksDBException;
public interface LLKeyValueDatabase extends LLSnapshottable, LLKeyValueDatabaseStructure {
public interface LLKeyValueDatabase extends LLSnapshottable, LLKeyValueDatabaseStructure, DatabaseProperties,
IBackuppable, DatabaseOperations, Closeable {
Mono<? extends LLSingleton> getSingleton(byte[] singletonListColumnName, byte[] name, byte[] defaultValue);
LLSingleton getSingleton(byte[] singletonListColumnName, byte[] name, byte @Nullable [] defaultValue);
Mono<? extends LLDictionary> getDictionary(byte[] columnName, UpdateMode updateMode);
LLDictionary getDictionary(byte[] columnName, UpdateMode updateMode);
@Deprecated
default Mono<? extends LLDictionary> getDeprecatedSet(String name, UpdateMode updateMode) {
return getDictionary(Column.deprecatedSet(name).name().getBytes(StandardCharsets.US_ASCII), updateMode);
default LLDictionary getDeprecatedSet(String name, UpdateMode updateMode) {
return getDictionary(ColumnUtils.deprecatedSet(name).name().getBytes(StandardCharsets.US_ASCII), updateMode);
}
default Mono<? extends LLDictionary> getDictionary(String name, UpdateMode updateMode) {
return getDictionary(Column.dictionary(name).name().getBytes(StandardCharsets.US_ASCII), updateMode);
default LLDictionary getDictionary(String name, UpdateMode updateMode) {
return getDictionary(ColumnUtils.dictionary(name).name().getBytes(StandardCharsets.US_ASCII), updateMode);
}
default Mono<DatabaseInt> getInteger(String singletonListName, String name, int defaultValue) {
  return this
      .getSingleton(Column.special(singletonListName).name().getBytes(StandardCharsets.US_ASCII),
          name.getBytes(StandardCharsets.US_ASCII),
          Ints.toByteArray(defaultValue)
      )
      .map(DatabaseInt::new);
}
default LLSingleton getSingleton(String singletonListName, String name) {
  return getSingleton(ColumnUtils.special(singletonListName).name().getBytes(StandardCharsets.US_ASCII),
      name.getBytes(StandardCharsets.US_ASCII),
      null
  );
}
default DatabaseInt getInteger(String singletonListName, String name, int defaultValue) {
  return new DatabaseInt(this.getSingleton(ColumnUtils
      .special(singletonListName)
      .name()
      .getBytes(StandardCharsets.US_ASCII),
      name.getBytes(StandardCharsets.US_ASCII),
      Ints.toByteArray(defaultValue)
  ));
}
default Mono<DatabaseLong> getLong(String singletonListName, String name, long defaultValue) {
  return this
      .getSingleton(Column.special(singletonListName).name().getBytes(StandardCharsets.US_ASCII),
          name.getBytes(StandardCharsets.US_ASCII),
          Longs.toByteArray(defaultValue)
      )
      .map(DatabaseLong::new);
}
default DatabaseLong getLong(String singletonListName, String name, long defaultValue) {
  return new DatabaseLong(this.getSingleton(ColumnUtils
      .special(singletonListName)
      .name()
      .getBytes(StandardCharsets.US_ASCII),
      name.getBytes(StandardCharsets.US_ASCII),
      Longs.toByteArray(defaultValue)
  ));
}
Mono<Long> getProperty(String propertyName);
void verifyChecksum();
Mono<Void> verifyChecksum();
void compact();
BufferAllocator getAllocator();
void flush();
MeterRegistry getMeterRegistry();
Mono<Void> close();
void preClose();
void close();
}
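A usage sketch of the now-blocking contract above; db is an assumed, already-opened LLKeyValueDatabase and the column/key names are illustrative:
DatabaseLong hits = db.getLong("stats", "hits", 0L);        // wraps an 8-byte singleton
hits.set(0L);
LLSingleton lastRun = db.getSingleton("stats", "last-run"); // created with a null default
db.flush();                                                 // a plain blocking call now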

View File

@@ -1,6 +1,12 @@
package it.cavallium.dbengine.database;
import java.util.concurrent.ForkJoinPool;
public interface LLKeyValueDatabaseStructure {
String getDatabaseName();
ForkJoinPool getDbReadPool();
ForkJoinPool getDbWritePool();
}

View File

@@ -1,83 +0,0 @@
package it.cavallium.dbengine.database;
import io.net5.buffer.api.Resource;
import io.net5.buffer.api.Send;
import it.cavallium.data.generator.nativedata.Nullablefloat;
import it.cavallium.dbengine.client.query.current.data.NoSort;
import it.cavallium.dbengine.client.query.current.data.Query;
import it.cavallium.dbengine.client.query.current.data.QueryParams;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;
public interface LLLuceneIndex extends LLSnapshottable {
String getLuceneIndexName();
Mono<Void> addDocument(LLTerm id, LLUpdateDocument doc);
/**
* WARNING! This operation is atomic!
* Please don't send infinite or huge document fluxes, because they will
* be kept in RAM all at once.
*/
Mono<Void> addDocuments(Flux<Entry<LLTerm, LLUpdateDocument>> documents);
Mono<Void> deleteDocument(LLTerm id);
Mono<Void> update(LLTerm id, LLIndexRequest request);
Mono<Void> updateDocuments(Mono<Map<LLTerm, LLUpdateDocument>> documents);
Mono<Void> deleteAll();
/**
* @param queryParams the limit is valid for each lucene instance. If you have 15 instances, the number of elements
* returned can be at most <code>limit * 15</code>.
* <p>
* The additional query will be used with the moreLikeThis query: "mltQuery AND additionalQuery"
* @return the collection has one or more fluxes
*/
Mono<LLSearchResultShard> moreLikeThis(@Nullable LLSnapshot snapshot,
QueryParams queryParams,
String keyFieldName,
Flux<Tuple2<String, Set<String>>> mltDocumentFields);
/**
* @param queryParams the limit is valid for each lucene instance. If you have 15 instances, the number of elements
* returned can be at most <code>limit * 15</code>
* @return the collection has one or more fluxes
*/
Mono<LLSearchResultShard> search(@Nullable LLSnapshot snapshot, QueryParams queryParams, String keyFieldName);
default Mono<TotalHitsCount> count(@Nullable LLSnapshot snapshot, Query query) {
QueryParams params = QueryParams.of(query, 0, 0, Nullablefloat.empty(), NoSort.of(), false);
return Mono.from(this.search(snapshot, params, null)
.map(llSearchResultShard -> {
try (llSearchResultShard) {
return llSearchResultShard.totalHitsCount();
}
})
.defaultIfEmpty(TotalHitsCount.of(0, true))
).doOnDiscard(Send.class, Send::close).doOnDiscard(Resource.class, Resource::close);
}
boolean isLowMemoryMode();
Mono<Void> close();
/**
* Flush writes to disk
*/
Mono<Void> flush();
/**
* Refresh index searcher
*/
Mono<Void> refresh(boolean force);
}

View File

@@ -0,0 +1,104 @@
package it.cavallium.dbengine.database;
import static it.cavallium.dbengine.utils.StreamUtils.collect;
import static it.cavallium.dbengine.utils.StreamUtils.executing;
import com.google.common.collect.Multimap;
import io.micrometer.core.instrument.MeterRegistry;
import it.cavallium.dbengine.client.ConnectionSettings.ConnectionPart;
import it.cavallium.dbengine.client.ConnectionSettings.ConnectionPart.ConnectionPartRocksDB;
import it.cavallium.dbengine.rpc.current.data.Column;
import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.StringJoiner;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class LLMultiDatabaseConnection implements LLDatabaseConnection {
private static final Logger LOG = LogManager.getLogger(LLMultiDatabaseConnection.class);
private final Map<String, LLDatabaseConnection> databaseShardConnections = new HashMap<>();
private final Set<LLDatabaseConnection> allConnections = new HashSet<>();
private final LLDatabaseConnection defaultDatabaseConnection;
private final LLDatabaseConnection anyConnection;
public LLMultiDatabaseConnection(Multimap<LLDatabaseConnection, ConnectionPart> subConnections) {
LLDatabaseConnection defaultDatabaseConnection = null;
for (Entry<LLDatabaseConnection, ConnectionPart> entry : subConnections.entries()) {
var subConnectionSettings = entry.getKey();
var connectionPart = entry.getValue();
if (connectionPart instanceof ConnectionPartRocksDB connectionPartRocksDB) {
if (connectionPartRocksDB.name() == null) {
defaultDatabaseConnection = subConnectionSettings;
} else {
databaseShardConnections.put(connectionPartRocksDB.name(), subConnectionSettings);
}
} else {
throw new IllegalArgumentException("Unsupported connection part: " + connectionPart);
}
}
this.defaultDatabaseConnection = defaultDatabaseConnection;
if (defaultDatabaseConnection != null) {
anyConnection = defaultDatabaseConnection;
} else {
anyConnection = subConnections.keySet().stream().findAny().orElse(null);
}
if (defaultDatabaseConnection != null) {
allConnections.add(defaultDatabaseConnection);
}
allConnections.addAll(databaseShardConnections.values());
}
@Override
public MeterRegistry getMeterRegistry() {
return anyConnection.getMeterRegistry();
}
@Override
public LLDatabaseConnection connect() {
collect(allConnections.stream(), executing(connection -> {
try {
connection.connect();
} catch (Exception ex) {
LOG.error("Failed to open connection", ex);
}
}));
return this;
}
@Override
public LLKeyValueDatabase getDatabase(String name,
List<Column> columns,
DatabaseOptions databaseOptions) {
var conn = databaseShardConnections.getOrDefault(name, defaultDatabaseConnection);
Objects.requireNonNull(conn, "Null connection");
return conn.getDatabase(name, columns, databaseOptions);
}
@Override
public void disconnect() {
collect(allConnections.stream(), executing(connection -> {
try {
connection.disconnect();
} catch (Exception ex) {
LOG.error("Failed to close connection", ex);
}
}));
}
@Override
public String toString() {
return new StringJoiner(", ", LLMultiDatabaseConnection.class.getSimpleName() + "[", "]")
.add("databaseShardConnections=" + databaseShardConnections)
.add("allConnections=" + allConnections)
.add("defaultDatabaseConnection=" + defaultDatabaseConnection)
.add("anyConnection=" + anyConnection)
.toString();
}
}
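A wiring sketch, assuming ConnectionPartRocksDB is a record whose single component is the shard name (null selecting the default connection); localConn and usersConn are placeholders:
Multimap<LLDatabaseConnection, ConnectionPart> parts = ArrayListMultimap.create();
parts.put(localConn, new ConnectionPartRocksDB(null));    // default connection
parts.put(usersConn, new ConnectionPartRocksDB("users")); // shard for database "users"
LLDatabaseConnection conn = new LLMultiDatabaseConnection(parts).connect();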

View File

@@ -1,205 +1,145 @@
package it.cavallium.dbengine.database;
import static io.net5.buffer.Unpooled.wrappedBuffer;
import io.net5.buffer.api.Buffer;
import io.net5.buffer.api.Drop;
import io.net5.buffer.api.Owned;
import io.net5.buffer.api.Send;
import io.net5.buffer.api.internal.ResourceSupport;
import java.util.StringJoiner;
import it.cavallium.buffer.Buf;
import java.util.Objects;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
/**
* Range of data, from min (inclusive), to max (exclusive)
*/
public class LLRange extends ResourceSupport<LLRange, LLRange> {
public class LLRange {
private static final Logger logger = LoggerFactory.getLogger(LLRange.class);
private static final Drop<LLRange> DROP = new Drop<>() {
@Override
public void drop(LLRange obj) {
try {
if (obj.min != null) {
obj.min.close();
}
} catch (Throwable ex) {
logger.error("Failed to close min", ex);
}
try {
if (obj.max != null) {
obj.max.close();
}
} catch (Throwable ex) {
logger.error("Failed to close max", ex);
}
try {
if (obj.single != null) {
obj.single.close();
}
} catch (Throwable ex) {
logger.error("Failed to close single", ex);
}
}
@Override
public Drop<LLRange> fork() {
return this;
}
@Override
public void attach(LLRange obj) {
}
};
private static final LLRange RANGE_ALL = new LLRange((Buffer) null, (Buffer) null, (Buffer) null);
private static final LLRange RANGE_ALL = new LLRange(null, null, (Buf) null);
@Nullable
private Buffer min;
private final Buf min;
@Nullable
private Buffer max;
private final Buf max;
@Nullable
private Buffer single;
private final Buf single;
private LLRange(Send<Buffer> min, Send<Buffer> max, Send<Buffer> single) {
  super(DROP);
  assert isAllAccessible();
  assert single == null || (min == null && max == null);
  this.min = min != null ? min.receive().makeReadOnly() : null;
  this.max = max != null ? max.receive().makeReadOnly() : null;
  this.single = single != null ? single.receive().makeReadOnly() : null;
}
private LLRange(Buffer min, Buffer max, Buffer single) {
  super(DROP);
  assert isAllAccessible();
  assert single == null || (min == null && max == null);
  this.min = min != null ? min.makeReadOnly() : null;
  this.max = max != null ? max.makeReadOnly() : null;
  this.single = single != null ? single.makeReadOnly() : null;
}
private boolean isAllAccessible() {
  assert min == null || min.isAccessible();
  assert max == null || max.isAccessible();
  assert single == null || single.isAccessible();
  assert this.isAccessible();
  assert this.isOwned();
  return true;
}
private LLRange(@Nullable Buf min, @Nullable Buf max, @Nullable Buf single) {
  assert single == null || (min == null && max == null);
  assert min == null || max == null || min.compareTo(max) <= 0
      : "Minimum buffer is bigger than maximum buffer: " + min + " > " + max;
  this.min = min;
  this.max = max;
  this.single = single;
}
public static LLRange all() {
return RANGE_ALL.copy();
return RANGE_ALL;
}
public static LLRange from(Send<Buffer> min) {
public static LLRange from(Buf min) {
return new LLRange(min, null, null);
}
public static LLRange to(Send<Buffer> max) {
public static LLRange to(Buf max) {
return new LLRange(null, max, null);
}
public static LLRange single(Send<Buffer> single) {
public static LLRange single(Buf single) {
return new LLRange(null, null, single);
}
public static LLRange of(Send<Buffer> min, Send<Buffer> max) {
public static LLRange of(Buf min, Buf max) {
return new LLRange(min, max, null);
}
public static LLRange ofUnsafe(Buffer min, Buffer max) {
return new LLRange(min, max, null);
public static boolean isInside(LLRange rangeSub, LLRange rangeParent) {
if (rangeParent.isAll()) {
return true;
} else if (rangeParent.isSingle()) {
return Objects.equals(rangeSub, rangeParent);
} else {
return ((!rangeParent.hasMin() || (rangeSub.hasMin() && rangeParent.getMin().compareTo(rangeSub.getMin()) <= 0)))
&& ((!rangeParent.hasMax() || (rangeSub.hasMax() && rangeParent.getMax().compareTo(rangeSub.getMax()) >= 0)));
}
}
@Nullable
public static LLRange intersect(LLRange rangeA, LLRange rangeB) {
boolean aEndInclusive = rangeA.isSingle();
boolean bEndInclusive = rangeB.isSingle();
Buf min = rangeA.isAll()
? rangeB.getMin()
: (rangeB.isAll()
? rangeA.getMin()
: (rangeA.getMin().compareTo(rangeB.getMin()) <= 0 ? rangeB.getMin() : rangeA.getMin()));
int aComparedToB;
Buf max;
boolean maxInclusive;
if (rangeA.isAll()) {
max = rangeB.getMax();
maxInclusive = bEndInclusive;
} else if (rangeB.isAll()) {
max = rangeA.getMax();
maxInclusive = aEndInclusive;
} else if ((aComparedToB = rangeA.getMax().compareTo(rangeB.getMax())) >= 0) {
max = rangeB.getMax();
if (aComparedToB == 0) {
maxInclusive = bEndInclusive && aEndInclusive;
} else {
maxInclusive = bEndInclusive;
}
} else {
max = rangeA.getMax();
maxInclusive = aEndInclusive;
}
if (min != null && max != null && min.compareTo(max) >= (maxInclusive ? 1 : 0)) {
return null;
} else {
if (min != null && min.equals(max)) {
return LLRange.single(min);
} else {
return LLRange.of(min, max);
}
}
}
public boolean isAll() {
ensureOwned();
return min == null && max == null && single == null;
}
public boolean isSingle() {
ensureOwned();
return single != null;
}
public boolean hasMin() {
ensureOwned();
return min != null || single != null;
}
public Send<Buffer> getMin() {
  ensureOwned();
  if (min != null) {
    return min.copy().send();
  } else if (single != null) {
    return single.copy().send();
  } else {
    return null;
  }
}
public Buffer getMinUnsafe() {
  ensureOwned();
  if (min != null) {
    return min;
  } else if (single != null) {
    return single;
  } else {
    return null;
  }
}
public Buf getMin() {
  // todo: use a read-only copy
  if (min != null) {
    return min;
  } else {
    return single;
  }
}
public boolean hasMax() {
  ensureOwned();
  return max != null || single != null;
}
public Send<Buffer> getMax() {
  ensureOwned();
  if (max != null) {
    return max.copy().send();
  } else if (single != null) {
    return single.copy().send();
  } else {
    return null;
  }
}
public Buffer getMaxUnsafe() {
  ensureOwned();
  if (max != null) {
    return max;
  } else if (single != null) {
    return single;
  } else {
    return null;
  }
}
public Buf getMax() {
  // todo: use a read-only copy
  if (max != null) {
    return max;
  } else {
    return single;
  }
}
public Send<Buffer> getSingle() {
  ensureOwned();
  assert isSingle();
  return single != null ? single.copy().send() : null;
}
public Buffer getSingleUnsafe() {
  ensureOwned();
  assert isSingle();
  return single;
}
private void ensureOwned() {
  assert isAllAccessible();
  if (!isOwned()) {
    if (!isAccessible()) {
      throw this.createResourceClosedException();
    } else {
      throw new IllegalStateException("Resource not owned");
    }
  }
}
public Buf getSingle() {
  assert isSingle();
  // todo: use a read-only copy
  return single;
}
public Buf getSingleUnsafe() {
  assert isSingle();
  return single;
}
@Override
@@ -221,45 +161,24 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
return result;
}
@SuppressWarnings("UnnecessaryUnicodeEscape")
@Override
public String toString() {
return new StringJoiner(", ", LLRange.class.getSimpleName() + "[", "]")
.add("min=" + LLUtils.toString(min))
.add("max=" + LLUtils.toString(max))
.toString();
if (single != null) {
return "[" + single + "]";
} else if (min != null && max != null) {
return "[" + LLUtils.toString(min) + "," + LLUtils.toString(max) + ")";
} else if (min != null) {
return "[" + min + ",\u221E)";
} else if (max != null) {
return "[\u2205," + max + ")";
} else {
return "[\u221E)";
}
}
public LLRange copy() {
  ensureOwned();
  return new LLRange(min != null ? min.copy().send() : null,
      max != null ? max.copy().send() : null,
      single != null ? single.copy().send() : null
  );
}
@Override
protected RuntimeException createResourceClosedException() {
  return new IllegalStateException("Closed");
}
@Override
protected Owned<LLRange> prepareSend() {
  Send<Buffer> minSend;
  Send<Buffer> maxSend;
  Send<Buffer> singleSend;
  minSend = this.min != null ? this.min.send() : null;
  maxSend = this.max != null ? this.max.send() : null;
  singleSend = this.single != null ? this.single.send() : null;
  return drop -> {
    var instance = new LLRange(minSend, maxSend, singleSend);
    drop.attach(instance);
    return instance;
  };
}
protected void makeInaccessible() {
  this.min = null;
  this.max = null;
  this.single = null;
}
public LLRange copy() {
  // todo: use a read-only copy
  return new LLRange(min, max, single);
}
}
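A small sketch of the value-based range API above, with buffers built via Buf.createZeroes/setLong as used elsewhere in this diff:
Buf k1 = Buf.createZeroes(Long.BYTES);
k1.setLong(0, 10L);
Buf k2 = Buf.createZeroes(Long.BYTES);
k2.setLong(0, 20L);
LLRange sub = LLRange.of(k1, k2);             // [k1, k2)
assert LLRange.isInside(sub, LLRange.all());  // every range is inside "all"
LLRange hit = LLRange.intersect(sub, LLRange.single(k1));
assert hit != null && hit.isSingle();         // the overlap collapses to exactly k1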

View File

@@ -1,7 +1,5 @@
package it.cavallium.dbengine.database;
import org.apache.lucene.search.Scorer;
public enum LLScoreMode {
/**
* Produced scorers will allow visiting all matches and get their score.
@@ -15,7 +13,7 @@ public enum LLScoreMode {
COMPLETE_NO_SCORES,
/**
* Produced scorers will optionally allow skipping over non-competitive
* hits using the {@link Scorer#setMinCompetitiveScore(float)} API.
* hits using the Scorer#setMinCompetitiveScore(float) API.
* This can reduce time if using setMinCompetitiveScore.
*/
TOP_SCORES,

View File

@@ -1,13 +0,0 @@
package it.cavallium.dbengine.database;
import java.util.function.BiFunction;
import org.jetbrains.annotations.NotNull;
import reactor.core.publisher.Flux;
public record LLSearchResult(Flux<LLSearchResultShard> results) {
@NotNull
public static BiFunction<LLSearchResult, LLSearchResult, LLSearchResult> accumulator() {
return (a, b) -> new LLSearchResult(Flux.merge(a.results, b.results));
}
}

View File

@@ -1,102 +0,0 @@
package it.cavallium.dbengine.database;
import io.net5.buffer.api.Drop;
import io.net5.buffer.api.Owned;
import io.net5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import java.util.Objects;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
public final class LLSearchResultShard extends ResourceSupport<LLSearchResultShard, LLSearchResultShard> {
private static final Logger logger = LoggerFactory.getLogger(LLSearchResultShard.class);
private static final Drop<LLSearchResultShard> DROP = new Drop<>() {
@Override
public void drop(LLSearchResultShard obj) {
try {
if (obj.onClose != null) {
obj.onClose.run();
}
} catch (Throwable ex) {
logger.error("Failed to close onClose", ex);
}
}
@Override
public Drop<LLSearchResultShard> fork() {
return this;
}
@Override
public void attach(LLSearchResultShard obj) {
}
};
private Flux<LLKeyScore> results;
private TotalHitsCount totalHitsCount;
private Runnable onClose;
public LLSearchResultShard(Flux<LLKeyScore> results, TotalHitsCount totalHitsCount, Runnable onClose) {
super(DROP);
this.results = results;
this.totalHitsCount = totalHitsCount;
this.onClose = onClose;
}
public Flux<LLKeyScore> results() {
if (!isOwned()) {
throw attachTrace(new IllegalStateException("LLSearchResultShard must be owned to be used"));
}
return results;
}
public TotalHitsCount totalHitsCount() {
if (!isOwned()) {
throw attachTrace(new IllegalStateException("LLSearchResultShard must be owned to be used"));
}
return totalHitsCount;
}
@Override
public boolean equals(Object obj) {
if (obj == this)
return true;
if (obj == null || obj.getClass() != this.getClass())
return false;
var that = (LLSearchResultShard) obj;
return Objects.equals(this.results, that.results) && Objects.equals(this.totalHitsCount, that.totalHitsCount);
}
@Override
public int hashCode() {
return Objects.hash(results, totalHitsCount);
}
@Override
public String toString() {
return "LLSearchResultShard[" + "results=" + results + ", " + "totalHitsCount=" + totalHitsCount + ']';
}
@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}
@Override
protected Owned<LLSearchResultShard> prepareSend() {
var results = this.results;
var totalHitsCount = this.totalHitsCount;
var onClose = this.onClose;
return drop -> new LLSearchResultShard(results, totalHitsCount, onClose);
}
protected void makeInaccessible() {
this.results = null;
this.totalHitsCount = null;
this.onClose = null;
}
}

View File

@@ -1,11 +1,24 @@
package it.cavallium.dbengine.database;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import java.io.IOException;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;
public interface LLSingleton extends LLKeyValueDatabaseStructure {
Mono<byte[]> get(@Nullable LLSnapshot snapshot);
Buf get(@Nullable LLSnapshot snapshot);
Mono<Void> set(byte[] value);
void set(Buf value);
default Buf update(SerializationFunction<@Nullable Buf, @Nullable Buf> updater, UpdateReturnMode updateReturnMode) {
var prev = this.updateAndGetDelta(updater);
return LLUtils.resolveLLDelta(prev, updateReturnMode);
}
LLDelta updateAndGetDelta(SerializationFunction<@Nullable Buf, @Nullable Buf> updater);
String getColumnName();
String getName();
}
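A sketch of the new synchronous update, mirroring what DatabaseLong.addAnd does later in this diff; singleton is assumed in scope:
Buf next = singleton.update(prev -> {
  var buf = Buf.createZeroes(Long.BYTES);                   // 8-byte counter buffer
  buf.setLong(0, prev == null ? 1L : prev.getLong(0) + 1L); // initialize or increment
  return buf;
}, UpdateReturnMode.GET_NEW_VALUE);
long counter = next.getLong(0);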

View File

@@ -1,10 +1,10 @@
package it.cavallium.dbengine.database;
import reactor.core.publisher.Mono;
import java.io.IOException;
public interface LLSnapshottable {
Mono<LLSnapshot> takeSnapshot();
LLSnapshot takeSnapshot();
Mono<Void> releaseSnapshot(LLSnapshot snapshot);
void releaseSnapshot(LLSnapshot snapshot);
}
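The snapshot lifecycle under the new blocking signatures; db and singleton are assumed instances:
LLSnapshot snap = db.takeSnapshot();
try {
  Buf valueAtSnapshot = singleton.get(snap); // reads observe the snapshot's state
} finally {
  db.releaseSnapshot(snap);                  // no Mono to subscribe to anymore
}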

View File

@@ -1,3 +0,0 @@
package it.cavallium.dbengine.database;
public record LLSoftUpdateDocument(LLItem[] items, LLItem[] softDeleteItems) implements LLIndexRequest {}

View File

@@ -1,48 +0,0 @@
package it.cavallium.dbengine.database;
import java.util.Objects;
public class LLTerm {
private final String key;
private final String value;
public LLTerm(String key, String value) {
this.key = key;
this.value = value;
}
public String getKey() {
return key;
}
public String getValue() {
return value;
}
@Override
public String toString() {
return "LLTerm{" +
"key='" + key + '\'' +
", value='" + value + '\'' +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
LLTerm llTerm = (LLTerm) o;
return Objects.equals(key, llTerm.key) &&
Objects.equals(value, llTerm.value);
}
@Override
public int hashCode() {
return Objects.hash(key, value);
}
}

View File

@@ -1,52 +0,0 @@
package it.cavallium.dbengine.database;
import java.util.Arrays;
import java.util.Objects;
@SuppressWarnings("unused")
public class LLTopKeys {
private final long totalHitsCount;
private final LLKeyScore[] hits;
public LLTopKeys(long totalHitsCount, LLKeyScore[] hits) {
this.totalHitsCount = totalHitsCount;
this.hits = hits;
}
public long getTotalHitsCount() {
return totalHitsCount;
}
public LLKeyScore[] getHits() {
return hits;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
LLTopKeys llTopKeys = (LLTopKeys) o;
return totalHitsCount == llTopKeys.totalHitsCount &&
Arrays.equals(hits, llTopKeys.hits);
}
@Override
public int hashCode() {
int result = Objects.hash(totalHitsCount);
result = 31 * result + Arrays.hashCode(hits);
return result;
}
@Override
public String toString() {
return "LLTopKeys{" +
"totalHitsCount=" + totalHitsCount +
", hits=" + Arrays.toString(hits) +
'}';
}
}

View File

@@ -1,11 +1,22 @@
package it.cavallium.dbengine.database;
/**
* <a href="https://lucene.apache.org/core/8_0_0/core/org/apache/lucene/document/Field.html">Field.html</a>
*/
public enum LLType {
StringField,
StringFieldStored,
IntPoint,
LongPoint,
FloatPoint,
DoublePoint,
IntPointND,
LongPointND,
FloatPointND,
DoublePointND,
LongStoredField,
BytesStoredField,
NumericDocValuesField,
SortedNumericDocValuesField,
TextField,
TextFieldStored
}

View File

@@ -1,3 +0,0 @@
package it.cavallium.dbengine.database;
public record LLUpdateDocument(LLItem[] items) implements LLIndexRequest {}

View File

@@ -1,3 +0,0 @@
package it.cavallium.dbengine.database;
public record LLUpdateFields(LLItem[] items) implements LLIndexRequest {}

File diff suppressed because it is too large

View File

@@ -0,0 +1,97 @@
package it.cavallium.dbengine.database;
import it.cavallium.buffer.Buf;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.function.Function;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
public final class OptionalBuf {
private static final OptionalBuf EMPTY = new OptionalBuf(null);
private final Buf buffer;
private OptionalBuf(@Nullable Buf buffer) {
this.buffer = buffer;
}
public static OptionalBuf ofNullable(@Nullable Buf buffer) {
return new OptionalBuf(buffer);
}
public static OptionalBuf of(@NotNull Buf buffer) {
Objects.requireNonNull(buffer);
return new OptionalBuf(buffer);
}
public static OptionalBuf empty() {
return EMPTY;
}
@Override
public String toString() {
if (buffer != null) {
return buffer.toString();
} else {
return "(empty)";
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
OptionalBuf that = (OptionalBuf) o;
return Objects.equals(buffer, that.buffer);
}
@Override
public int hashCode() {
return buffer != null ? buffer.hashCode() : 0;
}
public Buf get() {
if (buffer == null) {
throw new NoSuchElementException();
}
return buffer;
}
public Buf orElse(Buf alternative) {
if (buffer == null) {
return alternative;
}
return buffer;
}
public void ifPresent(Consumer<Buf> consumer) {
if (buffer != null) {
consumer.accept(buffer);
}
}
public boolean isPresent() {
return buffer != null;
}
public boolean isEmpty() {
return buffer == null;
}
public <U> Optional<U> map(Function<Buf, U> mapper) {
if (buffer != null) {
return Optional.of(mapper.apply(buffer));
} else {
return Optional.empty();
}
}
}
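A short sketch of the Optional-like wrapper above; singleton is assumed in scope:
OptionalBuf maybe = OptionalBuf.ofNullable(singleton.get(null));
maybe.ifPresent(buf -> System.out.println("found " + buf));
Buf fallback = maybe.orElse(Buf.createZeroes(Long.BYTES));
Optional<Integer> size = maybe.map(Buf::size); // Buf.size() as seen elsewhere in this diff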

View File

@@ -0,0 +1,107 @@
package it.cavallium.dbengine.database;
public enum RocksDBLongProperty implements RocksDBProperty {
NUM_FILES_AT_LEVEL_0("num-files-at-level0"),
NUM_FILES_AT_LEVEL_1("num-files-at-level1"),
NUM_FILES_AT_LEVEL_2("num-files-at-level2"),
NUM_FILES_AT_LEVEL_3("num-files-at-level3"),
NUM_FILES_AT_LEVEL_4("num-files-at-level4"),
NUM_FILES_AT_LEVEL_5("num-files-at-level5"),
NUM_FILES_AT_LEVEL_6("num-files-at-level6"),
NUM_FILES_AT_LEVEL_7("num-files-at-level7"),
NUM_FILES_AT_LEVEL_8("num-files-at-level8"),
NUM_FILES_AT_LEVEL_9("num-files-at-level9"),
COMPRESSION_RATIO_AT_LEVEL_0("compression-ratio-at-level0"),
COMPRESSION_RATIO_AT_LEVEL_1("compression-ratio-at-level1"),
COMPRESSION_RATIO_AT_LEVEL_2("compression-ratio-at-level2"),
COMPRESSION_RATIO_AT_LEVEL_3("compression-ratio-at-level3"),
COMPRESSION_RATIO_AT_LEVEL_4("compression-ratio-at-level4"),
COMPRESSION_RATIO_AT_LEVEL_5("compression-ratio-at-level5"),
COMPRESSION_RATIO_AT_LEVEL_6("compression-ratio-at-level6"),
COMPRESSION_RATIO_AT_LEVEL_7("compression-ratio-at-level7"),
COMPRESSION_RATIO_AT_LEVEL_8("compression-ratio-at-level8"),
COMPRESSION_RATIO_AT_LEVEL_9("compression-ratio-at-level9"),
NUM_IMMUTABLE_MEM_TABLE("num-immutable-mem-table"),
NUM_IMMUTABLE_MEM_TABLE_FLUSHED("num-immutable-mem-table-flushed"),
MEM_TABLE_FLUSH_PENDING("mem-table-flush-pending"),
NUM_RUNNING_FLUSHES("num-running-flushes"),
COMPACTION_PENDING("compaction-pending"),
NUM_RUNNING_COMPACTIONS("num-running-compactions"),
BACKGROUND_ERRORS("background-errors"),
CUR_SIZE_ACTIVE_MEM_TABLE("cur-size-active-mem-table"),
CUR_SIZE_ALL_MEM_TABLES("cur-size-all-mem-tables"),
SIZE_ALL_MEM_TABLES("size-all-mem-tables"),
NUM_ENTRIES_ACTIVE_MEM_TABLE("num-entries-active-mem-table"),
NUM_ENTRIES_IMMUTABLE_MEM_TABLES("num-entries-imm-mem-tables"),
NUM_DELETES_ACTIVE_MEM_TABLE("num-deletes-active-mem-table"),
NUM_DELETES_IMMUTABLE_MEM_TABLES("num-deletes-imm-mem-tables"),
ESTIMATE_NUM_KEYS("estimate-num-keys"),
ESTIMATE_TABLE_READERS_MEM("estimate-table-readers-mem"),
IS_FILE_DELETIONS_ENABLED("is-file-deletions-enabled"),
NUM_SNAPSHOTS("num-snapshots"),
OLDEST_SNAPSHOT_TIME("oldest-snapshot-time"),
OLDEST_SNAPSHOT_SEQUENCE("oldest-snapshot-sequence"),
NUM_LIVE_VERSIONS("num-live-versions"),
CURRENT_SUPER_VERSION_NUMBER("current-super-version-number"),
ESTIMATE_LIVE_DATA_SIZE("estimate-live-data-size"),
MIN_LOG_NUMBER_TO_KEEP("min-log-number-to-keep"),
MIN_OBSOLETE_SST_NUMBER_TO_KEEP("min-obsolete-sst-number-to-keep"),
TOTAL_SST_FILES_SIZE("total-sst-files-size"),
LIVE_SST_FILES_SIZE("live-sst-files-size"),
LIVE_SST_FILES_SIZE_AT_TEMPERATURE("live-sst-files-size-at-temperature"),
BASE_LEVEL("base-level"),
ESTIMATE_PENDING_COMPACTION_BYTES("estimate-pending-compaction-bytes"),
ACTUAL_DELAYED_WRITE_RATE("actual-delayed-write-rate"),
IS_WRITE_STOPPED("is-write-stopped"),
ESTIMATE_OLDEST_KEY_TIME("estimate-oldest-key-time"),
BLOCK_CACHE_CAPACITY("block-cache-capacity", false),
BLOCK_CACHE_USAGE("block-cache-usage", false),
BLOCK_CACHE_PINNED_USAGE("block-cache-pinned-usage", false),
NUM_BLOB_FILES("num-blob-files"),
TOTAL_BLOB_FILE_SIZE("total-blob-file-size"),
LIVE_BLOB_FILE_SIZE("live-blob-file-size"),
LIVE_BLOB_FILE_GARBAGE_SIZE("live-blob-file-garbage-size"),
FILE_READ_DB_OPEN_MICROS("file.read.db.open.micros")
;
private final String name;
private final boolean dividedByColumnFamily;
RocksDBLongProperty(String name) {
this(name, true);
}
RocksDBLongProperty(String name, boolean dividedByColumnFamily) {
this.name = name;
this.dividedByColumnFamily = dividedByColumnFamily;
}
@Override
public String toString() {
return "rocksdb." + name;
}
@Override
public String getName() {
return "rocksdb." + name;
}
@Override
public boolean isNumeric() {
return true;
}
@Override
public boolean isMap() {
return false;
}
@Override
public boolean isString() {
return false;
}
public boolean isDividedByColumnFamily() {
return dividedByColumnFamily;
}
}
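Each constant maps one-to-one onto RocksDB's native property string; for example:
String name = RocksDBLongProperty.ESTIMATE_NUM_KEYS.getName(); // "rocksdb.estimate-num-keys"
boolean perCf = RocksDBLongProperty.BLOCK_CACHE_USAGE.isDividedByColumnFamily(); // false: the block cache is shared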

View File

@@ -0,0 +1,40 @@
package it.cavallium.dbengine.database;
public enum RocksDBMapProperty implements RocksDBProperty {
CFSTATS("cfstats"),
DBSTATS("dbstats"),
BLOCK_CACHE_ENTRY_STATS("block-cache-entry-stats"),
AGGREGATED_TABLE_PROPERTIES("aggregated-table-properties"),
;
private final String name;
RocksDBMapProperty(String name) {
this.name = name;
}
@Override
public String toString() {
return "rocksdb." + name;
}
@Override
public String getName() {
return "rocksdb." + name;
}
@Override
public boolean isNumeric() {
return false;
}
@Override
public boolean isMap() {
return true;
}
@Override
public boolean isString() {
return false;
}
}

View File

@@ -0,0 +1,16 @@
package it.cavallium.dbengine.database;
public interface RocksDBProperty {
/**
* Get the RocksDB property name
* @return the name, with the "rocksdb." prefix included
*/
String getName();
boolean isNumeric();
boolean isMap();
boolean isString();
}

View File

@@ -0,0 +1,53 @@
package it.cavallium.dbengine.database;
public enum RocksDBStringProperty implements RocksDBProperty {
STATS("stats"),
SSTABLES("sstables"),
CFSTATS_NO_FILE_HISTOGRAM("cfstats-no-file-histogram"),
CF_FILE_HISTOGRAM("cf-file-histogram"),
LEVELSTATS("levelstats"),
AGGREGATED_TABLE_PROPERTIES_AT_LEVEL_0("aggregated-table-properties-at-level0"),
AGGREGATED_TABLE_PROPERTIES_AT_LEVEL_1("aggregated-table-properties-at-level1"),
AGGREGATED_TABLE_PROPERTIES_AT_LEVEL_2("aggregated-table-properties-at-level2"),
AGGREGATED_TABLE_PROPERTIES_AT_LEVEL_3("aggregated-table-properties-at-level3"),
AGGREGATED_TABLE_PROPERTIES_AT_LEVEL_4("aggregated-table-properties-at-level4"),
AGGREGATED_TABLE_PROPERTIES_AT_LEVEL_5("aggregated-table-properties-at-level5"),
AGGREGATED_TABLE_PROPERTIES_AT_LEVEL_6("aggregated-table-properties-at-level6"),
AGGREGATED_TABLE_PROPERTIES_AT_LEVEL_7("aggregated-table-properties-at-level7"),
AGGREGATED_TABLE_PROPERTIES_AT_LEVEL_8("aggregated-table-properties-at-level8"),
AGGREGATED_TABLE_PROPERTIES_AT_LEVEL_9("aggregated-table-properties-at-level9"),
OPTIONS_STATISTICS("options-statistics"),
BLOB_STATS("blob-stats")
;
private final String name;
RocksDBStringProperty(String name) {
this.name = name;
}
@Override
public String toString() {
return "rocksdb." + name;
}
@Override
public String getName() {
return "rocksdb." + name;
}
@Override
public boolean isNumeric() {
return false;
}
@Override
public boolean isMap() {
return false;
}
@Override
public boolean isString() {
return true;
}
}

View File

@@ -2,6 +2,5 @@ package it.cavallium.dbengine.database;
public interface SafeCloseable extends AutoCloseable {
@Override
void close();
}

View File

@@ -0,0 +1,5 @@
package it.cavallium.dbengine.database;
import it.cavallium.buffer.Buf;
public record SerializedKey<T>(T key, Buf serialized) {}

View File

@@ -0,0 +1,53 @@
package it.cavallium.dbengine.database;
import it.cavallium.dbengine.database.collections.DatabaseStage;
import java.util.Map.Entry;
import java.util.Objects;
public final class SubStageEntry<T, U extends DatabaseStage<?>> implements Entry<T, U> {
private final T key;
private final U value;
public SubStageEntry(T key, U value) {
this.key = key;
this.value = value;
}
@Override
public T getKey() {
return key;
}
@Override
public U getValue() {
return value;
}
@Override
public U setValue(U value) {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object obj) {
if (obj == this)
return true;
if (obj == null || obj.getClass() != this.getClass())
return false;
//noinspection rawtypes
var that = (SubStageEntry) obj;
return Objects.equals(this.key, that.key) && Objects.equals(this.value, that.value);
}
@Override
public int hashCode() {
return Objects.hash(key, value);
}
@Override
public String toString() {
return "SubStageEntry[" + "key=" + key + ", " + "value=" + value + ']';
}
}

View File

@@ -0,0 +1,5 @@
package it.cavallium.dbengine.database;
import org.rocksdb.TableProperties;
public record TableWithProperties(String column, String table, TableProperties properties) {}

View File

@@ -1,31 +1,28 @@
package it.cavallium.dbengine.database.collections;
import io.net5.buffer.api.Buffer;
import io.net5.buffer.api.BufferAllocator;
import io.net5.buffer.api.Drop;
import io.net5.buffer.api.Send;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.Serializer;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
public class DatabaseEmpty {
@SuppressWarnings({"unused", "InstantiationOfUtilityClass"})
public static final Nothing NOTHING = new Nothing();
public static Serializer<Nothing> nothingSerializer(BufferAllocator bufferAllocator) {
public static Serializer<Nothing> nothingSerializer() {
return new Serializer<>() {
@Override
public @NotNull Nothing deserialize(@NotNull Buffer serialized) {
public @NotNull Nothing deserialize(@NotNull BufDataInput in) throws SerializationException {
return NOTHING;
}
@Override
public void serialize(@NotNull Nothing deserialized, Buffer output) {
public void serialize(@NotNull Nothing deserialized, BufDataOutput out) throws SerializationException {
}
@@ -39,8 +36,8 @@ public class DatabaseEmpty {
private DatabaseEmpty() {
}
public static DatabaseStageEntry<Nothing> create(LLDictionary dictionary, Buffer key, Runnable onClose) {
return new DatabaseSingle<>(dictionary, key, nothingSerializer(dictionary.getAllocator()), onClose);
public static DatabaseStageEntry<Nothing> create(LLDictionary dictionary, Buf key) {
return new DatabaseMapSingle<>(dictionary, key, nothingSerializer());
}
public static final class Nothing {

View File

@@ -1,30 +1,47 @@
package it.cavallium.dbengine.database.collections;
import com.google.common.primitives.Ints;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.database.LLKeyValueDatabaseStructure;
import it.cavallium.dbengine.database.LLSingleton;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import java.util.concurrent.ForkJoinPool;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;
public class DatabaseInt implements LLKeyValueDatabaseStructure {
private final LLSingleton singleton;
private final SerializerFixedBinaryLength<Integer> serializer;
public DatabaseInt(LLSingleton singleton) {
this.singleton = singleton;
this.serializer = SerializerFixedBinaryLength.intSerializer();
}
public Mono<Integer> get(@Nullable LLSnapshot snapshot) {
  return singleton.get(snapshot).map(Ints::fromByteArray);
}
public Integer get(@Nullable LLSnapshot snapshot) {
  var result = singleton.get(snapshot);
  return serializer.deserialize(BufDataInput.create(result));
}
public Mono<Void> set(int value) {
  return singleton.set(Ints.toByteArray(value));
}
public void set(int value) {
  var buf = BufDataOutput.createLimited(Integer.BYTES);
  serializer.serialize(value, buf);
  singleton.set(buf.asList());
}
@Override
public String getDatabaseName() {
return singleton.getDatabaseName();
}
@Override
public ForkJoinPool getDbReadPool() {
return singleton.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return singleton.getDbWritePool();
}
}
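A usage sketch of the now-blocking wrapper; db is an assumed open database and the names are illustrative:
DatabaseInt schemaVersion = db.getInteger("singletons", "schema-version", 1);
schemaVersion.set(2);
Integer current = schemaVersion.get(null); // a null snapshot reads the latest state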

View File

@@ -1,37 +1,95 @@
package it.cavallium.dbengine.database.collections;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.database.LLKeyValueDatabaseStructure;
import it.cavallium.dbengine.database.LLSingleton;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import java.util.concurrent.ForkJoinPool;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;
public class DatabaseLong implements LLKeyValueDatabaseStructure {
private final LLSingleton singleton;
private final SerializerFixedBinaryLength<Long> serializer;
private final SerializerFixedBinaryLength<Integer> bugSerializer;
public DatabaseLong(LLSingleton singleton) {
this.singleton = singleton;
this.serializer = SerializerFixedBinaryLength.longSerializer();
this.bugSerializer = SerializerFixedBinaryLength.intSerializer();
}
public Mono<Long> get(@Nullable LLSnapshot snapshot) {
  return singleton.get(snapshot).map(array -> {
    if (array.length == 4) {
      return (long) Ints.fromByteArray(array);
    } else {
      return Longs.fromByteArray(array);
    }
  });
}
public Long get(@Nullable LLSnapshot snapshot) {
  var result = BufDataInput.create(singleton.get(snapshot));
  if (result.available() == 4) {
    return (long) (int) bugSerializer.deserialize(result);
  } else {
    return serializer.deserialize(result);
  }
}
public Mono<Void> set(long value) {
  return singleton.set(Longs.toByteArray(value));
}
public Long incrementAndGet() {
return addAnd(1, UpdateReturnMode.GET_NEW_VALUE);
}
public Long getAndIncrement() {
return addAnd(1, UpdateReturnMode.GET_OLD_VALUE);
}
public Long decrementAndGet() {
return addAnd(-1, UpdateReturnMode.GET_NEW_VALUE);
}
public Long getAndDecrement() {
return addAnd(-1, UpdateReturnMode.GET_OLD_VALUE);
}
public Long addAndGet(long count) {
return addAnd(count, UpdateReturnMode.GET_NEW_VALUE);
}
public Long getAndAdd(long count) {
return addAnd(count, UpdateReturnMode.GET_OLD_VALUE);
}
private Long addAnd(long count, UpdateReturnMode updateReturnMode) {
var result = singleton.update(prev -> {
if (prev != null) {
var prevLong = prev.getLong(0);
var buf = Buf.createZeroes(Long.BYTES);
buf.setLong(0, prevLong + count);
return buf;
} else {
var buf = Buf.createZeroes(Long.BYTES);
buf.setLong(0, count);
return buf;
}
}, updateReturnMode);
return result.getLong(0);
}
public void set(long value) {
var buf = BufDataOutput.createLimited(Long.BYTES);
serializer.serialize(value, buf);
singleton.set(buf.asList());
}
@Override
public String getDatabaseName() {
return singleton.getDatabaseName();
}
@Override
public ForkJoinPool getDbReadPool() {
return singleton.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return singleton.getDbWritePool();
}
}
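The new counter helpers in use (db assumed open); note that get() above still tolerates legacy 4-byte values through bugSerializer:
DatabaseLong visits = db.getLong("counters", "visits", 0L);
long now = visits.incrementAndGet(); // atomic read-modify-write via LLSingleton.update
long prev = visits.getAndAdd(10L);   // returns the value before adding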

View File

@@ -1,507 +1,618 @@
package it.cavallium.dbengine.database.collections;
import io.net5.buffer.api.Buffer;
import io.net5.buffer.api.Resource;
import io.net5.buffer.api.Send;
import io.net5.buffer.api.internal.ResourceSupport;
import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;
import com.google.common.collect.Lists;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLDictionaryResultType;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.SerializedKey;
import it.cavallium.dbengine.database.SubStageEntry;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.disk.CachedSerializationFunction;
import it.cavallium.dbengine.database.disk.LLLocalDictionary;
import it.cavallium.dbengine.database.disk.RocksDBFile.RocksDBFileIterationKeyState.RocksDBFileIterationStateKeyError;
import it.cavallium.dbengine.database.disk.RocksDBFile.RocksDBFileIterationKeyState.RocksDBFileIterationStateKeyOk;
import it.cavallium.dbengine.database.disk.RocksDBFile.RocksDBFileIterationState.RocksDBFileIterationStateBegin;
import it.cavallium.dbengine.database.disk.RocksDBFile.RocksDBFileIterationState.RocksDBFileIterationStateEnd;
import it.cavallium.dbengine.database.disk.RocksDBFile.RocksDBFileIterationState.RocksDBFileIterationStateKey;
import it.cavallium.dbengine.database.disk.SSTRange.SSTRangeFull;
import it.cavallium.dbengine.database.serialization.KVSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import java.util.Collections;
import java.util.HashMap;
import it.cavallium.dbengine.utils.StreamUtils;
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMaps;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.SynchronousSink;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;
import org.rocksdb.RocksDBException;
/**
* Optimized implementation of "DatabaseMapDictionary with SubStageGetterSingle"
*/
public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U, DatabaseStageEntry<U>> {
private static final Logger LOG = LogManager.getLogger(DatabaseMapDictionary.class);
private final AtomicLong totalZeroBytesErrors = new AtomicLong();
private final Serializer<U> valueSerializer;
protected DatabaseMapDictionary(LLDictionary dictionary,
@Nullable Buffer prefixKey,
@Nullable Buf prefixKey,
SerializerFixedBinaryLength<T> keySuffixSerializer,
Serializer<U> valueSerializer,
Runnable onClose) {
Serializer<U> valueSerializer) {
// Do not retain or release or use the prefixKey here
super(dictionary, prefixKey, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0, onClose);
super(dictionary, prefixKey, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0);
this.valueSerializer = valueSerializer;
}
public static <T, U> DatabaseMapDictionary<T, U> simple(LLDictionary dictionary,
SerializerFixedBinaryLength<T> keySerializer,
Serializer<U> valueSerializer,
Runnable onClose) {
return new DatabaseMapDictionary<>(dictionary, null, keySerializer,
valueSerializer, onClose);
Serializer<U> valueSerializer) {
return new DatabaseMapDictionary<>(dictionary, null, keySerializer, valueSerializer);
}
public static <T, U> DatabaseMapDictionary<T, U> tail(LLDictionary dictionary,
@Nullable Buffer prefixKey,
@Nullable Buf prefixKey,
SerializerFixedBinaryLength<T> keySuffixSerializer,
Serializer<U> valueSerializer,
Runnable onClose) {
return new DatabaseMapDictionary<>(dictionary, prefixKey, keySuffixSerializer, valueSerializer, onClose);
Serializer<U> valueSerializer) {
return new DatabaseMapDictionary<>(dictionary, prefixKey, keySuffixSerializer, valueSerializer);
}
private void deserializeValue(Send<Buffer> valueToReceive, SynchronousSink<U> sink) {
  try (var value = valueToReceive.receive()) {
    sink.next(valueSerializer.deserialize(value));
  } catch (Throwable ex) {
    sink.error(ex);
  }
}
public static <K, V> Stream<Entry<K, V>> getLeavesFrom(DatabaseMapDictionary<K, V> databaseMapDictionary,
CompositeSnapshot snapshot,
@Nullable K keyMin,
@Nullable K keyMax,
boolean reverse,
boolean smallRange) {
if (keyMin != null || keyMax != null) {
return databaseMapDictionary.getAllEntries(snapshot,
keyMin,
keyMax,
reverse,
smallRange,
Map::entry
);
} else {
return databaseMapDictionary.getAllEntries(snapshot, smallRange, Map::entry);
}
}
private Buffer serializeValue(U value) throws SerializationException {
public static <K> Stream<K> getKeyLeavesFrom(DatabaseMapDictionary<K, ?> databaseMapDictionary,
CompositeSnapshot snapshot,
@Nullable K keyMin,
@Nullable K keyMax,
boolean reverse,
boolean smallRange) {
Stream<? extends Entry<K, ? extends DatabaseStageEntry<?>>> stagesFlux;
if (keyMin != null || keyMax != null) {
stagesFlux = databaseMapDictionary.getAllStages(snapshot, keyMin, keyMax, reverse, smallRange);
} else {
stagesFlux = databaseMapDictionary.getAllStages(snapshot, smallRange);
}
return stagesFlux.map(Entry::getKey);
}
private U deserializeValue(Buf value) {
return valueSerializer.deserialize(BufDataInput.create(value));
}
private @Nullable U deserializeValue(T keySuffix, BufDataInput value) {
try {
return valueSerializer.deserialize(value);
} catch (IndexOutOfBoundsException ex) {
var exMessage = ex.getMessage();
if (exMessage != null && exMessage.contains("read 0 to 0, write 0 to ")) {
var totalZeroBytesErrors = this.totalZeroBytesErrors.incrementAndGet();
if (totalZeroBytesErrors < 512 || totalZeroBytesErrors % 10000 == 0) {
var keySuffixBytes = serializeKeySuffixToKey(keySuffix);
try {
LOG.error(
"Unexpected zero-bytes value at " + dictionary.getDatabaseName() + ":" + dictionary.getColumnName()
+ ":" + LLUtils.toStringSafe(keyPrefix) + ":" + keySuffix + "(" + LLUtils.toStringSafe(
keySuffixBytes) + ") total=" + totalZeroBytesErrors);
} catch (SerializationException e) {
LOG.error(
"Unexpected zero-bytes value at " + dictionary.getDatabaseName() + ":" + dictionary.getColumnName()
+ ":" + LLUtils.toStringSafe(keyPrefix) + ":" + keySuffix + "(?) total=" + totalZeroBytesErrors);
}
}
return null;
} else {
throw ex;
}
}
}
private Buf serializeValue(U value) throws SerializationException {
var valSizeHint = valueSerializer.getSerializedSizeHint();
if (valSizeHint == -1) valSizeHint = 128;
var valBuf = BufDataOutput.create(valSizeHint);
try {
valueSerializer.serialize(value, valBuf);
} catch (SerializationException ex) {
throw ex;
} catch (Exception ex) {
throw new SerializationException("Failed to serialize value", ex);
}
return valBuf.asList();
}
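// Illustrative Serializer implementation matching the contract used above:
// write into a BufDataOutput, read back from a BufDataInput, and report -1 as
// the size hint when the size is unknown. It assumes BufDataOutput/BufDataInput
// expose the usual java.io.DataOutput/DataInput primitives (writeInt/readInt,
// writeByte/readByte); adapt if the real Buf API differs.
static final Serializer<String> EXAMPLE_UTF8_SERIALIZER = new Serializer<>() {
@Override
public void serialize(String value, BufDataOutput out) {
var bytes = value.getBytes(java.nio.charset.StandardCharsets.UTF_8);
out.writeInt(bytes.length);
for (byte b : bytes) {
out.writeByte(b);
}
}
@Override
public String deserialize(BufDataInput in) {
var bytes = new byte[in.readInt()];
for (int i = 0; i < bytes.length; i++) {
bytes[i] = in.readByte();
}
return new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
}
@Override
public int getSerializedSizeHint() {
return -1;
}
};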
private Buf serializeKeySuffixToKey(T keySuffix) throws SerializationException {
BufDataOutput keyBuf = BufDataOutput.createLimited(keyPrefixLength + keySuffixLength + keyExtLength);
if (keyPrefix != null) {
keyBuf.writeBytes(keyPrefix);
}
assert keyBuf.size() == keyPrefixLength;
serializeSuffixTo(keySuffix, keyBuf);
assert keyBuf.size() == keyPrefixLength + keySuffixLength + keyExtLength;
return keyBuf.asList();
}
private Buf toKey(Buf suffixKey) {
assert suffixKeyLengthConsistency(suffixKey.size());
if (keyPrefix != null) {
var result = keyPrefix.copy();
result.addAll(suffixKey);
assert result.size() == keyPrefixLength + keySuffixLength + keyExtLength;
return result;
} else {
assert suffixKey.size() == keyPrefixLength + keySuffixLength + keyExtLength;
return suffixKey;
}
}
@Override
public Object2ObjectSortedMap<T, U> get(@Nullable CompositeSnapshot snapshot) {
Stream<Entry<T, U>> stream = dictionary
.getRange(resolveSnapshot(snapshot), range, false, true)
.map(entry -> {
// The serialized key becomes suffix+ext once the prefix is skipped
var serializedKey = BufDataInput.create(entry.getKey());
var serializedValue = BufDataInput.create(entry.getValue());
serializedKey.skipNBytes(keyPrefixLength);
suffixAndExtKeyConsistency(serializedKey.available());
T key = deserializeSuffix(serializedKey);
U value = valueSerializer.deserialize(serializedValue);
return Map.entry(key, value);
});
var map = StreamUtils.collect(stream,
Collectors.toMap(Entry::getKey, Entry::getValue, (a, b) -> a, Object2ObjectLinkedOpenHashMap::new)
);
return map == null || map.isEmpty() ? null : map;
}
@Override
public Object2ObjectSortedMap<T, U> setAndGetPrevious(Object2ObjectSortedMap<T, U> value) {
Object2ObjectSortedMap<T, U> prev = this.get(null);
if (value == null || value.isEmpty()) {
dictionary.clear();
} else {
dictionary.setRange(range, value.entrySet().stream().map(this::serializeEntry), true);
}
return prev != null && prev.isEmpty() ? null : prev;
}
@Override
public Object2ObjectSortedMap<T, U> clearAndGetPrevious() {
return this.setAndGetPrevious(Object2ObjectSortedMaps.emptyMap());
}
@Override
public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return dictionary.sizeRange(resolveSnapshot(snapshot), range, fast);
}
@Override
public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
return dictionary.isRangeEmpty(resolveSnapshot(snapshot), range, false);
}
@Override
public @NotNull DatabaseStageEntry<U> at(@Nullable CompositeSnapshot snapshot, T keySuffix) {
return new DatabaseMapSingle<>(dictionary, serializeKeySuffixToKey(keySuffix), valueSerializer);
}
@Override
public boolean containsKey(@Nullable CompositeSnapshot snapshot, T keySuffix) {
return !dictionary.isRangeEmpty(resolveSnapshot(snapshot),
LLRange.single(serializeKeySuffixToKey(keySuffix)), true);
}
@Override
public U getValue(@Nullable CompositeSnapshot snapshot, T keySuffix) {
var keySuffixBuf = serializeKeySuffixToKey(keySuffix);
Buf value = dictionary.get(resolveSnapshot(snapshot), keySuffixBuf);
return value != null ? deserializeValue(keySuffix, BufDataInput.create(value)) : null;
}
@Override
public void putValue(T keySuffix, U value) {
var key = serializeKeySuffixToKey(keySuffix);
var serializedValue = serializeValue(value);
dictionary.put(key, serializedValue, LLDictionaryResultType.VOID);
}
@Override
public UpdateMode getUpdateMode() {
return dictionary.getUpdateMode();
}
@Override
public U updateValue(T keySuffix,
UpdateReturnMode updateReturnMode,
SerializationFunction<@Nullable U, @Nullable U> updater) {
var key = serializeKeySuffixToKey(keySuffix);
var serializedUpdater = getSerializedUpdater(updater);
dictionary.update(key, serializedUpdater, UpdateReturnMode.NOTHING);
return serializedUpdater.getResult(updateReturnMode);
}
@Override
public Delta<U> updateValueAndGetDelta(T keySuffix, SerializationFunction<@Nullable U, @Nullable U> updater) {
var key = serializeKeySuffixToKey(keySuffix);
var serializedUpdater = getSerializedUpdater(updater);
dictionary.update(key, serializedUpdater, UpdateReturnMode.NOTHING);
return serializedUpdater.getDelta();
}
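// Usage sketch: an atomic read-modify-write through updateValue. The updater
// sees the deserialized old value (null if absent), and returning null deletes
// the entry. The GET_NEW_VALUE constant and the String/Long counter shape are
// assumptions for illustration.
static long increment(DatabaseMapDictionary<String, Long> counters, String key) {
return counters.updateValue(key, UpdateReturnMode.GET_NEW_VALUE, old -> old == null ? 1L : old + 1L);
}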
public CachedSerializationFunction<U, Buf, Buf> getSerializedUpdater(SerializationFunction<@Nullable U, @Nullable U> updater) {
return new CachedSerializationFunction<>(updater, this::serializeValue, this::deserializeValue);
}
public KVSerializationFunction<@NotNull T, @Nullable Buf, @Nullable Buf> getSerializedUpdater(
KVSerializationFunction<@NotNull T, @Nullable U, @Nullable U> updater) {
return (key, oldSerialized) -> {
U result;
if (oldSerialized == null) {
result = updater.apply(key, null);
} else {
result = updater.apply(key, valueSerializer.deserialize(BufDataInput.create(oldSerialized)));
}
if (result == null) {
return null;
} else {
return serializeValue(result);
}
};
}
@Override
public U putValueAndGetPrevious(T keySuffix, U value) {
var key = serializeKeySuffixToKey(keySuffix);
var serializedValue = serializeValue(value);
var valueBuf = dictionary.put(key, serializedValue, LLDictionaryResultType.PREVIOUS_VALUE);
if (valueBuf == null) {
return null;
}
return deserializeValue(keySuffix, BufDataInput.create(valueBuf));
}
@Override
public boolean putValueAndGetChanged(T keySuffix, U value) {
var key = serializeKeySuffixToKey(keySuffix);
var serializedValue = serializeValue(value);
var oldValueBuf = dictionary.put(key, serializedValue, LLDictionaryResultType.PREVIOUS_VALUE);
var oldValue = oldValueBuf != null ? deserializeValue(keySuffix, BufDataInput.create(oldValueBuf)) : null;
if (oldValue == null) {
return value != null;
} else {
return !Objects.equals(oldValue, value);
}
}
@Override
public void remove(T keySuffix) {
var keyMono = serializeKeySuffixToKey(keySuffix);
dictionary.remove(keyMono, LLDictionaryResultType.VOID);
}
@Override
public U removeAndGetPrevious(T keySuffix) {
var keyMono = serializeKeySuffixToKey(keySuffix);
var valueBuf = dictionary.remove(keyMono, LLDictionaryResultType.PREVIOUS_VALUE);
return valueBuf != null ? deserializeValue(keySuffix, BufDataInput.create(valueBuf)) : null;
}
@Override
public boolean removeAndGetStatus(T keySuffix) {
var keyMono = serializeKeySuffixToKey(keySuffix);
return LLUtils.responseToBoolean(dictionary.remove(keyMono, LLDictionaryResultType.PREVIOUS_VALUE_EXISTENCE));
}
@Override
public Stream<Optional<U>> getMulti(@Nullable CompositeSnapshot snapshot, Stream<T> keys) {
var mappedKeys = keys.map(keySuffix -> serializeKeySuffixToKey(keySuffix));
return dictionary
.getMulti(resolveSnapshot(snapshot), mappedKeys)
.map(valueBufOpt -> {
if (valueBufOpt.isPresent()) {
return Optional.of(valueSerializer.deserialize(BufDataInput.create(valueBufOpt.get())));
} else {
return Optional.empty();
}
});
}
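// Usage sketch: batched point lookups; absent keys come back as
// Optional.empty(). The 1:1 mapping above suggests the output stream is
// positionally aligned with the input keys, though that is inferred, not
// documented here.
static List<Optional<Long>> lookupAll(DatabaseMapDictionary<String, Long> counters, List<String> keys) {
return counters.getMulti(null, keys.stream()).toList();
}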
private LLEntry serializeEntry(T keySuffix, U value) throws SerializationException {
var key = serializeKeySuffixToKey(keySuffix);
var serializedValue = serializeValue(value);
return LLEntry.of(key, serializedValue);
}
private LLEntry serializeEntry(Entry<T, U> entry) throws SerializationException {
return serializeEntry(entry.getKey(), entry.getValue());
}
@Override
public void putMulti(Stream<Entry<T, U>> entries) {
try (var serializedEntries = entries.map(entry -> serializeEntry(entry))) {
dictionary.putMulti(serializedEntries);
}
}
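// Usage sketch: bulk-loading the contents of an in-memory map in one call;
// the source map and its String/Long types are illustrative.
static void importAll(DatabaseMapDictionary<String, Long> counters, Map<String, Long> source) {
counters.putMulti(source.entrySet().stream());
}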
@Override
public Stream<Boolean> updateMulti(Stream<T> keys,
KVSerializationFunction<T, @Nullable U, @Nullable U> updater) {
var serializedKeys = keys.map(keySuffix -> new SerializedKey<>(keySuffix, serializeKeySuffixToKey(keySuffix)));
var serializedUpdater = getSerializedUpdater(updater);
return dictionary.updateMulti(serializedKeys, serializedUpdater);
}
@Override
public Stream<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllStages(snapshot, range, false, smallRange);
}
private LLRange getPatchedRange(@NotNull LLRange range, @Nullable T keyMin, @Nullable T keyMax)
throws SerializationException {
Buf keyMinBuf = serializeSuffixForRange(keyMin);
if (keyMinBuf == null) {
keyMinBuf = range.getMin();
}
Buf keyMaxBuf = serializeSuffixForRange(keyMax);
if (keyMaxBuf == null) {
keyMaxBuf = range.getMax();
}
return LLRange.of(keyMinBuf, keyMaxBuf);
}
private Buf serializeSuffixForRange(@Nullable T key) throws SerializationException {
if (key == null) {
return null;
}
var keyWithoutExtBuf = BufDataOutput.createLimited(keyPrefixLength + keySuffixLength);
if (keyPrefix != null) {
keyWithoutExtBuf.writeBytes(keyPrefix);
}
serializeSuffixTo(key, keyWithoutExtBuf);
return keyWithoutExtBuf.asList();
}
/**
* Get all stages
* @param reverse if true, the results will go backwards from the specified key (inclusive)
*/
public Stream<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot,
@Nullable T keyMin,
@Nullable T keyMax,
boolean reverse,
boolean smallRange) {
if (keyMin == null && keyMax == null) {
return getAllStages(snapshot, smallRange);
} else {
LLRange boundedRange = getPatchedRange(range, keyMin, keyMax);
return getAllStages(snapshot, boundedRange, reverse, smallRange);
}
}
private Stream<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot,
LLRange sliceRange, boolean reverse, boolean smallRange) {
return dictionary
.getRangeKeys(resolveSnapshot(snapshot), sliceRange, reverse, smallRange)
.map(keyBuf -> {
assert keyBuf.size() == keyPrefixLength + keySuffixLength + keyExtLength;
// Remove prefix. Keep only the suffix and the ext
var suffixAndExtIn = BufDataInput.create(keyBuf);
suffixAndExtIn.skipBytes(keyPrefixLength);
suffixKeyLengthConsistency(suffixAndExtIn.available());
T keySuffix = deserializeSuffix(suffixAndExtIn);
var subStage = new DatabaseMapSingle<>(dictionary, keyBuf, valueSerializer);
return new SubStageEntry<>(keySuffix, subStage);
});
}
private Stream<T> getAllKeys(@Nullable CompositeSnapshot snapshot,
LLRange sliceRange, boolean reverse, boolean smallRange) {
return dictionary
.getRangeKeys(resolveSnapshot(snapshot), sliceRange, reverse, smallRange)
.map(keyBuf -> {
assert keyBuf.size() == keyPrefixLength + keySuffixLength + keyExtLength;
// Remove prefix. Keep only the suffix and the ext
var suffixAndExtIn = BufDataInput.create(keyBuf);
suffixAndExtIn.skipBytes(keyPrefixLength);
suffixKeyLengthConsistency(suffixAndExtIn.available());
return deserializeSuffix(suffixAndExtIn);
});
}
@Override
public Stream<Entry<T, U>> getAllEntries(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllEntries(snapshot, smallRange, Map::entry);
}
@Override
public Stream<U> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllEntries(snapshot, range, false, smallRange, (k, v) -> v);
}
@Override
public Stream<T> getAllKeys(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllKeys(snapshot, range, false, smallRange);
}
/**
* Get all entries
* @param reverse if true, the results will go backwards from the specified key (inclusive)
*/
public Stream<Entry<T, U>> getAllEntries(@Nullable CompositeSnapshot snapshot,
@Nullable T keyMin,
@Nullable T keyMax,
boolean reverse,
boolean smallRange) {
return getAllEntries(snapshot, keyMin, keyMax, reverse, smallRange, Map::entry);
}
/**
* Get all entries
* @param reverse if true, the results will go backwards from the specified key (inclusive)
*/
public <X> Stream<X> getAllEntries(@Nullable CompositeSnapshot snapshot,
@Nullable T keyMin,
@Nullable T keyMax,
boolean reverse,
boolean smallRange,
BiFunction<T, U, X> mapper) {
if (keyMin == null && keyMax == null) {
return getAllEntries(snapshot, smallRange, mapper);
} else {
LLRange boundedRange = getPatchedRange(range, keyMin, keyMax);
return getAllEntries(snapshot, boundedRange, reverse, smallRange, mapper);
}
}
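// Usage sketch: a bounded, descending scan through the ranged overload above.
// Passing null for a bound leaves that side of the range open; reverse=true
// iterates the same range backwards. Names and types are illustrative.
static Stream<Entry<String, Long>> descending(DatabaseMapDictionary<String, Long> dict,
String keyMin, String keyMax) {
return dict.getAllEntries(null, keyMin, keyMax, true, false);
}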
private <X> Stream<X> getAllEntries(@Nullable CompositeSnapshot snapshot, boolean smallRange, BiFunction<T, U, X> mapper) {
return getAllEntries(snapshot, range, false, smallRange, mapper);
}
private <X> Stream<X> getAllEntries(@Nullable CompositeSnapshot snapshot,
LLRange sliceRange,
boolean reverse,
boolean smallRange,
BiFunction<T, U, X> mapper) {
return dictionary
.getRange(resolveSnapshot(snapshot), sliceRange, reverse, smallRange)
.map(serializedEntry -> {
var keyBuf = serializedEntry.getKey();
assert keyBuf != null;
assert keyBuf.size() == keyPrefixLength + keySuffixLength + keyExtLength;
// Remove prefix. Keep only the suffix and the ext
var suffixAndExtIn = BufDataInput.create(keyBuf);
suffixAndExtIn.skipBytes(keyPrefixLength);
assert suffixKeyLengthConsistency(suffixAndExtIn.available());
T keySuffix = deserializeSuffix(suffixAndExtIn);
assert serializedEntry.getValue() != null;
U value = valueSerializer.deserialize(BufDataInput.create(serializedEntry.getValue()));
return mapper.apply(keySuffix, value);
});
}
@Override
public Stream<Entry<T, U>> setAllEntriesAndGetPrevious(Stream<Entry<T, U>> entries) {
return resourceStream(
() -> getAllEntries(null, false),
() -> dictionary.setRange(range, entries.map(entry -> serializeEntry(entry)), false)
);
}
@Override
public void clear() {
if (range.isAll()) {
dictionary.clear();
} else if (range.isSingle()) {
dictionary.remove(range.getSingleUnsafe(), LLDictionaryResultType.VOID);
} else {
dictionary.setRange(range, Stream.empty(), false);
}
}
public static <T, U> List<Stream<UnsafeSSTEntry<T, U>>> getAllEntriesFastUnsafe(DatabaseMapDictionary<T, U> dict,
boolean disableRocksdbChecks,
BiConsumer<UnsafeRawSSTEntry<T, U>, Throwable> deserializationErrorHandler) {
try {
var liveFiles = ((LLLocalDictionary) dict.dictionary).getAllLiveFiles();
return Lists.transform(liveFiles, file -> file.iterate(new SSTRangeFull(), disableRocksdbChecks)
.map(state -> switch (state) {
case RocksDBFileIterationStateBegin rocksDBFileIterationStateBegin:
yield null;
case RocksDBFileIterationStateEnd rocksDBFileIterationStateEnd:
yield null;
case RocksDBFileIterationStateKey rocksDBFileIterationStateKey:
yield switch (rocksDBFileIterationStateKey.state()) {
case RocksDBFileIterationStateKeyError e -> null;
case RocksDBFileIterationStateKeyOk rocksDBFileIterationStateKeyOk -> {
var key = rocksDBFileIterationStateKey.key();
var value = rocksDBFileIterationStateKeyOk.value();
try {
var deserializedKey = dict.deserializeSuffix(BufDataInput.create(key));
var deserializedValue = dict.deserializeValue(value);
yield new UnsafeSSTEntry<>(file,
deserializedKey,
deserializedValue,
key,
value,
k -> dict.deserializeSuffix(BufDataInput.create(k)),
dict::deserializeValue
);
} catch (Throwable t) {
if (deserializationErrorHandler != null) {
deserializationErrorHandler.accept(new UnsafeRawSSTEntry<>(file,
key,
value,
k -> dict.deserializeSuffix(BufDataInput.create(k)),
dict::deserializeValue
), t);
yield null;
} else {
throw t;
}
}
}
};
})
.filter(Objects::nonNull));
} catch (RocksDBException e) {
throw new RuntimeException(e);
}
}
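// Usage sketch: counting every readable SST entry while logging, instead of
// propagating, per-entry deserialization failures. The method above returns
// one stream per live SST file, so each is consumed and closed independently.
static <K, V> long countAllUnsafe(DatabaseMapDictionary<K, V> dict) {
long total = 0;
for (var fileEntries : getAllEntriesFastUnsafe(dict, false, (rawEntry, error) -> LOG.error("Skipping unreadable entry", error))) {
try (fileEntries) {
total += fileEntries.count();
}
}
return total;
}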


@@ -1,139 +1,118 @@
package it.cavallium.dbengine.database.collections;
import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLDictionaryResultType;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.SubStageEntry;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Stream;
import org.apache.commons.lang3.function.TriFunction;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.VisibleForTesting;
// todo: implement optimized methods (which?)
public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implements DatabaseStageMap<T, U, US> {
private static final Logger LOG = LogManager.getLogger(DatabaseMapDictionaryDeep.class);
protected final LLDictionary dictionary;
private final AtomicLong totalZeroBytesErrors = new AtomicLong();
protected final SubStageGetter<U, US> subStageGetter;
protected final SerializerFixedBinaryLength<T> keySuffixSerializer;
protected final int keyPrefixLength;
protected final int keySuffixLength;
protected final int keyExtLength;
protected final LLRange range;
protected Buf keyPrefix;
private static void incrementPrefix(Buf modifiablePrefix, int prefixLength) {
assert modifiablePrefix.size() >= prefixLength;
final var originalKeyLength = modifiablePrefix.size();
boolean overflowed = true;
final int ff = 0xFF;
int writtenBytes = 0;
for (int i = prefixLength - 1; i >= 0; i--) {
int iByte = Byte.toUnsignedInt(modifiablePrefix.getByte(i));
if (iByte != ff) {
modifiablePrefix.set(i, (byte) (iByte + 1));
writtenBytes++;
overflowed = false;
break;
} else {
modifiablePrefix.set(i, (byte) 0x00);
writtenBytes++;
}
}
assert prefixLength - writtenBytes >= 0;
if (overflowed) {
modifiablePrefix.add((byte) 0);
for (int i = 0; i < originalKeyLength; i++) {
modifiablePrefix.set(i, (byte) 0xFF);
}
modifiablePrefix.set(originalKeyLength, (byte) 0x00);
}
}
@VisibleForTesting
public static Buf firstRangeKey(Buf prefixKey, int prefixLength, Buf suffixAndExtZeroes) {
return createFullKeyWithEmptySuffixAndExt(prefixKey, prefixLength, suffixAndExtZeroes);
}
@VisibleForTesting
public static Buf nextRangeKey(Buf prefixKey, int prefixLength, Buf suffixAndExtZeroes) {
Buf modifiablePrefixKey = createFullKeyWithEmptySuffixAndExt(prefixKey, prefixLength, suffixAndExtZeroes);
incrementPrefix(modifiablePrefixKey, prefixLength);
return modifiablePrefixKey;
}
private static Buf createFullKeyWithEmptySuffixAndExt(Buf prefixKey, int prefixLength, Buf suffixAndExtZeroes) {
var modifiablePrefixKey = Buf.create(prefixLength + suffixAndExtZeroes.size());
if (prefixKey != null) {
modifiablePrefixKey.addAll(prefixKey);
}
assert prefixKey != null || prefixLength == 0 : "Prefix length is " + prefixLength + " but the prefix key is null";
zeroFillKeySuffixAndExt(modifiablePrefixKey, prefixLength, suffixAndExtZeroes);
return modifiablePrefixKey;
}
/**
 * @param modifiablePrefixKey This buffer's content will be modified
 */
protected static void zeroFillKeySuffixAndExt(@NotNull Buf modifiablePrefixKey, int prefixLength, Buf suffixAndExtZeroes) {
var suffixLengthAndExtLength = suffixAndExtZeroes.size();
assert modifiablePrefixKey.size() == prefixLength;
assert suffixLengthAndExtLength > 0 : "Suffix length + ext length is <= 0: " + suffixLengthAndExtLength;
modifiablePrefixKey.size(prefixLength);
modifiablePrefixKey.addAll(suffixAndExtZeroes);
assert modifiablePrefixKey.size() == prefixLength + suffixAndExtZeroes.size() : "Result buffer size is wrong";
}
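// Worked example of the range construction above, assuming a Buf.wrap(byte[])
// factory exists (only Buf.create/createZeroes appear in this class): with
// prefix {0x00, 0xFF} and two suffix+ext zero bytes, firstRangeKey returns
// {00 FF 00 00} and nextRangeKey carries the increment into the first byte,
// returning {01 00 00 00}. The half-open interval [first, next) then covers
// exactly the full keys that start with the prefix.
static void rangeKeyExample() {
var prefix = Buf.wrap(new byte[] {0x00, (byte) 0xFF});
var zeroes = Buf.createZeroes(2);
var first = firstRangeKey(prefix, 2, zeroes); // {0x00, 0xFF, 0x00, 0x00}
var next = nextRangeKey(prefix, 2, zeroes); // {0x01, 0x00, 0x00, 0x00}
}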
/**
@@ -141,107 +120,74 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
*/
@Deprecated
public static <T, U> DatabaseMapDictionaryDeep<T, U, DatabaseStageEntry<U>> simple(LLDictionary dictionary,
SerializerFixedBinaryLength<T> keySerializer, SubStageGetterSingle<U> subStageGetter) {
return new DatabaseMapDictionaryDeep<>(dictionary, null, keySerializer, subStageGetter, 0);
}
public static <T, U, US extends DatabaseStage<U>> DatabaseMapDictionaryDeep<T, U, US> deepTail(
LLDictionary dictionary, SerializerFixedBinaryLength<T> keySerializer, int keyExtLength,
SubStageGetter<U, US> subStageGetter) {
return new DatabaseMapDictionaryDeep<>(dictionary, null, keySerializer, subStageGetter, keyExtLength);
}
public static <T, U, US extends DatabaseStage<U>> DatabaseMapDictionaryDeep<T, U, US> deepIntermediate(
LLDictionary dictionary, Buf prefixKey, SerializerFixedBinaryLength<T> keySuffixSerializer,
SubStageGetter<U, US> subStageGetter, int keyExtLength) {
return new DatabaseMapDictionaryDeep<>(dictionary, prefixKey, keySuffixSerializer, subStageGetter, keyExtLength);
}
protected DatabaseMapDictionaryDeep(LLDictionary dictionary, @Nullable Buf prefixKey,
SerializerFixedBinaryLength<T> keySuffixSerializer, SubStageGetter<U, US> subStageGetter, int keyExtLength) {
this.dictionary = dictionary;
this.subStageGetter = subStageGetter;
this.keySuffixSerializer = keySuffixSerializer;
this.keyPrefixLength = prefixKey != null ? prefixKey.size() : 0;
this.keySuffixLength = keySuffixSerializer.getSerializedBinaryLength();
this.keyExtLength = keyExtLength;
var keySuffixAndExtZeroBuffer = Buf.createZeroes(keySuffixLength + keyExtLength);
assert keySuffixAndExtZeroBuffer.size() == keySuffixLength + keyExtLength :
"Key suffix and ext zero buffer readable length is not equal"
+ " to the key suffix length + key ext length. keySuffixAndExtZeroBuffer="
+ keySuffixAndExtZeroBuffer.size() + " keySuffixLength=" + keySuffixLength + " keyExtLength="
+ keyExtLength;
assert keySuffixAndExtZeroBuffer.size() > 0;
var firstKey = firstRangeKey(prefixKey, keyPrefixLength, keySuffixAndExtZeroBuffer);
var nextRangeKey = nextRangeKey(prefixKey, keyPrefixLength, keySuffixAndExtZeroBuffer);
assert keyPrefixLength == 0 || !LLUtils.equals(firstKey, nextRangeKey);
if (keyPrefixLength == 0) {
this.range = LLRange.all();
} else {
this.range = LLRange.of(firstKey, nextRangeKey);
}
assert subStageKeysConsistency(keyPrefixLength + keySuffixLength + keyExtLength);
this.keyPrefix = prefixKey;
}
private DatabaseMapDictionaryDeep(LLDictionary dictionary,
SubStageGetter<U, US> subStageGetter,
SerializerFixedBinaryLength<T> keySuffixSerializer,
int keyPrefixLength,
int keySuffixLength,
int keyExtLength,
LLRange range,
Buf keyPrefix) {
this.dictionary = dictionary;
this.subStageGetter = subStageGetter;
this.keySuffixSerializer = keySuffixSerializer;
this.keyPrefixLength = keyPrefixLength;
this.keySuffixLength = keySuffixLength;
this.keyExtLength = keyExtLength;
this.range = range;
this.keyPrefix = keyPrefix;
}
@SuppressWarnings("unused")
protected boolean suffixKeyLengthConsistency(int keySuffixLength) {
assert
this.keySuffixLength == keySuffixLength :
"Key suffix length is " + keySuffixLength + ", but it should be " + this.keySuffixLength + " bytes long";
//noinspection ConstantValue
return this.keySuffixLength == keySuffixLength;
}
@@ -256,16 +202,39 @@
}
/**
 * @return the prefix
 */
protected Buf prefixSubList(Buf key) {
assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength
|| key.size() == keyPrefixLength + keySuffixLength;
return key.subList(0, this.keyPrefixLength);
}
/**
* @return the suffix
*/
protected Buf suffixSubList(Buf key) {
assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength
|| key.size() == keyPrefixLength + keySuffixLength;
return key.subList(this.keyPrefixLength, keyPrefixLength + keySuffixLength);
}
/**
* @return the suffix
*/
protected Buf suffixAndExtSubList(Buf key) {
assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength
|| key.size() == keyPrefixLength + keySuffixLength;
return key.subList(this.keyPrefixLength, key.size());
}
/**
* @return the ext
*/
protected Buf extSubList(Buf key) {
assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength
|| key.size() == keyPrefixLength + keySuffixLength;
return key.subList(this.keyPrefixLength + this.keySuffixLength, key.size());
}
protected LLSnapshot resolveSnapshot(@Nullable CompositeSnapshot snapshot) {
@@ -277,75 +246,46 @@
}
@Override
public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return dictionary.sizeRange(resolveSnapshot(snapshot), range, fast);
}
@Override
public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
return dictionary.isRangeEmpty(resolveSnapshot(snapshot), range, false);
}
@Override
public @NotNull US at(@Nullable CompositeSnapshot snapshot, T keySuffix) {
BufDataOutput bufOutput = BufDataOutput.createLimited(keyPrefixLength + keySuffixLength + keyExtLength);
if (keyPrefix != null) {
bufOutput.writeBytes(keyPrefix);
}
serializeSuffixTo(keySuffix, bufOutput);
return this.subStageGetter.subStage(dictionary, snapshot, bufOutput.asList());
}
@Override
public UpdateMode getUpdateMode() {
return dictionary.getUpdateMode();
}
@Override
public Stream<DbProgress<SSTVerificationProgress>> verifyChecksum() {
return dictionary.verifyChecksum(range);
}
@Override
public Stream<SubStageEntry<T, US>> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return dictionary
.getRangeKeyPrefixes(resolveSnapshot(snapshot), range, keyPrefixLength + keySuffixLength, smallRange)
.map(groupKeyWithoutExt -> {
var splittedGroupSuffix = suffixSubList(groupKeyWithoutExt);
T deserializedSuffix = this.deserializeSuffix(BufDataInput.create(splittedGroupSuffix));
return new SubStageEntry<>(deserializedSuffix,
this.subStageGetter.subStage(dictionary, snapshot, groupKeyWithoutExt));
});
}
private boolean subStageKeysConsistency(int totalKeyLength) {
@@ -361,83 +301,151 @@
}
@Override
public void setAllEntries(Stream<Entry<T, U>> entries) {
this.clear();
this.putMulti(entries);
}
@Override
public Stream<Entry<T, U>> setAllEntriesAndGetPrevious(Stream<Entry<T, U>> entries) {
return resourceStream(() -> this.getAllEntries(null, false), () -> setAllEntries(entries));
}
@Override
public ForkJoinPool getDbReadPool() {
return dictionary.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return dictionary.getDbWritePool();
}
@Override
public void clear() {
if (range.isAll()) {
dictionary.clear();
} else if (range.isSingle()) {
dictionary.remove(range.getSingleUnsafe(), LLDictionaryResultType.VOID);
} else {
dictionary.setRange(range, Stream.empty(), false);
}
}
protected T deserializeSuffix(@NotNull BufDataInput keySuffix) throws SerializationException {
assert suffixKeyLengthConsistency(keySuffix.available());
return keySuffixSerializer.deserialize(keySuffix);
}
protected void serializeSuffixTo(T keySuffix, BufDataOutput output) throws SerializationException {
var beforeWriterOffset = output.size();
assert beforeWriterOffset == keyPrefixLength;
assert keySuffixSerializer.getSerializedBinaryLength() == keySuffixLength
: "Invalid key suffix serializer length: " + keySuffixSerializer.getSerializedBinaryLength()
+ ". Expected: " + keySuffixLength;
keySuffixSerializer.serialize(keySuffix, output);
var afterWriterOffset = output.size();
assert suffixKeyLengthConsistency(afterWriterOffset - beforeWriterOffset)
: "Invalid key suffix length: " + (afterWriterOffset - beforeWriterOffset) + ". Expected: " + keySuffixLength;
}
public static <K1, K2, V, R> Stream<R> getAllLeaves2(DatabaseMapDictionaryDeep<K1, Object2ObjectSortedMap<K2, V>, ? extends DatabaseStageMap<K2, V, DatabaseStageEntry<V>>> deepMap,
CompositeSnapshot snapshot,
TriFunction<K1, K2, V, R> merger,
@Nullable K1 savedProgressKey1) {
var keySuffix1Serializer = deepMap.keySuffixSerializer;
SerializerFixedBinaryLength<?> keySuffix2Serializer;
Serializer<?> valueSerializer;
boolean isHashed;
boolean isHashedSet;
if (deepMap.subStageGetter instanceof SubStageGetterMap subStageGetterMap) {
isHashed = false;
isHashedSet = false;
keySuffix2Serializer = subStageGetterMap.keySerializer;
valueSerializer = subStageGetterMap.valueSerializer;
} else if (deepMap.subStageGetter instanceof SubStageGetterHashMap subStageGetterHashMap) {
isHashed = true;
isHashedSet = false;
keySuffix2Serializer = subStageGetterHashMap.keyHashSerializer;
//noinspection unchecked
ValueWithHashSerializer<K2, V> valueWithHashSerializer = new ValueWithHashSerializer<>(
(Serializer<K2>) subStageGetterHashMap.keySerializer,
(Serializer<V>) subStageGetterHashMap.valueSerializer
);
valueSerializer = new ValuesSetSerializer<>(valueWithHashSerializer);
} else if (deepMap.subStageGetter instanceof SubStageGetterHashSet subStageGetterHashSet) {
isHashed = true;
isHashedSet = true;
keySuffix2Serializer = subStageGetterHashSet.keyHashSerializer;
//noinspection unchecked
valueSerializer = new ValuesSetSerializer<K2>(subStageGetterHashSet.keySerializer);
} else {
throw new IllegalArgumentException();
}
var firstKey = Optional.ofNullable(savedProgressKey1);
var fullRange = deepMap.range;
LLRange range;
if (firstKey.isPresent()) {
var key1Buf = BufDataOutput.create(keySuffix1Serializer.getSerializedBinaryLength());
keySuffix1Serializer.serialize(firstKey.get(), key1Buf);
range = LLRange.of(key1Buf.asList(), fullRange.getMax());
} else {
range = fullRange;
}
return deepMap.dictionary.getRange(deepMap.resolveSnapshot(snapshot), range, false, false)
.flatMap(entry -> {
K1 key1 = null;
Object key2 = null;
try {
var keyBuf = entry.getKey();
var valueBuf = entry.getValue();
try {
assert keyBuf != null;
var suffix1And2 = BufDataInput.create(keyBuf.subList(deepMap.keyPrefixLength, deepMap.keyPrefixLength + deepMap.keySuffixLength + deepMap.keyExtLength));
key1 = keySuffix1Serializer.deserialize(suffix1And2);
key2 = keySuffix2Serializer.deserialize(suffix1And2);
assert valueBuf != null;
Object value = valueSerializer.deserialize(BufDataInput.create(valueBuf));
if (isHashedSet) {
//noinspection unchecked
Set<K2> set = (Set<K2>) value;
K1 finalKey1 = key1;
//noinspection unchecked
return set.stream().map(e -> merger.apply(finalKey1, e, (V) Nothing.INSTANCE));
} else if (isHashed) {
//noinspection unchecked
Set<Entry<K2, V>> set = (Set<Entry<K2, V>>) value;
K1 finalKey1 = key1;
return set.stream().map(e -> merger.apply(finalKey1, e.getKey(), e.getValue()));
} else {
//noinspection unchecked
return Stream.of(merger.apply(key1, (K2) key2, (V) value));
}
} catch (IndexOutOfBoundsException ex) {
var exMessage = ex.getMessage();
if (exMessage != null && exMessage.contains("read 0 to 0, write 0 to ")) {
var totalZeroBytesErrors = deepMap.totalZeroBytesErrors.incrementAndGet();
if (totalZeroBytesErrors < 512 || totalZeroBytesErrors % 10000 == 0) {
LOG.error("Unexpected zero-bytes value at " + deepMap.dictionary.getDatabaseName()
+ ":" + deepMap.dictionary.getColumnName()
+ ":[" + key1
+ ":" + key2
+ "](" + LLUtils.toStringSafe(keyBuf) + ") total=" + totalZeroBytesErrors);
}
return Stream.empty();
} else {
throw ex;
}
}
} catch (SerializationException ex) {
throw new CompletionException(ex);
}
});
}
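// Usage sketch: flattening a two-level map into rows with getAllLeaves2. The
// Row record and the null savedProgressKey1 (meaning "start from the
// beginning") are illustrative assumptions.
record Row<K1, K2, V>(K1 key1, K2 key2, V value) {}
static <K1, K2, V> Stream<Row<K1, K2, V>> allRows(
DatabaseMapDictionaryDeep<K1, Object2ObjectSortedMap<K2, V>, ? extends DatabaseStageMap<K2, V, DatabaseStageEntry<V>>> deepMap,
CompositeSnapshot snapshot) {
return getAllLeaves2(deepMap, snapshot, Row::new, null);
}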
}


@@ -1,138 +1,99 @@
package it.cavallium.dbengine.database.collections;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.SubStageEntry;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import it.unimi.dsi.fastutil.objects.ObjectArraySet;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ForkJoinPool;
import java.util.function.Function;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
@SuppressWarnings("unused")
public class DatabaseMapDictionaryHashed<T, U, TH> implements DatabaseStageMap<T, U, DatabaseStageEntry<U>> {
private static final Logger logger = LogManager.getLogger(DatabaseMapDictionaryHashed.class);
private final Function<T, TH> keySuffixHashFunction;
private final DatabaseMapDictionary<TH, ObjectArraySet<Entry<T, U>>> subDictionary;
@SuppressWarnings({"unchecked", "rawtypes"})
protected DatabaseMapDictionaryHashed(LLDictionary dictionary,
@Nullable Buffer prefixKey,
@Nullable Buf prefixKeySupplier,
Serializer<T> keySuffixSerializer,
Serializer<U> valueSerializer,
Function<T, TH> keySuffixHashFunction,
SerializerFixedBinaryLength<TH> keySuffixHashSerializer,
Runnable onClose) {
super((Drop<DatabaseMapDictionaryHashed<T, U, TH>>) (Drop) DROP);
if (dictionary.getUpdateMode().block() != UpdateMode.ALLOW) {
SerializerFixedBinaryLength<TH> keySuffixHashSerializer) {
var updateMode = dictionary.getUpdateMode();
if (updateMode != UpdateMode.ALLOW) {
throw new IllegalArgumentException("Hashed maps only works when UpdateMode is ALLOW");
}
this.alloc = dictionary.getAllocator();
ValueWithHashSerializer<T, U> valueWithHashSerializer
= new ValueWithHashSerializer<>(keySuffixSerializer, valueSerializer);
ValuesSetSerializer<Entry<T, U>> valuesSetSerializer
= new ValuesSetSerializer<>(valueWithHashSerializer);
this.subDictionary = DatabaseMapDictionary.tail(dictionary, prefixKeySupplier, keySuffixHashSerializer,
valuesSetSerializer);
this.keySuffixHashFunction = keySuffixHashFunction;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private DatabaseMapDictionaryHashed(Function<T, TH> keySuffixHashFunction,
DatabaseStage<Object2ObjectSortedMap<TH, ObjectArraySet<Entry<T, U>>>> subDictionary) {
this.keySuffixHashFunction = keySuffixHashFunction;
this.subDictionary = (DatabaseMapDictionary<TH, ObjectArraySet<Entry<T, U>>>) subDictionary;
}
public static <T, U, UH> DatabaseMapDictionaryHashed<T, U, UH> simple(LLDictionary dictionary,
Serializer<T> keySerializer,
Serializer<U> valueSerializer,
Function<T, UH> keyHashFunction,
SerializerFixedBinaryLength<UH> keyHashSerializer) {
return new DatabaseMapDictionaryHashed<>(
dictionary,
null,
keySerializer,
valueSerializer,
keyHashFunction,
keyHashSerializer
);
}
public static <T, U, UH> DatabaseMapDictionaryHashed<T, U, UH> tail(LLDictionary dictionary,
@Nullable Buf prefixKeySupplier,
Serializer<T> keySuffixSerializer,
Serializer<U> valueSerializer,
Function<T, UH> keySuffixHashFunction,
SerializerFixedBinaryLength<UH> keySuffixHashSerializer) {
return new DatabaseMapDictionaryHashed<>(dictionary,
prefixKeySupplier,
keySuffixSerializer,
valueSerializer,
keySuffixHashFunction,
keySuffixHashSerializer
);
}
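// serializeMap groups plain entries into buckets: each entry is stored under the hash of its key, and colliding keys accumulate in the same ObjectArraySet.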
private Object2ObjectSortedMap<TH, ObjectArraySet<Entry<T, U>>> serializeMap(Object2ObjectSortedMap<T, U> map) {
var newMap = new Object2ObjectLinkedOpenHashMap<TH, ObjectArraySet<Entry<T, U>>>(map.size());
map.forEach((key, value) -> newMap.compute(keySuffixHashFunction.apply(key), (hash, prev) -> {
if (prev == null) {
prev = new ObjectArraySet<>();
@@ -143,133 +104,144 @@
return newMap;
}
private Object2ObjectSortedMap<T, U> deserializeMap(Object2ObjectSortedMap<TH, ObjectArraySet<Entry<T, U>>> map) {
var newMap = new Object2ObjectLinkedOpenHashMap<T, U>(map.size());
map.forEach((hash, set) -> set.forEach(entry -> newMap.put(entry.getKey(), entry.getValue())));
return newMap;
}
@Override
public ForkJoinPool getDbReadPool() {
return subDictionary.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return subDictionary.getDbWritePool();
}
@Override
public Object2ObjectSortedMap<T, U> get(@Nullable CompositeSnapshot snapshot) {
var v = subDictionary.get(snapshot);
var result = v != null ? deserializeMap(v) : null;
return result != null && result.isEmpty() ? null : result;
}
@Override
public Object2ObjectSortedMap<T, U> getOrDefault(@Nullable CompositeSnapshot snapshot,
Object2ObjectSortedMap<T, U> defaultValue) {
return Objects.requireNonNullElse(this.get(snapshot), defaultValue);
}
@Override
public void set(Object2ObjectSortedMap<T, U> map) {
var value = this.serializeMap(map);
subDictionary.set(value);
}
@Override
public boolean setAndGetChanged(Object2ObjectSortedMap<T, U> map) {
return subDictionary.setAndGetChanged(this.serializeMap(map));
}
@Override
public boolean clearAndGetStatus() {
return subDictionary.clearAndGetStatus();
}
@Override
public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
return subDictionary.isEmpty(snapshot);
}
@Override
public DatabaseStageEntry<Object2ObjectSortedMap<T, U>> entry() {
return this;
}
@Override
public Stream<DbProgress<SSTVerificationProgress>> verifyChecksum() {
return this.subDictionary.verifyChecksum();
}
@Override
public Mono<DatabaseStageEntry<U>> at(@Nullable CompositeSnapshot snapshot, T key) {
return this
.atPrivate(snapshot, key, keySuffixHashFunction.apply(key))
.map(cast -> (DatabaseStageEntry<U>) cast)
.doOnDiscard(Resource.class, Resource::close);
public @NotNull DatabaseStageEntry<U> at(@Nullable CompositeSnapshot snapshot, T key) {
return this.atPrivate(snapshot, key, keySuffixHashFunction.apply(key));
}
private DatabaseSingleBucket<T, U, TH> atPrivate(@Nullable CompositeSnapshot snapshot, T key, TH hash) {
return new DatabaseSingleBucket<T, U, TH>(subDictionary.at(snapshot, hash), key);
}
@Override
public UpdateMode getUpdateMode() {
return subDictionary.getUpdateMode();
}
@Override
public Stream<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot,
boolean smallRange) {
return subDictionary
.getAllEntries(snapshot, smallRange)
.map(Entry::getValue)
.map(Collections::unmodifiableSet)
.flatMap(bucket -> bucket.stream()
.map(Entry::getKey)
.map(key -> new SubStageEntry<>(key, this.at(snapshot, key))));
}
@Override
public Stream<Entry<T, U>> getAllEntries(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return subDictionary
.getAllEntries(snapshot, smallRange)
.map(Entry::getValue)
.map(Collections::unmodifiableSet)
.flatMap(Collection::stream);
}
@Override
public Stream<T> getAllKeys(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllEntries(snapshot, smallRange).map(Entry::getKey);
}
@Override
public Stream<U> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllEntries(snapshot, smallRange).map(Entry::getValue);
}
@Override
public Stream<Entry<T, U>> setAllEntriesAndGetPrevious(Stream<Entry<T, U>> entries) {
List<Entry<T, U>> prevList = entries.map(entry -> {
var prev = this.at(null, entry.getKey()).setAndGetPrevious(entry.getValue());
if (prev != null) {
return Map.entry(entry.getKey(), prev);
} else {
return null;
}
}).filter(Objects::nonNull).toList();
return prevList.stream();
}
@Override
public void clear() {
subDictionary.clear();
}
@Override
public Object2ObjectSortedMap<T, U> setAndGetPrevious(Object2ObjectSortedMap<T, U> value) {
var v = subDictionary.setAndGetPrevious(this.serializeMap(value));
var result = v != null ? deserializeMap(v) : null;
return result != null && result.isEmpty() ? null : result;
}
@Override
public Object2ObjectSortedMap<T, U> clearAndGetPrevious() {
var v = subDictionary.clearAndGetPrevious();
return v != null ? deserializeMap(v) : null;
}
@Override
public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return subDictionary.leavesCount(snapshot, fast);
}
@@ -282,13 +254,14 @@
@Override
public ValueGetter<T, U> getAsyncDbValueGetter(@Nullable CompositeSnapshot snapshot) {
ValueGetter<TH, ObjectArraySet<Entry<T, U>>> getter = subDictionary.getAsyncDbValueGetter(snapshot);
return key -> {
ObjectArraySet<Entry<T, U>> set = getter.get(keySuffixHashFunction.apply(key));
if (set != null) {
return this.extractValue(set, key);
} else {
return null;
}
};
}
@Nullable
@@ -336,20 +309,4 @@
return null;
}
}
}
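A note on the class above: DatabaseMapDictionaryHashed stores every entry under the fixed-length hash of its key suffix, so distinct keys that hash alike share one ObjectArraySet bucket, and a point lookup jumps to the bucket by hash and then scans it for the exact key (see extractValue and getAsyncDbValueGetter). A minimal standalone sketch of that bucket scheme, using only JDK types and a deliberately weak toy hash (it is not the dbengine API):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

public class BucketSketch {

	// Deliberately weak hash so that distinct keys collide and share a bucket.
	static int hash(String key) {
		return key.charAt(0);
	}

	static String get(Map<Integer, Set<Entry<String, String>>> buckets, String key) {
		var bucket = buckets.get(hash(key));
		if (bucket == null) {
			return null;
		}
		// Collisions are resolved by scanning the bucket for the exact key.
		for (var entry : bucket) {
			if (entry.getKey().equals(key)) {
				return entry.getValue();
			}
		}
		return null;
	}

	public static void main(String[] args) {
		Map<Integer, Set<Entry<String, String>>> buckets = new HashMap<>();
		for (var e : Map.of("aa", "1", "ab", "2", "ba", "3").entrySet()) {
			buckets.computeIfAbsent(hash(e.getKey()), h -> new HashSet<>()).add(e);
		}
		System.out.println(get(buckets, "ab")); // prints 2: "aa" and "ab" share the 'a' bucket
	}
}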

View File

@ -0,0 +1,140 @@
package it.cavallium.dbengine.database.collections;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLDictionaryResultType;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.disk.CachedSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
public final class DatabaseMapSingle<U> implements DatabaseStageEntry<U> {
private static final Logger LOG = LogManager.getLogger(DatabaseMapSingle.class);
private final LLDictionary dictionary;
private final Buf key;
private final Serializer<U> serializer;
public DatabaseMapSingle(LLDictionary dictionary, Buf key, Serializer<U> serializer) {
this.dictionary = dictionary;
this.key = key;
this.serializer = serializer;
}
private LLSnapshot resolveSnapshot(@Nullable CompositeSnapshot snapshot) {
if (snapshot == null) {
return null;
} else {
return snapshot.getSnapshot(dictionary);
}
}
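// A zero-length stored value cannot be deserialized; such reads are logged and treated as a missing value instead of failing the whole operation.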
private U deserializeValue(Buf value) {
try {
return serializer.deserialize(BufDataInput.create(value));
} catch (IndexOutOfBoundsException ex) {
var exMessage = ex.getMessage();
if (exMessage != null && exMessage.contains("read 0 to 0, write 0 to ")) {
LOG.error("Unexpected zero-bytes value at %s:%s:%s".formatted(dictionary.getDatabaseName(),
dictionary.getColumnName(),
LLUtils.toStringSafe(key)
));
return null;
} else {
throw ex;
}
}
}
private Buf serializeValue(U value) throws SerializationException {
BufDataOutput valBuf = BufDataOutput.create(serializer.getSerializedSizeHint());
serializer.serialize(value, valBuf);
return valBuf.asList();
}
@Override
public ForkJoinPool getDbReadPool() {
return dictionary.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return dictionary.getDbWritePool();
}
@Override
public U get(@Nullable CompositeSnapshot snapshot) {
var result = dictionary.get(resolveSnapshot(snapshot), key);
if (result != null) {
return deserializeValue(result);
} else {
return null;
}
}
@Override
public U setAndGetPrevious(U value) {
var serializedValue = value != null ? serializeValue(value) : null;
var result = dictionary.put(key, serializedValue, LLDictionaryResultType.PREVIOUS_VALUE);
if (result != null) {
return deserializeValue(result);
} else {
return null;
}
}
@Override
public U update(SerializationFunction<@Nullable U, @Nullable U> updater, UpdateReturnMode updateReturnMode) {
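// UpdateReturnMode.NOTHING is passed down because the cached updater already retains both the previous and the updated value.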
var serializedUpdater = createUpdater(updater);
dictionary.update(key, serializedUpdater, UpdateReturnMode.NOTHING);
return serializedUpdater.getResult(updateReturnMode);
}
@Override
public Delta<U> updateAndGetDelta(SerializationFunction<@Nullable U, @Nullable U> updater) {
var serializedUpdater = createUpdater(updater);
dictionary.update(key, serializedUpdater, UpdateReturnMode.NOTHING);
return serializedUpdater.getDelta();
}
private CachedSerializationFunction<U, Buf, Buf> createUpdater(SerializationFunction<U, U> updater) {
return new CachedSerializationFunction<>(updater, this::serializeValue, this::deserializeValue);
}
@Override
public U clearAndGetPrevious() {
var prev = dictionary.remove(key, LLDictionaryResultType.PREVIOUS_VALUE);
return prev != null ? deserializeValue(prev) : null;
}
@Override
public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return dictionary.isRangeEmpty(resolveSnapshot(snapshot), LLRange.single(key), false) ? 0L : 1L;
}
@Override
public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
return dictionary.isRangeEmpty(resolveSnapshot(snapshot), LLRange.single(key), true);
}
@Override
public Stream<DbProgress<SSTVerificationProgress>> verifyChecksum() {
return dictionary.verifyChecksum(LLRange.single(key));
}
}
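Both update() and updateAndGetDelta() above funnel through createUpdater: the stored raw value is deserialized once, the caller's function runs on plain objects, the result is serialized back, and the wrapper keeps both sides so the result or the delta can be answered without another database read. A standalone sketch of that caching idea, with hypothetical names and String/byte[] standing in for Buf and the serializers (it is not the actual CachedSerializationFunction API):

import java.nio.charset.StandardCharsets;
import java.util.function.UnaryOperator;

final class CachingUpdater {

	private String previous; // deserialized old value, null if absent
	private String current;  // deserialized new value, null if deleted

	// Applies 'updater' to the raw stored bytes and returns the new raw bytes,
	// caching both deserialized sides for later inspection.
	byte[] apply(byte[] rawPrevious, UnaryOperator<String> updater) {
		previous = rawPrevious == null ? null : new String(rawPrevious, StandardCharsets.UTF_8);
		current = updater.apply(previous);
		return current == null ? null : current.getBytes(StandardCharsets.UTF_8);
	}

	String previousValue() { return previous; }
	String currentValue() { return current; }

	public static void main(String[] args) {
		var u = new CachingUpdater();
		u.apply(null, prev -> prev == null ? "1" : prev + "+1");
		// The delta is available without re-reading the store.
		System.out.println(u.previousValue() + " -> " + u.currentValue()); // null -> 1
	}
}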

View File

@@ -1,55 +1,50 @@
package it.cavallium.dbengine.database.collections;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;
import java.util.Set;
import org.jetbrains.annotations.Nullable;
@SuppressWarnings("unused")
public class DatabaseSetDictionary<T> extends DatabaseMapDictionary<T, Nothing> {
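// Every key maps to the shared Nothing placeholder, so set membership and iteration reuse the map code paths unchanged.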
protected DatabaseSetDictionary(LLDictionary dictionary,
Buf prefixKeySupplier,
SerializerFixedBinaryLength<T> keySuffixSerializer) {
super(dictionary, prefixKeySupplier, keySuffixSerializer, DatabaseEmpty.nothingSerializer());
}
public static <T> DatabaseSetDictionary<T> simple(LLDictionary dictionary,
SerializerFixedBinaryLength<T> keySerializer) {
return new DatabaseSetDictionary<>(dictionary, null, keySerializer);
}
public static <T> DatabaseSetDictionary<T> tail(LLDictionary dictionary,
Buf prefixKeySupplier,
SerializerFixedBinaryLength<T> keySuffixSerializer) {
return new DatabaseSetDictionary<>(dictionary, prefixKeySupplier, keySuffixSerializer);
}
public Set<T> getKeySet(@Nullable CompositeSnapshot snapshot) {
var v = get(snapshot);
return v != null ? v.keySet() : null;
}
public Set<T> setAndGetPreviousKeySet(Set<T> value) {
var hm = new Object2ObjectLinkedOpenHashMap<T, Nothing>();
for (T t : value) {
hm.put(t, DatabaseEmpty.NOTHING);
}
return setAndGetPrevious(hm).map(Map::keySet);
var v = setAndGetPrevious(hm);
return v != null ? v.keySet() : null;
}
public Set<T> clearAndGetPreviousKeySet() {
var v = clearAndGetPrevious();
return v != null ? v.keySet() : null;
}
}
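As the class above shows, a set is stored as a map whose values are all the same placeholder, so inserts, membership tests, and key listing cost the same as the underlying map operations. A standalone sketch of the same representation with plain JDK types (hypothetical class, not part of dbengine):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

final class SetAsMapSketch {

	private static final Object NOTHING = new Object(); // shared placeholder value
	private final Map<String, Object> backing = new LinkedHashMap<>();

	void add(String key) { backing.put(key, NOTHING); }
	boolean contains(String key) { return backing.containsKey(key); }
	Set<String> keySet() { return backing.keySet(); }

	public static void main(String[] args) {
		var set = new SetAsMapSketch();
		set.add("a");
		set.add("b");
		System.out.println(set.keySet()); // [a, b]
	}
}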

Some files were not shown because too many files have changed in this diff.