Added io_uring JNI

Motivation:

The prototype is not buildable, and the JNI io_uring implementation is missing.

Modifications:

- added io_uring implementation (source from https://github.com/axboe/liburing)
- the event loop stores the Netty io_uring pointer, which gives access to the two ring buffers used to execute events such as read and write operations in JNI (a usage sketch follows this list)
- memory barriers are currently included in the JNI code (will be changed in the future)
- pom file adapted from native epoll
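
A minimal usage sketch of that flow follows. It is not part of the commit's diff and assumes the helpers introduced later in this commit (io_uring_get_sqe, io_uring_prep_read, io_uring_sqe_set_data, io_uring_submit, io_uring_wait_cqe, io_uring_cqe_seen) are declared and visible, e.g. via the new io_uring header/source:

/*
 * Hedged sketch only, not part of the commit: queue a read SQE on the
 * submission ring, submit it, then reap the completion from the CQ ring.
 */
#include <stdint.h>
#include <stdio.h>
#include "io_uring.h" /* struct io_uring and the ring helpers from this commit */

static void example_read(struct io_uring *ring, int fd, void *buf, unsigned len) {
  struct io_uring_sqe *sqe = io_uring_get_sqe(ring); /* grab a free SQE */
  if (!sqe) {
    return; /* SQ ring is full */
  }
  io_uring_prep_read(sqe, fd, buf, len, 0); /* IORING_OP_READ at offset 0 */
  io_uring_sqe_set_data(sqe, 42);           /* event id, surfaces as cqe->user_data */
  io_uring_submit(ring);                    /* flush the SQ tail and call io_uring_enter */

  long ret = io_uring_wait_cqe(ring, 1);    /* returns the CQE address, or a negative value */
  if (ret > 0) {
    struct io_uring_cqe *cqe = (struct io_uring_cqe *)(uintptr_t)ret;
    printf("event %llu completed, res=%d\n",
           (unsigned long long)cqe->user_data, cqe->res);
    io_uring_cqe_seen(ring, cqe);           /* advance the CQ head */
  }
}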

Result:

The prototype can now be built.
This commit is contained in:
Josef Grieb 2020-06-26 11:23:41 +02:00
parent 187ec6dffd
commit 962a3433ca
17 changed files with 1847 additions and 746 deletions

View File

@ -14,29 +14,30 @@
* under the License.
*/
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <jni.h>
#include <limits.h>
#include <link.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/timerfd.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <stddef.h>
#include <limits.h>
#include <inttypes.h>
#include <link.h>
#include <time.h>
// Needed to be able to use syscalls directly and so not depend on newer GLIBC versions
#include <unistd.h>
// Needed to be able to use syscalls directly and so not depend on newer GLIBC
// versions
#include <linux/net.h>
#include <sys/syscall.h>
@ -49,7 +50,8 @@
#include "netty_unix_socket.h"
#include "netty_unix_util.h"
// TCP_FASTOPEN is defined in linux 3.7. We define this here so older kernels can compile.
// TCP_FASTOPEN is defined in linux 3.7. We define this here so older kernels
// can compile.
#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN 23
#endif
@ -68,10 +70,12 @@ struct mmsghdr {
#ifndef SYS_recvmmsg
// Only support SYS_recvmmsg for __x86_64__ / __i386__ for now
#if defined(__x86_64__)
// See https://github.com/torvalds/linux/blob/v5.4/arch/x86/entry/syscalls/syscall_64.tbl
// See
// https://github.com/torvalds/linux/blob/v5.4/arch/x86/entry/syscalls/syscall_64.tbl
#define SYS_recvmmsg 299
#elif defined(__i386__)
// See https://github.com/torvalds/linux/blob/v5.4/arch/x86/entry/syscalls/syscall_32.tbl
// See
// https://github.com/torvalds/linux/blob/v5.4/arch/x86/entry/syscalls/syscall_32.tbl
#define SYS_recvmmsg 337
#else
#define SYS_recvmmsg -1
@ -81,17 +85,20 @@ struct mmsghdr {
#ifndef SYS_sendmmsg
// Only support SYS_sendmmsg for __x86_64__ / __i386__ for now
#if defined(__x86_64__)
// See https://github.com/torvalds/linux/blob/v5.4/arch/x86/entry/syscalls/syscall_64.tbl
// See
// https://github.com/torvalds/linux/blob/v5.4/arch/x86/entry/syscalls/syscall_64.tbl
#define SYS_sendmmsg 307
#elif defined(__i386__)
// See https://github.com/torvalds/linux/blob/v5.4/arch/x86/entry/syscalls/syscall_32.tbl
// See
// https://github.com/torvalds/linux/blob/v5.4/arch/x86/entry/syscalls/syscall_32.tbl
#define SYS_sendmmsg 345
#else
#define SYS_sendmmsg -1
#endif
#endif // SYS_sendmmsg
// Those are initialized in the init(...) method and cached for performance reasons
// Those are initialized in the init(...) method and cached for performance
// reasons
static jfieldID packetAddrFieldId = NULL;
static jfieldID packetAddrLenFieldId = NULL;
static jfieldID packetScopeIdFieldId = NULL;
@ -100,9 +107,9 @@ static jfieldID packetMemoryAddressFieldId = NULL;
static jfieldID packetCountFieldId = NULL;
// util methods
static int getSysctlValue(const char * property, int* returnValue) {
static int getSysctlValue(const char *property, int *returnValue) {
int rc = -1;
FILE *fd=fopen(property, "r");
FILE *fd = fopen(property, "r");
if (fd != NULL) {
char buf[32] = {0x0};
if (fgets(buf, 32, fd) != NULL) {
@ -114,43 +121,44 @@ static int getSysctlValue(const char * property, int* returnValue) {
return rc;
}
static inline jint epollCtl(JNIEnv* env, jint efd, int op, jint fd, jint flags) {
static inline jint epollCtl(JNIEnv *env, jint efd, int op, jint fd,
jint flags) {
uint32_t events = flags;
struct epoll_event ev = {
.data.fd = fd,
.events = events
};
struct epoll_event ev = {.data.fd = fd, .events = events};
return epoll_ctl(efd, op, fd, &ev);
}
// JNI Registered Methods Begin
static jint netty_epoll_native_eventFd(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_eventFd(JNIEnv *env, jclass clazz) {
jint eventFD = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
if (eventFD < 0) {
netty_unix_errors_throwChannelExceptionErrorNo(env, "eventfd() failed: ", errno);
netty_unix_errors_throwChannelExceptionErrorNo(env,
"eventfd() failed: ", errno);
}
return eventFD;
}
static jint netty_epoll_native_timerFd(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_timerFd(JNIEnv *env, jclass clazz) {
jint timerFD = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);
if (timerFD < 0) {
netty_unix_errors_throwChannelExceptionErrorNo(env, "timerfd_create() failed: ", errno);
netty_unix_errors_throwChannelExceptionErrorNo(
env, "timerfd_create() failed: ", errno);
}
return timerFD;
}
static void netty_epoll_native_eventFdWrite(JNIEnv* env, jclass clazz, jint fd, jlong value) {
static void netty_epoll_native_eventFdWrite(JNIEnv *env, jclass clazz, jint fd,
jlong value) {
uint64_t val;
for (;;) {
jint ret = eventfd_write(fd, (eventfd_t) value);
jint ret = eventfd_write(fd, (eventfd_t)value);
if (ret < 0) {
// We need to read before we can write again, let's try to read and then write again and if this
// fails we will bail out.
// We need to read before we can write again, let's try to read and then
// write again and if this fails we will bail out.
//
// See http://man7.org/linux/man-pages/man2/eventfd.2.html.
if (errno == EAGAIN) {
@ -158,16 +166,18 @@ static void netty_epoll_native_eventFdWrite(JNIEnv* env, jclass clazz, jint fd,
// Try again
continue;
}
netty_unix_errors_throwChannelExceptionErrorNo(env, "eventfd_read(...) failed: ", errno);
netty_unix_errors_throwChannelExceptionErrorNo(
env, "eventfd_read(...) failed: ", errno);
} else {
netty_unix_errors_throwChannelExceptionErrorNo(env, "eventfd_write(...) failed: ", errno);
netty_unix_errors_throwChannelExceptionErrorNo(
env, "eventfd_write(...) failed: ", errno);
}
}
break;
}
}
static void netty_epoll_native_eventFdRead(JNIEnv* env, jclass clazz, jint fd) {
static void netty_epoll_native_eventFdRead(JNIEnv *env, jclass clazz, jint fd) {
uint64_t eventfd_t;
if (eventfd_read(fd, &eventfd_t) != 0) {
@ -176,16 +186,18 @@ static void netty_epoll_native_eventFdRead(JNIEnv* env, jclass clazz, jint fd) {
}
}
static void netty_epoll_native_timerFdRead(JNIEnv* env, jclass clazz, jint fd) {
static void netty_epoll_native_timerFdRead(JNIEnv *env, jclass clazz, jint fd) {
uint64_t timerFireCount;
if (read(fd, &timerFireCount, sizeof(uint64_t)) < 0) {
// it is expected that this is only called where there is known to be activity, so this is an error.
netty_unix_errors_throwChannelExceptionErrorNo(env, "read() failed: ", errno);
// it is expected that this is only called where there is known to be
// activity, so this is an error.
netty_unix_errors_throwChannelExceptionErrorNo(env,
"read() failed: ", errno);
}
}
static jint netty_epoll_native_epollCreate(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_epollCreate(JNIEnv *env, jclass clazz) {
jint efd;
if (epoll_create1) {
efd = epoll_create1(EPOLL_CLOEXEC);
@ -196,9 +208,11 @@ static jint netty_epoll_native_epollCreate(JNIEnv* env, jclass clazz) {
if (efd < 0) {
int err = errno;
if (epoll_create1) {
netty_unix_errors_throwChannelExceptionErrorNo(env, "epoll_create1() failed: ", err);
netty_unix_errors_throwChannelExceptionErrorNo(
env, "epoll_create1() failed: ", err);
} else {
netty_unix_errors_throwChannelExceptionErrorNo(env, "epoll_create() failed: ", err);
netty_unix_errors_throwChannelExceptionErrorNo(
env, "epoll_create() failed: ", err);
}
return efd;
}
@ -206,25 +220,31 @@ static jint netty_epoll_native_epollCreate(JNIEnv* env, jclass clazz) {
if (fcntl(efd, F_SETFD, FD_CLOEXEC) < 0) {
int err = errno;
close(efd);
netty_unix_errors_throwChannelExceptionErrorNo(env, "fcntl() failed: ", err);
netty_unix_errors_throwChannelExceptionErrorNo(env,
"fcntl() failed: ", err);
return err;
}
}
return efd;
}
static void netty_epoll_native_timerFdSetTime(JNIEnv* env, jclass clazz, jint timerFd, jint tvSec, jint tvNsec) {
static void netty_epoll_native_timerFdSetTime(JNIEnv *env, jclass clazz,
jint timerFd, jint tvSec,
jint tvNsec) {
struct itimerspec ts;
memset(&ts.it_interval, 0, sizeof(struct timespec));
ts.it_value.tv_sec = tvSec;
ts.it_value.tv_nsec = tvNsec;
if (timerfd_settime(timerFd, 0, &ts, NULL) < 0) {
netty_unix_errors_throwIOExceptionErrorNo(env, "timerfd_settime() failed: ", errno);
netty_unix_errors_throwIOExceptionErrorNo(
env, "timerfd_settime() failed: ", errno);
}
}
static jint netty_epoll_native_epollWait(JNIEnv* env, jclass clazz, jint efd, jlong address, jint len, jint timeout) {
struct epoll_event *ev = (struct epoll_event*) (intptr_t) address;
static jint netty_epoll_native_epollWait(JNIEnv *env, jclass clazz, jint efd,
jlong address, jint len,
jint timeout) {
struct epoll_event *ev = (struct epoll_event *)(intptr_t)address;
int result, err;
do {
@ -232,21 +252,24 @@ static jint netty_epoll_native_epollWait(JNIEnv* env, jclass clazz, jint efd, jl
if (result >= 0) {
return result;
}
} while((err = errno) == EINTR);
} while ((err = errno) == EINTR);
return -err;
}
// This method is deprecated!
static jint netty_epoll_native_epollWait0(JNIEnv* env, jclass clazz, jint efd, jlong address, jint len, jint timerFd, jint tvSec, jint tvNsec) {
static jint netty_epoll_native_epollWait0(JNIEnv *env, jclass clazz, jint efd,
jlong address, jint len, jint timerFd,
jint tvSec, jint tvNsec) {
// only reschedule the timer if there is a newer event.
// -1 is a special value used by EpollEventLoop.
if (tvSec != ((jint) -1) && tvNsec != ((jint) -1)) {
if (tvSec != ((jint)-1) && tvNsec != ((jint)-1)) {
struct itimerspec ts;
memset(&ts.it_interval, 0, sizeof(struct timespec));
ts.it_value.tv_sec = tvSec;
ts.it_value.tv_nsec = tvNsec;
if (timerfd_settime(timerFd, 0, &ts, NULL) < 0) {
netty_unix_errors_throwChannelExceptionErrorNo(env, "timerfd_settime() failed: ", errno);
netty_unix_errors_throwChannelExceptionErrorNo(
env, "timerfd_settime() failed: ", errno);
return -1;
}
}
@ -255,12 +278,14 @@ static jint netty_epoll_native_epollWait0(JNIEnv* env, jclass clazz, jint efd, j
static inline void cpu_relax() {
#if defined(__x86_64__)
asm volatile("pause\n": : :"memory");
asm volatile("pause\n" : : : "memory");
#endif
}
static jint netty_epoll_native_epollBusyWait0(JNIEnv* env, jclass clazz, jint efd, jlong address, jint len) {
struct epoll_event *ev = (struct epoll_event*) (intptr_t) address;
static jint netty_epoll_native_epollBusyWait0(JNIEnv *env, jclass clazz,
jint efd, jlong address,
jint len) {
struct epoll_event *ev = (struct epoll_event *)(intptr_t)address;
int result, err;
// Zeros = poll (aka return immediately).
@ -275,19 +300,21 @@ static jint netty_epoll_native_epollBusyWait0(JNIEnv* env, jclass clazz, jint ef
if (result >= 0) {
return result;
}
} while((err = errno) == EINTR);
} while ((err = errno) == EINTR);
return -err;
}
static jint netty_epoll_native_epollCtlAdd0(JNIEnv* env, jclass clazz, jint efd, jint fd, jint flags) {
static jint netty_epoll_native_epollCtlAdd0(JNIEnv *env, jclass clazz, jint efd,
jint fd, jint flags) {
int res = epollCtl(env, efd, EPOLL_CTL_ADD, fd, flags);
if (res < 0) {
return -errno;
}
return res;
}
static jint netty_epoll_native_epollCtlMod0(JNIEnv* env, jclass clazz, jint efd, jint fd, jint flags) {
static jint netty_epoll_native_epollCtlMod0(JNIEnv *env, jclass clazz, jint efd,
jint fd, jint flags) {
int res = epollCtl(env, efd, EPOLL_CTL_MOD, fd, flags);
if (res < 0) {
return -errno;
@ -295,9 +322,11 @@ static jint netty_epoll_native_epollCtlMod0(JNIEnv* env, jclass clazz, jint efd,
return res;
}
static jint netty_epoll_native_epollCtlDel0(JNIEnv* env, jclass clazz, jint efd, jint fd) {
// Create an empty event to workaround a bug in older kernels which can not handle NULL.
struct epoll_event event = { 0 };
static jint netty_epoll_native_epollCtlDel0(JNIEnv *env, jclass clazz, jint efd,
jint fd) {
// Create an empty event to workaround a bug in older kernels which can not
// handle NULL.
struct epoll_event event = {0};
int res = epoll_ctl(efd, EPOLL_CTL_DEL, fd, &event);
if (res < 0) {
return -errno;
@ -305,7 +334,9 @@ static jint netty_epoll_native_epollCtlDel0(JNIEnv* env, jclass clazz, jint efd,
return res;
}
static jint netty_epoll_native_sendmmsg0(JNIEnv* env, jclass clazz, jint fd, jboolean ipv6, jobjectArray packets, jint offset, jint len) {
static jint netty_epoll_native_sendmmsg0(JNIEnv *env, jclass clazz, jint fd,
jboolean ipv6, jobjectArray packets,
jint offset, jint len) {
struct mmsghdr msg[len];
struct sockaddr_storage addr[len];
socklen_t addrSize;
@ -316,22 +347,26 @@ static jint netty_epoll_native_sendmmsg0(JNIEnv* env, jclass clazz, jint fd, jbo
for (i = 0; i < len; i++) {
jobject packet = (*env)->GetObjectArrayElement(env, packets, i + offset);
jbyteArray address = (jbyteArray) (*env)->GetObjectField(env, packet, packetAddrFieldId);
jbyteArray address =
(jbyteArray)(*env)->GetObjectField(env, packet, packetAddrFieldId);
jint addrLen = (*env)->GetIntField(env, packet, packetAddrLenFieldId);
if (addrLen != 0) {
jint scopeId = (*env)->GetIntField(env, packet, packetScopeIdFieldId);
jint port = (*env)->GetIntField(env, packet, packetPortFieldId);
if (netty_unix_socket_initSockaddr(env, ipv6, address, scopeId, port, &addr[i], &addrSize) == -1) {
if (netty_unix_socket_initSockaddr(env, ipv6, address, scopeId, port,
&addr[i], &addrSize) == -1) {
return -1;
}
msg[i].msg_hdr.msg_name = &addr[i];
msg[i].msg_hdr.msg_namelen = addrSize;
}
msg[i].msg_hdr.msg_iov = (struct iovec*) (intptr_t) (*env)->GetLongField(env, packet, packetMemoryAddressFieldId);
msg[i].msg_hdr.msg_iovlen = (*env)->GetIntField(env, packet, packetCountFieldId);
msg[i].msg_hdr.msg_iov = (struct iovec *)(intptr_t)(*env)->GetLongField(
env, packet, packetMemoryAddressFieldId);
msg[i].msg_hdr.msg_iovlen =
(*env)->GetIntField(env, packet, packetCountFieldId);
}
ssize_t res;
@ -345,10 +380,12 @@ static jint netty_epoll_native_sendmmsg0(JNIEnv* env, jclass clazz, jint fd, jbo
if (res < 0) {
return -err;
}
return (jint) res;
return (jint)res;
}
static jint netty_epoll_native_recvmmsg0(JNIEnv* env, jclass clazz, jint fd, jboolean ipv6, jobjectArray packets, jint offset, jint len) {
static jint netty_epoll_native_recvmmsg0(JNIEnv *env, jclass clazz, jint fd,
jboolean ipv6, jobjectArray packets,
jint offset, jint len) {
struct mmsghdr msg[len];
memset(msg, 0, sizeof(msg));
struct sockaddr_storage addr[len];
@ -359,11 +396,13 @@ static jint netty_epoll_native_recvmmsg0(JNIEnv* env, jclass clazz, jint fd, jbo
for (i = 0; i < len; i++) {
jobject packet = (*env)->GetObjectArrayElement(env, packets, i + offset);
msg[i].msg_hdr.msg_iov = (struct iovec*) (intptr_t) (*env)->GetLongField(env, packet, packetMemoryAddressFieldId);
msg[i].msg_hdr.msg_iovlen = (*env)->GetIntField(env, packet, packetCountFieldId);
msg[i].msg_hdr.msg_iov = (struct iovec *)(intptr_t)(*env)->GetLongField(
env, packet, packetMemoryAddressFieldId);
msg[i].msg_hdr.msg_iovlen =
(*env)->GetIntField(env, packet, packetCountFieldId);
msg[i].msg_hdr.msg_name = addr + i;
msg[i].msg_hdr.msg_namelen = (socklen_t) addrSize;
msg[i].msg_hdr.msg_namelen = (socklen_t)addrSize;
}
ssize_t res;
@ -381,50 +420,59 @@ static jint netty_epoll_native_recvmmsg0(JNIEnv* env, jclass clazz, jint fd, jbo
for (i = 0; i < res; i++) {
jobject packet = (*env)->GetObjectArrayElement(env, packets, i + offset);
jbyteArray address = (jbyteArray) (*env)->GetObjectField(env, packet, packetAddrFieldId);
jbyteArray address =
(jbyteArray)(*env)->GetObjectField(env, packet, packetAddrFieldId);
(*env)->SetIntField(env, packet, packetCountFieldId, msg[i].msg_len);
struct sockaddr_storage* addr = (struct sockaddr_storage*) msg[i].msg_hdr.msg_name;
struct sockaddr_storage *addr =
(struct sockaddr_storage *)msg[i].msg_hdr.msg_name;
if (addr->ss_family == AF_INET) {
struct sockaddr_in* ipaddr = (struct sockaddr_in*) addr;
struct sockaddr_in *ipaddr = (struct sockaddr_in *)addr;
(*env)->SetByteArrayRegion(env, address, 0, 4, (jbyte*) &ipaddr->sin_addr.s_addr);
(*env)->SetByteArrayRegion(env, address, 0, 4,
(jbyte *)&ipaddr->sin_addr.s_addr);
(*env)->SetIntField(env, packet, packetAddrLenFieldId, 4);
(*env)->SetIntField(env, packet, packetScopeIdFieldId, 0);
(*env)->SetIntField(env, packet, packetPortFieldId, ntohs(ipaddr->sin_port));
(*env)->SetIntField(env, packet, packetPortFieldId,
ntohs(ipaddr->sin_port));
} else {
int addrLen = netty_unix_socket_ipAddressLength(addr);
struct sockaddr_in6* ip6addr = (struct sockaddr_in6*) addr;
struct sockaddr_in6 *ip6addr = (struct sockaddr_in6 *)addr;
if (addrLen == 4) {
// IPV4 mapped IPV6 address
jbyte* addr = (jbyte*) &ip6addr->sin6_addr.s6_addr;
jbyte *addr = (jbyte *)&ip6addr->sin6_addr.s6_addr;
(*env)->SetByteArrayRegion(env, address, 0, 4, addr + 12);
} else {
(*env)->SetByteArrayRegion(env, address, 0, 16, (jbyte*) &ip6addr->sin6_addr.s6_addr);
(*env)->SetByteArrayRegion(env, address, 0, 16,
(jbyte *)&ip6addr->sin6_addr.s6_addr);
}
(*env)->SetIntField(env, packet, packetAddrLenFieldId, addrLen);
(*env)->SetIntField(env, packet, packetScopeIdFieldId, ip6addr->sin6_scope_id);
(*env)->SetIntField(env, packet, packetPortFieldId, ntohs(ip6addr->sin6_port));
(*env)->SetIntField(env, packet, packetScopeIdFieldId,
ip6addr->sin6_scope_id);
(*env)->SetIntField(env, packet, packetPortFieldId,
ntohs(ip6addr->sin6_port));
}
}
return (jint) res;
return (jint)res;
}
static jstring netty_epoll_native_kernelVersion(JNIEnv* env, jclass clazz) {
static jstring netty_epoll_native_kernelVersion(JNIEnv *env, jclass clazz) {
struct utsname name;
int res = uname(&name);
if (res == 0) {
return (*env)->NewStringUTF(env, name.release);
}
netty_unix_errors_throwRuntimeExceptionErrorNo(env, "uname() failed: ", errno);
netty_unix_errors_throwRuntimeExceptionErrorNo(env,
"uname() failed: ", errno);
return NULL;
}
static jboolean netty_epoll_native_isSupportingSendmmsg(JNIEnv* env, jclass clazz) {
static jboolean netty_epoll_native_isSupportingSendmmsg(JNIEnv *env,
jclass clazz) {
if (SYS_sendmmsg == -1) {
return JNI_FALSE;
}
@ -436,7 +484,8 @@ static jboolean netty_epoll_native_isSupportingSendmmsg(JNIEnv* env, jclass claz
return JNI_TRUE;
}
static jboolean netty_epoll_native_isSupportingRecvmmsg(JNIEnv* env, jclass clazz) {
static jboolean netty_epoll_native_isSupportingRecvmmsg(JNIEnv *env,
jclass clazz) {
if (SYS_recvmmsg == -1) {
return JNI_FALSE;
}
@ -448,7 +497,8 @@ static jboolean netty_epoll_native_isSupportingRecvmmsg(JNIEnv* env, jclass claz
return JNI_TRUE;
}
static jboolean netty_epoll_native_isSupportingTcpFastopen(JNIEnv* env, jclass clazz) {
static jboolean netty_epoll_native_isSupportingTcpFastopen(JNIEnv *env,
jclass clazz) {
int fastopen = 0;
getSysctlValue("/proc/sys/net/ipv4/tcp_fastopen", &fastopen);
if (fastopen > 0) {
@ -457,55 +507,58 @@ static jboolean netty_epoll_native_isSupportingTcpFastopen(JNIEnv* env, jclass c
return JNI_FALSE;
}
static jint netty_epoll_native_epollet(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_epollet(JNIEnv *env, jclass clazz) {
return EPOLLET;
}
static jint netty_epoll_native_epollin(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_epollin(JNIEnv *env, jclass clazz) {
return EPOLLIN;
}
static jint netty_epoll_native_epollout(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_epollout(JNIEnv *env, jclass clazz) {
return EPOLLOUT;
}
static jint netty_epoll_native_epollrdhup(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_epollrdhup(JNIEnv *env, jclass clazz) {
return EPOLLRDHUP;
}
static jint netty_epoll_native_epollerr(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_epollerr(JNIEnv *env, jclass clazz) {
return EPOLLERR;
}
static jint netty_epoll_native_sizeofEpollEvent(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_sizeofEpollEvent(JNIEnv *env, jclass clazz) {
return sizeof(struct epoll_event);
}
static jint netty_epoll_native_offsetofEpollData(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_offsetofEpollData(JNIEnv *env, jclass clazz) {
return offsetof(struct epoll_event, data);
}
static jint netty_epoll_native_splice0(JNIEnv* env, jclass clazz, jint fd, jlong offIn, jint fdOut, jlong offOut, jlong len) {
static jint netty_epoll_native_splice0(JNIEnv *env, jclass clazz, jint fd,
jlong offIn, jint fdOut, jlong offOut,
jlong len) {
ssize_t res;
int err;
loff_t off_in = (loff_t) offIn;
loff_t off_out = (loff_t) offOut;
loff_t off_in = (loff_t)offIn;
loff_t off_out = (loff_t)offOut;
loff_t* p_off_in = off_in >= 0 ? &off_in : NULL;
loff_t* p_off_out = off_out >= 0 ? &off_out : NULL;
loff_t *p_off_in = off_in >= 0 ? &off_in : NULL;
loff_t *p_off_out = off_out >= 0 ? &off_out : NULL;
do {
res = splice(fd, p_off_in, fdOut, p_off_out, (size_t) len, SPLICE_F_NONBLOCK | SPLICE_F_MOVE);
res = splice(fd, p_off_in, fdOut, p_off_out, (size_t)len,
SPLICE_F_NONBLOCK | SPLICE_F_MOVE);
// keep on splicing if it was interrupted
} while (res == -1 && ((err = errno) == EINTR));
if (res < 0) {
return -err;
}
return (jint) res;
return (jint)res;
}
static jint netty_epoll_native_tcpMd5SigMaxKeyLen(JNIEnv* env, jclass clazz) {
static jint netty_epoll_native_tcpMd5SigMaxKeyLen(JNIEnv *env, jclass clazz) {
struct tcp_md5sig md5sig;
// Defensive size check
@ -519,77 +572,92 @@ static jint netty_epoll_native_tcpMd5SigMaxKeyLen(JNIEnv* env, jclass clazz) {
// JNI Method Registration Table Begin
static const JNINativeMethod statically_referenced_fixed_method_table[] = {
{ "epollet", "()I", (void *) netty_epoll_native_epollet },
{ "epollin", "()I", (void *) netty_epoll_native_epollin },
{ "epollout", "()I", (void *) netty_epoll_native_epollout },
{ "epollrdhup", "()I", (void *) netty_epoll_native_epollrdhup },
{ "epollerr", "()I", (void *) netty_epoll_native_epollerr },
{ "tcpMd5SigMaxKeyLen", "()I", (void *) netty_epoll_native_tcpMd5SigMaxKeyLen },
{ "isSupportingSendmmsg", "()Z", (void *) netty_epoll_native_isSupportingSendmmsg },
{ "isSupportingRecvmmsg", "()Z", (void *) netty_epoll_native_isSupportingRecvmmsg },
{ "isSupportingTcpFastopen", "()Z", (void *) netty_epoll_native_isSupportingTcpFastopen },
{ "kernelVersion", "()Ljava/lang/String;", (void *) netty_epoll_native_kernelVersion }
};
static const jint statically_referenced_fixed_method_table_size = sizeof(statically_referenced_fixed_method_table) / sizeof(statically_referenced_fixed_method_table[0]);
{"epollet", "()I", (void *)netty_epoll_native_epollet},
{"epollin", "()I", (void *)netty_epoll_native_epollin},
{"epollout", "()I", (void *)netty_epoll_native_epollout},
{"epollrdhup", "()I", (void *)netty_epoll_native_epollrdhup},
{"epollerr", "()I", (void *)netty_epoll_native_epollerr},
{"tcpMd5SigMaxKeyLen", "()I",
(void *)netty_epoll_native_tcpMd5SigMaxKeyLen},
{"isSupportingSendmmsg", "()Z",
(void *)netty_epoll_native_isSupportingSendmmsg},
{"isSupportingRecvmmsg", "()Z",
(void *)netty_epoll_native_isSupportingRecvmmsg},
{"isSupportingTcpFastopen", "()Z",
(void *)netty_epoll_native_isSupportingTcpFastopen},
{"kernelVersion", "()Ljava/lang/String;",
(void *)netty_epoll_native_kernelVersion}};
static const jint statically_referenced_fixed_method_table_size =
sizeof(statically_referenced_fixed_method_table) /
sizeof(statically_referenced_fixed_method_table[0]);
static const JNINativeMethod fixed_method_table[] = {
{ "eventFd", "()I", (void *) netty_epoll_native_eventFd },
{ "timerFd", "()I", (void *) netty_epoll_native_timerFd },
{ "eventFdWrite", "(IJ)V", (void *) netty_epoll_native_eventFdWrite },
{ "eventFdRead", "(I)V", (void *) netty_epoll_native_eventFdRead },
{ "timerFdRead", "(I)V", (void *) netty_epoll_native_timerFdRead },
{ "timerFdSetTime", "(III)V", (void *) netty_epoll_native_timerFdSetTime },
{ "epollCreate", "()I", (void *) netty_epoll_native_epollCreate },
{ "epollWait0", "(IJIIII)I", (void *) netty_epoll_native_epollWait0 }, // This method is deprecated!
{ "epollWait", "(IJII)I", (void *) netty_epoll_native_epollWait },
{ "epollBusyWait0", "(IJI)I", (void *) netty_epoll_native_epollBusyWait0 },
{ "epollCtlAdd0", "(III)I", (void *) netty_epoll_native_epollCtlAdd0 },
{ "epollCtlMod0", "(III)I", (void *) netty_epoll_native_epollCtlMod0 },
{ "epollCtlDel0", "(II)I", (void *) netty_epoll_native_epollCtlDel0 },
{"eventFd", "()I", (void *)netty_epoll_native_eventFd},
{"timerFd", "()I", (void *)netty_epoll_native_timerFd},
{"eventFdWrite", "(IJ)V", (void *)netty_epoll_native_eventFdWrite},
{"eventFdRead", "(I)V", (void *)netty_epoll_native_eventFdRead},
{"timerFdRead", "(I)V", (void *)netty_epoll_native_timerFdRead},
{"timerFdSetTime", "(III)V", (void *)netty_epoll_native_timerFdSetTime},
{"epollCreate", "()I", (void *)netty_epoll_native_epollCreate},
{"epollWait0", "(IJIIII)I",
(void *)netty_epoll_native_epollWait0}, // This method is deprecated!
{"epollWait", "(IJII)I", (void *)netty_epoll_native_epollWait},
{"epollBusyWait0", "(IJI)I", (void *)netty_epoll_native_epollBusyWait0},
{"epollCtlAdd0", "(III)I", (void *)netty_epoll_native_epollCtlAdd0},
{"epollCtlMod0", "(III)I", (void *)netty_epoll_native_epollCtlMod0},
{"epollCtlDel0", "(II)I", (void *)netty_epoll_native_epollCtlDel0},
// "sendmmsg0" has a dynamic signature
{ "sizeofEpollEvent", "()I", (void *) netty_epoll_native_sizeofEpollEvent },
{ "offsetofEpollData", "()I", (void *) netty_epoll_native_offsetofEpollData },
{ "splice0", "(IJIJJ)I", (void *) netty_epoll_native_splice0 }
};
static const jint fixed_method_table_size = sizeof(fixed_method_table) / sizeof(fixed_method_table[0]);
{"sizeofEpollEvent", "()I", (void *)netty_epoll_native_sizeofEpollEvent},
{"offsetofEpollData", "()I", (void *)netty_epoll_native_offsetofEpollData},
{"splice0", "(IJIJJ)I", (void *)netty_epoll_native_splice0}};
static const jint fixed_method_table_size =
sizeof(fixed_method_table) / sizeof(fixed_method_table[0]);
static jint dynamicMethodsTableSize() {
return fixed_method_table_size + 2; // 2 is for the dynamic method signatures.
}
static JNINativeMethod* createDynamicMethodsTable(const char* packagePrefix) {
char* dynamicTypeName = NULL;
static JNINativeMethod *createDynamicMethodsTable(const char *packagePrefix) {
char *dynamicTypeName = NULL;
size_t size = sizeof(JNINativeMethod) * dynamicMethodsTableSize();
JNINativeMethod* dynamicMethods = malloc(size);
JNINativeMethod *dynamicMethods = malloc(size);
if (dynamicMethods == NULL) {
return NULL;
}
memset(dynamicMethods, 0, size);
memcpy(dynamicMethods, fixed_method_table, sizeof(fixed_method_table));
JNINativeMethod* dynamicMethod = &dynamicMethods[fixed_method_table_size];
NETTY_PREPEND(packagePrefix, "io/netty/channel/epoll/NativeDatagramPacketArray$NativeDatagramPacket;II)I", dynamicTypeName, error);
JNINativeMethod *dynamicMethod = &dynamicMethods[fixed_method_table_size];
NETTY_PREPEND(packagePrefix,
"io/netty/channel/epoll/"
"NativeDatagramPacketArray$NativeDatagramPacket;II)I",
dynamicTypeName, error);
NETTY_PREPEND("(IZ[L", dynamicTypeName, dynamicMethod->signature, error);
dynamicMethod->name = "sendmmsg0";
dynamicMethod->fnPtr = (void *) netty_epoll_native_sendmmsg0;
dynamicMethod->fnPtr = (void *)netty_epoll_native_sendmmsg0;
netty_unix_util_free_dynamic_name(&dynamicTypeName);
++dynamicMethod;
NETTY_PREPEND(packagePrefix, "io/netty/channel/epoll/NativeDatagramPacketArray$NativeDatagramPacket;II)I", dynamicTypeName, error);
NETTY_PREPEND(packagePrefix,
"io/netty/channel/epoll/"
"NativeDatagramPacketArray$NativeDatagramPacket;II)I",
dynamicTypeName, error);
NETTY_PREPEND("(IZ[L", dynamicTypeName, dynamicMethod->signature, error);
dynamicMethod->name = "recvmmsg0";
dynamicMethod->fnPtr = (void *) netty_epoll_native_recvmmsg0;
dynamicMethod->fnPtr = (void *)netty_epoll_native_recvmmsg0;
netty_unix_util_free_dynamic_name(&dynamicTypeName);
return dynamicMethods;
error:
free(dynamicTypeName);
netty_unix_util_free_dynamic_methods_table(dynamicMethods, fixed_method_table_size, dynamicMethodsTableSize());
netty_unix_util_free_dynamic_methods_table(
dynamicMethods, fixed_method_table_size, dynamicMethodsTableSize());
return NULL;
}
// JNI Method Registration Table End
static jint netty_epoll_native_JNI_OnLoad(JNIEnv* env, const char* packagePrefix) {
static jint netty_epoll_native_JNI_OnLoad(JNIEnv *env,
const char *packagePrefix) {
int ret = JNI_ERR;
int limitsOnLoadCalled = 0;
int errorsOnLoadCalled = 0;
@ -597,13 +665,13 @@ static jint netty_epoll_native_JNI_OnLoad(JNIEnv* env, const char* packagePrefix
int socketOnLoadCalled = 0;
int bufferOnLoadCalled = 0;
int linuxsocketOnLoadCalled = 0;
char* nettyClassName = NULL;
char *nettyClassName = NULL;
jclass nativeDatagramPacketCls = NULL;
JNINativeMethod* dynamicMethods = NULL;
JNINativeMethod *dynamicMethods = NULL;
// We must register the statically referenced methods first!
if (netty_unix_util_register_natives(env,
packagePrefix,
if (netty_unix_util_register_natives(
env, packagePrefix,
"io/netty/channel/epoll/NativeStaticallyReferencedJniMethods",
statically_referenced_fixed_method_table,
statically_referenced_fixed_method_table_size) != 0) {
@ -615,10 +683,8 @@ static jint netty_epoll_native_JNI_OnLoad(JNIEnv* env, const char* packagePrefix
goto done;
}
if (netty_unix_util_register_natives(env,
packagePrefix,
"io/netty/channel/epoll/Native",
dynamicMethods,
if (netty_unix_util_register_natives(
env, packagePrefix, "io/netty/channel/epoll/Native", dynamicMethods,
dynamicMethodsTableSize()) != 0) {
goto done;
}
@ -654,21 +720,31 @@ static jint netty_epoll_native_JNI_OnLoad(JNIEnv* env, const char* packagePrefix
linuxsocketOnLoadCalled = 1;
// Initialize this module
NETTY_PREPEND(packagePrefix, "io/netty/channel/epoll/NativeDatagramPacketArray$NativeDatagramPacket", nettyClassName, done);
NETTY_PREPEND(
packagePrefix,
"io/netty/channel/epoll/NativeDatagramPacketArray$NativeDatagramPacket",
nettyClassName, done);
NETTY_FIND_CLASS(env, nativeDatagramPacketCls, nettyClassName, done);
netty_unix_util_free_dynamic_name(&nettyClassName);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetAddrFieldId, "addr", "[B", done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetAddrLenFieldId, "addrLen", "I", done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetScopeIdFieldId, "scopeId", "I", done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetPortFieldId, "port", "I", done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetMemoryAddressFieldId, "memoryAddress", "J", done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetCountFieldId, "count", "I", done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetAddrFieldId, "addr", "[B",
done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetAddrLenFieldId, "addrLen",
"I", done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetScopeIdFieldId, "scopeId",
"I", done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetPortFieldId, "port", "I",
done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetMemoryAddressFieldId,
"memoryAddress", "J", done);
NETTY_GET_FIELD(env, nativeDatagramPacketCls, packetCountFieldId, "count",
"I", done);
ret = NETTY_JNI_VERSION;
done:
netty_unix_util_free_dynamic_methods_table(dynamicMethods, fixed_method_table_size, dynamicMethodsTableSize());
netty_unix_util_free_dynamic_methods_table(
dynamicMethods, fixed_method_table_size, dynamicMethodsTableSize());
free(nettyClassName);
if (ret == JNI_ERR) {
@ -700,7 +776,7 @@ done:
return ret;
}
static void netty_epoll_native_JNI_OnUnLoad(JNIEnv* env) {
static void netty_epoll_native_JNI_OnUnLoad(JNIEnv *env) {
netty_unix_limits_JNI_OnUnLoad(env);
netty_unix_errors_JNI_OnUnLoad(env);
netty_unix_filedescriptor_JNI_OnUnLoad(env);
@ -717,24 +793,31 @@ static void netty_epoll_native_JNI_OnUnLoad(JNIEnv* env) {
}
// Invoked by the JVM when statically linked
static jint JNI_OnLoad_netty_transport_native_epoll0(JavaVM* vm, void* reserved) {
JNIEnv* env;
if ((*vm)->GetEnv(vm, (void**) &env, NETTY_JNI_VERSION) != JNI_OK) {
static jint JNI_OnLoad_netty_transport_native_epoll0(JavaVM *vm,
void *reserved) {
JNIEnv *env;
if ((*vm)->GetEnv(vm, (void **)&env, NETTY_JNI_VERSION) != JNI_OK) {
return JNI_ERR;
}
char* packagePrefix = NULL;
char *packagePrefix = NULL;
#ifndef NETTY_BUILD_STATIC
Dl_info dlinfo;
jint status = 0;
// We need to use an address of a function that is uniquely part of this library, so choose a static
// function. See https://github.com/netty/netty/issues/4840.
if (!dladdr((void*) netty_epoll_native_JNI_OnUnLoad, &dlinfo)) {
fprintf(stderr, "FATAL: transport-native-epoll JNI call to dladdr failed!\n");
// We need to use an address of a function that is uniquely part of this
// library, so choose a static function. See
// https://github.com/netty/netty/issues/4840.
if (!dladdr((void *)netty_epoll_native_JNI_OnUnLoad, &dlinfo)) {
fprintf(stderr,
"FATAL: transport-native-epoll JNI call to dladdr failed!\n");
return JNI_ERR;
}
packagePrefix = netty_unix_util_parse_package_prefix(dlinfo.dli_fname, "netty_transport_native_epoll", &status);
packagePrefix = netty_unix_util_parse_package_prefix(
dlinfo.dli_fname, "netty_transport_native_epoll", &status);
if (status == JNI_ERR) {
fprintf(stderr, "FATAL: transport-native-epoll JNI encountered unexpected dlinfo.dli_fname: %s\n", dlinfo.dli_fname);
fprintf(stderr,
"FATAL: transport-native-epoll JNI encountered unexpected "
"dlinfo.dli_fname: %s\n",
dlinfo.dli_fname);
return JNI_ERR;
}
#endif /* NETTY_BUILD_STATIC */
@ -744,34 +827,38 @@ static jint JNI_OnLoad_netty_transport_native_epoll0(JavaVM* vm, void* reserved)
return ret;
}
static void JNI_OnUnload_netty_transport_native_epoll0(JavaVM* vm, void* reserved) {
JNIEnv* env;
if ((*vm)->GetEnv(vm, (void**) &env, NETTY_JNI_VERSION) != JNI_OK) {
static void JNI_OnUnload_netty_transport_native_epoll0(JavaVM *vm,
void *reserved) {
JNIEnv *env;
if ((*vm)->GetEnv(vm, (void **)&env, NETTY_JNI_VERSION) != JNI_OK) {
// Something is wrong but nothing we can do about this :(
return;
}
netty_epoll_native_JNI_OnUnLoad(env);
}
// We build with -fvisibility=hidden so ensure we mark everything that needs to be visible with JNIEXPORT
// We build with -fvisibility=hidden so ensure we mark everything that needs to
// be visible with JNIEXPORT
// http://mail.openjdk.java.net/pipermail/core-libs-dev/2013-February/014549.html
// Invoked by the JVM when statically linked
JNIEXPORT jint JNI_OnLoad_netty_transport_native_epoll(JavaVM* vm, void* reserved) {
JNIEXPORT jint JNI_OnLoad_netty_transport_native_epoll(JavaVM *vm,
void *reserved) {
return JNI_OnLoad_netty_transport_native_epoll0(vm, reserved);
}
// Invoked by the JVM when statically linked
JNIEXPORT void JNI_OnUnload_netty_transport_native_epoll(JavaVM* vm, void* reserved) {
JNIEXPORT void JNI_OnUnload_netty_transport_native_epoll(JavaVM *vm,
void *reserved) {
JNI_OnUnload_netty_transport_native_epoll0(vm, reserved);
}
#ifndef NETTY_BUILD_STATIC
JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved) {
JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *reserved) {
return JNI_OnLoad_netty_transport_native_epoll0(vm, reserved);
}
JNIEXPORT void JNI_OnUnload(JavaVM* vm, void* reserved) {
JNIEXPORT void JNI_OnUnload(JavaVM *vm, void *reserved) {
JNI_OnUnload_netty_transport_native_epoll0(vm, reserved);
}
#endif /* NETTY_BUILD_STATIC */

View File

@ -35,16 +35,334 @@
<unix.common.lib.dir>${project.build.directory}/unix-common-lib</unix.common.lib.dir>
<unix.common.lib.unpacked.dir>${unix.common.lib.dir}/META-INF/native/lib</unix.common.lib.unpacked.dir>
<unix.common.include.unpacked.dir>${unix.common.lib.dir}/META-INF/native/include</unix.common.include.unpacked.dir>
<jni.compiler.args.cflags>CFLAGS=-O3 -Werror -fno-omit-frame-pointer -Wunused-variable -fvisibility=hidden
-I${unix.common.include.unpacked.dir}
<jni.compiler.args.cflags>CFLAGS=-O3 -Werror -fno-omit-frame-pointer -Wunused-variable -fvisibility=hidden -I${unix.common.include.unpacked.dir}
</jni.compiler.args.cflags>
<jni.compiler.args.ldflags>LDFLAGS=-L${unix.common.lib.unpacked.dir} -Wl,--no-as-needed -lrt -Wl,--whole-archive
-l${unix.common.lib.name} -Wl,--no-whole-archive
<jni.compiler.args.ldflags>LDFLAGS=-L${unix.common.lib.unpacked.dir} -Wl,--no-as-needed -lrt -Wl,--whole-archive -l${unix.common.lib.name} -Wl,--no-whole-archive
</jni.compiler.args.ldflags>
<nativeSourceDirectory>${project.basedir}/src/main/c</nativeSourceDirectory>
<skipTests>true</skipTests>
</properties>
<profiles>
<!--
Netty must be released from RHEL 6.8 x86_64 or compatible so that:
1) we ship x86_64 version of epoll transport officially, and
2) we ensure the ABI compatibility with older GLIBC versions.
The shared library built on a distribution with newer GLIBC
will not run on older distributions.
-->
<profile>
<id>restricted-release-io_uring</id>
<build>
<pluginManagement>
<plugins>
<plugin>
<artifactId>maven-enforcer-plugin</artifactId>
<version>1.4.1</version>
<dependencies>
<!-- Provides the 'requireFilesContent' enforcer rule. -->
<dependency>
<groupId>com.ceilfors.maven.plugin</groupId>
<artifactId>enforcer-rules</artifactId>
<version>1.2.0</version>
</dependency>
</dependencies>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<artifactId>maven-enforcer-plugin</artifactId>
<executions>
<execution>
<id>enforce-release-environment</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<requireProperty>
<regexMessage>
Release process must be performed on linux-x86_64.
</regexMessage>
<property>os.detected.classifier</property>
<regex>^linux-x86_64$</regex>
</requireProperty>
<requireFilesContent>
<message>
Release process must be performed on RHEL 6.8 or its derivatives.
</message>
<files>
<file>/etc/redhat-release</file>
</files>
<content>release 6.9</content>
</requireFilesContent>
</rules>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>linux</id>
<activation>
<os>
<family>linux</family>
</os>
</activation>
<properties>
<skipTests>false</skipTests>
</properties>
<build>
<plugins>
<plugin>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<!-- unpack the unix-common static library and include files -->
<execution>
<id>unpack</id>
<phase>generate-sources</phase>
<goals>
<goal>unpack-dependencies</goal>
</goals>
<configuration>
<includeGroupIds>${project.groupId}</includeGroupIds>
<includeArtifactIds>netty-transport-native-unix-common</includeArtifactIds>
<classifier>${jni.classifier}</classifier>
<outputDirectory>${unix.common.lib.dir}</outputDirectory>
<includes>META-INF/native/**</includes>
<overWriteReleases>false</overWriteReleases>
<overWriteSnapshots>true</overWriteSnapshots>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.fusesource.hawtjni</groupId>
<artifactId>maven-hawtjni-plugin</artifactId>
<executions>
<execution>
<id>build-native-lib</id>
<configuration>
<name>netty_transport_native_io_uring_${os.detected.arch}</name>
<nativeSourceDirectory>${nativeSourceDirectory}</nativeSourceDirectory>
<libDirectory>${project.build.outputDirectory}</libDirectory>
<!-- We use Maven's artifact classifier instead.
This hack will make the hawtjni plugin to put the native library
under 'META-INF/native' rather than 'META-INF/native/${platform}'. -->
<platform>.</platform>
<configureArgs>
<arg>${jni.compiler.args.ldflags}</arg>
<arg>${jni.compiler.args.cflags}</arg>
<configureArg>--libdir=${project.build.directory}/native-build/target/lib</configureArg>
</configureArgs>
</configuration>
<goals>
<goal>generate</goal>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<!-- Generate the JAR that contains the native library in it. -->
<execution>
<id>native-jar</id>
<goals>
<goal>jar</goal>
</goals>
<configuration>
<archive>
<manifest>
<addDefaultImplementationEntries>true</addDefaultImplementationEntries>
</manifest>
<manifestEntries>
<Bundle-NativeCode>META-INF/native/libnetty_transport_native_io_uring_${os.detected.arch}.so; osname=Linux; processor=${os.detected.arch},*</Bundle-NativeCode>
<Automatic-Module-Name>${javaModuleName}</Automatic-Module-Name>
</manifestEntries>
<index>true</index>
<manifestFile>${project.build.outputDirectory}/META-INF/MANIFEST.MF</manifestFile>
</archive>
<classifier>${jni.classifier}</classifier>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
<dependencies>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-transport-native-unix-common</artifactId>
<version>${project.version}</version>
<classifier>${jni.classifier}</classifier>
<!--
The unix-common with classifier dependency is optional because it is not a runtime dependency, but a build time
dependency to get the static library which is built directly into the shared library generated by this project.
-->
<optional>true</optional>
</dependency>
</dependencies>
</profile>
<profile>
<id>linux-aarch64</id>
<properties>
<jni.classifier>${os.detected.name}-aarch64</jni.classifier>
</properties>
<build>
<pluginManagement>
<plugins>
<plugin>
<artifactId>maven-enforcer-plugin</artifactId>
<version>1.4.1</version>
<dependencies>
<!-- Provides the 'requireFilesContent' enforcer rule. -->
<dependency>
<groupId>com.ceilfors.maven.plugin</groupId>
<artifactId>enforcer-rules</artifactId>
<version>1.2.0</version>
</dependency>
</dependencies>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<artifactId>maven-enforcer-plugin</artifactId>
<executions>
<execution>
<id>enforce-release-environment</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<requireProperty>
<regexMessage>
Cross compile and Release process must be performed on linux-x86_64.
</regexMessage>
<property>os.detected.classifier</property>
<regex>^linux-x86_64.*</regex>
</requireProperty>
<requireFilesContent>
<message>
Cross compile and Release process must be performed on RHEL 7.6 or its derivatives.
</message>
<files>
<file>/etc/redhat-release</file>
</files>
<content>release 7.6</content>
</requireFilesContent>
</rules>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<!-- unpack the unix-common static library and include files -->
<execution>
<id>unpack</id>
<phase>generate-sources</phase>
<goals>
<goal>unpack-dependencies</goal>
</goals>
<configuration>
<includeGroupIds>${project.groupId}</includeGroupIds>
<includeArtifactIds>netty-transport-native-unix-common</includeArtifactIds>
<classifier>${jni.classifier}</classifier>
<outputDirectory>${unix.common.lib.dir}</outputDirectory>
<includes>META-INF/native/**</includes>
<overWriteReleases>false</overWriteReleases>
<overWriteSnapshots>true</overWriteSnapshots>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.fusesource.hawtjni</groupId>
<artifactId>maven-hawtjni-plugin</artifactId>
<executions>
<execution>
<id>build-native-lib</id>
<configuration>
<name>netty_transport_native_io_uring_aarch_64</name>
<nativeSourceDirectory>${nativeSourceDirectory}</nativeSourceDirectory>
<libDirectory>${project.build.outputDirectory}</libDirectory>
<!-- We use Maven's artifact classifier instead.
This hack will make the hawtjni plugin to put the native library
under 'META-INF/native' rather than 'META-INF/native/${platform}'. -->
<platform>.</platform>
<configureArgs>
<arg>${jni.compiler.args.ldflags}</arg>
<arg>${jni.compiler.args.cflags}</arg>
<configureArg>--libdir=${project.build.directory}/native-build/target/lib</configureArg>
<configureArg>--host=aarch64-linux-gnu</configureArg>
</configureArgs>
</configuration>
<goals>
<goal>generate</goal>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<!-- Generate the JAR that contains the native library in it. -->
<execution>
<id>native-jar</id>
<goals>
<goal>jar</goal>
</goals>
<configuration>
<archive>
<manifest>
<addDefaultImplementationEntries>true</addDefaultImplementationEntries>
</manifest>
<manifestEntries>
<Bundle-NativeCode>META-INF/native/libnetty_transport_native_io_uring_aarch_64.so; osname=Linux; processor=aarch_64,*</Bundle-NativeCode>
<Automatic-Module-Name>${javaModuleName}</Automatic-Module-Name>
</manifestEntries>
<index>true</index>
<manifestFile>${project.build.outputDirectory}/META-INF/MANIFEST.MF</manifestFile>
</archive>
<classifier>${jni.classifier}</classifier>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
<dependencies>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-transport-native-unix-common</artifactId>
<version>${project.version}</version>
<classifier>${jni.classifier}</classifier>
<!--
The unix-common with classifier dependency is optional because it is not a runtime dependency, but a build time
dependency to get the static library which is built directly into the shared library generated by this project.
-->
<optional>true</optional>
</dependency>
</dependencies>
</profile>
</profiles>
<dependencies>
<dependency>
<groupId>io.netty</groupId>
@ -86,4 +404,42 @@
</dependency>
</dependencies>
<build>
<plugins>
<!-- Also include c files in source jar -->
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<executions>
<execution>
<phase>generate-sources</phase>
<goals>
<goal>add-source</goal>
</goals>
<configuration>
<sources>
<source>${nativeSourceDirectory}</source>
</sources>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<!-- Generate the fallback JAR that does not contain the native library. -->
<execution>
<id>default-jar</id>
<configuration>
<excludes>
<exclude>META-INF/native/**</exclude>
</excludes>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,34 @@
/* SPDX-License-Identifier: MIT */
#ifndef LIBURING_BARRIER_H
#define LIBURING_BARRIER_H
#include <stdatomic.h>
/*
From the kernel documentation file refcount-vs-atomic.rst:
A RELEASE memory ordering guarantees that all prior loads and
stores (all po-earlier instructions) on the same CPU are completed
before the operation. It also guarantees that all po-earlier
stores on the same CPU and all propagated stores from other CPUs
must propagate to all other CPUs before the release operation
(A-cumulative property). This is implemented using
:c:func:`smp_store_release`.
An ACQUIRE memory ordering guarantees that all post loads and
stores (all po-later instructions) on the same CPU are
completed after the acquire operation. It also guarantees that all
po-later stores on the same CPU must propagate to all other CPUs
after the acquire operation executes. This is implemented using
:c:func:`smp_acquire__after_ctrl_dep`.
*/
#define IO_URING_WRITE_ONCE(var, val) \
atomic_store_explicit(&(var), (val), memory_order_relaxed)
#define IO_URING_READ_ONCE(var) \
atomic_load_explicit(&(var), memory_order_relaxed)
#define io_uring_smp_store_release(p, v) \
atomic_store_explicit((p), (v), memory_order_release)
#define io_uring_smp_load_acquire(p) \
atomic_load_explicit((p), memory_order_acquire)
#endif /* defined(LIBURING_BARRIER_H) */
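
A small illustration of the single-producer/single-consumer pattern these macros exist for (not part of the commit; ring_tail, slots, publish and consume are made-up names). The same release/acquire pairing appears around sq->ktail and the CQ tail in the ring code of this commit:

/* Illustrative only: fill an entry, then make it visible via the tail. */
#include <stdatomic.h>
#include "barrier.h"

static _Atomic unsigned ring_tail; /* plays the role of the kernel-visible ktail */
static int slots[64];              /* ring entries, mask = 63 */

/* producer: fill the slot first, then release-store the new tail so a
   consumer that observes the tail also sees the slot contents */
static void publish(unsigned tail, int value) {
  slots[tail & 63] = value;
  io_uring_smp_store_release(&ring_tail, tail + 1);
}

/* consumer: acquire-load the tail; this pairs with the release store above */
static int consume(unsigned head, int *value) {
  unsigned tail = io_uring_smp_load_acquire(&ring_tail);
  if (head == tail) {
    return 0; /* ring empty */
  }
  *value = slots[head & 63];
  return 1;
}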

View File

@ -1,9 +1,9 @@
/* SPDX-License-Identifier: MIT */
#include "barrier.h"
#include <linux/io_uring.h>
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#ifndef LIB_TEST
#define LIB_TEST
@ -44,67 +44,39 @@ struct io_uring {
int ring_fd;
};
void io_uring_unmap_rings(struct io_uring_sq *sq, struct io_uring_cq *cq) {
munmap(sq->ring_ptr, sq->ring_sz);
if (cq->ring_ptr && cq->ring_ptr != sq->ring_ptr)
munmap(cq->ring_ptr, cq->ring_sz);
#define io_uring_for_each_cqe(ring, head, cqe) \
/* \
* io_uring_smp_load_acquire() enforces the order of tail \
* and CQE reads. \
*/ \
for (head = *(ring)->cq.khead; \
(cqe = (head != io_uring_smp_load_acquire((ring)->cq.ktail) \
? &(ring)->cq.cqes[head & (*(ring)->cq.kring_mask)] \
: NULL)); \
head++)
/*
* Must be called after io_uring_for_each_cqe()
*/
static inline void io_uring_cq_advance(struct io_uring *ring, unsigned nr) {
if (nr) {
struct io_uring_cq *cq = &ring->cq;
/*
* Ensure that the kernel only sees the new value of the head
* index after the CQEs have been read.
*/
io_uring_smp_store_release(cq->khead, *cq->khead + nr);
}
}
int io_uring_mmap(int fd, struct io_uring_params *p, struct io_uring_sq *sq,
struct io_uring_cq *cq) {
size_t size;
int ret;
sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
if (p->features & IORING_FEAT_SINGLE_MMAP) {
if (cq->ring_sz > sq->ring_sz)
sq->ring_sz = cq->ring_sz;
cq->ring_sz = sq->ring_sz;
}
sq->ring_ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
if (sq->ring_ptr == MAP_FAILED)
return -errno;
if (p->features & IORING_FEAT_SINGLE_MMAP) {
cq->ring_ptr = sq->ring_ptr;
} else {
cq->ring_ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
if (cq->ring_ptr == MAP_FAILED) {
cq->ring_ptr = NULL;
ret = -errno;
goto err;
}
}
sq->khead = sq->ring_ptr + p->sq_off.head;
sq->ktail = sq->ring_ptr + p->sq_off.tail;
sq->kring_mask = sq->ring_ptr + p->sq_off.ring_mask;
sq->kring_entries = sq->ring_ptr + p->sq_off.ring_entries;
sq->kflags = sq->ring_ptr + p->sq_off.flags;
sq->kdropped = sq->ring_ptr + p->sq_off.dropped;
sq->array = sq->ring_ptr + p->sq_off.array;
size = p->sq_entries * sizeof(struct io_uring_sqe);
sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
fd, IORING_OFF_SQES);
if (sq->sqes == MAP_FAILED) {
ret = -errno;
err:
io_uring_unmap_rings(sq, cq);
return ret;
}
cq->khead = cq->ring_ptr + p->cq_off.head;
cq->ktail = cq->ring_ptr + p->cq_off.tail;
cq->kring_mask = cq->ring_ptr + p->cq_off.ring_mask;
cq->kring_entries = cq->ring_ptr + p->cq_off.ring_entries;
cq->koverflow = cq->ring_ptr + p->cq_off.overflow;
cq->cqes = cq->ring_ptr + p->cq_off.cqes;
return 0;
/*
* Must be called after io_uring_{peek,wait}_cqe() after the cqe has
* been processed by the application.
*/
static void io_uring_cqe_seen(struct io_uring *ring, struct io_uring_cqe *cqe) {
if (cqe)
io_uring_cq_advance(ring, 1);
}
#endif
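
A hedged usage sketch (not in the commit) of the completion side defined above: iterate the pending CQEs with io_uring_for_each_cqe() and publish the new head with io_uring_cq_advance() only after they have been read, as the comments require:

/* Illustrative only; assumes the definitions above in this header. */
#include <stdio.h>

static void drain_completions(struct io_uring *ring) {
  struct io_uring_cqe *cqe;
  unsigned head;
  unsigned seen = 0;

  io_uring_for_each_cqe(ring, head, cqe) {
    /* user_data carries whatever id was attached to the SQE */
    printf("cqe user_data=%llu res=%d\n", (unsigned long long)cqe->user_data,
           cqe->res);
    seen++;
  }
  /* advance the CQ head only after all CQEs have been processed */
  io_uring_cq_advance(ring, seen);
}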

View File

@ -1,9 +1,29 @@
#include <jni.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#define _GNU_SOURCE // RTLD_DEFAULT
#include "io_uring.h"
#include "netty_unix_errors.h"
#include "netty_unix_filedescriptor.h"
#include "netty_unix_jni.h"
#include "netty_unix_socket.h"
#include "netty_unix_util.h"
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <jni.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "syscall.h"
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
@ -13,9 +33,493 @@
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <syscall.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
// From netty jni unix socket
static jsize addressLength(const struct sockaddr_storage *addr) {
int len = netty_unix_socket_ipAddressLength(addr);
if (len == 4) {
// Only encode port into it
return len + 4;
}
// we encode port + scope into it
return len + 8;
}
/*
* Sync internal state with kernel ring state on the SQ side. Returns the
* number of pending items in the SQ ring, for the shared ring.
*/
int io_uring_flush_sq(struct io_uring *ring) {
struct io_uring_sq *sq = &ring->sq;
const unsigned mask = *sq->kring_mask;
unsigned ktail, to_submit;
if (sq->sqe_head == sq->sqe_tail) {
ktail = *sq->ktail;
goto out;
}
/*
* Fill in sqes that we have queued up, adding them to the kernel ring
*/
ktail = *sq->ktail;
to_submit = sq->sqe_tail - sq->sqe_head;
while (to_submit--) {
sq->array[ktail & mask] = sq->sqe_head & mask;
ktail++;
sq->sqe_head++;
}
/*
* Ensure that the kernel sees the SQE updates before it sees the tail
* update.
*/
io_uring_smp_store_release(sq->ktail, ktail);
out:
return ktail - *sq->khead;
}
// From netty unix socket jni
static void initInetSocketAddressArray(JNIEnv *env,
const struct sockaddr_storage *addr,
jbyteArray bArray, int offset,
jsize len) {
int port;
if (addr->ss_family == AF_INET) {
struct sockaddr_in *s = (struct sockaddr_in *)addr;
port = ntohs(s->sin_port);
// Encode address and port into the array
unsigned char a[4];
a[0] = port >> 24;
a[1] = port >> 16;
a[2] = port >> 8;
a[3] = port;
(*env)->SetByteArrayRegion(env, bArray, offset, 4,
(jbyte *)&s->sin_addr.s_addr);
(*env)->SetByteArrayRegion(env, bArray, offset + 4, 4, (jbyte *)&a);
} else {
struct sockaddr_in6 *s = (struct sockaddr_in6 *)addr;
port = ntohs(s->sin6_port);
if (len == 8) {
// IPv4-mapped-on-IPv6
// Encode port into the array and write it into the jbyteArray
unsigned char a[4];
a[0] = port >> 24;
a[1] = port >> 16;
a[2] = port >> 8;
a[3] = port;
// we only need the last 4 bytes for mapped address
(*env)->SetByteArrayRegion(env, bArray, offset, 4,
(jbyte *)&(s->sin6_addr.s6_addr[12]));
(*env)->SetByteArrayRegion(env, bArray, offset + 4, 4, (jbyte *)&a);
} else {
// Encode scopeid and port into the array
unsigned char a[8];
a[0] = s->sin6_scope_id >> 24;
a[1] = s->sin6_scope_id >> 16;
a[2] = s->sin6_scope_id >> 8;
a[3] = s->sin6_scope_id;
a[4] = port >> 24;
a[5] = port >> 16;
a[6] = port >> 8;
a[7] = port;
(*env)->SetByteArrayRegion(env, bArray, offset, 16,
(jbyte *)&(s->sin6_addr.s6_addr));
(*env)->SetByteArrayRegion(env, bArray, offset + 16, 8, (jbyte *)&a);
}
}
}
static struct io_uring_sqe *__io_uring_get_sqe(struct io_uring_sq *sq,
unsigned int __head) {
unsigned int __next = (sq)->sqe_tail + 1;
struct io_uring_sqe *__sqe = NULL;
if (__next - __head <= *(sq)->kring_entries) {
__sqe = &(sq)->sqes[(sq)->sqe_tail & *(sq)->kring_mask];
if (!__sqe) {
printf("SQE is null \n");
}
(sq)->sqe_tail = __next;
}
return __sqe;
}
struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring) {
struct io_uring_sq *sq = &ring->sq;
return __io_uring_get_sqe(sq, sq->sqe_head);
}
static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
const void *addr, unsigned len,
__u64 offset) {
sqe->opcode = op;
sqe->flags = 0;
sqe->ioprio = 0;
sqe->fd = fd;
sqe->off = offset;
sqe->addr = (unsigned long)addr;
sqe->len = len;
sqe->rw_flags = 0;
sqe->user_data = 0;
sqe->__pad2[0] = sqe->__pad2[1] = sqe->__pad2[2] = 0;
}
void io_uring_unmap_rings(struct io_uring_sq *sq, struct io_uring_cq *cq) {
munmap(sq->ring_ptr, sq->ring_sz);
if (cq->ring_ptr && cq->ring_ptr != sq->ring_ptr)
munmap(cq->ring_ptr, cq->ring_sz);
}
int io_uring_mmap(int fd, struct io_uring_params *p, struct io_uring_sq *sq,
struct io_uring_cq *cq) {
size_t size;
int ret;
sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
if (p->features & IORING_FEAT_SINGLE_MMAP) {
if (cq->ring_sz > sq->ring_sz)
sq->ring_sz = cq->ring_sz;
cq->ring_sz = sq->ring_sz;
}
sq->ring_ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
if (sq->ring_ptr == MAP_FAILED)
return -errno;
if (p->features & IORING_FEAT_SINGLE_MMAP) {
cq->ring_ptr = sq->ring_ptr;
} else {
cq->ring_ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
if (cq->ring_ptr == MAP_FAILED) {
cq->ring_ptr = NULL;
ret = -errno;
goto err;
}
}
sq->khead = sq->ring_ptr + p->sq_off.head;
sq->ktail = sq->ring_ptr + p->sq_off.tail;
sq->kring_mask = sq->ring_ptr + p->sq_off.ring_mask;
sq->kring_entries = sq->ring_ptr + p->sq_off.ring_entries;
sq->kflags = sq->ring_ptr + p->sq_off.flags;
sq->kdropped = sq->ring_ptr + p->sq_off.dropped;
sq->array = sq->ring_ptr + p->sq_off.array;
size = p->sq_entries * sizeof(struct io_uring_sqe);
sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
fd, IORING_OFF_SQES);
if (sq->sqes == MAP_FAILED) {
ret = -errno;
err:
io_uring_unmap_rings(sq, cq);
return ret;
}
cq->khead = cq->ring_ptr + p->cq_off.head;
cq->ktail = cq->ring_ptr + p->cq_off.tail;
cq->kring_mask = cq->ring_ptr + p->cq_off.ring_mask;
cq->kring_entries = cq->ring_ptr + p->cq_off.ring_entries;
cq->koverflow = cq->ring_ptr + p->cq_off.overflow;
cq->cqes = cq->ring_ptr + p->cq_off.cqes;
return 0;
}
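// Wire up a struct io_uring for an already created ring fd by mapping its
// rings; on success the ring fd and setup flags are recorded in the struct.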
void setup_io_uring(int ring_fd, struct io_uring *io_uring_ring,
struct io_uring_params *p) {
int ret;
ret = io_uring_mmap(ring_fd, p, &io_uring_ring->sq, &io_uring_ring->cq);
if (!ret) {
io_uring_ring->flags = p->flags;
io_uring_ring->ring_fd = ring_fd;
} else {
// io_uring_mmap() failed and errno still holds the mmap error
perror("setup_io_uring error");
}
}
void io_uring_prep_write(struct io_uring_sqe *sqe, int fd, const void *buf,
unsigned nbytes, off_t offset) {
io_uring_prep_rw(IORING_OP_WRITE, sqe, fd, buf, nbytes, offset);
}
void io_uring_prep_read(struct io_uring_sqe *sqe, int fd, void *buf,
unsigned nbytes, off_t offset) {
io_uring_prep_rw(IORING_OP_READ, sqe, fd, buf, nbytes, offset);
}
void io_uring_sqe_set_data(struct io_uring_sqe *sqe, unsigned long data) {
sqe->user_data = (unsigned long)data;
}
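// queue_read()/queue_write() only prepare an SQE for the [pos, limit) region
// of a Java direct buffer and tag it with the event id via user_data; nothing
// reaches the kernel until io_uring_submit() is called.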
void queue_read(int file_fd, struct io_uring *ring, void *buffer, jint event_id,
jint pos, jint limit) {
struct io_uring_sqe *sqe = NULL;
sqe = io_uring_get_sqe(ring);
if (!sqe) {
fprintf(stderr, "Could not get SQE.\n");
return;
}
io_uring_prep_read(sqe, file_fd, buffer + pos, (size_t)(limit - pos), 0);
io_uring_sqe_set_data(sqe, (int)event_id);
}
void queue_write(int file_fd, struct io_uring *ring, void *buffer,
jint event_id, jint pos, jint limit) {
struct io_uring_sqe *sqe;
sqe = io_uring_get_sqe(ring);
if (!sqe) {
fprintf(stderr, "Could not get SQE.\n");
return;
}
io_uring_prep_write(sqe, file_fd, buffer + pos, (size_t)(limit - pos), 0);
io_uring_sqe_set_data(sqe, (unsigned long)event_id);
}
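// Peek for a completed CQE without entering the kernel; io_uring_wait_cqe()
// falls back to io_uring_enter() with IORING_ENTER_GETEVENTS when nothing is
// ready and returns the CQE address (or a negative value on failure) so that
// the Java side can read user_data and res from it.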
int __io_uring_peek_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr) {
struct io_uring_cqe *cqe;
unsigned head;
int err = 0;
do {
io_uring_for_each_cqe(ring, head, cqe) break;
break;
} while (1);
*cqe_ptr = cqe;
return err;
}
long io_uring_wait_cqe(struct io_uring *ring, unsigned wait_nr) {
struct io_uring_cqe *cqe = NULL;
int ret = 0, err;
unsigned flags = 0;
err = __io_uring_peek_cqe(ring, &cqe);
if (err) {
printf("error peek \n");
return -errno;
}
if (cqe) {
return (long)cqe;
}
flags = IORING_ENTER_GETEVENTS;
ret = sys_io_uring_enter(ring->ring_fd, 0, wait_nr, flags, NULL);
if (ret < 0) {
return -1;
} else if (ret == 0) {
err = __io_uring_peek_cqe(ring, &cqe);
if (err) {
printf("error peek \n");
return -1;
}
if (cqe) {
return (long)cqe;
}
}
return -1;
}
/*
* Submit sqes acquired from io_uring_get_sqe() to the kernel.
*
* Returns number of sqes submitted
*/
int io_uring_submit(struct io_uring *ring) {
int submitted = io_uring_flush_sq(ring);
int ret;
ret = sys_io_uring_enter(ring->ring_fd, submitted, 0, 0, NULL);
if (ret < 0)
return -errno;
return ret;
}
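/*
 * Illustrative use of the helpers above (a sketch, not extra runtime code):
 *
 * struct io_uring ring = {0};
 * struct io_uring_params p = {0};
 * int ring_fd = sys_io_uring_setup(32, &p);
 * setup_io_uring(ring_fd, &ring, &p);
 * struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 * io_uring_prep_write(sqe, fd, buf, len, 0);
 * io_uring_sqe_set_data(sqe, event_id);
 * io_uring_submit(&ring);
 * struct io_uring_cqe *cqe = (struct io_uring_cqe *)io_uring_wait_cqe(&ring, 1);
 * // cqe->user_data carries the event id, cqe->res the I/O result
 * io_uring_cqe_seen(&ring, cqe);
 *
 * fd, buf, len and event_id stand in for the values passed down from the JNI
 * entry points below.
 */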
// all jni methods
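// Allocates the struct io_uring on the heap and hands its address back to
// Java as a jlong; the event loop stores that pointer and passes it into
// every subsequent native call. It is released again in netty_close().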
static jlong netty_io_uring_setup(JNIEnv *env, jclass class1, jint entries) {
struct io_uring_params p;
memset(&p, 0, sizeof(p));
int ring_fd = sys_io_uring_setup((int)entries, &p);
struct io_uring *io_uring_ring =
(struct io_uring *)malloc(sizeof(struct io_uring));
io_uring_ring->flags = 0;
io_uring_ring->sq.sqe_tail = 0;
io_uring_ring->sq.sqe_head = 0;
setup_io_uring(ring_fd, io_uring_ring, &p);
return (long)io_uring_ring;
}
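// The read/write entry points only queue an SQE for the given buffer region;
// the Java caller is expected to follow up with ioUringSubmit().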
static jint netty_read_operation(JNIEnv *jenv, jclass clazz, jlong uring,
jlong fd, jlong event_id, jlong buffer_address,
jint pos, jint limit) {
queue_read((int)fd, (struct io_uring *)uring, (void *)buffer_address,
event_id, pos, limit);
return 0;
}
static jint netty_write_operation(JNIEnv *jenv, jclass clazz, jlong uring,
jlong fd, jlong event_id,
jlong buffer_address, jint pos, jint limit) {
queue_write((int)fd, (struct io_uring *)uring, (void *)buffer_address,
event_id, pos, limit);
return 0;
}
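// Accepts a connection and encodes the remote address into the supplied byte
// array (length byte first, then the address as written by
// initInetSocketAddressArray()); returns the accepted fd or a negative errno
// value.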
static jint netty_accept_operation(JNIEnv *env, jclass clazz, jlong uring,
jlong fd, jbyteArray byte_array) {
jint socketFd;
jsize len;
jbyte len_b;
int err;
struct sockaddr_storage addr;
socklen_t address_len = sizeof(addr);
do {
socketFd = accept(fd, (struct sockaddr *)&addr, &address_len);
} while (socketFd == -1 && ((err = errno) == EINTR));
if (socketFd == -1) {
return -err;
}
len = addressLength(&addr);
len_b = (jbyte)len;
// Fill in remote address details
(*env)->SetByteArrayRegion(env, byte_array, 0, 1, (jbyte *)&len_b);
initInetSocketAddressArray(env, &addr, byte_array, 1, len);
return socketFd;
}
static jlong netty_wait_cqe(JNIEnv *env, jclass clazz, jlong uring) {
return (jlong)io_uring_wait_cqe((struct io_uring *)uring, 1);
}
static jlong netty_delete_cqe(JNIEnv *env, jclass clazz, jlong uring,
jlong cqe_address) {
struct io_uring_cqe *cqe = (struct io_uring_cqe *)cqe_address;
io_uring_cqe_seen((struct io_uring *)uring, cqe);
return 0;
}
static jlong netty_get_event_id(JNIEnv *env, jclass classz, jlong cqe_address) {
struct io_uring_cqe *cqe = (struct io_uring_cqe *)cqe_address;
return (jlong)cqe->user_data;
}
static jint netty_get_res(JNIEnv *env, jclass classz, jlong cqe_address) {
struct io_uring_cqe *cqe = (struct io_uring_cqe *)cqe_address;
return (jint)cqe->res;
}
static jlong netty_close(JNIEnv *env, jclass classz, jlong io_uring) {
struct io_uring *ring = (struct io_uring *)io_uring;
struct io_uring_sq *sq = &ring->sq;
struct io_uring_cq *cq = &ring->cq;
munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
io_uring_unmap_rings(sq, cq);
close(ring->ring_fd);
// the struct io_uring itself was malloc'ed in netty_io_uring_setup()
free(ring);
return 0;
}
static jlong netty_submit(JNIEnv *jenv, jclass classz, jlong uring) {
return io_uring_submit((struct io_uring *)uring);
}
static jlong netty_create_file(JNIEnv *env, jclass class) {
return open("io-uring-test.txt", O_RDWR | O_TRUNC | O_CREAT, 0644);
}
// end jni methods
static void netty_io_uring_native_JNI_OnUnLoad(JNIEnv *env) {
// OnUnLoad
}
// JNI Registered Methods Begin
static jlong netty_io_uring_close(JNIEnv *env, jclass clazz, jlong io_uring) {
// matches the registered (J)J signature and delegates to netty_close(),
// which unmaps the rings and closes the ring fd
return netty_close(env, clazz, io_uring);
}
// JNI Registered Methods End
// JNI Method Registration Table Begin
static const JNINativeMethod method_table[] = {
{"ioUringSetup", "(I)J", (void *)netty_io_uring_setup},
{"ioUringClose", "(J)J", (void *)netty_io_uring_close},
{"ioUringRead", "(JJJJII)I", (void *)netty_read_operation},
{"ioUringWrite", "(JJJJII)I", (void *)netty_write_operation},
{"ioUringAccept", "(JJ[B)I", (void *)netty_accept_operation},
{"ioUringWaitCqe", "(J)J", (void *)netty_wait_cqe},
{"ioUringDeleteCqe", "(JJ)J", (void *)netty_delete_cqe},
{"ioUringGetEventId", "(J)J", (void *)netty_get_event_id},
{"ioUringGetRes", "(J)I", (void *)netty_get_res},
{"ioUringSubmit", "(J)J", (void *)netty_submit},
{"createFile", "()J", (void *)netty_create_file}};
static const jint method_table_size =
sizeof(method_table) / sizeof(method_table[0]);
// JNI Method Registration Table End
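// These entries are bound to the native methods declared in
// io.netty.channel.uring.Native when JNI_OnLoad() registers the table below.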
JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *reserved) {
JNIEnv *env;
if ((*vm)->GetEnv(vm, (void **)&env, NETTY_JNI_VERSION) != JNI_OK) {
return JNI_ERR;
}
char *packagePrefix = NULL;
Dl_info dlinfo;
jint status = 0;
if (!dladdr((void *)netty_io_uring_native_JNI_OnUnLoad, &dlinfo)) {
fprintf(stderr,
"FATAL: transport-native-io_uring JNI call to dladdr failed!\n");
return JNI_ERR;
}
packagePrefix = netty_unix_util_parse_package_prefix(
dlinfo.dli_fname, "netty_transport_native_io_uring", &status);
if (status == JNI_ERR) {
fprintf(stderr,
"FATAL: netty_transport_native_io_uring JNI encountered unexpected "
"dlinfo.dli_fname: %s\n",
dlinfo.dli_fname);
return JNI_ERR;
}
if (netty_unix_util_register_natives(env, packagePrefix,
"io/netty/channel/uring/Native",
method_table, method_table_size) != 0) {
fprintf(stderr, "FATAL: netty register natives error\n");
free(packagePrefix);
return JNI_ERR;
}
free(packagePrefix);
return NETTY_JNI_VERSION;
}

View File

@ -0,0 +1,50 @@
/* SPDX-License-Identifier: MIT */
/*
* Will go away once libc support is there
*/
#include "syscall.h"
#include <signal.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>
#ifdef __alpha__
/*
* alpha is the only exception, all other architectures
* have common numbers for new system calls.
*/
#ifndef __NR_io_uring_setup
#define __NR_io_uring_setup 535
#endif
#ifndef __NR_io_uring_enter
#define __NR_io_uring_enter 536
#endif
#ifndef __NR_io_uring_register
#define __NR_io_uring_register 537
#endif
#else /* !__alpha__ */
#ifndef __NR_io_uring_setup
#define __NR_io_uring_setup 425
#endif
#ifndef __NR_io_uring_enter
#define __NR_io_uring_enter 426
#endif
#ifndef __NR_io_uring_register
#define __NR_io_uring_register 427
#endif
#endif
int sys_io_uring_register(int fd, unsigned opcode, const void *arg,
unsigned nr_args) {
return syscall(__NR_io_uring_register, fd, opcode, arg, nr_args);
}
int sys_io_uring_setup(unsigned entries, struct io_uring_params *p) {
return syscall(__NR_io_uring_setup, entries, p);
}
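// The sixth syscall argument is the size of the signal set in bytes, which is
// what the kernel expects for the optional sigset_t parameter.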
int sys_io_uring_enter(int fd, unsigned to_submit, unsigned min_complete,
unsigned flags, sigset_t *sig) {
return syscall(__NR_io_uring_enter, fd, to_submit, min_complete, flags, sig,
_NSIG / 8);
}

View File

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: MIT */
#include <linux/io_uring.h>
#include <signal.h>
#ifndef LIBURING_SYSCALL_H
#define LIBURING_SYSCALL_H
/*
* System calls
*/
// extern int sys_io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int sys_io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int sys_io_uring_enter(int fd, unsigned to_submit, unsigned min_complete,
unsigned flags, sigset_t *sig);
extern int sys_io_uring_register(int fd, unsigned int opcode, const void *arg,
unsigned int nr_args);
#endif

View File

@ -91,13 +91,10 @@ public abstract class AbstractIOUringChannel extends AbstractChannel implements
}
}
protected final ByteBuf newDirectBuffer(ByteBuf buf) {
return newDirectBuffer(buf, buf);
}
protected final ByteBuf newDirectBuffer(Object holder, ByteBuf buf) {
final int readableBytes = buf.readableBytes();
if (readableBytes == 0) {
@ -129,15 +126,13 @@ public abstract class AbstractIOUringChannel extends AbstractChannel implements
@Override
protected void doDisconnect() throws Exception {
}
@Override
protected void doClose() throws Exception {
}
//Channel/ChannelHandlerContext.read() was called
// Channel/ChannelHandlerContext.read() was called
@Override
protected void doBeginRead() throws Exception {
final AbstractUringUnsafe unsafe = (AbstractUringUnsafe) unsafe();
@ -160,7 +155,7 @@ public abstract class AbstractIOUringChannel extends AbstractChannel implements
while (readableBytes > 0) {
doWriteBytes(buf);
//have to move it to the eventloop
// have to move it to the eventloop
int newReadableBytes = buf.readableBytes();
in.progress(readableBytes - newReadableBytes);
readableBytes = newReadableBytes;
@ -190,7 +185,6 @@ public abstract class AbstractIOUringChannel extends AbstractChannel implements
}
};
/**
* Create a new {@link } instance.
*
@ -200,7 +194,6 @@ public abstract class AbstractIOUringChannel extends AbstractChannel implements
return new IOUringRecvByteAllocatorHandle(handle);
}
@Override
public IOUringRecvByteAllocatorHandle recvBufAllocHandle() {
if (allocHandle == null) {
@ -212,7 +205,6 @@ public abstract class AbstractIOUringChannel extends AbstractChannel implements
@Override
public void connect(final SocketAddress remoteAddress, final SocketAddress localAddress,
final ChannelPromise promise) {
}
final void executeUringReadOperator() {
@ -225,16 +217,14 @@ public abstract class AbstractIOUringChannel extends AbstractChannel implements
public abstract void uringEventExecution();
}
@Override
protected Object filterOutboundMessage(Object msg) {
if (msg instanceof ByteBuf) {
ByteBuf buf = (ByteBuf) msg;
return UnixChannelUtil.isBufferCopyNeededForWrite(buf)? newDirectBuffer(buf) : buf;
return UnixChannelUtil.isBufferCopyNeededForWrite(buf) ? newDirectBuffer(buf) : buf;
}
throw new UnsupportedOperationException(
"unsupported message type");
throw new UnsupportedOperationException("unsupported message type");
}
@Override

View File

@ -82,7 +82,7 @@ public class AbstractIOUringServerChannel extends AbstractIOUringChannel impleme
if (socket.acceptEvent(getIoUring(), eventId, acceptedAddress) == 0) {
ioUringEventLoop.addNewEvent(event);
Native.submit(getIoUring());
Native.ioUringSubmit(getIoUring());
}
}
}

View File

@ -19,6 +19,7 @@ import io.netty.channel.EventLoopGroup;
import io.netty.channel.SingleThreadEventLoop;
import io.netty.util.collection.IntObjectHashMap;
import io.netty.util.collection.IntObjectMap;
import io.netty.util.collection.LongObjectHashMap;
import io.netty.util.concurrent.RejectedExecutionHandler;
import java.util.HashMap;
@ -26,19 +27,18 @@ import java.util.concurrent.Executor;
class IOUringEventLoop extends SingleThreadEventLoop {
//C pointer
// C pointer
private final long io_uring;
private final IntObjectMap<AbstractIOUringChannel> channels = new IntObjectHashMap<AbstractIOUringChannel>(4096);
//events should be unique to identify which event type that was
// events should be unique to identify which event type that was
private long eventIdCounter;
private HashMap<Long, Event> events = new HashMap<Long, Event>();
private final LongObjectHashMap<Event> events = new LongObjectHashMap<Event>();
protected IOUringEventLoop(final EventLoopGroup parent, final Executor executor, final boolean addTaskWakesUp,
final int maxPendingTasks,
final RejectedExecutionHandler rejectedExecutionHandler) {
final int maxPendingTasks, final RejectedExecutionHandler rejectedExecutionHandler) {
super(parent, executor, addTaskWakesUp, maxPendingTasks, rejectedExecutionHandler);
this.io_uring = Native.io_uring_setup(100);
this.io_uring = Native.ioUringSetup(32);
}
public long incrementEventIdCounter() {
@ -53,24 +53,25 @@ class IOUringEventLoop extends SingleThreadEventLoop {
@Override
protected void run() {
for (; ; ) {
//wait until an event has finished
final long cqe = Native.wait_cqe(io_uring);
final Event event = events.get(Native.getEventId(cqe));
final int ret = Native.getRes(cqe);
for (;;) {
// wait until an event has finished
final long cqe = Native.ioUringWaitCqe(io_uring);
final Event event = events.get(Native.ioUringGetEventId(cqe));
final int ret = Native.ioUringGetRes(cqe);
switch (event.getOp()) {
case ACCEPT:
//serverChannel is necessary to call newChildchannel
//create a new accept event
// serverChannel is necessary to call newChildchannel
// create a new accept event
break;
case READ:
//need to save the Bytebuf before I execute the read operation fireChannelRead(byteBuf)
// need to save the Bytebuf before I execute the read operation
// fireChannelRead(byteBuf)
break;
case WRITE:
//you have to store Bytebuf to continue writing
// you have to store Bytebuf to continue writing
break;
}
//processing Tasks
// processing Tasks
}
}
}

View File

@ -18,13 +18,11 @@ package io.netty.channel.uring;
import io.netty.channel.Channel;
import io.netty.channel.socket.ServerSocketChannel;
import io.netty.channel.socket.ServerSocketChannelConfig;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
public class IOUringServerSocketChannel extends AbstractIOUringServerChannel implements ServerSocketChannel {
IOUringServerSocketChannel(Channel parent, LinuxSocket fd, boolean active,
long ioUring) {
IOUringServerSocketChannel(Channel parent, LinuxSocket fd, boolean active, long ioUring) {
super(parent, fd, active, ioUring);
}
@ -33,7 +31,6 @@ public class IOUringServerSocketChannel extends AbstractIOUringServerChannel imp
super.doBind(localAddress);
}
@Override
public boolean isOpen() {
return false;
@ -49,7 +46,6 @@ public class IOUringServerSocketChannel extends AbstractIOUringServerChannel imp
return (ServerSocketChannel) super.parent();
}
@Override
public InetSocketAddress remoteAddress() {
return (InetSocketAddress) super.remoteAddress();

View File

@ -30,7 +30,6 @@ import io.netty.channel.unix.FileDescriptor;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
public class IOUringSocketChannel extends AbstractIOUringChannel implements SocketChannel {
IOUringSocketChannel(final Channel parent, final LinuxSocket fd, final boolean active, final long ioUring) {
@ -47,14 +46,12 @@ public class IOUringSocketChannel extends AbstractIOUringChannel implements Sock
return null;
}
@Override
protected AbstractUringUnsafe newUnsafe() {
return new AbstractUringUnsafe() {
@Override
public void uringEventExecution() {
final ChannelConfig config = config();
final ByteBufAllocator allocator = config.getAllocator();
@ -63,18 +60,14 @@ public class IOUringSocketChannel extends AbstractIOUringChannel implements Sock
ByteBuf byteBuf = allocHandle.allocate(allocator);
doReadBytes(byteBuf);
}
};
}
@Override
public void doBind(SocketAddress localAddress) throws Exception {
}
@Override
public boolean isInputShutdown() {
return false;
@ -145,5 +138,3 @@ public class IOUringSocketChannel extends AbstractIOUringChannel implements Sock
return (InetSocketAddress) super.localAddress();
}
}

View File

@ -18,7 +18,7 @@ package io.netty.channel.uring;
import io.netty.channel.unix.Socket;
public class LinuxSocket extends Socket {
private long fd;
private final long fd;
public LinuxSocket(final int fd) {
super(fd);
@ -26,15 +26,15 @@ public class LinuxSocket extends Socket {
}
public int readEvent(long ring, long eventId, long bufferAddress, int pos, int limit) {
return Native.read(ring, fd, eventId, bufferAddress, pos, limit);
return Native.ioUringRead(ring, fd, eventId, bufferAddress, pos, limit);
}
public int writeEvent(long ring, long eventId, long bufferAddress, int pos, int limit) {
return Native.write(ring, fd, eventId, bufferAddress, pos, limit);
return Native.ioUringWrite(ring, fd, eventId, bufferAddress, pos, limit);
}
public int acceptEvent(long ring, long eventId, byte[] addr) {
return Native.accept(ring, eventId, addr);
return Native.ioUringAccept(ring, eventId, addr);
}
}

View File

@ -15,31 +15,76 @@
*/
package io.netty.channel.uring;
import io.netty.channel.unix.FileDescriptor;
import io.netty.channel.unix.Socket;
import io.netty.util.internal.NativeLibraryLoader;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.SystemPropertyUtil;
import io.netty.util.internal.ThrowableUtil;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import java.io.IOException;
import java.nio.channels.Selector;
import java.util.Locale;
public final class Native {
public static native long io_uring_setup(int entries);
public static native long getSQE(long io_uring);
static {
loadNativeLibrary();
}
public static native long getQC(long io_uring);
public static native long ioUringSetup(int entries);
public static native int read(long io_uring, long fd, long eventId, long bufferAddress, int pos,
int limit);
public static native int ioUringRead(long io_uring, long fd, long eventId, long bufferAddress, int pos, int limit);
public static native int write(long io_uring, long fd, long eventId, long bufferAddress, int pos,
int limit);
public static native int ioUringWrite(long io_uring, long fd, long eventId, long bufferAddress, int pos, int limit);
public static native int accept(long io_uring, long fd, byte[] addr);
public static native int ioUringAccept(long io_uring, long fd, byte[] addr);
//return id
public static native long wait_cqe(long io_uring);
// return id
public static native long ioUringWaitCqe(long io_uring);
public static native long deleteCqe(long io_uring, long cqeAddress);
public static native long ioUringDeleteCqe(long io_uring, long cqeAddress);
public static native long getEventId(long cqeAddress);
public static native long ioUringGetEventId(long cqeAddress);
public static native int getRes(long cqeAddress);
public static native int ioUringGetRes(long cqeAddress);
public static native long close(long io_uring);
public static native long ioUringClose(long io_uring);
public static native long submit(long io_uring);
public static native long ioUringSubmit(long io_uring);
public static native long ioUringGetSQE(long io_uring);
public static native long ioUringGetQC(long io_uring);
// for testing(it is only temporary)
public static native long createFile();
private Native() {
// utility
}
// From epoll native library
private static void loadNativeLibrary() {
String name = SystemPropertyUtil.get("os.name").toLowerCase(Locale.UK).trim();
if (!name.startsWith("linux")) {
throw new IllegalStateException("Only supported on Linux");
}
String staticLibName = "netty_transport_native_io_uring";
String sharedLibName = staticLibName + '_' + PlatformDependent.normalizedArch();
ClassLoader cl = PlatformDependent.getClassLoader(Native.class);
try {
NativeLibraryLoader.load(sharedLibName, cl);
} catch (UnsatisfiedLinkError e1) {
try {
NativeLibraryLoader.load(staticLibName, cl);
} catch (UnsatisfiedLinkError e2) {
ThrowableUtil.addSuppressed(e1, e2);
throw e1;
}
}
}
}

View File

@ -0,0 +1,58 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.channel.uring;
import org.junit.Test;
import java.io.FileInputStream;
import java.io.File;
import sun.misc.SharedSecrets;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.buffer.UnpooledUnsafeDirectByteBuf;
import static org.junit.Assert.*;
public class NativeTest {
@Test
public void test_io_uring() {
long uring = Native.ioUringSetup(32);
long fd = Native.createFile();
System.out.println("Fd: " + fd);
ByteBufAllocator allocator = new UnpooledByteBufAllocator(true);
UnpooledUnsafeDirectByteBuf directByteBufPooled = new UnpooledUnsafeDirectByteBuf(allocator, 500, 1000);
System.out.println("MemoryAddress: " + directByteBufPooled.hasMemoryAddress());
String inputString = "Hello World!";
byte[] byteArray = inputString.getBytes();
directByteBufPooled.writeBytes(byteArray);
Native.ioUringWrite(uring, fd, 1, directByteBufPooled.memoryAddress(), directByteBufPooled.readerIndex(),
directByteBufPooled.writerIndex());
Native.ioUringSubmit(uring);
long cqe = Native.ioUringWaitCqe(uring);
// System.out.println("Res: " + Native.ioUringGetRes(cqe));
assertEquals(12, Native.ioUringGetRes(cqe));
Native.ioUringClose(uring);
}
}