Possible leak in AbstractDiskHttpData

Motivation:
SonarQube (clinker.netty.io/sonar) reported a resource in AbstractDiskHttpData that may not be properly closed in all situations.

Modifications:
- Ensure file channels are closed in the presence of exceptions (the close-in-finally pattern is sketched below, after Result).
- Correct instances where local channels were created but potentially not closed.

Result:
Fewer leaks. Fewer SonarQube vulnerabilities.
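
For reference, a minimal standalone sketch of the close-in-finally pattern this change applies (not part of the commit; the class, method and variable names below are illustrative only):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

final class CloseInFinallySketch {
    // Write a buffer to a file, guaranteeing the stream (and its channel)
    // is closed even if write() or force() throws.
    static void writeAll(File target, ByteBuffer data) throws IOException {
        FileOutputStream outputStream = new FileOutputStream(target);
        try {
            FileChannel channel = outputStream.getChannel();
            while (data.hasRemaining()) {
                channel.write(data);   // may throw mid-loop
            }
            channel.force(false);      // flush file content to disk
        } finally {
            outputStream.close();      // closes the underlying channel too
        }
    }
}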
scottmitch 2015-01-27 14:34:00 -05:00 committed by Norman Maurer
parent 50a857cecf
commit 86cb41bf95

@@ -120,6 +120,7 @@ public abstract class AbstractDiskHttpData extends AbstractHttpData {
return;
}
FileOutputStream outputStream = new FileOutputStream(file);
try {
FileChannel localfileChannel = outputStream.getChannel();
ByteBuffer byteBuffer = buffer.nioBuffer();
int written = 0;
@@ -128,8 +129,9 @@ public abstract class AbstractDiskHttpData extends AbstractHttpData {
}
buffer.readerIndex(buffer.readerIndex() + written);
localfileChannel.force(false);
localfileChannel.close();
} finally {
outputStream.close();
}
setCompleted();
} finally {
// Release the buffer as it was retained before and we do not need a reference to it at all
@@ -210,11 +212,12 @@ public abstract class AbstractDiskHttpData extends AbstractHttpData {
}
file = tempFile();
FileOutputStream outputStream = new FileOutputStream(file);
int written = 0;
try {
FileChannel localfileChannel = outputStream.getChannel();
byte[] bytes = new byte[4096 * 4];
ByteBuffer byteBuffer = ByteBuffer.wrap(bytes);
int read = inputStream.read(bytes);
int written = 0;
while (read > 0) {
byteBuffer.position(read).flip();
written += localfileChannel.write(byteBuffer);
@@ -222,7 +225,9 @@ public abstract class AbstractDiskHttpData extends AbstractHttpData {
read = inputStream.read(bytes);
}
localfileChannel.force(false);
localfileChannel.close();
} finally {
outputStream.close();
}
size = written;
if (definedSize > 0 && definedSize < size) {
if (!file.delete()) {
@@ -337,20 +342,51 @@ public abstract class AbstractDiskHttpData extends AbstractHttpData {
}
if (!file.renameTo(dest)) {
// must copy
FileInputStream inputStream = new FileInputStream(file);
FileOutputStream outputStream = new FileOutputStream(dest);
FileChannel in = inputStream.getChannel();
FileChannel out = outputStream.getChannel();
IOException exception = null;
FileInputStream inputStream = null;
FileOutputStream outputStream = null;
long chunkSize = 8196;
long position = 0;
try {
inputStream = new FileInputStream(file);
outputStream = new FileOutputStream(dest);
FileChannel in = inputStream.getChannel();
FileChannel out = outputStream.getChannel();
while (position < size) {
if (chunkSize < size - position) {
chunkSize = size - position;
}
position += in.transferTo(position, chunkSize, out);
}
in.close();
out.close();
} catch (IOException e) {
exception = e;
} finally {
if (inputStream != null) {
try {
inputStream.close();
} catch (IOException e) {
if (exception == null) { // Choose to report the first exception
exception = e;
} else {
logger.warn("Multiple exceptions detected, the following will be suppressed {}", e);
}
}
}
if (outputStream != null) {
try {
outputStream.close();
} catch (IOException e) {
if (exception == null) { // Choose to report the first exception
exception = e;
} else {
logger.warn("Multiple exceptions detected, the following will be suppressed {}", e);
}
}
}
}
if (exception != null) {
throw exception;
}
if (position == size) {
if (!file.delete()) {
logger.warn("Failed to delete: {}", file);
@@ -381,14 +417,17 @@ public abstract class AbstractDiskHttpData extends AbstractHttpData {
"File too big to be loaded in memory");
}
FileInputStream inputStream = new FileInputStream(src);
FileChannel fileChannel = inputStream.getChannel();
byte[] array = new byte[(int) srcsize];
try {
FileChannel fileChannel = inputStream.getChannel();
ByteBuffer byteBuffer = ByteBuffer.wrap(array);
int read = 0;
while (read < srcsize) {
read += fileChannel.read(byteBuffer);
}
fileChannel.close();
} finally {
inputStream.close();
}
return array;
}
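
Similarly, a standalone sketch of the close-both-and-rethrow-first pattern used in the renameTo() fallback above (not part of the commit; copyFile and closeAndCollect are hypothetical names, and a plain buffered copy stands in for FileChannel.transferTo for brevity):

import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

final class FirstExceptionWinsSketch {
    // Copy src to dst, closing both streams and reporting the first
    // IOException; later close failures are only logged.
    static void copyFile(File src, File dst) throws IOException {
        IOException exception = null;
        FileInputStream in = null;
        FileOutputStream out = null;
        try {
            in = new FileInputStream(src);
            out = new FileOutputStream(dst);
            byte[] buf = new byte[8192];
            int read;
            while ((read = in.read(buf)) > 0) {
                out.write(buf, 0, read);
            }
        } catch (IOException e) {
            exception = e;
        } finally {
            exception = closeAndCollect(in, exception);
            exception = closeAndCollect(out, exception);
        }
        if (exception != null) {
            throw exception; // the first failure wins, as in the patch
        }
    }

    private static IOException closeAndCollect(Closeable c, IOException first) {
        if (c == null) {
            return first;
        }
        try {
            c.close();
        } catch (IOException e) {
            if (first == null) {
                return e;
            }
            System.err.println("Suppressing secondary exception: " + e);
        }
        return first;
    }
}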