// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//

#ifdef USE_HDFS
#ifndef ROCKSDB_HDFS_FILE_C
#define ROCKSDB_HDFS_FILE_C

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <algorithm>
#include <iostream>
#include <sstream>
#include "rocksdb/env.h"
#include "rocksdb/status.h"
#include "hdfs/env_hdfs.h"

#define HDFS_EXISTS 0
#define HDFS_DOESNT_EXIST -1
#define HDFS_SUCCESS 0

//
// This file defines an HDFS environment for rocksdb. It uses the libhdfs
// api to access HDFS. All HDFS files created by one instance of rocksdb
// will reside on the same HDFS cluster.
//
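// A minimal usage sketch (not part of this file's implementation): wiring
// this environment into a database. The "hdfs://host:port" connection
// string accepted by the HdfsEnv constructor is an assumption based on its
// declaration in hdfs/env_hdfs.h; adjust it to your cluster.
//
//   rocksdb::Options options;
//   options.env = new rocksdb::HdfsEnv("hdfs://localhost:9000");
//   options.create_if_missing = true;
//   rocksdb::DB* db = nullptr;
//   rocksdb::Status s = rocksdb::DB::Open(options, "/rocksdb/example", &db);
//   assert(s.ok());
//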

namespace rocksdb {

namespace {

// Log error message
static Status IOError(const std::string& context, int err_number) {
  return Status::IOError(context, strerror(err_number));
}

// assume that there is one global logger for now. It is not thread-safe,
// but need not be because the logger is initialized at db-open time.
static Logger* mylog = nullptr;

// Used for reading a file from HDFS. It implements both sequential-read
// access methods as well as random read access methods.
class HdfsReadableFile : virtual public SequentialFile,
                         virtual public RandomAccessFile {
 private:
  hdfsFS fileSys_;
  std::string filename_;
  hdfsFile hfile_;

 public:
  HdfsReadableFile(hdfsFS fileSys, const std::string& fname)
      : fileSys_(fileSys), filename_(fname), hfile_(nullptr) {
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile opening file %s\n", filename_.c_str());
    hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_RDONLY, 0, 0, 0);
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile opened file %s hfile_=0x%p\n",
        filename_.c_str(), hfile_);
  }

  virtual ~HdfsReadableFile() {
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile closing file %s\n", filename_.c_str());
    hdfsCloseFile(fileSys_, hfile_);
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile closed file %s\n", filename_.c_str());
    hfile_ = nullptr;
  }

  bool isValid() {
    return hfile_ != nullptr;
  }

  // sequential access, read data at current offset in file
  virtual Status Read(size_t n, Slice* result, char* scratch) {
    Status s;
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile reading %s %ld\n", filename_.c_str(), n);

    char* buffer = scratch;
    size_t total_bytes_read = 0;
    tSize bytes_read = 0;
    tSize remaining_bytes = (tSize)n;

    // Read a total of n bytes repeatedly until we hit error or eof
    while (remaining_bytes > 0) {
      bytes_read = hdfsRead(fileSys_, hfile_, buffer, remaining_bytes);
      if (bytes_read <= 0) {
        break;
      }
      assert(bytes_read <= remaining_bytes);

      total_bytes_read += bytes_read;
      remaining_bytes -= bytes_read;
      buffer += bytes_read;
    }
    assert(total_bytes_read <= n);

    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile read %s\n", filename_.c_str());

    if (bytes_read < 0) {
      s = IOError(filename_, errno);
    } else {
      *result = Slice(scratch, total_bytes_read);
    }

    return s;
  }

  // random access, read data from specified offset in file
  virtual Status Read(uint64_t offset, size_t n, Slice* result,
                      char* scratch) const {
    Status s;
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile preading %s\n", filename_.c_str());
    ssize_t bytes_read = hdfsPread(fileSys_, hfile_, offset,
                                   (void*)scratch, (tSize)n);
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile pread %s\n", filename_.c_str());
    *result = Slice(scratch, (bytes_read < 0) ? 0 : bytes_read);
    if (bytes_read < 0) {
      // An error: return a non-ok status
      s = IOError(filename_, errno);
    }
    return s;
  }

  virtual Status Skip(uint64_t n) {
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile skip %s\n", filename_.c_str());
    // get current offset from file
    tOffset current = hdfsTell(fileSys_, hfile_);
    if (current < 0) {
      return IOError(filename_, errno);
    }
    // seek to new offset in file
    tOffset newoffset = current + n;
    int val = hdfsSeek(fileSys_, hfile_, newoffset);
    if (val < 0) {
      return IOError(filename_, errno);
    }
    return Status::OK();
  }

 private:

  // returns true if we are at the end of file, false otherwise
  bool feof() {
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile feof %s\n", filename_.c_str());
    if (hdfsTell(fileSys_, hfile_) == fileSize()) {
      return true;
    }
    return false;
  }

  // the current size of the file
  tOffset fileSize() {
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsReadableFile fileSize %s\n", filename_.c_str());
    hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, filename_.c_str());
    tOffset size = 0L;
    if (pFileInfo != nullptr) {
      size = pFileInfo->mSize;
      hdfsFreeFileInfo(pFileInfo, 1);
    } else {
      throw HdfsFatalException("fileSize on unknown file " + filename_);
    }
    return size;
  }
};

// Appends to an existing file in HDFS.
class HdfsWritableFile : public WritableFile {
 private:
  hdfsFS fileSys_;
  std::string filename_;
  hdfsFile hfile_;

 public:
  HdfsWritableFile(hdfsFS fileSys, const std::string& fname)
      : fileSys_(fileSys), filename_(fname), hfile_(nullptr) {
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsWritableFile opening %s\n", filename_.c_str());
    hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_WRONLY, 0, 0, 0);
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsWritableFile opened %s\n", filename_.c_str());
    assert(hfile_ != nullptr);
  }

  virtual ~HdfsWritableFile() {
    if (hfile_ != nullptr) {
      Log(InfoLogLevel::DEBUG_LEVEL, mylog,
          "[hdfs] HdfsWritableFile closing %s\n", filename_.c_str());
      hdfsCloseFile(fileSys_, hfile_);
      Log(InfoLogLevel::DEBUG_LEVEL, mylog,
          "[hdfs] HdfsWritableFile closed %s\n", filename_.c_str());
      hfile_ = nullptr;
    }
  }

  // If the file was successfully created, then this returns true.
  // Otherwise returns false.
  bool isValid() {
    return hfile_ != nullptr;
  }

  // The name of the file, mostly needed for debug logging.
  const std::string& getName() {
    return filename_;
  }

  virtual Status Append(const Slice& data) {
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsWritableFile Append %s\n", filename_.c_str());
    const char* src = data.data();
    size_t left = data.size();
    size_t ret = hdfsWrite(fileSys_, hfile_, src, left);
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsWritableFile Appended %s\n", filename_.c_str());
    if (ret != left) {
      return IOError(filename_, errno);
    }
    return Status::OK();
  }

  virtual Status Flush() {
    return Status::OK();
  }

  virtual Status Sync() {
    Status s;
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsWritableFile Sync %s\n", filename_.c_str());
    // hdfsFlush pushes the client-side write buffer out to HDFS;
    // hdfsHSync additionally asks that the data be persisted on the
    // datanodes, similar to a posix fsync.
    if (hdfsFlush(fileSys_, hfile_) == -1) {
      return IOError(filename_, errno);
    }
    if (hdfsHSync(fileSys_, hfile_) == -1) {
      return IOError(filename_, errno);
    }
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsWritableFile Synced %s\n", filename_.c_str());
    return Status::OK();
  }

  // This is used by HdfsLogger to write data to the debug log file
  virtual Status Append(const char* src, size_t size) {
    if (hdfsWrite(fileSys_, hfile_, src, size) != (tSize)size) {
      return IOError(filename_, errno);
    }
    return Status::OK();
  }

  virtual Status Close() {
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsWritableFile closing %s\n", filename_.c_str());
    if (hdfsCloseFile(fileSys_, hfile_) != 0) {
      return IOError(filename_, errno);
    }
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsWritableFile closed %s\n", filename_.c_str());
    hfile_ = nullptr;
    return Status::OK();
  }
};

// The object that implements the debug logs to reside in HDFS.
class HdfsLogger : public Logger {
 private:
  HdfsWritableFile* file_;
  uint64_t (*gettid_)();  // Return the thread id for the current thread

 public:
  HdfsLogger(HdfsWritableFile* f, uint64_t (*gettid)())
      : file_(f), gettid_(gettid) {
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsLogger opened %s\n", file_->getName().c_str());
  }

  virtual ~HdfsLogger() {
    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
        "[hdfs] HdfsLogger closed %s\n", file_->getName().c_str());
    delete file_;
    if (mylog != nullptr && mylog == this) {
      mylog = nullptr;
    }
  }

  virtual void Logv(const char* format, va_list ap) {
    const uint64_t thread_id = (*gettid_)();

    // We try twice: the first time with a fixed-size stack allocated buffer,
    // and the second time with a much larger dynamically allocated buffer.
    char buffer[500];
    for (int iter = 0; iter < 2; iter++) {
      char* base;
      int bufsize;
      if (iter == 0) {
        bufsize = sizeof(buffer);
        base = buffer;
      } else {
        bufsize = 30000;
        base = new char[bufsize];
      }
      char* p = base;
      char* limit = base + bufsize;

      struct timeval now_tv;
      gettimeofday(&now_tv, nullptr);
      const time_t seconds = now_tv.tv_sec;
      struct tm t;
      localtime_r(&seconds, &t);
      p += snprintf(p, limit - p,
                    "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
                    t.tm_year + 1900,
                    t.tm_mon + 1,
                    t.tm_mday,
                    t.tm_hour,
                    t.tm_min,
                    t.tm_sec,
                    static_cast<int>(now_tv.tv_usec),
                    static_cast<long long unsigned int>(thread_id));

      // Print the message
      if (p < limit) {
        va_list backup_ap;
        va_copy(backup_ap, ap);
        p += vsnprintf(p, limit - p, format, backup_ap);
        va_end(backup_ap);
      }

      // Truncate to available space if necessary
      if (p >= limit) {
        if (iter == 0) {
          continue;  // Try again with larger buffer
        } else {
          p = limit - 1;
        }
      }

      // Add newline if necessary
      if (p == base || p[-1] != '\n') {
        *p++ = '\n';
      }

      assert(p <= limit);
      file_->Append(base, p - base);
      file_->Flush();
      if (base != buffer) {
        delete[] base;
      }
      break;
    }
  }
};

}  // namespace

// Finally, the hdfs environment

const std::string HdfsEnv::kProto = "hdfs://";
const std::string HdfsEnv::pathsep = "/";

// open a file for sequential reading
Status HdfsEnv::NewSequentialFile(const std::string& fname,
                                  unique_ptr<SequentialFile>* result,
                                  const EnvOptions& options) {
  result->reset();
  HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname);
  if (f == nullptr || !f->isValid()) {
    delete f;
    *result = nullptr;
    return IOError(fname, errno);
  }
  result->reset(dynamic_cast<SequentialFile*>(f));
  return Status::OK();
}

// open a file for random reading
Status HdfsEnv::NewRandomAccessFile(const std::string& fname,
                                    unique_ptr<RandomAccessFile>* result,
                                    const EnvOptions& options) {
  result->reset();
  HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname);
  if (f == nullptr || !f->isValid()) {
    delete f;
    *result = nullptr;
    return IOError(fname, errno);
  }
  result->reset(dynamic_cast<RandomAccessFile*>(f));
  return Status::OK();
}

// create a new file for writing
Status HdfsEnv::NewWritableFile(const std::string& fname,
                                unique_ptr<WritableFile>* result,
                                const EnvOptions& options) {
  result->reset();
  Status s;
  HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname);
  if (f == nullptr || !f->isValid()) {
    delete f;
    *result = nullptr;
    return IOError(fname, errno);
  }
  result->reset(dynamic_cast<WritableFile*>(f));
  return Status::OK();
}

Status HdfsEnv::NewRandomRWFile(const std::string& fname,
                                unique_ptr<RandomRWFile>* result,
                                const EnvOptions& options) {
  return Status::NotSupported("NewRandomRWFile not supported on HdfsEnv");
}

class HdfsDirectory : public Directory {
 public:
  explicit HdfsDirectory(int fd) : fd_(fd) {}
  ~HdfsDirectory() {}

  virtual Status Fsync() { return Status::OK(); }

 private:
  int fd_;
};

Status HdfsEnv::NewDirectory(const std::string& name,
                             unique_ptr<Directory>* result) {
  int value = hdfsExists(fileSys_, name.c_str());
  switch (value) {
    case HDFS_EXISTS:
      result->reset(new HdfsDirectory(0));
      return Status::OK();
    default:  // fail if the directory doesn't exist
      Log(InfoLogLevel::FATAL_LEVEL, mylog,
          "NewDirectory hdfsExists call failed");
      throw HdfsFatalException("hdfsExists call failed with error " +
                               ToString(value) + " on path " + name + ".\n");
  }
}

bool HdfsEnv::FileExists(const std::string& fname) {
  int value = hdfsExists(fileSys_, fname.c_str());
  switch (value) {
    case HDFS_EXISTS:
      return true;
    case HDFS_DOESNT_EXIST:
      return false;
    default:  // anything else should be an error
      Log(InfoLogLevel::FATAL_LEVEL, mylog,
          "FileExists hdfsExists call failed");
      throw HdfsFatalException("hdfsExists call failed with error " +
                               ToString(value) + " on path " + fname + ".\n");
  }
}

Status HdfsEnv::GetChildren(const std::string& path,
                            std::vector<std::string>* result) {
  int value = hdfsExists(fileSys_, path.c_str());
  switch (value) {
    case HDFS_EXISTS: {  // directory exists
      int numEntries = 0;
      hdfsFileInfo* pHdfsFileInfo = 0;
      pHdfsFileInfo = hdfsListDirectory(fileSys_, path.c_str(), &numEntries);
      if (numEntries >= 0) {
        for (int i = 0; i < numEntries; i++) {
          char* pathname = pHdfsFileInfo[i].mName;
          char* filename = rindex(pathname, '/');
          if (filename != nullptr) {
            result->push_back(filename + 1);
          }
        }
        if (pHdfsFileInfo != nullptr) {
          hdfsFreeFileInfo(pHdfsFileInfo, numEntries);
        }
      } else {
        // numEntries < 0 indicates error
        Log(InfoLogLevel::FATAL_LEVEL, mylog,
            "hdfsListDirectory call failed with error ");
        throw HdfsFatalException(
            "hdfsListDirectory call failed negative error.\n");
      }
      break;
    }
    case HDFS_DOESNT_EXIST:  // directory does not exist, exit
      break;
    default:  // anything else should be an error
      Log(InfoLogLevel::FATAL_LEVEL, mylog,
          "GetChildren hdfsExists call failed");
      throw HdfsFatalException("hdfsExists call failed with error " +
                               ToString(value) + ".\n");
  }
  return Status::OK();
}

Status HdfsEnv::DeleteFile(const std::string& fname) {
  if (hdfsDelete(fileSys_, fname.c_str(), 1) == 0) {
    return Status::OK();
  }
  return IOError(fname, errno);
}

Status HdfsEnv::CreateDir(const std::string& name) {
  if (hdfsCreateDirectory(fileSys_, name.c_str()) == 0) {
    return Status::OK();
  }
  return IOError(name, errno);
}

Status HdfsEnv::CreateDirIfMissing(const std::string& name) {
  const int value = hdfsExists(fileSys_, name.c_str());
  // Not atomic. state might change b/w hdfsExists and CreateDir.
  switch (value) {
    case HDFS_EXISTS:
      return Status::OK();
    case HDFS_DOESNT_EXIST:
      return CreateDir(name);
    default:  // anything else should be an error
      Log(InfoLogLevel::FATAL_LEVEL, mylog,
          "CreateDirIfMissing hdfsExists call failed");
      throw HdfsFatalException("hdfsExists call failed with error " +
                               ToString(value) + ".\n");
  }
}

Status HdfsEnv::DeleteDir(const std::string& name) {
  return DeleteFile(name);
}

Status HdfsEnv::GetFileSize(const std::string& fname, uint64_t* size) {
  *size = 0L;
  hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, fname.c_str());
  if (pFileInfo != nullptr) {
    *size = pFileInfo->mSize;
    hdfsFreeFileInfo(pFileInfo, 1);
    return Status::OK();
  }
  return IOError(fname, errno);
}

Status HdfsEnv::GetFileModificationTime(const std::string& fname,
                                        uint64_t* time) {
  hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, fname.c_str());
  if (pFileInfo != nullptr) {
    *time = static_cast<uint64_t>(pFileInfo->mLastMod);
    hdfsFreeFileInfo(pFileInfo, 1);
    return Status::OK();
  }
  return IOError(fname, errno);
}

// The rename is not atomic. HDFS does not allow a renaming if the
// target already exists. So, we delete the target before attempting the
// rename.
Status HdfsEnv::RenameFile(const std::string& src, const std::string& target) {
  hdfsDelete(fileSys_, target.c_str(), 1);
  if (hdfsRename(fileSys_, src.c_str(), target.c_str()) == 0) {
    return Status::OK();
  }
  return IOError(src, errno);
}

Status HdfsEnv::LockFile(const std::string& fname, FileLock** lock) {
  // there isn't a very good way to atomically check and create
  // a file via libhdfs
  *lock = nullptr;
  return Status::OK();
}

Status HdfsEnv::UnlockFile(FileLock* lock) {
  return Status::OK();
}

Status HdfsEnv::NewLogger(const std::string& fname,
                          shared_ptr<Logger>* result) {
  HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname);
  if (f == nullptr || !f->isValid()) {
    delete f;
    *result = nullptr;
    return IOError(fname, errno);
  }
  HdfsLogger* h = new HdfsLogger(f, &HdfsEnv::gettid);
  result->reset(h);
  if (mylog == nullptr) {
    // mylog = h; // uncomment this for detailed logging
  }
  return Status::OK();
}

}  // namespace rocksdb

#endif  // ROCKSDB_HDFS_FILE_C

#else  // USE_HDFS

// dummy placeholders used when HDFS is not available
#include "rocksdb/env.h"
#include "hdfs/env_hdfs.h"
namespace rocksdb {
Status HdfsEnv::NewSequentialFile(const std::string& fname,
                                  unique_ptr<SequentialFile>* result,
                                  const EnvOptions& options) {
  return Status::NotSupported("Not compiled with hdfs support");
}
}  // namespace rocksdb

#endif