libhdfs in Hadoop

Hadoop 3.2.2

libhdfs/hdfs.c

struct hdfsFile_internal {
    void* file;
    enum hdfsStreamType type;
    int flags;
};
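
The flags field is a bit mask. For reference, a hedged sketch of the relevant flag definitions in hdfs.c (exact values should be checked against the source; the pread flag only exists from 3.3.x onward):

/* In libhdfs/hdfs.c (values believed correct, verify against the source tree) */
#define HDFS_FILE_SUPPORTS_DIRECT_READ  (1 << 0)
#define HDFS_FILE_SUPPORTS_DIRECT_PREAD (1 << 1)   /* added in 3.3.x */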

The flags field in the structure above is set and queried by the following interfaces:

hdfsFile hdfsOpenFile(hdfsFS fs, const char *path, int flags,
                      int bufferSize, short replication, tSize blockSize)
{
    struct hdfsStreamBuilder *bld = hdfsStreamBuilderAlloc(fs, path, flags);
    if (bufferSize != 0) {
      hdfsStreamBuilderSetBufferSize(bld, bufferSize);
    }
    if (replication != 0) {
      hdfsStreamBuilderSetReplication(bld, replication);
    }
    if (blockSize != 0) {
      hdfsStreamBuilderSetDefaultBlockSize(bld, blockSize);
    }
    return hdfsStreamBuilderBuild(bld);
}

int hdfsFileUsesDirectRead(hdfsFile file)
{
    return !!(file->flags & HDFS_FILE_SUPPORTS_DIRECT_READ);
}

void hdfsFileDisableDirectRead(hdfsFile file)
{
    file->flags &= ~HDFS_FILE_SUPPORTS_DIRECT_READ;
}

static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
                  int32_t bufferSize, int16_t replication, int64_t blockSize)
{
...
if ((flags & O_WRONLY) == 0) {
        // Try a test read to see if we can do direct reads
        char buf;
        if (readDirect(fs, file, &buf, 0) == 0) {
            // Success - 0-byte read should return 0
            file->flags |= HDFS_FILE_SUPPORTS_DIRECT_READ;
        } else if (errno != ENOTSUP) {
            // Unexpected error. Clear it, don't set the direct flag.
            fprintf(stderr,
                  "hdfsOpenFile(%s): WARN: Unexpected error %d when testing "
                  "for direct read compatibility\n", path, errno);
        }
    }
...
}
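
As a side note, here is a minimal sketch of how an application can check whether direct reads ended up enabled on a freshly opened file. It assumes a reachable filesystem and a placeholder path, and repeats the prototype of hdfsFileUsesDirectRead since that helper is typically declared in libhdfs's test header (hdfs_test.h) rather than the public hdfs.h:

#include <fcntl.h>
#include <stdio.h>
#include "hdfs.h"

/* Typically declared in libhdfs's hdfs_test.h, not in the public hdfs.h. */
int hdfsFileUsesDirectRead(hdfsFile file);

int main(void) {
    /* "default" resolves fs.defaultFS from the configuration on CLASSPATH. */
    hdfsFS fs = hdfsConnect("default", 0);
    if (!fs) {
        fprintf(stderr, "hdfsConnect failed\n");
        return 1;
    }

    /* Open read-only; 0 means "use the default" for buffer size,
       replication and block size. "/tmp/some-file" is a placeholder path. */
    hdfsFile f = hdfsOpenFile(fs, "/tmp/some-file", O_RDONLY, 0, 0, 0);
    if (!f) {
        fprintf(stderr, "hdfsOpenFile failed\n");
        hdfsDisconnect(fs);
        return 1;
    }

    /* In 3.2.2 this reflects the 0-byte readDirect test above:
       1 when HDFS_FILE_SUPPORTS_DIRECT_READ was set, 0 otherwise. */
    printf("direct read enabled: %d\n", hdfsFileUsesDirectRead(f));

    hdfsCloseFile(fs, f);
    hdfsDisconnect(fs);
    return 0;
}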

Hadoop 3.3.1

In Hadoop 3.3.1 the implementation of this interface has changed.

Related commit: https://github.com/apache/hadoop/pull/597/files#diff-c1385f6f8f4422f3f22bd28edd3123209d551e513b73429e58dd7c3d3350f59d

if ((flags & O_WRONLY) == 0) {
        // Check the StreamCapabilities of jFile to see if we can do direct
        // reads
        if (hdfsHasStreamCapability(jFile, "in:readbytebuffer")) {
            file->flags |= HDFS_FILE_SUPPORTS_DIRECT_READ;
        }

        // Check the StreamCapabilities of jFile to see if we can do direct
        // preads
        if (hdfsHasStreamCapability(jFile, "in:preadbytebuffer")) {
            file->flags |= HDFS_FILE_SUPPORTS_DIRECT_PREAD;
        }
    }
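
The new HDFS_FILE_SUPPORTS_DIRECT_PREAD flag plays the same role for positioned reads: when the stream advertises "in:preadbytebuffer", hdfsPread can be served through a ByteBuffer-based pread rather than the byte[] JNI path. A minimal sketch of the caller's side, assuming a reachable filesystem and a placeholder path:

#include <fcntl.h>
#include <stdio.h>
#include "hdfs.h"

int main(void) {
    hdfsFS fs = hdfsConnect("default", 0);
    if (!fs) return 1;

    /* "/tmp/some-file" is a placeholder path. */
    hdfsFile f = hdfsOpenFile(fs, "/tmp/some-file", O_RDONLY, 0, 0, 0);
    if (!f) { hdfsDisconnect(fs); return 1; }

    /* Positioned read: does not move the stream's current offset. In 3.3.1,
       if the stream declared "in:preadbytebuffer" at open time, libhdfs can
       serve this through the direct ByteBuffer pread path; otherwise it
       falls back to the byte[] copy path over JNI. */
    char buf[4096];
    tSize n = hdfsPread(fs, f, 1024, buf, sizeof(buf));
    printf("pread returned %d bytes\n", (int) n);

    hdfsCloseFile(fs, f);
    hdfsDisconnect(fs);
    return 0;
}
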
How it is determined whether a stream implements a given capability:

org.apache.hadoop.fs.FSDataInputStream.java

  @Override
  public boolean hasCapability(String capability) {
    return StoreImplementationUtils.hasCapability(in, capability);
  }

org.apache.hadoop.fs.impl.StoreImplementationUtils.java

  /**
   * Probe for an input stream having a capability; returns true
   * if the stream implements {@link StreamCapabilities} and its
   * {@code hasCapabilities()} method returns true for the capability.
   * @param in input stream
   * @param capability capability to probe for
   * @return true if the stream declares that it supports the capability.
   */
  public static boolean hasCapability(InputStream in, String capability) {
    return objectHasCapability(in, capability);
  }

If the wrapped (inner) stream here does not implement StreamCapabilities, false is returned directly:

  /**
   * Probe for an object having a capability; returns true
   * if the stream implements {@link StreamCapabilities} and its
   * {@code hasCapabilities()} method returns true for the capability.
   * This is a package private method intended to provided a common
   * implementation for input and output streams.
   * {@link StreamCapabilities#hasCapability(String)} call is for public use.
   * @param object object to probe.
   * @param capability capability to probe for
   * @return true if the object implements stream capabilities and
   * declares that it supports the capability.
   */
  static boolean objectHasCapability(Object object, String capability) {
    if (object instanceof StreamCapabilities) {
      return ((StreamCapabilities) object).hasCapability(capability);
    }
    return false;
  }
Demo test
$ cat test_libhdfs_read.c

#include "hdfs.h" 

#include <stdio.h>
#include <stdlib.h>
#include <iostream>

int main(int argc, char **argv) {
    hdfsFS fs;
    hdfsFile readFile;
    char* buffer;
    tSize curSize;

    // Validate argc before touching argv.
    if (argc != 4) {
        fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
        exit(-1);
    }

    const char *rfile = argv[1];
    tSize bufferSize = strtoul(argv[3], NULL, 10);
    
    //fs = hdfsConnect("default", 0);
    fs = hdfsConnect("cosn://xiangx-guigu-1258469122", 0);
    if (!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-1);
    } 

    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
    std::cout << "readFile: " << readFile << std::endl;
    if (!readFile) {
        fprintf(stderr, "Failed to open %s for reading!\n", rfile);
        exit(-2);
    }


    buffer = (char*)malloc(sizeof(char) * bufferSize);
    if(buffer == NULL) {
        return -2;
    }
    

    curSize = bufferSize;
    for (; curSize == bufferSize;) {
        curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
    }
    

    free(buffer);
    hdfsCloseFile(fs, readFile);
    hdfsDisconnect(fs);
    std::cout << "curSize: " << curSize << std::endl;

    return 0;
}
  • Compile command

    g++ test_libhdfs_read.c -I$HADOOP_HOME/include -L$HADOOP_HOME/lib/native -L/usr/local/jdk/jre/lib/amd64/server/ -lhdfs -ljvm -o test_libhdfs_read -Wl,-rpath,/usr/local/jdk/jre/lib/amd64/server/

  • Run command

    export CLASSPATH=$(hadoop classpath --glob)
    ./test_libhdfs_read cosn://xxx/testdata/testparquet/part-00000-4a31a445-8104-402e-ad60-486fba4ae5f6-c000-f8f4776d-5003-4888-872f-c9567471b5f5.snappy.parquet 100 100

    readDirect: FSDataInputStream#read error:
    UnsupportedOperationException: Byte-buffer read unsupported by input streamjava.lang.UnsupportedOperationException: Byte-buffer read unsupported by input stream
    at org.apache.hadoop.fs.FSDataInputStream.read(FSDataInputStream.java:150)
    readFile: 0x33cd8b0
    curSize: 99
