Configuration Notes for Accessing the Hadoop Distributed File System (HDFS) from Java




Configuration files

In the snippets below, replace m103 with the address of your HDFS service.

To read and write files on HDFS from a Java client, the configuration file hadoop-0.20.2/conf/core-site.xml deserves special mention. This is where I initially got burned: with it misconfigured I simply could not connect to HDFS, and files could not be created or read.

core-site.xml

<configuration>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/zhangzk/hadoop</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://linux-zzk-113:9000</value>
  </property>
</configuration>

The hadoop.tmp.dir property names the directory where the namenode stores its metadata; on a datanode it is the directory where that node stores file data.

The fs.default.name property names the namenode's IP address (or hostname) and port; its default value is file:///. A Java API client must connect to HDFS using exactly the URL configured here, and datanodes likewise use this URL to reach the namenode.
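For instance, here is a minimal sketch of opening a FileSystem handle against this URL (the class name HdfsConnectCheck and the path /user/zhangzk checked below are just illustrations):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsConnectCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The URI must match fs.default.name from core-site.xml; with the
        // default file:/// the client silently talks to the local file system.
        FileSystem fs = FileSystem.get(URI.create("hdfs://linux-zzk-113:9000"), conf);
        System.out.println("/user/zhangzk exists: " + fs.exists(new Path("/user/zhangzk")));
        fs.close();
    }
}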

hdfs-site.xml

dfs.namenode.name.dir

file:///mnt/sdc1/dfs/nn

dfs.namenode.servicerpc-address

m103:8022

dfs.https.address

m103:50470

dfs.https.port

50470

dfs.namenode.http-address

m103:50070

dfs.replication

3

dfs.blocksize

134217728

dfs.client.use.datanode.hostname

false

fs.permissions.umask-mode

022

dfs.namenode.acls.enabled

false

dfs.block.local-path-access.user

cloudera-scm

dfs.client.read.shortcircuit

false

dfs.domain.socket.path

/var/run/hdfs-sockets/dn

dfs.client.read.shortcircuit.skip.checksum

false

dfs.client.domain.socket.data.traffic

false

dfs.datanode.hdfs-blocks-metadata.enabled

true

fs.http.impl

com.scistor.datavision.fs.HTTPFileSystem
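A Java client picks these settings up automatically when the XML files are on its classpath; they can also be loaded explicitly. A minimal sketch, assuming the files live under /etc/hadoop/conf (adjust the paths to your installation):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LoadClusterConf {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Explicitly add the cluster's config files (paths are assumptions).
        conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
        conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
        FileSystem fs = FileSystem.get(conf); // uses fs.default.name from the files
        System.out.println("default FS = " + fs.getUri());
        System.out.println("dfs.blocksize = " + conf.get("dfs.blocksize"));
        fs.close();
    }
}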

mapred-site.xml

<configuration>
  <property><name>mapreduce.job.split.metainfo.maxsize</name><value>10000000</value></property>
  <property><name>mapreduce.job.counters.max</name><value>120</value></property>
  <property><name>mapreduce.output.fileoutputformat.compress</name><value>true</value></property>
  <property><name>mapreduce.output.fileoutputformat.compress.type</name><value>BLOCK</value></property>
  <property><name>mapreduce.output.fileoutputformat.compress.codec</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
  <property><name>mapreduce.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
  <property><name>mapreduce.map.output.compress</name><value>true</value></property>
  <property><name>zlib.compress.level</name><value>DEFAULT_COMPRESSION</value></property>
  <property><name>mapreduce.task.io.sort.factor</name><value>64</value></property>
  <property><name>mapreduce.map.sort.spill.percent</name><value>0.8</value></property>
  <property><name>mapreduce.reduce.shuffle.parallelcopies</name><value>10</value></property>
  <property><name>mapreduce.task.timeout</name><value>600000</value></property>
  <property><name>mapreduce.client.submit.file.replication</name><value>1</value></property>
  <property><name>mapreduce.job.reduces</name><value>24</value></property>
  <property><name>mapreduce.task.io.sort.mb</name><value>256</value></property>
  <property><name>mapreduce.map.speculative</name><value>false</value></property>
  <property><name>mapreduce.reduce.speculative</name><value>false</value></property>
  <property><name>mapreduce.job.reduce.slowstart.completedmaps</name><value>0.8</value></property>
  <property><name>mapreduce.jobhistory.address</name><value>m103:10020</value></property>
  <property><name>mapreduce.jobhistory.webapp.address</name><value>m103:19888</value></property>
  <property><name>mapreduce.jobhistory.webapp.https.address</name><value>m103:19890</value></property>
  <property><name>mapreduce.jobhistory.admin.address</name><value>m103:10033</value></property>
  <property><name>mapreduce.framework.name</name><value>yarn</value></property>
  <property><name>yarn.app.mapreduce.am.staging-dir</name><value>/user</value></property>
  <property><name>mapreduce.am.max-attempts</name><value>2</value></property>
  <property><name>yarn.app.mapreduce.am.resource.mb</name><value>2048</value></property>
  <property><name>yarn.app.mapreduce.am.resource.cpu-vcores</name><value>1</value></property>
  <property><name>mapreduce.job.ubertask.enable</name><value>false</value></property>
  <property><name>yarn.app.mapreduce.am.command-opts</name><value>-Djava.net.preferIPv4Stack=true -Xmx1717986918</value></property>
  <property><name>mapreduce.map.java.opts</name><value>-Djava.net.preferIPv4Stack=true -Xmx1717986918</value></property>
  <property><name>mapreduce.reduce.java.opts</name><value>-Djava.net.preferIPv4Stack=true -Xmx2576980378</value></property>
  <property><name>yarn.app.mapreduce.am.admin.user.env</name><value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value></property>
  <property><name>mapreduce.map.memory.mb</name><value>2048</value></property>
  <property><name>mapreduce.map.cpu.vcores</name><value>1</value></property>
  <property><name>mapreduce.reduce.memory.mb</name><value>3072</value></property>
  <property><name>mapreduce.reduce.cpu.vcores</name><value>1</value></property>
  <property><name>mapreduce.application.classpath</name><value>$HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,$MR2_CLASSPATH,$CDH_HCAT_HOME/share/hcatalog/*,$CDH_HIVE_HOME/lib/*,/etc/hive/conf,/opt/cloudera/parcels/CDH/lib/udps/*</value></property>
  <property><name>mapreduce.admin.user.env</name><value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value></property>
  <property><name>mapreduce.shuffle.max.connections</name><value>80</value></property>
</configuration>
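Most of these values can also be overridden per job from client code. A minimal sketch, assuming the Hadoop 2.x mapreduce API (the class name, the job name "demo", and the chosen keys are just examples):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobConfOverride {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("mapreduce.framework.name", "yarn");
        // Same effect as the mapred-site.xml entries above, scoped to this job.
        conf.setBoolean("mapreduce.map.output.compress", true);
        conf.set("mapreduce.map.output.compress.codec",
                 "org.apache.hadoop.io.compress.SnappyCodec");
        Job job = Job.getInstance(conf, "demo");
        job.setNumReduceTasks(24); // equivalent to mapreduce.job.reduces=24
        System.out.println("io.sort.mb = "
                + job.getConfiguration().get("mapreduce.task.io.sort.mb"));
    }
}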

Accessing files and directories on HDFS with the Java API

package com.demo.hdfs;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;

/**
 * @author zhangzk
 */
public class FileCopyToHdfs {

    public static void main(String[] args) {
        try {
            //uploadToHdfs();
            //deleteFromHdfs();
            //getDirectoryFromHdfs();
            appendToHdfs();
            readFromHdfs();
            System.out.println("SUCCESS");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Upload a local file to HDFS. */
    private static void uploadToHdfs() throws FileNotFoundException, IOException {
        String localSrc = "d:/qq.txt";
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        OutputStream out = fs.create(new Path(dst), new Progressable() {
            public void progress() {
                System.out.print("."); // one dot per progress callback
            }
        });
        IOUtils.copyBytes(in, out, 4096, true); // true: close both streams when done
    }

    /** Read a file from HDFS and write it to the local disk. */
    private static void readFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FSDataInputStream hdfsInStream = fs.open(new Path(dst));
        OutputStream out = new FileOutputStream("d:/qq-hdfs.txt");
        byte[] ioBuffer = new byte[1024];
        int readLen = hdfsInStream.read(ioBuffer);
        while (-1 != readLen) {
            out.write(ioBuffer, 0, readLen);
            readLen = hdfsInStream.read(ioBuffer);
        }
        out.close();
        hdfsInStream.close();
        fs.close();
    }

    /**
     * Append content to the end of a file on HDFS.
     * Note: updating files requires dfs.support.append=true in hdfs-site.xml.
     */
    private static void appendToHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FSDataOutputStream out = fs.append(new Path(dst));
        byte[] bytes = "zhangzk add by hdfs java api".getBytes();
        out.write(bytes, 0, bytes.length);
        out.close();
        fs.close();
    }

    /** Delete a file from HDFS. */
    private static void deleteFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq-bak.txt";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        fs.deleteOnExit(new Path(dst)); // removed when the FileSystem is closed
        fs.close();
    }

    /** List the files and directories under an HDFS path. */
    private static void getDirectoryFromHdfs() throws FileNotFoundException, IOException {
        String dst = "hdfs://192.168.0.113:9000/user/zhangzk";
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        FileStatus[] fileList = fs.listStatus(new Path(dst));
        for (FileStatus status : fileList) {
            System.out.println("name: " + status.getPath().getName()
                    + "\t\tsize: " + status.getLen());
        }
        fs.close();
    }
}

Note: the append operation is no longer supported starting from hadoop-0.21; for more on append, see the write-up on Javaeye.
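On versions that do still support it, the flag from the code comment above can also be set programmatically before opening the FileSystem. A minimal sketch, assuming the dfs.support.append property honored by the hadoop-0.20.x line:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same effect as setting dfs.support.append=true in hdfs-site.xml.
        conf.setBoolean("dfs.support.append", true);
        FileSystem fs = FileSystem.get(URI.create("hdfs://192.168.0.113:9000"), conf);
        FSDataOutputStream out = fs.append(new Path("/user/zhangzk/qq.txt"));
        out.write("appended line\n".getBytes());
        out.close();
        fs.close();
    }
}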

