Connecting to HDFS HA from the Java API
The code is as follows:
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Logical nameservice URI; the client resolves it to the active NameNode.
    conf.set("fs.defaultFS", "hdfs://hadoop2cluster");
    conf.set("dfs.nameservices", "hadoop2cluster");
    conf.set("dfs.ha.namenodes.hadoop2cluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.hadoop2cluster.nn1", "10.0.1.165:8020");
    conf.set("dfs.namenode.rpc-address.hadoop2cluster.nn2", "10.0.1.166:8020");
    // Client-side proxy provider that performs the failover between nn1 and nn2.
    conf.set("dfs.client.failover.proxy.provider.hadoop2cluster",
            "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
    FileSystem fs = null;
    try {
        fs = FileSystem.get(conf);
        FileStatus[] list = fs.listStatus(new Path("/"));
        for (FileStatus file : list) {
            System.out.println(file.getPath().getName());
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        if (fs != null) {  // guard against an NPE when FileSystem.get() failed
            try {
                fs.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
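
Hardcoding every HA property works, but the same settings can also be loaded from the cluster's own configuration files. A minimal sketch, assuming local copies of core-site.xml and hdfs-site.xml (the /etc/hadoop/conf path below is an assumption, not from the listing above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

// Minimal sketch: pick up the HA settings from the cluster's config files
// instead of calling conf.set() for each key. The paths are assumed examples.
Configuration conf = new Configuration();
conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));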
Invoking a MapReduce program from the Java API
The code is as follows:
String[] args = new String[24];
args[0] = "/usr/hadoop-2.2.0/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jar";
args[1] = "wordcount";
args[2] = "-D";
args[3] = "yarn.resourcemanager.address=10.0.1.165:8032";
args[4] = "-D";
args[5] = "yarn.resourcemanager.scheduler.address=10.0.1.165:8030";
args[6] = "-D";
args[7] = "fs.defaultFS=hdfs://hadoop2cluster/";
args[8] = "-D";
args[9] = "dfs.nameservices=hadoop2cluster";
args[10] = "-D";
args[11] = "dfs.ha.namenodes.hadoop2cluster=nn1,nn2";
args[12] = "-D";
args[13] = "dfs.namenode.rpc-address.hadoop2cluster.nn1=10.0.1.165:8020";
args[14] = "-D";
args[15] = "dfs.namenode.rpc-address.hadoop2cluster.nn2=10.0.1.166:8020";
args[16] = "-D";
args[17] = "dfs.client.failover.proxy.provider.hadoop2cluster=org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
args[18] = "-D";
args[19] = "fs.hdfs.impl=org.apache.hadoop.hdfs.DistributedFileSystem";
args[20] = "-D";
args[21] = "mapreduce.framework.name=yarn";
args[22] = "/input";   // input directory on HDFS
args[23] = "/out01";   // output directory; must not already exist
// RunJar is org.apache.hadoop.util.RunJar; its main() declares "throws Throwable",
// so the enclosing method must declare or propagate it.
RunJar.main(args);
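
Once the job finishes, the result can be read back through the same HA-aware client. A minimal sketch (the class name is illustrative, and the file name part-r-00000 assumes the job's default single reduce output under /out01):

import java.io.BufferedReader;
import java.io.InputStreamReader;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch: read the wordcount output back over the HA nameservice,
// reusing the HA properties from the first listing.
public class ReadJobOutput {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://hadoop2cluster");
        conf.set("dfs.nameservices", "hadoop2cluster");
        conf.set("dfs.ha.namenodes.hadoop2cluster", "nn1,nn2");
        conf.set("dfs.namenode.rpc-address.hadoop2cluster.nn1", "10.0.1.165:8020");
        conf.set("dfs.namenode.rpc-address.hadoop2cluster.nn2", "10.0.1.166:8020");
        conf.set("dfs.client.failover.proxy.provider.hadoop2cluster",
                "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
        try (FileSystem fs = FileSystem.get(conf);
             BufferedReader reader = new BufferedReader(
                     new InputStreamReader(fs.open(new Path("/out01/part-r-00000"))))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line); // each line is "word<TAB>count"
            }
        }
    }
}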