分享

救助!eclipse win7上远程无法提交job

报错信息如下:
Exception in thread "main" org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /tmp/hadoop-
yarn/staging/root/.staging/job_1463448457587_0001/job.jar could only be replicated to 0 nodes instead of minReplication (=1).  There are 2 datanode(s) running and 2 node(s) are excluded in this operation.
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1559)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3245)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:663)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:482)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:975)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2040)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2036)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1656)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2034)
at org.apache.hadoop.ipc.Client.call(Client.java:1469)
at org.apache.hadoop.ipc.Client.call(Client.java:1400)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232)
at com.sun.proxy.$Proxy9.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:399)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
at java.lang.reflect.Method.invoke(Unknown Source)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy10.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.locateFollowingBlock(DFSOutputStream.java:1532)
at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.nextBlockOutputStream(DFSOutputStream.java:1349)
at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:588)

代码:
package wordcount;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class test{

  public static class TokenizerMapper
       extends Mapper<Object, Text, Text, IntWritable>{

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Context context
                    ) throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);      }
    }
  }

  public static class IntSumReducer
       extends Reducer<Text,IntWritable,Text,IntWritable> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values,
                       Context context
                       ) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://192.168.56.100:9000");
    conf.set("hadoop.job.user", "root");
    conf.set("mapreduce.framework.name","yarn");
    conf.set("mapreduce.jobtracker.address","192.168.56.100:9001");
    conf.set("yarn.resourcemanager.hostname","192.168.56.100");
    conf.set("yarn.resourcemanager.admin.address","192.168.56.100:8033");
    conf.set("yarn.resourcemanager.address","192.168.56.100:8032");
    conf.set("yarn.resourcemanager.resource-tracker.address","192.168.56.100:8031");
    conf.set("yarn.resourcemanager.scheduler.address","192.168.56.100:8030");

    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
      System.err.println("Usage: wordcount  ");
      System.exit(2);
    }
Job job = new Job(conf, "word count");
   
    job.setJar("wordcount.jar");
    job.setJarByClass(test.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    System.out.println(otherArgs[0].toString());
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    System.out.println(otherArgs[1].toString());
   
    boolean flag=job.waitForCompletion(true);
   
    System.out.println("SUCCEED!"+flag);
    System.exit(flag ? 0 : 1);
   
    System.out.println();
  }
}

centos6.5+hadoop2.5.4
经检查集群是正常的,我在namenode上安装的也有eclipse,一样的JOB在namenode上提交就没问题,在远端win7上提交就是提交不上去,跪求大神能帮助看看是什么问题。

已有(4)人评论

跳转到指定楼层
langke93 发表于 2016-5-17 11:25:06
1.    conf.set("hadoop.job.user", "root");
用户应该不是root
2.本地版本是否与服务器版本一致
3.hdfs-site.xml里面是否允许远程访问



回复

使用道具 举报

pinetree 发表于 2016-5-17 11:32:07
langke93 发表于 2016-5-17 11:25
1.    conf.set("hadoop.job.user", "root");
用户应该不是root
2.本地版本是否与服务器版本一致

我这个只是在公司搭建的实验环境,为了方便就没有另外设置hadoop用户,直接就在root用户下搭建的,dfs.permissions为了方便从一开始就设置的是false
回复

使用道具 举报

langke93 发表于 2016-5-17 12:16:00
pinetree 发表于 2016-5-17 11:32
我这个只是在搭公司建的实验环境,为了方便就没有另外设置hadoop用户,直接就在root用户下搭建的,dfs.pe ...

既然是远程操作,提交job应该是window用户,并非Linux下的root用户
回复

使用道具 举报

pinetree 发表于 2016-5-17 12:40:13
langke93 发表于 2016-5-17 12:16
既然是远程操作,提交job应该是window用户,并非Linux下的root用户

下载1.png
你说的这个问题我知道,所以我win7下的用户名已经改成和集群用户名一致了都是root
回复

使用道具 举报

您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

关闭

推荐上一条 /2 下一条