My Hadoop server has an error. The job code and the error output it produces are below.

{{{
#!java
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCount2for020 extends Configured implements Tool {

    public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {

        static enum Counters {
            INPUT_WORDS
        }

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        private boolean caseSensitive = true;
        private Set<String> patternsToSkip = new HashSet<String>();

        private long numRecords = 0;
        private String inputFile;

        public void setup(Context context) {
            Configuration conf = context.getConfiguration();
            caseSensitive = conf.getBoolean("wordcount.case.sensitive", true);
            inputFile = conf.get("map.input.file");

            if (conf.getBoolean("wordcount.skip.patterns", false)) {
                Path[] patternsFiles = new Path[0];
                try {
                    patternsFiles = DistributedCache.getLocalCacheFiles(conf);
                } catch (IOException ioe) {
                    System.err.println("Caught exception while getting cached files: "
                            + StringUtils.stringifyException(ioe));
                }
                for (Path patternsFile : patternsFiles) {
                    parseSkipFile(patternsFile);
                }
            }
        }

        private void parseSkipFile(Path patternsFile) {
            try {
                BufferedReader fis = new BufferedReader(new FileReader(
                        patternsFile.toString()));
                String pattern = null;
                while ((pattern = fis.readLine()) != null) {
                    patternsToSkip.add(pattern);
                }
            } catch (IOException ioe) {
                System.err.println("Caught exception while parsing the cached file '"
                        + patternsFile + "' : "
                        + StringUtils.stringifyException(ioe));
            }
        }
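        // Note: this map() keeps the old org.apache.hadoop.mapred parameter types
        // (OutputCollector/Reporter) while the class extends the new
        // org.apache.hadoop.mapreduce.Mapper, so it does not override
        // Mapper.map(KEYIN, VALUEIN, Context); the framework runs the default
        // (identity) map instead. A new-API sketch follows the error log below.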
        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            String line = (caseSensitive) ? value.toString() : value.toString()
                    .toLowerCase();

            for (String pattern : patternsToSkip) {
                line = line.replaceAll(pattern, "");
            }

            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
                reporter.incrCounter(Counters.INPUT_WORDS, 1);
            }

            if ((++numRecords % 100) == 0) {
                reporter.setStatus("Finished processing " + numRecords
                        + " records " + "from the input file: " + inputFile);
            }
        }
    }

    public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public int run(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        job.setJarByClass(WordCount2for020.class);
        job.setJobName("wordcount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(Map.class);
        job.setCombinerClass(Reduce.class);
        job.setReducerClass(Reduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        List<String> other_args = new ArrayList<String>();
        for (int i = 0; i < args.length; ++i) {
            if ("-skip".equals(args[i])) {
                DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
                conf.setBoolean("wordcount.skip.patterns", true);
            } else {
                other_args.add(args[i]);
            }
        }

        // conf.set("mapred.job.tracker", "local");
        // conf.set("fs.default.name", "file:///");

        FileInputFormat.setInputPaths(job, new Path(other_args.get(0)));
        FileOutputFormat.setOutputPath(job, new Path(other_args.get(1)));

        job.waitForCompletion(true);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        // String[] argv = { "-Dwordcount.case.sensitive=false",
        //         "/user/waue/text_input", "/user/waue/output-v020", "-skip",
        //         "/user/waue/patterns" };
        // args = argv;
        int res = ToolRunner.run(new Configuration(), new WordCount2for020(),
                args);
        System.exit(res);
    }
}
}}}

{{{
10/04/16 17:32:35 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
10/04/16 17:32:36 INFO input.FileInputFormat: Total input paths to process : 4
10/04/16 17:32:36 INFO mapred.JobClient: Running job: job_201003231850_0021
10/04/16 17:32:37 INFO mapred.JobClient:  map 0% reduce 0%
10/04/16 17:32:44 INFO mapred.JobClient: Task Id : attempt_201003231850_0021_m_000005_0, Status : FAILED
java.io.IOException: Task process exit with nonzero status of 126.
	at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:418)
10/04/16 17:32:44 WARN mapred.JobClient: Error reading task outputhttp://vpro:50060/tasklog?plaintext=true&taskid=attempt_201003231850_0021_m_000005_0&filter=stdout
}}}
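Exit status 126 from a child process normally means the command was found but could not be executed (typically a permission or shell problem when the TaskTracker launches the task JVM), so this particular failure may be an environment issue rather than something in the Java code; the task's stdout/stderr logs linked in the last line should say more.

Separately, the posted inner classes mix the two MapReduce APIs: the map() and reduce() methods use the old mapred types (OutputCollector, Reporter) while the classes extend the new mapreduce Mapper/Reducer, so they do not actually override the new-API methods. The following is only a minimal sketch of what purely new-API versions of Map and Reduce might look like; it is an assumption about the intended behaviour, drops the skip-pattern, counter and case-sensitivity details to stay short, and assumes it replaces the inner classes of WordCount2for020 above with the same imports (minus OutputCollector/Reporter):

{{{
#!java
// Sketch only: new-API (org.apache.hadoop.mapreduce) replacements for the
// inner Map and Reduce classes above.
public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // context.write(...) takes over from output.collect(...)
        StringTokenizer tokenizer = new StringTokenizer(value.toString());
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            context.write(word, one);
        }
    }
}

public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // The new API hands the values in as an Iterable rather than an Iterator
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
}}}

With these signatures the @Override annotation would also catch a mismatched method at compile time instead of silently falling back to the identity map.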