{{{ #!html
HBase Advanced Course
Programming Examples and Exercises
}}}
[wiki:NCHCCloudCourse100929_4_HBEX6 Previous < ] Final exercise [wiki:NCHCCloudCourse110721#第二天 >> Back to course outline]

= Example 7: LoadHBaseMapper =

== Description: ==

This program reads data out of an HBase table and writes the results back onto HDFS.

{{{
$ bin/hadoop jar TCRCExample.jar LoadHBaseMapper output-lhm
}}}

== Notes: ==

 1. Example 6 must have been run first, so that the wordcount table already exists in HBase (a quick check is shown in the shell sketch at the end of this page).
 2. After the computation finishes, the program writes its results into the HDFS directory you specify; that output directory must not already exist on HDFS.

{{{
#!java
package org.nchc.hbase;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class LoadHBaseMapper {
    public static class HtMap extends TableMapper<Text, Text> {

        public void map(ImmutableBytesWritable key, Result value,
                Context context) throws IOException, InterruptedException {
            // read the cell content:count from the current row
            String res = Bytes.toString(value.getValue(Bytes.toBytes("content"),
                    Bytes.toBytes("count")));

            // emit the row key (decoded back to a string) as the map output key
            context.write(new Text(Bytes.toString(key.get())), new Text(res));
        }
    }

    // This was originally a TableReducer, but a plain Reducer works here,
    // and the output value type is not fixed to Text either (e.g. LongWritable
    // also works). Any class in org.apache.hadoop.io.* that implements
    // Writable's write() should be usable; a sketch of the LongWritable
    // variant appears at the end of this page.
    public static class HtReduce extends Reducer<Text, Text, Text, Text> {

        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {

            String str = new String("");
            Text final_key = new Text(key);
            Text final_value = new Text();
            // concatenate the values that share the same key, separated by "&&"
            for (Text tmp : values) {
                if (str.length() > 0)
                    str += " && ";
                str += tmp.toString();
            }
            final_value.set(str);
            context.write(final_key, final_value);
        }
    }

    public static void main(String args[]) throws Exception {
        // debug
        // String[] argv = { "output-lhm" };
        // args = argv;
        String tablename = "wordcount";

        Configuration conf = new Configuration();

        Job job = new Job(conf, tablename + " hbase data to hdfs");
        job.setJarByClass(LoadHBaseMapper.class);

        // set which HBase table serves as the input
        conf.set(TableInputFormat.INPUT_TABLE, tablename);

        // scan row keys from the start of the table up to (but not including)
        // "12", reading only the column content:count
        Scan myScan = new Scan("".getBytes(), "12".getBytes());
        myScan.addColumn("content:count".getBytes());

        // use TableMapReduceUtil.initTableMapperJob to wire the table scan
        // into the job as the map input
        TableMapReduceUtil.initTableMapperJob(tablename, myScan, HtMap.class,
                Text.class, Text.class, job);

        job.setMapperClass(HtMap.class);
        job.setReducerClass(HtReduce.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setInputFormatClass(TableInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileOutputFormat.setOutputPath(job, new Path(args[0]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
}}}

 * Execution result: inspect the contents of the file /user/hadoop/output-lhm/part-r-00000
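A quick way to check both notes above is to confirm the wordcount table before running and to inspect the result file afterwards. A minimal sketch, assuming the job was run with the output directory output-lhm as in the command above:

{{{
# confirm that the wordcount table from Example 6 exists
$ hbase shell
hbase> list
hbase> scan 'wordcount', {LIMIT => 3}
hbase> exit

# after the job finishes, list and print the result file on HDFS
$ bin/hadoop fs -ls output-lhm
$ bin/hadoop fs -cat output-lhm/part-r-00000
}}}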
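For reference, the same cell the mapper reads (content:count) can also be fetched directly with the HBase client API, without MapReduce. A minimal sketch, assuming the HBase client API of the era used in this course; the class name ReadOneCell and the row key "hello" are hypothetical, for illustration only:

{{{
#!java
package org.nchc.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadOneCell {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "wordcount");
        // "hello" is a hypothetical row key; use a word that exists in the table
        Get get = new Get(Bytes.toBytes("hello"));
        get.addColumn(Bytes.toBytes("content"), Bytes.toBytes("count"));
        Result result = table.get(get);
        byte[] value = result.getValue(Bytes.toBytes("content"),
                Bytes.toBytes("count"));
        System.out.println("count = " + Bytes.toString(value));
        table.close();
    }
}
}}}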
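As mentioned in the comment above HtReduce, the reduce output value type is not fixed to Text. Below is a minimal sketch of the LongWritable variant; the class name HtReduceLong is ours, and a job using it would also need job.setOutputValueClass(LongWritable.class). Instead of concatenating the values as strings, it parses and sums the per-key counts:

{{{
#!java
package org.nchc.hbase;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class HtReduceLong extends Reducer<Text, Text, Text, LongWritable> {

    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        long sum = 0;
        // sum the counts that share the same key
        for (Text tmp : values) {
            sum += Long.parseLong(tmp.toString());
        }
        context.write(key, new LongWritable(sum));
    }
}
}}}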