* Download [http://www.eclipse.org/downloads/download.php?file=/eclipse/downloads/drops/R-3.4.2-200902111700/eclipse-SDK-3.4.2-linux-gtk.tar.gz eclipse SDK 3.4.2 Classic]
* Put the downloaded file in your home directory, then:
{{{
$ cd ~
$ tar -zxvf eclipse-SDK-3.4.2-linux-gtk.tar.gz
$ sudo mv eclipse /opt
$ sudo ln -sf /opt/eclipse/eclipse /usr/local/bin/
}}}
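* A quick, optional sanity check that the launcher is now on the PATH:
{{{
$ which eclipse
/usr/local/bin/eclipse
}}}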

* Create a desktop launcher (the panel may need to be restarted before it shows up; see the note below)

{{{
$ cat >> ~/.gnome2/panel2.d/default/launchers/eclipse.desktop << EOF
[Desktop Entry]
Encoding=UTF-8
Version=1.0
Type=Application
Terminal=false
Name[zh_TW]=eclipse
Exec=/usr/local/bin/eclipse
Name=eclipse
Icon[zh_TW]=/opt/eclipse/plugins/org.eclipse.platform_3.3.101.v200902111700/eclipse48.png
Icon=/opt/eclipse/plugins/org.eclipse.platform_3.3.101.v200902111700/eclipse48.png
EOF
}}}
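* If the launcher does not appear right away, one way to avoid logging out (assuming GNOME 2, where the panel respawns automatically) is to restart the panel:
{{{
$ killall gnome-panel
}}}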

= 2. Create the Project =

* Import the hadoop 0.20.0 eclipse plugin:
{{{
$ cd /opt/hadoop
$ sudo cp /opt/hadoop/contrib/eclipse-plugin/hadoop-0.20.0-eclipse-plugin.jar /opt/eclipse/plugins
}}}
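* Eclipse caches plugin information, so after copying the jar it can help to start Eclipse once with the `-clean` launcher option so the new plugin is picked up:
{{{
$ eclipse -clean &
}}}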
{{{
$ sudo vim /opt/eclipse/eclipse.ini
}}}
* Reference content for eclipse.ini, adjust as needed (optional):
{{{
#!sh
-startup
plugins/org.eclipse.equinox.launcher_1.0.101.R34x_v20081125.jar
--launcher.library
plugins/org.eclipse.equinox.launcher.gtk.linux.x86_1.0.101.R34x_v20080805
-showsplash
org.eclipse.platform
--launcher.XXMaxPermSize
512m
-vmargs
-Xms40m
-Xmx512m
}}}

* Window -> Open Perspective -> Other... -> Map/Reduce
* File -> New -> Project -> Map/Reduce -> Map/Reduce Project -> Next
{{{
#!sh
Project name -> enter: icas (any name will do)
Use default Hadoop -> Configure Hadoop install directory... -> enter: "/opt/hadoop" -> OK
Finish
}}}
* Project -> Properties ->
 * Java Build Path -> Libraries -> hadoop-0.20.0-ant.jar
 * Java Build Path -> Libraries -> hadoop-0.20.0-core.jar
 * Java Build Path -> Libraries -> hadoop-0.20.0-tools.jar
Using hadoop-0.20.0-core.jar as an example (the others follow the same pattern), the settings are:
{{{
#!sh
Source attachment -> enter: /opt/hadoop/src/core
Javadoc location -> enter: file:/opt/hadoop/docs/api/
}}}

* Project -> Properties
 * Javadoc location -> enter: file:/usr/lib/jvm/java-6-sun/docs/api/

* In the "Map/Reduce Locations" tab (the big yellow elephant icon at the bottom right of the window) -> click the blue elephant icon next to the gear:
{{{
Location Name -> enter: hadoop (any name will do)
Map/Reduce Master -> Host -> enter: localhost
Map/Reduce Master -> Port -> enter: 9001
DFS Master -> Port -> enter: 9000
Finish
}}}
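* These host/port values have to match the Hadoop configuration set up earlier. Assuming the usual single-node setup (fs.default.name set to hdfs://localhost:9000 and mapred.job.tracker set to localhost:9001), you can double-check them with:
{{{
$ grep -A 1 "fs.default.name" /opt/hadoop/conf/core-site.xml
$ grep -A 1 "mapred.job.tracker" /opt/hadoop/conf/mapred-site.xml
}}}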

= 3. Write the Sample Program =

* The icas project created in Eclipse above lives in:
 * /home/waue/workspace/icas
* Inside this directory there are two folders:
 * src : holds the source code
 * bin : holds the compiled class files
* Keeping sources and compiled classes separate like this will be very helpful when we produce the jar file later.

== 3.1 My First Program ==
* File -> New -> Mapper
{{{
Source folder -> enter: icas/src
Package : Sample
Name -> : mapper
}}}
* Edit mapper.java:
{{{
#!java
package Sample;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class mapper extends Mapper<Object, Text, Text, IntWritable> {

	// Reusable output objects: every word is emitted with a count of 1.
	private final static IntWritable one = new IntWritable(1);
	private Text word = new Text();

	public void map(Object key, Text value, Context context)
			throws IOException, InterruptedException {
		// Split the input line into tokens and emit a (word, 1) pair for each.
		StringTokenizer itr = new StringTokenizer(value.toString());
		while (itr.hasMoreTokens()) {
			word.set(itr.nextToken());
			context.write(word, one);
		}
	}
}
}}}
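* The four type parameters in `Mapper<Object, Text, Text, IntWritable>` are the input key/value and output key/value types: this mapper reads a line of text and, for every token in it, emits a (word, 1) pair.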
* File -> New -> Reducer

{{{
Source folder -> enter: icas/src
Package : Sample
Name -> : reducer
}}}

{{{
#!java
package Sample;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class reducer extends Reducer<Text, IntWritable, Text, IntWritable> {
	private IntWritable result = new IntWritable();

	public void reduce(Text key, Iterable<IntWritable> values, Context context)
			throws IOException, InterruptedException {
		// Sum all the counts emitted for this word and write the total.
		int sum = 0;
		for (IntWritable val : values) {
			sum += val.get();
		}
		result.set(sum);
		context.write(key, result);
	}
}
}}}

* File -> New -> Map/Reduce Driver

{{{
Source folder -> enter: icas/src
Package : Sample
Name -> : WordCount
}}}
* Edit the WordCount.java file:
{{{
#!java
package Sample;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		// Let GenericOptionsParser strip off any generic Hadoop options first.
		String[] otherArgs = new GenericOptionsParser(conf, args)
				.getRemainingArgs();
		if (otherArgs.length != 2) {
			System.err.println("Usage: wordcount <in> <out>");
			System.exit(2);
		}
		Job job = new Job(conf, "word count");
		job.setJarByClass(WordCount.class);
		job.setMapperClass(mapper.class);
		// The reducer doubles as a map-side combiner; see the note below.
		job.setCombinerClass(reducer.class);
		job.setReducerClass(reducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
		FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
}}}
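* Note that the same `reducer` class is registered as both combiner and reducer. This is safe here because summing counts is associative and commutative, so partial sums computed on the map side do not change the final result.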

* After all three files are saved, you can see that files have been generated under both src and bin in the icas project (the .class files come from Eclipse's automatic build):

{{{
$ cd workspace/icas
$ ls src/Sample/
mapper.java  reducer.java  WordCount.java
$ ls bin/Sample/
mapper.class  reducer.class  WordCount.class
}}}

= 4. Build =
* The eclipse-plugin shipped with hadoop 0.20 is still incomplete; for example: 1. the gear icon does nothing; 2. right-clicking WordCount.java -> Run As -> Run on Hadoop has no effect.
* Building and running Hadoop programs therefore has to be done from the command line. Typing the commands one by one is inefficient, so this section uses a Makefile instead; a sketch of the underlying commands follows.
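For reference, the individual commands the Makefile wraps look roughly like this (a sketch; it assumes Java 6, whose javac accepts the lib/* classpath wildcard, and normally Eclipse has already compiled the classes into bin/ for you):
{{{
$ cd /home/waue/workspace/icas
$ javac -classpath "/opt/hadoop/hadoop-0.20.0-core.jar:/opt/hadoop/lib/*" -d bin src/Sample/*.java
$ jar -cvf sample-0.1.jar -C bin/ .
$ hadoop jar sample-0.1.jar Sample.WordCount input output
}}}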

== 4.1 Create the Makefile ==
{{{
$ cd /home/waue/workspace/icas/
$ gedit Makefile
}}}

{{{
#!sh
# Note: the recipe lines below must be indented with a TAB character, not spaces.
JarFile="sample-0.1.jar"
MainFunc="Sample.WordCount"
LocalOutDir="/tmp/output"

all: help

jar:
	jar -cvf ${JarFile} -C bin/ .

run:
	hadoop jar ${JarFile} ${MainFunc} input output

clean:
	hadoop fs -rmr output

output:
	rm -rf ${LocalOutDir}
	hadoop fs -get output ${LocalOutDir}
	gedit ${LocalOutDir}/part-r-00000 &

help:
	@echo "Usage:"
	@echo "  make jar    - Build the jar file."
	@echo "  make clean  - Clean up the output directory on HDFS."
	@echo "  make run    - Run your MapReduce code on Hadoop."
	@echo "  make output - Download and show the output file."
	@echo "  make help   - Show Makefile options."
	@echo " "
	@echo "Example:"
	@echo "  make jar; make run; make output; make clean"
}}}


== 4.2. Run ==
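* Before running, the job needs an `input` directory on HDFS (the Makefile's run target reads from it). If you have not created one yet, a minimal way is to upload Hadoop's own conf directory as sample text:
{{{
$ hadoop fs -put /opt/hadoop/conf input
}}}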
{{{
$ cd /home/waue/workspace/icas/
$ make jar; make run; make output; make clean
}}}