-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path App.java
More file actions
99 lines (67 loc) · 2.88 KB
/
App.java
File metadata and controls
99 lines (67 loc) · 2.88 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
package com.test.begin;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * MapReduce job that counts occurrences of the first comma-separated field
 * of each input line — e.g. with AllstarFull.csv it produces one
 * (playerId, appearanceCount) pair per distinct player id.
 */
public class App{
	/**
	 * Mapper: reads one CSV line per call and emits (firstField, 1).
	 * The reducer (and combiner) then sums the 1s per key.
	 */
	public static class TestMap extends Mapper<Object, Text, Text, IntWritable>{
		// Constant count of 1 per record; shared/reused so we don't allocate
		// a new IntWritable for every input line. (Name kept as-is: public field.)
		public final static IntWritable occurence = new IntWritable(1);
		// Reusable output key; Hadoop serializes it on context.write, so
		// mutating it between calls is safe.
		public Text word = new Text();

		/**
		 * Emits (first CSV field of the line, 1).
		 *
		 * @param key     byte offset of the line (unused)
		 * @param value   one full line of the input file
		 * @param context sink for the (word, 1) output pair
		 */
		@Override
		protected void map(Object key, Text value,
				Mapper<Object, Text, Text, IntWritable>.Context context)
				throws IOException, InterruptedException {
			String line = value.toString();
			// Guard: "".split(",") returns {""}, so without this check a blank
			// line would be counted under the empty-string key.
			if (line.isEmpty()) {
				return;
			}
			// Records are comma-delimited; the first field is the key
			// (the player id in AllstarFull.csv).
			String[] words = line.split(",");
			word.set(words[0]);
			context.write(word, occurence);
		}
	}

	/**
	 * Reducer (also installed as the combiner — summation is associative and
	 * commutative, so partial sums on the map side are safe): adds up all the
	 * 1s emitted for a key and writes (key, total).
	 */
	public static class TestReduce extends Reducer<Text, IntWritable, Text, IntWritable>{
		/**
		 * Sums the counts for one key.
		 *
		 * @param key     the word (player id)
		 * @param values  the counts emitted by mappers/combiners for this key
		 * @param context sink for the (key, sum) output pair
		 */
		@Override
		protected void reduce(Text key, Iterable<IntWritable> values,
				Reducer<Text, IntWritable, Text, IntWritable>.Context context)
				throws IOException, InterruptedException {
			int sum = 0;
			for(IntWritable value : values){
				sum += value.get();
			}
			context.write(key, new IntWritable(sum));
		}
	}

	/**
	 * Configures and submits the job, then blocks until completion.
	 * Exit code 0 on success, 1 on job failure, 2 on bad usage.
	 *
	 * @param args args[0] = input path, args[1] = output path (must not exist)
	 */
	public static void main( String[] args ) throws Exception
	{
		// Fail fast with a usage message instead of an
		// ArrayIndexOutOfBoundsException when paths are missing.
		if (args.length < 2) {
			System.err.println("Usage: App <input path> <output path>");
			System.exit(2);
		}
		Configuration conf = new Configuration();
		Job job = Job.getInstance(conf, "player count");
		job.setJarByClass(App.class);
		job.setMapperClass(TestMap.class);
		// Reducer doubles as combiner: summing partial counts map-side
		// reduces shuffle volume without changing the final totals.
		job.setCombinerClass(TestReduce.class);
		job.setReducerClass(TestReduce.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));
		System.exit(job.waitForCompletion(true)?0:1);
	}
}