Problem statement: on each input line, the first letter is a person and the remaining letters are that person's friends. Find every pair of people who have friends in common, and list who those common friends are.
A B C D E F
B A C D E
C A B E
D A B E
E A B C D
F A
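For reference, the expected output on this input, derived by hand from the data, is shown below. Each key is a sorted pair of people, followed by a tab and the comma-separated list of their common friends; the order of names inside a list may differ in an actual run, and a pair with no common friends, such as AF, gets an empty list.
AB	C,D,E
AC	B,E
AD	B,E
AE	B,C,D
AF	
BC	A,E
BD	A,E
BE	A,C,D
BF	A
CD	A,B,E
CE	A,B
CF	A
DE	A,B
DF	A
EF	A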
FdMapper.java
package com.fish.had.friend;
import java.io.IOException;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class FdMapper extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    protected void map(LongWritable k1, Text v1,
            Mapper<LongWritable, Text, Text, Text>.Context context)
            throws IOException, InterruptedException {
        // Each line looks like "A B C D E F": the first token is the person,
        // the rest are that person's friends.
        String[] vs = v1.toString().split(" ");
        int len = vs.length;
        if (len > 0) {
            Text v0 = new Text(vs[0]);
            // A TreeSet keeps the tokens sorted, so every pair key below is
            // emitted in a canonical order ("BC", never "CB").
            Set<String> set = new TreeSet<String>();
            for (String s : vs) {
                set.add(s);
            }
            String[] fds = set.toArray(new String[set.size()]);
            int len2 = fds.length;
            // Emit every pair on the line (the person included) with the
            // line's owner as the value; the reducer then collects, per pair,
            // everyone whose line contained both members of that pair.
            for (int i = 0; i < len2; i++) {
                for (int j = i + 1; j < len2; j++) {
                    context.write(new Text(fds[i] + fds[j]), v0);
                }
            }
        }
    }
}
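As a quick sanity check, the following standalone sketch reproduces the mapper's pair-generation loop for the single line "C A B E" and prints what it would emit. FdMapperTrace is a hypothetical helper for illustration only, not part of the job, and needs no Hadoop to run.
import java.util.Set;
import java.util.TreeSet;

// Hypothetical trace of FdMapper's pair generation for one input line.
public class FdMapperTrace {
    public static void main(String[] args) {
        String line = "C A B E";
        String[] vs = line.split(" ");
        Set<String> set = new TreeSet<String>();
        for (String s : vs) {
            set.add(s);
        }
        String[] fds = set.toArray(new String[0]);
        for (int i = 0; i < fds.length; i++) {
            for (int j = i + 1; j < fds.length; j++) {
                // Prints: (AB, C) (AC, C) (AE, C) (BC, C) (BE, C) (CE, C)
                System.out.println("(" + fds[i] + fds[j] + ", " + vs[0] + ")");
            }
        }
    }
}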
FdReducer.java
package com.fish.had.friend;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class FdReducer extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text k2, Iterable<Text> v2s,
            Reducer<Text, Text, Text, Text>.Context context)
            throws IOException, InterruptedException {
        // k2 is a sorted pair such as "BC"; v2s holds every person whose
        // friend line contained both members of the pair.
        StringBuilder v2 = new StringBuilder();
        String sk = k2.toString();
        for (Text t : v2s) {
            String sv = t.toString();
            // Skip values that are themselves part of the pair ("B" or "C"
            // for key "BC"); what remains are the common friends.
            if (!sk.contains(sv)) {
                if (v2.length() > 0) {
                    v2.append(',');
                }
                v2.append(sv);
            }
        }
        context.write(k2, new Text(v2.toString()));
    }
}
Notes:
1. The two people named in the key must be excluded from the value list. For key "BC", the grouped values also include B and C themselves (each person's own line contains both members of the pair), so any value that appears in the key is skipped:
for (Text t : v2s) {
    String sv = t.toString();
    if (!sk.contains(sv)) {
        ...
    }
}
Note that this substring check only works as a membership test because every name is a single letter; multi-character names would need a proper set of the key's members.
FdMain.java
package com.fish.had.friend;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class FdMain extends Configured implements Tool {
    public static void main(String[] args) throws Exception {
        if (args.length != 2) {
            System.err.println("Usage: FdMain <input path> <output path>");
            System.exit(1);
        }
        int exitCode = ToolRunner.run(new FdMain(), args);
        System.exit(exitCode);
    }

    @Override
    public int run(String[] args) throws Exception {
        String INPATH = args[0];
        String OUTPATH = args[1];
        Path outPath = new Path(OUTPATH);
        Configuration conf = new Configuration();
        // Remove any previous output directory so the job can be re-run.
        FileSystem fs = FileSystem.get(new URI(OUTPATH), conf);
        if (fs.exists(outPath)) fs.delete(outPath, true);

        // Job.getInstance replaces the deprecated new Job(conf, name).
        Job job = Job.getInstance(conf, FdMain.class.getSimpleName());
        job.setJarByClass(FdMain.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setMapperClass(FdMapper.class);
        job.setReducerClass(FdReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setPartitionerClass(HashPartitioner.class);
        job.setNumReduceTasks(1);
        FileInputFormat.setInputPaths(job, new Path(INPATH));
        FileOutputFormat.setOutputPath(job, outPath);
        return job.waitForCompletion(true) ? 0 : 1;
    }
}
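To run the job, package the three classes into a jar and submit it with the hadoop jar command; the jar name and HDFS paths below are placeholders:
hadoop jar friends.jar com.fish.had.friend.FdMain /user/fish/friend/input /user/fish/friend/output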