Input data
order.txt
1001 01 1
1002 02 2
1003 03 3
1004 01 4
1005 02 5
1006 03 6
pd.txt
01 小米
02 华为
03 格力
Expected result
Each order record keeps its order id and amount, but the product id is replaced by the product name (row order within a product group is not guaranteed):
1001 小米 1
1004 小米 4
1002 华为 2
1005 华为 5
1003 格力 3
1006 格力 6
Requirement analysis
order.txt and pd.txt share the product id (the second column of order.txt, the first column of pd.txt). Because the two record types come from different files, the Mapper only tags each record with its source file and emits the product id as the key; the actual join happens in the Reducer, which is why this is a reduce-side join. Fields in both files are tab-separated, which is why the Mapper splits each line on "\t".
Custom OrderProductBean
package com.mr.reducejoin;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class OrderProductBean implements Writable {
// orderId from the order data
private int orderId;
// amount from the order data
private int amount;
// product id: appears in both the order data and the product data
private int productId;
// product name from the product data
private String productName;
// flag marking whether this bean was built from an order record or a product record
private String flag;
public OrderProductBean() {
}
public OrderProductBean(int orderId, int amount, int productId, String productName, String flag) {
this.orderId = orderId;
this.amount = amount;
this.productId = productId;
this.productName = productName;
this.flag = flag;
}
public int getOrderId() {
return orderId;
}
public void setOrderId(int orderId) {
this.orderId = orderId;
}
public int getAmount() {
return amount;
}
public void setAmount(int amount) {
this.amount = amount;
}
public int getProductId() {
return productId;
}
public void setProductId(int productId) {
this.productId = productId;
}
public String getProductName() {
return productName;
}
public void setProductName(String productName) {
this.productName = productName;
}
public String getFlag() {
return flag;
}
public void setFlag(String flag) {
this.flag = flag;
}
@Override
public String toString() {
return orderId + "\t" + productName + "\t" + amount;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(orderId);
out.writeInt(amount);
out.writeInt(productId);
out.writeUTF(productName);
out.writeUTF(flag);
}
@Override
public void readFields(DataInput in) throws IOException {
orderId = in.readInt();
amount = in.readInt();
productId = in.readInt();
productName = in.readUTF();
flag = in.readUTF();
}
}
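The only contract here is that write and readFields handle the fields in the same order and with the same types. If you want to verify that locally before running the job, a minimal round-trip sketch (not part of the original code; class name OrderProductBeanRoundTrip is just an example) could look like this:
package com.mr.reducejoin;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
public class OrderProductBeanRoundTrip {
    public static void main(String[] args) throws Exception {
        // build a bean the same way the Mapper would for an order record
        OrderProductBean original = new OrderProductBean(1001, 1, 1, "", "order");
        // serialize with write()
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));
        // deserialize with readFields()
        OrderProductBean copy = new OrderProductBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        // both lines should be identical: 1001<tab><tab>1
        System.out.println(original);
        System.out.println(copy);
    }
}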
Mapper class
package com.mr.reducejoin;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import java.io.IOException;
public class ReduceJoinMapper extends Mapper<LongWritable, Text, Text, OrderProductBean> {
// name of the current input file
private String fileName;
// bean object that wraps either an order record or a product record
OrderProductBean orderProductBean = new OrderProductBean();
// the shared field used as the key: the product id
Text pId = new Text();
// get the name of the input file this map task is processing
@Override
protected void setup(Context context) throws IOException, InterruptedException {
InputSplit inputSplit = context.getInputSplit();
FileSplit fileSplit = (FileSplit) inputSplit;
fileName = fileSplit.getPath().getName();
}
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String[] split = value.toString().split("\t");
// check the file name: if it contains "order", the line comes from order.txt
if (fileName.contains("order")) {
orderProductBean.setAmount(Integer.parseInt(split[2]));
orderProductBean.setFlag("order");
orderProductBean.setOrderId(Integer.parseInt(split[0]));
orderProductBean.setProductId(Integer.parseInt(split[1]));
// fields this record does not have are given default values
orderProductBean.setProductName("");
// use the product id as the output key
pId.set(split[1]);
} else {
// otherwise the line comes from pd.txt
orderProductBean.setProductName(split[1]);
orderProductBean.setProductId(Integer.parseInt(split[0]));
orderProductBean.setFlag("product");
// fields this record does not have are given default values
orderProductBean.setAmount(0);
orderProductBean.setOrderId(0);
// use the product id as the output key
pId.set(split[0]);
}
context.write(pId, orderProductBean);
}
}
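For the sample input above, each map call therefore emits the product id as the key and a tagged bean as the value. For product id 01, for example, the intermediate records are (shown here only for illustration):
01 -> orderId=1001, amount=1, productId=1, productName="", flag="order"
01 -> orderId=1004, amount=4, productId=1, productName="", flag="order"
01 -> orderId=0, amount=0, productId=1, productName="小米", flag="product"
All three share the same key, so they arrive together in a single reduce call.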
Reducer class
package com.mr.reducejoin;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
public class ReduceJoinReducer extends Reducer<Text, OrderProductBean, OrderProductBean, NullWritable> {
@Override
protected void reduce(Text key, Iterable<OrderProductBean> values, Context context) throws IOException, InterruptedException {
ArrayList<OrderProductBean> orderBeanList = new ArrayList<>();
OrderProductBean productBean = new OrderProductBean();
// separate the beans that came from order.txt from the single bean that came from pd.txt
for (OrderProductBean value : values) {
if (value.getFlag().equals("order")) {
// Hadoop reuses the value object across iterations, so orderBeanList.add(value) would store the same object repeatedly; copy it instead
OrderProductBean bean = new OrderProductBean();
try {
BeanUtils.copyProperties(bean, value);
} catch (IllegalAccessException | InvocationTargetException e) {
e.printStackTrace();
}
orderBeanList.add(bean);
} else {
try {
BeanUtils.copyProperties(productBean, value);
} catch (IllegalAccessException | InvocationTargetException e) {
e.printStackTrace();
}
}
}
// iterate over orderBeanList, fill in the productName of every order bean, and write it out
for (OrderProductBean bean : orderBeanList) {
bean.setProductName(productBean.getProductName());
context.write(bean, NullWritable.get());
}
}
}
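BeanUtils.copyProperties comes from commons-beanutils, which must be on the classpath; here it is used only to clone the reused value object. If you would rather avoid that dependency, a plain field-by-field copy does the same thing; a sketch of the equivalent lines inside the loop:
// equivalent to BeanUtils.copyProperties(bean, value) for this particular bean
OrderProductBean bean = new OrderProductBean(
        value.getOrderId(),
        value.getAmount(),
        value.getProductId(),
        value.getProductName(),
        value.getFlag());
orderBeanList.add(bean);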
Driver class
package com.mr.reducejoin;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class ReduceJoinDriver {
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
args = new String[]{"G:\\file\\input", "G:\\file\\output"};
Configuration configuration = new Configuration();
Job job = Job.getInstance(configuration);
job.setJarByClass(ReduceJoinDriver.class);
job.setMapperClass(ReduceJoinMapper.class);
job.setReducerClass(ReduceJoinReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(OrderProductBean.class);
job.setOutputKeyClass(OrderProductBean.class);
job.setOutputValueClass(NullWritable.class);
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
boolean waitForCompletion = job.waitForCompletion(true);
System.exit(waitForCompletion ? 0 : 1);
}
}
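The hard-coded args line is only convenient for running the job locally from the IDE. On a real cluster you would remove it and pass the paths on the command line, roughly like this (jar name and paths are just examples):
hadoop jar reducejoin.jar com.mr.reducejoin.ReduceJoinDriver /input/path /output/path
Either way, the output directory must not exist before the job starts, otherwise FileOutputFormat fails the job immediately.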