Hadoop MR Client Source Code Analysis
The entry point of the client program is:
job.waitForCompletion(true);
Stepping in, waitForCompletion() calls submit() in Job.java:
/**
 * Submit the job to the cluster and return immediately.
 * @throws IOException
 */
public void submit()
       throws IOException, InterruptedException, ClassNotFoundException {
  ensureState(JobState.DEFINE);
  setUseNewAPI();
  connect();
  final JobSubmitter submitter =
      getJobSubmitter(cluster.getFileSystem(), cluster.getClient());
  status = ugi.doAs(new PrivilegedExceptionAction<JobStatus>() {
    public JobStatus run() throws IOException, InterruptedException,
                                  ClassNotFoundException {
      // core code: step in
      return submitter.submitJobInternal(Job.this, cluster);
    }
  });
  state = JobState.RUNNING;
  LOG.info("The url to track the job: " + getTrackingURL());
}
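For context, here is a minimal driver sketch showing how a job typically reaches this submit() path. MyDriver, MyMapper, MyReducer and the paths are placeholders, not part of the source being analyzed:

Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "word count");
job.setJarByClass(MyDriver.class);
job.setMapperClass(MyMapper.class);
job.setReducerClass(MyReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job, new Path("/data/in"));
FileOutputFormat.setOutputPath(job, new Path("/data/out"));
// waitForCompletion(true) calls submit() above, then polls and prints progress
System.exit(job.waitForCompletion(true) ? 0 : 1);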
JobSubmitter.java
Inside submitJobInternal(), the job's input is divided into splits; one map task is generated per split:
// Create the splits for the job
LOG.debug("Creating splits at " + jtFs.makeQualified(submitJobDir));
// core: step in
int maps = writeSplits(job, submitJobDir);
private int writeSplits(org.apache.hadoop.mapreduce.JobContext job,
    Path jobSubmitDir) throws IOException,
    InterruptedException, ClassNotFoundException {
  JobConf jConf = (JobConf)job.getConfiguration();
  int maps;
  if (jConf.getUseNewMapper()) {
    // core: step in
    maps = writeNewSplits(job, jobSubmitDir);
  } else {
    maps = writeOldSplits(jConf, jobSubmitDir);
  }
  return maps;
}
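Before following writeNewSplits, note that the split count (and hence the map count) can be tuned through the min/max split sizes that getSplits reads later; a sketch, assuming the new-API FileInputFormat:

// splits will be at least 256 MB and at most 512 MB
FileInputFormat.setMinInputSplitSize(job, 256L * 1024 * 1024);
FileInputFormat.setMaxInputSplitSize(job, 512L * 1024 * 1024);
// equivalent configuration keys:
//   mapreduce.input.fileinputformat.split.minsize
//   mapreduce.input.fileinputformat.split.maxsize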
private <T extends InputSplit>
int writeNewSplits(JobContext job, Path jobSubmitDir) throws IOException,
    InterruptedException, ClassNotFoundException {
  Configuration conf = job.getConfiguration();
  InputFormat<?, ?> input =
      // a1: job.getInputFormatClass()
      ReflectionUtils.newInstance(job.getInputFormatClass(), conf);
  // core: a2
  List<InputSplit> splits = input.getSplits(job);
  T[] array = (T[]) splits.toArray(new InputSplit[splits.size()]);

  // sort the splits into order based on size, so that the biggest
  // go first
  Arrays.sort(array, new SplitComparator());
  JobSplitWriter.createSplitFiles(jobSubmitDir, conf,
      jobSubmitDir.getFileSystem(conf), array);
  return array.length;
}
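The (private) SplitComparator used above orders splits largest-first so the biggest splits are scheduled earliest; conceptually it behaves like this sketch:

// equivalent descending sort by split length
Arrays.sort(array, (a, b) -> {
  try {
    return Long.compare(b.getLength(), a.getLength());
  } catch (IOException | InterruptedException e) {
    throw new RuntimeException("exception in compare", e);
  }
});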
Step into a1, JobContextImpl.java:
/**
 * Get the {@link InputFormat} class for the job.
 *
 * @return the {@link InputFormat} class for the job.
 */
@SuppressWarnings("unchecked")
public Class<? extends InputFormat<?,?>> getInputFormatClass()
    throws ClassNotFoundException {
  return (Class<? extends InputFormat<?,?>>)
      // so the default InputFormat is TextInputFormat
      conf.getClass(INPUT_FORMAT_CLASS_ATTR, TextInputFormat.class);
}
Before stepping into a2, note the inheritance chain: InputFormat ==> FileInputFormat ==> TextInputFormat. The instance here is a TextInputFormat, but getSplits() is implemented in FileInputFormat.
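Since TextInputFormat is only the fallback in conf.getClass, a job can swap in a different InputFormat explicitly; for example (KeyValueTextInputFormat ships in Hadoop's mapreduce.lib.input package):

job.setInputFormatClass(KeyValueTextInputFormat.class);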

Step into a2, FileInputFormat.java:
/**
 * Generate the list of files and make them into FileSplits.
 * @param job the job context
 * @throws IOException
 */
public List<InputSplit> getSplits(JobContext job) throws IOException {
  StopWatch sw = new StopWatch().start();
  // minSize = 1
  long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
  // maxSize = Long.MAX_VALUE
  long maxSize = getMaxSplitSize(job);

  // generate splits
  List<InputSplit> splits = new ArrayList<InputSplit>();
  // FileStatus exposes the file's metadata
  List<FileStatus> files = listStatus(job);
  // iterate over every input file; everything below is done per file
  for (FileStatus file: files) {
    if (ignoreDirs && file.isDirectory()) {
      continue;
    }
    // path of the file in HDFS
    Path path = file.getPath();
    // size of the file
    long length = file.getLen();
    if (length != 0) {
      // array of block locations
      BlockLocation[] blkLocations;
      if (file instanceof LocatedFileStatus) {
        blkLocations = ((LocatedFileStatus) file).getBlockLocations();
      } else {
        // on a distributed file system this branch is taken
        FileSystem fs = path.getFileSystem(job.getConfiguration());
        // returns the location list of every block, from offset 0 to the end of the file
        blkLocations = fs.getFileBlockLocations(file, 0, length);
      }
      if (isSplitable(job, path)) {
        // 128 MB by default
        long blockSize = file.getBlockSize();
        // stepping in, the formula is: Math.max(minSize, Math.min(maxSize, blockSize)),
        // so the split size defaults to the block size
        long splitSize = computeSplitSize(blockSize, minSize, maxSize);

        // bytesRemaining: how much of the file is left after carving off splits
        // (see the -= below)
        long bytesRemaining = length;
        // keep splitting while bytesRemaining / splitSize > 1.1 (SPLIT_SLOP)
        while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
          // b1: core; map the split's offset back to its block to get the
          // block's location info
          int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
          // The block's host info is what enables moving computation to the data.
          // This touches the resource layer: a block is stored as multiple replicas
          // in multiple locations, and only the resource layer knows which is idle.
          // Each InputSplit's metadata consists of four parts:
          // <file, start, length, hosts>
          splits.add(makeSplit(path, length-bytesRemaining, splitSize,
              blkLocations[blkIndex].getHosts(),
              blkLocations[blkIndex].getCachedHosts()));
          bytesRemaining -= splitSize;
        }

        if (bytesRemaining != 0) {
          int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
          splits.add(makeSplit(path, length-bytesRemaining, bytesRemaining,
              blkLocations[blkIndex].getHosts(),
              blkLocations[blkIndex].getCachedHosts()));
        }
      } else { // not splitable
        if (LOG.isDebugEnabled()) {
          // Log only if the file is big enough to be splitted
          if (length > Math.min(file.getBlockSize(), minSize)) {
            LOG.debug("File is not splittable so no parallelization "
                + "is possible: " + file.getPath());
          }
        }
        splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
            blkLocations[0].getCachedHosts()));
      }
    } else {
      //Create empty hosts array for zero length files
      splits.add(makeSplit(path, 0, length, new String[0]));
    }
  }
  // Save the number of input files for metrics/loadgen
  job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
  sw.stop();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Total # of splits generated by getSplits: " + splits.size()
        + ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
  }
  return splits;
}
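Two quick sanity checks on the numbers above, assuming default configuration:

// computeSplitSize with defaults: split size equals block size
long blockSize = 128L * 1024 * 1024;  // default HDFS block size
long minSize = 1L;                    // getFormatMinSplitSize() and split.minsize default
long maxSize = Long.MAX_VALUE;        // getMaxSplitSize(job) default
long splitSize = Math.max(minSize, Math.min(maxSize, blockSize));
// splitSize == 134217728 (128 MB)

And SPLIT_SLOP = 1.1 avoids tiny tail splits: a 129 MB file is not cut in two, because 129.0/128.0 ≈ 1.008 <= 1.1, so the while loop never runs and the bytesRemaining != 0 branch emits a single 129 MB split.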
Step into b1, which maps a split's start offset to the block that contains it:
// takes the block locations and the split's start offset
protected int getBlockIndex(BlockLocation[] blkLocations,
                            long offset) {
  // check every block
  for (int i = 0 ; i < blkLocations.length; i++) {
    // is the offset inside this block?
    // i.e. the split's start offset lies between the block's start and end
    if ((blkLocations[i].getOffset() <= offset) &&
        (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())){
      return i;
    }
  }
  BlockLocation last = blkLocations[blkLocations.length -1];
  long fileLength = last.getOffset() + last.getLength() -1;
  throw new IllegalArgumentException("Offset " + offset +
      " is outside of file (0.." + fileLength + ")");
}
Picture blocks on the left and splits on the right: suppose a file is stored as 3 blocks but cut into 2 splits, with each split's start offset marked on the right.
Block 1's start <= the first split's offset < block 1's end, so the first split lands on block 1; that is, the first map task computes on the machine holding block 1. This is computation moving to the data. By the same lookup, the second split is computed on machine 3. Block 2's data is therefore handled in two halves: one part computed on machine 1 and the other on machine 3, which is data moving to the computation. (There is no way around this, but it is still parallel, so it still beats a single node.)
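To see the resulting <file, start, length, hosts> tuples for some input, one can call getSplits directly; a sketch (the input path is hypothetical, and the enclosing method must declare IOException/InterruptedException):

Job job = Job.getInstance(new Configuration());
FileInputFormat.addInputPath(job, new Path("/data/in"));
for (InputSplit split : new TextInputFormat().getSplits(job)) {
  FileSplit fs = (FileSplit) split;
  System.out.println(fs.getPath() + " start=" + fs.getStart()
      + " length=" + fs.getLength()
      + " hosts=" + Arrays.toString(split.getLocations()));
}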
b2, the makeSplit factory:
/**
 * A factory that makes the split for this class. It can be overridden
 * by sub-classes to make sub-types
 */
protected FileSplit makeSplit(Path file, long start, long length,
                              String[] hosts, String[] inMemoryHosts) {
  return new FileSplit(file, start, length, hosts, inMemoryHosts);
}