Commit 4590e294 authored by cloudera_vm

Inverted Index without frequencies - Question2

parent a4290b5f
/Question1/
/Question2/
/StubDriver.class
/StubMapper.class
Question2/InvIndex.java
package Question2;
import java.util.Arrays;
import java.util.StringTokenizer;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class InvIndex extends Configured implements Tool {

  public static void main(String[] args) throws Exception {
    System.out.println(Arrays.toString(args));
    int res = ToolRunner.run(new Configuration(), new InvIndex(), args);
    System.exit(res);
  }

  public int run(String[] args) throws Exception {
    Job job = Job.getInstance(getConf());
    job.setJobName("InvIndex");
    job.setJarByClass(InvIndex.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    job.setMapperClass(Map.class);
    job.setReducerClass(Reduce.class);
    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    Path outputFilePath = new Path(args[3]);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileInputFormat.addInputPath(job, new Path(args[1]));
    FileInputFormat.addInputPath(job, new Path(args[2]));
    FileOutputFormat.setOutputPath(job, outputFilePath);

    /* Delete output filepath if it already exists */
    FileSystem fs = FileSystem.newInstance(getConf());
    if (fs.exists(outputFilePath)) {
      fs.delete(outputFilePath, true);
    }

    return job.waitForCompletion(true) ? 0 : 1;
  }
  public static class Map extends Mapper<LongWritable, Text, Text, Text> {
    private Text word = new Text();
    private Text filename = new Text();
    private boolean caseSensitive = false;

    @Override
    public void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      // The name of the file this split belongs to becomes the posting value.
      String filenameStr = ((FileSplit) context.getInputSplit()).getPath().getName();
      filename.set(filenameStr);
      String line = value.toString();
      if (!caseSensitive) {
        line = line.toLowerCase();
      }
      StringTokenizer tokenizer = new StringTokenizer(line);
      while (tokenizer.hasMoreTokens()) {
        word.set(tokenizer.nextToken());
        context.write(word, filename);
      }
    }

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
      Configuration conf = context.getConfiguration();
      this.caseSensitive = conf.getBoolean("wordcount.case.sensitive", false);
    }
  }
  public static class Reduce extends Reducer<Text, Text, Text, Text> {
    @Override
    public void reduce(final Text key, final Iterable<Text> values,
        final Context context) throws IOException, InterruptedException {
      // Join the file names for this word. The separator-prefix idiom avoids calling
      // values.iterator() a second time, which Hadoop's reducer Iterable does not support reliably.
      StringBuilder stringBuilder = new StringBuilder();
      String separator = "";
      for (Text value : values) {
        stringBuilder.append(separator).append(value.toString());
        separator = ", ";
      }
      context.write(key, new Text(stringBuilder.toString()));
    }
  }
}
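
For reference, a minimal driver sketch (not part of this commit) showing one way to supply the wordcount.case.sensitive flag that Map.setup() reads; because InvIndex is run through ToolRunner, the same flag can also be passed on the command line as a generic -D option. The RunInvIndex class name and the input/output paths below are made up for illustration.

package Question2;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class RunInvIndex {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Map.setup() falls back to false (lower-case every token) when this key is absent.
    conf.setBoolean("wordcount.case.sensitive", false);
    // InvIndex.run() expects three input paths followed by one output path (args[0..3]).
    int res = ToolRunner.run(conf, new InvIndex(),
        new String[] { "input/file1.txt", "input/file2.txt", "input/file3.txt", "output_Q2" });
    System.exit(res);
  }
}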
Question1/WordCount.java → Question2/InvertedIndex.java

-package Question1;
+package Question2;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.util.Arrays;
+import java.util.HashSet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
@@ -11,6 +16,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
@@ -18,10 +24,10 @@ import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;

-public class WordCount extends Configured implements Tool {
+public class InvertedIndex extends Configured implements Tool {

   public static void main(String[] args) throws Exception {
     System.out.println(Arrays.toString(args));
-    int res = ToolRunner.run(new Configuration(), new WordCount(), args);
+    int res = ToolRunner.run(new Configuration(), new InvertedIndex(), args);
     System.exit(res);
   }
@@ -29,10 +35,10 @@ public class WordCount extends Configured implements Tool {
   @Override
   public int run(String[] args) throws Exception {
     System.out.println(Arrays.toString(args));
-    Job job = new Job(getConf(), "WordCount");
-    job.setJarByClass(WordCount.class);
+    Job job = new Job(getConf(), "InvertedIndex");
+    job.setJarByClass(InvertedIndex.class);
     job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(IntWritable.class);
+    job.setOutputValueClass(Text.class);
     job.setMapperClass(Map.class);
     job.setReducerClass(Reduce.class);
@@ -40,37 +46,70 @@ public class WordCount extends Configured implements Tool {
     job.setInputFormatClass(TextInputFormat.class);
     job.setOutputFormatClass(TextOutputFormat.class);

+    Path outputFilePath = new Path(args[3]);
     FileInputFormat.addInputPath(job, new Path(args[0]));
-    FileOutputFormat.setOutputPath(job, new Path(args[1]));
+    FileInputFormat.addInputPath(job, new Path(args[1]));
+    FileInputFormat.addInputPath(job, new Path(args[2]));
+    FileOutputFormat.setOutputPath(job, outputFilePath);
+    FileSystem fs = FileSystem.newInstance(getConf());
+    if (fs.exists(outputFilePath)) {
+      fs.delete(outputFilePath, true);
+    }

     job.waitForCompletion(true);
     return 0;
   }

-  public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {
-    private final static IntWritable ONE = new IntWritable(1);
+  public static class Map extends Mapper<LongWritable, Text, Text, Text> {
     private Text word = new Text();
+    private String stopwords_file = "/home/cloudera/workspace/bpa/Assign1/output_Q1.i/stopwords.csv";

     @Override
     public void map(LongWritable key, Text value, Context context)
         throws IOException, InterruptedException {
+      String stopwords = new String(Files.readAllBytes(
+          Paths.get(stopwords_file)));
+      Text filename = new Text(
+          ((FileSplit) context.getInputSplit()).getPath().getName());
       for (String token : value.toString().split("\\s+")) {
-        word.set(token);
-        context.write(word, ONE);
+        if (!stopwords.contains(token.toLowerCase())) {
+          word.set(token.toLowerCase());
+        }
       }
+      context.write(word, filename);
     }
   }

-  public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {
+  public static class Reduce extends Reducer<Text, Text, Text, Text> {
     @Override
-    public void reduce(Text key, Iterable<IntWritable> values, Context context)
+    public void reduce(Text key, Iterable<Text> values, Context context)
         throws IOException, InterruptedException {
-      int sum = 0;
-      for (IntWritable val : values) {
-        sum += val.get();
+      HashSet<String> set = new HashSet<String>();
+      for (Text value : values) {
+        set.add(value.toString());
       }
+      StringBuilder builder = new StringBuilder();
+      String prefix = "";
+      for (String s : set) {
+        builder.append(prefix);
+        prefix = ", ";
+        builder.append(s);
+      }
-      context.write(key, new IntWritable(sum));
+      context.write(key, new Text(builder.toString()));
     }
   }
 }
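
A possible refinement, not part of the commit: as committed, the new map() re-reads the stop-word file for every input line, filters with String.contains() (which also matches substrings inside longer stop words), and the added context.write(word, filename) sits after the token loop, so at most one token per line reaches the index. Below is a sketch of a mapper that loads the list once in setup() and emits every surviving token, keeping the same local stop-word path; the class name StopwordAwareMap is hypothetical.

package Question2;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class StopwordAwareMap extends Mapper<LongWritable, Text, Text, Text> {

  // Same local path the committed code uses; it must exist on every node that runs map tasks.
  private static final String STOPWORDS_FILE =
      "/home/cloudera/workspace/bpa/Assign1/output_Q1.i/stopwords.csv";

  private final Set<String> stopwords = new HashSet<String>();
  private final Text word = new Text();
  private final Text filename = new Text();

  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    // Parse the stop-word CSV once per task instead of once per input line.
    String contents = new String(Files.readAllBytes(Paths.get(STOPWORDS_FILE)));
    for (String w : contents.split("[,\\s]+")) {
      if (!w.isEmpty()) {
        stopwords.add(w.toLowerCase());
      }
    }
  }

  @Override
  public void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    filename.set(((FileSplit) context.getInputSplit()).getPath().getName());
    for (String token : value.toString().split("\\s+")) {
      String normalized = token.toLowerCase();
      // Exact set membership instead of String.contains(), and one write per surviving token.
      if (!normalized.isEmpty() && !stopwords.contains(normalized)) {
        word.set(normalized);
        context.write(word, filename);
      }
    }
  }
}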