Argument is not being taken as a string

Hi Krishna,

As you suggested, I passed the filter input as a string, but it is being treated as just a word and the job throws an error. I have included the program and the error below.


package mrd.training.sample;

import java.io.IOException;
import java.net.URISyntaxException;
import java.text.DecimalFormat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class FilterTrnsDetails {

    public static class mymapcall extends
            Mapper<LongWritable, Text, Text, DoubleWritable> {

        private String filter = "";

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            // Read the filter word that the driver stored in the job configuration.
            filter = context.getConfiguration().get("filter");
        }

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            String[] array = line.split(",");

            // Emit (category, amount) only for records matching the filter word.
            if (array[8].equalsIgnoreCase(filter)) {
                context.write(new Text(array[4]), new DoubleWritable(Double.parseDouble(array[3])));
            }
        }
    }
    public static class MyReducer extends Reducer<Text, DoubleWritable, Text, Text> {

        @Override
        public void reduce(Text key, Iterable<DoubleWritable> values, Context context)
                throws IOException, InterruptedException {
            // Sum all amounts for this key.
            double sum = 0.00;
            for (DoubleWritable val : values) {
                sum += val.get();
            }

            // Format the total to at most two decimal places.
            DecimalFormat formatter = new DecimalFormat("0.##");
            context.write(key, new Text(formatter.format(sum)));
        }
    }

    public static void main(String[] args) throws IOException,
            InterruptedException, ClassNotFoundException, URISyntaxException {

        Configuration conf = new Configuration();
        Job job = new Job(conf, "SumOfCost");
        String[] otherArgs = new GenericOptionsParser(conf, args)
                .getRemainingArgs();
        conf.set("filter", otherArgs[1]);

        job.setJarByClass(FilterTrnsDetails.class);
        job.setMapperClass(FilterTrnsDetails.mymapcall.class);
        job.setReducerClass(FilterTrnsDetails.MyReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(DoubleWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileInputFormat.addInputPaths(job, otherArgs[1]);
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
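
For reference, the filter word is meant to travel like this: command-line argument → conf.set("filter", ...) in the driver → the job's Configuration → context.getConfiguration().get("filter") in the mapper's setup(). Nothing in that chain needs shell quoting; what matters is whether the value is on the Configuration the job actually runs with.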


The command I ran: notroot@ubuntu:~$ hadoop jar lab/programs/HadoopTraining.jar mrd.training.sample.FilterTrnsDetails input/txns1 "credit" output/sum7

Error:

14/08/28 15:12:28 ERROR security.UserGroupInformation: PriviledgedActionException as:notroot cause:org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: hdfs://localhost:8020/user/notroot/credit

Exception in thread "main" org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: hdfs://localhost:8020/user/notroot/credit
at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.listStatus(FileInputFormat.java:235)
at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getSplits(FileInputFormat.java:252)
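
A note on this trace: after GenericOptionsParser.getRemainingArgs(), the positional arguments are otherArgs[0] = input/txns1, otherArgs[1] = credit, and otherArgs[2] = output/sum7. The extra call FileInputFormat.addInputPaths(job, otherArgs[1]) in main() registers the filter word itself as a second input path. Relative paths resolve against the submitting user's HDFS home directory, so Hadoop looks for hdfs://localhost:8020/user/notroot/credit and listStatus() fails at submission, before any mapper runs. The quotes around "credit" cannot change this: quoting affects shell word-splitting, not what the program does with the argument afterwards.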

3 thoughts on “Argument is not being taken as a string”

  • Hi,

    Please try like below:

    Configuration conf = new Configuration();
    conf.set("filter", otherArgs[1]);
    Job job = new Job(conf, "SumOfCost");
    String[] otherArgs = new GenericOptionsParser(conf, args)
            .getRemainingArgs();

    job.setJarByClass(FilterTrnsDetails.class);
    job.setMapperClass(FilterTrnsDetails.mymapcall.class);
    job.setReducerClass(FilterTrnsDetails.MyReducer.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(DoubleWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(DoubleWritable.class);

    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));

    hadoop jar lab/programs/HadoopTraining.jar mrd.training.sample.FilterTrnsDetails input/txns1 credit output/sum7
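
    (Once the job finishes, the summed values per key land under output/sum7; with the default single reduce task they can be checked with hadoop fs -cat output/sum7/part-r-00000.)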
    
    • Hi Krishna,

      It's giving an error at conf.set("filter", otherArgs[1]) since otherArgs is used before it is declared. It should come after String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

      Configuration conf = new Configuration();
      Job job = new Job(conf, "SumOfCost");
      String[] otherArgs = new GenericOptionsParser(conf, args)
              .getRemainingArgs();
      conf.set("filter", otherArgs[1]);

      job.setJarByClass(FilterTrnsDetails.class);

      job.setMapperClass(FilterTrnsDetails.mymapcall.class);
      job.setReducerClass(FilterTrnsDetails.MyReducer.class);
      job.setMapOutputKeyClass(Text.class);
      job.setMapOutputValueClass(DoubleWritable.class);

      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(DoubleWritable.class);

      FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
      FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));

      System.exit(job.waitForCompletion(true) ? 0 : 1);

      • Hi,

        Please use this:

        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args)
                .getRemainingArgs();
        conf.set("filter", otherArgs[1]);
        Job job = new Job(conf, "SumOfCost");

        Thanks,
        Krishna
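
        Putting the thread together, a minimal consolidated sketch of the corrected main() (imports as in the original post; note the output value class is also switched to Text here to match what MyReducer actually emits, an extra fix not discussed above):

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Parse generic options first; the leftovers are the positional args:
            // otherArgs[0] = input dir, otherArgs[1] = filter word, otherArgs[2] = output dir.
            String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
            // Set "filter" BEFORE constructing the Job: Job(conf, name) copies the
            // Configuration, so later conf.set(...) calls are invisible to the job.
            conf.set("filter", otherArgs[1]);
            Job job = new Job(conf, "SumOfCost");

            job.setJarByClass(FilterTrnsDetails.class);
            job.setMapperClass(FilterTrnsDetails.mymapcall.class);
            job.setReducerClass(FilterTrnsDetails.MyReducer.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(DoubleWritable.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class); // MyReducer writes Text values

            // Only the real input directory is an input path; the filter word
            // travels through the Configuration, not through addInputPaths().
            FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
            FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));

            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }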
