Basic usage of Hadoop's FileSplit

FileSplit class hierarchy:

[Figure: FileSplit inheritance diagram. In the new API, org.apache.hadoop.mapreduce.lib.input.FileSplit extends InputSplit and implements Writable.]

Fields and methods of the FileSplit class:

[Figure: field and method list of FileSplit. The accessors exercised in the code below are getPath(), getStart(), getLength(), getLocations(), and getLocationInfo().]
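
The original screenshots did not survive extraction. As a stand-in, here is a minimal, cluster-free sketch that constructs a FileSplit by hand and exercises the same accessors the MapReduce job below uses; the path, offsets, and host name are made up to mirror the listings that follow:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class FileSplitDemo {
    public static void main(String[] args) {
        // Hypothetical split: 58 bytes starting at offset 0, one replica host.
        FileSplit split = new FileSplit(
                new Path("hdfs://hadoop:9000/user/hadoop/libin/input/inputpath1.txt"),
                0L,                         // start: offset of the split's first byte in the file
                58L,                        // length: number of bytes in the split
                new String[] {"hadoop"});   // hosts that hold a local replica

        System.out.println(split.getPath().getName());  // inputpath1.txt
        System.out.println(split.getStart());           // 0
        System.out.println(split.getLength());          // 58
        System.out.println(split);                      // toString() prints "<path>:<start>+<length>"
    }
}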



Job input:

hadoop@hadoop:/home/hadoop/blb$ hdfs dfs -text /user/hadoop/libin/input/inputpath1.txt
hadoop  a
spark   a
hive    a
hbase   a
tachyon a
storm   a
redis   a
hadoop@hadoop:/home/hadoop/blb$ hdfs dfs -text /user/hadoop/libin/input/inputpath2.txt
hadoop  b
spark   b
kafka   b
tachyon b
oozie   b
flume   b
sqoop   b
solr    b
hadoop@hadoop:/home/hadoop/blb$
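
Both files are only a few dozen bytes, far smaller than an HDFS block, so TextInputFormat turns each file into a single FileSplit handled by one map task. To make a single file yield several splits for experimentation, the driver shown below could cap the split size; a one-line sketch (the 32-byte cap is arbitrary):

// Hypothetical tweak for the driver: cap splits at 32 bytes so even tiny files
// are carved into multiple FileSplits (and therefore multiple map tasks).
FileInputFormat.setMaxInputSplitSize(job, 32L);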





Code:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.SplitLocationInfo;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class GetSplitMapReduce {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: GetSplitMapReduce <inputpath> <outputpath>");
            System.exit(2);  // bail out instead of falling through with bad arguments
        }

        Job job = Job.getInstance(conf, GetSplitMapReduce.class.getSimpleName() + "1");
        job.setJarByClass(GetSplitMapReduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);  // must match the mapper's output value type
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        job.setMapperClass(MyMapper1.class);
        job.setNumReduceTasks(0);  // map-only job: mappers write part-m-* files directly
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    public static class MyMapper1 extends Mapper<LongWritable, Text, Text, NullWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {

            FileSplit fileSplit = (FileSplit) context.getInputSplit();
            String pathname = fileSplit.getPath().getName();                 // file name of this split
            int depth = fileSplit.getPath().depth();                         // depth of the path in the directory tree
            Class<? extends FileSplit> class1 = fileSplit.getClass();        // runtime class of the split
            long length = fileSplit.getLength();                             // number of bytes in the split
            SplitLocationInfo[] locationInfo = fileSplit.getLocationInfo();  // per-host location detail (may be null)
            String[] locations = fileSplit.getLocations();                   // hosts holding the split's data
            long start = fileSplit.getStart();                               // position of the first byte in the file to process
            String string = fileSplit.toString();                            // "<path>:<start>+<length>"

            context.write(new Text("===================================================================================="), NullWritable.get());
            context.write(new Text("pathname--" + pathname), NullWritable.get());
            context.write(new Text("depth--" + depth), NullWritable.get());
            context.write(new Text("class1--" + class1), NullWritable.get());
            context.write(new Text("length--" + length), NullWritable.get());
            context.write(new Text("locationInfo--" + locationInfo), NullWritable.get());
            context.write(new Text("locations--" + locations), NullWritable.get());
            context.write(new Text("start--" + start), NullWritable.get());
            context.write(new Text("string--" + string), NullWritable.get());
        }
    }
}
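
To reproduce the runs below, package the class into a jar (the name getsplit.jar here is made up) and submit it with hadoop jar; the output directory must not already exist:

hadoop jar getsplit.jar GetSplitMapReduce /user/hadoop/libin/input /user/hadoop/libin/out2

Because setNumReduceTasks(0) makes this a map-only job, each map task writes straight to its own part-m-nnnnn file, one per input split: here part-m-00000 for inputpath2.txt's split and part-m-00001 for inputpath1.txt's.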







Output for inputpath2.txt:

hadoop@hadoop:/home/hadoop/blb$ hdfs dfs -text /user/hadoop/libin/out2/part-m-00000
====================================================================================
pathname--inputpath2.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--66
locationInfo--null
locations--[Ljava.lang.String;@4ff41ba0
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath2.txt:0+66
====================================================================================
pathname--inputpath2.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--66
locationInfo--null
locations--[Ljava.lang.String;@2341ce62
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath2.txt:0+66
====================================================================================
pathname--inputpath2.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--66
locationInfo--null
locations--[Ljava.lang.String;@35549603
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath2.txt:0+66
====================================================================================
pathname--inputpath2.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--66
locationInfo--null
locations--[Ljava.lang.String;@4444ba4f
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath2.txt:0+66
====================================================================================
pathname--inputpath2.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--66
locationInfo--null
locations--[Ljava.lang.String;@7c23bb8c
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath2.txt:0+66
====================================================================================
pathname--inputpath2.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--66
locationInfo--null
locations--[Ljava.lang.String;@dee2400
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath2.txt:0+66
====================================================================================
pathname--inputpath2.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--66
locationInfo--null
locations--[Ljava.lang.String;@d7d8325
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath2.txt:0+66
====================================================================================
pathname--inputpath2.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--66
locationInfo--null
locations--[Ljava.lang.String;@2b2cf90e
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath2.txt:0+66
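
Two details in this output are easy to misread. First, locations--[Ljava.lang.String;@... is just Java's default Object.toString() for an array (type tag plus identity hash), not corrupted data; wrap the array in java.util.Arrays.toString() to see its contents. Second, locationInfo is null and locations is in fact an empty array: in the Hadoop releases I have checked, FileSplit's Writable serialization ships only the path, start, and length to the task, so host information is consumed by the scheduler and never reaches the mapper. A sketch of friendlier logging, as drop-in replacements for the two corresponding writes in the mapper above:

// Arrays.toString(null) safely prints "null"; an empty array prints "[]".
context.write(new Text("locations--" + java.util.Arrays.toString(locations)), NullWritable.get());
context.write(new Text("locationInfo--" + java.util.Arrays.toString(locationInfo)), NullWritable.get());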







Output for inputpath1.txt:

hadoop@hadoop:/home/hadoop/blb$ hdfs dfs -text /user/hadoop/libin/out2/part-m-00001
====================================================================================
pathname--inputpath1.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--58
locationInfo--null
locations--[Ljava.lang.String;@4ff41ba0
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath1.txt:0+58
====================================================================================
pathname--inputpath1.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--58
locationInfo--null
locations--[Ljava.lang.String;@2341ce62
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath1.txt:0+58
====================================================================================
pathname--inputpath1.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--58
locationInfo--null
locations--[Ljava.lang.String;@35549603
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath1.txt:0+58
====================================================================================
pathname--inputpath1.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--58
locationInfo--null
locations--[Ljava.lang.String;@4444ba4f
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath1.txt:0+58
====================================================================================
pathname--inputpath1.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--58
locationInfo--null
locations--[Ljava.lang.String;@7c23bb8c
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath1.txt:0+58
====================================================================================
pathname--inputpath1.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--58
locationInfo--null
locations--[Ljava.lang.String;@dee2400
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath1.txt:0+58
====================================================================================
pathname--inputpath1.txt
depth--5
class1--class org.apache.hadoop.mapreduce.lib.input.FileSplit
length--58
locationInfo--null
locations--[Ljava.lang.String;@d7d8325
start--0
string--hdfs://hadoop:9000/user/hadoop/libin/input/inputpath1.txt:0+58
hadoop@hadoop:/home/hadoop/blb$

That concludes today's article; thanks for reading.
