HDFS编程练习

Posted 糟老头修炼记

tags:

篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了HDFS编程练习相关的知识,希望对你有一定的参考价值。

1.从HDFS中下载指定文件

 从HDFS中下载指定文件,如果本地文件与要下载的文件名称相同,则自动对下载的文件重命名

shell命令:

su hadoop
cd /home/hadoop   # enter the hadoop user's local home directory
# If /home/hadoop/text.txt already exists locally, download as text2.txt instead
if hdfs dfs -test -e file:///home/hadoop/text.txt;
then hdfs dfs -copyToLocal text.txt ./text2.txt;
else hdfs dfs -copyToLocal text.txt ./text.txt;
fi

代码实现:

import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.*;import java.io.*;public class HDFSApi{ /** * 下载文件到本地 * 判断本地路径是否已存在,若已存在,则自动进行重命名 */ public static void copyToLocal(Configuration conf,String remoteFilePath,String localFilePath)throwsIOException{ FileSystem fs =FileSystem.get(conf); Path remotePath =newPath(remoteFilePath); File f =newFile(localFilePath); /* 如果文件名存在,自动重命名(在文件名后面加上 _0, _1 ...) */ if(f.exists()){ System.out.println(localFilePath +"已存在."); Integer i =0; boolean isExist =true; while(isExist){ f =newFile(localFilePath +"_"+ i.toString()); if(!f.exists()){ localFilePath = localFilePath +"_"+ i.toString(); break; } else{ i++; } } System.out.println("将重新命名为: "+ localFilePath); }  // 下载文件到本地 Path localPath =newPath(localFilePath); fs.copyToLocalFile(remotePath, localPath); fs.close(); } /** * 主函数 */public static void main(String[] args){Configuration conf =newConfiguration(); conf.set("fs.default.name","hdfs://localhost:9000");String localFilePath ="/home/hadoop/text.txt"; //本地路径String remoteFilePath ="/user/hadoop/text.txt"; //HDFS路径try{HDFSApi.copyToLocal(conf, remoteFilePath, localFilePath);System.out.println("下载完成");}catch(Exception e){e.printStackTrace();}}}


2.将HDFS中指定文件的内容输出到终端

shell命令:

# print the contents of text.txt (relative to the user's HDFS home dir) to the terminal
hdfs dfs -cat text.txt

代码实现:

import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.*;import java.io.*;
public class HDFSApi { /** * 读取文件内容 */ public static void cat(Configuration conf, String remoteFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path remotePath = new Path(remoteFilePath); FSDataInputStream in = fs.open(remotePath); BufferedReader d = new BufferedReader(new InputStreamReader(in)); String line = null; while ( (line = d.readLine()) != null ) { System.out.println(line); } d.close(); in.close(); fs.close(); }
/** * 主函数 */public static void main(String[] args) {Configuration conf = new Configuration(); conf.set("fs.default.name","hdfs://localhost:9000");String remoteFilePath = "/user/hadoop/text.txt"; // HDFS路径try {System.out.println("读取文件: " + remoteFilePath);HDFSApi.cat(conf, remoteFilePath);System.out.println("n读取完成");} catch (Exception e) {e.printStackTrace();}}}


3.显示HDFS中指定文件的信息:(大小,权限,路径,创建时间 等)

shell命令:

# list the file with human-readable size, permissions, owner and timestamp
hdfs dfs -ls -h text.txt

代码实现:

import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.*;import java.io.*;import java.text.SimpleDateFormat;
public class HDFSApi { /** * 显示指定文件的信息 */ public static void ls(Configuration conf, String remoteFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path remotePath = new Path(remoteFilePath); FileStatus[] fileStatuses = fs.listStatus(remotePath); for (FileStatus s : fileStatuses) { System.out.println("路径: " + s.getPath().toString()); System.out.println("权限: " + s.getPermission().toString()); System.out.println("大小: " + s.getLen()); /* 返回的是时间戳,转化为时间日期格式 */ Long timeStamp = s.getModificationTime(); SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); String date = format.format(timeStamp); System.out.println("时间: " + date); } fs.close(); }
/** * 主函数 */public static void main(String[] args) {Configuration conf = new Configuration(); conf.set("fs.default.name","hdfs://localhost:9000");String remoteFilePath = "/user/hadoop/text.txt"; // HDFS路径try {System.out.println("读取文件信息: " + remoteFilePath);HDFSApi.ls(conf, remoteFilePath);System.out.println("n读取完成");} catch (Exception e) {e.printStackTrace();}}}

4.给定HDFS中某一个目录,输出该目录下的所有文件的读写权限、大小、创建时间、路径等信息,如果该文件是目录,则递归输出该目录下所有文件相关信息:

shell命令:

# recursively list everything under /user/hadoop with human-readable sizes
hdfs dfs -ls -R -h /user/hadoop

代码实现:

import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.*;import java.io.*;import java.text.SimpleDateFormat;public class HDFSApi { /** * 显示指定文件夹下所有文件的信息(递归) */ public static void lsDir(Configuration conf, String remoteDir) throws IOException { FileSystem fs = FileSystem.get(conf); Path dirPath = new Path(remoteDir); /* 递归获取目录下的所有文件 */ RemoteIterator<LocatedFileStatus> remoteIterator = fs.listFiles(dirPath, true); /* 输出每个文件的信息 */ while (remoteIterator.hasNext()) { FileStatus s = remoteIterator.next(); System.out.println("路径: " + s.getPath().toString()); System.out.println("权限: " + s.getPermission().toString()); System.out.println("大小: " + s.getLen()); /* 返回的是时间戳,转化为时间日期格式 */Long timeStamp = s.getModificationTime(); SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); String date = format.format(timeStamp); System.out.println("时间: " + date); System.out.println(); } fs.close(); } /** * 主函数 */public static void main(String[] args) {Configuration conf = new Configuration(); conf.set("fs.default.name","hdfs://localhost:9000");String remoteDir = "/user/hadoop"; // HDFS路径try {System.out.println("(递归)读取目录下所有文件的信息: " + remoteDir);HDFSApi.lsDir(conf, remoteDir);System.out.println("读取完成");} catch (Exception e) {e.printStackTrace();}}}


5.创建、删除HDFS文件:

shell命令:

# create the file, creating its parent directories first if they do not exist
if hdfs dfs -test -d dir1/dir2;
then hdfs dfs -touchz dir1/dir2/filename;
else hdfs dfs -mkdir -p dir1/dir2 && hdfs dfs -touchz dir1/dir2/filename;
fi
# delete the file
hdfs dfs -rm dir1/dir2/filename

代码实现:

import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.*;import java.io.*;public class HDFSApi { /** * 判断路径是否存在 */ public static boolean test(Configuration conf, String path) throws IOException { FileSystem fs = FileSystem.get(conf); return fs.exists(new Path(path)); } /** * 创建目录 */ public static boolean mkdir(Configuration conf, String remoteDir) throws IOException { FileSystem fs = FileSystem.get(conf); Path dirPath = new Path(remoteDir); boolean result = fs.mkdirs(dirPath); fs.close(); return result; } /** * 创建文件 */ public static void touchz(Configuration conf, String remoteFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path remotePath = new Path(remoteFilePath); FSDataOutputStream outputStream = fs.create(remotePath); outputStream.close(); fs.close(); }  /** * 删除文件 */ public static boolean rm(Configuration conf, String remoteFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path remotePath = new Path(remoteFilePath); boolean result = fs.delete(remotePath, false); fs.close(); return result; }/** * 主函数 */public static void main(String[] args) {Configuration conf = new Configuration(); conf.set("fs.default.name","hdfs://localhost:9000");String remoteFilePath = "/user/hadoop/input/text.txt"; // HDFS路径String remoteDir = "/user/hadoop/input"; // HDFS路径对应的目录try {/* 判断路径是否存在,存在则删除,否则进行创建 */if ( HDFSApi.test(conf, remoteFilePath) ) {HDFSApi.rm(conf, remoteFilePath); // 删除System.out.println("删除路径: " + remoteFilePath);} else {if ( !HDFSApi.test(conf, remoteDir) ) { // 若目录不存在,则进行创建HDFSApi.mkdir(conf, remoteDir);System.out.println("创建文件夹: " + remoteDir);}HDFSApi.touchz(conf, remoteFilePath);System.out.println("创建路径: " + remoteFilePath);}} catch (Exception e) {e.printStackTrace();}}}


6.创建和删除HDFS目录:创建目录时,如果目录文件所在目录不存在则自动创建相应目录;删除目录时,由用户指定当该目录不为空时是否还删除该目录

shell命令:

hdfs dfs -mkdir -p dir1/dir2   # create the directory (with parents)
hdfs dfs -rmdir dir1/dir2      # remove the directory (only allowed when it is empty)
hdfs dfs -rm -R dir1/dir2      # force-remove the directory and everything in it

代码实现:

import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.*;import java.io.*;public class HDFSApi { /** * 判断路径是否存在 */ public static boolean test(Configuration conf, String path) throws IOException { FileSystem fs = FileSystem.get(conf); return fs.exists(new Path(path)); } /** * 判断目录是否为空 * true: 空,false: 非空 */ public static boolean isDirEmpty(Configuration conf, String remoteDir) throws IOException { FileSystem fs = FileSystem.get(conf); Path dirPath = new Path(remoteDir); RemoteIterator<LocatedFileStatus> remoteIterator = fs.listFiles(dirPath, true); return !remoteIterator.hasNext(); } /** * 创建目录 */ public static boolean mkdir(Configuration conf, String remoteDir) throws IOException { FileSystem fs = FileSystem.get(conf); Path dirPath = new Path(remoteDir); boolean result = fs.mkdirs(dirPath); fs.close(); return result; }/** * 删除目录 */ public static boolean rmDir(Configuration conf, String remoteDir) throws IOException { FileSystem fs = FileSystem.get(conf); Path dirPath = new Path(remoteDir); /* 第二个参数表示是否递归删除所有文件 */ boolean result = fs.delete(dirPath, true); fs.close(); return result; }/** * 主函数 */public static void main(String[] args) {Configuration conf = new Configuration(); conf.set("fs.default.name","hdfs://localhost:9000");String remoteDir = "/user/hadoop/input"; // HDFS目录Boolean forceDelete = false; // 是否强制删除try {/* 判断目录是否存在,不存在则创建,存在则删除 */if ( !HDFSApi.test(conf, remoteDir) ) {HDFSApi.mkdir(conf, remoteDir); // 创建目录System.out.println("创建目录: " + remoteDir);} else {if ( HDFSApi.isDirEmpty(conf, remoteDir) || forceDelete ) { // 目录为空或强制删除HDFSApi.rmDir(conf, remoteDir);System.out.println("删除目录: " + remoteDir);} else { // 目录不为空System.out.println("目录不为空,不删除: " + remoteDir);}}} catch (Exception e) {e.printStackTrace();}}}


7.向HDFS中指定的文件追加内容,由用户指定内容追加到原有文件的开头或结尾

shell命令:

# append local.txt to the END of the HDFS file text.txt
hdfs dfs -appendToFile local.txt text.txt
# append to the BEGINNING: fetch the file, concatenate old content after the
# new local content, then overwrite the HDFS copy
cd /home/hadoop
hdfs dfs -get text.txt
cat text.txt >> local.txt
hdfs dfs -copyFromLocal -f local.txt text.txt

代码实现:

import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.*;import java.io.*;public class HDFSApi { /** * 判断路径是否存在 */ public static boolean test(Configuration conf, String path) throws IOException { FileSystem fs = FileSystem.get(conf); return fs.exists(new Path(path)); } /** * 追加文本内容 */ public static void appendContentToFile(Configuration conf, String content, String remoteFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path remotePath = new Path(remoteFilePath); /* 创建一个文件输出流,输出的内容将追加到文件末尾 */ FSDataOutputStream out = fs.append(remotePath); out.write(content.getBytes()); out.close(); fs.close();} /** * 追加文件内容 */ public static void appendToFile(Configuration conf, String localFilePath, String remoteFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path remotePath = new Path(remoteFilePath); /* 创建一个文件读入流 */ FileInputStream in = new FileInputStream(localFilePath); /* 创建一个文件输出流,输出的内容将追加到文件末尾 */ FSDataOutputStream out = fs.append(remotePath); /* 读写文件内容 */ byte[] data = new byte[1024]; int read = -1; while ( (read = in.read(data)) > 0 ) { out.write(data, 0, read); } out.close(); in.close(); fs.close(); } /** * 移动文件到本地 * 移动后,删除源文件 */ public static void moveToLocalFile(Configuration conf, String remoteFilePath, String localFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path remotePath = new Path(remoteFilePath); Path localPath = new Path(localFilePath); fs.moveToLocalFile(remotePath, localPath); }  /** * 创建文件 */ public static void touchz(Configuration conf, String remoteFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path remotePath = new Path(remoteFilePath); FSDataOutputStream outputStream = fs.create(remotePath); outputStream.close(); fs.close(); } /** * 主函数 */public static void main(String[] args) {Configuration conf = new Configuration(); 
conf.set("fs.default.name","hdfs://localhost:9000");conf.set("dfs.support.append","true");conf.set("dfs.client.block.write.replace-datanode-on-failure.enable","true");conf.set("dfs.client.block.write.replace-datanode-on-failure.policy","NEVER");
String remoteFilePath = "/user/hadoop/text.txt"; // HDFS文件String content = "新追加的内容n";String choice = "after"; //追加到文件末尾// String choice = "before"; // 追加到文件开头try {/* 判断文件是否存在 */if ( !HDFSApi.test(conf, remoteFilePath) ) {System.out.println("文件不存在: " + remoteFilePath);} else {if ( choice.equals("after") ) { // 追加在文件末尾HDFSApi.appendContentToFile(conf, content, remoteFilePath);System.out.println("已追加内容到文件末尾" + remoteFilePath);} else if ( choice.equals("before") ) { // 追加到文件开头/* 没有相应的api可以直接操作,因此先把文件移动到本地,创建一个新的HDFS,再按顺序追加内容 */String localTmpPath = "/user/hadoop/tmp.txt";HDFSApi.moveToLocalFile(conf, remoteFilePath, localTmpPath); // 移动到本地HDFSApi.touchz(conf, remoteFilePath); // 创建一个新文件HDFSApi.appendContentToFile(conf, content, remoteFilePath); // 先写入新内容HDFSApi.appendToFile(conf, localTmpPath, remoteFilePath); // 再写入原来内容System.out.println("已追加内容到文件开头: " + remoteFilePath);}}} catch (Exception e) {e.printStackTrace();}}}


8.删除HDFS中的指定文件

shell命令:

# delete the specified file from HDFS
hdfs dfs -rm text.txt
import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.*;import java.io.*;public class HDFSApi { /** * 删除文件 */ public static boolean rm(Configuration conf, String remoteFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path remotePath = new Path(remoteFilePath); boolean result = fs.delete(remotePath, false); fs.close(); return result; } /** * 主函数 */public static void main(String[] args) {Configuration conf = new Configuration(); conf.set("fs.default.name","hdfs://localhost:9000");String remoteFilePath = "/user/hadoop/text.txt"; // HDFS文件try {if ( HDFSApi.rm(conf, remoteFilePath) ) {System.out.println("文件删除: " + remoteFilePath);} else {System.out.println("操作失败(文件不存在或删除失败)");}} catch (Exception e) {e.printStackTrace();}}}


9.删除HDFS中指定的目录(由用户指定目录中如果存在文件时是否删除目录)

shell命令:

hdfs dfs -rmdir dir1/dir2   # remove directory (prints "not empty" and does nothing when non-empty)
hdfs dfs -rm -R dir1/dir2   # force-remove the directory and all of its contents

代码实现:

import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.*;import java.io.*;public class HDFSApi { /** * 判断目录是否为空 * true: 空,false: 非空 */ public static boolean isDirEmpty(Configuration conf, String remoteDir) throws IOException { FileSystem fs = FileSystem.get(conf); Path dirPath = new Path(remoteDir); RemoteIterator<LocatedFileStatus> remoteIterator = fs.listFiles(dirPath, true); return !remoteIterator.hasNext(); } /** * 删除目录 */ public static boolean rmDir(Configuration conf, String remoteDir, boolean recursive) throws IOException { FileSystem fs = FileSystem.get(conf); Path dirPath = new Path(remoteDir); /* 第二个参数表示是否递归删除所有文件 */ boolean result = fs.delete(dirPath, recursive); fs.close(); return result; } /** * 主函数 */public static void main(String[] args) {Configuration conf = new Configuration(); conf.set("fs.default.name","hdfs://localhost:9000");String remoteDir = "/user/hadoop/input"; // HDFS目录Boolean forceDelete = false; // 是否强制删除try {if ( !HDFSApi.isDirEmpty(conf, remoteDir) && !forceDelete ) {System.out.println("目录不为空,不删除");} else {if ( HDFSApi.rmDir(conf, remoteDir, forceDelete) ) {System.out.println("目录已删除: " + remoteDir);} else {System.out.println("操作失败");}}} catch (Exception e) {e.printStackTrace();}}}


10.HDFS文件移动:将hdfs文件从源文件路径移动到目的路径

shell命令:

# rename/move text.txt to text2.txt inside HDFS
hdfs dfs -mv text.txt text2.txt

代码实现:

import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.*;import java.io.*;public class HDFSApi { /** * 移动文件 */ public static boolean mv(Configuration conf, String remoteFilePath, String remoteToFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path srcPath = new Path(remoteFilePath); Path dstPath = new Path(remoteToFilePath); boolean result = fs.rename(srcPath, dstPath); fs.close(); return result; } /** * 主函数 */public static void main(String[] args) {Configuration conf = new Configuration(); conf.set("fs.default.name","hdfs://localhost:9000");String remoteFilePath = "hdfs:///user/hadoop/text.txt"; // 源文件HDFS路径String remoteToFilePath = "hdfs:///user/hadoop/new.txt"; // 目的HDFS路径try {if ( HDFSApi.mv(conf, remoteFilePath, remoteToFilePath) ) {System.out.println("将文件 " + remoteFilePath + " 移动到 " + remoteToFilePath);} else {System.out.println("操作失败(源文件不存在或移动失败)");}} catch (Exception e) {e.printStackTrace();}}}


11.输出HDFS中指定文件的文本到终端中:

import org.apache.hadoop.fs.*;import org.apache.hadoop.io.IOUtils;import java.io.*;import java.net.URL;public class HDFSApi {static{ URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory()); }/** * 主函数 */public static void main(String[] args) throws Exception {String remoteFilePath = "hdfs://localhost:9000/user/hadoop/text.txt"; // HDFS文件InputStream in = null; try{ /* 通过URL对象打开数据流,从中读取数据 */ in = new URL(remoteFilePath).openStream(); IOUtils.copyBytes(in,System.out,4096,false); } finally{ IOUtils.closeStream(in); }}}


12.按行读取HDFS中指定文件:

import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.FSDataInputStream;import org.apache.hadoop.fs.FileSystem;import org.apache.hadoop.fs.Path;import java.io.*;public class MyFSDataInputStream extends FSDataInputStream {public MyFSDataInputStream(InputStream in) {super(in);}/** * 实现按行读取 * 每次读入一个字符,遇到"\n"结束,返回一行内容 */public static String readline(BufferedReader br) throws IOException {char[] data = new char[1024];int read = -1;int off = 0; // 循环执行时,br 每次会从上一次读取结束的位置继续读取,因此该函数里,off 每次都从0开始while ( (read = br.read(data, off, 1)) != -1 ) {if (String.valueOf(data[off]).equals("\n") ) {off += 1;break;}off += 1;}if (off > 0) {return String.valueOf(data);} else {return null;}}/** * 读取文件内容 */ public static void cat(Configuration conf, String remoteFilePath) throws IOException { FileSystem fs = FileSystem.get(conf); Path remotePath = new Path(remoteFilePath); FSDataInputStream in = fs.open(remotePath); BufferedReader br = new BufferedReader(new InputStreamReader(in)); String line = null; while ( (line = MyFSDataInputStream.readline(br)) != null ) { System.out.println(line); } br.close(); in.close(); fs.close(); }/** * 主函数 */public static void main(String[] args) {Configuration conf = new Configuration(); conf.set("fs.default.name","hdfs://localhost:9000");String remoteFilePath = "/user/hadoop/text.txt"; // HDFS路径try {MyFSDataInputStream.cat(conf, remoteFilePath);} catch (Exception e) {e.printStackTrace();}}}


以上是关于HDFS编程练习的主要内容,如果未能解决你的问题,请参考以下文章

VSCode自定义代码片段——JS中的面向对象编程

VSCode自定义代码片段9——JS中的面向对象编程

spring练习,在Eclipse搭建的Spring开发环境中,使用set注入方式,实现对象的依赖关系,通过ClassPathXmlApplicationContext实体类获取Bean对象(代码片段

分布式文件系统HDFS 练习

分布式文件系统HDFS 练习

分布式文件系统HDFS 练习