头哥实践平台----HBase 开发:使用Java操作HBase

一.第1关:创建表

1.先写命令行

bash 复制代码
start-dfs.sh

start-hbase.sh

hadoop fs -ls /hbase(可有可无)

2.再写代码文件

java 复制代码
package step1;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;


public class Task {
	/**
	 * Creates the two lab tables: "dept" (column family "data") and
	 * "emp" (column family "emp").
	 *
	 * @throws Exception if the HBase connection fails or a table already exists
	 */
	public void createTable() throws Exception {
		/********* Begin *********/
		Configuration config = HBaseConfiguration.create();
		// try-with-resources closes Admin and Connection even if createTable throws,
		// replacing the original nested try/finally blocks.
		try (Connection connection = ConnectionFactory.createConnection(config);
				Admin admin = connection.getAdmin()) {
			createTableWithFamily(admin, "dept", "data");
			createTableWithFamily(admin, "emp", "emp");
		}
		/********* End *********/
	}

	/**
	 * Builds and creates one table with a single column family using the
	 * new (builder-based) HBase admin API.
	 *
	 * @param admin  open Admin handle (not closed here)
	 * @param table  table name to create
	 * @param family name of the single column family
	 * @throws IOException if table creation fails
	 */
	private static void createTableWithFamily(Admin admin, String table, String family)
			throws IOException {
		TableName tableName = TableName.valueOf(table);
		ColumnFamilyDescriptor cf =
				ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build();
		TableDescriptor descriptor =
				TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(cf).build();
		admin.createTable(descriptor);
	}
}

3.运行

二.第2关:添加数据

1.先写命令行

bash 复制代码
start-dfs.sh

start-hbase.sh

2.再写代码文件

java 复制代码
package step2;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class Task {

	/**
	 * Creates table "tb_step2" with column family "data" and inserts two rows:
	 * row1 -> data:1 = 张三丰, row2 -> data:2 = 张无忌.
	 *
	 * <p>Fixes a resource leak in the original: Connection, Admin and Table
	 * were never closed.
	 *
	 * @throws Exception if connecting, creating the table, or writing fails
	 */
	public void insertInfo() throws Exception {
		/********* Begin *********/
		Configuration config = HBaseConfiguration.create();
		TableName tableName = TableName.valueOf("tb_step2");
		try (Connection connection = ConnectionFactory.createConnection(config);
				Admin admin = connection.getAdmin()) {
			// Build the table with its single column family via the builder API.
			ColumnFamilyDescriptor family =
					ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("data")).build();
			TableDescriptor descriptor =
					TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(family).build();
			admin.createTable(descriptor);

			byte[] cf = Bytes.toBytes("data");
			// Qualifiers "1" and "2" match the original String.valueOf(1)/String.valueOf(2).
			Put put1 = new Put(Bytes.toBytes("row1"));
			put1.addColumn(cf, Bytes.toBytes("1"), Bytes.toBytes("张三丰"));
			Put put2 = new Put(Bytes.toBytes("row2"));
			put2.addColumn(cf, Bytes.toBytes("2"), Bytes.toBytes("张无忌"));

			try (Table table = connection.getTable(tableName)) {
				table.put(put1);
				table.put(put2);
			}
		}
		/********* End *********/
	}
}

3.运行

三.第3关:获取数据

1.先写命令行

bash 复制代码
start-dfs.sh

start-hbase.sh

2.再写代码文件

java 复制代码
package step3;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;


public class Task {

	/**
	 * Reads the cell data:1 of row "row1" from table "t_step3" and prints it,
	 * then scans table "table_step3" and prints every row key.
	 *
	 * <p>Fixes resource leaks in the original (Connection and both Table
	 * handles were never closed) and drops an unused Admin handle.
	 *
	 * @throws Exception if connecting or reading fails
	 */
	public void queryTableInfo() throws Exception {
		/********* Begin *********/
		Configuration config = HBaseConfiguration.create();
		try (Connection connection = ConnectionFactory.createConnection(config)) {
			// Single-cell read from t_step3.
			try (Table table = connection.getTable(TableName.valueOf("t_step3"))) {
				Get get = new Get(Bytes.toBytes("row1"));
				Result result = table.get(get);
				byte[] valueBytes = result.getValue(Bytes.toBytes("data"), Bytes.toBytes("1"));
				// Bytes.toString decodes UTF-8, same as the original new String(b, "utf-8").
				System.out.println("value:" + Bytes.toString(valueBytes));
			}
			// Full scan of table_step3; the scanner is AutoCloseable, so
			// try-with-resources replaces the original explicit finally/close.
			try (Table step3Table = connection.getTable(TableName.valueOf("table_step3"));
					ResultScanner scanner = step3Table.getScanner(new Scan())) {
				for (Result scannerResult : scanner) {
					System.out.println("rowName:" + Bytes.toString(scannerResult.getRow()));
				}
			}
		}
		/********* End *********/
	}

}

3.运行

四.第4关:删除表

1.先写命令行

bash 复制代码
start-dfs.sh

start-hbase.sh

2.再写代码文件

java 复制代码
package step4;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;


public class Task {

	/**
	 * Disables and then deletes table "t_step4".
	 *
	 * <p>HBase requires a table to be disabled before it can be deleted.
	 * Fixes a resource leak in the original: Connection and Admin were
	 * never closed.
	 *
	 * @throws Exception if the table does not exist or the operation fails
	 */
	public void deleteTable() throws Exception {
		/********* Begin *********/
		Configuration config = HBaseConfiguration.create();
		try (Connection connection = ConnectionFactory.createConnection(config);
				Admin admin = connection.getAdmin()) {
			TableName tableName = TableName.valueOf("t_step4");
			admin.disableTable(tableName); // must disable before delete
			admin.deleteTable(tableName);
		}
		/********* End *********/
	}
}

3.运行

相关推荐
FQNmxDG4S4 小时前
Java多线程编程:Thread与Runnable的并发控制
java·开发语言
虹科网络安全5 小时前
艾体宝干货|数据复制详解:类型、原理与适用场景
java·开发语言·数据库
axng pmje5 小时前
Java语法进阶
java·开发语言·jvm
rKWP8gKv75 小时前
Java微服务性能监控:Prometheus与Grafana集成方案
java·微服务·prometheus
老前端的功夫5 小时前
【Java从入门到入土】28:Stream API:告别for循环的新时代
java·开发语言·python
qq_435287925 小时前
第9章 夸父逐日与后羿射日:死循环与进程终止?十个太阳同时值班的并行冲突
java·开发语言·git·死循环·进程终止·并行冲突·夸父逐日
小江的记录本5 小时前
【Kafka核心】架构模型:Producer、Broker、Consumer、Consumer Group、Topic、Partition、Replica
java·数据库·分布式·后端·搜索引擎·架构·kafka
yaoxin5211236 小时前
397. Java 文件操作基础 - 创建常规文件与临时文件
java·开发语言·python
极客先躯8 小时前
高级java每日一道面试题-2025年11月24日-容器与虚拟化题[Dockerj]-runc 的作用是什么?
java·oci 的命令行工具·最小可用·无守护进程·完全标准·创建容器的核心流程·runc 核心职责思维导图
用户60648767188968 小时前
AI 抢不走的技能:用 Claude API 构建自动化工作流实战
java