头哥实践平台----HBase Development: Using Java to Operate HBase

I. Level 1: Creating a Table

1. First, run the startup commands

bash
start-dfs.sh

start-hbase.sh

hadoop fs -ls /hbase    # optional: check that HBase's directory exists on HDFS

2. Then write the code file

java
package step1;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;


public class Task {
	public void createTable() throws Exception {
		/********* Begin *********/
		Configuration config = HBaseConfiguration.create();
		Connection connection = ConnectionFactory.createConnection(config);
		try {
			Admin admin = connection.getAdmin();
			try {
				// Build the "dept" table with the new (HBase 2.x) API:
				// a TableDescriptor is assembled through TableDescriptorBuilder.
				TableName tableName = TableName.valueOf("dept");
				TableDescriptorBuilder tableDescriptor = TableDescriptorBuilder.newBuilder(tableName);
				ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("data")).build(); // build the column family descriptor
				tableDescriptor.setColumnFamily(family); // attach the column family
				admin.createTable(tableDescriptor.build()); // create the table

				// Build the "emp" table the same way.
				TableName emp = TableName.valueOf("emp");
				TableDescriptorBuilder empDescriptor = TableDescriptorBuilder.newBuilder(emp);
				ColumnFamilyDescriptor empFamily = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("emp")).build(); // build the column family descriptor
				empDescriptor.setColumnFamily(empFamily); // attach the column family
				admin.createTable(empDescriptor.build()); // create the table
			} finally {
				admin.close();
			}
		} finally {
			connection.close();
		}
		/********* End *********/
	}
}
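
Note that admin.createTable throws a TableExistsException when the table already exists, so re-running this step against a dirty environment fails on the second pass. Below is a minimal guard sketch, assuming the same HBase 2.x client API and imports as above plus java.io.IOException; the helper name createIfAbsent is ours, not part of the exercise.

java
// Sketch: create a table only when it is not already present.
private void createIfAbsent(Admin admin, String table, String family) throws IOException {
	TableName name = TableName.valueOf(table);
	if (admin.tableExists(name)) {
		return; // already there: skip creation so repeated runs stay safe
	}
	TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
	builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build());
	admin.createTable(builder.build());
}

With this helper, the body of createTable reduces to createIfAbsent(admin, "dept", "data") and createIfAbsent(admin, "emp", "emp").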

3. Run

II. Level 2: Inserting Data

1. First, run the startup commands

bash
start-dfs.sh

start-hbase.sh

2. Then write the code file

java
package step2;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class Task {

	public void insertInfo() throws Exception {
		/********* Begin *********/
		Configuration config = HBaseConfiguration.create();
		Connection connection = ConnectionFactory.createConnection(config);
		Admin admin = connection.getAdmin();
		// Create the target table first.
		TableName tableName = TableName.valueOf("tb_step2");
		TableDescriptorBuilder tableDescriptor = TableDescriptorBuilder.newBuilder(tableName);
		ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("data")).build(); // build the column family descriptor
		tableDescriptor.setColumnFamily(family); // attach the column family
		admin.createTable(tableDescriptor.build()); // create the table
		// Insert two rows.
		byte[] row1 = Bytes.toBytes("row1");
		Put put1 = new Put(row1);
		byte[] columnFamily1 = Bytes.toBytes("data"); // column family
		byte[] qualifier1 = Bytes.toBytes(String.valueOf(1)); // column qualifier
		byte[] value1 = Bytes.toBytes("张三丰"); // cell value
		put1.addColumn(columnFamily1, qualifier1, value1);
		byte[] row2 = Bytes.toBytes("row2");
		Put put2 = new Put(row2);
		byte[] columnFamily2 = Bytes.toBytes("data"); // column family
		byte[] qualifier2 = Bytes.toBytes(String.valueOf(2)); // column qualifier
		byte[] value2 = Bytes.toBytes("张无忌"); // cell value
		put2.addColumn(columnFamily2, qualifier2, value2);
		Table table = connection.getTable(tableName);
		table.put(put1);
		table.put(put2);
		// Release resources.
		table.close();
		admin.close();
		connection.close();
		/********* End *********/
	}
}
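
The two rows above go out as separate RPCs. Table.put also accepts a List<Put>, which batches them into one client call. Here is a sketch under the same assumptions as the code above, inserted before the table.close() call and with java.util.ArrayList and java.util.List imported; the "value" strings are placeholders, not the exercise data.

java
// Sketch: collect several Puts and send them in one batch.
List<Put> puts = new ArrayList<>();
for (int i = 1; i <= 2; i++) {
	Put put = new Put(Bytes.toBytes("row" + i));
	put.addColumn(Bytes.toBytes("data"), Bytes.toBytes(String.valueOf(i)), Bytes.toBytes("value" + i));
	puts.add(put);
}
table.put(puts); // Table.put(List<Put>) is part of the public client API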

3. Run

III. Level 3: Retrieving Data

1. First, run the startup commands

bash
start-dfs.sh

start-hbase.sh

2. Then write the code file

java
package step3;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;


public class Task {

	public void queryTableInfo() throws Exception {
		/********* Begin *********/
		Configuration config = HBaseConfiguration.create();
		Connection connection = ConnectionFactory.createConnection(config);
		TableName tableName = TableName.valueOf("t_step3");
		Table table = connection.getTable(tableName);
		// Fetch a single row.
		Get get = new Get(Bytes.toBytes("row1")); // build the Get for row "row1"
		Result result = table.get(get); // fetch the row through the Table object
		// Often only the value is needed; this reads the cell at column data:1
		// (family "data", qualifier "1").
		byte[] valueBytes = result.getValue(Bytes.toBytes("data"), Bytes.toBytes("1")); // returned as a byte array
		// Convert the bytes into a string.
		String valueStr = new String(valueBytes, "utf-8");
		System.out.println("value:" + valueStr);
		// Scan a second table and print every row key.
		TableName tableStep3Name = TableName.valueOf("table_step3");
		Table step3Table = connection.getTable(tableStep3Name);
		Scan scan = new Scan();
		ResultScanner scanner = step3Table.getScanner(scan);
		try {
			for (Result scannerResult : scanner) {
				byte[] row = scannerResult.getRow();
				System.out.println("rowName:" + new String(row, "utf-8"));
			}
		} finally {
			scanner.close();
		}
		// Release resources.
		step3Table.close();
		table.close();
		connection.close();
		/********* End *********/
	}
}
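
A bare Scan ships every column of every row back to the client. When only one column matters, Scan.addColumn narrows the scan server-side. A small sketch, reusing the step3Table handle, family, and qualifier from the code above (run before the close calls):

java
// Sketch: scan only the data:1 column and decode each cell.
Scan narrow = new Scan();
narrow.addColumn(Bytes.toBytes("data"), Bytes.toBytes("1")); // ship back just this column
try (ResultScanner rs = step3Table.getScanner(narrow)) { // ResultScanner is Closeable
	for (Result r : rs) {
		byte[] v = r.getValue(Bytes.toBytes("data"), Bytes.toBytes("1"));
		if (v != null) { // rows lacking the column return no cell
			System.out.println(Bytes.toString(r.getRow()) + " -> " + Bytes.toString(v));
		}
	}
}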

3. Run

IV. Level 4: Deleting a Table

1. First, run the startup commands

bash
start-dfs.sh

start-hbase.sh

2. Then write the code file

java
package step4;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;


public class Task {

	public void deleteTable() throws Exception {
		/********* Begin *********/
		Configuration config = HBaseConfiguration.create();
		Connection connection = ConnectionFactory.createConnection(config);
		Admin admin = connection.getAdmin();
		TableName tableName = TableName.valueOf("t_step4");
		admin.disableTable(tableName); // a table must be disabled before it can be deleted
		admin.deleteTable(tableName);
		admin.close();
		connection.close();
		/********* End *********/
	}
}
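
disableTable throws when the table is already disabled, and deleteTable throws when the table does not exist, so a second run of this step would fail. A defensive variant, assuming the same Admin handle as above:

java
// Sketch: disable and delete only when the table actually exists.
TableName name = TableName.valueOf("t_step4");
if (admin.tableExists(name)) {
	if (!admin.isTableDisabled(name)) {
		admin.disableTable(name); // a table must be disabled before deletion
	}
	admin.deleteTable(name);
}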

3. Run
