Professional Documents
Culture Documents
HBase Development Java
HBase Development Java
The term planet-size web application comes to mind, and in this case it is fitting
WHAT IS IT?
Scalable
HBASE
Distributed
BigDatastore Column Oriented
Reader
Writer
HDFS
FEATURES OF HBASE
Scalable. Automatic failover. Consistent reads and writes. Sharding of tables. Failover support. Classes for backing Hadoop MapReduce
jobs.
Java API for client access Thrift gateway and a REST Web
WHAT IT IS NOT
NoSQL
HBase is a type of "NoSQL" database. "NoSQL" is a general term meaning that the database isn't an RDBMS which supports SQL as its primary access language.
THINK ON THIS
Facebook, for example, is adding more than 15 TB of data daily and processing it; Google is adding and processing petabytes of data. Companies store logs, temperature readings, and many other kinds of data
to store and process, which can amount to petabytes — data that conventional technologies would take days just to read, let alone process.
Data is grouped by columns. The reason to store values on a per-column basis is that, for specific queries, not all of the values are
needed.
Reduced I/O
COMPONENTS
HMASTER
The Master
server is responsible for monitoring all RegionServer instances in the cluster and is the interface for all metadata changes; it runs on the server that hosts the NameNode.
ZOOKEEPER
API
Interfaces to HBase. Using these we can access HBase and perform
read/write and other operations on it.
REST, Thrift, and Avro. Thrift is an API framework for scalable cross-language
services development, combines a software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, JavaScript, Node.js, Smalltalk, OCaml and Delphi and other languages.
lib commons-configuration-1.8.jar commons-lang-2.6.jar commons-logging-1.1.1.jar hadoop-core-1.0.0.jar hbase-0.92.1.jar log4j-1.2.16.jar slf4j-api-1.5.8.jar slf4j-log4j12-1.5.8.jar zookeeper-3.4.3.jar
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
public class HBaseTest { private static Configuration conf = null; /** * Initialization */ static { conf = HBaseConfiguration.create(); } }
*/ public static void creatTable(String tableName, String[] familys) throws Exception { HBaseAdmin(conf); if (admin.tableExists(tableName)) { already exists!"); HBaseAdmin admin = new System.out.println("table
} else { HTableDescriptor tableDesc = new HTableDescriptor(tableName); for (int i = 0; i < familys.length; i++) { tableDesc.addFamily(new HColumnDescriptor(familys[i])); }admin.createTable(tableDesc); System.out.println("create table " + tableName + " ok."); }
/**
* Delete a table
*/ public static void deleteTable(String tableName) throws Exception {
} catch (ZooKeeperConnectionException e) {
e.printStackTrace(); }
/** * Put (or insert) a row */ public static void addRecord(String tableName, String rowKey, String family, String qualifier, String value) throws Exception {
Bytes
/**
* Delete a row
*/ public static void delRecord(String tableName, String rowKey)
throws IOException {HTable table = new HTable(conf, tableName); List<Delete> list = new ArrayList<Delete>(); Delete del = new Delete(rowKey.getBytes()); list.add(del);
table.delete(list);
System.out.println("del recored " + rowKey + " ok."); }
/**Get a row */ public static void getOneRecord (String tableName, String rowKey) throws IOException{ HTable table = new HTable(conf, tableName);
/** Scan (or list) a table */ public static void getAllRecord (String tableName) { try{ HTable table = new HTable(conf, tableName); Scan s = new Scan();
ResultScanner ss = table.getScanner(s);
for(Result r:ss){ for(KeyValue kv : r.raw()){ System.out.print(new String(kv.getRow()) + " "); System.out.print(new String(kv.getFamily()) + ":"); String(kv.getQualifier()) + " "); System.out.print(kv.getTimestamp() + " "); String(kv.getValue())); } } } catch (IOException e){ e.printStackTrace(); } System.out.print(new System.out.println(new
try {
String tablename = "scores"; String[] familys = { "grade", "course" };
HBaseTest.creatTable(tablename, familys);
// add record zkb HBaseTest.addRecord(tablename, "zkb", "grade", "", "5");
System.out.println("===========get one record========"); HBaseTest.getOneRecord(tablename, "zkb"); System.out.println("===========show all record========"); HBaseTest.getAllRecord(tablename); System.out.println("===========del one record========"); HBaseTest.delRecord(tablename, "baoniu"); HBaseTest.getAllRecord(tablename); System.out.println("===========show all record========"); HBaseTest.getAllRecord(tablename); } catch (Exception e) { e.printStackTrace(); } }}
Gmail
Twitter Facebook Skype
dwivedishashwat@gmail.com
shashwat_2010 shriparv@gmail.com shriparv