public class ThriftTable extends Object implements org.apache.hadoop.hbase.client.Table
| 构造器和说明 |
|---|
ThriftTable(org.apache.hadoop.hbase.TableName tableName,
THBaseService.Client client,
org.apache.thrift.transport.TTransport tTransport,
org.apache.hadoop.conf.Configuration conf) |
| 限定符和类型 | 方法和说明 |
|---|---|
org.apache.hadoop.hbase.client.Result |
append(org.apache.hadoop.hbase.client.Append append) |
Object[] |
batch(List<? extends org.apache.hadoop.hbase.client.Row> list) |
void |
batch(List<? extends org.apache.hadoop.hbase.client.Row> actions,
Object[] results) |
<R> Object[] |
batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> list,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
<R> void |
batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions,
Object[] results,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
<R extends com.google.protobuf.Message> Map<byte[],R> |
batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message message,
byte[] bytes,
byte[] bytes1,
R r) |
<R extends com.google.protobuf.Message> void |
batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message message,
byte[] bytes,
byte[] bytes1,
R r,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
boolean |
checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete) |
boolean |
checkAndDelete(byte[] bytes,
byte[] bytes1,
byte[] bytes2,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] bytes3,
org.apache.hadoop.hbase.client.Delete delete) |
boolean |
checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp op,
byte[] value,
org.apache.hadoop.hbase.client.RowMutations mutation) |
boolean |
checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Put put) |
boolean |
checkAndPut(byte[] bytes,
byte[] bytes1,
byte[] bytes2,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] bytes3,
org.apache.hadoop.hbase.client.Put put) |
void |
close() |
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel |
coprocessorService(byte[] row) |
<T extends com.google.protobuf.Service,R> Map<byte[],R> |
coprocessorService(Class<T> aClass,
byte[] bytes,
byte[] bytes1,
org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> call) |
<T extends com.google.protobuf.Service,R> void |
coprocessorService(Class<T> aClass,
byte[] bytes,
byte[] bytes1,
org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> call,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
void |
delete(org.apache.hadoop.hbase.client.Delete delete) |
void |
delete(List<org.apache.hadoop.hbase.client.Delete> deletes) |
boolean |
exists(org.apache.hadoop.hbase.client.Get get) |
boolean[] |
existsAll(List<org.apache.hadoop.hbase.client.Get> gets) |
org.apache.hadoop.hbase.client.Result |
get(org.apache.hadoop.hbase.client.Get get) |
org.apache.hadoop.hbase.client.Result[] |
get(List<org.apache.hadoop.hbase.client.Get> gets) |
org.apache.hadoop.conf.Configuration |
getConfiguration() |
org.apache.hadoop.hbase.TableName |
getName() |
int |
getOperationTimeout() |
int |
getReadRpcTimeout() |
int |
getRpcTimeout() |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(byte[] family) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(byte[] family,
byte[] qualifier) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(org.apache.hadoop.hbase.client.Scan scan) |
org.apache.hadoop.hbase.HTableDescriptor |
getTableDescriptor() |
long |
getWriteBufferSize() |
int |
getWriteRpcTimeout() |
org.apache.hadoop.hbase.client.Result |
increment(org.apache.hadoop.hbase.client.Increment increment) |
long |
incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount) |
long |
incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount,
org.apache.hadoop.hbase.client.Durability durability) |
void |
mutateRow(org.apache.hadoop.hbase.client.RowMutations rm) |
void |
put(List<org.apache.hadoop.hbase.client.Put> puts) |
void |
put(org.apache.hadoop.hbase.client.Put put) |
void |
setOperationTimeout(int i) |
void |
setReadRpcTimeout(int i) |
void |
setRpcTimeout(int i) |
void |
setWriteBufferSize(long l) |
void |
setWriteRpcTimeout(int i) |
public ThriftTable(org.apache.hadoop.hbase.TableName tableName,
THBaseService.Client client,
org.apache.thrift.transport.TTransport tTransport,
org.apache.hadoop.conf.Configuration conf)
public org.apache.hadoop.hbase.TableName getName()
getName 在接口中 org.apache.hadoop.hbase.client.Table

public org.apache.hadoop.conf.Configuration getConfiguration()

getConfiguration 在接口中 org.apache.hadoop.hbase.client.Table

public org.apache.hadoop.hbase.HTableDescriptor getTableDescriptor()
throws IOException

getTableDescriptor 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean exists(org.apache.hadoop.hbase.client.Get get)
throws IOException

exists 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean[] existsAll(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException

existsAll 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void batch(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results) throws IOException

batch 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public <R> void batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException

batchCallback 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.Result get(org.apache.hadoop.hbase.client.Get get)
throws IOException

get 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException

get 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan)
throws IOException

getScanner 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family)
throws IOException

getScanner 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family,
byte[] qualifier)
throws IOException

getScanner 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void put(org.apache.hadoop.hbase.client.Put put)
throws IOException

put 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void put(List<org.apache.hadoop.hbase.client.Put> puts) throws IOException

put 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void delete(org.apache.hadoop.hbase.client.Delete delete)
throws IOException

delete 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void delete(List<org.apache.hadoop.hbase.client.Delete> deletes) throws IOException
delete 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp op,
byte[] value,
org.apache.hadoop.hbase.client.RowMutations mutation)
throws IOException

checkAndMutate 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Put put)
throws IOException

checkAndPut 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete)
throws IOException

checkAndDelete 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public long incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount)
throws IOException

incrementColumnValue 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public long incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount,
org.apache.hadoop.hbase.client.Durability durability)
throws IOException

incrementColumnValue 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void mutateRow(org.apache.hadoop.hbase.client.RowMutations rm)
throws IOException

mutateRow 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.Result append(org.apache.hadoop.hbase.client.Append append)
throws IOException

append 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.Result increment(org.apache.hadoop.hbase.client.Increment increment)
throws IOException

increment 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void close()
throws IOException

close 在接口中 Closeable
close 在接口中 AutoCloseable
close 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean checkAndPut(byte[] bytes,
byte[] bytes1,
byte[] bytes2,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] bytes3,
org.apache.hadoop.hbase.client.Put put)
throws IOException

checkAndPut 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean checkAndDelete(byte[] bytes,
byte[] bytes1,
byte[] bytes2,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] bytes3,
org.apache.hadoop.hbase.client.Delete delete)
throws IOException

checkAndDelete 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel coprocessorService(byte[] row)

coprocessorService 在接口中 org.apache.hadoop.hbase.client.Table

public Object[] batch(List<? extends org.apache.hadoop.hbase.client.Row> list) throws IOException, InterruptedException

batch 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException, InterruptedException

public <R> Object[] batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> list, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException, InterruptedException

batchCallback 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException, InterruptedException

public <T extends com.google.protobuf.Service,R> Map<byte[],R> coprocessorService(Class<T> aClass, byte[] bytes, byte[] bytes1, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> call) throws com.google.protobuf.ServiceException, Throwable

coprocessorService 在接口中 org.apache.hadoop.hbase.client.Table
抛出: com.google.protobuf.ServiceException, Throwable

public <T extends com.google.protobuf.Service,R> void coprocessorService(Class<T> aClass, byte[] bytes, byte[] bytes1, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable

coprocessorService 在接口中 org.apache.hadoop.hbase.client.Table
抛出: com.google.protobuf.ServiceException, Throwable

public long getWriteBufferSize()

getWriteBufferSize 在接口中 org.apache.hadoop.hbase.client.Table

public void setWriteBufferSize(long l)
throws IOException

setWriteBufferSize 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public <R extends com.google.protobuf.Message> Map<byte[],R> batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor, com.google.protobuf.Message message, byte[] bytes, byte[] bytes1, R r) throws com.google.protobuf.ServiceException, Throwable

batchCoprocessorService 在接口中 org.apache.hadoop.hbase.client.Table
抛出: com.google.protobuf.ServiceException, Throwable

public <R extends com.google.protobuf.Message> void batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message message,
byte[] bytes,
byte[] bytes1,
R r,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback)
throws com.google.protobuf.ServiceException,
Throwable
batchCoprocessorService 在接口中 org.apache.hadoop.hbase.client.Table
抛出: com.google.protobuf.ServiceException, Throwable

public void setOperationTimeout(int i)
public int getOperationTimeout()
public int getRpcTimeout()
public void setRpcTimeout(int i)
public int getReadRpcTimeout()
public void setReadRpcTimeout(int i)
public int getWriteRpcTimeout()
public void setWriteRpcTimeout(int i)
Copyright © 2019. All rights reserved.