org.apache.hadoop.hbase.client.Put.add(byte[], byte[], byte[])
org.apache.hadoop.hbase.client.Put.add(byte[], byte[], long, byte[])
org.apache.hadoop.hbase.client.Put.add(byte[], ByteBuffer, long, ByteBuffer)
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.addPeer(String, String)
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.addPeer(String, String, String)
org.apache.hadoop.hbase.client.Result.addResults(ClientProtos.RegionLoadStats)
org.apache.hadoop.hbase.client.Table.batch(List<? extends Row>)
org.apache.hadoop.hbase.client.Table.batchCallback(List<? extends Row>, Batch.Callback<R>)
org.apache.hadoop.hbase.client.HConnection.clearCaches(ServerName)
org.apache.hadoop.hbase.client.HConnection.clearRegionCache()
org.apache.hadoop.hbase.client.HConnection.clearRegionCache(byte[])
org.apache.hadoop.hbase.client.HConnection.clearRegionCache(TableName)
org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configureIncrementalLoad(Job, HTable)
org.apache.hadoop.hbase.mapreduce.CellCreator.create(byte[], int, int, byte[], int, int, byte[], int, int, long, byte[], int, int, String)
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration)
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration, ExecutorService)
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration, ExecutorService, User)
org.apache.hadoop.hbase.client.HConnectionManager.createConnection(Configuration, User)
org.apache.hadoop.hbase.client.HConnectionManager.deleteAllConnections()
org.apache.hadoop.hbase.client.HConnectionManager.deleteAllConnections(boolean)
org.apache.hadoop.hbase.client.HConnection.deleteCachedRegionLocation(HRegionLocation)
org.apache.hadoop.hbase.client.Delete.deleteColumn(byte[], byte[])
org.apache.hadoop.hbase.client.Delete.deleteColumn(byte[], byte[], long)
org.apache.hadoop.hbase.client.Delete.deleteColumns(byte[], byte[])
org.apache.hadoop.hbase.client.Delete.deleteColumns(byte[], byte[], long)
org.apache.hadoop.hbase.client.HConnectionManager.deleteConnection(Configuration)
org.apache.hadoop.hbase.client.Delete.deleteFamily(byte[])
org.apache.hadoop.hbase.client.Delete.deleteFamily(byte[], long)
org.apache.hadoop.hbase.client.Delete.deleteFamilyVersion(byte[], long)
org.apache.hadoop.hbase.client.HConnectionManager.deleteStaleConnection(HConnection)
org.apache.hadoop.hbase.CellUtil.estimatedHeapSizeOfWithoutTags(Cell)
org.apache.hadoop.hbase.CellUtil.estimatedSizeOf(Cell)
org.apache.hadoop.hbase.client.HConnectionManager.execute(HConnectable<T>)
org.apache.hadoop.hbase.rest.client.RemoteHTable.exists(List<Get>)
org.apache.hadoop.hbase.client.HConnection.getAdmin(ServerName)
org.apache.hadoop.hbase.client.HConnection.getAdmin(ServerName, boolean)
org.apache.hadoop.hbase.client.HConnection.getClient(ServerName)
org.apache.hadoop.hbase.client.Result.getColumn(byte[], byte[])
org.apache.hadoop.hbase.client.Result.getColumnLatest(byte[], byte[])
org.apache.hadoop.hbase.client.Result.getColumnLatest(byte[], int, int, byte[], int, int)
org.apache.hadoop.hbase.client.HConnectionManager.getConnection(Configuration)
org.apache.hadoop.hbase.client.HConnection.getCurrentNrHRS()
org.apache.hadoop.hbase.HColumnDescriptor.getDataBlockEncodingOnDisk()
org.apache.hadoop.hbase.HRegionInfo.getDaughterRegions(Result)
org.apache.hadoop.hbase.Cell.getFamily()
org.apache.hadoop.hbase.client.Mutation.getFamilyMap()
org.apache.hadoop.hbase.HRegionInfo.getHRegionInfo(Result)
org.apache.hadoop.hbase.HRegionInfo.getHRegionInfo(Result, byte[])
org.apache.hadoop.hbase.HRegionInfo.getHRegionInfoAndServerName(Result)
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getHTable()
org.apache.hadoop.hbase.mapred.TableInputFormatBase.getHTable()
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(byte[])
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(TableName)
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptors(List<String>)
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptorsByTableName(List<TableName>)
org.apache.hadoop.hbase.client.HConnection.getKeepAliveMasterService()
org.apache.hadoop.hbase.client.HConnection.getMaster()
org.apache.hadoop.hbase.HRegionInfo.getMergeRegions(Result)
org.apache.hadoop.hbase.Cell.getMvccVersion()
org.apache.hadoop.hbase.HTableDescriptor.getName()
org.apache.hadoop.hbase.filter.Filter.getNextKeyHint(KeyValue)
org.apache.hadoop.hbase.filter.FilterList.getNextKeyHint(KeyValue)
org.apache.hadoop.hbase.client.HConnection.getNonceGenerator()
org.apache.hadoop.hbase.HTableDescriptor.getOwnerString()
org.apache.hadoop.hbase.Cell.getQualifier()
org.apache.hadoop.hbase.client.HConnection.getRegionCachePrefetch(byte[])
org.apache.hadoop.hbase.client.HConnection.getRegionCachePrefetch(TableName)
org.apache.hadoop.hbase.client.HConnection.getRegionLocation(byte[], byte[], boolean)
org.apache.hadoop.hbase.client.HConnection.getRegionLocation(TableName, byte[], boolean)
org.apache.hadoop.hbase.Cell.getRow()
org.apache.hadoop.hbase.TableName.getRowComparator()
org.apache.hadoop.hbase.HRegionInfo.getSeqNumDuringOpen(Result)
org.apache.hadoop.hbase.ClusterStatus.getServerInfo()
org.apache.hadoop.hbase.HRegionInfo.getServerName(Result)
org.apache.hadoop.hbase.io.ImmutableBytesWritable.getSize()
org.apache.hadoop.hbase.HTableDescriptor.getTableDir(Path, byte[])
org.apache.hadoop.hbase.HRegionInfo.getTableName()
org.apache.hadoop.hbase.HRegionInfo.getTableName(byte[])
org.apache.hadoop.hbase.client.HConnection.getTableNames()
org.apache.hadoop.hbase.Cell.getValue()
org.apache.hadoop.hbase.HRegionInfo.getVersion()
org.apache.hadoop.hbase.client.Table.getWriteBufferSize()
org.apache.hadoop.hbase.client.Mutation.getWriteToWAL()
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initCredentialsForCluster(Job, String)
org.apache.hadoop.hbase.client.HConnection.isDeadServer(ServerName)
org.apache.hadoop.hbase.client.HConnection.isMasterRunning()
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(byte[])
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(byte[], byte[][])
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(TableName, byte[][])
org.apache.hadoop.hbase.client.HConnection.isTableDisabled(byte[])
org.apache.hadoop.hbase.client.HConnection.isTableEnabled(byte[])
org.apache.hadoop.hbase.client.Result.list()
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listPeers()
org.apache.hadoop.hbase.client.HConnection.listTableNames()
org.apache.hadoop.hbase.client.HConnection.listTables()
org.apache.hadoop.hbase.client.HConnection.locateRegion(byte[])
org.apache.hadoop.hbase.client.HConnection.locateRegion(byte[], byte[])
org.apache.hadoop.hbase.client.HConnection.locateRegion(TableName, byte[])
org.apache.hadoop.hbase.client.HConnection.locateRegions(byte[])
org.apache.hadoop.hbase.client.HConnection.locateRegions(byte[], boolean, boolean)
org.apache.hadoop.hbase.client.HConnection.locateRegions(TableName)
org.apache.hadoop.hbase.client.HConnection.locateRegions(TableName, boolean, boolean)
org.apache.hadoop.hbase.security.User.obtainAuthTokenForJob(Configuration, Job)
org.apache.hadoop.hbase.security.User.obtainAuthTokenForJob(JobConf)
org.apache.hadoop.hbase.client.HConnection.processBatch(List<? extends Row>, byte[], ExecutorService, Object[])
org.apache.hadoop.hbase.client.HConnection.processBatch(List<? extends Row>, TableName, ExecutorService, Object[])
org.apache.hadoop.hbase.client.HConnection.processBatchCallback(List<? extends Row>, byte[], ExecutorService, Object[], Batch.Callback<R>)
org.apache.hadoop.hbase.client.HConnection.processBatchCallback(List<? extends Row>, TableName, ExecutorService, Object[], Batch.Callback<R>)
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], List<Put>)
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], Put)
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], Put, int)
org.apache.hadoop.hbase.client.Result.raw()
org.apache.hadoop.hbase.HColumnDescriptor.readFields(DataInput)
org.apache.hadoop.hbase.HRegionInfo.readFields(DataInput)
org.apache.hadoop.hbase.HTableDescriptor.readFields(DataInput)
org.apache.hadoop.hbase.util.Bytes.readVLong(byte[], int)
org.apache.hadoop.hbase.client.HConnection.relocateRegion(byte[], byte[])
org.apache.hadoop.hbase.client.HConnection.relocateRegion(TableName, byte[])
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.reverseDNS(InetAddress)
org.apache.hadoop.hbase.HColumnDescriptor.setEncodeOnDisk(boolean)
org.apache.hadoop.hbase.client.Mutation.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
org.apache.hadoop.hbase.client.Increment.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
org.apache.hadoop.hbase.client.Delete.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
org.apache.hadoop.hbase.client.Put.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
org.apache.hadoop.hbase.client.Append.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.setHTable(HTable)
org.apache.hadoop.hbase.mapred.TableInputFormatBase.setHTable(HTable)
org.apache.hadoop.hbase.mapreduce.TableRecordReader.setHTable(Table)
org.apache.hadoop.hbase.HColumnDescriptor.setKeepDeletedCells(boolean)
org.apache.hadoop.hbase.HTableDescriptor.setName(byte[])
org.apache.hadoop.hbase.HTableDescriptor.setName(TableName)
org.apache.hadoop.hbase.HTableDescriptor.setOwner(User)
org.apache.hadoop.hbase.HTableDescriptor.setOwnerString(String)
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.setPeerTableCFs(String, String)
org.apache.hadoop.hbase.client.HConnection.setRegionCachePrefetch(byte[], boolean)
org.apache.hadoop.hbase.client.HConnection.setRegionCachePrefetch(TableName, boolean)
org.apache.hadoop.hbase.client.HConnectionManager.setServerSideHConnectionRetries(Configuration, String, Log)
org.apache.hadoop.hbase.client.Table.setWriteBufferSize(long)
org.apache.hadoop.hbase.client.Mutation.setWriteToWAL(boolean)
org.apache.hadoop.hbase.client.Increment.setWriteToWAL(boolean)
org.apache.hadoop.hbase.client.Delete.setWriteToWAL(boolean)
org.apache.hadoop.hbase.client.Put.setWriteToWAL(boolean)
org.apache.hadoop.hbase.client.Append.setWriteToWAL(boolean)
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheBloomsOnWrite()
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheDataInL1()
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheDataOnWrite()
org.apache.hadoop.hbase.HColumnDescriptor.shouldCacheIndexesOnWrite()
org.apache.hadoop.hbase.HColumnDescriptor.shouldCompressTags()
org.apache.hadoop.hbase.HColumnDescriptor.shouldEvictBlocksOnClose()
org.apache.hadoop.hbase.HColumnDescriptor.shouldPrefetchBlocksOnOpen()
org.apache.hadoop.hbase.filter.Filter.transform(KeyValue)
org.apache.hadoop.hbase.filter.FilterList.transform(KeyValue)
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, byte[], byte[], Collection<LoadIncrementalHFiles.LoadQueueItem>)
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(byte[], byte[], Object, HRegionLocation)
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(TableName, byte[], byte[], Object, ServerName)
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(TableName, byte[], Object, HRegionLocation)
org.apache.hadoop.hbase.HColumnDescriptor.write(DataOutput)
org.apache.hadoop.hbase.HRegionInfo.write(DataOutput)
org.apache.hadoop.hbase.HTableDescriptor.write(DataOutput)
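Many of the methods listed above are client-side calls that were deprecated in the HBase 1.x API and have direct replacements in the current client. The snippet below is an illustrative sketch of a few common migrations, not an exhaustive or authoritative guide; the exact replacement can depend on the target HBase version, and the class name, table name, and column names used here are placeholders.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical example class; "my_table", "cf", "q", and "row-1" are placeholders.
public class DeprecatedApiMigration {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // HConnectionManager.createConnection(conf) -> ConnectionFactory.createConnection(conf)
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("my_table"))) {

      byte[] family = Bytes.toBytes("cf");
      byte[] qualifier = Bytes.toBytes("q");

      // Put.add(family, qualifier, value) -> Put.addColumn(family, qualifier, value)
      Put put = new Put(Bytes.toBytes("row-1"));
      put.addColumn(family, qualifier, Bytes.toBytes("value"));

      // Mutation.setWriteToWAL(false) -> Mutation.setDurability(Durability.SKIP_WAL)
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);

      // Delete.deleteColumns(family, qualifier) -> Delete.addColumns(family, qualifier)
      Delete delete = new Delete(Bytes.toBytes("row-1"));
      delete.addColumns(family, qualifier);
      table.delete(delete);
    }
  }
}
```

The same pattern applies to several other entries, for example Result.raw()/Result.list() are replaced by Result.rawCells()/Result.listCells(), and the Cell.getRow()/getFamily()/getQualifier()/getValue() accessors by the corresponding CellUtil.cloneRow/cloneFamily/cloneQualifier/cloneValue helpers.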