public class MetadataTableUtil extends Object
| Modifier and Type | Method and Description |
|---|---|
static void |
addBulkLoadInProgressFlag(AccumuloServerContext context,
String path) |
static void |
addDeleteEntries(KeyExtent extent,
Set<FileRef> datafilesToDelete,
ClientContext context) |
static void |
addDeleteEntry(AccumuloServerContext context,
String tableId,
String path) |
static void |
addTablet(KeyExtent extent,
String path,
ClientContext context,
char timeType,
ZooLock lock) |
static int |
checkClone(String tableName,
String srcTableId,
String tableId,
Connector conn,
BatchWriter bw) |
static void |
chopped(AccumuloServerContext context,
KeyExtent extent,
ZooLock zooLock) |
static void |
cloneTable(ClientContext context,
String srcTableId,
String tableId,
VolumeManager volumeManager) |
static Mutation |
createDeleteMutation(String tableId,
String pathToRemove) |
static void |
createReplicationTable(ClientContext context)
During an upgrade from 1.6 to 1.7, we need to add the replication table. |
static void |
deleteTable(String tableId,
boolean insertDeletes,
ClientContext context,
ZooLock lock) |
static void |
finishSplit(KeyExtent extent,
Map<FileRef,DataFileValue> datafileSizes,
List<FileRef> highDatafilesToRemove,
ClientContext context,
ZooLock zooLock) |
static void |
finishSplit(org.apache.hadoop.io.Text metadataEntry,
Map<FileRef,DataFileValue> datafileSizes,
List<FileRef> highDatafilesToRemove,
ClientContext context,
ZooLock zooLock) |
static Map<Long,? extends Collection<FileRef>> |
getBulkFilesLoaded(ClientContext context,
KeyExtent extent) |
static List<FileRef> |
getBulkFilesLoaded(Connector conn,
KeyExtent extent,
long tid) |
static SortedMap<FileRef,DataFileValue> |
getDataFileSizes(KeyExtent extent,
ClientContext context) |
static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> |
getFileAndLogEntries(ClientContext context,
KeyExtent extent) |
static Iterator<LogEntry> |
getLogEntries(ClientContext context) |
static List<LogEntry> |
getLogEntries(ClientContext context,
KeyExtent extent) |
static Writer |
getMetadataTable(ClientContext context) |
static Writer |
getRootTable(ClientContext context) |
static String |
getRootTabletDir() |
static SortedMap<org.apache.hadoop.io.Text,SortedMap<ColumnFQ,Value>> |
getTabletEntries(SortedMap<Key,Value> tabletKeyValues,
List<ColumnFQ> columns) |
static void |
initializeClone(String tableName,
String srcTableId,
String tableId,
Connector conn,
BatchWriter bw) |
static void |
moveMetaDeleteMarkers(ClientContext context)
During an upgrade we need to move deletion requests for files under the !METADATA table to the
root tablet.
|
static void |
moveMetaDeleteMarkersFrom14(ClientContext context) |
static void |
putLockID(ZooLock zooLock,
Mutation m) |
static void |
removeBulkLoadEntries(Connector conn,
String tableId,
long tid) |
static void |
removeBulkLoadInProgressFlag(AccumuloServerContext context,
String path) |
static void |
removeScanFiles(KeyExtent extent,
Set<FileRef> scanFiles,
ClientContext context,
ZooLock zooLock) |
static void |
removeUnusedWALEntries(AccumuloServerContext context,
KeyExtent extent,
List<LogEntry> entries,
ZooLock zooLock) |
static void |
rollBackSplit(org.apache.hadoop.io.Text metadataEntry,
org.apache.hadoop.io.Text oldPrevEndRow,
ClientContext context,
ZooLock zooLock) |
static void |
setRootTabletDir(String dir) |
static void |
splitDatafiles(String tableId,
org.apache.hadoop.io.Text midRow,
double splitRatio,
Map<FileRef,FileUtil.FileInfo> firstAndLastRows,
SortedMap<FileRef,DataFileValue> datafiles,
SortedMap<FileRef,DataFileValue> lowDatafileSizes,
SortedMap<FileRef,DataFileValue> highDatafileSizes,
List<FileRef> highDatafilesToRemove) |
static void |
splitTablet(KeyExtent extent,
org.apache.hadoop.io.Text oldPrevEndRow,
double splitRatio,
ClientContext context,
ZooLock zooLock) |
static void |
update(ClientContext context,
ZooLock zooLock,
Mutation m,
KeyExtent extent) |
static void |
update(Writer t,
ZooLock zooLock,
Mutation m) |
static void |
updateTabletCompactID(KeyExtent extent,
long compactID,
ClientContext context,
ZooLock zooLock) |
static void |
updateTabletDataFile(long tid,
KeyExtent extent,
Map<FileRef,DataFileValue> estSizes,
String time,
ClientContext context,
ZooLock zooLock) |
static void |
updateTabletDir(KeyExtent extent,
String newDir,
ClientContext context,
ZooLock lock) |
static void |
updateTabletFlushID(KeyExtent extent,
long flushID,
ClientContext context,
ZooLock zooLock) |
static void |
updateTabletVolumes(KeyExtent extent,
List<LogEntry> logsToRemove,
List<LogEntry> logsToAdd,
List<FileRef> filesToRemove,
SortedMap<FileRef,DataFileValue> filesToAdd,
String newDir,
ZooLock zooLock,
AccumuloServerContext context) |
public static Writer getMetadataTable(ClientContext context)
public static Writer getRootTable(ClientContext context)
public static void update(ClientContext context, ZooLock zooLock, Mutation m, KeyExtent extent)
public static void updateTabletFlushID(KeyExtent extent, long flushID, ClientContext context, ZooLock zooLock)
public static void updateTabletCompactID(KeyExtent extent, long compactID, ClientContext context, ZooLock zooLock)
public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef,DataFileValue> estSizes, String time, ClientContext context, ZooLock zooLock)
public static void updateTabletDir(KeyExtent extent, String newDir, ClientContext context, ZooLock lock)
public static void addTablet(KeyExtent extent, String path, ClientContext context, char timeType, ZooLock lock)
public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove, List<LogEntry> logsToAdd, List<FileRef> filesToRemove, SortedMap<FileRef,DataFileValue> filesToAdd, String newDir, ZooLock zooLock, AccumuloServerContext context)
public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, ClientContext context) throws IOException
IOException
public static void rollBackSplit(org.apache.hadoop.io.Text metadataEntry, org.apache.hadoop.io.Text oldPrevEndRow, ClientContext context, ZooLock zooLock)
public static void splitTablet(KeyExtent extent, org.apache.hadoop.io.Text oldPrevEndRow, double splitRatio, ClientContext context, ZooLock zooLock)
public static void finishSplit(org.apache.hadoop.io.Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, ClientContext context, ZooLock zooLock)
public static void finishSplit(KeyExtent extent, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, ClientContext context, ZooLock zooLock)
public static void addDeleteEntries(KeyExtent extent, Set<FileRef> datafilesToDelete, ClientContext context) throws IOException
IOException
public static void addDeleteEntry(AccumuloServerContext context, String tableId, String path) throws IOException
IOException
public static Mutation createDeleteMutation(String tableId, String pathToRemove) throws IOException
IOException
public static void removeScanFiles(KeyExtent extent, Set<FileRef> scanFiles, ClientContext context, ZooLock zooLock)
public static void splitDatafiles(String tableId, org.apache.hadoop.io.Text midRow, double splitRatio, Map<FileRef,FileUtil.FileInfo> firstAndLastRows, SortedMap<FileRef,DataFileValue> datafiles, SortedMap<FileRef,DataFileValue> lowDatafileSizes, SortedMap<FileRef,DataFileValue> highDatafileSizes, List<FileRef> highDatafilesToRemove)
public static void deleteTable(String tableId, boolean insertDeletes, ClientContext context, ZooLock lock) throws AccumuloException, IOException
AccumuloException
IOException
public static void setRootTabletDir(String dir) throws IOException
IOException
public static String getRootTabletDir() throws IOException
IOException
public static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent) throws org.apache.zookeeper.KeeperException, InterruptedException, IOException
org.apache.zookeeper.KeeperException
InterruptedException
IOException
public static List<LogEntry> getLogEntries(ClientContext context, KeyExtent extent) throws IOException, org.apache.zookeeper.KeeperException, InterruptedException
IOException
org.apache.zookeeper.KeeperException
InterruptedException
public static Iterator<LogEntry> getLogEntries(ClientContext context) throws IOException, org.apache.zookeeper.KeeperException, InterruptedException
IOException
org.apache.zookeeper.KeeperException
InterruptedException
public static void removeUnusedWALEntries(AccumuloServerContext context, KeyExtent extent, List<LogEntry> entries, ZooLock zooLock)
public static void initializeClone(String tableName, String srcTableId, String tableId, Connector conn, BatchWriter bw) throws TableNotFoundException, MutationsRejectedException
public static int checkClone(String tableName, String srcTableId, String tableId, Connector conn, BatchWriter bw) throws TableNotFoundException, MutationsRejectedException
public static void cloneTable(ClientContext context, String srcTableId, String tableId, VolumeManager volumeManager) throws Exception
Exception
public static void chopped(AccumuloServerContext context, KeyExtent extent, ZooLock zooLock)
public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception
Exception
public static List<FileRef> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) throws IOException
IOException
public static Map<Long,? extends Collection<FileRef>> getBulkFilesLoaded(ClientContext context, KeyExtent extent) throws IOException
IOException
public static void addBulkLoadInProgressFlag(AccumuloServerContext context, String path)
public static void removeBulkLoadInProgressFlag(AccumuloServerContext context, String path)
public static void createReplicationTable(ClientContext context) throws IOException
IOException
public static void moveMetaDeleteMarkers(ClientContext context)
public static void moveMetaDeleteMarkersFrom14(ClientContext context)
Copyright © 2011–2018 The Apache Software Foundation. All rights reserved.