org.apache.accumulo.core.client.mapreduce.lib.partition
Class KeyRangePartitioner

java.lang.Object
  extended by org.apache.hadoop.mapreduce.Partitioner<Key,org.apache.hadoop.io.Writable>
      extended by org.apache.accumulo.core.client.mapreduce.lib.partition.KeyRangePartitioner
All Implemented Interfaces:
org.apache.hadoop.conf.Configurable

public class KeyRangePartitioner
extends org.apache.hadoop.mapreduce.Partitioner<Key,org.apache.hadoop.io.Writable>
implements org.apache.hadoop.conf.Configurable

Hadoop partitioner that uses ranges based on row keys, and optionally sub-bins based on hashing.


Constructor Summary
KeyRangePartitioner()
           
 
Method Summary
 org.apache.hadoop.conf.Configuration getConf()
           
 int getPartition(Key key, org.apache.hadoop.io.Writable value, int numPartitions)
           
 void setConf(org.apache.hadoop.conf.Configuration conf)
           
static void setNumSubBins(org.apache.hadoop.mapreduce.Job job, int num)
          Sets the number of random sub-bins per range.
static void setSplitFile(org.apache.hadoop.mapreduce.Job job, String file)
          Sets the HDFS file name to use, containing a newline-separated list of Base64-encoded split points that represent ranges for partitioning.
 
Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
 

Constructor Detail

KeyRangePartitioner

public KeyRangePartitioner()
Method Detail

getPartition

public int getPartition(Key key,
                        org.apache.hadoop.io.Writable value,
                        int numPartitions)
Specified by:
getPartition in class org.apache.hadoop.mapreduce.Partitioner<Key,org.apache.hadoop.io.Writable>

getConf

public org.apache.hadoop.conf.Configuration getConf()
Specified by:
getConf in interface org.apache.hadoop.conf.Configurable

setConf

public void setConf(org.apache.hadoop.conf.Configuration conf)
Specified by:
setConf in interface org.apache.hadoop.conf.Configurable

setSplitFile

public static void setSplitFile(org.apache.hadoop.mapreduce.Job job,
                                String file)
Sets the HDFS file name to use, containing a newline-separated list of Base64-encoded split points that represent ranges for partitioning.


setNumSubBins

public static void setNumSubBins(org.apache.hadoop.mapreduce.Job job,
                                 int num)
Sets the number of random sub-bins per range.



Copyright © 2013 Apache Accumulo Project. All Rights Reserved.