

HBASE-4071: Support minimum number of versions with TTL

Review Request #1582 - Created Aug. 18, 2011 and submitted

Submitter: Lars Hofhansl
Branch: trunk
Bugs: HBASE-4071
Reviewers (groups): hbase
Reviewers (people): jgray, stack, tlipcon
Repository: hbase
Description:
Allow enforcing a minimum number of versions when TTL is enabled for a store.
The GC logic for both versions and TTL is unified inside the ColumnTrackers.

Testing Done:
Ran all tests. I get errors (not failures) from two: TestDistributedLogSplitting and TestHTablePool. Both fail with or without my changes.
New tests: TestMinVersions.
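For readers skimming the patch, the retention rule being unified is roughly: a version is garbage-collectable only if it lies beyond maxVersions, or it is past the TTL and also outside the newest minVersions for its column. The standalone sketch below is illustrative only; it is not the patch's ColumnTracker code, and all names in it are made up.

// Illustrative sketch of the unified version/TTL retention rule.
// The real logic lives in the ColumnTracker implementations this patch touches.
public final class RetentionRuleSketch {

  /**
   * @param versionIndex 0-based index of this version, newest first
   * @param ageMillis    age of the cell in milliseconds
   * @param ttlMillis    store TTL in milliseconds (Long.MAX_VALUE = forever)
   * @param minVersions  minimum versions to retain even when expired
   * @param maxVersions  maximum versions to retain
   */
  static boolean keep(int versionIndex, long ageMillis, long ttlMillis,
      int minVersions, int maxVersions) {
    if (versionIndex >= maxVersions) {
      return false;                     // beyond max versions: always collectable
    }
    if (ageMillis <= ttlMillis) {
      return true;                      // not expired: kept
    }
    return versionIndex < minVersions;  // expired: kept only while within minVersions
  }

  public static void main(String[] args) {
    long day = 24L * 60 * 60 * 1000;
    // TTL = 1 day, minVersions = 2, maxVersions = 10, all cells 3 days old:
    System.out.println(keep(0, 3 * day, day, 2, 10)); // true  (newest, kept despite expiry)
    System.out.println(keep(1, 3 * day, day, 2, 10)); // true  (second newest, kept)
    System.out.println(keep(2, 3 * day, day, 2, 10)); // false (expired and beyond minVersions)
  }
}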

Diff revision 5
This is not the most recent revision of the diff; the latest diff is revision 7.

  1. http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
  2. http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
  3. http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
  4. http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
  5. http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
  6. http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
  7. http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
  8. http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
  9. http://svn.apache.org/repos/asf/hbase/trunk/src/main/ruby/hbase/admin.rb
  10. http://svn.apache.org/repos/asf/hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
  11. http://svn.apache.org/repos/asf/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java
  12. http://svn.apache.org/repos/asf/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
  13. http://svn.apache.org/repos/asf/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java
http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
Revision 1160440

[84 lines of leading context collapsed: public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> { ...]

   public static final String LENGTH = "LENGTH";
   public static final String TTL = "TTL";
   public static final String BLOOMFILTER = "BLOOMFILTER";
   public static final String FOREVER = "FOREVER";
   public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
+  public static final String MIN_VERSIONS = "MIN_VERSIONS";

   /**
    * Default compression type.
    */
   public static final String DEFAULT_COMPRESSION =
     Compression.Algorithm.NONE.getName();

   /**
    * Default number of versions of a record to keep.
    */
   public static final int DEFAULT_VERSIONS = 3;

+  /**
+   * Default is not to keep a minimum of versions.
+   */
+  public static final int DEFAULT_MIN_VERSIONS = 0;
+
   /*
    * Cache here the HCD value.
    * Question: its OK to cache since when we're reenable, we create a new HCD?
    */
   private volatile Integer blocksize = null;

[150 lines of context collapsed, still inside public class HColumnDescriptor ...]

    */
   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
       final String compression, final boolean inMemory,
       final boolean blockCacheEnabled, final int blocksize,
       final int timeToLive, final String bloomFilter, final int scope) {
+    this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, compression, inMemory,
+        blockCacheEnabled, blocksize, timeToLive, bloomFilter, scope);
+  }
+
+  /**
+   * Constructor
+   * @param familyName Column family name. Must be 'printable' -- digit or
+   * letter -- and may not contain a <code>:<code>
+   * @param maxVersions Maximum number of versions to keep
+   * @param minVersions Minimum number of versions to keep
+   * @param compression Compression type
+   * @param inMemory If true, column data should be kept in an HRegionServer's
+   * cache
+   * @param blockCacheEnabled If true, MapFile blocks should be cached
+   * @param blocksize Block size to use when writing out storefiles.  Use
+   * smaller blocksizes for faster random-access at expense of larger indices
+   * (more memory consumption).  Default is usually 64k.
+   * @param timeToLive Time-to-live of cell contents, in seconds
+   * (use HConstants.FOREVER for unlimited TTL)
+   * @param bloomFilter Bloom filter type for this column
+   * @param scope The scope tag for this column
+   *
+   * @throws IllegalArgumentException if passed a family name that is made of
+   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
+   * a <code>:</code>
+   * @throws IllegalArgumentException if the number of versions is &lt;= 0
+   */
+  public HColumnDescriptor(final byte [] familyName, final int minVersions,
+      final int maxVersions, final String compression, final boolean inMemory,
+      final boolean blockCacheEnabled, final int blocksize,
+      final int timeToLive, final String bloomFilter, final int scope) {
     isLegalFamilyName(familyName);
     this.name = familyName;

     if (maxVersions <= 0) {
       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
       // Until there is support, consider 0 or < 0 -- a configuration error.
       throw new IllegalArgumentException("Maximum versions must be positive");
     }
+
+    if (minVersions > 0) {
+      if (timeToLive == HConstants.FOREVER) {
+        throw new IllegalArgumentException("Minimum versions requires TTL to be set.");
+      }
+      if (minVersions > maxVersions) {
+        throw new IllegalArgumentException("Minimum versions must be <= "+
+            "maximum versions.");
+      }
+    }
+
     setMaxVersions(maxVersions);
+    setMinVersions(minVersions);
     setInMemory(inMemory);
     setBlockCacheEnabled(blockCacheEnabled);
     setTimeToLive(timeToLive);
     setCompressionType(Compression.Algorithm.
       valueOf(compression.toUpperCase()));

[231 lines of context collapsed; the next hunk is near public int getTimeToLive() ...]

   public void setTimeToLive(int timeToLive) {
     setValue(TTL, Integer.toString(timeToLive));
   }

   /**
+   * @return minVersions The minimum number of versions to keep.
+   */
+  public int getMinVersions() {
+    String value = getValue(MIN_VERSIONS);
+    return (value != null)? Integer.valueOf(value).intValue(): 0;
+  }
+
+  /**
+   * @param minVersions The minimum number of versions to keep.
+   * (used when timeToLive is set)
+   */
+  public void setMinVersions(int minVersions) {
+    setValue(MIN_VERSIONS, Integer.toString(minVersions));
+  }
+
+  /**
    * @return True if MapFile blocks should be cached.
    */
   public boolean isBlockCacheEnabled() {
     String value = getValue(BLOCKCACHE);
     if (value != null)

[220 lines of trailing context collapsed]
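As a usage sketch of the new constructor and its validation shown above (assumptions: the pre-existing HColumnDescriptor and HConstants constants DEFAULT_BLOCKSIZE, DEFAULT_BLOOMFILTER, and REPLICATION_SCOPE_LOCAL from trunk of this era; table and family names are made up):

// Hedged usage sketch; not part of the patch itself.
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

public class MinVersionsUsageSketch {
  public static void main(String[] args) {
    byte[] cf = Bytes.toBytes("cf");

    // Valid: keep at least 2 versions even once the one-day TTL has expired them.
    HColumnDescriptor family = new HColumnDescriptor(cf,
        2,                                    // minVersions
        10,                                   // maxVersions
        HColumnDescriptor.DEFAULT_COMPRESSION,
        false,                                // inMemory
        true,                                 // blockCacheEnabled
        HColumnDescriptor.DEFAULT_BLOCKSIZE,
        24 * 60 * 60,                         // timeToLive in seconds (one day)
        HColumnDescriptor.DEFAULT_BLOOMFILTER,
        HConstants.REPLICATION_SCOPE_LOCAL);
    System.out.println("minVersions = " + family.getMinVersions());

    // Invalid: minVersions without a finite TTL.
    try {
      new HColumnDescriptor(cf, 1, 3, HColumnDescriptor.DEFAULT_COMPRESSION,
          false, true, HColumnDescriptor.DEFAULT_BLOCKSIZE,
          HConstants.FOREVER, HColumnDescriptor.DEFAULT_BLOOMFILTER,
          HConstants.REPLICATION_SCOPE_LOCAL);
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage()); // "Minimum versions requires TTL to be set."
    }

    // Invalid: minVersions greater than maxVersions.
    try {
      new HColumnDescriptor(cf, 5, 3, HColumnDescriptor.DEFAULT_COMPRESSION,
          false, true, HColumnDescriptor.DEFAULT_BLOCKSIZE,
          24 * 60 * 60, HColumnDescriptor.DEFAULT_BLOOMFILTER,
          HConstants.REPLICATION_SCOPE_LOCAL);
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage()); // "Minimum versions must be <= maximum versions."
    }
  }
}
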
http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
Revision 1160440

http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
Revision 1160440

http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
Revision 1160440

http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
Revision 1160440

http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
Revision 1160440

http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
Revision 1160440

http://svn.apache.org/repos/asf/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
Revision 1160440

http://svn.apache.org/repos/asf/hbase/trunk/src/main/ruby/hbase/admin.rb
Revision 1160440

http://svn.apache.org/repos/asf/hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
Revision 1160440

http://svn.apache.org/repos/asf/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java
Revision 1160440

http://svn.apache.org/repos/asf/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
New File

http://svn.apache.org/repos/asf/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java
Revision 1160440