

HBASE-2214 per scan max buffersize

Review Request #4726 - Created April 15, 2012 (later updated)

Submitter: ferdy
Branch: 0.94
Bugs: HBASE-2214
Reviewers: hbase (group), tedyu
Repository: hbase
HBASE-2214 per scan max buffersize.
It works when running this test:



    new HBaseTestingUtility(conf).startMiniCluster();

    HBaseAdmin admin = new HBaseAdmin(conf);
    if (!admin.tableExists("test")) {
      HTableDescriptor tableDesc = new HTableDescriptor("test");
      tableDesc.addFamily(new HColumnDescriptor("fam"));
      admin.createTable(tableDesc);
    }

    HTable table = new HTable(conf, "test");
    Put put;

    put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("fam"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
    table.put(put);

    put = new Put(Bytes.toBytes("row2"));
    put.add(Bytes.toBytes("fam"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));
    table.put(put);

    put = new Put(Bytes.toBytes("row3"));
    put.add(Bytes.toBytes("fam"), Bytes.toBytes("qual3"), Bytes.toBytes("val3"));
    table.put(put);

    table.flushCommits();

    // Put a logging statement in ClientScanner#next() to see the effect.
    {
      System.out.println("returns all rows at once because of the caching");
      Scan scan = new Scan();
      scan.setCaching(100);
      ResultScanner scanner = table.getScanner(scan);
      scanner.next(100);
    }
    {
      System.out.println("returns one row at a time because of the maxResultSize");
      Scan scan = new Scan();
      scan.setCaching(100);
      scan.setMaxResultSize(1);
      ResultScanner scanner = table.getScanner(scan);
      scanner.next(100);
    }



See output:

returns all rows at once because of the caching
2012-04-25 22:18:47,494 DEBUG [main] client.ClientScanner(94): Creating scanner over test starting at key ''
2012-04-25 22:18:47,494 DEBUG [main] client.ClientScanner(206): Advancing internal scanner to startKey at ''
2012-04-25 22:18:47,499 DEBUG [main] client.ClientScanner(323): Rows returned 3
2012-04-25 22:18:47,502 DEBUG [main] client.ClientScanner(193): Finished with scanning at {NAME => 'test,,1335385126388.ed23a82f3d6ca2eab571918843796259.', STARTKEY => '', ENDKEY => '', ENCODED => ed23a82f3d6ca2eab571918843796259,}
returns one row at a time because of the maxResultSize
2012-04-25 22:18:47,504 DEBUG [main] client.ClientScanner(94): Creating scanner over test starting at key ''
2012-04-25 22:18:47,505 DEBUG [main] client.ClientScanner(206): Advancing internal scanner to startKey at ''
2012-04-25 22:18:47,514 DEBUG [main] client.ClientScanner(323): Rows returned 1
2012-04-25 22:18:47,517 DEBUG [main] client.ClientScanner(323): Rows returned 1
2012-04-25 22:18:47,522 DEBUG [main] client.ClientScanner(323): Rows returned 1
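
A small follow-on sketch (reusing the table from the test above) of how the two limits are meant to combine: per the class javadoc in the attached diff, a single region server request stops at whichever limit is reached first, the row count from setCaching or the byte size from the new setter. Note that the test above calls setMaxResultSize while the posted diff names the setter setMaxBufferSize; the sketch follows the test, and the 64 KB value is purely illustrative.

    {
      Scan scan = new Scan();
      scan.setCaching(100);               // at most 100 rows per server request
      scan.setMaxResultSize(64 * 1024L);  // ...but stop earlier once ~64 KB are buffered (illustrative value)
      ResultScanner scanner = table.getScanner(scan);
      try {
        for (Result result : scanner) {
          // consume rows; the client issues further requests transparently
        }
      } finally {
        scanner.close();
      }
    }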

Changes between revision 1 and 2


  1. /src/main/java/org/apache/hadoop/hbase/client/Scan.java
  2. /src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
/src/main/java/org/apache/hadoop/hbase/client/Scan.java
Diff Revision 1 vs. Diff Revision 2
This file contains only whitespace changes.
(Both revisions render the complete Scan.java side by side with no content differences; the only changes between diff revision 1 and diff revision 2 are whitespace cleanup. The listing itself already carries the additions under review: the class javadoc describing the per-scan maximum buffer size and noting that, when combined with caching, a single server request is limited by whichever of the two limits is reached first; the maxBufferSize field defaulting to -1; the getMaxBufferSize()/setMaxBufferSize(long) accessors; propagation of maxBufferSize through the copy constructor and toMap(); and SCAN_VERSION bumped to 3, with maxBufferSize appended in write() and read back in readFields() when the version is greater than 2.)
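
For readers skimming the diff summary above, a minimal self-contained sketch of the versioned Writable pattern the patch uses for the new field (names mirror the diff; this is not the actual HBase class): the version byte is bumped to 3, the new field is appended on write, and it is only read back when the payload declares version 3 or later, so data written by older clients still deserializes.

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    // Illustrative sketch only; mirrors Scan.write()/readFields() from the diff.
    class VersionedScanSketch {
      private static final byte SCAN_VERSION = (byte) 3; // was 2 before maxBufferSize existed
      private long maxBufferSize = -1;                   // -1 means "no per-scan limit set"

      void write(DataOutput out) throws IOException {
        out.writeByte(SCAN_VERSION);
        // ... the pre-existing Scan fields would be written here ...
        out.writeLong(maxBufferSize);                    // new trailing field (version 3+)
      }

      void readFields(DataInput in) throws IOException {
        int version = in.readByte();
        // ... the pre-existing Scan fields would be read here ...
        if (version > 2) {                               // field only present in version-3 payloads
          this.maxBufferSize = in.readLong();
        }
      }
    }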
/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
Diff Revision 1 vs. Diff Revision 2
(The HRegion.java interdiff had not loaded when this page was captured.)