| commit_id (string, 40 chars) | project (11 classes) | commit_message (string, 3 to 3.04k chars) | type (3 classes) | url (11 classes) | git_diff (string, 555 to 691k chars) |
|---|---|---|---|---|---|
d0ea47d3ef16eb831ed535658508c5abf346bac8
|
orientdb
|
fixed collection fields in query results, issue #3212
|
c
|
https://github.com/orientechnologies/orientdb
|
diff --git a/core/src/main/java/com/orientechnologies/orient/core/sql/ORuntimeResult.java b/core/src/main/java/com/orientechnologies/orient/core/sql/ORuntimeResult.java
index 71debfae21f..6ad19f7893e 100644
--- a/core/src/main/java/com/orientechnologies/orient/core/sql/ORuntimeResult.java
+++ b/core/src/main/java/com/orientechnologies/orient/core/sql/ORuntimeResult.java
@@ -19,6 +19,14 @@
*/
package com.orientechnologies.orient.core.sql;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
import com.orientechnologies.common.util.OResettable;
import com.orientechnologies.orient.core.command.OCommandContext;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
@@ -33,12 +41,6 @@
import com.orientechnologies.orient.core.sql.filter.OSQLFilterItemVariable;
import com.orientechnologies.orient.core.sql.functions.OSQLFunctionRuntime;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
/**
* Handles runtime results.
*
@@ -66,6 +68,7 @@ public static ODocument createProjectionDocument(final int iProgressive) {
return doc;
}
+ @SuppressWarnings("unchecked")
public static ODocument applyRecord(final ODocument iValue, final Map<String, Object> iProjections,
final OCommandContext iContext, final OIdentifiable iRecord) {
// APPLY PROJECTIONS
@@ -105,6 +108,7 @@ else if (projectionValue instanceof OIdentifiable && !(projectionValue instanceo
&& !(projectionValue instanceof ORecord))
iValue.field(projection.getKey(), ((OIdentifiable) projectionValue).getRecord());
else if (projectionValue instanceof Iterator) {
+ boolean link = true;
// make temporary value typical case graph database elemenet's iterator edges
if (projectionValue instanceof OResettable)
((OResettable) projectionValue).reset();
@@ -113,16 +117,35 @@ else if (projectionValue instanceof Iterator) {
final Iterator projectionValueIterator = (Iterator) projectionValue;
while (projectionValueIterator.hasNext()) {
Object value = projectionValueIterator.next();
- if (value instanceof OIdentifiable)
+ if (value instanceof OIdentifiable) {
value = ((OIdentifiable) value).getRecord();
+ if (!((OIdentifiable) value).getIdentity().isPersistent())
+ link = false;
+ }
if (value != null)
iteratorValues.add(value);
}
- iValue.field(projection.getKey(), iteratorValues);
+ iValue.field(projection.getKey(), iteratorValues, link ? OType.LINKLIST : OType.EMBEDDEDLIST);
} else if (projectionValue instanceof ODocument && !((ODocument) projectionValue).getIdentity().isPersistent()) {
iValue.field(projection.getKey(), projectionValue, OType.EMBEDDED);
+ } else if (projectionValue instanceof Set<?>) {
+ OType type = OType.getTypeByValue(projectionValue);
+ if (type == OType.LINKSET && !entriesPersistent((Collection<OIdentifiable>) projectionValue))
+ type = OType.EMBEDDEDSET;
+ iValue.field(projection.getKey(), projectionValue, type);
+ } else if (projectionValue instanceof Map<?, ?>) {
+ OType type = OType.getTypeByValue(projectionValue);
+ if (type == OType.LINKMAP && !entriesPersistent(((Map<?, OIdentifiable>) projectionValue).values()))
+ type = OType.EMBEDDEDMAP;
+ iValue.field(projection.getKey(), projectionValue, type);
+ } else if (projectionValue instanceof List<?>) {
+ OType type = OType.getTypeByValue(projectionValue);
+ if (type == OType.LINKLIST && !entriesPersistent((Collection<OIdentifiable>) projectionValue))
+ type = OType.EMBEDDEDLIST;
+ iValue.field(projection.getKey(), projectionValue, type);
+
} else
iValue.field(projection.getKey(), projectionValue);
@@ -132,6 +155,14 @@ else if (projectionValue instanceof Iterator) {
return iValue;
}
+ private static boolean entriesPersistent(Collection<OIdentifiable> projectionValue) {
+ for (OIdentifiable rec : projectionValue) {
+ if (!rec.getIdentity().isPersistent())
+ return false;
+ }
+ return true;
+ }
+
public static ODocument getResult(final ODocument iValue, final Map<String, Object> iProjections) {
if (iValue != null) {
diff --git a/core/src/test/java/com/orientechnologies/orient/core/sql/select/TestSqlEmbeddedResult.java b/core/src/test/java/com/orientechnologies/orient/core/sql/select/TestSqlEmbeddedResult.java
index 91acf4bd64f..9bca80b8703 100644
--- a/core/src/test/java/com/orientechnologies/orient/core/sql/select/TestSqlEmbeddedResult.java
+++ b/core/src/test/java/com/orientechnologies/orient/core/sql/select/TestSqlEmbeddedResult.java
@@ -1,6 +1,5 @@
package com.orientechnologies.orient.core.sql.select;
-import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@@ -12,6 +11,8 @@
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.impl.ODocument;
+import com.orientechnologies.orient.core.serialization.serializer.ONetworkThreadLocalSerializer;
+import com.orientechnologies.orient.core.serialization.serializer.record.binary.ORecordSerializerBinary;
import com.orientechnologies.orient.core.sql.query.OSQLSynchQuery;
public class TestSqlEmbeddedResult {
@@ -34,11 +35,25 @@ public void testEmbeddedRusultTypeNotLink() {
Assert.assertEquals(res.size(), 1);
ODocument ele = res.get(0);
Assert.assertNotNull(ele.field("el"));
+ ONetworkThreadLocalSerializer.setNetworkSerializer(new ORecordSerializerBinary());
byte [] bt = ele.toStream();
ODocument read = new ODocument(bt);
Assert.assertNotNull(read.field("el"));
Assert.assertEquals(read.fieldType("el"), OType.EMBEDDED);
+
+ ONetworkThreadLocalSerializer.setNetworkSerializer(null);
+ res = db.query(new OSQLSynchQuery<Object>("select $Pics as el FROM Test LET $Pics = (select expand( rel.include('format')) from $current)"));
+
+ ONetworkThreadLocalSerializer.setNetworkSerializer(new ORecordSerializerBinary());
+ Assert.assertEquals(res.size(), 1);
+ ele = res.get(0);
+ Assert.assertNotNull(ele.field("el"));
+ bt = ele.toStream();
+ read = new ODocument(bt);
+ Assert.assertNotNull(read.field("el"));
+ Assert.assertEquals(read.fieldType("el"), OType.EMBEDDEDLIST);
+ ONetworkThreadLocalSerializer.setNetworkSerializer(null);
db.drop();
}
}
|
710ae3a9d2fbcb6767872656d82b2edaeb6e0656
|
spring-framework
|
SpringJUnit4ClassRunnerAppCtxTests now verifies seamless support for using @Inject in addition to @Autowired, etc.
|
a
|
https://github.com/spring-projects/spring-framework
|
diff --git a/org.springframework.test/.classpath b/org.springframework.test/.classpath
index d35f01db07af..5e5a2e70be5f 100644
--- a/org.springframework.test/.classpath
+++ b/org.springframework.test/.classpath
@@ -15,6 +15,7 @@
<classpathentry combineaccessrules="false" kind="src" path="/org.springframework.web.portlet"/>
<classpathentry combineaccessrules="false" kind="src" path="/org.springframework.web.servlet"/>
<classpathentry kind="var" path="IVY_CACHE/javax.activation/com.springsource.javax.activation/1.1.0/com.springsource.javax.activation-1.1.0.jar" sourcepath="/IVY_CACHE/javax.activation/com.springsource.javax.activation/1.1.0/com.springsource.javax.activation-sources-1.1.0.jar"/>
+ <classpathentry kind="var" path="IVY_CACHE/javax.inject/com.springsource.javax.inject/0.9.0.PFD/com.springsource.javax.inject-0.9.0.PFD.jar" sourcepath="/IVY_CACHE/javax.inject/com.springsource.javax.inject/0.9.0.PFD/com.springsource.javax.inject-sources-0.9.0.PFD.jar"/>
<classpathentry kind="var" path="IVY_CACHE/javax.persistence/com.springsource.javax.persistence/1.0.0/com.springsource.javax.persistence-1.0.0.jar" sourcepath="/IVY_CACHE/javax.persistence/com.springsource.javax.persistence/1.0.0/com.springsource.javax.persistence-sources-1.0.0.jar"/>
<classpathentry kind="var" path="IVY_CACHE/javax.portlet/com.springsource.javax.portlet/2.0.0/com.springsource.javax.portlet-2.0.0.jar"/>
<classpathentry kind="var" path="IVY_CACHE/javax.servlet/com.springsource.javax.servlet/2.5.0/com.springsource.javax.servlet-2.5.0.jar" sourcepath="/IVY_CACHE/javax.servlet/com.springsource.javax.servlet/2.5.0/com.springsource.javax.servlet-sources-2.5.0.jar"/>
diff --git a/org.springframework.test/ivy.xml b/org.springframework.test/ivy.xml
index e64c205bb575..b70e01038eed 100644
--- a/org.springframework.test/ivy.xml
+++ b/org.springframework.test/ivy.xml
@@ -21,6 +21,7 @@
<dependencies>
<dependency org="javax.activation" name="com.springsource.javax.activation" rev="1.1.0" conf="provided->compile"/>
<dependency org="javax.el" name="com.springsource.javax.el" rev="1.0.0" conf="provided->compile"/>
+ <dependency org="javax.inject" name="com.springsource.javax.inject" rev="0.9.0.PFD" conf="test->compile"/>
<dependency org="javax.persistence" name="com.springsource.javax.persistence" rev="1.0.0" conf="provided->compile"/>
<dependency org="javax.portlet" name="com.springsource.javax.portlet" rev="2.0.0" conf="provided->compile"/>
<dependency org="javax.servlet" name="com.springsource.javax.servlet" rev="2.5.0" conf="provided->compile"/>
diff --git a/org.springframework.test/pom.xml b/org.springframework.test/pom.xml
index 3518579246f8..332939c8b645 100644
--- a/org.springframework.test/pom.xml
+++ b/org.springframework.test/pom.xml
@@ -25,6 +25,12 @@
<version>1.0</version>
<scope>provided</scope>
</dependency>
+ <dependency>
+ <groupId>javax.inject</groupId>
+ <artifactId>com.springsource.javax.inject</artifactId>
+ <version>0.9.0.PFD</version>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>javax.persistence</groupId>
<artifactId>persistence-api</artifactId>
diff --git a/org.springframework.test/src/test/java/org/springframework/test/context/junit4/SpringJUnit4ClassRunnerAppCtxTests.java b/org.springframework.test/src/test/java/org/springframework/test/context/junit4/SpringJUnit4ClassRunnerAppCtxTests.java
index 91b51242bdfc..e23164bcf1f2 100644
--- a/org.springframework.test/src/test/java/org/springframework/test/context/junit4/SpringJUnit4ClassRunnerAppCtxTests.java
+++ b/org.springframework.test/src/test/java/org/springframework/test/context/junit4/SpringJUnit4ClassRunnerAppCtxTests.java
@@ -19,9 +19,11 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import javax.annotation.Resource;
+import javax.inject.Inject;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -48,6 +50,7 @@
* <ul>
* <li>{@link ContextConfiguration @ContextConfiguration}</li>
* <li>{@link Autowired @Autowired}</li>
+ * <li>{@link Inject @Inject}</li>
* <li>{@link Qualifier @Qualifier}</li>
* <li>{@link Resource @Resource}</li>
* <li>{@link ApplicationContextAware}</li>
@@ -59,10 +62,12 @@
* {@link ContextConfiguration#locations() locations} are explicitly declared
* and since the {@link ContextConfiguration#loader() ContextLoader} is left set
* to the default value of {@link GenericXmlContextLoader}, this test class's
- * dependencies will be injected via {@link Autowired @Autowired} and
- * {@link Resource @Resource} from beans defined in the
- * {@link ApplicationContext} loaded from the default classpath resource:
- * <code>"/org/springframework/test/context/junit/SpringJUnit4ClassRunnerAppCtxTests-context.xml"</code>.
+ * dependencies will be injected via {@link Autowired @Autowired},
+ * {@link Inject @Inject}, and {@link Resource @Resource} from beans defined in
+ * the {@link ApplicationContext} loaded from the default classpath resource:
+ *
+ * <code>"/org/springframework/test/context/junit/SpringJUnit4ClassRunnerAppCtxTests-context.xml"</code>
+ * .
* </p>
*
* @author Sam Brannen
@@ -93,12 +98,15 @@ public class SpringJUnit4ClassRunnerAppCtxTests implements ApplicationContextAwa
private Employee employee;
@Autowired
- private Pet pet;
+ private Pet autowiredPet;
+
+ @Inject
+ private Pet injectedPet;
@Autowired(required = false)
protected Long nonrequiredLong;
- @Resource()
+ @Resource
protected String foo;
protected String bar;
@@ -153,11 +161,14 @@ public final void verifyBeanNameSet() {
}
@Test
- public final void verifyAnnotationAutowiredFields() {
+ public final void verifyAnnotationAutowiredAndInjectedFields() {
assertNull("The nonrequiredLong field should NOT have been autowired.", this.nonrequiredLong);
assertEquals("The quux field should have been autowired via @Autowired and @Qualifier.", "Quux", this.quux);
- assertNotNull("The pet field should have been autowired.", this.pet);
- assertEquals("Fido", this.pet.getName());
+ assertNotNull("The pet field should have been autowired.", this.autowiredPet);
+ assertNotNull("The pet field should have been injected.", this.injectedPet);
+ assertEquals("Fido", this.autowiredPet.getName());
+ assertEquals("Fido", this.injectedPet.getName());
+ assertSame("@Autowired and @Inject pet should be the same object.", this.autowiredPet, this.injectedPet);
}
@Test
@@ -176,4 +187,4 @@ public final void verifyResourceAnnotationWiredMethods() {
assertEquals("The bar method should have been wired via @Resource.", "Bar", this.bar);
}
-}
+}
\ No newline at end of file
diff --git a/org.springframework.test/test.iml b/org.springframework.test/test.iml
index a5a65c18b9ae..924d7b776d9d 100644
--- a/org.springframework.test/test.iml
+++ b/org.springframework.test/test.iml
@@ -24,6 +24,7 @@
<orderEntry type="library" name="Commons Logging" level="project" />
<orderEntry type="library" name="EasyMock" level="project" />
<orderEntry type="library" name="javax.el" level="project" />
+ <orderEntry type="library" name="javax.inject" level="project" />
<orderEntry type="library" name="JUnit" level="project" />
<orderEntry type="module-library">
<library>
|
3086b191dda16c22e7a909f131296f6c060bc639
|
hbase
|
HBASE-1647 Filter#filterRow is called too often, filters rows it shouldn't have -- reversed it for a moment; it may have broken things -- not sure yet. git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@798510 13f79535-47bb-0310-9956-ffa450edef68
|
c
|
https://github.com/apache/hbase
|
diff --git a/CHANGES.txt b/CHANGES.txt
index 58065a2b05d7..564b5dce52fd 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -295,8 +295,6 @@ Release 0.20.0 - Unreleased
(Tim Sell and Ryan Rawson via Stack)
HBASE-1703 ICVs across /during a flush can cause multiple keys with the
same TS (bad)
- HBASE-1647 Filter#filterRow is called too often, filters rows it
- shouldn't have (Doğacan Güney via Ryan Rawson and Stack)
IMPROVEMENTS
HBASE-1089 Add count of regions on filesystem to master UI; add percentage
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 23c141b92cce..29ea7e5bfd0b 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -53,8 +53,6 @@
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.Reference.Range;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -1683,13 +1681,8 @@ public Path getBaseDir() {
class RegionScanner implements InternalScanner {
private final KeyValueHeap storeHeap;
private final byte [] stopRow;
- private Filter filter;
- private RowFilterInterface oldFilter;
- private List<KeyValue> results = new ArrayList<KeyValue>();
RegionScanner(Scan scan, List<KeyValueScanner> additionalScanners) {
- this.filter = scan.getFilter();
- this.oldFilter = scan.getOldFilter();
if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) {
this.stopRow = null;
} else {
@@ -1713,74 +1706,46 @@ class RegionScanner implements InternalScanner {
this(scan, null);
}
- private void resetFilters() {
- if (filter != null) {
- filter.reset();
- }
- if (oldFilter != null) {
- oldFilter.reset();
- }
- }
-
/**
* Get the next row of results from this region.
* @param results list to append results to
* @return true if there are more rows, false if scanner is done
*/
- @Override
- public boolean next(List<KeyValue> outResults) throws IOException {
- results.clear();
- boolean returnResult = nextInternal();
- if (!returnResult && filter != null && filter.filterRow()) {
- results.clear();
- }
- outResults.addAll(results);
- resetFilters();
- return returnResult;
- }
-
- private boolean nextInternal() throws IOException {
+ public boolean next(List<KeyValue> results)
+ throws IOException {
// This method should probably be reorganized a bit... has gotten messy
- KeyValue kv;
- byte[] currentRow = null;
- boolean filterCurrentRow = false;
+ KeyValue kv = this.storeHeap.peek();
+ if (kv == null) {
+ return false;
+ }
+ byte [] currentRow = kv.getRow();
+ // See if we passed stopRow
+ if (stopRow != null &&
+ comparator.compareRows(stopRow, 0, stopRow.length,
+ currentRow, 0, currentRow.length) <= 0) {
+ return false;
+ }
+ this.storeHeap.next(results);
while(true) {
kv = this.storeHeap.peek();
if (kv == null) {
return false;
}
byte [] row = kv.getRow();
- if (filterCurrentRow && Bytes.equals(currentRow, row)) {
- // filter all columns until row changes
- this.storeHeap.next(results);
- results.clear();
- continue;
- }
- // see if current row should be filtered based on row key
- if ((filter != null && filter.filterRowKey(row, 0, row.length)) ||
- (oldFilter != null && oldFilter.filterRowKey(row, 0, row.length))) {
- this.storeHeap.next(results);
- results.clear();
- resetFilters();
- filterCurrentRow = true;
- currentRow = row;
- continue;
- }
if(!Bytes.equals(currentRow, row)) {
- // Continue on the next row:
- currentRow = row;
- filterCurrentRow = false;
- // See if we passed stopRow
- if(stopRow != null &&
- comparator.compareRows(stopRow, 0, stopRow.length,
- currentRow, 0, currentRow.length) <= 0) {
- return false;
- }
- // if there are _no_ results or current row should be filtered
- if (results.isEmpty() || filter != null && filter.filterRow()) {
- // make sure results is empty
- results.clear();
- resetFilters();
+ // Next row:
+
+ // what happens if there are _no_ results:
+ if (results.isEmpty()) {
+ // Continue on the next row:
+ currentRow = row;
+
+ // But did we pass the stop row?
+ if (stopRow != null &&
+ comparator.compareRows(stopRow, 0, stopRow.length,
+ currentRow, 0, currentRow.length) <= 0) {
+ return false;
+ }
continue;
}
return true;
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java b/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java
index a0ba3369ba44..9e9295de9621 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java
@@ -325,6 +325,7 @@ public void update() {
public void reset() {
this.deletes.reset();
this.columns.reset();
+ if (this.filter != null) this.filter.reset();
}
/**
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
index 2ad16708e76c..cb15d317823b 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
@@ -114,6 +114,16 @@ public MatchCode match(KeyValue kv) {
if (this.stickyNextRow)
return MatchCode.SEEK_NEXT_ROW;
+ // Give the row filter a chance to do it's job.
+ if (filter != null && filter.filterRowKey(bytes, offset, rowLength)) {
+ stickyNextRow = true; // optimize to keep from calling the filter too much.
+ return MatchCode.SEEK_NEXT_ROW;
+ } else if (oldFilter != null && oldFilter.filterRowKey(bytes, offset, rowLength)) {
+ stickyNextRow = true;
+ return MatchCode.SEEK_NEXT_ROW;
+ }
+
+
if (this.columns.done()) {
stickyNextRow = true;
return MatchCode.SEEK_NEXT_ROW;
@@ -189,6 +199,16 @@ public MatchCode match(KeyValue kv) {
return MatchCode.SEEK_NEXT_ROW;
}
+ /**
+ * If the row was otherwise going to be included, call this to last-minute
+ * check.
+ *
+ * @return <code>true</code> if the row should be filtered.
+ */
+ public boolean filterEntireRow() {
+ return filter == null? false: filter.filterRow();
+ }
+
/**
* Set current row
* @param row
@@ -203,5 +223,7 @@ public void setRow(byte [] row) {
public void reset() {
super.reset();
stickyNextRow = false;
+ if (filter != null)
+ filter.reset();
}
}
\ No newline at end of file
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 0b39a9871afa..1c279fccca98 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -162,12 +162,20 @@ public synchronized boolean next(List<KeyValue> outResult) throws IOException {
continue;
case DONE:
+ if (matcher.filterEntireRow()) {
+ // nuke all results, and then return.
+ results.clear();
+ }
// copy jazz
outResult.addAll(results);
return true;
case DONE_SCAN:
+ if (matcher.filterEntireRow()) {
+ // nuke all results, and then return.
+ results.clear();
+ }
close();
// copy jazz
@@ -194,6 +202,11 @@ public synchronized boolean next(List<KeyValue> outResult) throws IOException {
throw new RuntimeException("UNEXPECTED");
}
}
+
+ if (matcher.filterEntireRow()) {
+ // nuke all results, and then return.
+ results.clear();
+ }
if (!results.isEmpty()) {
// copy jazz
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java b/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
index d93668222296..369b504faec1 100644
--- a/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
+++ b/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
@@ -38,14 +38,6 @@
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
-import org.apache.hadoop.hbase.filter.InclusiveStopRowFilter;
-import org.apache.hadoop.hbase.filter.PrefixFilter;
-import org.apache.hadoop.hbase.filter.PrefixRowFilter;
-import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
@@ -117,7 +109,7 @@ public void testStopRow() throws Exception {
count++;
}
s.close();
- assertEquals(0, count);
+ assertEquals(1, count);
// Now do something a bit more imvolved.
scan = new Scan(startrow, stoprow);
scan.addFamily(HConstants.CATALOG_FAMILY);
@@ -144,69 +136,6 @@ public void testStopRow() throws Exception {
shutdownDfs(this.cluster);
}
}
-
- void rowPrefixFilter(Scan scan) throws IOException {
- List<KeyValue> results = new ArrayList<KeyValue>();
- scan.addFamily(HConstants.CATALOG_FAMILY);
- InternalScanner s = r.getScanner(scan);
- boolean hasMore = true;
- while (hasMore) {
- hasMore = s.next(results);
- for (KeyValue kv : results) {
- assertEquals((byte)'a', kv.getRow()[0]);
- assertEquals((byte)'b', kv.getRow()[1]);
- }
- results.clear();
- }
- s.close();
- }
-
- void rowInclusiveStopFilter(Scan scan, byte[] stopRow) throws IOException {
- List<KeyValue> results = new ArrayList<KeyValue>();
- scan.addFamily(HConstants.CATALOG_FAMILY);
- InternalScanner s = r.getScanner(scan);
- boolean hasMore = true;
- while (hasMore) {
- hasMore = s.next(results);
- for (KeyValue kv : results) {
- assertTrue(Bytes.compareTo(kv.getRow(), stopRow) <= 0);
- }
- results.clear();
- }
- s.close();
- }
-
- public void testFilters() throws IOException {
- try {
- this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
- addContent(this.r, HConstants.CATALOG_FAMILY);
- Filter newFilter = new PrefixFilter(Bytes.toBytes("ab"));
- Scan scan = new Scan();
- scan.setFilter(newFilter);
- rowPrefixFilter(scan);
- RowFilterInterface oldFilter = new PrefixRowFilter(Bytes.toBytes("ab"));
- scan = new Scan();
- scan.setOldFilter(oldFilter);
- rowPrefixFilter(scan);
-
- byte[] stopRow = Bytes.toBytes("bbc");
- newFilter = new WhileMatchFilter(new InclusiveStopFilter(stopRow));
- scan = new Scan();
- scan.setFilter(newFilter);
- rowInclusiveStopFilter(scan, stopRow);
-
- oldFilter = new WhileMatchRowFilter(
- new InclusiveStopRowFilter(stopRow));
- scan = new Scan();
- scan.setOldFilter(oldFilter);
- rowInclusiveStopFilter(scan, stopRow);
-
- } finally {
- this.r.close();
- this.r.getLog().closeAndDelete();
- shutdownDfs(this.cluster);
- }
- }
/** The test!
* @throws IOException
@@ -387,6 +316,7 @@ private void scan(boolean validateStartcode, String serverName)
String server = Bytes.toString(val);
assertEquals(0, server.compareTo(serverName));
}
+ results.clear();
}
} finally {
InternalScanner s = scanner;
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/src/test/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
index 8fb2cc18b793..6f611b1f4259 100644
--- a/src/test/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
+++ b/src/test/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
@@ -20,23 +20,25 @@
package org.apache.hadoop.hbase.regionserver;
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueTestUtil;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.WhileMatchFilter;
+import org.apache.hadoop.hbase.filter.*;
+import org.apache.hadoop.hbase.util.Bytes;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueTestUtil;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
public class TestStoreScanner extends TestCase {
final byte [] CF = Bytes.toBytes("cf");
-
+
/**
* Test utility for building a NavigableSet for scanners.
* @param strCols
@@ -65,9 +67,9 @@ public void testScanSameTimestamp() throws IOException {
Scan scanSpec = new Scan(Bytes.toBytes("R1"));
// this only uses maxVersions (default=1) and TimeRange (default=all)
StoreScanner scan =
- new StoreScanner(scanSpec, CF, Long.MAX_VALUE,
- KeyValue.COMPARATOR, getCols("a"),
- scanners);
+ new StoreScanner(scanSpec, CF, Long.MAX_VALUE,
+ KeyValue.COMPARATOR, getCols("a"),
+ scanners);
List<KeyValue> results = new ArrayList<KeyValue>();
assertEquals(true, scan.next(results));
@@ -96,9 +98,9 @@ public void testWontNextToNext() throws IOException {
Scan scanSpec = new Scan(Bytes.toBytes("R1"));
// this only uses maxVersions (default=1) and TimeRange (default=all)
StoreScanner scan =
- new StoreScanner(scanSpec, CF, Long.MAX_VALUE,
- KeyValue.COMPARATOR, getCols("a"),
- scanners);
+ new StoreScanner(scanSpec, CF, Long.MAX_VALUE,
+ KeyValue.COMPARATOR, getCols("a"),
+ scanners);
List<KeyValue> results = new ArrayList<KeyValue>();
scan.next(results);
@@ -128,8 +130,8 @@ public void testDeleteVersionSameTimestamp() throws IOException {
};
Scan scanSpec = new Scan(Bytes.toBytes("R1"));
StoreScanner scan =
- new StoreScanner(scanSpec, CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
- getCols("a"), scanners);
+ new StoreScanner(scanSpec, CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+ getCols("a"), scanners);
List<KeyValue> results = new ArrayList<KeyValue>();
assertFalse(scan.next(results));
@@ -151,9 +153,9 @@ public void testDeletedRowThenGoodRow() throws IOException {
};
Scan scanSpec = new Scan(Bytes.toBytes("R1"));
StoreScanner scan =
- new StoreScanner(scanSpec, CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
- getCols("a"), scanners);
-
+ new StoreScanner(scanSpec, CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+ getCols("a"), scanners);
+
List<KeyValue> results = new ArrayList<KeyValue>();
assertEquals(true, scan.next(results));
assertEquals(0, results.size());
@@ -181,8 +183,8 @@ public void testDeleteVersionMaskingMultiplePuts() throws IOException {
new KeyValueScanFixture(KeyValue.COMPARATOR, kvs2)
};
StoreScanner scan =
- new StoreScanner(new Scan(Bytes.toBytes("R1")), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
- getCols("a"), scanners);
+ new StoreScanner(new Scan(Bytes.toBytes("R1")), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+ getCols("a"), scanners);
List<KeyValue> results = new ArrayList<KeyValue>();
// the two put at ts=now will be masked by the 1 delete, and
// since the scan default returns 1 version we'll return the newest
@@ -209,8 +211,8 @@ public void testDeleteVersionsMixedAndMultipleVersionReturn() throws IOException
};
Scan scanSpec = new Scan(Bytes.toBytes("R1")).setMaxVersions(2);
StoreScanner scan =
- new StoreScanner(scanSpec, CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
- getCols("a"), scanners);
+ new StoreScanner(scanSpec, CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+ getCols("a"), scanners);
List<KeyValue> results = new ArrayList<KeyValue>();
assertEquals(true, scan.next(results));
assertEquals(2, results.size());
@@ -219,17 +221,17 @@ public void testDeleteVersionsMixedAndMultipleVersionReturn() throws IOException
}
public void testWildCardOneVersionScan() throws IOException {
- KeyValue [] kvs = new KeyValue [] {
- KeyValueTestUtil.create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"),
- KeyValueTestUtil.create("R1", "cf", "b", 1, KeyValue.Type.Put, "dont-care"),
- KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.DeleteColumn, "dont-care"),
- };
+ KeyValue [] kvs = new KeyValue [] {
+ KeyValueTestUtil.create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R1", "cf", "b", 1, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.DeleteColumn, "dont-care"),
+ };
KeyValueScanner [] scanners = new KeyValueScanner[] {
new KeyValueScanFixture(KeyValue.COMPARATOR, kvs)
};
StoreScanner scan =
- new StoreScanner(new Scan(Bytes.toBytes("R1")), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
- null, scanners);
+ new StoreScanner(new Scan(Bytes.toBytes("R1")), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+ null, scanners);
List<KeyValue> results = new ArrayList<KeyValue>();
assertEquals(true, scan.next(results));
assertEquals(2, results.size());
@@ -259,8 +261,8 @@ public void testWildCardScannerUnderDeletes() throws IOException {
new KeyValueScanFixture(KeyValue.COMPARATOR, kvs)
};
StoreScanner scan =
- new StoreScanner(new Scan().setMaxVersions(2), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
- null, scanners);
+ new StoreScanner(new Scan().setMaxVersions(2), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+ null, scanners);
List<KeyValue> results = new ArrayList<KeyValue>();
assertEquals(true, scan.next(results));
assertEquals(5, results.size());
@@ -289,8 +291,8 @@ public void testDeleteFamily() throws IOException {
new KeyValueScanFixture(KeyValue.COMPARATOR, kvs)
};
StoreScanner scan =
- new StoreScanner(new Scan().setMaxVersions(Integer.MAX_VALUE), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
- null, scanners);
+ new StoreScanner(new Scan().setMaxVersions(Integer.MAX_VALUE), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+ null, scanners);
List<KeyValue> results = new ArrayList<KeyValue>();
assertEquals(true, scan.next(results));
assertEquals(0, results.size());
@@ -312,8 +314,8 @@ public void testDeleteColumn() throws IOException {
new KeyValueScanFixture(KeyValue.COMPARATOR, kvs),
};
StoreScanner scan =
- new StoreScanner(new Scan(), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
- null, scanners);
+ new StoreScanner(new Scan(), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+ null, scanners);
List<KeyValue> results = new ArrayList<KeyValue>();
assertEquals(true, scan.next(results));
assertEquals(1, results.size());
@@ -337,9 +339,9 @@ public void testSkipColumn() throws IOException {
new KeyValueScanFixture(KeyValue.COMPARATOR, kvs)
};
StoreScanner scan =
- new StoreScanner(new Scan(), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
- getCols("a", "d"), scanners);
-
+ new StoreScanner(new Scan(), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+ getCols("a", "d"), scanners);
+
List<KeyValue> results = new ArrayList<KeyValue>();
assertEquals(true, scan.next(results));
assertEquals(2, results.size());
@@ -350,8 +352,156 @@ public void testSkipColumn() throws IOException {
assertEquals(true, scan.next(results));
assertEquals(1, results.size());
assertEquals(kvs[kvs.length-1], results.get(0));
-
+
results.clear();
assertEquals(false, scan.next(results));
}
+
+ KeyValue [] stdKvs = new KeyValue[] {
+ KeyValueTestUtil.create("R:1", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:1", "cf", "b", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:1", "cf", "c", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:1", "cf", "d", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:1", "cf", "e", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:1", "cf", "f", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:1", "cf", "g", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:1", "cf", "h", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:1", "cf", "i", 11, KeyValue.Type.Put, "dont-care"),
+
+ // 9...
+ KeyValueTestUtil.create("R:2", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:2", "cf", "c", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:2", "cf", "c", 10, KeyValue.Type.Put, "dont-care"),
+
+ // 12...
+ KeyValueTestUtil.create("R:3", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:3", "cf", "c", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:3", "cf", "c", 10, KeyValue.Type.Put, "dont-care"),
+
+ // 15 ...
+ KeyValueTestUtil.create("R:4", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:4", "cf", "c", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:4", "cf", "c", 10, KeyValue.Type.Put, "dont-care"),
+
+ // 18 ..
+ KeyValueTestUtil.create("R:5", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:5", "cf", "c", 11, KeyValue.Type.Put, "dont-care"),
+
+ // 20...
+ KeyValueTestUtil.create("R:6", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:6", "cf", "c", 11, KeyValue.Type.Put, "dont-care"),
+
+ // 22...
+ KeyValueTestUtil.create("R:7", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:7", "cf", "c", 11, KeyValue.Type.Put, "dont-care"),
+
+ // 24...
+ KeyValueTestUtil.create("R:8", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+ KeyValueTestUtil.create("R:8", "cf", "c", 11, KeyValue.Type.Put, "dont-care"),
+
+ // 26 ..
+ KeyValueTestUtil.create("RA:1", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+
+ // 27...
+ KeyValueTestUtil.create("RA:2", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+
+ // 28..
+ KeyValueTestUtil.create("RA:3", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
+ };
+ private StoreScanner getTestScanner(Scan s, NavigableSet<byte[]> cols) {
+ KeyValueScanner [] scanners = new KeyValueScanner[] {
+ new KeyValueScanFixture(KeyValue.COMPARATOR, stdKvs)
+ };
+
+ return new StoreScanner(s, CF, Long.MAX_VALUE, KeyValue.COMPARATOR, cols,
+ scanners);
+ }
+
+
+ // Test new and old row prefix filters.
+ public void testNewRowPrefixFilter() throws IOException {
+ Filter f = new WhileMatchFilter(
+ new PrefixFilter(Bytes.toBytes("R:")));
+ Scan s = new Scan(Bytes.toBytes("R:7"));
+ s.setFilter(f);
+
+ rowPrefixFilter(s);
+ }
+
+ public void testOldRowPrefixFilter() throws IOException {
+ RowFilterInterface f = new WhileMatchRowFilter(
+ new PrefixRowFilter(Bytes.toBytes("R:")));
+ Scan s = new Scan(Bytes.toBytes("R:7"));
+ s.setOldFilter(f);
+
+ rowPrefixFilter(s);
+
+ }
+ public void rowPrefixFilter(Scan s) throws IOException {
+
+ StoreScanner scan = getTestScanner(s, null);
+
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ assertTrue(scan.next(results));
+ assertEquals(2, results.size());
+ assertEquals(stdKvs[22], results.get(0));
+ assertEquals(stdKvs[23], results.get(1));
+ results.clear();
+
+ assertTrue(scan.next(results));
+ assertEquals(2, results.size());
+ assertEquals(stdKvs[24], results.get(0));
+ assertEquals(stdKvs[25], results.get(1));
+ results.clear();
+
+ assertFalse(scan.next(results));
+ assertEquals(0, results.size());
+ }
+
+ // Test new and old row-inclusive stop filter.
+ public void testNewRowInclusiveStopFilter() throws IOException {
+ Filter f = new WhileMatchFilter(new InclusiveStopFilter(Bytes.toBytes("R:3")));
+ Scan scan = new Scan();
+ scan.setFilter(f);
+
+ rowInclusiveStopFilter(scan);
+ }
+
+ public void testOldRowInclusiveTopFilter() throws IOException {
+ RowFilterInterface f = new WhileMatchRowFilter(
+ new InclusiveStopRowFilter(Bytes.toBytes("R:3")));
+ Scan scan = new Scan();
+ scan.setOldFilter(f);
+
+ rowInclusiveStopFilter(scan);
+ }
+
+ public void rowInclusiveStopFilter(Scan scan) throws IOException {
+ StoreScanner s = getTestScanner(scan, getCols("a"));
+
+ // read crap.
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ assertTrue(s.next(results));
+ assertEquals(1, results.size());
+ assertEquals(stdKvs[0], results.get(0));
+ results.clear();
+
+ assertTrue(s.next(results));
+ assertEquals(1, results.size());
+ assertEquals(stdKvs[9], results.get(0));
+ results.clear();
+
+ assertTrue(s.next(results));
+ assertEquals(1, results.size());
+ assertEquals(stdKvs[12], results.get(0));
+ results.clear();
+
+ // without aggressive peeking, the scanner doesnt know if the next row is good or not
+ // under the affects of a filter.
+ assertFalse(s.next(results));
+ assertEquals(0, results.size());
+ }
+
+
+
}
|
9bac807cedbcff34e1a144fb475eff267e5ed86d
|
hadoop
|
MAPREDUCE-2187. Reporter sends progress during sort/merge. Contributed by Anupam Seth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1152964 13f79535-47bb-0310-9956-ffa450edef68
|
c
|
https://github.com/apache/hadoop
|
diff --git a/mapreduce/CHANGES.txt b/mapreduce/CHANGES.txt
index aee8b0a8cedc4..a95155a931d7b 100644
--- a/mapreduce/CHANGES.txt
+++ b/mapreduce/CHANGES.txt
@@ -40,6 +40,9 @@ Trunk (unreleased changes)
IMPROVEMENTS
+ MAPREDUCE-2187. Reporter sends progress during sort/merge. (Anupam Seth via
+ acmurthy)
+
MAPREDUCE-2365. Add counters to track bytes (read,written) via
File(Input,Output)Format. (Siddharth Seth via acmurthy)
diff --git a/mapreduce/src/java/mapred-default.xml b/mapreduce/src/java/mapred-default.xml
index 0b74e9778cb6a..db2d79a35dfd0 100644
--- a/mapreduce/src/java/mapred-default.xml
+++ b/mapreduce/src/java/mapred-default.xml
@@ -1041,6 +1041,14 @@
</property>
<!-- End of TaskTracker DistributedCache configuration -->
+<property>
+ <name>mapreduce.task.combine.progress.records</name>
+ <value>10000</value>
+ <description> The number of records to process during combine output collection
+ before sending a progress notification to the TaskTracker.
+ </description>
+</property>
+
<property>
<name>mapreduce.task.merge.progress.records</name>
<value>10000</value>
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java b/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java
index 44ba9a7e68a03..951b45ae70fa4 100644
--- a/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java
+++ b/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java
@@ -946,7 +946,7 @@ public MapOutputBuffer(TaskUmbilicalProtocol umbilical, JobConf job,
if (combinerRunner != null) {
final Counters.Counter combineOutputCounter =
reporter.getCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
- combineCollector= new CombineOutputCollector<K,V>(combineOutputCounter);
+ combineCollector= new CombineOutputCollector<K,V>(combineOutputCounter, reporter, conf);
} else {
combineCollector = null;
}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/ReduceTask.java b/mapreduce/src/java/org/apache/hadoop/mapred/ReduceTask.java
index 0225982139b8b..6256c662730e8 100644
--- a/mapreduce/src/java/org/apache/hadoop/mapred/ReduceTask.java
+++ b/mapreduce/src/java/org/apache/hadoop/mapred/ReduceTask.java
@@ -352,7 +352,7 @@ public void run(JobConf job, final TaskUmbilicalProtocol umbilical)
Class combinerClass = conf.getCombinerClass();
CombineOutputCollector combineCollector =
(null != combinerClass) ?
- new CombineOutputCollector(reduceCombineOutputCounter) : null;
+ new CombineOutputCollector(reduceCombineOutputCounter, reporter, conf) : null;
Shuffle shuffle =
new Shuffle(getTaskID(), job, FileSystem.getLocal(job), umbilical,
diff --git a/mapreduce/src/java/org/apache/hadoop/mapred/Task.java b/mapreduce/src/java/org/apache/hadoop/mapred/Task.java
index f5abb3022a56f..8ad56a7d05137 100644
--- a/mapreduce/src/java/org/apache/hadoop/mapred/Task.java
+++ b/mapreduce/src/java/org/apache/hadoop/mapred/Task.java
@@ -58,6 +58,7 @@
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;
import org.apache.hadoop.mapreduce.task.ReduceContextImpl;
import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
@@ -79,6 +80,7 @@ abstract public class Task implements Writable, Configurable {
LogFactory.getLog(Task.class);
public static String MERGED_OUTPUT_PREFIX = ".merged";
+ public static final long DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS = 10000;
/**
* Counters to measure the usage of the different file systems.
@@ -1176,16 +1178,26 @@ public static class CombineOutputCollector<K extends Object, V extends Object>
implements OutputCollector<K, V> {
private Writer<K, V> writer;
private Counters.Counter outCounter;
- public CombineOutputCollector(Counters.Counter outCounter) {
+ private Progressable progressable;
+ private long progressBar;
+
+ public CombineOutputCollector(Counters.Counter outCounter, Progressable progressable, Configuration conf) {
this.outCounter = outCounter;
+ this.progressable=progressable;
+ progressBar = conf.getLong(MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS, DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS);
}
+
public synchronized void setWriter(Writer<K, V> writer) {
this.writer = writer;
}
+
public synchronized void collect(K key, V value)
throws IOException {
outCounter.increment(1);
writer.append(key, value);
+ if ((outCounter.getValue() % progressBar) == 0) {
+ progressable.progress();
+ }
}
}
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index bcaeaf147af0e..0054646caf185 100644
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/mapreduce/src/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -260,6 +260,8 @@ public interface MRJobConfig {
public static final String REDUCE_MEMTOMEM_ENABLED = "mapreduce.reduce.merge.memtomem.enabled";
+ public static final String COMBINE_RECORDS_BEFORE_PROGRESS = "mapreduce.task.combine.progress.records";
+
public static final String JOB_NAMENODES = "mapreduce.job.hdfs-servers";
public static final String JOB_JOBTRACKER_ID = "mapreduce.job.kerberos.jtprinicipal";
|
4fa8844a07917f925668409e461c2c48f9bbc965
|
camel
|
CAMEL-6053: Allow to override blueprint config admin placeholders from unit test. git-svn-id: https://svn.apache.org/repos/asf/camel/trunk@1443931 13f79535-47bb-0310-9956-ffa450edef68
|
a
|
https://github.com/apache/camel
|
diff --git a/components/camel-test-blueprint/src/main/java/org/apache/camel/test/blueprint/CamelBlueprintTestSupport.java b/components/camel-test-blueprint/src/main/java/org/apache/camel/test/blueprint/CamelBlueprintTestSupport.java
index 72db54659de43..d26d6992e739e 100644
--- a/components/camel-test-blueprint/src/main/java/org/apache/camel/test/blueprint/CamelBlueprintTestSupport.java
+++ b/components/camel-test-blueprint/src/main/java/org/apache/camel/test/blueprint/CamelBlueprintTestSupport.java
@@ -16,6 +16,7 @@
*/
package org.apache.camel.test.blueprint;
+import java.util.Dictionary;
import java.util.Properties;
import org.apache.camel.CamelContext;
@@ -26,6 +27,8 @@
import org.junit.Before;
import org.osgi.framework.BundleContext;
import org.osgi.service.blueprint.container.BlueprintContainer;
+import org.osgi.service.cm.Configuration;
+import org.osgi.service.cm.ConfigurationAdmin;
/**
* Base class for OSGi Blueprint unit tests with Camel.
@@ -47,6 +50,19 @@ public void setUp() throws Exception {
bundleContext.registerService(PropertiesComponent.OVERRIDE_PROPERTIES, extra, null);
}
+ // allow end users to override config admin service with extra properties
+ Dictionary props = new Properties();
+ String pid = useOverridePropertiesWithConfigAdmin(props);
+ if (pid != null) {
+ ConfigurationAdmin configAdmin = getOsgiService(ConfigurationAdmin.class);
+ Configuration config = configAdmin.getConfiguration(pid);
+ if (config == null) {
+ throw new IllegalArgumentException("Cannot find configuration with pid " + pid + " in OSGi ConfigurationAdmin service.");
+ }
+ log.info("Updating ConfigAdmin {} by overriding properties {}", config, props);
+ config.update(props);
+ }
+
super.setUp();
// must wait for blueprint container to be published then the namespace parser is complete and we are ready for testing
@@ -54,6 +70,16 @@ public void setUp() throws Exception {
getOsgiService(BlueprintContainer.class, "(osgi.blueprint.container.symbolicname=" + symbolicName + ")");
}
+ /**
+ * Override this method to override config admin properties.
+ *
+ * @param props properties where you add the properties to override
+ * @return the PID of the OSGi {@link ConfigurationAdmin} which are defined in the Blueprint XML file.
+ */
+ protected String useOverridePropertiesWithConfigAdmin(Dictionary props) {
+ return null;
+ }
+
@After
@Override
public void tearDown() throws Exception {
diff --git a/components/camel-test-blueprint/src/test/java/org/apache/camel/test/blueprint/ConfigAdminOverridePropertiesOutsideCamelContextTest.java b/components/camel-test-blueprint/src/test/java/org/apache/camel/test/blueprint/ConfigAdminOverridePropertiesOutsideCamelContextTest.java
new file mode 100644
index 0000000000000..776594730ecde
--- /dev/null
+++ b/components/camel-test-blueprint/src/test/java/org/apache/camel/test/blueprint/ConfigAdminOverridePropertiesOutsideCamelContextTest.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.test.blueprint;
+
+import java.util.Dictionary;
+
+import org.junit.Test;
+
+/**
+ *
+ */
+public class ConfigAdminOverridePropertiesOutsideCamelContextTest extends CamelBlueprintTestSupport {
+
+ @Override
+ protected String getBlueprintDescriptor() {
+ return "org/apache/camel/test/blueprint/configadmin-outside.xml";
+ }
+
+ // START SNIPPET: e1
+ @Override
+ protected String useOverridePropertiesWithConfigAdmin(Dictionary props) {
+ // add the properties we want to override
+ props.put("greeting", "Bye");
+
+ // return the PID of the config-admin we are using in the blueprint xml file
+ return "my-placeholders";
+ }
+ // END SNIPPET: e1
+
+ @Test
+ public void testConfigAdmin() throws Exception {
+ getMockEndpoint("mock:result").expectedBodiesReceived("Bye World");
+
+ template.sendBody("direct:start", "World");
+
+ assertMockEndpointsSatisfied();
+ }
+
+}
diff --git a/components/camel-test-blueprint/src/test/java/org/apache/camel/test/blueprint/MyCoolBean.java b/components/camel-test-blueprint/src/test/java/org/apache/camel/test/blueprint/MyCoolBean.java
new file mode 100644
index 0000000000000..0b370879ad0a0
--- /dev/null
+++ b/components/camel-test-blueprint/src/test/java/org/apache/camel/test/blueprint/MyCoolBean.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.test.blueprint;
+
+/**
+ *
+ */
+public class MyCoolBean {
+
+ private String say;
+
+ public String getSay() {
+ return say;
+ }
+
+ public void setSay(String say) {
+ this.say = say;
+ }
+
+ public String saySomething(String s) {
+ return say + " " + s;
+ }
+}
diff --git a/components/camel-test-blueprint/src/test/resources/org/apache/camel/test/blueprint/configadmin-outside.xml b/components/camel-test-blueprint/src/test/resources/org/apache/camel/test/blueprint/configadmin-outside.xml
new file mode 100644
index 0000000000000..bef05a066bdfc
--- /dev/null
+++ b/components/camel-test-blueprint/src/test/resources/org/apache/camel/test/blueprint/configadmin-outside.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.0.0"
+ xsi:schemaLocation="
+ http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.0.0 http://aries.apache.org/schemas/blueprint-cm/blueprint-cm-1.0.0.xsd
+ http://www.osgi.org/xmlns/blueprint/v1.0.0 http://www.osgi.org/xmlns/blueprint/v1.0.0/blueprint.xsd">
+
+ <!-- blueprint property placeholders -->
+ <cm:property-placeholder persistent-id="my-placeholders">
+ <cm:default-properties>
+ <cm:property name="greeting" value="Hello"/>
+ <cm:property name="destination" value="mock:result"/>
+ </cm:default-properties>
+ </cm:property-placeholder>
+
+ <!-- a bean that uses a blueprint property placeholder -->
+ <bean id="myCoolBean" class="org.apache.camel.test.blueprint.MyCoolBean">
+ <property name="say" value="${greeting}"/>
+ </bean>
+
+ <camelContext xmlns="http://camel.apache.org/schema/blueprint">
+
+ <route>
+ <from uri="direct:start"/>
+ <bean ref="myCoolBean" method="saySomething"/>
+ <to uri="{{destination}}"/>
+ </route>
+
+ </camelContext>
+
+</blueprint>
|
1ae65999cf19232ed85d3329fd4f4887c2f3fe47
|
intellij-community
|
refactor name: it could be any RC
|
p
|
https://github.com/JetBrains/intellij-community
|
diff --git a/xml/impl/src/com/intellij/ide/browsers/BrowserStarter.java b/xml/impl/src/com/intellij/ide/browsers/BrowserStarter.java
index 8e3f014ea5aff..ca6726926d6f8 100644
--- a/xml/impl/src/com/intellij/ide/browsers/BrowserStarter.java
+++ b/xml/impl/src/com/intellij/ide/browsers/BrowserStarter.java
@@ -21,12 +21,12 @@ public class BrowserStarter {
private static final Logger LOG = Logger.getInstance(BrowserStarter.class);
private final StartBrowserSettings mySettings;
- private final RunConfiguration myNodeRunConfiguration;
+ private final RunConfiguration myRunConfiguration;
private final ProcessHandler myServerProcessHandler;
public BrowserStarter(@NotNull RunConfiguration runConfiguration, @NotNull StartBrowserSettings settings, @NotNull ProcessHandler serverProcessHandler) {
mySettings = settings;
- myNodeRunConfiguration = runConfiguration;
+ myRunConfiguration = runConfiguration;
myServerProcessHandler = serverProcessHandler;
}
@@ -102,7 +102,7 @@ public void run() {
private void openPageNow() {
if (!isProcessTerminated()) {
- JavaScriptDebuggerStarter.Util.startDebugOrLaunchBrowser(myNodeRunConfiguration, mySettings);
+ JavaScriptDebuggerStarter.Util.startDebugOrLaunchBrowser(myRunConfiguration, mySettings);
}
}
|
57f4f664ba0bf785e9903535a4965d786cf13062
|
kotlin
|
refactored generation of static initializer
|
p
|
https://github.com/JetBrains/kotlin
|
diff --git a/idea/src/org/jetbrains/jet/codegen/ClassBodyCodegen.java b/idea/src/org/jetbrains/jet/codegen/ClassBodyCodegen.java
index ed0735204e8d6..58a6fa5d7d4c4 100644
--- a/idea/src/org/jetbrains/jet/codegen/ClassBodyCodegen.java
+++ b/idea/src/org/jetbrains/jet/codegen/ClassBodyCodegen.java
@@ -4,8 +4,11 @@
import org.jetbrains.jet.lang.descriptors.PropertyDescriptor;
import org.jetbrains.jet.lang.psi.*;
import org.objectweb.asm.ClassVisitor;
+import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.commons.InstructionAdapter;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -22,6 +25,8 @@ public abstract class ClassBodyCodegen {
protected final ClassVisitor v;
protected final ClassContext context;
+ protected final List<CodeChunk> staticInitializerChunks = new ArrayList<CodeChunk>();
+
public ClassBodyCodegen(JetClassOrObject aClass, ClassContext context, ClassVisitor v, GenerationState state) {
this.state = state;
descriptor = state.getBindingContext().getClassDescriptor(aClass);
@@ -38,6 +43,8 @@ public void generate() {
generateClassBody();
+ generateStaticInitializer();
+
v.visitEnd();
}
@@ -95,4 +102,23 @@ protected List<JetParameter> getPrimaryConstructorParameters() {
}
return Collections.emptyList();
}
+
+ private void generateStaticInitializer() {
+ if (staticInitializerChunks.size() > 0) {
+ final MethodVisitor mv = v.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC,
+ "<clinit>", "()V", null, null);
+ mv.visitCode();
+
+ InstructionAdapter v = new InstructionAdapter(mv);
+
+ for (CodeChunk chunk : staticInitializerChunks) {
+ chunk.generate(v);
+ }
+
+ mv.visitInsn(Opcodes.RETURN);
+ mv.visitMaxs(0, 0);
+
+ mv.visitEnd();
+ }
+ }
}
diff --git a/idea/src/org/jetbrains/jet/codegen/CodeChunk.java b/idea/src/org/jetbrains/jet/codegen/CodeChunk.java
new file mode 100644
index 0000000000000..eb0aa42088415
--- /dev/null
+++ b/idea/src/org/jetbrains/jet/codegen/CodeChunk.java
@@ -0,0 +1,10 @@
+package org.jetbrains.jet.codegen;
+
+import org.objectweb.asm.commons.InstructionAdapter;
+
+/**
+ * @author yole
+ */
+public interface CodeChunk {
+ void generate(InstructionAdapter v);
+}
diff --git a/idea/src/org/jetbrains/jet/codegen/ImplementationBodyCodegen.java b/idea/src/org/jetbrains/jet/codegen/ImplementationBodyCodegen.java
index f013f20e2d91a..1287090056967 100644
--- a/idea/src/org/jetbrains/jet/codegen/ImplementationBodyCodegen.java
+++ b/idea/src/org/jetbrains/jet/codegen/ImplementationBodyCodegen.java
@@ -74,29 +74,74 @@ protected String getSuperClass() {
@Override
protected void generateSyntheticParts() {
- int typeinfoStatic = descriptor.getTypeConstructor().getParameters().size() > 0 ? 0 : Opcodes.ACC_STATIC;
- v.visitField(Opcodes.ACC_PRIVATE | typeinfoStatic, "$typeInfo", "Ljet/typeinfo/TypeInfo;", null, null);
+ generateFieldForTypeInfo();
+ generateFieldForObjectInstance();
+ generateFieldForClassObject();
+ try {
+ generatePrimaryConstructor();
+ }
+ catch(RuntimeException e) {
+ throw new RuntimeException("Error generating primary constructor of class " + myClass.getName() + " with kind " + kind, e);
+ }
+
+ generateGetTypeInfo();
+ }
+
+ private void generateFieldForTypeInfo() {
+ final boolean typeInfoIsStatic = descriptor.getTypeConstructor().getParameters().size() == 0;
+ v.visitField(Opcodes.ACC_PRIVATE | (typeInfoIsStatic ? Opcodes.ACC_STATIC : 0), "$typeInfo",
+ "Ljet/typeinfo/TypeInfo;", null, null);
+ if (typeInfoIsStatic) {
+ staticInitializerChunks.add(new CodeChunk() {
+ @Override
+ public void generate(InstructionAdapter v) {
+ JetTypeMapper typeMapper = state.getTypeMapper();
+ ClassCodegen.newTypeInfo(v, false, typeMapper.jvmType(descriptor, OwnerKind.INTERFACE));
+ v.putstatic(typeMapper.jvmName(descriptor, kind), "$typeInfo", "Ljet/typeinfo/TypeInfo;");
+ }
+ });
+ }
+ }
+
+ private void generateFieldForObjectInstance() {
if (isNonLiteralObject()) {
Type type = JetTypeMapper.jetImplementationType(descriptor);
v.visitField(Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC, "$instance", type.getDescriptor(), null, null);
+
+ staticInitializerChunks.add(new CodeChunk() {
+ @Override
+ public void generate(InstructionAdapter v) {
+ String name = jvmName();
+ v.anew(Type.getObjectType(name));
+ v.dup();
+ v.invokespecial(name, "<init>", "()V");
+ v.putstatic(name, "$instance", JetTypeMapper.jetImplementationType(descriptor).getDescriptor());
+ }
+ });
+
}
+ }
+
+ private void generateFieldForClassObject() {
final JetClassObject classObject = getClassObject();
if (classObject != null) {
Type type = Type.getObjectType(state.getTypeMapper().jvmName(classObject));
v.visitField(Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC, "$classobj", type.getDescriptor(), null, null);
- }
-
- generateStaticInitializer();
- try {
- generatePrimaryConstructor();
- }
- catch(RuntimeException e) {
- throw new RuntimeException("Error generating primary constructor of class " + myClass.getName() + " with kind " + kind, e);
+ staticInitializerChunks.add(new CodeChunk() {
+ @Override
+ public void generate(InstructionAdapter v) {
+ String name = state.getTypeMapper().jvmName(classObject);
+ final Type classObjectType = Type.getObjectType(name);
+ v.anew(classObjectType);
+ v.dup();
+ v.invokespecial(name, "<init>", "()V");
+ v.putstatic(state.getTypeMapper().jvmName(descriptor, OwnerKind.IMPLEMENTATION), "$classobj",
+ classObjectType.getDescriptor());
+ }
+ });
}
-
- generateGetTypeInfo();
}
protected void generatePrimaryConstructor() {
@@ -409,47 +454,6 @@ else if (declaration instanceof JetFunction) {
}
}
- private void generateStaticInitializer() {
- boolean needTypeInfo = descriptor.getTypeConstructor().getParameters().size() == 0;
- boolean needInstance = isNonLiteralObject();
- JetClassObject classObject = getClassObject();
- if (!needTypeInfo && !needInstance && classObject == null) {
- return;
- }
- final MethodVisitor mv = v.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC,
- "<clinit>", "()V", null, null);
- mv.visitCode();
-
- InstructionAdapter v = new InstructionAdapter(mv);
-
- if (needTypeInfo) {
- JetTypeMapper typeMapper = state.getTypeMapper();
- ClassCodegen.newTypeInfo(v, false, typeMapper.jvmType(descriptor, OwnerKind.INTERFACE));
- v.putstatic(typeMapper.jvmName(descriptor, kind), "$typeInfo", "Ljet/typeinfo/TypeInfo;");
- }
- if (needInstance) {
- String name = jvmName();
- v.anew(Type.getObjectType(name));
- v.dup();
- v.invokespecial(name, "<init>", "()V");
- v.putstatic(name, "$instance", JetTypeMapper.jetImplementationType(descriptor).getDescriptor());
- }
- if (classObject != null) {
- String name = state.getTypeMapper().jvmName(classObject);
- final Type classObjectType = Type.getObjectType(name);
- v.anew(classObjectType);
- v.dup();
- v.invokespecial(name, "<init>", "()V");
- v.putstatic(state.getTypeMapper().jvmName(descriptor, OwnerKind.IMPLEMENTATION), "$classobj",
- classObjectType.getDescriptor());
- }
-
- mv.visitInsn(Opcodes.RETURN);
- mv.visitMaxs(0, 0);
-
- mv.visitEnd();
- }
-
@Nullable
private JetClassObject getClassObject() {
return myClass instanceof JetClass ? ((JetClass) myClass).getClassObject() : null;
|
bc6ad67d673dfdebd216b021193f736dcf5a76f8
|
hbase
|
HBASE-1386 NPE in housekeeping. git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@772703 13f79535-47bb-0310-9956-ffa450edef68
|
c
|
https://github.com/apache/hbase
|
diff --git a/CHANGES.txt b/CHANGES.txt
index 537c12dea555..254a4e31494d 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -115,6 +115,7 @@ Release 0.20.0 - Unreleased
HBASE-1377 RS address is null in master web UI
HBASE-1344 WARN IllegalStateException: Cannot set a region as open if it has
not been pending
+ HBASE-1386 NPE in housekeeping
IMPROVEMENTS
HBASE-1089 Add count of regions on filesystem to master UI; add percentage
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 656c395d81ae..9f6b1db91e1b 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1124,6 +1124,7 @@ private boolean isHealthy() {
}
return true;
}
+
/*
* Run some housekeeping tasks before we go into 'hibernation' sleeping at
* the end of the main HRegionServer run loop.
@@ -1132,12 +1133,16 @@ private void housekeeping() {
// If the todo list has > 0 messages, iterate looking for open region
// messages. Send the master a message that we're working on its
// processing so it doesn't assign the region elsewhere.
- if (this.toDo.size() <= 0) {
+ if (this.toDo.isEmpty()) {
return;
}
// This iterator is 'safe'. We are guaranteed a view on state of the
// queue at time iterator was taken out. Apparently goes from oldest.
for (ToDoEntry e: this.toDo) {
+ HMsg msg = e.msg;
+ if (msg == null) {
+ LOG.warn("Message is empty: " + e);
+ }
if (e.msg.isType(HMsg.Type.MSG_REGION_OPEN)) {
addProcessingMessage(e.msg.getRegionInfo());
}
@@ -1299,15 +1304,16 @@ void reportSplit(HRegionInfo oldRegion, HRegionInfo newRegionA,
/*
* Data structure to hold a HMsg and retries count.
*/
- private static class ToDoEntry {
- protected int tries;
+ private static final class ToDoEntry {
+ protected volatile int tries;
protected final HMsg msg;
- ToDoEntry(HMsg msg) {
+
+ ToDoEntry(final HMsg msg) {
this.tries = 0;
this.msg = msg;
}
}
-
+
final BlockingQueue<ToDoEntry> toDo = new LinkedBlockingQueue<ToDoEntry>();
private Worker worker;
private Thread workerThread;
|
a6fa02a07fe374204e9e02914ccf1cc9812aa5ba
|
restlet-framework-java
|
Initial code for new default HTTP connector and SIP connector.
|
a
|
https://github.com/restlet/restlet-framework-java
|
diff --git a/modules/org.restlet/src/org/restlet/engine/http/connector/Connection.java b/modules/org.restlet/src/org/restlet/engine/http/connector/Connection.java
index 8631b810ca..a0b89ae9c7 100644
--- a/modules/org.restlet/src/org/restlet/engine/http/connector/Connection.java
+++ b/modules/org.restlet/src/org/restlet/engine/http/connector/Connection.java
@@ -1037,10 +1037,12 @@ public void writeMessages() {
}
}
- writeMessage(message);
+ if (message != null) {
+ writeMessage(message);
- if (getState() == ConnectionState.CLOSING) {
- close(true);
+ if (getState() == ConnectionState.CLOSING) {
+ close(true);
+ }
}
}
} catch (Exception e) {
|
6c0386029b4620e622f6d62939567f88238a21a2
|
camel
|
CAMEL-2011: JmsEndpoint is now singleton. git-svn-id: https://svn.apache.org/repos/asf/camel/trunk@814584 13f79535-47bb-0310-9956-ffa450edef68
|
p
|
https://github.com/apache/camel
|
diff --git a/components/camel-jms/src/main/java/org/apache/camel/component/jms/JmsEndpoint.java b/components/camel-jms/src/main/java/org/apache/camel/component/jms/JmsEndpoint.java
index 78ad2ac9dfc3b..8c6a049c9f13e 100644
--- a/components/camel-jms/src/main/java/org/apache/camel/component/jms/JmsEndpoint.java
+++ b/components/camel-jms/src/main/java/org/apache/camel/component/jms/JmsEndpoint.java
@@ -305,8 +305,9 @@ public void setSelector(String selector) {
this.selector = selector;
}
+ @ManagedAttribute
public boolean isSingleton() {
- return false;
+ return true;
}
public synchronized Requestor getRequestor() throws Exception {
diff --git a/components/camel-jms/src/test/java/org/apache/camel/component/jms/issues/JmsSendToAlotOfDestinationWithSameEndpointTest.java b/components/camel-jms/src/test/java/org/apache/camel/component/jms/issues/JmsSendToAlotOfDestinationWithSameEndpointTest.java
index 6beb3397d1d43..c6c84ae801834 100644
--- a/components/camel-jms/src/test/java/org/apache/camel/component/jms/issues/JmsSendToAlotOfDestinationWithSameEndpointTest.java
+++ b/components/camel-jms/src/test/java/org/apache/camel/component/jms/issues/JmsSendToAlotOfDestinationWithSameEndpointTest.java
@@ -47,7 +47,6 @@ public void testSendToAlotOfMessageToQueues() throws Exception {
// use the same endpoint but provide a header with the dynamic queue we send to
// this allows us to reuse endpoints and not create a new endpoint for each and every jms queue
// we send to
- Thread.sleep(50);
if (i > 0 && i % 50 == 0) {
LOG.info("Send " + i + " messages so far");
}
|
b8291d673e065fdc24d82aca9e1e4e110e8d81c2
|
hbase
|
HADOOP-2295 Fix assigning a region to multiple servers. git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@599578 13f79535-47bb-0310-9956-ffa450edef68
|
c
|
https://github.com/apache/hbase
|
diff --git a/CHANGES.txt b/CHANGES.txt
index 3c81729b59af..476a99522c1f 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -39,6 +39,7 @@ Trunk (unreleased changes)
may not restart)
HADOOP-2253 getRow can return HBASE::DELETEVAL cells
(Bryan Duxbury via Stack)
+ HADOOP-2295 Fix assigning a region to multiple servers
IMPROVEMENTS
HADOOP-2401 Add convenience put method that takes writable
diff --git a/src/java/org/apache/hadoop/hbase/HMaster.java b/src/java/org/apache/hadoop/hbase/HMaster.java
index d2e930d90b72..d5424d36a54b 100644
--- a/src/java/org/apache/hadoop/hbase/HMaster.java
+++ b/src/java/org/apache/hadoop/hbase/HMaster.java
@@ -1136,12 +1136,16 @@ public void run() {
// Join up with all threads
try {
- rootScannerThread.join(); // Wait for the root scanner to finish.
+ if (rootScannerThread.isAlive()) {
+ rootScannerThread.join(); // Wait for the root scanner to finish.
+ }
} catch (Exception iex) {
LOG.warn("root scanner", iex);
}
try {
- metaScannerThread.join(); // Wait for meta scanner to finish.
+ if (metaScannerThread.isAlive()) {
+ metaScannerThread.join(); // Wait for meta scanner to finish.
+ }
} catch(Exception iex) {
LOG.warn("meta scanner", iex);
}
@@ -1460,10 +1464,25 @@ private HMsg[] processMsgs(HServerInfo info, HMsg incomingMsgs[])
// Get reports on what the RegionServer did.
for (int i = 0; i < incomingMsgs.length; i++) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Received " + incomingMsgs[i].toString() + "from " +
+ serverName);
+ }
HRegionInfo region = incomingMsgs[i].getRegionInfo();
switch (incomingMsgs[i].getMsg()) {
+ case HMsg.MSG_REPORT_PROCESS_OPEN:
+ synchronized (this.assignAttempts) {
+ // Region server has acknowledged request to open region.
+ // Extend region open time by 1/2 max region open time.
+ assignAttempts.put(region.getRegionName(),
+ Long.valueOf(assignAttempts.get(
+ region.getRegionName()).longValue() +
+ (this.maxRegionOpenTime / 2)));
+ }
+ break;
+
case HMsg.MSG_REPORT_OPEN:
HRegionInfo regionInfo = unassignedRegions.get(region.getRegionName());
@@ -1484,9 +1503,7 @@ private HMsg[] processMsgs(HServerInfo info, HMsg incomingMsgs[])
} else {
LOG.info(info.getServerAddress().toString() + " serving " +
region.getRegionName());
- // Remove from unassigned list so we don't assign it to someone else
- this.unassignedRegions.remove(region.getRegionName());
- this.assignAttempts.remove(region.getRegionName());
+
if (region.getRegionName().compareTo(
HRegionInfo.rootRegionInfo.getRegionName()) == 0) {
// Store the Root Region location (in memory)
@@ -1495,21 +1512,23 @@ private HMsg[] processMsgs(HServerInfo info, HMsg incomingMsgs[])
new HServerAddress(info.getServerAddress()));
this.rootRegionLocation.notifyAll();
}
- break;
- }
-
- // Note that the table has been assigned and is waiting for the meta
- // table to be updated.
+ } else {
+ // Note that the table has been assigned and is waiting for the meta
+ // table to be updated.
- pendingRegions.add(region.getRegionName());
+ pendingRegions.add(region.getRegionName());
- // Queue up an update to note the region location.
+ // Queue up an update to note the region location.
- try {
- msgQueue.put(new ProcessRegionOpen(info, region));
- } catch (InterruptedException e) {
- throw new RuntimeException("Putting into msgQueue was interrupted.", e);
- }
+ try {
+ msgQueue.put(new ProcessRegionOpen(info, region));
+ } catch (InterruptedException e) {
+ throw new RuntimeException("Putting into msgQueue was interrupted.", e);
+ }
+ }
+ // Remove from unassigned list so we don't assign it to someone else
+ this.unassignedRegions.remove(region.getRegionName());
+ this.assignAttempts.remove(region.getRegionName());
}
break;
diff --git a/src/java/org/apache/hadoop/hbase/HMsg.java b/src/java/org/apache/hadoop/hbase/HMsg.java
index 21e118f1f7bb..488ff8f5ef9d 100644
--- a/src/java/org/apache/hadoop/hbase/HMsg.java
+++ b/src/java/org/apache/hadoop/hbase/HMsg.java
@@ -53,6 +53,9 @@ public class HMsg implements Writable {
/** region server is no longer serving the specified region */
public static final byte MSG_REPORT_CLOSE = 101;
+
+ /** region server is processing open request */
+ public static final byte MSG_REPORT_PROCESS_OPEN = 102;
/**
* region server split the region associated with this message.
@@ -142,6 +145,10 @@ public String toString() {
message.append("MSG_REGION_CLOSE_WITHOUT_REPORT : ");
break;
+ case MSG_REPORT_PROCESS_OPEN:
+ message.append("MSG_REPORT_PROCESS_OPEN : ");
+ break;
+
case MSG_REPORT_OPEN:
message.append("MSG_REPORT_OPEN : ");
break;
diff --git a/src/java/org/apache/hadoop/hbase/HRegionServer.java b/src/java/org/apache/hadoop/hbase/HRegionServer.java
index db11f31bc44e..74c3c7e21760 100644
--- a/src/java/org/apache/hadoop/hbase/HRegionServer.java
+++ b/src/java/org/apache/hadoop/hbase/HRegionServer.java
@@ -742,6 +742,10 @@ public void run() {
throw new RuntimeException("Putting into msgQueue was " +
"interrupted.", e);
}
+ if (msgs[i].getMsg() == HMsg.MSG_REGION_OPEN) {
+ outboundMsgs.add(new HMsg(HMsg.MSG_REPORT_PROCESS_OPEN,
+ msgs[i].getRegionInfo()));
+ }
}
}
}
@@ -982,11 +986,11 @@ synchronized void abort() {
* Presumption is that all closes and stops have already been called.
*/
void join() {
- join(this.workerThread);
join(this.logRoller);
join(this.cacheFlusher);
join(this.compactor);
join(this.splitter);
+ join(this.workerThread);
}
private void join(final Thread t) {
@@ -1161,8 +1165,8 @@ void openRegion(final HRegionInfo regionInfo) throws IOException {
} finally {
this.lock.writeLock().unlock();
}
+ reportOpen(region);
}
- reportOpen(region);
}
void closeRegion(final HRegionInfo hri, final boolean reportWhenCompleted)
diff --git a/src/java/org/apache/hadoop/hbase/Leases.java b/src/java/org/apache/hadoop/hbase/Leases.java
index c3219d4ce757..57d28b2fac9c 100644
--- a/src/java/org/apache/hadoop/hbase/Leases.java
+++ b/src/java/org/apache/hadoop/hbase/Leases.java
@@ -108,11 +108,13 @@ public void closeAfterLeasesExpire() {
public void close() {
LOG.info(Thread.currentThread().getName() + " closing leases");
this.stop.set(true);
- try {
- this.leaseMonitorThread.interrupt();
- this.leaseMonitorThread.join();
- } catch (InterruptedException iex) {
- // Ignore
+ while (this.leaseMonitorThread.isAlive()) {
+ try {
+ this.leaseMonitorThread.interrupt();
+ this.leaseMonitorThread.join();
+ } catch (InterruptedException iex) {
+ // Ignore
+ }
}
synchronized(leases) {
synchronized(sortedLeases) {
@@ -211,10 +213,16 @@ public void cancelLease(final long holderId, final long resourceId) {
* Its a daemon thread.
*/
class LeaseMonitor extends Chore {
+ /**
+ * @param p
+ * @param s
+ */
public LeaseMonitor(int p, AtomicBoolean s) {
super(p, s);
}
+ /** {@inheritDoc} */
+ @Override
protected void chore() {
synchronized(leases) {
synchronized(sortedLeases) {
diff --git a/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index 8a2c50f68c54..151f54200625 100644
--- a/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -53,7 +53,9 @@ public class LocalHBaseCluster implements HConstants {
private final HMaster master;
private final List<RegionServerThread> regionThreads;
private final static int DEFAULT_NO = 1;
+ /** local mode */
public static final String LOCAL = "local";
+ /** 'local:' */
public static final String LOCAL_COLON = LOCAL + ":";
private final HBaseConfiguration conf;
@@ -146,12 +148,14 @@ public List<RegionServerThread> getRegionServers() {
public String waitOnRegionServer(int serverNumber) {
RegionServerThread regionServerThread =
this.regionThreads.remove(serverNumber);
- try {
- LOG.info("Waiting on " +
- regionServerThread.getRegionServer().serverInfo.toString());
- regionServerThread.join();
- } catch (InterruptedException e) {
- e.printStackTrace();
+ while (regionServerThread.isAlive()) {
+ try {
+ LOG.info("Waiting on " +
+ regionServerThread.getRegionServer().serverInfo.toString());
+ regionServerThread.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
}
return regionServerThread.getName();
}
@@ -217,10 +221,12 @@ public void shutdown() {
}
}
if (this.master != null) {
- try {
- this.master.join();
- } catch(InterruptedException e) {
- // continue
+ while (this.master.isAlive()) {
+ try {
+ this.master.join();
+ } catch(InterruptedException e) {
+ // continue
+ }
}
}
LOG.info("Shutdown " +
diff --git a/src/test/org/apache/hadoop/hbase/DFSAbort.java b/src/test/org/apache/hadoop/hbase/DFSAbort.java
index a9c553e31323..4a30a75ac40b 100644
--- a/src/test/org/apache/hadoop/hbase/DFSAbort.java
+++ b/src/test/org/apache/hadoop/hbase/DFSAbort.java
@@ -22,19 +22,10 @@
import junit.framework.TestSuite;
import junit.textui.TestRunner;
-import java.io.PrintWriter;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
/**
* Test ability of HBase to handle DFS failure
*/
public class DFSAbort extends HBaseClusterTestCase {
- private static final Log LOG =
- LogFactory.getLog(DFSAbort.class.getName());
-
/** constructor */
public DFSAbort() {
super();
@@ -66,8 +57,6 @@ public void testDFSAbort() throws Exception {
// By now the Mini DFS is running, Mini HBase is running and we have
// created a table. Now let's yank the rug out from HBase
cluster.getDFSCluster().shutdown();
- // Now wait for Mini HBase Cluster to shut down
-// cluster.join();
threadDumpingJoin();
} catch (Exception e) {
e.printStackTrace();
diff --git a/src/test/org/apache/hadoop/hbase/TestLogRolling.java b/src/test/org/apache/hadoop/hbase/TestLogRolling.java
index ce7dd68dbc83..e382cdbf32e6 100644
--- a/src/test/org/apache/hadoop/hbase/TestLogRolling.java
+++ b/src/test/org/apache/hadoop/hbase/TestLogRolling.java
@@ -127,33 +127,41 @@ private void startAndWriteData() throws Exception {
this.server = cluster.getRegionThreads().get(0).getRegionServer();
this.log = server.getLog();
-
+
// When the META table can be opened, the region servers are running
- @SuppressWarnings("unused")
HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
- // Create the test table and open it
- HTableDescriptor desc = new HTableDescriptor(tableName);
- desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
- HBaseAdmin admin = new HBaseAdmin(conf);
- admin.createTable(desc);
- HTable table = new HTable(conf, new Text(tableName));
-
- for (int i = 1; i <= 2048; i++) { // 2048 writes should cause 8 log rolls
- long lockid =
- table.startUpdate(new Text("row" + String.format("%1$04d", i)));
- table.put(lockid, HConstants.COLUMN_FAMILY, value);
- table.commit(lockid);
-
- if (i % 256 == 0) {
- // After every 256 writes sleep to let the log roller run
-
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e) {
- // continue
+ try {
+
+ // Create the test table and open it
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
+ HBaseAdmin admin = new HBaseAdmin(conf);
+ admin.createTable(desc);
+ HTable table = new HTable(conf, new Text(tableName));
+
+ try {
+ for (int i = 1; i <= 2048; i++) { // 2048 writes should cause 8 log rolls
+ long lockid =
+ table.startUpdate(new Text("row" + String.format("%1$04d", i)));
+ table.put(lockid, HConstants.COLUMN_FAMILY, value);
+ table.commit(lockid);
+
+ if (i % 256 == 0) {
+ // After every 256 writes sleep to let the log roller run
+
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ // continue
+ }
+ }
}
+ } finally {
+ table.close();
}
+ } finally {
+ meta.close();
}
}
|
b6ce0a1f69af1862f1577f5c2a0f96905d2af683
|
hadoop
|
YARN-2635. TestRM, TestRMRestart, TestClientToAMTokens should run with both CS and FS. (Wei Yan and kasha via kasha) (cherry picked from commit 80d11eb68e60f88e16d7d41edecbddfc935a6b10)
|
p
|
https://github.com/apache/hadoop
|
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 628dfa2e91167..9ce5d8dbb69dd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -14,6 +14,9 @@ Release 2.7.0 - UNRELEASED
YARN-1979. TestDirectoryCollection fails when the umask is unusual.
(Vinod Kumar Vavilapalli and Tsuyoshi OZAWA via junping_du)
+ YARN-2635. TestRM, TestRMRestart, TestClientToAMTokens should run
+ with both CS and FS. (Wei Yan and kasha via kasha)
+
OPTIMIZATIONS
BUG FIXES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
new file mode 100644
index 0000000000000..cfd16001a37b6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
+
+
+import org.junit.Before;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Arrays;
+import java.util.Collection;
+
+@RunWith(Parameterized.class)
+public abstract class ParameterizedSchedulerTestBase {
+ protected final static String TEST_DIR =
+ new File(System.getProperty("test.build.data", "/tmp")).getAbsolutePath();
+ private final static String FS_ALLOC_FILE =
+ new File(TEST_DIR, "test-fs-queues.xml").getAbsolutePath();
+
+ private SchedulerType schedulerType;
+ private YarnConfiguration conf = null;
+
+ public enum SchedulerType {
+ CAPACITY, FAIR
+ }
+
+ public ParameterizedSchedulerTestBase(SchedulerType type) {
+ schedulerType = type;
+ }
+
+ public YarnConfiguration getConf() {
+ return conf;
+ }
+
+ @Parameterized.Parameters
+ public static Collection<SchedulerType[]> getParameters() {
+ return Arrays.asList(new SchedulerType[][]{
+ {SchedulerType.CAPACITY}, {SchedulerType.FAIR}});
+ }
+
+ @Before
+ public void configureScheduler() throws IOException {
+ conf = new YarnConfiguration();
+ switch (schedulerType) {
+ case CAPACITY:
+ conf.set(YarnConfiguration.RM_SCHEDULER,
+ CapacityScheduler.class.getName());
+ break;
+ case FAIR:
+ configureFairScheduler(conf);
+ break;
+ }
+ }
+
+ private void configureFairScheduler(YarnConfiguration conf) throws IOException {
+ // Disable queueMaxAMShare limitation for fair scheduler
+ PrintWriter out = new PrintWriter(new FileWriter(FS_ALLOC_FILE));
+ out.println("<?xml version=\"1.0\"?>");
+ out.println("<allocations>");
+ out.println("<queueMaxAMShareDefault>-1.0</queueMaxAMShareDefault>");
+ out.println("</allocations>");
+ out.close();
+
+ conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
+ conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, FS_ALLOC_FILE);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
index cd67ebc216bec..3d664f28848bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.server.resourcemanager;
+import org.junit.Before;
import static org.mockito.Matchers.argThat;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;
@@ -65,7 +66,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
import org.apache.log4j.Level;
@@ -75,13 +75,23 @@
import org.mockito.ArgumentMatcher;
@SuppressWarnings({"unchecked", "rawtypes"})
-public class TestRM {
-
+public class TestRM extends ParameterizedSchedulerTestBase {
private static final Log LOG = LogFactory.getLog(TestRM.class);
// Milliseconds to sleep for when waiting for something to happen
private final static int WAIT_SLEEP_MS = 100;
+ private YarnConfiguration conf;
+
+ public TestRM(SchedulerType type) {
+ super(type);
+ }
+
+ @Before
+ public void setup() {
+ conf = getConf();
+ }
+
@After
public void tearDown() {
ClusterMetrics.destroy();
@@ -93,7 +103,7 @@ public void tearDown() {
public void testGetNewAppId() throws Exception {
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
- MockRM rm = new MockRM();
+ MockRM rm = new MockRM(conf);
rm.start();
GetNewApplicationResponse resp = rm.getNewAppId();
@@ -106,7 +116,7 @@ public void testGetNewAppId() throws Exception {
public void testAppWithNoContainers() throws Exception {
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
- MockRM rm = new MockRM();
+ MockRM rm = new MockRM(conf);
rm.start();
MockNM nm1 = rm.registerNode("h1:1234", 5120);
@@ -128,7 +138,6 @@ public void testAppWithNoContainers() throws Exception {
public void testAppOnMultiNode() throws Exception {
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
- YarnConfiguration conf = new YarnConfiguration();
conf.set("yarn.scheduler.capacity.node-locality-delay", "-1");
MockRM rm = new MockRM(conf);
rm.start();
@@ -188,7 +197,6 @@ public void testAppOnMultiNode() throws Exception {
// corresponding NM Token.
@Test (timeout = 20000)
public void testNMTokenSentForNormalContainer() throws Exception {
- YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_SCHEDULER,
CapacityScheduler.class.getCanonicalName());
MockRM rm = new MockRM(conf);
@@ -240,7 +248,7 @@ public void testNMTokenSentForNormalContainer() throws Exception {
@Test (timeout = 40000)
public void testNMToken() throws Exception {
- MockRM rm = new MockRM();
+ MockRM rm = new MockRM(conf);
try {
rm.start();
MockNM nm1 = rm.registerNode("h1:1234", 10000);
@@ -422,8 +430,6 @@ protected void allocateContainersAndValidateNMTokens(MockAM am,
@Test (timeout = 300000)
public void testActivatingApplicationAfterAddingNM() throws Exception {
- YarnConfiguration conf = new YarnConfiguration();
-
MockRM rm1 = new MockRM(conf);
// start like normal because state is empty
@@ -469,7 +475,6 @@ public void testActivatingApplicationAfterAddingNM() throws Exception {
// is killed or failed, so that client doesn't get the wrong information.
@Test (timeout = 80000)
public void testInvalidateAMHostPortWhenAMFailedOrKilled() throws Exception {
- YarnConfiguration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
MockRM rm1 = new MockRM(conf);
rm1.start();
@@ -522,7 +527,6 @@ public void testInvalidateAMHostPortWhenAMFailedOrKilled() throws Exception {
@Test (timeout = 60000)
public void testInvalidatedAMHostPortOnAMRestart() throws Exception {
- YarnConfiguration conf = new YarnConfiguration();
MockRM rm1 = new MockRM(conf);
rm1.start();
MockNM nm1 =
@@ -555,7 +559,6 @@ public void testInvalidatedAMHostPortOnAMRestart() throws Exception {
@Test (timeout = 60000)
public void testApplicationKillAtAcceptedState() throws Exception {
- YarnConfiguration conf = new YarnConfiguration();
final Dispatcher dispatcher = new AsyncDispatcher() {
@Override
public EventHandler getEventHandler() {
@@ -632,15 +635,4 @@ protected Dispatcher createDispatcher() {
Assert.assertEquals(appsSubmitted + 1, metrics.getAppsSubmitted());
}
- public static void main(String[] args) throws Exception {
- TestRM t = new TestRM();
- t.testGetNewAppId();
- t.testAppWithNoContainers();
- t.testAppOnMultiNode();
- t.testNMToken();
- t.testActivatingApplicationAfterAddingNM();
- t.testInvalidateAMHostPortWhenAMFailedOrKilled();
- t.testInvalidatedAMHostPortOnAMRestart();
- t.testApplicationKillAtAcceptedState();
- }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index 0b3a364c45564..b37b648ae8b8a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -29,7 +29,6 @@
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
@@ -109,7 +108,7 @@
import org.junit.Before;
import org.junit.Test;
-public class TestRMRestart {
+public class TestRMRestart extends ParameterizedSchedulerTestBase {
private final static File TEMP_DIR = new File(System.getProperty(
"test.build.data", "/tmp"), "decommision");
private File hostFile = new File(TEMP_DIR + File.separator + "hostFile.txt");
@@ -117,12 +116,17 @@ public class TestRMRestart {
// Fake rmAddr for token-renewal
private static InetSocketAddress rmAddr;
+ private List<MockRM> rms = new ArrayList<MockRM>();
+
+ public TestRMRestart(SchedulerType type) {
+ super(type);
+ }
@Before
- public void setup() throws UnknownHostException {
+ public void setup() throws IOException {
+ conf = getConf();
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
- conf = new YarnConfiguration();
UserGroupInformation.setConfiguration(conf);
conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
@@ -132,9 +136,24 @@ public void setup() throws UnknownHostException {
@After
public void tearDown() {
+ for (MockRM rm : rms) {
+ rm.stop();
+ }
+ rms.clear();
+
TEMP_DIR.delete();
}
+ /**
+ *
+ * @return a new MockRM that will be stopped at the end of the test.
+ */
+ private MockRM createMockRM(YarnConfiguration conf, RMStateStore store) {
+ MockRM rm = new MockRM(conf, store);
+ rms.add(rm);
+ return rm;
+ }
+
@SuppressWarnings("rawtypes")
@Test (timeout=180000)
public void testRMRestart() throws Exception {
@@ -151,7 +170,7 @@ public void testRMRestart() throws Exception {
// PHASE 1: create state in an RM
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
// start like normal because state is empty
rm1.start();
@@ -247,7 +266,7 @@ public void testRMRestart() throws Exception {
// PHASE 2: create new RM and start from old state
// create new RM to represent restart and recover state
- MockRM rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
// start new RM
rm2.start();
@@ -315,7 +334,7 @@ public void testRMRestart() throws Exception {
NMContainerStatus status =
TestRMRestart
.createNMContainerStatus(loadedApp1.getCurrentAppAttempt()
- .getAppAttemptId(), 1, ContainerState.COMPLETE);
+ .getAppAttemptId(), 1, ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(status), null);
nm2.registerNode();
@@ -412,7 +431,7 @@ public void testRMRestartAppRunningAMFailed() throws Exception {
rmState.getApplicationState();
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -438,13 +457,11 @@ public void testRMRestartAppRunningAMFailed() throws Exception {
rm1.waitForState(app0.getApplicationId(), RMAppState.ACCEPTED);
// start new RM
- MockRM rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
rm2.start();
// assert the previous AM state is loaded back on RM recovery.
rm2.waitForState(am0.getApplicationAttemptId(), RMAppAttemptState.FAILED);
- rm1.stop();
- rm2.stop();
}
@Test (timeout = 60000)
@@ -468,7 +485,7 @@ public void testRMRestartWaitForPreviousAMToFinish() throws Exception {
rmState.getApplicationState();
// start RM
- final MockRM rm1 = new MockRM(conf, memStore);
+ final MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234" , 16382, rm1.getResourceTrackerService());
@@ -492,8 +509,7 @@ public void testRMRestartWaitForPreviousAMToFinish() throws Exception {
.getAppAttemptState(), RMAppAttemptState.RUNNING);
// start new RM.
- MockRM rm2 = null;
- rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
@@ -520,7 +536,7 @@ public void testRMRestartWaitForPreviousAMToFinish() throws Exception {
NMContainerStatus status =
TestRMRestart.createNMContainerStatus(
- am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
+ am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(status), null);
rm2.waitForState(am2.getApplicationAttemptId(), RMAppAttemptState.FAILED);
launchAM(rmApp, rm2, nm1);
@@ -530,8 +546,7 @@ public void testRMRestartWaitForPreviousAMToFinish() throws Exception {
// Now restart RM ...
// Setting AMLivelinessMonitor interval to be 10 Secs.
conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 10000);
- MockRM rm3 = null;
- rm3 = new MockRM(conf, memStore);
+ MockRM rm3 = createMockRM(conf, memStore);
rm3.start();
// Wait for RM to process all the events as a part of rm recovery.
@@ -578,8 +593,7 @@ public void testRMRestartWaitForPreviousAMToFinish() throws Exception {
memStore.getState().getApplicationState().get(app2.getApplicationId())
.getAttemptCount());
- MockRM rm4 = null;
- rm4 = new MockRM(conf, memStore);
+ MockRM rm4 = createMockRM(conf, memStore);
rm4.start();
rmApp = rm4.getRMContext().getRMApps().get(app1.getApplicationId());
@@ -635,7 +649,7 @@ public void updateApplicationStateInternal(ApplicationId appId,
rmState.getApplicationState();
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 15120);
RMApp app0 = rm1.submitApp(200);
@@ -652,7 +666,7 @@ public void updateApplicationStateInternal(ApplicationId appId,
Assert.assertNull(rmAppState.get(app0.getApplicationId()).getState());
// start RM
- MockRM rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
rm2.start();
@@ -661,7 +675,7 @@ public void updateApplicationStateInternal(ApplicationId appId,
rm2.waitForState(app0.getApplicationId(), RMAppState.FINISHED);
// app final state is saved via the finish event from attempt.
Assert.assertEquals(RMAppState.FINISHED,
- rmAppState.get(app0.getApplicationId()).getState());
+ rmAppState.get(app0.getApplicationId()).getState());
}
@Test (timeout = 60000)
@@ -674,7 +688,7 @@ public void testRMRestartFailedApp() throws Exception {
rmState.getApplicationState();
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -696,7 +710,7 @@ public void testRMRestartFailedApp() throws Exception {
appState.getAttempt(am0.getApplicationAttemptId()).getState());
// start new RM
- MockRM rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
rm2.start();
RMApp loadedApp0 = rm2.getRMContext().getRMApps().get(app0.getApplicationId());
rm2.waitForState(app0.getApplicationId(), RMAppState.FAILED);
@@ -709,8 +723,6 @@ public void testRMRestartFailedApp() throws Exception {
.contains("Failing the application."));
// failed diagnostics from attempt is lost because the diagnostics from
// attempt is not yet available by the time app is saving the app state.
- rm1.stop();
- rm2.stop();
}
@Test (timeout = 60000)
@@ -724,7 +736,7 @@ public void testRMRestartKilledApp() throws Exception{
rmState.getApplicationState();
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -746,7 +758,7 @@ public void testRMRestartKilledApp() throws Exception{
appState.getAttempt(am0.getApplicationAttemptId()).getState());
// restart rm
- MockRM rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
rm2.start();
RMApp loadedApp0 = rm2.getRMContext().getRMApps().get(app0.getApplicationId());
rm2.waitForState(app0.getApplicationId(), RMAppState.KILLED);
@@ -756,9 +768,7 @@ public void testRMRestartKilledApp() throws Exception{
ApplicationReport appReport = verifyAppReportAfterRMRestart(app0, rm2);
Assert.assertEquals(app0.getDiagnostics().toString(),
- appReport.getDiagnostics());
- rm1.stop();
- rm2.stop();
+ appReport.getDiagnostics());
}
@Test (timeout = 60000)
@@ -781,7 +791,7 @@ public synchronized void updateApplicationAttemptStateInternal(
memStore.init(conf);
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
// create app
RMApp app0 =
@@ -793,7 +803,7 @@ public synchronized void updateApplicationAttemptStateInternal(
rm1.waitForState(app0.getApplicationId(), RMAppState.KILLED);
// restart rm
- MockRM rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
rm2.start();
RMApp loadedApp0 =
rm2.getRMContext().getRMApps().get(app0.getApplicationId());
@@ -812,7 +822,7 @@ public void testRMRestartSucceededApp() throws Exception {
rmState.getApplicationState();
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -839,7 +849,7 @@ public void testRMRestartSucceededApp() throws Exception {
Assert.assertEquals(app0.getFinishTime(), appState.getFinishTime());
// restart rm
- MockRM rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
rm2.start();
// verify application report returns the same app info as the app info
@@ -848,9 +858,6 @@ public void testRMRestartSucceededApp() throws Exception {
Assert.assertEquals(FinalApplicationStatus.SUCCEEDED,
appReport.getFinalApplicationStatus());
Assert.assertEquals("trackingUrl", appReport.getOriginalTrackingUrl());
-
- rm1.stop();
- rm2.stop();
}
@Test (timeout = 60000)
@@ -860,7 +867,7 @@ public void testRMRestartGetApplicationList() throws Exception {
memStore.init(conf);
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -897,7 +904,7 @@ protected RMAppManager createRMAppManager() {
return spy(super.createRMAppManager());
}
};
-
+ rms.add(rm2);
rm2.start();
GetApplicationsRequest request1 =
@@ -944,9 +951,6 @@ protected RMAppManager createRMAppManager() {
// check application summary is logged for the completed apps after RM restart.
verify(rm2.getRMAppManager(), times(3)).logApplicationSummary(
isA(ApplicationId.class));
-
- rm1.stop();
- rm2.stop();
}
private MockAM launchAM(RMApp app, MockRM rm, MockNM nm)
@@ -1012,7 +1016,7 @@ public void testRMRestartOnMaxAppAttempts() throws Exception {
Map<ApplicationId, ApplicationState> rmAppState =
rmState.getApplicationState();
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -1050,7 +1054,7 @@ public void testRMRestartOnMaxAppAttempts() throws Exception {
// Setting AMLivelinessMonitor interval to be 3 Secs.
conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 3000);
// start new RM
- MockRM rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
rm2.start();
// verify that maxAppAttempts is set to global value
@@ -1069,10 +1073,6 @@ public void testRMRestartOnMaxAppAttempts() throws Exception {
Assert.assertEquals(RMAppState.FAILED,
rmAppState.get(app1.getApplicationId()).getState());
Assert.assertNull(rmAppState.get(app2.getApplicationId()).getState());
-
- // stop the RM
- rm1.stop();
- rm2.stop();
}
@Test (timeout = 60000)
@@ -1154,10 +1154,6 @@ public void testDelegationTokenRestoredInDelegationTokenRenewer()
// verify tokens are properly populated back to rm2 DelegationTokenRenewer
Assert.assertEquals(tokenSet, rm2.getRMContext()
.getDelegationTokenRenewer().getDelegationTokens());
-
- // stop the RM
- rm1.stop();
- rm2.stop();
}
private void waitForTokensToBeRenewed(MockRM rm2) throws Exception {
@@ -1253,8 +1249,6 @@ public void testAppAttemptTokensRestoredOnRMRestart() throws Exception {
Assert.assertArrayEquals(amrmToken.getPassword(),
rm2.getRMContext().getAMRMTokenSecretManager().retrievePassword(
amrmToken.decodeIdentifier()));
- rm1.stop();
- rm2.stop();
}
@Test (timeout = 60000)
@@ -1402,10 +1396,6 @@ public void testRMDelegationTokenRestoredOnRMRestart() throws Exception {
.getAllTokens();
Assert.assertFalse(allTokensRM2.containsKey(dtId1));
Assert.assertFalse(rmDTState.containsKey(dtId1));
-
- // stop the RM
- rm1.stop();
- rm2.stop();
}
// This is to test submit an application to the new RM with the old delegation
@@ -1466,7 +1456,7 @@ protected void handleStoreEvent(RMStateStoreEvent event) {
memStore.init(conf);
// start RM
- final MockRM rm1 = new MockRM(conf, memStore);
+ final MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
// create apps.
@@ -1512,7 +1502,7 @@ public void testFinishedAppRemovalAfterRMRestart() throws Exception {
RMState rmState = memStore.getState();
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -1523,7 +1513,7 @@ public void testFinishedAppRemovalAfterRMRestart() throws Exception {
MockAM am0 = launchAM(app0, rm1, nm1);
finishApplicationMaster(app0, rm1, nm1, am0);
- MockRM rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
nm1 = rm2.registerNode("127.0.0.1:1234", 15120);
@@ -1545,9 +1535,6 @@ public void testFinishedAppRemovalAfterRMRestart() throws Exception {
Assert.assertNull(rm2.getRMContext().getRMApps()
.get(app0.getApplicationId()));
Assert.assertNull(rmAppState.get(app0.getApplicationId()));
-
- rm1.stop();
- rm2.stop();
}
// This is to test RM does not get hang on shutdown.
@@ -1564,7 +1551,7 @@ public synchronized void checkVersion()
memStore.init(conf);
MockRM rm1 = null;
try {
- rm1 = new MockRM(conf, memStore);
+ rm1 = createMockRM(conf, memStore);
rm1.start();
Assert.fail();
} catch (Exception e) {
@@ -1582,7 +1569,7 @@ public void testClientRetryOnKillingApplication() throws Exception {
memStore.init(conf);
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -1698,7 +1685,11 @@ protected void recoverApplication(ApplicationState appState,
}
}
};
- rm1.start();
+ try {
+ rm1.start();
+ } finally {
+ rm1.stop();
+ }
}
@SuppressWarnings("resource")
@@ -1711,7 +1702,7 @@ public void testQueueMetricsOnRMRestart() throws Exception {
// PHASE 1: create state in an RM
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -1749,7 +1740,7 @@ public void testQueueMetricsOnRMRestart() throws Exception {
// PHASE 2: create new RM and start from old state
// create new RM to represent restart and recover state
- MockRM rm2 = new MockRM(conf, memStore);
+ MockRM rm2 = createMockRM(conf, memStore);
QueueMetrics qm2 = rm2.getResourceScheduler().getRootQueueMetrics();
resetQueueMetrics(qm2);
assertQueueMetrics(qm2, 0, 0, 0, 0);
@@ -1766,7 +1757,7 @@ public void testQueueMetricsOnRMRestart() throws Exception {
NMContainerStatus status =
TestRMRestart
.createNMContainerStatus(loadedApp1.getCurrentAppAttempt()
- .getAppAttemptId(), 1, ContainerState.COMPLETE);
+ .getAppAttemptId(), 1, ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(status), null);
while (loadedApp1.getAppAttempts().size() != 2) {
@@ -1795,10 +1786,6 @@ public void testQueueMetricsOnRMRestart() throws Exception {
// finish the AMs
finishApplicationMaster(loadedApp1, rm2, nm1, am1);
assertQueueMetrics(qm2, 1, 0, 0, 1);
-
- // stop RM's
- rm2.stop();
- rm1.stop();
}
@@ -1836,43 +1823,58 @@ public void testDecomissionedNMsMetricsOnRMRestart() throws Exception {
hostFile.getAbsolutePath());
writeToHostsFile("");
final DrainDispatcher dispatcher = new DrainDispatcher();
- MockRM rm1 = new MockRM(conf) {
- @Override
- protected Dispatcher createDispatcher() {
- return dispatcher;
+ MockRM rm1 = null, rm2 = null;
+ try {
+ rm1 = new MockRM(conf) {
+ @Override
+ protected Dispatcher createDispatcher() {
+ return dispatcher;
+ }
+ };
+ rm1.start();
+ MockNM nm1 = rm1.registerNode("localhost:1234", 8000);
+ MockNM nm2 = rm1.registerNode("host2:1234", 8000);
+ Assert
+ .assertEquals(0,
+ ClusterMetrics.getMetrics().getNumDecommisionedNMs());
+ String ip = NetUtils.normalizeHostName("localhost");
+ // Add 2 hosts to exclude list.
+ writeToHostsFile("host2", ip);
+
+ // refresh nodes
+ rm1.getNodesListManager().refreshNodes(conf);
+ NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
+ Assert
+ .assertTrue(
+ NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
+ nodeHeartbeat = nm2.nodeHeartbeat(true);
+ Assert.assertTrue("The decommisioned metrics are not updated",
+ NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
+
+ dispatcher.await();
+ Assert
+ .assertEquals(2,
+ ClusterMetrics.getMetrics().getNumDecommisionedNMs());
+ rm1.stop();
+ rm1 = null;
+ Assert
+ .assertEquals(0,
+ ClusterMetrics.getMetrics().getNumDecommisionedNMs());
+
+ // restart RM.
+ rm2 = new MockRM(conf);
+ rm2.start();
+ Assert
+ .assertEquals(2,
+ ClusterMetrics.getMetrics().getNumDecommisionedNMs());
+ } finally {
+ if (rm1 != null) {
+ rm1.stop();
}
- };
- rm1.start();
- MockNM nm1 = rm1.registerNode("localhost:1234", 8000);
- MockNM nm2 = rm1.registerNode("host2:1234", 8000);
- Assert
- .assertEquals(0, ClusterMetrics.getMetrics().getNumDecommisionedNMs());
- String ip = NetUtils.normalizeHostName("localhost");
- // Add 2 hosts to exclude list.
- writeToHostsFile("host2", ip);
-
- // refresh nodes
- rm1.getNodesListManager().refreshNodes(conf);
- NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
- Assert
- .assertTrue(NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
- nodeHeartbeat = nm2.nodeHeartbeat(true);
- Assert.assertTrue("The decommisioned metrics are not updated",
- NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
-
- dispatcher.await();
- Assert
- .assertEquals(2, ClusterMetrics.getMetrics().getNumDecommisionedNMs());
- rm1.stop();
- Assert
- .assertEquals(0, ClusterMetrics.getMetrics().getNumDecommisionedNMs());
-
- // restart RM.
- MockRM rm2 = new MockRM(conf);
- rm2.start();
- Assert
- .assertEquals(2, ClusterMetrics.getMetrics().getNumDecommisionedNMs());
- rm2.stop();
+ if (rm2 != null) {
+ rm2.stop();
+ }
+ }
}
// Test Delegation token is renewed synchronously so that recover events
@@ -1887,7 +1889,7 @@ public void testSynchronouslyRenewDTOnRecovery() throws Exception {
memStore.init(conf);
// start RM
- MockRM rm1 = new MockRM(conf, memStore);
+ MockRM rm1 = createMockRM(conf, memStore);
rm1.start();
final MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -1910,24 +1912,29 @@ protected void serviceStart() throws Exception {
nm1.setResourceTrackerService(getResourceTrackerService());
NMContainerStatus status =
TestRMRestart.createNMContainerStatus(
- am0.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
+ am0.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(status), null);
}
};
}
};
- // Re-start RM
- rm2.start();
- // wait for the 2nd attempt to be started.
- RMApp loadedApp0 =
- rm2.getRMContext().getRMApps().get(app0.getApplicationId());
- int timeoutSecs = 0;
- while (loadedApp0.getAppAttempts().size() != 2 && timeoutSecs++ < 40) {
- Thread.sleep(200);
+ try {
+ // Re-start RM
+ rm2.start();
+
+ // wait for the 2nd attempt to be started.
+ RMApp loadedApp0 =
+ rm2.getRMContext().getRMApps().get(app0.getApplicationId());
+ int timeoutSecs = 0;
+ while (loadedApp0.getAppAttempts().size() != 2 && timeoutSecs++ < 40) {
+ Thread.sleep(200);
+ }
+ MockAM am1 = MockRM.launchAndRegisterAM(loadedApp0, rm2, nm1);
+ MockRM.finishAMAndVerifyAppState(loadedApp0, rm2, nm1, am1);
+ } finally {
+ rm2.stop();
}
- MockAM am1 = MockRM.launchAndRegisterAM(loadedApp0, rm2, nm1);
- MockRM.finishAMAndVerifyAppState(loadedApp0, rm2, nm1, am1);
}
private void writeToHostsFile(String... hosts) throws IOException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
index 0dcd228453e96..8b113a0021336 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
@@ -18,7 +18,11 @@
package org.apache.hadoop.yarn.server.resourcemanager.security;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager
+ .ParameterizedSchedulerTestBase;
import static org.junit.Assert.fail;
+import org.junit.Before;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -74,7 +78,17 @@
import org.apache.hadoop.yarn.util.Records;
import org.junit.Test;
-public class TestClientToAMTokens {
+public class TestClientToAMTokens extends ParameterizedSchedulerTestBase {
+ private YarnConfiguration conf;
+
+ public TestClientToAMTokens(SchedulerType type) {
+ super(type);
+ }
+
+ @Before
+ public void setup() {
+ conf = getConf();
+ }
private interface CustomProtocol {
@SuppressWarnings("unused")
@@ -151,8 +165,6 @@ protected void serviceStart() throws Exception {
@Test
public void testClientToAMTokens() throws Exception {
-
- final Configuration conf = new Configuration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
@@ -267,6 +279,8 @@ public RegisterApplicationMasterResponse run() {
// Now for an authenticated user
verifyValidToken(conf, am, token);
+
+ rm.stop();
}
private void verifyTokenWithTamperedID(final Configuration conf,
|
3fd458ad88808e542b211461a49728138c1ebe79
|
hbase
|
HBASE-6427 Pluggable compaction and scan policies via coprocessors. git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1367361 13f79535-47bb-0310-9956-ffa450edef68
|
a
|
https://github.com/apache/hbase
|
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
index feb9aa391877..3607e7dbe178 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -17,7 +17,7 @@
package org.apache.hadoop.hbase.coprocessor;
import java.util.List;
-import java.util.Map;
+import java.util.NavigableSet;
import com.google.common.collect.ImmutableList;
@@ -37,7 +37,9 @@
import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -74,6 +76,13 @@ public void preClose(ObserverContext<RegionCoprocessorEnvironment> e,
public void postClose(ObserverContext<RegionCoprocessorEnvironment> e,
boolean abortRequested) { }
+ @Override
+ public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final Store store, final KeyValueScanner memstoreScanner, final InternalScanner s)
+ throws IOException {
+ return null;
+ }
+
@Override
public void preFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
}
@@ -82,6 +91,17 @@ public void preFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOE
public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
}
+ @Override
+ public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+ InternalScanner scanner) throws IOException {
+ return scanner;
+ }
+
+ @Override
+ public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+ StoreFile resultFile) throws IOException {
+ }
+
@Override
public void preSplit(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
}
@@ -105,6 +125,13 @@ public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment>
return scanner;
}
+ @Override
+ public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final Store store, List<? extends KeyValueScanner> scanners, final ScanType scanType,
+ final long earliestPutTs, final InternalScanner s) throws IOException {
+ return null;
+ }
+
@Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, final Store store,
final StoreFile resultFile) throws IOException {
@@ -241,6 +268,13 @@ public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvir
return s;
}
+ @Override
+ public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
+ final KeyValueScanner s) throws IOException {
+ return null;
+ }
+
@Override
public RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e,
final Scan scan, final RegionScanner s) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index c5b858eaca86..c3cfa097bbb7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -18,6 +18,7 @@
import java.io.IOException;
import java.util.List;
+import java.util.NavigableSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -35,9 +36,12 @@
import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -64,20 +68,63 @@ public interface RegionObserver extends Coprocessor {
*/
void postOpen(final ObserverContext<RegionCoprocessorEnvironment> c);
+ /**
+ * Called before a memstore is flushed to disk and prior to creating the scanner to read from
+ * the memstore. To override or modify how a memstore is flushed,
+ * implementing classes can return a new scanner to provide the KeyValues to be
+ * stored into the new {@code StoreFile} or null to perform the default processing.
+ * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
+ * effect in this hook.
+ * @param c the environment provided by the region server
+ * @param store the store being flushed
+ * @param memstoreScanner the scanner for the memstore that is flushed
+ * @param s the base scanner, if not {@code null}, from previous RegionObserver in the chain
+ * @return the scanner to use during the flush. {@code null} if the default implementation
+ * is to be used.
+ * @throws IOException if an error occurred on the coprocessor
+ */
+ InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final Store store, final KeyValueScanner memstoreScanner, final InternalScanner s)
+ throws IOException;
+
/**
* Called before the memstore is flushed to disk.
* @param c the environment provided by the region server
* @throws IOException if an error occurred on the coprocessor
+ * @deprecated use {@link #preFlush(ObserverContext, Store, InternalScanner)} instead
*/
void preFlush(final ObserverContext<RegionCoprocessorEnvironment> c) throws IOException;
+ /**
+ * Called before a Store's memstore is flushed to disk.
+ * @param c the environment provided by the region server
+ * @param store the store whose memstore is being flushed
+ * @param scanner the scanner over the existing memstore data about to be flushed
+ * @return the scanner to use during the flush. Should not be {@code null}
+ * unless the implementation is writing new store files on its own.
+ * @throws IOException if an error occurred on the coprocessor
+ */
+ InternalScanner preFlush(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
+ final InternalScanner scanner) throws IOException;
+
/**
* Called after the memstore is flushed to disk.
* @param c the environment provided by the region server
* @throws IOException if an error occurred on the coprocessor
+ * @deprecated use {@link #postFlush(ObserverContext, Store, StoreFile)} instead.
*/
void postFlush(final ObserverContext<RegionCoprocessorEnvironment> c) throws IOException;
+ /**
+ * Called after a Store's memstore is flushed to disk.
+ * @param c the environment provided by the region server
+ * @param store the store being flushed
+ * @param resultFile the new store file written out during this flush
+ * @throws IOException if an error occurred on the coprocessor
+ */
+ void postFlush(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
+ final StoreFile resultFile) throws IOException;
+
/**
* Called prior to selecting the {@link StoreFile}s to compact from the list
* of available candidates. To alter the files used for compaction, you may
@@ -127,6 +174,29 @@ void postCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
InternalScanner preCompact(final ObserverContext<RegionCoprocessorEnvironment> c,
final Store store, final InternalScanner scanner) throws IOException;
+ /**
+ * Called prior to writing the {@link StoreFile}s selected for compaction into
+ * a new {@code StoreFile} and prior to creating the scanner used to read the
+ * input files. To override or modify the compaction process,
+ * implementing classes can return a new scanner to provide the KeyValues to be
+ * stored into the new {@code StoreFile} or null to perform the default processing.
+ * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
+ * effect in this hook.
+ * @param c the environment provided by the region server
+ * @param store the store being compacted
+ * @param scanners the list {@link StoreFileScanner}s to be read from
+ * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
+ * @param earliestPutTs timestamp of the earliest put that was found in any of the involved
+ * store files
+ * @param s the base scanner, if not {@code null}, from previous RegionObserver in the chain
+ * @return the scanner to use during compaction. {@code null} if the default implementation
+ * is to be used.
+ * @throws IOException if an error occurred on the coprocessor
+ */
+ InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final Store store, List<? extends KeyValueScanner> scanners, final ScanType scanType,
+ final long earliestPutTs, final InternalScanner s) throws IOException;
+
/**
* Called after compaction has completed and the new store file has been
* moved in to place.
@@ -549,6 +619,30 @@ RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment>
final Scan scan, final RegionScanner s)
throws IOException;
+ /**
+ * Called before a store opens a new scanner.
+ * This hook is called when a "user" scanner is opened.
+ * <p>
+ * See {@link #preFlushScannerOpen(ObserverContext, Store, KeyValueScanner, InternalScanner)}
+ * and {@link #preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner)}
+ * to override scanners created for flushes or compactions, resp.
+ * <p>
+ * Call CoprocessorEnvironment#complete to skip any subsequent chained
+ * coprocessors.
+ * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
+ * effect in this hook.
+ * @param c the environment provided by the region server
+ * @param store the store being scanned
+ * @param scan the Scan specification
+ * @param targetCols columns to be used in the scanner
+ * @param s the base scanner, if not {@code null}, from previous RegionObserver in the chain
+ * @return a KeyValueScanner instance to use or {@code null} to use the default implementation
+ * @throws IOException if an error occurred on the coprocessor
+ */
+ KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
+ final KeyValueScanner s) throws IOException;
+
/**
* Called after the client opens a new scanner.
* <p>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Compactor.java
index 9ed051f8be3e..b606458e6e1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Compactor.java
@@ -32,7 +32,6 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.Compression;
-import org.apache.hadoop.hbase.regionserver.StoreScanner.ScanType;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.StringUtils;
@@ -127,12 +126,21 @@ StoreFile.Writer compact(final Store store,
try {
InternalScanner scanner = null;
try {
- Scan scan = new Scan();
- scan.setMaxVersions(store.getFamily().getMaxVersions());
- /* Include deletes, unless we are doing a major compaction */
- scanner = new StoreScanner(store, scan, scanners,
- majorCompaction? ScanType.MAJOR_COMPACT : ScanType.MINOR_COMPACT,
- smallestReadPoint, earliestPutTs);
+ if (store.getHRegion().getCoprocessorHost() != null) {
+ scanner = store
+ .getHRegion()
+ .getCoprocessorHost()
+ .preCompactScannerOpen(store, scanners,
+ majorCompaction ? ScanType.MAJOR_COMPACT : ScanType.MINOR_COMPACT, earliestPutTs);
+ }
+ if (scanner == null) {
+ Scan scan = new Scan();
+ scan.setMaxVersions(store.getFamily().getMaxVersions());
+ /* Include deletes, unless we are doing a major compaction */
+ scanner = new StoreScanner(store, store.scanInfo, scan, scanners,
+ majorCompaction? ScanType.MAJOR_COMPACT : ScanType.MINOR_COMPACT,
+ smallestReadPoint, earliestPutTs);
+ }
if (store.getHRegion().getCoprocessorHost() != null) {
InternalScanner cpScanner =
store.getHRegion().getCoprocessorHost().preCompact(store, scanner);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7df5e72d26d3..36d6bacd070f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1216,7 +1216,7 @@ void triggerMajorCompaction() {
* @param majorCompaction True to force a major compaction regardless of thresholds
* @throws IOException e
*/
- void compactStores(final boolean majorCompaction)
+ public void compactStores(final boolean majorCompaction)
throws IOException {
if (majorCompaction) {
this.triggerMajorCompaction();
@@ -3469,7 +3469,7 @@ public HRegionInfo getRegionInfo() {
for (Map.Entry<byte[], NavigableSet<byte[]>> entry :
scan.getFamilyMap().entrySet()) {
Store store = stores.get(entry.getKey());
- StoreScanner scanner = store.getScanner(scan, entry.getValue());
+ KeyValueScanner scanner = store.getScanner(scan, entry.getValue());
scanners.add(scanner);
}
this.storeHeap = new KeyValueHeap(scanners, comparator);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 58afaf439b9f..f6efea5b1b23 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -303,6 +303,31 @@ public void postClose(boolean abortRequested) {
}
}
+ /**
+ * See
+ * {@link RegionObserver#preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner)}
+ */
+ public InternalScanner preCompactScannerOpen(Store store, List<StoreFileScanner> scanners,
+ ScanType scanType, long earliestPutTs) throws IOException {
+ ObserverContext<RegionCoprocessorEnvironment> ctx = null;
+ InternalScanner s = null;
+ for (RegionEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof RegionObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ s = ((RegionObserver) env.getInstance()).preCompactScannerOpen(ctx, store, scanners,
+ scanType, earliestPutTs, s);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env,e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ return s;
+ }
+
/**
* Called prior to selecting the {@link StoreFile}s for compaction from
* the list of currently available candidates.
@@ -389,7 +414,7 @@ public InternalScanner preCompact(Store store, InternalScanner scanner) throws I
* Called after the store compaction has completed.
* @param store the store being compacted
* @param resultFile the new store file written during compaction
- * @throws IOException
+ * @throws IOException
*/
public void postCompact(Store store, StoreFile resultFile) throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
@@ -408,6 +433,31 @@ public void postCompact(Store store, StoreFile resultFile) throws IOException {
}
}
+ /**
+ * Invoked before a memstore flush
+ * @throws IOException
+ */
+ public InternalScanner preFlush(Store store, InternalScanner scanner) throws IOException {
+ ObserverContext<RegionCoprocessorEnvironment> ctx = null;
+ boolean bypass = false;
+ for (RegionEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof RegionObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ scanner = ((RegionObserver)env.getInstance()).preFlush(
+ ctx, store, scanner);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env,e);
+ }
+ bypass |= ctx.shouldBypass();
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ return bypass ? null : scanner;
+ }
+
/**
* Invoked before a memstore flush
* @throws IOException
@@ -429,9 +479,32 @@ public void preFlush() throws IOException {
}
}
+ /**
+ * See
+ * {@link RegionObserver#preFlushScannerOpen(ObserverContext, Store, KeyValueScanner, InternalScanner)}
+ */
+ public InternalScanner preFlushScannerOpen(Store store, KeyValueScanner memstoreScanner) throws IOException {
+ ObserverContext<RegionCoprocessorEnvironment> ctx = null;
+ InternalScanner s = null;
+ for (RegionEnvironment env : coprocessors) {
+ if (env.getInstance() instanceof RegionObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ s = ((RegionObserver) env.getInstance()).preFlushScannerOpen(ctx, store, memstoreScanner, s);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ return s;
+ }
+
/**
* Invoked after a memstore flush
- * @throws IOException
+ * @throws IOException
*/
public void postFlush() throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
@@ -450,9 +523,30 @@ public void postFlush() throws IOException {
}
}
+ /**
+ * Invoked after a memstore flush
+ * @throws IOException
+ */
+ public void postFlush(final Store store, final StoreFile storeFile) throws IOException {
+ ObserverContext<RegionCoprocessorEnvironment> ctx = null;
+ for (RegionEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof RegionObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ ((RegionObserver)env.getInstance()).postFlush(ctx, store, storeFile);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ }
+
/**
* Invoked just before a split
- * @throws IOException
+ * @throws IOException
*/
public void preSplit() throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
@@ -1088,6 +1182,31 @@ public RegionScanner preScannerOpen(Scan scan) throws IOException {
return bypass ? s : null;
}
+ /**
+ * See
+ * {@link RegionObserver#preStoreScannerOpen(ObserverContext, Store, Scan, NavigableSet, KeyValueScanner)}
+ */
+ public KeyValueScanner preStoreScannerOpen(Store store, Scan scan,
+ final NavigableSet<byte[]> targetCols) throws IOException {
+ KeyValueScanner s = null;
+ ObserverContext<RegionCoprocessorEnvironment> ctx = null;
+ for (RegionEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof RegionObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ s = ((RegionObserver) env.getInstance()).preStoreScannerOpen(ctx, store, scan,
+ targetCols, s);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ return s;
+ }
+
/**
* @param scan the Scan specification
* @param s the scanner
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
index 20e297864851..f02afd7171b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
@@ -34,8 +34,6 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.regionserver.StoreScanner.ScanType;
-
/**
* A query matcher that is specifically designed for the scan case.
*/
@@ -138,7 +136,7 @@ public class ScanQueryMatcher {
* based on TTL
*/
public ScanQueryMatcher(Scan scan, Store.ScanInfo scanInfo,
- NavigableSet<byte[]> columns, StoreScanner.ScanType scanType,
+ NavigableSet<byte[]> columns, ScanType scanType,
long readPointToUse, long earliestPutTs, long oldestUnexpiredTS) {
this.tr = scan.getTimeRange();
this.rowComparator = scanInfo.getComparator().getRawComparator();
@@ -185,7 +183,7 @@ public ScanQueryMatcher(Scan scan, Store.ScanInfo scanInfo,
*/
ScanQueryMatcher(Scan scan, Store.ScanInfo scanInfo,
NavigableSet<byte[]> columns, long oldestUnexpiredTS) {
- this(scan, scanInfo, columns, StoreScanner.ScanType.USER_SCAN,
+ this(scan, scanInfo, columns, ScanType.USER_SCAN,
Long.MAX_VALUE, /* max Readpoint to track versions */
HConstants.LATEST_TIMESTAMP, oldestUnexpiredTS);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanType.java
new file mode 100644
index 000000000000..7b075120cbee
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanType.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Enum to distinguish general scan types.
+ */
[email protected]
+public enum ScanType {
+ MAJOR_COMPACT,
+ MINOR_COMPACT,
+ USER_SCAN
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 3f5d76c06037..87a1c13f88d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -63,7 +63,6 @@
import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.regionserver.StoreScanner.ScanType;
import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
@@ -212,9 +211,7 @@ protected Store(Path basedir, HRegion region, HColumnDescriptor family,
"ms in store " + this);
// Why not just pass a HColumnDescriptor in here altogether? Even if have
// to clone it?
- scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
- family.getMaxVersions(), ttl, family.getKeepDeletedCells(),
- timeToPurgeDeletes, this.comparator);
+ scanInfo = new ScanInfo(family, ttl, timeToPurgeDeletes, this.comparator);
this.memstore = new MemStore(conf, this.comparator);
// By default, compact if storefile.count >= minFilesToCompact
@@ -728,15 +725,30 @@ private Path internalFlushCache(final SortedSet<KeyValue> set,
if (set.size() == 0) {
return null;
}
- Scan scan = new Scan();
- scan.setMaxVersions(scanInfo.getMaxVersions());
// Use a store scanner to find which rows to flush.
// Note that we need to retain deletes, hence
// treat this as a minor compaction.
- InternalScanner scanner = new StoreScanner(this, scan, Collections
- .singletonList(new CollectionBackedScanner(set, this.comparator)),
- ScanType.MINOR_COMPACT, this.region.getSmallestReadPoint(),
- HConstants.OLDEST_TIMESTAMP);
+ InternalScanner scanner = null;
+ KeyValueScanner memstoreScanner = new CollectionBackedScanner(set, this.comparator);
+ if (getHRegion().getCoprocessorHost() != null) {
+ scanner = getHRegion().getCoprocessorHost().preFlushScannerOpen(this, memstoreScanner);
+ }
+ if (scanner == null) {
+ Scan scan = new Scan();
+ scan.setMaxVersions(scanInfo.getMaxVersions());
+ scanner = new StoreScanner(this, scanInfo, scan, Collections.singletonList(new CollectionBackedScanner(
+ set, this.comparator)), ScanType.MINOR_COMPACT, this.region.getSmallestReadPoint(),
+ HConstants.OLDEST_TIMESTAMP);
+ }
+ if (getHRegion().getCoprocessorHost() != null) {
+ InternalScanner cpScanner =
+ getHRegion().getCoprocessorHost().preFlush(this, scanner);
+ // NULL scanner returned from coprocessor hooks means skip normal processing
+ if (cpScanner == null) {
+ return null;
+ }
+ scanner = cpScanner;
+ }
try {
// TODO: We can fail in the below block before we complete adding this
// flush to list of store files. Add cleanup of anything put on filesystem
@@ -1941,11 +1953,18 @@ boolean getForceMajorCompaction() {
* are not in a compaction.
* @throws IOException
*/
- public StoreScanner getScanner(Scan scan,
+ public KeyValueScanner getScanner(Scan scan,
final NavigableSet<byte []> targetCols) throws IOException {
lock.readLock().lock();
try {
- return new StoreScanner(this, scan, targetCols);
+ KeyValueScanner scanner = null;
+ if (getHRegion().getCoprocessorHost() != null) {
+ scanner = getHRegion().getCoprocessorHost().preStoreScannerOpen(this, scan, targetCols);
+ }
+ if (scanner == null) {
+ scanner = new StoreScanner(this, getScanInfo(), scan, targetCols);
+ }
+ return scanner;
} finally {
lock.readLock().unlock();
}
@@ -2065,7 +2084,7 @@ boolean throttleCompaction(long compactionSize) {
return compactionSize > throttlePoint;
}
- HRegion getHRegion() {
+ public HRegion getHRegion() {
return this.region;
}
@@ -2168,6 +2187,12 @@ public boolean commit(MonitoredTask status) throws IOException {
}
storeFile = Store.this.commitFile(storeFilePath, cacheFlushId,
snapshotTimeRangeTracker, flushedSize, status);
+ if (Store.this.getHRegion().getCoprocessorHost() != null) {
+ Store.this.getHRegion()
+ .getCoprocessorHost()
+ .postFlush(Store.this, storeFile);
+ }
+
// Add new file to store files. Clear snapshot too while we have
// the Store write lock.
return Store.this.updateStorefiles(storeFile, snapshot);
@@ -2210,6 +2235,10 @@ public KeyValue.KVComparator getComparator() {
return comparator;
}
+ public ScanInfo getScanInfo() {
+ return scanInfo;
+ }
+
/**
* Immutable information for scans over a store.
*/
@@ -2226,6 +2255,17 @@ public static class ScanInfo {
+ (2 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_INT)
+ Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN);
+ /**
+ * @param family {@link HColumnDescriptor} describing the column family
+ * @param ttl Store's TTL (in ms)
+ * @param timeToPurgeDeletes duration in ms after which a delete marker can
+ * be purged during a major compaction.
+ * @param comparator The store's comparator
+ */
+ public ScanInfo(HColumnDescriptor family, long ttl, long timeToPurgeDeletes, KVComparator comparator) {
+ this(family.getName(), family.getMinVersions(), family.getMaxVersions(), ttl, family
+ .getKeepDeletedCells(), timeToPurgeDeletes, comparator);
+ }
/**
* @param family Name of this store's column family
* @param minVersions Store's MIN_VERSIONS setting
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index a46cb72ab5e4..cad774130e92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.regionserver.Store.ScanInfo;
import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
@@ -43,7 +44,7 @@
* into List<KeyValue> for a single row.
*/
@InterfaceAudience.Private
-class StoreScanner extends NonLazyKeyValueScanner
+public class StoreScanner extends NonLazyKeyValueScanner
implements KeyValueScanner, InternalScanner, ChangedReadersObserver {
static final Log LOG = LogFactory.getLog(StoreScanner.class);
private Store store;
@@ -106,16 +107,16 @@ private StoreScanner(Store store, boolean cacheBlocks, Scan scan,
* @param columns which columns we are scanning
* @throws IOException
*/
- StoreScanner(Store store, Scan scan, final NavigableSet<byte[]> columns)
+ public StoreScanner(Store store, ScanInfo scanInfo, Scan scan, final NavigableSet<byte[]> columns)
throws IOException {
- this(store, scan.getCacheBlocks(), scan, columns, store.scanInfo.getTtl(),
- store.scanInfo.getMinVersions());
+ this(store, scan.getCacheBlocks(), scan, columns, scanInfo.getTtl(),
+ scanInfo.getMinVersions());
initializeMetricNames();
if (columns != null && scan.isRaw()) {
throw new DoNotRetryIOException(
"Cannot specify any column for a raw scan");
}
- matcher = new ScanQueryMatcher(scan, store.scanInfo, columns,
+ matcher = new ScanQueryMatcher(scan, scanInfo, columns,
ScanType.USER_SCAN, Long.MAX_VALUE, HConstants.LATEST_TIMESTAMP,
oldestUnexpiredTS);
@@ -158,13 +159,13 @@ private StoreScanner(Store store, boolean cacheBlocks, Scan scan,
* @param smallestReadPoint the readPoint that we should use for tracking
* versions
*/
- StoreScanner(Store store, Scan scan,
+ public StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
List<? extends KeyValueScanner> scanners, ScanType scanType,
long smallestReadPoint, long earliestPutTs) throws IOException {
- this(store, false, scan, null, store.scanInfo.getTtl(),
- store.scanInfo.getMinVersions());
+ this(store, false, scan, null, scanInfo.getTtl(),
+ scanInfo.getMinVersions());
initializeMetricNames();
- matcher = new ScanQueryMatcher(scan, store.scanInfo, null, scanType,
+ matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType,
smallestReadPoint, earliestPutTs, oldestUnexpiredTS);
// Filter the list of scanners using Bloom filters, time range, TTL, etc.
@@ -181,7 +182,7 @@ private StoreScanner(Store store, boolean cacheBlocks, Scan scan,
/** Constructor for testing. */
StoreScanner(final Scan scan, Store.ScanInfo scanInfo,
- StoreScanner.ScanType scanType, final NavigableSet<byte[]> columns,
+ ScanType scanType, final NavigableSet<byte[]> columns,
final List<KeyValueScanner> scanners) throws IOException {
this(scan, scanInfo, scanType, columns, scanners,
HConstants.LATEST_TIMESTAMP);
@@ -189,7 +190,7 @@ private StoreScanner(Store store, boolean cacheBlocks, Scan scan,
// Constructor for testing.
StoreScanner(final Scan scan, Store.ScanInfo scanInfo,
- StoreScanner.ScanType scanType, final NavigableSet<byte[]> columns,
+ ScanType scanType, final NavigableSet<byte[]> columns,
final List<KeyValueScanner> scanners, long earliestPutTs)
throws IOException {
this(null, scan.getCacheBlocks(), scan, columns, scanInfo.getTtl(),
@@ -598,14 +599,5 @@ List<KeyValueScanner> getAllScannersForTesting() {
static void enableLazySeekGlobally(boolean enable) {
lazySeekEnabledGlobally = enable;
}
-
- /**
- * Enum to distinguish general scan types.
- */
- public static enum ScanType {
- MAJOR_COMPACT,
- MINOR_COMPACT,
- USER_SCAN
- }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index fc4fe2e4f6f7..767202e11677 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -788,6 +788,22 @@ public void flush(byte [] tableName) throws IOException {
this.hbaseCluster.flushcache(tableName);
}
+ /**
+ * Compact all regions in the mini hbase cluster
+ * @throws IOException
+ */
+ public void compact(boolean major) throws IOException {
+ this.hbaseCluster.compact(major);
+ }
+
+ /**
+ * Compact all of a table's regions in the mini hbase cluster
+ * @throws IOException
+ */
+ public void compact(byte [] tableName, boolean major) throws IOException {
+ this.hbaseCluster.compact(tableName, major);
+ }
+
/**
* Create a table.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index c7442ae57a29..e5743036730e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -454,6 +454,34 @@ public void flushcache(byte [] tableName) throws IOException {
}
}
+ /**
+ * Call compactStores on all regions on all participating regionservers.
+ * @throws IOException
+ */
+ public void compact(boolean major) throws IOException {
+ for (JVMClusterUtil.RegionServerThread t:
+ this.hbaseCluster.getRegionServers()) {
+ for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+ r.compactStores(major);
+ }
+ }
+ }
+
+ /**
+ * Call compactStores on all regions of the specified table.
+ * @throws IOException
+ */
+ public void compact(byte [] tableName, boolean major) throws IOException {
+ for (JVMClusterUtil.RegionServerThread t:
+ this.hbaseCluster.getRegionServers()) {
+ for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+ if(Bytes.equals(r.getTableDesc().getName(), tableName)) {
+ r.compactStores(major);
+ }
+ }
+ }
+ }
+
/**
* @return List of region server threads.
*/
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 2dadc7c93f2b..2b67c5daabd2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -90,12 +90,12 @@
@Category(LargeTests.class)
public class TestFromClientSide {
final Log LOG = LogFactory.getLog(getClass());
- private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static byte [] ROW = Bytes.toBytes("testRow");
private static byte [] FAMILY = Bytes.toBytes("testFamily");
private static byte [] QUALIFIER = Bytes.toBytes("testQualifier");
private static byte [] VALUE = Bytes.toBytes("testValue");
- private static int SLAVES = 3;
+ protected static int SLAVES = 3;
/**
* @throws java.lang.Exception
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
new file mode 100644
index 000000000000..7b313dc67fac
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
+import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test all client operations with a coprocessor that
+ * just implements the default flush/compact/scan policy
+ */
+@Category(LargeTests.class)
+public class TestFromClientSideWithCoprocessor extends TestFromClientSide {
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+ MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName());
+ // We need more than one region server in this test
+ TEST_UTIL.startMiniCluster(SLAVES);
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
index a691bacc4366..119a4878e985 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
@@ -29,6 +29,7 @@
import java.util.List;
import java.util.Map;
import java.util.Arrays;
+import java.util.NavigableSet;
import com.google.common.collect.ImmutableList;
import org.apache.commons.logging.Log;
@@ -42,7 +43,9 @@
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -63,11 +66,13 @@ public class SimpleRegionObserver extends BaseRegionObserver {
boolean hadPreClose;
boolean hadPostClose;
boolean hadPreFlush;
+ boolean hadPreFlushScannerOpen;
boolean hadPostFlush;
boolean hadPreSplit;
boolean hadPostSplit;
boolean hadPreCompactSelect;
boolean hadPostCompactSelect;
+ boolean hadPreCompactScanner;
boolean hadPreCompact;
boolean hadPostCompact;
boolean hadPreGet = false;
@@ -87,6 +92,7 @@ public class SimpleRegionObserver extends BaseRegionObserver {
boolean hadPreScannerClose = false;
boolean hadPostScannerClose = false;
boolean hadPreScannerOpen = false;
+ boolean hadPreStoreScannerOpen = false;
boolean hadPostScannerOpen = false;
boolean hadPreBulkLoadHFile = false;
boolean hadPostBulkLoadHFile = false;
@@ -120,12 +126,20 @@ public boolean wasClosed() {
}
@Override
- public void preFlush(ObserverContext<RegionCoprocessorEnvironment> c) {
+ public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> c, Store store, InternalScanner scanner) {
hadPreFlush = true;
+ return scanner;
}
@Override
- public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c) {
+ public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+ hadPreFlushScannerOpen = true;
+ return null;
+ }
+
+ @Override
+ public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c, Store store, StoreFile resultFile) {
hadPostFlush = true;
}
@@ -166,6 +180,14 @@ public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment>
return scanner;
}
+ @Override
+ public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
+ InternalScanner s) throws IOException {
+ hadPreCompactScanner = true;
+ return null;
+ }
+
@Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
Store store, StoreFile resultFile) {
@@ -184,6 +206,14 @@ public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvir
return null;
}
+ @Override
+ public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
+ final KeyValueScanner s) throws IOException {
+ hadPreStoreScannerOpen = true;
+ return null;
+ }
+
@Override
public RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
index ebc5373e2248..e6ff17305eca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
@@ -61,7 +61,6 @@
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
-import org.apache.hadoop.hbase.regionserver.StoreScanner.ScanType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.hbase.util.MD5Hash;
@@ -408,7 +407,7 @@ private void performMerge(List<StoreFileScanner> scanners, Store store,
Scan scan = new Scan();
// Include deletes
- scanner = new StoreScanner(store, scan, scanners,
+ scanner = new StoreScanner(store, store.scanInfo, scan, scanners,
ScanType.MAJOR_COMPACT, Long.MIN_VALUE, Long.MIN_VALUE);
ArrayList<KeyValue> kvs = new ArrayList<KeyValue>();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
new file mode 100644
index 000000000000..668c04372c20
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
@@ -0,0 +1,62 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.NavigableSet;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TestFromClientSideWithCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+
+/**
+ * RegionObserver that just reimplements the default behavior,
+ * in order to validate that all the necessary APIs for this are public.
+ * This observer is also used in {@link TestFromClientSideWithCoprocessor} and
+ * {@link TestCompactionWithCoprocessor} to make sure that a wide range
+ * of functionality still behaves as expected.
+ */
+public class NoOpScanPolicyObserver extends BaseRegionObserver {
+ /**
+ * Reimplement the default behavior
+ */
+ @Override
+ public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+ Store.ScanInfo oldSI = store.getScanInfo();
+ Store.ScanInfo scanInfo = new Store.ScanInfo(store.getFamily(), oldSI.getTtl(),
+ oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
+ Scan scan = new Scan();
+ scan.setMaxVersions(oldSI.getMaxVersions());
+ return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
+ ScanType.MINOR_COMPACT, store.getHRegion().getSmallestReadPoint(),
+ HConstants.OLDEST_TIMESTAMP);
+ }
+
+ /**
+ * Reimplement the default behavior
+ */
+ @Override
+ public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
+ InternalScanner s) throws IOException {
+ // this demonstrates how to override the scanner's default behavior
+ Store.ScanInfo oldSI = store.getScanInfo();
+ Store.ScanInfo scanInfo = new Store.ScanInfo(store.getFamily(), oldSI.getTtl(),
+ oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
+ Scan scan = new Scan();
+ scan.setMaxVersions(oldSI.getMaxVersions());
+ return new StoreScanner(store, scanInfo, scan, scanners, scanType, store.getHRegion()
+ .getSmallestReadPoint(), earliestPutTs);
+ }
+
+ @Override
+ public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ Store store, final Scan scan, final NavigableSet<byte[]> targetCols, KeyValueScanner s)
+ throws IOException {
+ return new StoreScanner(store, store.getScanInfo(), scan, targetCols);
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithCoprocessor.java
new file mode 100644
index 000000000000..ba30a9fdf388
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithCoprocessor.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Make sure all compaction tests still pass with the preFlush and preCompact
+ * overridden to implement the default behavior
+ */
+@Category(MediumTests.class)
+public class TestCompactionWithCoprocessor extends TestCompaction {
+ /** constructor */
+ public TestCompactionWithCoprocessor() throws Exception {
+ super();
+ conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
+ NoOpScanPolicyObserver.class.getName());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
index 8e8ae45a5a7f..0da62dfc17c4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
@@ -36,7 +36,6 @@
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.Store.ScanInfo;
-import org.apache.hadoop.hbase.regionserver.StoreScanner.ScanType;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
index 3c582338e69b..01f0731549f2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
@@ -38,7 +38,6 @@
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.Store.ScanInfo;
-import org.apache.hadoop.hbase.regionserver.StoreScanner.ScanType;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
@@ -559,7 +558,7 @@ public long currentTimeMillis() {
KeyValue.COMPARATOR);
StoreScanner scanner =
new StoreScanner(scan, scanInfo,
- StoreScanner.ScanType.MAJOR_COMPACT, null, scanners,
+ ScanType.MAJOR_COMPACT, null, scanners,
HConstants.OLDEST_TIMESTAMP);
List<KeyValue> results = new ArrayList<KeyValue>();
results = new ArrayList<KeyValue>();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
new file mode 100644
index 000000000000..1915ca372762
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+// this is deliberately not in the o.a.h.h.regionserver package
+// in order to make sure all required classes/methods are available
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
+import org.apache.hadoop.hbase.regionserver.ScanType;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreScanner;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.*;
+
+@Category(MediumTests.class)
+public class TestCoprocessorScanPolicy {
+ final Log LOG = LogFactory.getLog(getClass());
+ protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final byte[] F = Bytes.toBytes("fam");
+ private static final byte[] Q = Bytes.toBytes("qual");
+ private static final byte[] R = Bytes.toBytes("row");
+
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+ ScanObserver.class.getName());
+ TEST_UTIL.startMiniCluster();
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testBaseCases() throws Exception {
+ byte[] tableName = Bytes.toBytes("baseCases");
+ HTable t = TEST_UTIL.createTable(tableName, F, 1);
+ // set the version override to 2
+ Put p = new Put(R);
+ p.setAttribute("versions", new byte[]{});
+ p.add(F, tableName, Bytes.toBytes(2));
+ t.put(p);
+
+ // insert 2 versions
+ p = new Put(R);
+ p.add(F, Q, Q);
+ t.put(p);
+ p = new Put(R);
+ p.add(F, Q, Q);
+ t.put(p);
+ Get g = new Get(R);
+ g.setMaxVersions(10);
+ Result r = t.get(g);
+ assertEquals(2, r.size());
+
+ TEST_UTIL.flush(tableName);
+ TEST_UTIL.compact(tableName, true);
+
+ // both version are still visible even after a flush/compaction
+ g = new Get(R);
+ g.setMaxVersions(10);
+ r = t.get(g);
+ assertEquals(2, r.size());
+
+ // insert a 3rd version
+ p = new Put(R);
+ p.add(F, Q, Q);
+ t.put(p);
+ g = new Get(R);
+ g.setMaxVersions(10);
+ r = t.get(g);
+ // still only two version visible
+ assertEquals(2, r.size());
+
+ t.close();
+ }
+
+ @Test
+ public void testTTL() throws Exception {
+ byte[] tableName = Bytes.toBytes("testTTL");
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ HColumnDescriptor hcd = new HColumnDescriptor(F)
+ .setMaxVersions(10)
+ .setTimeToLive(1);
+ desc.addFamily(hcd);
+ TEST_UTIL.getHBaseAdmin().createTable(desc);
+ HTable t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName);
+ long now = EnvironmentEdgeManager.currentTimeMillis();
+ ManualEnvironmentEdge me = new ManualEnvironmentEdge();
+ me.setValue(now);
+ EnvironmentEdgeManagerTestHelper.injectEdge(me);
+ // 2s in the past
+ long ts = now - 2000;
+ // Set the TTL override to 3s
+ Put p = new Put(R);
+ p.setAttribute("ttl", new byte[]{});
+ p.add(F, tableName, Bytes.toBytes(3000L));
+ t.put(p);
+
+ p = new Put(R);
+ p.add(F, Q, ts, Q);
+ t.put(p);
+ p = new Put(R);
+ p.add(F, Q, ts+1, Q);
+ t.put(p);
+
+ // these two should be expired but for the override
+ // (their ts was 2s in the past)
+ Get g = new Get(R);
+ g.setMaxVersions(10);
+ Result r = t.get(g);
+ // still there?
+ assertEquals(2, r.size());
+
+ TEST_UTIL.flush(tableName);
+ TEST_UTIL.compact(tableName, true);
+
+ g = new Get(R);
+ g.setMaxVersions(10);
+ r = t.get(g);
+ // still there?
+ assertEquals(2, r.size());
+
+ // roll time forward 2s.
+ me.setValue(now + 2000);
+ // now verify that data eventually does expire
+ g = new Get(R);
+ g.setMaxVersions(10);
+ r = t.get(g);
+ // should be gone now
+ assertEquals(0, r.size());
+ t.close();
+ }
+
+ public static class ScanObserver extends BaseRegionObserver {
+ private Map<String, Long> ttls = new HashMap<String,Long>();
+ private Map<String, Integer> versions = new HashMap<String,Integer>();
+
+ // lame way to communicate with the coprocessor,
+ // since it is loaded by a different class loader
+ @Override
+ public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
+ final WALEdit edit, final boolean writeToWAL) throws IOException {
+ if (put.getAttribute("ttl") != null) {
+ KeyValue kv = put.getFamilyMap().values().iterator().next().get(0);
+ ttls.put(Bytes.toString(kv.getQualifier()), Bytes.toLong(kv.getValue()));
+ c.bypass();
+ } else if (put.getAttribute("versions") != null) {
+ KeyValue kv = put.getFamilyMap().values().iterator().next().get(0);
+ versions.put(Bytes.toString(kv.getQualifier()), Bytes.toInt(kv.getValue()));
+ c.bypass();
+ }
+ }
+
+ @Override
+ public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+ Long newTtl = ttls.get(store.getTableName());
+ if (newTtl != null) {
+ System.out.println("PreFlush:" + newTtl);
+ }
+ Integer newVersions = versions.get(store.getTableName());
+ Store.ScanInfo oldSI = store.getScanInfo();
+ HColumnDescriptor family = store.getFamily();
+ Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
+ newVersions == null ? family.getMaxVersions() : newVersions,
+ newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
+ oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
+ Scan scan = new Scan();
+ scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
+ return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
+ ScanType.MINOR_COMPACT, store.getHRegion().getSmallestReadPoint(),
+ HConstants.OLDEST_TIMESTAMP);
+ }
+
+ @Override
+ public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
+ long earliestPutTs, InternalScanner s) throws IOException {
+ Long newTtl = ttls.get(store.getTableName());
+ Integer newVersions = versions.get(store.getTableName());
+ Store.ScanInfo oldSI = store.getScanInfo();
+ HColumnDescriptor family = store.getFamily();
+ Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
+ newVersions == null ? family.getMaxVersions() : newVersions,
+ newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
+ oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
+ Scan scan = new Scan();
+ scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
+ return new StoreScanner(store, scanInfo, scan, scanners, scanType, store.getHRegion()
+ .getSmallestReadPoint(), earliestPutTs);
+ }
+
+ @Override
+ public KeyValueScanner preStoreScannerOpen(
+ final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
+ final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
+ Long newTtl = ttls.get(store.getTableName());
+ Integer newVersions = versions.get(store.getTableName());
+ Store.ScanInfo oldSI = store.getScanInfo();
+ HColumnDescriptor family = store.getFamily();
+ Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
+ newVersions == null ? family.getMaxVersions() : newVersions,
+ newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
+ oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
+ return new StoreScanner(store, scanInfo, scan, targetCols);
+ }
+ }
+
+ @org.junit.Rule
+ public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
+ new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
+}
|
135ec78d7d5854af7e5a764d4c3bb50ccf188eeb
|
kotlin
|
Fixed EA-70945--
|
c
|
https://github.com/JetBrains/kotlin
|
diff --git a/idea/idea-completion/testData/smart/EA70945.kt b/idea/idea-completion/testData/smart/EA70945.kt
new file mode 100644
index 0000000000000..1673544f07d95
--- /dev/null
+++ b/idea/idea-completion/testData/smart/EA70945.kt
@@ -0,0 +1,9 @@
+class A {
+ val foo: Int = 0
+}
+
+fun f() {
+ A().foo(<caret>)
+}
+
+// NUMBER: 0
diff --git a/idea/idea-completion/tests/org/jetbrains/kotlin/idea/completion/test/JvmSmartCompletionTestGenerated.java b/idea/idea-completion/tests/org/jetbrains/kotlin/idea/completion/test/JvmSmartCompletionTestGenerated.java
index 0ebb61cdc50b5..c73e6aba16076 100644
--- a/idea/idea-completion/tests/org/jetbrains/kotlin/idea/completion/test/JvmSmartCompletionTestGenerated.java
+++ b/idea/idea-completion/tests/org/jetbrains/kotlin/idea/completion/test/JvmSmartCompletionTestGenerated.java
@@ -113,6 +113,12 @@ public void testClassObjectMembersWithPrefix() throws Exception {
doTest(fileName);
}
+ @TestMetadata("EA70945.kt")
+ public void testEA70945() throws Exception {
+ String fileName = KotlinTestUtils.navigationMetadata("idea/idea-completion/testData/smart/EA70945.kt");
+ doTest(fileName);
+ }
+
@TestMetadata("EmptyPrefix.kt")
public void testEmptyPrefix() throws Exception {
String fileName = KotlinTestUtils.navigationMetadata("idea/idea-completion/testData/smart/EmptyPrefix.kt");
diff --git a/idea/idea-core/src/org/jetbrains/kotlin/idea/core/ExpectedInfos.kt b/idea/idea-core/src/org/jetbrains/kotlin/idea/core/ExpectedInfos.kt
index bf4d0c2db3499..53c7991390502 100644
--- a/idea/idea-core/src/org/jetbrains/kotlin/idea/core/ExpectedInfos.kt
+++ b/idea/idea-core/src/org/jetbrains/kotlin/idea/core/ExpectedInfos.kt
@@ -188,6 +188,8 @@ class ExpectedInfos(
private fun calculateForArgument(callElement: KtCallElement, argument: ValueArgument): Collection<ExpectedInfo>? {
val call = callElement.getCall(bindingContext) ?: return null
+ // sometimes we get wrong call (see testEA70945) TODO: refactor resolve so that it does not happen
+ if (call.callElement != callElement) return null
return calculateForArgument(call, argument)
}
|