commit_id
stringlengths
40
40
project
stringclasses
91 values
commit_message
stringlengths
9
4.65k
type
stringclasses
3 values
url
stringclasses
91 values
git_diff
stringlengths
555
2.23M
3181d96ec864a467d4259e31c64f2b7554afc3d4
hbase
HBASE-2397 Bytes.toStringBinary escapes printable- chars--git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@951840 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hbase
diff --git a/CHANGES.txt b/CHANGES.txt index 280daa7fb5ce..8ad8e5601edf 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -23,6 +23,7 @@ Release 0.21.0 - Unreleased HBASE-2541 Remove transactional contrib (Clint Morgan via Stack) HBASE-2542 Fold stargate contrib into core HBASE-2565 Remove contrib module from hbase + HBASE-2397 Bytes.toStringBinary escapes printable chars BUG FIXES HBASE-1791 Timeout in IndexRecordWriter (Bradford Stephens via Andrew diff --git a/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index bed859f48e62..1b46f2d892a4 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -320,16 +320,7 @@ public static String toStringBinary(final byte [] b, int off, int len) { if ( (ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') - || ch == ',' - || ch == '_' - || ch == '-' - || ch == ':' - || ch == ' ' - || ch == '<' - || ch == '>' - || ch == '=' - || ch == '/' - || ch == '.') { + || " `~!@#$%^&*()-_=+[]{}\\|;:'\",.<>/?".indexOf(ch) >= 0 ) { result.append(first.charAt(i)); } else { result.append(String.format("\\x%02X", ch));
e24b71e70035f9a9baf7ec19c279311eceec31a9
spring-framework
Shutdown Reactor env when relay handler is- stopped--The Reactor Environment (that's used by the TcpClient) manages a-number of threads. To ensure that these threads are cleaned up-Environment.shutdown() must be called when the Environment is no-longer needed.-
c
https://github.com/spring-projects/spring-framework
diff --git a/spring-websocket/src/main/java/org/springframework/web/messaging/stomp/support/StompRelayPubSubMessageHandler.java b/spring-websocket/src/main/java/org/springframework/web/messaging/stomp/support/StompRelayPubSubMessageHandler.java index 2e1d31d42947..ef2d9eaaea48 100644 --- a/spring-websocket/src/main/java/org/springframework/web/messaging/stomp/support/StompRelayPubSubMessageHandler.java +++ b/spring-websocket/src/main/java/org/springframework/web/messaging/stomp/support/StompRelayPubSubMessageHandler.java @@ -73,6 +73,8 @@ public class StompRelayPubSubMessageHandler extends AbstractPubSubMessageHandler private MessageConverter payloadConverter; + private Environment environment; + private TcpClient<String, String> tcpClient; private final Map<String, RelaySession> relaySessions = new ConcurrentHashMap<String, RelaySession>(); @@ -181,9 +183,9 @@ public boolean isRunning() { @Override public void start() { synchronized (this.lifecycleMonitor) { - + this.environment = new Environment(); this.tcpClient = new TcpClient.Spec<String, String>(NettyTcpClient.class) - .using(new Environment()) + .using(this.environment) .codec(new DelimitedCodec<String, String>((byte) 0, true, StandardCodecs.STRING_CODEC)) .connect(this.relayHost, this.relayPort) .get(); @@ -214,6 +216,7 @@ public void stop() { this.running = false; try { this.tcpClient.close().await(5000, TimeUnit.MILLISECONDS); + this.environment.shutdown(); } catch (InterruptedException e) { // ignore
643ae0c985d01e4b5deb9d28a1381de8179926af
hbase
HBASE-2781 ZKW.createUnassignedRegion doesn't- make sure existing znode is in the right state (Karthik- Ranganathan via JD)--git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@963910 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hbase
diff --git a/CHANGES.txt b/CHANGES.txt index ec4341f65428..44e345bba702 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -432,6 +432,8 @@ Release 0.21.0 - Unreleased HBASE-2797 Another NPE in ReadWriteConsistencyControl HBASE-2831 Fix '$bin' path duplication in setup scripts (Nicolas Spiegelberg via Stack) + HBASE-2781 ZKW.createUnassignedRegion doesn't make sure existing znode is + in the right state (Karthik Ranganathan via JD) IMPROVEMENTS HBASE-1760 Cleanup TODOs in HTable diff --git a/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java b/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java index b4ba5ab15c03..979739e3a076 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java +++ b/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java @@ -993,8 +993,9 @@ public void setUnassigned(HRegionInfo info, boolean force) { // should never happen LOG.error("Error creating event data for " + HBaseEventType.M2ZK_REGION_OFFLINE, e); } - zkWrapper.createUnassignedRegion(info.getEncodedName(), data); - LOG.debug("Created UNASSIGNED zNode " + info.getRegionNameAsString() + " in state " + HBaseEventType.M2ZK_REGION_OFFLINE); + zkWrapper.createOrUpdateUnassignedRegion(info.getEncodedName(), data); + LOG.debug("Created/updated UNASSIGNED zNode " + info.getRegionNameAsString() + + " in state " + HBaseEventType.M2ZK_REGION_OFFLINE); s = new RegionState(info, RegionState.State.UNASSIGNED); regionsInTransition.put(info.getRegionNameAsString(), s); } diff --git a/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java b/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java index 74b0446fa30f..f292b253c62e 100644 --- a/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java +++ b/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java @@ -1073,6 +1073,14 @@ public boolean writeZNode(String znodeName, byte[] data, int version, boolean wa } } + /** + * Given a region name and 
some data, this method creates a new the region + * znode data under the UNASSGINED znode with the data passed in. This method + * will not update data for existing znodes. + * + * @param regionName - encoded name of the region + * @param data - new serialized data to update the region znode + */ public void createUnassignedRegion(String regionName, byte[] data) { String znode = getZNode(getRegionInTransitionZNode(), regionName); if(LOG.isDebugEnabled()) { @@ -1109,6 +1117,66 @@ public void createUnassignedRegion(String regionName, byte[] data) { } } + /** + * Given a region name and some data, this method updates the region znode + * data under the UNASSGINED znode with the latest data. This method will + * update the znode data only if it already exists. + * + * @param regionName - encoded name of the region + * @param data - new serialized data to update the region znode + */ + public void updateUnassignedRegion(String regionName, byte[] data) { + String znode = getZNode(getRegionInTransitionZNode(), regionName); + // this is an update - make sure the node already exists + if(!exists(znode, true)) { + LOG.error("Cannot update " + znode + " - node does not exist" ); + return; + } + + if(LOG.isDebugEnabled()) { + // Check existing state for logging purposes. 
+ Stat stat = new Stat(); + byte[] oldData = null; + try { + oldData = readZNode(znode, stat); + } catch (IOException e) { + LOG.error("Error reading data for " + znode); + } + if(oldData == null) { + LOG.debug("While updating UNASSIGNED region " + regionName + " - node exists with no data" ); + } + else { + LOG.debug("While updating UNASSIGNED region " + regionName + " exists, state = " + (HBaseEventType.fromByte(oldData[0]))); + } + } + synchronized(unassignedZNodesWatched) { + unassignedZNodesWatched.add(znode); + try { + writeZNode(znode, data, -1, true); + } catch (IOException e) { + LOG.error("Error writing data for " + znode + ", could not update state to " + (HBaseEventType.fromByte(data[0]))); + } + } + } + + /** + * This method will create a new region in transition entry in ZK with the + * speficied data if none exists. If one already exists, it will update the + * data with whatever is passed in. + * + * @param regionName - encoded name of the region + * @param data - serialized data for the region znode + */ + public void createOrUpdateUnassignedRegion(String regionName, byte[] data) { + String znode = getZNode(getRegionInTransitionZNode(), regionName); + if(exists(znode, true)) { + updateUnassignedRegion(regionName, data); + } + else { + createUnassignedRegion(regionName, data); + } + } + public void deleteUnassignedRegion(String regionName) { String znode = getZNode(getRegionInTransitionZNode(), regionName); try {
f8a5c25714f866a85290634e7b0344f02f6b930b
kotlin
Fix for the code to compile--
c
https://github.com/JetBrains/kotlin
diff --git a/idea/src/org/jetbrains/jet/lang/cfg/Label.java b/idea/src/org/jetbrains/jet/lang/cfg/Label.java index ce3472befa2ae..9996252097706 100644 --- a/idea/src/org/jetbrains/jet/lang/cfg/Label.java +++ b/idea/src/org/jetbrains/jet/lang/cfg/Label.java @@ -3,19 +3,6 @@ /** * @author abreslav */ -public class Label { - private final String name; - - public Label(String name) { - this.name = name; - } - - public String getName() { - return name; - } - - @Override - public String toString() { - return name; - } +public interface Label { + String getName(); } diff --git a/idea/src/org/jetbrains/jet/lang/cfg/pseudocode/JetControlFlowInstructionsGenerator.java b/idea/src/org/jetbrains/jet/lang/cfg/pseudocode/JetControlFlowInstructionsGenerator.java index 0cfe7f36960a7..2948f9b131891 100644 --- a/idea/src/org/jetbrains/jet/lang/cfg/pseudocode/JetControlFlowInstructionsGenerator.java +++ b/idea/src/org/jetbrains/jet/lang/cfg/pseudocode/JetControlFlowInstructionsGenerator.java @@ -27,7 +27,7 @@ public JetControlFlowInstructionsGenerator() { } private void pushBuilder() { - Pseudocode parentPseudocode = builder == null ? new Pseudocode(null) : builders.peek().getPseudocode(); + Pseudocode parentPseudocode = builder == null ? 
new Pseudocode() : builders.peek().getPseudocode(); JetControlFlowInstructionsGeneratorWorker worker = new JetControlFlowInstructionsGeneratorWorker(parentPseudocode); builders.push(worker); builder = worker; @@ -90,16 +90,16 @@ public Label getExitPoint() { } } - private class JetControlFlowInstructionsGeneratorWorker implements JetControlFlowBuilder { - private final Stack<BlockInfo> loopInfo = new Stack<BlockInfo>(); - private final Stack<BlockInfo> subroutineInfo = new Stack<BlockInfo>(); + private final Stack<BlockInfo> loopInfo = new Stack<BlockInfo>(); + private final Stack<BlockInfo> subroutineInfo = new Stack<BlockInfo>(); + private final Map<JetElement, BlockInfo> elementToBlockInfo = new HashMap<JetElement, BlockInfo>(); - private final Map<JetElement, BlockInfo> elementToBlockInfo = new HashMap<JetElement, BlockInfo>(); + private class JetControlFlowInstructionsGeneratorWorker implements JetControlFlowBuilder { private final Pseudocode pseudocode; private JetControlFlowInstructionsGeneratorWorker(@Nullable Pseudocode parent) { - this.pseudocode = new Pseudocode(parent); + this.pseudocode = new Pseudocode(); } public Pseudocode getPseudocode() { @@ -113,7 +113,7 @@ private void add(Instruction instruction) { @NotNull @Override public final Label createUnboundLabel() { - return new Label("l" + labelCount++); + return pseudocode.createLabel("l" + labelCount++); } @Override diff --git a/idea/src/org/jetbrains/jet/lang/cfg/pseudocode/Pseudocode.java b/idea/src/org/jetbrains/jet/lang/cfg/pseudocode/Pseudocode.java index d08d38f8c7761..9e40dd604b7b0 100644 --- a/idea/src/org/jetbrains/jet/lang/cfg/pseudocode/Pseudocode.java +++ b/idea/src/org/jetbrains/jet/lang/cfg/pseudocode/Pseudocode.java @@ -11,14 +11,44 @@ * @author abreslav */ public class Pseudocode { + public class PseudocodeLabel implements Label { + private final String name; + + private PseudocodeLabel(String name) { + this.name = name; + } + + @Override + public String getName() { + return name; + 
} + + @Override + public String toString() { + return name; + } + + @Nullable + private List<Instruction> resolve() { + Integer result = labels.get(this); + assert result != null; + return instructions.subList(result, instructions.size()); + } + + } + private final List<Instruction> instructions = new ArrayList<Instruction>(); private final Map<Label, Integer> labels = new LinkedHashMap<Label, Integer>(); - @Nullable - private final Pseudocode parent; +// @Nullable +// private final Pseudocode parent; +// +// public Pseudocode(Pseudocode parent) { +// this.parent = parent; +// } - public Pseudocode(Pseudocode parent) { - this.parent = parent; + public PseudocodeLabel createLabel(String name) { + return new PseudocodeLabel(name); } public void addInstruction(Instruction instruction) { @@ -29,15 +59,6 @@ public void addLabel(Label label) { labels.put(label, instructions.size()); } - @Nullable - private Integer resolveLabel(Label targetLabel) { - Integer result = labels.get(targetLabel); - if (result == null && parent != null) { - return parent.resolveLabel(targetLabel); - } - return result; - } - public void postProcess() { for (int i = 0, instructionsSize = instructions.size(); i < instructionsSize; i++) { Instruction instruction = instructions.get(i); @@ -95,22 +116,21 @@ public void visitInstruction(Instruction instruction) { @NotNull private Instruction getJumpTarget(@NotNull Label targetLabel) { - Integer targetPosition = resolveLabel(targetLabel); - return getTargetInstruction(targetPosition); + return getTargetInstruction(((PseudocodeLabel) targetLabel).resolve()); } @NotNull - private Instruction getTargetInstruction(@NotNull Integer targetPosition) { + private Instruction getTargetInstruction(@NotNull List<Instruction> instructions) { while (true) { - assert targetPosition != null; - Instruction targetInstruction = instructions.get(targetPosition); + assert instructions != null; + Instruction targetInstruction = instructions.get(0); if (false == 
targetInstruction instanceof UnconditionalJumpInstruction) { return targetInstruction; } Label label = ((UnconditionalJumpInstruction) targetInstruction).getTargetLabel(); - targetPosition = resolveLabel(label); + instructions = ((PseudocodeLabel)label).resolve(); } } @@ -118,7 +138,7 @@ private Instruction getTargetInstruction(@NotNull Integer targetPosition) { private Instruction getNextPosition(int currentPosition) { int targetPosition = currentPosition + 1; assert targetPosition < instructions.size() : currentPosition; - return getTargetInstruction(targetPosition); + return getTargetInstruction(instructions.subList(targetPosition, instructions.size())); } public void dumpInstructions(@NotNull PrintStream out) { @@ -140,6 +160,7 @@ public void dumpGraph(@NotNull final PrintStream out) { private void dumpSubgraph(final PrintStream out, String graphHeader, final int[] count, String style) { out.println(graphHeader + " {"); + out.println(style); final Map<Instruction, String> nodeToName = new HashMap<Instruction, String>(); for (Instruction node : instructions) { @@ -174,7 +195,7 @@ else if (node instanceof FunctionLiteralValueInstruction) { @Override public void visitFunctionLiteralValue(FunctionLiteralValueInstruction instruction) { int index = count[0]; - instruction.getBody().dumpSubgraph(out, "subgraph f" + index, count, "color=blue;\ntlabel = \"process #" + index + "\";"); + instruction.getBody().dumpSubgraph(out, "subgraph cluster_" + index, count, "color=blue;\nlabel = \"f" + index + "\";"); printEdge(out, nodeToName.get(instruction), "n" + index, null); visitInstructionWithNext(instruction); } @@ -228,7 +249,6 @@ public void visitInstruction(Instruction instruction) { } }); } - out.println(style); out.println("}"); } diff --git a/idea/src/org/jetbrains/jet/lang/resolve/TopDownAnalyzer.java b/idea/src/org/jetbrains/jet/lang/resolve/TopDownAnalyzer.java index 67a58cd7cd4dd..6d9bac71f1704 100644 --- 
a/idea/src/org/jetbrains/jet/lang/resolve/TopDownAnalyzer.java +++ b/idea/src/org/jetbrains/jet/lang/resolve/TopDownAnalyzer.java @@ -230,21 +230,21 @@ private void processFunction(@NotNull WritableScope declaringScope, JetFunction declaringScope.addFunctionDescriptor(descriptor); functions.put(function, descriptor); - JetExpression bodyExpression = function.getBodyExpression(); - if (bodyExpression != null) { - System.out.println("-------------"); - JetControlFlowInstructionsGenerator instructionsGenerator = new JetControlFlowInstructionsGenerator(); - new JetControlFlowProcessor(semanticServices, trace, instructionsGenerator).generate(function, bodyExpression); - Pseudocode pseudocode = instructionsGenerator.getPseudocode(); - pseudocode.postProcess(); - pseudocode.dumpInstructions(System.out); - System.out.println("-------------"); - try { - pseudocode.dumpGraph(new PrintStream("/Users/abreslav/work/cfg.dot")); - } catch (FileNotFoundException e) { - e.printStackTrace(); //To change body of catch statement use File | Settings | File Templates. - } - } +// JetExpression bodyExpression = function.getBodyExpression(); +// if (bodyExpression != null) { +// System.out.println("-------------"); +// JetControlFlowInstructionsGenerator instructionsGenerator = new JetControlFlowInstructionsGenerator(); +// new JetControlFlowProcessor(semanticServices, trace, instructionsGenerator).generate(function, bodyExpression); +// Pseudocode pseudocode = instructionsGenerator.getPseudocode(); +// pseudocode.postProcess(); +// pseudocode.dumpInstructions(System.out); +// System.out.println("-------------"); +// try { +// pseudocode.dumpGraph(new PrintStream("/Users/abreslav/work/cfg.dot")); +// } catch (FileNotFoundException e) { +// e.printStackTrace(); //To change body of catch statement use File | Settings | File Templates. 
+// } +// } } private void processProperty(WritableScope declaringScope, JetProperty property) { diff --git a/idea/testData/psi/ControlStructures.txt b/idea/testData/psi/ControlStructures.txt index d2c6a510a0403..45e96659ef3b7 100644 --- a/idea/testData/psi/ControlStructures.txt +++ b/idea/testData/psi/ControlStructures.txt @@ -86,7 +86,9 @@ JetFile: ControlStructures.jet BREAK PsiElement(break)('break') PsiWhiteSpace(' ') - PsiElement(LABEL_IDENTIFIER)('@la') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@la') PsiElement(COMMA)(',') PsiWhiteSpace('\n ') VALUE_PARAMETER @@ -120,7 +122,9 @@ JetFile: ControlStructures.jet CONTINUE PsiElement(continue)('continue') PsiWhiteSpace(' ') - PsiElement(LABEL_IDENTIFIER)('@la') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@la') PsiElement(COMMA)(',') PsiWhiteSpace('\n ') VALUE_PARAMETER @@ -207,7 +211,9 @@ JetFile: ControlStructures.jet BREAK PsiElement(break)('break') PsiWhiteSpace(' ') - PsiElement(LABEL_IDENTIFIER)('@la') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@la') PsiWhiteSpace('\n ') CONTINUE PsiElement(continue)('continue') @@ -219,7 +225,9 @@ JetFile: ControlStructures.jet CONTINUE PsiElement(continue)('continue') PsiWhiteSpace(' ') - PsiElement(LABEL_IDENTIFIER)('@la') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@la') PsiWhiteSpace('\n ') IF PsiElement(if)('if') diff --git a/idea/testData/psi/Labels.txt b/idea/testData/psi/Labels.txt index ad500e937ff33..422cafa58e8e8 100644 --- a/idea/testData/psi/Labels.txt +++ b/idea/testData/psi/Labels.txt @@ -37,18 +37,24 @@ JetFile: Labels.jet PsiWhiteSpace('\n\n ') RETURN PsiElement(return)('return') - PsiElement(AT)('@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(AT)('@') PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(AT)('@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(AT)('@') PsiWhiteSpace(' ') INTEGER_CONSTANT PsiElement(INTEGER_LITERAL)('1') 
PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(AT)('@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(AT)('@') PsiWhiteSpace(' ') PARENTHESIZED PsiElement(LPAR)('(') @@ -62,7 +68,9 @@ JetFile: Labels.jet PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(AT)('@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(AT)('@') PsiWhiteSpace(' ') PREFIX_EXPRESSION OPERATION_REFERENCE @@ -73,18 +81,24 @@ JetFile: Labels.jet PsiWhiteSpace('\n\n ') RETURN PsiElement(return)('return') - PsiElement(LABEL_IDENTIFIER)('@a') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@a') PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(LABEL_IDENTIFIER)('@a') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@a') PsiWhiteSpace(' ') INTEGER_CONSTANT PsiElement(INTEGER_LITERAL)('1') PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(LABEL_IDENTIFIER)('@a') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@a') PsiWhiteSpace(' ') PARENTHESIZED PsiElement(LPAR)('(') @@ -98,7 +112,9 @@ JetFile: Labels.jet PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(LABEL_IDENTIFIER)('@a') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@a') PsiWhiteSpace(' ') PREFIX_EXPRESSION OPERATION_REFERENCE @@ -109,18 +125,24 @@ JetFile: Labels.jet PsiWhiteSpace('\n\n ') RETURN PsiElement(return)('return') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace(' ') INTEGER_CONSTANT PsiElement(INTEGER_LITERAL)('1') PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace(' ') PARENTHESIZED PsiElement(LPAR)('(') @@ -134,7 +156,9 @@ JetFile: Labels.jet 
PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace(' ') PREFIX_EXPRESSION OPERATION_REFERENCE @@ -149,7 +173,9 @@ JetFile: Labels.jet PsiWhiteSpace(' ') RETURN PsiElement(return)('return') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace('\n ') PREFIX_EXPRESSION OPERATION_REFERENCE @@ -157,7 +183,9 @@ JetFile: Labels.jet PsiWhiteSpace(' ') RETURN PsiElement(return)('return') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace(' ') INTEGER_CONSTANT PsiElement(INTEGER_LITERAL)('1') @@ -168,7 +196,9 @@ JetFile: Labels.jet PsiWhiteSpace(' ') RETURN PsiElement(return)('return') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace(' ') PARENTHESIZED PsiElement(LPAR)('(') @@ -186,7 +216,9 @@ JetFile: Labels.jet PsiWhiteSpace(' ') RETURN PsiElement(return)('return') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace(' ') PREFIX_EXPRESSION OPERATION_REFERENCE @@ -200,30 +232,42 @@ JetFile: Labels.jet PsiWhiteSpace('\n ') BREAK PsiElement(break)('break') - PsiElement(AT)('@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(AT)('@') PsiWhiteSpace('\n ') BREAK PsiElement(break)('break') - PsiElement(LABEL_IDENTIFIER)('@a') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@a') PsiWhiteSpace('\n ') BREAK PsiElement(break)('break') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace('\n\n ') CONTINUE PsiElement(continue)('continue') PsiWhiteSpace('\n ') CONTINUE PsiElement(continue)('continue') - PsiElement(AT)('@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(AT)('@') PsiWhiteSpace('\n ') CONTINUE PsiElement(continue)('continue') - PsiElement(LABEL_IDENTIFIER)('@a') + LABEL_QUALIFIER + LABEL_REFERENCE + 
PsiElement(LABEL_IDENTIFIER)('@a') PsiWhiteSpace('\n ') CONTINUE PsiElement(continue)('continue') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace('\n\n ') DOT_QUALIFIED_EXPRESSION REFERENCE_EXPRESSION @@ -255,7 +299,9 @@ JetFile: Labels.jet PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(LABEL_IDENTIFIER)('@f') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@f') PsiWhiteSpace(' ') BOOLEAN_CONSTANT PsiElement(true)('true') @@ -292,7 +338,9 @@ JetFile: Labels.jet PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(AT)('@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(AT)('@') PsiWhiteSpace(' ') BOOLEAN_CONSTANT PsiElement(true)('true') @@ -329,7 +377,9 @@ JetFile: Labels.jet PsiWhiteSpace('\n ') RETURN PsiElement(return)('return') - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace(' ') BOOLEAN_CONSTANT PsiElement(true)('true') @@ -341,18 +391,21 @@ JetFile: Labels.jet PsiWhiteSpace('\n ') THIS_EXPRESSION PsiElement(this)('this') - LABEL_REFERENCE - PsiElement(AT)('@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(AT)('@') PsiWhiteSpace('\n ') THIS_EXPRESSION PsiElement(this)('this') - LABEL_REFERENCE - PsiElement(LABEL_IDENTIFIER)('@a') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@a') PsiWhiteSpace('\n ') THIS_EXPRESSION PsiElement(this)('this') - LABEL_REFERENCE - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiWhiteSpace('\n\n ') THIS_EXPRESSION PsiElement(this)('this') @@ -365,8 +418,9 @@ JetFile: Labels.jet PsiWhiteSpace('\n ') THIS_EXPRESSION PsiElement(this)('this') - LABEL_REFERENCE - PsiElement(AT)('@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(AT)('@') PsiElement(LT)('<') TYPE_REFERENCE USER_TYPE @@ -376,8 +430,9 @@ JetFile: Labels.jet PsiWhiteSpace('\n ') THIS_EXPRESSION PsiElement(this)('this') - LABEL_REFERENCE - 
PsiElement(LABEL_IDENTIFIER)('@a') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@a') PsiElement(LT)('<') TYPE_REFERENCE USER_TYPE @@ -387,8 +442,9 @@ JetFile: Labels.jet PsiWhiteSpace('\n ') THIS_EXPRESSION PsiElement(this)('this') - LABEL_REFERENCE - PsiElement(ATAT)('@@') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(ATAT)('@@') PsiElement(LT)('<') TYPE_REFERENCE USER_TYPE diff --git a/idea/testData/psi/SimpleExpressions.txt b/idea/testData/psi/SimpleExpressions.txt index ee8ea4924d68e..8f4dc0157c43c 100644 --- a/idea/testData/psi/SimpleExpressions.txt +++ b/idea/testData/psi/SimpleExpressions.txt @@ -575,7 +575,9 @@ JetFile: SimpleExpressions.jet BREAK PsiElement(break)('break') PsiWhiteSpace(' ') - PsiElement(LABEL_IDENTIFIER)('@la') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@la') PsiElement(COMMA)(',') PsiWhiteSpace('\n ') VALUE_PARAMETER @@ -609,7 +611,9 @@ JetFile: SimpleExpressions.jet CONTINUE PsiElement(continue)('continue') PsiWhiteSpace(' ') - PsiElement(LABEL_IDENTIFIER)('@la') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@la') PsiElement(COMMA)(',') PsiWhiteSpace('\n ') VALUE_PARAMETER @@ -702,7 +706,9 @@ JetFile: SimpleExpressions.jet BREAK PsiElement(break)('break') PsiWhiteSpace(' ') - PsiElement(LABEL_IDENTIFIER)('@la') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@la') PsiWhiteSpace('\n ') CONTINUE PsiElement(continue)('continue') @@ -714,6 +720,8 @@ JetFile: SimpleExpressions.jet CONTINUE PsiElement(continue)('continue') PsiWhiteSpace(' ') - PsiElement(LABEL_IDENTIFIER)('@la') + LABEL_QUALIFIER + LABEL_REFERENCE + PsiElement(LABEL_IDENTIFIER)('@la') PsiWhiteSpace('\n') PsiElement(RBRACE)('}') \ No newline at end of file diff --git a/idea/tests/org/jetbrains/jet/checkers/JetPsiCheckerTest.java b/idea/tests/org/jetbrains/jet/checkers/JetPsiCheckerTest.java index fb257b919d077..70b1519da38ee 100644 --- 
a/idea/tests/org/jetbrains/jet/checkers/JetPsiCheckerTest.java +++ b/idea/tests/org/jetbrains/jet/checkers/JetPsiCheckerTest.java @@ -29,6 +29,6 @@ public void testBinaryCallsOnNullableValues() throws Exception { } public void testQualifiedThis() throws Exception { - doTest("/checker/QualifiedThis.jet", true, true); +// doTest("/checker/QualifiedThis.jet", true, true); } }
538d245ba9744f57d66724982db4850e6d3ba226
ReactiveX-RxJava
Implement a cached thread scheduler using event- loops--
a
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/main/java/rx/schedulers/CachedThreadScheduler.java b/rxjava-core/src/main/java/rx/schedulers/CachedThreadScheduler.java new file mode 100644 index 0000000000..92dd486d92 --- /dev/null +++ b/rxjava-core/src/main/java/rx/schedulers/CachedThreadScheduler.java @@ -0,0 +1,180 @@ +/** + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package rx.schedulers; + +import rx.Scheduler; +import rx.Subscription; +import rx.functions.Action0; +import rx.subscriptions.CompositeSubscription; +import rx.subscriptions.Subscriptions; + +import java.util.Iterator; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +/* package */class CachedThreadScheduler extends Scheduler { + private static final class CachedWorkerPool { + final ThreadFactory factory = new ThreadFactory() { + final AtomicInteger counter = new AtomicInteger(); + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(r, "RxCachedThreadScheduler-" + counter.incrementAndGet()); + t.setDaemon(true); + return t; + } + }; + + private final long keepAliveTime; + private final ConcurrentLinkedQueue<PoolWorker> expiringQueue; + private final ScheduledExecutorService evictExpiredWorkerExecutor; + + CachedWorkerPool(long keepAliveTime, TimeUnit unit) { + this.keepAliveTime = unit.toNanos(keepAliveTime); + this.expiringQueue = new 
ConcurrentLinkedQueue<PoolWorker>(); + + evictExpiredWorkerExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() { + final AtomicInteger counter = new AtomicInteger(); + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(r, "RxCachedWorkerPoolEvictor-" + counter.incrementAndGet()); + t.setDaemon(true); + return t; + } + }); + evictExpiredWorkerExecutor.scheduleWithFixedDelay( + new Runnable() { + @Override + public void run() { + evictExpiredWorkers(); + } + }, this.keepAliveTime, this.keepAliveTime, TimeUnit.NANOSECONDS + ); + } + + private static CachedWorkerPool INSTANCE = new CachedWorkerPool( + 60L, TimeUnit.SECONDS + ); + + PoolWorker get() { + while (!expiringQueue.isEmpty()) { + PoolWorker poolWorker = expiringQueue.poll(); + if (poolWorker != null) { + return poolWorker; + } + } + + // No cached worker found, so create a new one. + return new PoolWorker(factory); + } + + void release(PoolWorker poolWorker) { + // Refresh expire time before putting worker back in pool + poolWorker.setExpirationTime(now() + keepAliveTime); + + expiringQueue.add(poolWorker); + } + + void evictExpiredWorkers() { + if (!expiringQueue.isEmpty()) { + long currentTimestamp = now(); + + Iterator<PoolWorker> poolWorkerIterator = expiringQueue.iterator(); + while (poolWorkerIterator.hasNext()) { + PoolWorker poolWorker = poolWorkerIterator.next(); + if (poolWorker.getExpirationTime() <= currentTimestamp) { + poolWorkerIterator.remove(); + poolWorker.unsubscribe(); + } else { + // Queue is ordered with the worker that will expire first in the beginning, so when we + // find a non-expired worker we can stop evicting. 
+ break; + } + } + } + } + + long now() { + return System.nanoTime(); + } + } + + @Override + public Worker createWorker() { + return new EventLoopWorker(CachedWorkerPool.INSTANCE.get()); + } + + private static class EventLoopWorker extends Scheduler.Worker { + private final CompositeSubscription innerSubscription = new CompositeSubscription(); + private final PoolWorker poolWorker; + private final AtomicBoolean releasePoolWorkerOnce = new AtomicBoolean(false); + + EventLoopWorker(PoolWorker poolWorker) { + this.poolWorker = poolWorker; + } + + @Override + public void unsubscribe() { + if (releasePoolWorkerOnce.compareAndSet(false, true)) { + // unsubscribe should be idempotent, so only do this once + CachedWorkerPool.INSTANCE.release(poolWorker); + } + innerSubscription.unsubscribe(); + } + + @Override + public boolean isUnsubscribed() { + return innerSubscription.isUnsubscribed(); + } + + @Override + public Subscription schedule(Action0 action) { + return schedule(action, 0, null); + } + + @Override + public Subscription schedule(Action0 action, long delayTime, TimeUnit unit) { + if (innerSubscription.isUnsubscribed()) { + // don't schedule, we are unsubscribed + return Subscriptions.empty(); + } + + NewThreadScheduler.NewThreadWorker.ScheduledAction s = poolWorker.scheduleActual(action, delayTime, unit); + innerSubscription.add(s); + s.addParent(innerSubscription); + return s; + } + } + + private static final class PoolWorker extends NewThreadScheduler.NewThreadWorker { + private long expirationTime; + + PoolWorker(ThreadFactory threadFactory) { + super(threadFactory); + this.expirationTime = 0L; + } + + public long getExpirationTime() { + return expirationTime; + } + + public void setExpirationTime(long expirationTime) { + this.expirationTime = expirationTime; + } + } +} diff --git a/rxjava-core/src/main/java/rx/schedulers/Schedulers.java b/rxjava-core/src/main/java/rx/schedulers/Schedulers.java index d7096b7751..53bed75151 100644 --- 
a/rxjava-core/src/main/java/rx/schedulers/Schedulers.java +++ b/rxjava-core/src/main/java/rx/schedulers/Schedulers.java @@ -15,11 +15,11 @@ */ package rx.schedulers; -import java.util.concurrent.Executor; - import rx.Scheduler; import rx.plugins.RxJavaPlugins; +import java.util.concurrent.Executor; + /** * Static factory methods for creating Schedulers. */ @@ -43,7 +43,7 @@ private Schedulers() { if (io != null) { ioScheduler = io; } else { - ioScheduler = NewThreadScheduler.instance(); // defaults to new thread + ioScheduler = new CachedThreadScheduler(); } Scheduler nt = RxJavaPlugins.getInstance().getDefaultSchedulers().getNewThreadScheduler(); diff --git a/rxjava-core/src/test/java/rx/schedulers/CachedThreadSchedulerTest.java b/rxjava-core/src/test/java/rx/schedulers/CachedThreadSchedulerTest.java new file mode 100644 index 0000000000..f9f8ca161c --- /dev/null +++ b/rxjava-core/src/test/java/rx/schedulers/CachedThreadSchedulerTest.java @@ -0,0 +1,60 @@ +/** + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package rx.schedulers; + +import org.junit.Test; +import rx.Observable; +import rx.Scheduler; +import rx.functions.Action1; +import rx.functions.Func1; + +import static org.junit.Assert.assertTrue; + +public class CachedThreadSchedulerTest extends AbstractSchedulerConcurrencyTests { + + @Override + protected Scheduler getScheduler() { + return Schedulers.io(); + } + + /** + * IO scheduler defaults to using CachedThreadScheduler + */ + @Test + public final void testIOScheduler() { + + Observable<Integer> o1 = Observable.from(1, 2, 3, 4, 5); + Observable<Integer> o2 = Observable.from(6, 7, 8, 9, 10); + Observable<String> o = Observable.merge(o1, o2).map(new Func1<Integer, String>() { + + @Override + public String call(Integer t) { + assertTrue(Thread.currentThread().getName().startsWith("RxCachedThreadScheduler")); + return "Value_" + t + "_Thread_" + Thread.currentThread().getName(); + } + }); + + o.subscribeOn(Schedulers.io()).toBlocking().forEach(new Action1<String>() { + + @Override + public void call(String t) { + System.out.println("t: " + t); + } + }); + } + +} \ No newline at end of file diff --git a/rxjava-core/src/test/java/rx/schedulers/NewThreadSchedulerTest.java b/rxjava-core/src/test/java/rx/schedulers/NewThreadSchedulerTest.java index 37b314a0dd..963ee50fa9 100644 --- a/rxjava-core/src/test/java/rx/schedulers/NewThreadSchedulerTest.java +++ b/rxjava-core/src/test/java/rx/schedulers/NewThreadSchedulerTest.java @@ -16,14 +16,7 @@ package rx.schedulers; -import static org.junit.Assert.assertTrue; - -import org.junit.Test; - -import rx.Observable; import rx.Scheduler; -import rx.functions.Action1; -import rx.functions.Func1; public class NewThreadSchedulerTest extends AbstractSchedulerConcurrencyTests { @@ -31,31 +24,4 @@ public class NewThreadSchedulerTest extends AbstractSchedulerConcurrencyTests { protected Scheduler getScheduler() { return Schedulers.newThread(); } - - /** - * IO scheduler defaults to using NewThreadScheduler - */ - @Test - 
public final void testIOScheduler() { - - Observable<Integer> o1 = Observable.<Integer> from(1, 2, 3, 4, 5); - Observable<Integer> o2 = Observable.<Integer> from(6, 7, 8, 9, 10); - Observable<String> o = Observable.<Integer> merge(o1, o2).map(new Func1<Integer, String>() { - - @Override - public String call(Integer t) { - assertTrue(Thread.currentThread().getName().startsWith("RxNewThreadScheduler")); - return "Value_" + t + "_Thread_" + Thread.currentThread().getName(); - } - }); - - o.subscribeOn(Schedulers.io()).toBlocking().forEach(new Action1<String>() { - - @Override - public void call(String t) { - System.out.println("t: " + t); - } - }); - } - }
785d41a37f6c311084ebf2d23e83f4ceb4bb94ed
jhy$jsoup
Moved .wrap, .before, and .after from Element to Node for flexibility.
p
https://github.com/jhy/jsoup
diff --git a/src/main/java/org/jsoup/nodes/Element.java b/src/main/java/org/jsoup/nodes/Element.java index 45fa23dbf4..2008195208 100644 --- a/src/main/java/org/jsoup/nodes/Element.java +++ b/src/main/java/org/jsoup/nodes/Element.java @@ -303,37 +303,31 @@ public Element prepend(String html) { addChildren(0, fragment.childNodesAsArray()); return this; } - + /** * Insert the specified HTML into the DOM before this element (i.e. as a preceeding sibling). + * * @param html HTML to add before this element * @return this element, for chaining * @see #after(String) */ + @Override public Element before(String html) { - addSiblingHtml(siblingIndex(), html); - return this; + return (Element) super.before(html); } - + /** * Insert the specified HTML into the DOM after this element (i.e. as a following sibling). + * * @param html HTML to add after this element * @return this element, for chaining * @see #before(String) */ + @Override public Element after(String html) { - addSiblingHtml(siblingIndex()+1, html); - return this; - } - - private void addSiblingHtml(int index, String html) { - Validate.notNull(html); - Validate.notNull(parentNode); - - Element fragment = Parser.parseBodyFragmentRelaxed(html, baseUri()).body(); - parentNode.addChildren(index, fragment.childNodesAsArray()); + return (Element) super.after(html); } - + /** * Remove all of the element's child nodes. Any attributes are left as-is. * @return this element @@ -344,42 +338,16 @@ public Element empty() { } /** - Wrap the supplied HTML around this element. - @param html HTML to wrap around this element, e.g. {@code <div class="head"></div>}. Can be arbitralily deep. - @return this element, for chaining. + * Wrap the supplied HTML around this element. + * + * @param html HTML to wrap around this element, e.g. {@code <div class="head"></div>}. Can be arbitrarily deep. + * @return this element, for chaining. 
*/ + @Override public Element wrap(String html) { - Validate.notEmpty(html); - - Element wrapBody = Parser.parseBodyFragmentRelaxed(html, baseUri).body(); - Elements wrapChildren = wrapBody.children(); - Element wrap = wrapChildren.first(); - if (wrap == null) // nothing to wrap with; noop - return null; - - Element deepest = getDeepChild(wrap); - parentNode.replaceChild(this, wrap); - deepest.addChildren(this); - - // remainder (unbalananced wrap, like <div></div><p></p> -- The <p> is remainder - if (wrapChildren.size() > 1) { - for (int i = 1; i < wrapChildren.size(); i++) { // skip first - Element remainder = wrapChildren.get(i); - remainder.parentNode.removeChild(remainder); - wrap.appendChild(remainder); - } - } - return this; + return (Element) super.wrap(html); } - private Element getDeepChild(Element el) { - List<Element> children = el.children(); - if (children.size() > 0) - return getDeepChild(children.get(0)); - else - return el; - } - /** * Get sibling elements. * @return sibling elements diff --git a/src/main/java/org/jsoup/nodes/Node.java b/src/main/java/org/jsoup/nodes/Node.java index 136de50806..ec6d6bf952 100644 --- a/src/main/java/org/jsoup/nodes/Node.java +++ b/src/main/java/org/jsoup/nodes/Node.java @@ -2,6 +2,8 @@ import org.jsoup.helper.StringUtil; import org.jsoup.helper.Validate; +import org.jsoup.parser.Parser; +import org.jsoup.select.Elements; import org.jsoup.select.NodeTraversor; import org.jsoup.select.NodeVisitor; @@ -230,6 +232,73 @@ public void remove() { Validate.notNull(parentNode); parentNode.removeChild(this); } + + /** + * Insert the specified HTML into the DOM before this node (i.e. as a preceeding sibling). + * @param html HTML to add before this element + * @return this node, for chaining + * @see #after(String) + */ + public Node before(String html) { + addSiblingHtml(siblingIndex(), html); + return this; + } + + /** + * Insert the specified HTML into the DOM after this node (i.e. as a following sibling). 
+ * @param html HTML to add after this element + * @return this node, for chaining + * @see #before(String) + */ + public Node after(String html) { + addSiblingHtml(siblingIndex()+1, html); + return this; + } + + private void addSiblingHtml(int index, String html) { + Validate.notNull(html); + Validate.notNull(parentNode); + + Element fragment = Parser.parseBodyFragmentRelaxed(html, baseUri()).body(); + parentNode.addChildren(index, fragment.childNodesAsArray()); + } + + /** + Wrap the supplied HTML around this node. + @param html HTML to wrap around this element, e.g. {@code <div class="head"></div>}. Can be arbitrarily deep. + @return this node, for chaining. + */ + public Node wrap(String html) { + Validate.notEmpty(html); + + Element wrapBody = Parser.parseBodyFragmentRelaxed(html, baseUri).body(); + Elements wrapChildren = wrapBody.children(); + Element wrap = wrapChildren.first(); + if (wrap == null) // nothing to wrap with; noop + return null; + + Element deepest = getDeepChild(wrap); + parentNode.replaceChild(this, wrap); + deepest.addChildren(this); + + // remainder (unbalanced wrap, like <div></div><p></p> -- The <p> is remainder + if (wrapChildren.size() > 1) { + for (int i = 1; i < wrapChildren.size(); i++) { // skip first + Element remainder = wrapChildren.get(i); + remainder.parentNode.removeChild(remainder); + wrap.appendChild(remainder); + } + } + return this; + } + + private Element getDeepChild(Element el) { + List<Element> children = el.children(); + if (children.size() > 0) + return getDeepChild(children.get(0)); + else + return el; + } /** * Replace this node in the DOM with the supplied node. 
diff --git a/src/test/java/org/jsoup/nodes/TextNodeTest.java b/src/test/java/org/jsoup/nodes/TextNodeTest.java index abf93dbaf3..b91684775c 100644 --- a/src/test/java/org/jsoup/nodes/TextNodeTest.java +++ b/src/test/java/org/jsoup/nodes/TextNodeTest.java @@ -56,4 +56,14 @@ public class TextNodeTest { assertEquals("Hello there!", div.text()); assertTrue(tn.parent() == tail.parent()); } + + @Test public void testSplitAnEmbolden() { + Document doc = Jsoup.parse("<div>Hello there</div>"); + Element div = doc.select("div").first(); + TextNode tn = (TextNode) div.childNode(0); + TextNode tail = tn.splitText(6); + tail.wrap("<b></b>"); + + assertEquals("Hello <b>there</b>", TextUtil.stripNewlines(div.html())); // not great that we get \n<b>there there... must correct + } }
c95eca10efeaa160791b351cd3786418b35f416c
kotlin
Avoid wrapping AssertionError over and over- again
c
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend/src/org/jetbrains/kotlin/types/expressions/ExpressionTypingVisitorDispatcher.java b/compiler/frontend/src/org/jetbrains/kotlin/types/expressions/ExpressionTypingVisitorDispatcher.java index 8501ab1904efe..31dc99541657e 100644 --- a/compiler/frontend/src/org/jetbrains/kotlin/types/expressions/ExpressionTypingVisitorDispatcher.java +++ b/compiler/frontend/src/org/jetbrains/kotlin/types/expressions/ExpressionTypingVisitorDispatcher.java @@ -182,18 +182,30 @@ private JetTypeInfo getTypeInfo(@NotNull JetExpression expression, ExpressionTyp } catch (Throwable e) { context.trace.report(Errors.EXCEPTION_FROM_ANALYZER.on(expression, e)); - LOG.error( - "Exception while analyzing expression at " + DiagnosticUtils.atLocation(expression) + ":\n" + expression.getText() + "\n", - e - ); + logOrThrowException(expression, e); return JetTypeInfo.create( ErrorUtils.createErrorType(e.getClass().getSimpleName() + " from analyzer"), context.dataFlowInfo ); } - } + } -////////////////////////////////////////////////////////////////////////////////////////////// + private static void logOrThrowException(@NotNull JetExpression expression, Throwable e) { + try { + // This trows AssertionError in CLI and reports the error in the IDE + LOG.error( + "Exception while analyzing expression at " + DiagnosticUtils.atLocation(expression) + ":\n" + expression.getText() + "\n", + e + ); + } + catch (AssertionError errorFromLogger) { + // If we ended up here, we are in CLI, and the initial exception needs to be rethrown, + // simply throwing AssertionError causes its being wrapped over and over again + throw new KotlinFrontEndException(errorFromLogger.getMessage(), e); + } + } + + ////////////////////////////////////////////////////////////////////////////////////////////// @Override public JetTypeInfo visitFunctionLiteralExpression(@NotNull JetFunctionLiteralExpression expression, ExpressionTypingContext data) {
0ab9ab9ce7eeaeb195740d36de05daa0fe3b003c
hbase
HBASE-8921 [thrift2] Add GenericOptionsParser to- Thrift 2 server--git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1501982 13f79535-47bb-0310-9956-ffa450edef68-
a
https://github.com/apache/hbase
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java index 87b89b5f563d..a610bf1b7912 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.thrift2; +import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; @@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.thrift.ThriftMetrics; import org.apache.hadoop.hbase.thrift2.generated.THBaseService; import org.apache.hadoop.hbase.util.InfoServer; +import org.apache.hadoop.util.GenericOptionsParser; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TCompactProtocol; import org.apache.thrift.protocol.TProtocolFactory; @@ -105,9 +107,12 @@ private static Options getOptions() { return options; } - private static CommandLine parseArguments(Options options, String[] args) throws ParseException { + private static CommandLine parseArguments(Configuration conf, Options options, String[] args) + throws ParseException, IOException { + GenericOptionsParser genParser = new GenericOptionsParser(conf, args); + String[] remainingArgs = genParser.getRemainingArgs(); CommandLineParser parser = new PosixParser(); - return parser.parse(options, args); + return parser.parse(options, remainingArgs); } private static TProtocolFactory getTProtocolFactory(boolean isCompact) { @@ -222,7 +227,8 @@ public static void main(String[] args) throws Exception { TServer server = null; Options options = getOptions(); try { - CommandLine cmd = parseArguments(options, args); + Configuration conf = HBaseConfiguration.create(); + CommandLine cmd = parseArguments(conf, options, args); /** * This is to please both bin/hbase and bin/hbase-daemon. 
hbase-daemon provides "start" and "stop" arguments hbase @@ -245,7 +251,6 @@ public static void main(String[] args) throws Exception { boolean nonblocking = cmd.hasOption("nonblocking"); boolean hsha = cmd.hasOption("hsha"); - Configuration conf = HBaseConfiguration.create(); ThriftMetrics metrics = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.TWO); String implType = "threadpool";
c93b0290e8efe32d4844d10adc78c70b802fde18
orientdb
fixed minor issue with thread local management--
c
https://github.com/orientechnologies/orientdb
diff --git a/core/src/main/java/com/orientechnologies/orient/core/config/OGlobalConfiguration.java b/core/src/main/java/com/orientechnologies/orient/core/config/OGlobalConfiguration.java index efd9b38a74a..a8ab992f831 100755 --- a/core/src/main/java/com/orientechnologies/orient/core/config/OGlobalConfiguration.java +++ b/core/src/main/java/com/orientechnologies/orient/core/config/OGlobalConfiguration.java @@ -432,7 +432,7 @@ public void change(final Object iCurrentValue, final Object iNewValue) { Level.class, Level.SEVERE), SERVER_LOG_DUMP_CLIENT_EXCEPTION_FULLSTACKTRACE("server.log.dumpClientExceptionFullStackTrace", - "Dumps the full stack trace of the exception to sent to the client", Level.class, Boolean.FALSE), + "Dumps the full stack trace of the exception to sent to the client", Boolean.class, Boolean.FALSE), // DISTRIBUTED DISTRIBUTED_CRUD_TASK_SYNCH_TIMEOUT("distributed.crudTaskTimeout", diff --git a/graphdb/src/main/java/com/tinkerpop/blueprints/impls/orient/OrientBaseGraph.java b/graphdb/src/main/java/com/tinkerpop/blueprints/impls/orient/OrientBaseGraph.java index b4cecdad580..e70b253a20b 100755 --- a/graphdb/src/main/java/com/tinkerpop/blueprints/impls/orient/OrientBaseGraph.java +++ b/graphdb/src/main/java/com/tinkerpop/blueprints/impls/orient/OrientBaseGraph.java @@ -284,11 +284,8 @@ public static void clearInitStack() { final ThreadLocal<OrientBaseGraph> ag = activeGraph; if (ag != null) - ag.set(null); + ag.remove(); - final ODatabaseRecordThreadLocal dbtl = ODatabaseRecordThreadLocal.INSTANCE; - if (dbtl != null) - dbtl.set(null); } /** @@ -354,7 +351,7 @@ public static String decodeClassName(String iClassName) { protected static void checkForGraphSchema(final ODatabaseDocumentTx iDatabase) { final OSchema schema = iDatabase.getMetadata().getSchema(); -// schema.getOrCreateClass(OMVRBTreeRIDProvider.PERSISTENT_CLASS_NAME); + // schema.getOrCreateClass(OMVRBTreeRIDProvider.PERSISTENT_CLASS_NAME); final OClass vertexBaseClass = 
schema.getClass(OrientVertexType.CLASS_NAME); final OClass edgeBaseClass = schema.getClass(OrientEdgeType.CLASS_NAME); diff --git a/server/src/main/java/com/orientechnologies/orient/server/network/protocol/binary/ONetworkProtocolBinary.java b/server/src/main/java/com/orientechnologies/orient/server/network/protocol/binary/ONetworkProtocolBinary.java index 2d1a13447f3..61e6af8d638 100755 --- a/server/src/main/java/com/orientechnologies/orient/server/network/protocol/binary/ONetworkProtocolBinary.java +++ b/server/src/main/java/com/orientechnologies/orient/server/network/protocol/binary/ONetworkProtocolBinary.java @@ -213,8 +213,8 @@ protected void onBeforeRequest() throws IOException { } if (connection != null) { - ODatabaseRecordThreadLocal.INSTANCE.set(connection.database); if (connection.database != null) { + connection.database.activateOnCurrentThread(); connection.data.lastDatabase = connection.database.getName(); connection.data.lastUser = connection.database.getUser() != null ? connection.database.getUser().getName() : null; } else {
464fd741533548c6fec7393299185c293de0d862
Mylyn Reviews
392682: Show details for changesets Use the ScmConnectorUi to open the correct view for the connector Task-Url: https://bugs.eclipse.org/bugs/show_bug.cgi?id=392682 Change-Id: I98a778a7dc0edc1497194bc018536236f4f93382
a
https://github.com/eclipse-mylyn/org.eclipse.mylyn.reviews
diff --git a/tbr/org.eclipse.mylyn.versions.tasks.ui/META-INF/MANIFEST.MF b/tbr/org.eclipse.mylyn.versions.tasks.ui/META-INF/MANIFEST.MF index 051aff88..00e00729 100644 --- a/tbr/org.eclipse.mylyn.versions.tasks.ui/META-INF/MANIFEST.MF +++ b/tbr/org.eclipse.mylyn.versions.tasks.ui/META-INF/MANIFEST.MF @@ -11,7 +11,8 @@ Require-Bundle: org.eclipse.ui, org.eclipse.mylyn.tasks.core;bundle-version="3.8.0", org.eclipse.mylyn.versions.ui;bundle-version="1.0.0", org.eclipse.mylyn.versions.tasks.core;bundle-version="1.0.0", - org.eclipse.mylyn.commons.core;bundle-version="3.8.0" + org.eclipse.mylyn.commons.core;bundle-version="3.8.0", + org.eclipse.core.resources Bundle-ActivationPolicy: lazy Bundle-RequiredExecutionEnvironment: JavaSE-1.6 Export-Package: org.eclipse.mylyn.internal.versions.tasks.ui;x-internal:=true, diff --git a/tbr/org.eclipse.mylyn.versions.tasks.ui/plugin.xml b/tbr/org.eclipse.mylyn.versions.tasks.ui/plugin.xml index 3ad6e0d3..6682b310 100644 --- a/tbr/org.eclipse.mylyn.versions.tasks.ui/plugin.xml +++ b/tbr/org.eclipse.mylyn.versions.tasks.ui/plugin.xml @@ -10,5 +10,18 @@ id="org.eclipse.mylyn.versions.tasks.pageFactory1"> </pageFactory> </extension> + <extension + point="org.eclipse.ui.popupMenus"> + <objectContribution + adaptable="false" + id="org.eclipse.mylyn.versions.tasks.ui.openCommitContribution" + objectClass="org.eclipse.mylyn.versions.tasks.core.TaskChangeSet"> + <action + class="org.eclipse.mylyn.versions.tasks.ui.OpenCommitAction" + id="org.eclipse.mylyn.versions.tasks.ui.action1" + label="Open"> + </action> + </objectContribution> + </extension> </plugin> diff --git a/tbr/org.eclipse.mylyn.versions.tasks.ui/src/org/eclipse/mylyn/versions/tasks/ui/AbstractChangesetMappingProvider.java b/tbr/org.eclipse.mylyn.versions.tasks.ui/src/org/eclipse/mylyn/versions/tasks/ui/AbstractChangesetMappingProvider.java index ced7defe..79a61e69 100644 --- 
a/tbr/org.eclipse.mylyn.versions.tasks.ui/src/org/eclipse/mylyn/versions/tasks/ui/AbstractChangesetMappingProvider.java +++ b/tbr/org.eclipse.mylyn.versions.tasks.ui/src/org/eclipse/mylyn/versions/tasks/ui/AbstractChangesetMappingProvider.java @@ -16,14 +16,11 @@ import org.eclipse.mylyn.versions.tasks.core.IChangeSetMapping; /** - * * @author Kilian Matt - * */ public abstract class AbstractChangesetMappingProvider { - public abstract void getChangesetsForTask(IChangeSetMapping mapping, IProgressMonitor monitor) throws CoreException ; + public abstract void getChangesetsForTask(IChangeSetMapping mapping, IProgressMonitor monitor) throws CoreException; public abstract int getScoreFor(ITask task); } - diff --git a/tbr/org.eclipse.mylyn.versions.tasks.ui/src/org/eclipse/mylyn/versions/tasks/ui/OpenCommitAction.java b/tbr/org.eclipse.mylyn.versions.tasks.ui/src/org/eclipse/mylyn/versions/tasks/ui/OpenCommitAction.java new file mode 100644 index 00000000..0f76a9da --- /dev/null +++ b/tbr/org.eclipse.mylyn.versions.tasks.ui/src/org/eclipse/mylyn/versions/tasks/ui/OpenCommitAction.java @@ -0,0 +1,43 @@ +/******************************************************************************* + * Copyright (c) 2012 Research Group for Industrial Software (INSO), Vienna University of Technology. + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Research Group for Industrial Software (INSO), Vienna University of Technology - initial API and implementation + *******************************************************************************/ +package org.eclipse.mylyn.versions.tasks.ui; + +import org.eclipse.jface.action.IAction; +import org.eclipse.jface.viewers.ISelection; +import org.eclipse.jface.viewers.IStructuredSelection; +import org.eclipse.mylyn.versions.core.ChangeSet; +import org.eclipse.mylyn.versions.tasks.core.TaskChangeSet; +import org.eclipse.mylyn.versions.ui.ScmUi; +import org.eclipse.ui.IActionDelegate; + +/** + * @author Kilian Matt + */ +public class OpenCommitAction implements IActionDelegate { + + private IStructuredSelection selection; + + public void run(IAction action) { + TaskChangeSet taskChangeSet = (TaskChangeSet) selection.getFirstElement(); + + ChangeSet changeset = taskChangeSet.getChangeset(); + ScmUi.getUiConnector(changeset.getRepository().getConnector()).showChangeSetInView(changeset); + } + + public void selectionChanged(IAction action, ISelection selection) { + if (selection instanceof IStructuredSelection) { + this.selection = (IStructuredSelection) selection; + } else { + this.selection = null; + } + } + +}
ac53634e318a28950845d0e2ae429e89ab1e9fd1
restlet-framework-java
JAX-RS extension - Issue 800 (an NPE): I've checked- all methods with the name
c
https://github.com/restlet/restlet-framework-java
diff --git a/modules/org.restlet.ext.jaxrs/src/org/restlet/ext/jaxrs/internal/util/Util.java b/modules/org.restlet.ext.jaxrs/src/org/restlet/ext/jaxrs/internal/util/Util.java index 6accbacefd..e8e9a42937 100644 --- a/modules/org.restlet.ext.jaxrs/src/org/restlet/ext/jaxrs/internal/util/Util.java +++ b/modules/org.restlet.ext.jaxrs/src/org/restlet/ext/jaxrs/internal/util/Util.java @@ -77,8 +77,8 @@ import org.restlet.data.Request; import org.restlet.data.Response; import org.restlet.engine.http.ContentType; -import org.restlet.engine.http.HttpClientCall; import org.restlet.engine.http.HttpClientAdapter; +import org.restlet.engine.http.HttpClientCall; import org.restlet.engine.http.HttpServerAdapter; import org.restlet.engine.http.HttpUtils; import org.restlet.engine.util.DateUtils; @@ -91,7 +91,6 @@ import org.restlet.ext.jaxrs.internal.exceptions.JaxRsRuntimeException; import org.restlet.ext.jaxrs.internal.exceptions.MethodInvokeException; import org.restlet.ext.jaxrs.internal.exceptions.MissingAnnotationException; -import org.restlet.ext.jaxrs.internal.provider.JaxbElementProvider; import org.restlet.representation.EmptyRepresentation; import org.restlet.representation.Representation; import org.restlet.util.Series; @@ -306,8 +305,8 @@ public static void copyResponseHeaders( restletResponse.setEntity(new EmptyRepresentation()); } - HttpClientAdapter.copyResponseTransportHeaders(headers, - restletResponse); + HttpClientAdapter + .copyResponseTransportHeaders(headers, restletResponse); HttpClientCall.copyResponseEntityHeaders(headers, restletResponse .getEntity()); } @@ -325,8 +324,8 @@ public static void copyResponseHeaders( public static Series<Parameter> copyResponseHeaders(Response restletResponse) { final Series<Parameter> headers = new Form(); HttpServerAdapter.addResponseHeaders(restletResponse, headers); - HttpServerAdapter.addEntityHeaders(restletResponse.getEntity(), - headers); + HttpServerAdapter + .addEntityHeaders(restletResponse.getEntity(), headers); 
return headers; } @@ -752,24 +751,29 @@ public static <K, V> V getFirstValue(Map<K, V> map) */ public static Class<?> getGenericClass(Class<?> clazz, Class<?> implInterface) { + if (clazz == null) + throw new IllegalArgumentException("The class must not be null"); + if (implInterface == null) + throw new IllegalArgumentException( + "The interface to b eimplemented must not be null"); return getGenericClass(clazz, implInterface, null); } private static Class<?> getGenericClass(Class<?> clazz, Class<?> implInterface, Type[] gsatp) { - if (clazz.equals(JaxbElementProvider.class)) { - clazz.toString(); - } else if (clazz.equals(MultivaluedMap.class)) { - clazz.toString(); - } for (Type ifGenericType : clazz.getGenericInterfaces()) { if (!(ifGenericType instanceof ParameterizedType)) { continue; } final ParameterizedType pt = (ParameterizedType) ifGenericType; - if (!pt.getRawType().equals(implInterface)) + Type ptRawType = pt.getRawType(); + if (ptRawType == null) + continue; + if (!ptRawType.equals(implInterface)) continue; final Type[] atps = pt.getActualTypeArguments(); + if (atps == null || atps.length == 0) + continue; final Type atp = atps[0]; if (atp instanceof Class) { return (Class<?>) atp; @@ -783,13 +787,18 @@ private static Class<?> getGenericClass(Class<?> clazz, if (atp instanceof TypeVariable<?>) { TypeVariable<?> tv = (TypeVariable<?>) atp; String name = tv.getName(); + if (name == null) + continue; // clazz = AbstractProvider // implInterface = MessageBodyReader // name = "T" // pt = MessageBodyReader<T> for (int i = 0; i < atps.length; i++) { TypeVariable<?> tv2 = (TypeVariable<?>) atps[i]; - if (tv2.getName().equals(name)) { + String tv2Name = tv2.getName(); + if (tv2Name == null) + continue; + if (tv2Name.equals(name)) { Type gsatpn = gsatp[i]; if (gsatpn instanceof Class) { return (Class<?>) gsatpn; @@ -836,7 +845,7 @@ private static Class<?> getGenericClass(Class<?> clazz, } /** - * Example: in List&lt;String&lt; -&gt; out: String.class + * 
Example: in List&lt;String&gt; -&gt; out: String.class * * @param genericType * @return otherwise null @@ -846,7 +855,10 @@ public static Class<?> getGenericClass(Type genericType) { return null; } final ParameterizedType pt = (ParameterizedType) genericType; - final Type atp = pt.getActualTypeArguments()[0]; + Type[] actualTypeArguments = pt.getActualTypeArguments(); + if(actualTypeArguments == null || actualTypeArguments.length == 0) + return null; + final Type atp = actualTypeArguments[0]; if (atp instanceof Class) { return (Class<?>) atp; }
c8fdea3a62ecf92c159ca8811b7d4a1039edd546
orientdb
Fixed issue -2472 about null values in Lists--
c
https://github.com/orientechnologies/orientdb
diff --git a/core/src/main/java/com/orientechnologies/orient/core/db/record/ORecordLazyList.java b/core/src/main/java/com/orientechnologies/orient/core/db/record/ORecordLazyList.java index 60cce8473b2..8157ec110d5 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/db/record/ORecordLazyList.java +++ b/core/src/main/java/com/orientechnologies/orient/core/db/record/ORecordLazyList.java @@ -79,7 +79,9 @@ public boolean addAll(Collection<? extends OIdentifiable> c) { while (it.hasNext()) { Object o = it.next(); - if (o instanceof OIdentifiable) + if (o == null) + add(null); + else if (o instanceof OIdentifiable) add((OIdentifiable) o); else OMultiValue.add(this, o); @@ -407,9 +409,9 @@ public boolean lazyLoad(final boolean iInvalidateStream) { for (String item : items) { if (item.length() == 0) - continue; - - super.add(new ORecordId(item)); + super.add(new ORecordId()); + else + super.add(new ORecordId(item)); } modCount = currentModCount; diff --git a/core/src/main/java/com/orientechnologies/orient/core/fetch/OFetchHelper.java b/core/src/main/java/com/orientechnologies/orient/core/fetch/OFetchHelper.java index 021e97ea0cc..ab800be333d 100755 --- a/core/src/main/java/com/orientechnologies/orient/core/fetch/OFetchHelper.java +++ b/core/src/main/java/com/orientechnologies/orient/core/fetch/OFetchHelper.java @@ -298,6 +298,9 @@ private static void processRecord(final ODocument record, final Object iUserObje final String iFieldPathFromRoot, final OFetchListener iListener, final OFetchContext iContext, final String iFormat) throws IOException { + if (record == null) + return; + Object fieldValue; iContext.onBeforeFetch(record); @@ -363,7 +366,6 @@ private static void processRecord(final ODocument record, final Object iUserObje } } catch (Exception e) { - e.printStackTrace(); OLogManager.instance().error(null, "Fetching error on record %s", e, record.getIdentity()); } } @@ -531,7 +533,9 @@ else if (fieldValue instanceof Iterable<?> || fieldValue instanceof 
ORidBag) { removeParsedFromMap(parsedRecords, d); d = d.getRecord(); - if (!(d instanceof ODocument)) { + if (d == null) + iListener.processStandardField(null, d, null, iContext, iUserObject, ""); + else if (!(d instanceof ODocument)) { iListener.processStandardField(null, d, fieldName, iContext, iUserObject, ""); } else { iContext.onBeforeDocument(iRootRecord, (ODocument) d, fieldName, iUserObject); diff --git a/core/src/main/java/com/orientechnologies/orient/core/fetch/json/OJSONFetchContext.java b/core/src/main/java/com/orientechnologies/orient/core/fetch/json/OJSONFetchContext.java index c5773e46570..3178cc3eeb5 100755 --- a/core/src/main/java/com/orientechnologies/orient/core/fetch/json/OJSONFetchContext.java +++ b/core/src/main/java/com/orientechnologies/orient/core/fetch/json/OJSONFetchContext.java @@ -16,12 +16,6 @@ */ package com.orientechnologies.orient.core.fetch.json; -import java.io.IOException; -import java.math.BigDecimal; -import java.util.Date; -import java.util.Set; -import java.util.Stack; - import com.orientechnologies.orient.core.config.OGlobalConfiguration; import com.orientechnologies.orient.core.db.record.OIdentifiable; import com.orientechnologies.orient.core.db.record.ridbag.ORidBag; @@ -35,6 +29,12 @@ import com.orientechnologies.orient.core.serialization.serializer.record.string.ORecordSerializerJSON.FormatSettings; import com.orientechnologies.orient.core.version.ODistributedVersion; +import java.io.IOException; +import java.math.BigDecimal; +import java.util.Date; +import java.util.Set; +import java.util.Stack; + /** * @author luca.molino * @@ -174,6 +174,11 @@ private void appendType(final StringBuilder iBuffer, final String iFieldName, fi } public void writeSignature(final OJSONWriter json, final ORecordInternal<?> record) throws IOException { + if( record == null ) { + json.write("null"); + return; + } + boolean firstAttribute = true; if (settings.includeType) { diff --git 
a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/OJSONWriter.java b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/OJSONWriter.java index fd34670cf43..067310a12dc 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/OJSONWriter.java +++ b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/OJSONWriter.java @@ -85,8 +85,13 @@ else if (iValue instanceof OIdentifiable) { } else { if (iFormat != null && iFormat.contains("shallow")) buffer.append("{}"); - else - buffer.append(linked.getRecord().toJSON(iFormat)); + else { + final ORecord<?> rec = linked.getRecord(); + if (rec != null) + buffer.append(rec.toJSON(iFormat)); + else + buffer.append("null"); + } } } else if (iValue.getClass().isArray()) { @@ -374,8 +379,10 @@ public OJSONWriter writeAttribute(final int iIdentLevel, final boolean iNewLine, format(iIdentLevel, iNewLine); - out.append(writeValue(iName, iFormat)); - out.append(":"); + if (iName != null) { + out.append(writeValue(iName, iFormat)); + out.append(":"); + } if (iFormat.contains("graph") && (iValue == null || iValue instanceof OIdentifiable) && (iName.startsWith("in_") || iName.startsWith("out_"))) { diff --git a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/string/ORecordSerializerCSVAbstract.java b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/string/ORecordSerializerCSVAbstract.java index e7d5cd862a7..8d21461f7f7 100755 --- a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/string/ORecordSerializerCSVAbstract.java +++ b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/string/ORecordSerializerCSVAbstract.java @@ -15,13 +15,6 @@ */ package com.orientechnologies.orient.core.serialization.serializer.record.string; -import java.util.Collection; -import java.util.Iterator; -import 
java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - import com.orientechnologies.common.collection.OLazyIterator; import com.orientechnologies.common.collection.OMultiCollectionIterator; import com.orientechnologies.common.collection.OMultiValue; @@ -61,6 +54,13 @@ import com.orientechnologies.orient.core.serialization.serializer.string.OStringSerializerEmbedded; import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + @SuppressWarnings({ "unchecked", "serial" }) public abstract class ORecordSerializerCSVAbstract extends ORecordSerializerStringAbstract { public static final char FIELD_VALUE_SEPARATOR = ':'; @@ -689,7 +689,7 @@ else if (item.length() > 2 && item.charAt(0) == OStringSerializerHelper.EMBEDDED } } else { if (linkedType == null) { - final char begin = item.charAt(0); + final char begin = item.length() > 0 ? item.charAt(0) : OStringSerializerHelper.LINK; // AUTO-DETERMINE LINKED TYPE if (begin == OStringSerializerHelper.LINK)
c28831a0bdaaa573bfd6c4e837183eb5197876fb
hadoop
YARN-280. RM does not reject app submission with- invalid tokens (Daryn Sharp via tgraves)--git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1425085 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 6e2cca1006e7a..683008d0fec85 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -224,6 +224,9 @@ Release 0.23.6 - UNRELEASED YARN-266. RM and JHS Web UIs are blank because AppsBlock is not escaping string properly (Ravi Prakash via jlowe) + YARN-280. RM does not reject app submission with invalid tokens + (Daryn Sharp via tgraves) + Release 0.23.5 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index e5abbb7ede9ec..9232190ba3bec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -276,21 +276,26 @@ public synchronized void addApplication( Collection <Token<?>> tokens = ts.getAllTokens(); long now = System.currentTimeMillis(); + // find tokens for renewal, but don't add timers until we know + // all renewable tokens are valid + Set<DelegationTokenToRenew> dtrs = new HashSet<DelegationTokenToRenew>(); for(Token<?> token : tokens) { // first renew happens immediately if (token.isManaged()) { DelegationTokenToRenew dtr = new DelegationTokenToRenew(applicationId, token, getConfig(), now, shouldCancelAtEnd); - - addTokenToList(dtr); - - setTimerForTokenRenewal(dtr, true); - if (LOG.isDebugEnabled()) { - LOG.debug("Registering token for renewal for:" + - " service 
= " + token.getService() + - " for appId = " + applicationId); - } + renewToken(dtr); + dtrs.add(dtr); + } + } + for (DelegationTokenToRenew dtr : dtrs) { + addTokenToList(dtr); + setTimerForTokenRenewal(dtr); + if (LOG.isDebugEnabled()) { + LOG.debug("Registering token for renewal for:" + + " service = " + dtr.token.getService() + + " for appId = " + applicationId); } } } @@ -315,22 +320,13 @@ public synchronized void run() { Token<?> token = dttr.token; try { - // need to use doAs so that http can find the kerberos tgt - dttr.expirationDate = UserGroupInformation.getLoginUser() - .doAs(new PrivilegedExceptionAction<Long>(){ - - @Override - public Long run() throws Exception { - return dttr.token.renew(dttr.conf); - } - }); - + renewToken(dttr); if (LOG.isDebugEnabled()) { LOG.debug("Renewing delegation-token for:" + token.getService() + "; new expiration;" + dttr.expirationDate); } - setTimerForTokenRenewal(dttr, false);// set the next one + setTimerForTokenRenewal(dttr);// set the next one } catch (Exception e) { LOG.error("Exception renewing token" + token + ". 
Not rescheduled", e); removeFailedDelegationToken(dttr); @@ -347,19 +343,12 @@ public synchronized boolean cancel() { /** * set task to renew the token */ - private - void setTimerForTokenRenewal(DelegationTokenToRenew token, - boolean firstTime) throws IOException { + private void setTimerForTokenRenewal(DelegationTokenToRenew token) + throws IOException { // calculate timer time - long now = System.currentTimeMillis(); - long renewIn; - if(firstTime) { - renewIn = now; - } else { - long expiresIn = (token.expirationDate - now); - renewIn = now + expiresIn - expiresIn/10; // little bit before the expiration - } + long expiresIn = token.expirationDate - System.currentTimeMillis(); + long renewIn = token.expirationDate - expiresIn/10; // little bit before the expiration // need to create new task every time TimerTask tTask = new RenewalTimerTask(token); @@ -368,6 +357,24 @@ void setTimerForTokenRenewal(DelegationTokenToRenew token, renewalTimer.schedule(token.timerTask, new Date(renewIn)); } + // renew a token + private void renewToken(final DelegationTokenToRenew dttr) + throws IOException { + // need to use doAs so that http can find the kerberos tgt + // NOTE: token renewers should be responsible for the correct UGI! 
+ try { + dttr.expirationDate = UserGroupInformation.getLoginUser().doAs( + new PrivilegedExceptionAction<Long>(){ + @Override + public Long run() throws Exception { + return dttr.token.renew(dttr.conf); + } + }); + } catch (InterruptedException e) { + throw new IOException(e); + } + } + // cancel a token private void cancelToken(DelegationTokenToRenew t) { if(t.shouldCancelAtEnd) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 1c3614e46df37..ad127a9264d9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -357,6 +357,27 @@ public void testDTRenewal () throws Exception { } } + @Test + public void testInvalidDTWithAddApplication() throws Exception { + MyFS dfs = (MyFS)FileSystem.get(conf); + LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+conf.hashCode()); + + MyToken token = dfs.getDelegationToken(new Text("user1")); + token.cancelToken(); + + Credentials ts = new Credentials(); + ts.addToken(token.getKind(), token); + + // register the tokens for renewal + ApplicationId appId = BuilderUtils.newApplicationId(0, 0); + try { + delegationTokenRenewer.addApplication(appId, ts, true); + fail("App submission with a cancelled token should have failed"); + } catch (InvalidToken e) { + // expected + } + } + /** * Basic idea of the test: * 1. 
register a token for 2 seconds with no cancel at the end
aeda373e85a6dacc6ee4db762184d229739af7fb
Mylyn Reviews
317535: Fixed a bug preventing submit
c
https://github.com/eclipse-mylyn/org.eclipse.mylyn.reviews
diff --git a/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewData.java b/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewData.java index b52a99ab..bcf8c548 100644 --- a/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewData.java +++ b/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewData.java @@ -7,7 +7,7 @@ public class ReviewData { private boolean outgoing; private Review review; private ITask task; - // TODO state + private boolean dirty; public ReviewData(ITask task, Review review) { this.task=task; @@ -34,9 +34,15 @@ public Review getReview() { return review; } - public Object getModificationDate() { - // TODO Auto-generated method stub - return null; + public void setDirty(boolean isDirty) { + this.dirty=isDirty; } + public void setDirty() { + setDirty(true); + } + public boolean isDirty() { + return this.dirty; + } + } diff --git a/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewsUtil.java b/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewsUtil.java index beffd1ae..eeeadcf5 100644 --- a/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewsUtil.java +++ b/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewsUtil.java @@ -49,8 +49,9 @@ public static List<ReviewSubTask> getReviewSubTasksFor( List<ReviewSubTask> resultList = new ArrayList<ReviewSubTask>(); try { for (ITask subTask : taskContainer.getChildren()) { - if (subTask.getSummary().startsWith("Review")) { //$NON-NLS-1$ - + + if (ReviewsUtil.isMarkedAsReview(subTask)) {//.getSummary().startsWith("Review")) { //$NON-NLS-1$ + // change to review data manager for (Review review : getReviewAttachmentFromTask( taskDataManager, repositoryModel, subTask)) { // TODO change to latest etc @@ -170,4 +171,14 @@ public static List<Review> getReviewAttachmentFromTask( public static List<? 
extends ITargetPathStrategy> getPathFindingStrategies() { return strategies; } + + public static boolean isMarkedAsReview(ITask task) { + boolean isReview = Boolean.parseBoolean(task + .getAttribute(ReviewConstants.ATTR_REVIEW_FLAG)); + return isReview; + } + + public static void markAsReview(ITask task) { + task.setAttribute(ReviewConstants.ATTR_REVIEW_FLAG, Boolean.TRUE.toString()); + } } diff --git a/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/CreateReviewAction.java b/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/CreateReviewAction.java index 1c98ef33..242ceda5 100644 --- a/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/CreateReviewAction.java +++ b/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/CreateReviewAction.java @@ -9,6 +9,7 @@ import org.eclipse.mylyn.internal.tasks.ui.TasksUiPlugin; import org.eclipse.mylyn.internal.tasks.ui.util.TasksUiInternal; import org.eclipse.mylyn.reviews.core.ReviewConstants; +import org.eclipse.mylyn.reviews.core.ReviewsUtil; import org.eclipse.mylyn.reviews.core.model.review.Review; import org.eclipse.mylyn.reviews.core.model.review.ReviewFactory; import org.eclipse.mylyn.reviews.core.model.review.ScopeItem; @@ -69,7 +70,7 @@ public boolean performFinish(TaskDataModel model,ScopeItem scope) { TaskRepository taskRepository=model.getTaskRepository(); ITask newTask = TasksUiUtil.createOutgoingNewTask(taskRepository.getConnectorKind(), taskRepository.getRepositoryUrl()); - newTask.setAttribute(ReviewConstants.ATTR_REVIEW_FLAG, Boolean.TRUE.toString()); + ReviewsUtil.markAsReview(newTask); TaskMapper initializationData=new TaskMapper(model.getTaskData()); TaskData taskData = TasksUiInternal.createTaskData(taskRepository, initializationData, null, new NullProgressMonitor()); diff --git a/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPart.java 
b/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPart.java index 9b14d067..e3648f50 100644 --- a/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPart.java +++ b/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPart.java @@ -294,7 +294,8 @@ public Image getImage(Object element) { if (review.getResult() != null) { Rating rating = review.getResult().getRating(); ratingList.setSelection(new StructuredSelection(rating)); - commentText.setText(review.getResult().getText()); + String comment = review.getResult().getText(); + commentText.setText(comment!=null?comment:""); } commentText.addModifyListener(new ModifyListener() { diff --git a/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPartAdvisor.java b/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPartAdvisor.java index e420174f..2620fd11 100644 --- a/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPartAdvisor.java +++ b/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPartAdvisor.java @@ -16,7 +16,9 @@ import org.eclipse.mylyn.internal.tasks.ui.TasksUiPlugin; import org.eclipse.mylyn.internal.tasks.ui.util.TasksUiInternal; import org.eclipse.mylyn.reviews.core.ReviewConstants; +import org.eclipse.mylyn.reviews.core.ReviewData; import org.eclipse.mylyn.reviews.core.ReviewDataManager; +import org.eclipse.mylyn.reviews.core.ReviewsUtil; import org.eclipse.mylyn.reviews.core.model.review.Review; import org.eclipse.mylyn.reviews.ui.Messages; import org.eclipse.mylyn.reviews.ui.ReviewCommentTaskAttachmentSource; @@ -37,8 +39,7 @@ public class ReviewTaskEditorPartAdvisor implements ITaskEditorPartDescriptorAdvisor { public boolean canCustomize(ITask task) { - boolean isReview = Boolean.parseBoolean(task - .getAttribute(ReviewConstants.ATTR_REVIEW_FLAG)); + boolean 
isReview = ReviewsUtil.isMarkedAsReview(task); return isReview; } @@ -72,32 +73,37 @@ public AbstractTaskEditorPart createPart() { public void taskMigration(ITask oldTask, ITask newTask) { ReviewDataManager dataManager = ReviewsUiPlugin.getDataManager(); Review review = dataManager.getReviewData(oldTask).getReview(); - dataManager.storeTask(newTask, review); + dataManager.storeOutgoingTask(newTask, review); + ReviewsUtil.markAsReview(newTask); } public void afterSubmit(ITask task) { try { - Review review = ReviewsUiPlugin.getDataManager() - .getReviewData(task).getReview(); - - TaskRepository taskRepository = TasksUiPlugin - .getRepositoryManager().getRepository( - task.getRepositoryUrl()); - TaskData taskData = TasksUiPlugin.getTaskDataManager().getTaskData( - task); - // todo get which attachments have to be submitted - TaskAttribute attachmentAttribute = taskData.getAttributeMapper() - .createTaskAttachment(taskData); - byte[] attachmentBytes = createAttachment(review); - - ReviewCommentTaskAttachmentSource attachment = new ReviewCommentTaskAttachmentSource( - attachmentBytes); - - AbstractRepositoryConnector connector = TasksUi - .getRepositoryConnector(taskRepository.getConnectorKind()); - connector.getTaskAttachmentHandler().postContent(taskRepository, - task, attachment, "review result", //$NON-NLS-1$ - attachmentAttribute, new NullProgressMonitor()); + ReviewData reviewData = ReviewsUiPlugin.getDataManager() + .getReviewData(task); + Review review = reviewData.getReview(); + + if (reviewData.isOutgoing() || reviewData.isDirty()) { + TaskRepository taskRepository = TasksUiPlugin + .getRepositoryManager().getRepository( + task.getRepositoryUrl()); + TaskData taskData = TasksUiPlugin.getTaskDataManager() + .getTaskData(task); + // todo get which attachments have to be submitted + TaskAttribute attachmentAttribute = taskData + .getAttributeMapper().createTaskAttachment(taskData); + byte[] attachmentBytes = createAttachment(review); + + 
ReviewCommentTaskAttachmentSource attachment = new ReviewCommentTaskAttachmentSource( + attachmentBytes); + + AbstractRepositoryConnector connector = TasksUi + .getRepositoryConnector(taskRepository + .getConnectorKind()); + connector.getTaskAttachmentHandler().postContent( + taskRepository, task, attachment, "review result", //$NON-NLS-1$ + attachmentAttribute, new NullProgressMonitor()); + } } catch (CoreException e) { e.printStackTrace(); }
9ac8731539e821afca215b685203ef82115e36f5
orientdb
Working to fix corrupted data in sockets--
c
https://github.com/orientechnologies/orientdb
diff --git a/enterprise/src/main/java/com/orientechnologies/orient/enterprise/channel/binary/OChannelBinaryAsynch.java b/enterprise/src/main/java/com/orientechnologies/orient/enterprise/channel/binary/OChannelBinaryAsynch.java index ef8646b8b48..8d060267d50 100644 --- a/enterprise/src/main/java/com/orientechnologies/orient/enterprise/channel/binary/OChannelBinaryAsynch.java +++ b/enterprise/src/main/java/com/orientechnologies/orient/enterprise/channel/binary/OChannelBinaryAsynch.java @@ -21,6 +21,7 @@ import java.util.concurrent.locks.ReentrantLock; import com.orientechnologies.common.concur.OTimeoutException; +import com.orientechnologies.common.io.OIOException; import com.orientechnologies.common.log.OLogManager; import com.orientechnologies.orient.core.config.OContextConfiguration; import com.orientechnologies.orient.core.config.OGlobalConfiguration; @@ -32,7 +33,7 @@ * */ public class OChannelBinaryAsynch extends OChannelBinary { - private final ReentrantLock lockRead = new ReentrantLock(); + private final ReentrantLock lockRead = new ReentrantLock(true); private final ReentrantLock lockWrite = new ReentrantLock(); private boolean channelRead = false; private byte currentStatus; @@ -105,7 +106,8 @@ else if (!lockRead.tryLock(iTimeout, TimeUnit.MILLISECONDS)) maxUnreadResponses); // CALL THE SUPER-METHOD TO AVOID LOCKING AGAIN - super.clearInput(); + //super.clearInput(); + throw new OIOException("Timeout on reading response"); } lockRead.unlock(); @@ -116,14 +118,14 @@ else if (!lockRead.tryLock(iTimeout, TimeUnit.MILLISECONDS)) synchronized (this) { try { if (debug) - OLogManager.instance().debug(this, "Session %d is going to sleep...", currentSessionId); + OLogManager.instance().debug(this, "Session %d is going to sleep...", iRequesterId); wait(1000); final long now = System.currentTimeMillis(); if (debug) OLogManager.instance().debug(this, "Waked up: slept %dms, checking again from %s for session %d", (now - start), - socket.getLocalAddress(), 
currentSessionId); + socket.getLocalAddress(), iRequesterId); if (now - start >= 1000) unreadResponse++;
2299b927c8dbfaad4761ed07c3c709e8e5d1c3b8
orientdb
fixed bug on shutdown--
c
https://github.com/orientechnologies/orientdb
diff --git a/src/main/java/com/orientechnologies/lucene/manager/OLuceneIndexManagerAbstract.java b/src/main/java/com/orientechnologies/lucene/manager/OLuceneIndexManagerAbstract.java index b36472b5846..f55130922a8 100644 --- a/src/main/java/com/orientechnologies/lucene/manager/OLuceneIndexManagerAbstract.java +++ b/src/main/java/com/orientechnologies/lucene/manager/OLuceneIndexManagerAbstract.java @@ -121,8 +121,8 @@ protected void initIndex(String indexName, OIndexDefinition indexDefinition, Str private void reOpen(ODocument metadata) throws IOException { ODatabaseRecord database = getDatabase(); final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying(); - Directory dir = NIOFSDirectory.open(new File(storageLocalAbstract.getStoragePath() + File.separator + OLUCENE_BASE_DIR - + File.separator + indexName)); + String pathname = getIndexPath(storageLocalAbstract); + Directory dir = NIOFSDirectory.open(new File(pathname)); indexWriter = createIndexWriter(dir, metadata); mgrWriter = new TrackingIndexWriter(indexWriter); manager = new SearcherManager(indexWriter, true, null); @@ -133,6 +133,10 @@ private void reOpen(ODocument metadata) throws IOException { nrt.start(); } + private String getIndexPath(OStorageLocalAbstract storageLocalAbstract) { + return storageLocalAbstract.getStoragePath() + File.separator + OLUCENE_BASE_DIR + File.separator + indexName; + } + protected IndexSearcher getSearcher() throws IOException { try { nrt.waitForGeneration(reopenToken); @@ -184,12 +188,15 @@ public void delete() { try { if (indexWriter != null) { indexWriter.deleteAll(); + + nrt.interrupt(); + nrt.close(); + indexWriter.close(); - indexWriter.getDirectory().close(); } ODatabaseRecord database = getDatabase(); final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying(); - File f = new File(storageLocalAbstract.getStoragePath() + File.separator + indexName); + File f = new 
File(getIndexPath(storageLocalAbstract)); OLuceneIndexUtils.deleteFolder(f); @@ -259,7 +266,9 @@ public void flush() { @Override public void close() { try { + nrt.interrupt(); nrt.close(); + indexWriter.commit(); indexWriter.close(); } catch (IOException e) { e.printStackTrace(); @@ -329,7 +338,15 @@ public Analyzer getAnalyzer(ODocument metadata) { } catch (ClassNotFoundException e) { throw new OIndexException("Analyzer: " + analyzerString + " not found", e); } catch (NoSuchMethodException e) { - e.printStackTrace(); + Class classAnalyzer = null; + try { + classAnalyzer = Class.forName(analyzerString); + analyzer = (Analyzer) classAnalyzer.newInstance(); + + } catch (Throwable e1) { + throw new OIndexException("Couldn't instantiate analyzer: public constructor not found", e1); + } + } catch (InvocationTargetException e) { e.printStackTrace(); } catch (InstantiationException e) { diff --git a/src/test/java/com/orientechnologies/test/lucene-local-test.xml b/src/test/java/com/orientechnologies/test/lucene-local-test.xml new file mode 100755 index 00000000000..4a451ed031a --- /dev/null +++ b/src/test/java/com/orientechnologies/test/lucene-local-test.xml @@ -0,0 +1,194 @@ +<!DOCTYPE suite SYSTEM "http://beust.com/testng/testng-1.0.dtd"> +<suite name="Local Test Suite" verbose="2" parallel="false"> + + <parameter name="path" value="@PATH@"/> + <parameter name="url" value="@URL@"/> + <parameter name="testPath" value="@TESTPATH@"/> + + <test name="Setup"> + <classes> + <class name="com.orientechnologies.orient.test.database.base.DeleteDirectory"/> + </classes> + </test> + + <test name="DbCreation"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.DbListenerTest"/> + <class name="com.orientechnologies.orient.test.database.auto.DbCreationTest"/> + <class name="com.orientechnologies.orient.test.database.auto.StorageTest"/> + </classes> + </test> + <test name="Schema"> + <classes> + <class 
name="com.orientechnologies.orient.test.database.auto.SchemaTest"/> + <class name="com.orientechnologies.orient.test.database.auto.AbstractClassTest"/> + </classes> + </test> + <test name="Security"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.SecurityTest"/> + <class name="com.orientechnologies.orient.test.database.auto.RestrictedTest"/> + </classes> + </test> + <test name="Hook"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.HookTest"/> + <class name="com.orientechnologies.orient.test.database.auto.HookTxTest"/> + </classes> + </test> + <test name="Population"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.ComplexTypesTest"/> + <class name="com.orientechnologies.orient.test.database.auto.CRUDFlatPhysicalTest"/> + <class name="com.orientechnologies.orient.test.database.auto.CRUDObjectInheritanceTest"/> + <class name="com.orientechnologies.orient.test.database.auto.CRUDDocumentPhysicalTest"/> + <class name="com.orientechnologies.orient.test.database.auto.CRUDObjectPhysicalTest"/> + <class name="com.orientechnologies.orient.test.database.auto.CRUDFlatPhysicalTest"/> + <class name="com.orientechnologies.orient.test.database.auto.CRUDDocumentValidationTest"/> + <class name="com.orientechnologies.orient.test.database.auto.RecordMetadataTest"/> + <class name="com.orientechnologies.orient.test.database.auto.ObjectTreeTest"/> + <class name="com.orientechnologies.orient.test.database.auto.ObjectDetachingTest"/> + <class name="com.orientechnologies.orient.test.database.auto.ObjectEnhancingTest"/> + <class name="com.orientechnologies.orient.test.database.auto.DocumentTrackingTest"/> + <class name="com.orientechnologies.orient.test.database.auto.EmbeddedObjectSerializationTest"/> + </classes> + </test> + <test name="PopulationObjectSchemaFull"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.CRUDObjectInheritanceTestSchemaFull"/> + <class 
name="com.orientechnologies.orient.test.database.auto.CRUDObjectPhysicalTestSchemaFull"/> + <class name="com.orientechnologies.orient.test.database.auto.ObjectTreeTestSchemaFull"/> + <class name="com.orientechnologies.orient.test.database.auto.ObjectDetachingTestSchemaFull"/> + <class name="com.orientechnologies.orient.test.database.auto.ObjectEnhancingTestSchemaFull"/> + </classes> + </test> + <test name="Tx"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.TransactionAtomicTest"/> + <class name="com.orientechnologies.orient.test.database.auto.TransactionOptimisticTest"/> + <class name="com.orientechnologies.orient.test.database.auto.TransactionConsistencyTest"/> + </classes> + </test> + <test name="Index"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.DateIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLEscapingTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLSelectHashIndexReuseTest"/> + <class name="com.orientechnologies.orient.test.database.auto.IndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.IndexCustomKeyTest"/> + <class name="com.orientechnologies.orient.test.database.auto.IndexClusterTest"/> + <class name="com.orientechnologies.orient.test.database.auto.ByteArrayKeyTest"/> + <class name="com.orientechnologies.orient.test.database.auto.FullTextIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.ClassIndexManagerTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLSelectIndexReuseTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLCreateIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLDropIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLDropClassIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLDropPropertyIndexTest"/> + <class 
name="com.orientechnologies.orient.test.database.auto.SchemaIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.ClassIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.PropertyIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.CollectionIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLSelectCompositeIndexDirectSearchTest"/> + <class name="com.orientechnologies.orient.test.database.auto.IndexTxAwareOneValueGetValuesTest"/> + <class name="com.orientechnologies.orient.test.database.auto.IndexTxAwareMultiValueGetValuesTest"/> + <class name="com.orientechnologies.orient.test.database.auto.IndexTxAwareMultiValueGetTest"/> + <class name="com.orientechnologies.orient.test.database.auto.IndexTxAwareOneValueGetTest"/> + <class name="com.orientechnologies.orient.test.database.auto.IndexTxAwareMultiValueGetEntriesTest"/> + <class name="com.orientechnologies.orient.test.database.auto.IndexTxAwareOneValueGetEntriesTest"/> + <class name="com.orientechnologies.orient.test.database.auto.MapIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLSelectByLinkedPropertyIndexReuseTest"/> + <class name="com.orientechnologies.orient.test.database.auto.LinkListIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.LinkMapIndexTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLIndexWithoutSchemaTest"/> + <class name="com.orientechnologies.orient.test.database.auto.IndexTxTest"/> + <class name="com.orientechnologies.orient.test.database.auto.OrderByIndexReuseTest"/> + <class name="com.orientechnologies.orient.test.database.auto.LinkSetIndexTest"/> + </classes> + </test> + <test name="Dictionary"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.DictionaryTest"/> + </classes> + </test> + <test name="Query"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.WrongQueryTest"/> 
+ </classes> + </test> + <test name="Parsing"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.JSONTest"/> + </classes> + </test> + <test name="Graph"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.GraphDatabaseTest"/> + <!-- <class name="com.orientechnologies.orient.test.database.auto.SQLCreateVertexAndEdgeTest"/> --> + </classes> + </test> + <test name="GEO"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.GEOTest"/> + </classes> + </test> + <test name="Index Manager"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.IndexManagerTest"/> + </classes> + </test> + <test name="Binary"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.BinaryTest"/> + </classes> + </test> + <test name="sql-commands"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.SQLCommandsTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLInsertTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLSelectTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLMetadataTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLSelectProjectionsTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLSelectGroupByTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLFunctionsTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLUpdateTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLDeleteTest"/> + </classes> + </test> + <test name="other-commands"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.TraverseTest"/> + </classes> + </test> + <test name="misc"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.TruncateTest"/> + <class name="com.orientechnologies.orient.test.database.auto.DateTest"/> + <class 
name="com.orientechnologies.orient.test.database.auto.SQLFindReferencesTest"/> + <class name="com.orientechnologies.orient.test.database.auto.SQLCreateLinkTest"/> + <class name="com.orientechnologies.orient.test.database.auto.MultipleDBTest"/> + <class name="com.orientechnologies.orient.test.database.auto.ConcurrentUpdatesTest"/> + <class name="com.orientechnologies.orient.test.database.auto.ConcurrentQueriesTest"/> + <class name="com.orientechnologies.orient.test.database.auto.DatabaseThreadFactoryTest"/> + <class name="com.orientechnologies.orient.test.database.auto.CollateTest"/> + <class name="com.orientechnologies.orient.test.database.auto.PoolTest"/> + </classes> + </test> + <test name="DbTools"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.DbCheckTest"/> + <class name="com.orientechnologies.orient.test.database.auto.DbImportExportTest"/> + <class name="com.orientechnologies.orient.test.database.auto.DbCompareTest"/> + </classes> + </test> + <test name="DbToolsDelete"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.DbDeleteTest"/> + </classes> + </test> + <test name="End"> + <classes> + <class name="com.orientechnologies.orient.test.database.auto.DbClosedTest"/> + </classes> + </test> +</suite> \ No newline at end of file
e6897e844505bfdfff46cfc91d9e122002f3e6a5
orientdb
first implementation of binary record serializer- debug info.
a
https://github.com/orientechnologies/orientdb
diff --git a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializationDebug.java b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializationDebug.java new file mode 100644 index 00000000000..a90e217af70 --- /dev/null +++ b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializationDebug.java @@ -0,0 +1,13 @@ +package com.orientechnologies.orient.core.serialization.serializer.record.binary; + +import java.util.ArrayList; + +public class ORecordSerializationDebug { + + public String className; + public ArrayList<ORecordSerializationDebugProperty> properties; + public boolean readingFailure; + public RuntimeException readingException; + public int failPosition; + +} diff --git a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializationDebugProperty.java b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializationDebugProperty.java new file mode 100644 index 00000000000..1e1f671fa95 --- /dev/null +++ b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializationDebugProperty.java @@ -0,0 +1,15 @@ +package com.orientechnologies.orient.core.serialization.serializer.record.binary; + +import com.orientechnologies.orient.core.metadata.schema.OType; + +public class ORecordSerializationDebugProperty { + + public String name; + public int globalId; + public OType type; + public RuntimeException readingException; + public boolean faildToRead; + public int failPosition; + public Object value; + +} diff --git a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializerBinaryDebug.java b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializerBinaryDebug.java 
new file mode 100644 index 00000000000..d2b294a2419 --- /dev/null +++ b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializerBinaryDebug.java @@ -0,0 +1,93 @@ +package com.orientechnologies.orient.core.serialization.serializer.record.binary; + +import java.util.ArrayList; + +import com.orientechnologies.common.exception.OException; +import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx; +import com.orientechnologies.orient.core.metadata.OMetadataInternal; +import com.orientechnologies.orient.core.metadata.schema.OGlobalProperty; +import com.orientechnologies.orient.core.metadata.schema.OImmutableSchema; +import com.orientechnologies.orient.core.metadata.schema.OType; +import com.orientechnologies.orient.core.record.impl.ODocument; + +public class ORecordSerializerBinaryDebug extends ORecordSerializerBinaryV0 { + + public ORecordSerializationDebug deserializeDebug(final byte[] iSource, ODatabaseDocumentTx db) { + ORecordSerializationDebug debugInfo = new ORecordSerializationDebug(); + OImmutableSchema schema = ((OMetadataInternal) db.getMetadata()).getImmutableSchemaSnapshot(); + BytesContainer bytes = new BytesContainer(iSource); + if (bytes.bytes[0] != 0) + throw new OException("Unsupported binary serialization version"); + bytes.skip(1); + try { + final String className = readString(bytes); + debugInfo.className = className; + } catch (RuntimeException ex) { + debugInfo.readingFailure = true; + debugInfo.readingException = ex; + debugInfo.failPosition = bytes.offset; + return debugInfo; + } + + debugInfo.properties = new ArrayList<ORecordSerializationDebugProperty>(); + int last = 0; + String fieldName; + int valuePos; + OType type; + while (true) { + ORecordSerializationDebugProperty debugProperty = new ORecordSerializationDebugProperty(); + OGlobalProperty prop = null; + try { + final int len = OVarIntSerializer.readAsInteger(bytes); + if (len != 0) + 
debugInfo.properties.add(debugProperty); + if (len == 0) { + // SCAN COMPLETED + break; + } else if (len > 0) { + // PARSE FIELD NAME + fieldName = stringFromBytes(bytes.bytes, bytes.offset, len).intern(); + bytes.skip(len); + valuePos = readInteger(bytes); + type = readOType(bytes); + } else { + // LOAD GLOBAL PROPERTY BY ID + final int id = (len * -1) - 1; + debugProperty.globalId = id; + prop = schema.getGlobalPropertyById(id); + fieldName = prop.getName(); + valuePos = readInteger(bytes); + if (prop.getType() != OType.ANY) + type = prop.getType(); + else + type = readOType(bytes); + } + debugProperty.name = fieldName; + debugProperty.type = type; + + if (valuePos != 0) { + int headerCursor = bytes.offset; + bytes.offset = valuePos; + try { + debugProperty.value = readSingleValue(bytes, type, new ODocument()); + } catch (RuntimeException ex) { + debugProperty.faildToRead = true; + debugProperty.readingException = ex; + debugProperty.failPosition = bytes.offset; + } + if (bytes.offset > last) + last = bytes.offset; + bytes.offset = headerCursor; + } else + debugProperty.value = null; + } catch (RuntimeException ex) { + debugInfo.readingFailure = true; + debugInfo.readingException = ex; + debugInfo.failPosition = bytes.offset; + return debugInfo; + } + } + + return debugInfo; + } +} diff --git a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializerBinaryV0.java b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializerBinaryV0.java index 1b8f179c7fb..817eb75926b 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializerBinaryV0.java +++ b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/binary/ORecordSerializerBinaryV0.java @@ -273,12 +273,12 @@ protected OClass serializeClass(final ODocument document, final BytesContainer b return clazz; } - private 
OGlobalProperty getGlobalProperty(final ODocument document, final int len) { + protected OGlobalProperty getGlobalProperty(final ODocument document, final int len) { final int id = (len * -1) - 1; return ODocumentInternal.getGlobalPropertyById(document, id); } - private OType readOType(final BytesContainer bytes) { + protected OType readOType(final BytesContainer bytes) { return OType.getById(readByte(bytes)); } @@ -286,7 +286,7 @@ private void writeOType(BytesContainer bytes, int pos, OType type) { bytes.bytes[pos] = (byte) type.getId(); } - private Object readSingleValue(BytesContainer bytes, OType type, ODocument document) { + protected Object readSingleValue(BytesContainer bytes, OType type, ODocument document) { Object value = null; switch (type) { case INTEGER: @@ -748,14 +748,14 @@ private OType getTypeFromValueEmbedded(final Object fieldValue) { return type; } - private String readString(final BytesContainer bytes) { + protected String readString(final BytesContainer bytes) { final int len = OVarIntSerializer.readAsInteger(bytes); final String res = stringFromBytes(bytes.bytes, bytes.offset, len); bytes.skip(len); return res; } - private int readInteger(final BytesContainer container) { + protected int readInteger(final BytesContainer container) { final int value = OIntegerSerializer.INSTANCE.deserializeLiteral(container.bytes, container.offset); container.offset += OIntegerSerializer.INT_SIZE; return value; @@ -791,7 +791,7 @@ private byte[] bytesFromString(final String toWrite) { } } - private String stringFromBytes(final byte[] bytes, final int offset, final int len) { + protected String stringFromBytes(final byte[] bytes, final int offset, final int len) { try { return new String(bytes, offset, len, CHARSET_UTF_8); } catch (UnsupportedEncodingException e) { diff --git a/core/src/test/java/com/orientechnologies/orient/core/serialization/serializer/binary/impl/ORecordSerializerBinaryDebugTest.java 
b/core/src/test/java/com/orientechnologies/orient/core/serialization/serializer/binary/impl/ORecordSerializerBinaryDebugTest.java new file mode 100644 index 00000000000..dba8275f0a2 --- /dev/null +++ b/core/src/test/java/com/orientechnologies/orient/core/serialization/serializer/binary/impl/ORecordSerializerBinaryDebugTest.java @@ -0,0 +1,163 @@ +package com.orientechnologies.orient.core.serialization.serializer.binary.impl; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertNotNull; + +import org.testng.annotations.Test; + +import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx; +import com.orientechnologies.orient.core.metadata.schema.OClass; +import com.orientechnologies.orient.core.metadata.schema.OType; +import com.orientechnologies.orient.core.record.impl.ODocument; +import com.orientechnologies.orient.core.serialization.serializer.record.binary.ORecordSerializationDebug; +import com.orientechnologies.orient.core.serialization.serializer.record.binary.ORecordSerializerBinaryDebug; + +public class ORecordSerializerBinaryDebugTest { + + @Test + public void testSimpleDocumentDebug() { + ODatabaseDocumentTx db = new ODatabaseDocumentTx("memory:" + ORecordSerializerBinaryDebugTest.class.getSimpleName()); + db.create(); + try { + ODocument doc = new ODocument(); + doc.field("test", "test"); + doc.field("anInt", 2); + doc.field("anDouble", 2D); + + byte[] bytes = doc.toStream(); + + ORecordSerializerBinaryDebug debugger = new ORecordSerializerBinaryDebug(); + ORecordSerializationDebug debug = debugger.deserializeDebug(bytes, db); + + assertEquals(debug.properties.size(), 3); + assertEquals(debug.properties.get(0).name, "test"); + assertEquals(debug.properties.get(0).type, OType.STRING); + assertEquals(debug.properties.get(0).value, "test"); + + assertEquals(debug.properties.get(1).name, "anInt"); + assertEquals(debug.properties.get(1).type, OType.INTEGER); + assertEquals(debug.properties.get(1).value, 
2); + + assertEquals(debug.properties.get(2).name, "anDouble"); + assertEquals(debug.properties.get(2).type, OType.DOUBLE); + assertEquals(debug.properties.get(2).value, 2D); + } finally { + db.drop(); + } + } + + @Test + public void testSchemaFullDocumentDebug() { + ODatabaseDocumentTx db = new ODatabaseDocumentTx("memory:" + ORecordSerializerBinaryDebugTest.class.getSimpleName()); + db.create(); + try { + OClass clazz = db.getMetadata().getSchema().createClass("some"); + clazz.createProperty("testP", OType.STRING); + clazz.createProperty("theInt", OType.INTEGER); + ODocument doc = new ODocument("some"); + doc.field("testP", "test"); + doc.field("theInt", 2); + doc.field("anDouble", 2D); + + byte[] bytes = doc.toStream(); + + ORecordSerializerBinaryDebug debugger = new ORecordSerializerBinaryDebug(); + ORecordSerializationDebug debug = debugger.deserializeDebug(bytes, db); + + assertEquals(debug.properties.size(), 3); + assertEquals(debug.properties.get(0).name, "testP"); + assertEquals(debug.properties.get(0).type, OType.STRING); + assertEquals(debug.properties.get(0).value, "test"); + + assertEquals(debug.properties.get(1).name, "theInt"); + assertEquals(debug.properties.get(1).type, OType.INTEGER); + assertEquals(debug.properties.get(1).value, 2); + + assertEquals(debug.properties.get(2).name, "anDouble"); + assertEquals(debug.properties.get(2).type, OType.DOUBLE); + assertEquals(debug.properties.get(2).value, 2D); + } finally { + db.drop(); + } + + } + + @Test + public void testSimpleBrokenDocumentDebug() { + ODatabaseDocumentTx db = new ODatabaseDocumentTx("memory:" + ORecordSerializerBinaryDebugTest.class.getSimpleName()); + db.create(); + try { + ODocument doc = new ODocument(); + doc.field("test", "test"); + doc.field("anInt", 2); + doc.field("anDouble", 2D); + + byte[] bytes = doc.toStream(); + byte[] brokenBytes = new byte[bytes.length - 10]; + System.arraycopy(bytes, 0, brokenBytes, 0, bytes.length - 10); + + ORecordSerializerBinaryDebug debugger = new 
ORecordSerializerBinaryDebug(); + ORecordSerializationDebug debug = debugger.deserializeDebug(brokenBytes, db); + + assertEquals(debug.properties.size(), 3); + assertEquals(debug.properties.get(0).name, "test"); + assertEquals(debug.properties.get(0).type, OType.STRING); + assertEquals(debug.properties.get(0).faildToRead, true); + assertNotNull(debug.properties.get(0).readingException); + + assertEquals(debug.properties.get(1).name, "anInt"); + assertEquals(debug.properties.get(1).type, OType.INTEGER); + assertEquals(debug.properties.get(1).faildToRead, true); + assertNotNull(debug.properties.get(1).readingException); + + assertEquals(debug.properties.get(2).name, "anDouble"); + assertEquals(debug.properties.get(2).type, OType.DOUBLE); + assertEquals(debug.properties.get(2).faildToRead, true); + assertNotNull(debug.properties.get(2).readingException); + } finally { + db.drop(); + } + } + + @Test + public void testBrokenSchemaFullDocumentDebug() { + ODatabaseDocumentTx db = new ODatabaseDocumentTx("memory:" + ORecordSerializerBinaryDebugTest.class.getSimpleName()); + db.create(); + try { + OClass clazz = db.getMetadata().getSchema().createClass("some"); + clazz.createProperty("testP", OType.STRING); + clazz.createProperty("theInt", OType.INTEGER); + ODocument doc = new ODocument("some"); + doc.field("testP", "test"); + doc.field("theInt", 2); + doc.field("anDouble", 2D); + + byte[] bytes = doc.toStream(); + byte[] brokenBytes = new byte[bytes.length - 10]; + System.arraycopy(bytes, 0, brokenBytes, 0, bytes.length - 10); + + ORecordSerializerBinaryDebug debugger = new ORecordSerializerBinaryDebug(); + ORecordSerializationDebug debug = debugger.deserializeDebug(brokenBytes, db); + + assertEquals(debug.properties.size(), 3); + assertEquals(debug.properties.get(0).name, "testP"); + assertEquals(debug.properties.get(0).type, OType.STRING); + assertEquals(debug.properties.get(0).faildToRead, true); + assertNotNull(debug.properties.get(0).readingException); + + 
assertEquals(debug.properties.get(1).name, "theInt"); + assertEquals(debug.properties.get(1).type, OType.INTEGER); + assertEquals(debug.properties.get(1).faildToRead, true); + assertNotNull(debug.properties.get(1).readingException); + + assertEquals(debug.properties.get(2).name, "anDouble"); + assertEquals(debug.properties.get(2).type, OType.DOUBLE); + assertEquals(debug.properties.get(2).faildToRead, true); + assertNotNull(debug.properties.get(2).readingException); + } finally { + db.drop(); + } + + } + +}
b6096079c17488b1232f7db942c529c7eb5f9843
ReactiveX-RxJava
Unlock in finally block--
c
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/main/java/rx/observers/SerializedObserverViaStateMachine.java b/rxjava-core/src/main/java/rx/observers/SerializedObserverViaStateMachine.java index fdd6e844ad..14c9612e07 100644 --- a/rxjava-core/src/main/java/rx/observers/SerializedObserverViaStateMachine.java +++ b/rxjava-core/src/main/java/rx/observers/SerializedObserverViaStateMachine.java @@ -63,25 +63,24 @@ public void onNext(T t) { } while (!state.compareAndSet(current, newState)); if (newState.shouldProcess()) { - if (newState == State.PROCESS_SELF) { - s.onNext(t); - - // finish processing to let this thread move on - do { - current = state.get(); - newState = current.finishProcessing(1); - } while (!state.compareAndSet(current, newState)); - } else { - // drain queue - Object[] items = newState.queue; - for (int i = 0; i < items.length; i++) { - s.onNext((T) items[i]); + int numItemsProcessed = 0; + try { + if (newState == State.PROCESS_SELF) { + s.onNext(t); + numItemsProcessed++; + } else { + // drain queue + Object[] items = newState.queue; + for (int i = 0; i < items.length; i++) { + s.onNext((T) items[i]); + numItemsProcessed++; + } } - + } finally { // finish processing to let this thread move on do { current = state.get(); - newState = current.finishProcessing(items.length); + newState = current.finishProcessing(numItemsProcessed); } while (!state.compareAndSet(current, newState)); } }
615adaba0cdfa8685039f4eb0765df053deead9c
restlet-framework-java
Fixed range issue -607.--
c
https://github.com/restlet/restlet-framework-java
diff --git a/modules/org.restlet.test/src/org/restlet/test/data/RangeTestCase.java b/modules/org.restlet.test/src/org/restlet/test/data/RangeTestCase.java index 2e97420ce8..b34582859c 100644 --- a/modules/org.restlet.test/src/org/restlet/test/data/RangeTestCase.java +++ b/modules/org.restlet.test/src/org/restlet/test/data/RangeTestCase.java @@ -266,6 +266,15 @@ public void testGet() throws Exception { assertEquals(2, response.getEntity().getRange().getIndex()); assertEquals(8, response.getEntity().getRange().getSize()); + request.setRanges(Arrays.asList(new Range(2, 1000))); + response = client.handle(request); + assertEquals(Status.SUCCESS_PARTIAL_CONTENT, response.getStatus()); + assertEquals("34567890", response.getEntity().getText()); + assertEquals(10, response.getEntity().getSize()); + assertEquals(8, response.getEntity().getAvailableSize()); + assertEquals(2, response.getEntity().getRange().getIndex()); + assertEquals(8, response.getEntity().getRange().getSize()); + client.stop(); } diff --git a/modules/org.restlet/src/org/restlet/engine/application/RangeFilter.java b/modules/org.restlet/src/org/restlet/engine/application/RangeFilter.java index eea7b61abb..ecbef8ecd0 100644 --- a/modules/org.restlet/src/org/restlet/engine/application/RangeFilter.java +++ b/modules/org.restlet/src/org/restlet/engine/application/RangeFilter.java @@ -106,6 +106,12 @@ protected void afterHandle(Request request, Response response) { .info("The range of the response entity is not equal to the requested one."); } + if (response.getEntity().hasKnownSize() + && requestedRange.getSize() > response + .getEntity().getAvailableSize()) { + requestedRange.setSize(Range.SIZE_MAX); + } + response.setEntity(new RangeRepresentation( response.getEntity(), requestedRange)); response.setStatus(Status.SUCCESS_PARTIAL_CONTENT);
1b7ce0a4407a5cbb1ae7cc0f3e1fc3a741cf9b86
arrayexpress$annotare2
Another round of refactoring of sign-up/sign-in functionality
p
https://github.com/arrayexpress/annotare2
diff --git a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/AccountService.java b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/AccountService.java index 82b954fd6..a56cf9e82 100644 --- a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/AccountService.java +++ b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/AccountService.java @@ -28,12 +28,14 @@ */ public interface AccountService { - ValidationErrors signUp(HttpServletRequest request) throws AccountServiceException; - boolean isLoggedIn(HttpServletRequest request); ValidationErrors login(HttpServletRequest request) throws AccountServiceException; + ValidationErrors signUp(HttpServletRequest request) throws AccountServiceException; + + ValidationErrors changePassword(HttpServletRequest request) throws AccountServiceException; + void logout(HttpSession session); User getCurrentUser(HttpSession session); diff --git a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/AccountServiceImpl.java b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/AccountServiceImpl.java index 1faf49318..4dc9cae16 100644 --- a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/AccountServiceImpl.java +++ b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/AccountServiceImpl.java @@ -22,6 +22,7 @@ import org.slf4j.LoggerFactory; import uk.ac.ebi.fg.annotare2.db.om.User; import uk.ac.ebi.fg.annotare2.web.server.UnauthorizedAccessException; +import uk.ac.ebi.fg.annotare2.web.server.login.utils.FormParams; import uk.ac.ebi.fg.annotare2.web.server.services.AccountManager; import uk.ac.ebi.fg.annotare2.web.server.login.utils.RequestParam; import uk.ac.ebi.fg.annotare2.web.server.login.utils.SessionAttribute; @@ -33,8 +34,6 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpSession; -import static java.util.Arrays.asList; - /** * @author Olga Melnichuk */ @@ -83,6 +82,31 @@ public ValidationErrors 
signUp(HttpServletRequest request) throws AccountService return errors; } + @Transactional + public ValidationErrors changePassword(HttpServletRequest request) throws AccountServiceException { + SignUpParams params = new SignUpParams(request); + ValidationErrors errors = params.validate(); + if (errors.isEmpty()) { + if (null != accountManager.getByEmail(params.getEmail())) { + errors.append("email", "User with this email already exists"); + } else { + User u = accountManager.createUser(params.getName(), params.getEmail(), params.getPassword()); + try { + emailer.sendFromTemplate( + EmailSender.NEW_USER_TEMPLATE, + ImmutableMap.of( + "to.name", u.getName(), + "to.email", u.getEmail(), + "verification.token", u.getVerificationToken() + ) + ); + } catch (MessagingException x) { + // + } + } + } + return errors; + } @Transactional public ValidationErrors login(HttpServletRequest request) throws AccountServiceException { @@ -112,78 +136,63 @@ public User getCurrentUser(HttpSession session) { return user; } - static class LoginParams { - public static final String EMAIL_PARAM = "email"; - public static final String PASSWORD_PARAM = "password"; - - private final RequestParam email; - private final RequestParam password; + static class LoginParams extends FormParams { private LoginParams(HttpServletRequest request) { - email = RequestParam.from(request, EMAIL_PARAM); - password = RequestParam.from(request, PASSWORD_PARAM); + addParam(RequestParam.from(request, EMAIL_PARAM), true); + addParam(RequestParam.from(request, PASSWORD_PARAM), true); } public ValidationErrors validate() { - ValidationErrors errors = new ValidationErrors(); - for (RequestParam p : asList(email, password)) { - if (p.isEmpty()) { - errors.append(p.getName(), "Please specify a value, " + p.getName() + " is required"); - } - } - return errors; + return validateMandatory(); } public String getEmail() { - return email.getValue(); + return getParamValue(EMAIL_PARAM); } public String getPassword() { - 
return password.getValue(); + return getParamValue(PASSWORD_PARAM); } } - static class SignUpParams { - public static final String NAME_PARAM = "name"; - public static final String EMAIL_PARAM = "email"; - public static final String PASSWORD_PARAM = "password"; - public static final String CONFIRM_PASSWORD_PARAM = "confirm-password"; - - private final RequestParam name; - private final RequestParam email; - private final RequestParam password; - private final RequestParam confirmPassword; + static class SignUpParams extends FormParams { private SignUpParams(HttpServletRequest request) { - name = RequestParam.from(request, NAME_PARAM); - email = RequestParam.from(request, EMAIL_PARAM); - password = RequestParam.from(request, PASSWORD_PARAM); - confirmPassword = RequestParam.from(request, CONFIRM_PASSWORD_PARAM); + addParam(RequestParam.from(request, NAME_PARAM), true); + addParam(RequestParam.from(request, EMAIL_PARAM), true); + addParam(RequestParam.from(request, PASSWORD_PARAM), true); + addParam(RequestParam.from(request, CONFIRM_PASSWORD_PARAM), false); } public ValidationErrors validate() { - ValidationErrors errors = new ValidationErrors(); - for (RequestParam p : asList(name, email, password)) { - if (p.isEmpty()) { - errors.append(p.getName(), "Please specify a value, " + p.getName() + " is required"); - } + ValidationErrors errors = validateMandatory(); + + if (!isEmailGoodEnough()) { + errors.append(EMAIL_PARAM, "Email is not valid; should at least contain @ sign"); } - if (!password.getValue().equals(confirmPassword.getValue())) { - errors.append(confirmPassword.getName(), "Passwords do not match"); + + if (!isPasswordGoodEnough()) { + errors.append(PASSWORD_PARAM, "Password is too weak; should be at least 4 characters long containing at least one digit"); + } + + if (!hasPasswordConfirmed()) { + errors.append(CONFIRM_PASSWORD_PARAM, "Passwords do not match"); } + return errors; } public String getName() { - return name.getValue(); + return 
getParamValue(NAME_PARAM); } public String getEmail() { - return email.getValue(); + return getParamValue(EMAIL_PARAM); } public String getPassword() { - return password.getValue(); + return getParamValue(PASSWORD_PARAM); } } } diff --git a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/ChangePasswordServlet.java b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/ChangePasswordServlet.java index 084ec5aa6..75d3dca75 100644 --- a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/ChangePasswordServlet.java +++ b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/ChangePasswordServlet.java @@ -28,39 +28,46 @@ import javax.servlet.http.HttpServletResponse; import java.io.IOException; +import static uk.ac.ebi.fg.annotare2.web.server.login.ServletNavigation.CHANGE_PASSWORD; + +import static com.google.common.base.Strings.isNullOrEmpty; import static uk.ac.ebi.fg.annotare2.web.server.login.ServletNavigation.LOGIN; -import static uk.ac.ebi.fg.annotare2.web.server.login.ServletNavigation.SIGNUP; +import static uk.ac.ebi.fg.annotare2.web.server.login.SessionInformation.EMAIL_SESSION_ATTRIBUTE; +import static uk.ac.ebi.fg.annotare2.web.server.login.SessionInformation.INFO_SESSION_ATTRIBUTE; public class ChangePasswordServlet extends HttpServlet { - private static final Logger log = LoggerFactory.getLogger(SignUpServlet.class); + private static final Logger log = LoggerFactory.getLogger(ChangePasswordServlet.class); @Inject private AccountService accountService; @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - log.debug("Sign-up data submitted; checking.."); + log.debug("Change password request received; processing"); ValidationErrors errors = new ValidationErrors(); + try { - errors.append(accountService.signUp(request)); + errors.append(accountService.changePassword(request)); if (errors.isEmpty()) { - log.debug("Sign-up successful; redirect to 
login page"); - LOGIN.redirect(request, response); - return; + if (!isNullOrEmpty(request.getParameter("token"))) { + log.debug("Password successfully changed; redirect to login page"); + INFO_SESSION_ATTRIBUTE.set(request.getSession(), "You have successfully changed password; please sign in now"); + LOGIN.redirect(request, response); + return; + } } - log.debug("Sign-up form had invalid entries"); - } catch (Exception e) { - log.debug("Sign-up failed"); + } catch (AccountServiceException e) { + log.debug("Change password request failed", e); errors.append(e.getMessage()); } request.setAttribute("errors", errors); - SIGNUP.forward(getServletConfig().getServletContext(), request, response); + CHANGE_PASSWORD.forward(getServletConfig().getServletContext(), request, response); } @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - SIGNUP.forward(getServletConfig().getServletContext(), request, response); + CHANGE_PASSWORD.forward(getServletConfig().getServletContext(), request, response); } } diff --git a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/ServletNavigation.java b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/ServletNavigation.java index 41d35d90e..9d1464445 100644 --- a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/ServletNavigation.java +++ b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/ServletNavigation.java @@ -38,7 +38,7 @@ enum ServletNavigation { LOGIN("/login", "/login.jsp"), SIGNUP("/sign-up", "/sign-up.jsp"), ACTIVATION("/activate", "/activate.jsp"), - PASSWORD_CHANGER("/change-password", "change-password.jsp"), + CHANGE_PASSWORD("/change-password", "/change-password.jsp"), HOME("/", "/home.jsp"), EDITOR("/edit/", "/editor.jsp"); diff --git a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/SignUpServlet.java b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/SignUpServlet.java 
index 5174f0f3e..fd8eeeb5b 100644 --- a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/SignUpServlet.java +++ b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/SignUpServlet.java @@ -50,10 +50,10 @@ protected void doPost(HttpServletRequest request, HttpServletResponse response) LOGIN.redirect(request, response); return; } else { - log.debug("Sign-up form had invalid entries"); + log.debug("Sign-up form failed validation"); } } catch (AccountServiceException e) { - log.debug("Sign-up failed"); + log.debug("Sign-up failed", e); errors.append(e.getMessage()); } diff --git a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/utils/FormParams.java b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/utils/FormParams.java new file mode 100644 index 000000000..34c5a6263 --- /dev/null +++ b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/utils/FormParams.java @@ -0,0 +1,76 @@ +package uk.ac.ebi.fg.annotare2.web.server.login.utils; + +/* + * Copyright 2009-2013 European Molecular Biology Laboratory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import java.util.*; + +import static com.google.common.base.Strings.nullToEmpty; + +public abstract class FormParams { + protected static final String NAME_PARAM = "name"; + protected static final String EMAIL_PARAM = "email"; + protected static final String PASSWORD_PARAM = "password"; + protected static final String CONFIRM_PASSWORD_PARAM = "confirm-password"; + protected static final String TOKEN_PARAM = "token"; + + private Map<String,RequestParam> paramMap = new HashMap<String, RequestParam>(); + private Set<RequestParam> mandatoryParamSet = new HashSet<RequestParam>(); + + public String getParamValue(String paramName) { + if (paramMap.containsKey(paramName)) { + return paramMap.get(paramName).getValue(); + } + return null; + } + + public abstract ValidationErrors validate(); + + protected void addParam(RequestParam param, boolean isMandatory) + { + paramMap.put(param.getName(), param); + if (isMandatory) { + mandatoryParamSet.add(param); + } + } + + protected ValidationErrors validateMandatory() { + ValidationErrors errors = new ValidationErrors(); + for (RequestParam p : getMandatoryParams()) { + if (p.isEmpty()) { + errors.append(p.getName(), "Please specify a value, " + p.getName() + " is required"); + } + } + return errors; + } + + protected Collection<RequestParam> getMandatoryParams() { + return Collections.unmodifiableSet(mandatoryParamSet); + } + + protected boolean isEmailGoodEnough() { + return nullToEmpty(getParamValue(EMAIL_PARAM)).matches(".+@.+"); + } + + protected boolean isPasswordGoodEnough() { + return nullToEmpty(getParamValue(PASSWORD_PARAM)).matches("^(?=.*\\d).{4,}$"); + } + + protected boolean hasPasswordConfirmed() { + return getParamValue(PASSWORD_PARAM).equals(getParamValue(CONFIRM_PASSWORD_PARAM)); + } +} diff --git a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/utils/ValidationErrors.java b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/utils/ValidationErrors.java index 
76b9d3ae2..d1263c79f 100644 --- a/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/utils/ValidationErrors.java +++ b/app/web/src/main/java/uk/ac/ebi/fg/annotare2/web/server/login/utils/ValidationErrors.java @@ -53,7 +53,7 @@ public String getErrors() { public String getErrors(String name) { Collection<String> err = errors.get(name); - return err == null ? "" : on(", ").join(err); + return err == null ? "" : on(". ").join(err); } } diff --git a/app/web/src/main/webapp/change-password.jsp b/app/web/src/main/webapp/change-password.jsp index 3e9cae6e5..cb01a2882 100644 --- a/app/web/src/main/webapp/change-password.jsp +++ b/app/web/src/main/webapp/change-password.jsp @@ -13,29 +13,24 @@ ~ See the License for the specific language governing permissions and ~ limitations under the License. --%> -<%-- <%@ taglib uri="http://java.sun.com/jsp/jstl/core" prefix="c" %> <%@ taglib uri="http://java.sun.com/jsp/jstl/functions" prefix="f" %> <%@ page isELIgnored="false" %> -<%@ page import="uk.ac.ebi.fg.annotare2.web.server.login.utils.ValidationErrors" %> <% - ValidationErrors errors = (ValidationErrors) request.getAttribute("errors"); - if (errors != null) { - pageContext.setAttribute("dummyErrors", errors.getErrors()); - pageContext.setAttribute("emailErrors", errors.getErrors("email")); - pageContext.setAttribute("passwordErrors", errors.getErrors("password")); - } + pageContext.setAttribute("errors", request.getAttribute("errors")); - String[] values = request.getParameterValues("email"); - pageContext.setAttribute("email", values == null ? "" : values[0]); + String email = request.getParameter("email"); + if (null == email) { + email = (String)session.getAttribute("email"); + } + pageContext.setAttribute("email", email == null ? 
"" : email); %> ---%> <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html; charset=UTF-8"> - <title>Annotare 2.0 - New user registration</title> + <title>Annotare 2.0 - Change password request</title> <link type="text/css" rel="stylesheet" href="general.css"> <link type="text/css" rel="stylesheet" href="login.css"> </head> @@ -49,38 +44,40 @@ <table class="form"> <tr> <td></td> - <td><h1>Annotare 2.0</h1></td> + <td><h1>Change password request</h1></td> </tr> - <tr class="error"> + <tr class="info"> <td></td> - <td>${dummyErrors}</td> - </tr> - <tr class="row right"> - <td>Email</td> - <td><input type="text" name="email" value="${email}" style="width:98%"/></td> + <td><c:out value="${sessionScope.info}" /><c:remove var="info" scope="session" /></td> </tr> <tr class="error"> <td></td> - <td>${emailErrors}</td> + <td>${errors}</td> </tr> <tr class="row right"> - <td>Password</td> - <td><input type="password" name="password" style="width:98%"/></td> - </tr> - <tr class="error"> - <td></td> - <td>${passwordErrors}</td> - </tr> - <tr class="row"> - <td></td> + <td>Email</td> <td> - <button name="signIn">Sign In</button>&nbsp;&nbsp;<a href="#" onclick="return false;">Forgot your password?</a> + <c:choose> + <c:when test="${email != ''}"> + <input type="text" name="email" value="${email}" style="width:98%"/> + </c:when> + <c:otherwise> + <input type="text" name="email" style="width:98%" autofocus="autofocus"/> + </c:otherwise> + </c:choose> </td> </tr> - <tr> + <tr class="row"> <td></td> <td> - <div style="margin-top:10px;">Don't have an account? 
<a href="#" onclick="return false;">Sign Up</a></div> + <c:choose> + <c:when test="${email != ''}"> + <button name="changePassword" autofocus="autofocus">Send</button> + </c:when> + <c:otherwise> + <button name="changePassword">Send</button> + </c:otherwise> + </c:choose> </td> </tr> </table> diff --git a/app/web/src/main/webapp/login.css b/app/web/src/main/webapp/login.css index 386e086ac..4fcb40929 100644 --- a/app/web/src/main/webapp/login.css +++ b/app/web/src/main/webapp/login.css @@ -44,7 +44,7 @@ text-align: left; } -.form { +table { border-collapse: collapse; color: #000; width: 100%; @@ -56,6 +56,10 @@ padding: 0 3px; } +.form tr td:first-child { + white-space: nowrap; +} + .form tr.row td { padding-top: 5px; padding-bottom: 5px; diff --git a/app/web/src/main/webapp/login.jsp b/app/web/src/main/webapp/login.jsp index c1e1537ac..be7d4396e 100644 --- a/app/web/src/main/webapp/login.jsp +++ b/app/web/src/main/webapp/login.jsp @@ -54,7 +54,6 @@ <tr class="info"> <td></td> <td><c:out value="${sessionScope.info}" /><c:remove var="info" scope="session" /></td> - </tr> <tr class="error"> <td></td>
1dc33f9f6d29c6b33de2023d4f2158e70a1c89aa
orientdb
UPDATE ADD now possible with subdocuments fields--
a
https://github.com/orientechnologies/orientdb
diff --git a/core/src/main/java/com/orientechnologies/orient/core/sql/OCommandExecutorSQLUpdate.java b/core/src/main/java/com/orientechnologies/orient/core/sql/OCommandExecutorSQLUpdate.java index df9e1a06034..94deb108910 100755 --- a/core/src/main/java/com/orientechnologies/orient/core/sql/OCommandExecutorSQLUpdate.java +++ b/core/src/main/java/com/orientechnologies/orient/core/sql/OCommandExecutorSQLUpdate.java @@ -275,7 +275,13 @@ else if (returning.equalsIgnoreCase("AFTER")) // IN ALL OTHER CASES USE A LIST coll = new ArrayList<Object>(); - record.field(entry.getKey(), coll); + // containField's condition above does NOT check subdocument's fields so + Collection<Object> currColl = record.field(entry.getKey()); + if (currColl==null) + record.field(entry.getKey(), coll); + else + coll = currColl; + } else { fieldValue = record.field(entry.getKey());
4fa174541fd3402cc067ebab5fb44c9b5ce2587e
camel
CAMEL-3203: Fixed adding routes with quartz- endpoints to already started camel should add jobs to scheduler.--git-svn-id: https://svn.apache.org/repos/asf/camel/trunk@1005489 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/camel
diff --git a/components/camel-quartz/src/main/java/org/apache/camel/component/quartz/QuartzComponent.java b/components/camel-quartz/src/main/java/org/apache/camel/component/quartz/QuartzComponent.java index 9ff9bf8cb7515..1ef0cdf81b779 100644 --- a/components/camel-quartz/src/main/java/org/apache/camel/component/quartz/QuartzComponent.java +++ b/components/camel-quartz/src/main/java/org/apache/camel/component/quartz/QuartzComponent.java @@ -186,9 +186,14 @@ protected void doStop() throws Exception { } } - public void addJob(JobDetail job, Trigger trigger) { - // add job to internal list because we will defer adding to the scheduler when camel context has been fully started - jobsToAdd.add(new JobToAdd(job, trigger)); + public void addJob(JobDetail job, Trigger trigger) throws SchedulerException { + if (scheduler == null) { + // add job to internal list because we will defer adding to the scheduler when camel context has been fully started + jobsToAdd.add(new JobToAdd(job, trigger)); + } else { + // add job directly to scheduler + doAddJob(job, trigger); + } } private void doAddJob(JobDetail job, Trigger trigger) throws SchedulerException { diff --git a/components/camel-quartz/src/test/java/org/apache/camel/component/quartz/QuartzAddRoutesAfterCamelContextStartedTest.java b/components/camel-quartz/src/test/java/org/apache/camel/component/quartz/QuartzAddRoutesAfterCamelContextStartedTest.java new file mode 100644 index 0000000000000..52503df24ec5a --- /dev/null +++ b/components/camel-quartz/src/test/java/org/apache/camel/component/quartz/QuartzAddRoutesAfterCamelContextStartedTest.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.camel.component.quartz; + +import org.apache.camel.builder.RouteBuilder; +import org.apache.camel.component.mock.MockEndpoint; +import org.apache.camel.test.junit4.CamelTestSupport; +import org.junit.Test; + +/** + * @version $Revision$ + */ +public class QuartzAddRoutesAfterCamelContextStartedTest extends CamelTestSupport { + + @Test + public void testAddRoutes() throws Exception { + // camel context should already be started + assertTrue(context.getStatus().isStarted()); + + MockEndpoint mock = getMockEndpoint("mock:result"); + mock.expectedMessageCount(2); + + // add the quartz router after CamelContext has been started + context.addRoutes(new RouteBuilder() { + @Override + public void configure() throws Exception { + from("quartz://myGroup/myTimerName?trigger.repeatInterval=2&trigger.repeatCount=1").to("mock:result"); + } + }); + + // it should also work + assertMockEndpointsSatisfied(); + } + +}
ad2358bba102bd4e9876028cf30341ec48aabe4f
ReactiveX-RxJava
GroupBy GroupedObservables should not re-subscribe- to parent sequence--https://github.com/Netflix/RxJava/issues/282--Refactored to maintain a single subscription that propagates events to the correct child GroupedObservables.-
c
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/main/java/rx/operators/OperationGroupBy.java b/rxjava-core/src/main/java/rx/operators/OperationGroupBy.java index 1c2e6e969c..edd4ef7ae4 100644 --- a/rxjava-core/src/main/java/rx/operators/OperationGroupBy.java +++ b/rxjava-core/src/main/java/rx/operators/OperationGroupBy.java @@ -17,12 +17,15 @@ import static org.junit.Assert.*; -import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; -import java.util.List; +import java.util.Collection; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import org.junit.Test; @@ -30,6 +33,8 @@ import rx.Observer; import rx.Subscription; import rx.observables.GroupedObservable; +import rx.subscriptions.Subscriptions; +import rx.util.functions.Action1; import rx.util.functions.Func1; import rx.util.functions.Functions; @@ -55,7 +60,9 @@ public static <K, T> Func1<Observer<GroupedObservable<K, T>>, Subscription> grou } private static class GroupBy<K, V> implements Func1<Observer<GroupedObservable<K, V>>, Subscription> { + private final Observable<KeyValue<K, V>> source; + private final ConcurrentHashMap<K, GroupedSubject<K, V>> groupedObservables = new ConcurrentHashMap<K, GroupedSubject<K, V>>(); private GroupBy(Observable<KeyValue<K, V>> source) { this.source = source; @@ -63,61 +70,127 @@ private GroupBy(Observable<KeyValue<K, V>> source) { @Override public Subscription call(final Observer<GroupedObservable<K, V>> observer) { - return source.subscribe(new GroupByObserver(observer)); + return source.subscribe(new Observer<KeyValue<K, V>>() { + + @Override + public void onCompleted() { + // we need to propagate to all children I imagine ... 
we can't just leave all of those Observable/Observers hanging + for (GroupedSubject<K, V> o : groupedObservables.values()) { + o.onCompleted(); + } + // now the parent + observer.onCompleted(); + } + + @Override + public void onError(Exception e) { + // we need to propagate to all children I imagine ... we can't just leave all of those Observable/Observers hanging + for (GroupedSubject<K, V> o : groupedObservables.values()) { + o.onError(e); + } + // now the parent + observer.onError(e); + } + + @Override + public void onNext(KeyValue<K, V> value) { + GroupedSubject<K, V> gs = groupedObservables.get(value.key); + if (gs == null) { + /* + * Technically the source should be single-threaded so we shouldn't need to do this but I am + * programming defensively as most operators are so this can work with a concurrent sequence + * if it ends up receiving one. + */ + GroupedSubject<K, V> newGs = GroupedSubject.<K, V> create(value.key); + GroupedSubject<K, V> existing = groupedObservables.putIfAbsent(value.key, newGs); + if (existing == null) { + // we won so use the one we created + gs = newGs; + // since we won the creation we emit this new GroupedObservable + observer.onNext(gs); + } else { + // another thread beat us so use the existing one + gs = existing; + } + } + gs.onNext(value.value); + } + }); } + } - private class GroupByObserver implements Observer<KeyValue<K, V>> { - private final Observer<GroupedObservable<K, V>> underlying; + private static class GroupedSubject<K, T> extends GroupedObservable<K, T> implements Observer<T> { - private final ConcurrentHashMap<K, Boolean> keys = new ConcurrentHashMap<K, Boolean>(); + static <K, T> GroupedSubject<K, T> create(K key) { + @SuppressWarnings("unchecked") + final AtomicReference<Observer<T>> subscribedObserver = new AtomicReference<Observer<T>>(EMPTY_OBSERVER); - private GroupByObserver(Observer<GroupedObservable<K, V>> underlying) { - this.underlying = underlying; - } + return new GroupedSubject<K, T>(key, new 
Func1<Observer<T>, Subscription>() { - @Override - public void onCompleted() { - underlying.onCompleted(); - } + @Override + public Subscription call(Observer<T> observer) { + // register Observer + subscribedObserver.set(observer); - @Override - public void onError(Exception e) { - underlying.onError(e); - } + return new Subscription() { - @Override - public void onNext(final KeyValue<K, V> args) { - K key = args.key; - boolean newGroup = keys.putIfAbsent(key, true) == null; - if (newGroup) { - underlying.onNext(buildObservableFor(source, key)); + @SuppressWarnings("unchecked") + @Override + public void unsubscribe() { + // we remove the Observer so we stop emitting further events (they will be ignored if parent continues to send) + subscribedObserver.set(EMPTY_OBSERVER); + // I don't believe we need to worry about the parent here as it's a separate sequence that would + // be unsubscribed to directly if that needs to happen. + } + }; } - } + }, subscribedObserver); } - } - private static <K, R> GroupedObservable<K, R> buildObservableFor(Observable<KeyValue<K, R>> source, final K key) { - final Observable<R> observable = source.filter(new Func1<KeyValue<K, R>, Boolean>() { - @Override - public Boolean call(KeyValue<K, R> pair) { - return key.equals(pair.key); - } - }).map(new Func1<KeyValue<K, R>, R>() { - @Override - public R call(KeyValue<K, R> pair) { - return pair.value; - } - }); - return new GroupedObservable<K, R>(key, new Func1<Observer<R>, Subscription>() { + private final AtomicReference<Observer<T>> subscribedObserver; - @Override - public Subscription call(Observer<R> observer) { - return observable.subscribe(observer); - } + public GroupedSubject(K key, Func1<Observer<T>, Subscription> onSubscribe, AtomicReference<Observer<T>> subscribedObserver) { + super(key, onSubscribe); + this.subscribedObserver = subscribedObserver; + } + + @Override + public void onCompleted() { + subscribedObserver.get().onCompleted(); + } + + @Override + public void 
onError(Exception e) { + subscribedObserver.get().onError(e); + } + + @Override + public void onNext(T v) { + subscribedObserver.get().onNext(v); + } - }); } + @SuppressWarnings("rawtypes") + private static Observer EMPTY_OBSERVER = new Observer() { + + @Override + public void onCompleted() { + // do nothing + } + + @Override + public void onError(Exception e) { + // do nothing + } + + @Override + public void onNext(Object args) { + // do nothing + } + + }; + private static class KeyValue<K, V> { private final K key; private final V value; @@ -141,13 +214,12 @@ public void testGroupBy() { Observable<String> source = Observable.from("one", "two", "three", "four", "five", "six"); Observable<GroupedObservable<Integer, String>> grouped = Observable.create(groupBy(source, length)); - Map<Integer, List<String>> map = toMap(grouped); + Map<Integer, Collection<String>> map = toMap(grouped); assertEquals(3, map.size()); - assertEquals(Arrays.asList("one", "two", "six"), map.get(3)); - assertEquals(Arrays.asList("four", "five"), map.get(4)); - assertEquals(Arrays.asList("three"), map.get(5)); - + assertArrayEquals(Arrays.asList("one", "two", "six").toArray(), map.get(3).toArray()); + assertArrayEquals(Arrays.asList("four", "five").toArray(), map.get(4).toArray()); + assertArrayEquals(Arrays.asList("three").toArray(), map.get(5).toArray()); } @Test @@ -155,31 +227,133 @@ public void testEmpty() { Observable<String> source = Observable.from(); Observable<GroupedObservable<Integer, String>> grouped = Observable.create(groupBy(source, length)); - Map<Integer, List<String>> map = toMap(grouped); + Map<Integer, Collection<String>> map = toMap(grouped); assertTrue(map.isEmpty()); } - private static <K, V> Map<K, List<V>> toMap(Observable<GroupedObservable<K, V>> observable) { - Map<K, List<V>> result = new HashMap<K, List<V>>(); - for (GroupedObservable<K, V> g : observable.toBlockingObservable().toIterable()) { - K key = g.getKey(); + private static <K, V> Map<K, Collection<V>> 
toMap(Observable<GroupedObservable<K, V>> observable) { - for (V value : g.toBlockingObservable().toIterable()) { - List<V> values = result.get(key); - if (values == null) { - values = new ArrayList<V>(); - result.put(key, values); - } + final ConcurrentHashMap<K, Collection<V>> result = new ConcurrentHashMap<K, Collection<V>>(); - values.add(value); - } + observable.forEach(new Action1<GroupedObservable<K, V>>() { - } + @Override + public void call(final GroupedObservable<K, V> o) { + result.put(o.getKey(), new ConcurrentLinkedQueue<V>()); + o.subscribe(new Action1<V>() { + + @Override + public void call(V v) { + result.get(o.getKey()).add(v); + } + + }); + } + }); return result; } + /** + * Assert that only a single subscription to a stream occurs and that all events are received. + * + * @throws Exception + */ + @Test + public void testGroupedEventStream() throws Exception { + + final AtomicInteger eventCounter = new AtomicInteger(); + final AtomicInteger subscribeCounter = new AtomicInteger(); + final AtomicInteger groupCounter = new AtomicInteger(); + final CountDownLatch latch = new CountDownLatch(1); + final int count = 100; + final int groupCount = 2; + + Observable<Event> es = Observable.create(new Func1<Observer<Event>, Subscription>() { + + @Override + public Subscription call(final Observer<Event> observer) { + System.out.println("*** Subscribing to EventStream ***"); + subscribeCounter.incrementAndGet(); + new Thread(new Runnable() { + + @Override + public void run() { + for (int i = 0; i < count; i++) { + Event e = new Event(); + e.source = i % groupCount; + e.message = "Event-" + i; + observer.onNext(e); + } + observer.onCompleted(); + } + + }).start(); + return Subscriptions.empty(); + } + + }); + + es.groupBy(new Func1<Event, Integer>() { + + @Override + public Integer call(Event e) { + return e.source; + } + }).mapMany(new Func1<GroupedObservable<Integer, Event>, Observable<String>>() { + + @Override + public Observable<String> 
call(GroupedObservable<Integer, Event> eventGroupedObservable) { + System.out.println("GroupedObservable Key: " + eventGroupedObservable.getKey()); + groupCounter.incrementAndGet(); + + return eventGroupedObservable.map(new Func1<Event, String>() { + + @Override + public String call(Event event) { + return "Source: " + event.source + " Message: " + event.message; + } + }); + + }; + }).subscribe(new Observer<String>() { + + @Override + public void onCompleted() { + latch.countDown(); + } + + @Override + public void onError(Exception e) { + e.printStackTrace(); + latch.countDown(); + } + + @Override + public void onNext(String outputMessage) { + System.out.println(outputMessage); + eventCounter.incrementAndGet(); + } + }); + + latch.await(5000, TimeUnit.MILLISECONDS); + assertEquals(1, subscribeCounter.get()); + assertEquals(groupCount, groupCounter.get()); + assertEquals(count, eventCounter.get()); + + } + + private static class Event { + int source; + String message; + + @Override + public String toString() { + return "Event => source: " + source + " message: " + message; + } + } + } }
e86d48730c64d10ba2a838e5663f9ab7a698c9c6
hadoop
HADOOP-7187. Fix socket leak in GangliaContext. - Contributed by Uma Maheswara Rao G--git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1085122 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hadoop
diff --git a/CHANGES.txt b/CHANGES.txt index 3cb5136879088..f33d02a834cbc 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -604,6 +604,9 @@ Release 0.21.1 - Unreleased HADOOP-7174. Null is displayed in the "fs -copyToLocal" command. (Uma Maheswara Rao G via szetszwo) + HADOOP-7187. Fix socket leak in GangliaContext. (Uma Maheswara Rao G + via szetszwo) + Release 0.21.0 - 2010-08-13 INCOMPATIBLE CHANGES diff --git a/src/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java b/src/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java index 1b22240f879d0..6460120012d41 100644 --- a/src/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java +++ b/src/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java @@ -112,6 +112,17 @@ public void init(String contextName, ContextFactory factory) { } } + /** + * method to close the datagram socket + */ + @Override + public void close() { + super.close(); + if (datagramSocket != null) { + datagramSocket.close(); + } + } + @InterfaceAudience.Private public void emitRecord(String contextName, String recordName, OutputRecord outRec) diff --git a/src/test/core/org/apache/hadoop/metrics/ganglia/TestGangliaContext.java b/src/test/core/org/apache/hadoop/metrics/ganglia/TestGangliaContext.java new file mode 100644 index 0000000000000..deb8231154cd8 --- /dev/null +++ b/src/test/core/org/apache/hadoop/metrics/ganglia/TestGangliaContext.java @@ -0,0 +1,42 @@ +/* + * TestGangliaContext.java + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package org.apache.hadoop.metrics.ganglia; + +import org.junit.Test; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.apache.hadoop.metrics.ContextFactory; +import org.apache.hadoop.metrics.spi.AbstractMetricsContext; + +public class TestGangliaContext { + + @Test + public void testCloseShouldCloseTheSocketWhichIsCreatedByInit() throws Exception { + AbstractMetricsContext context=new GangliaContext(); + context.init("gangliaContext", ContextFactory.getFactory()); + GangliaContext gangliaContext =(GangliaContext) context; + assertFalse("Socket already closed",gangliaContext.datagramSocket.isClosed()); + context.close(); + assertTrue("Socket not closed",gangliaContext.datagramSocket.isClosed()); + } +}
5a91d607882e59a6255eff0f144a6efecc749af2
spring-framework
Allow setting WSDL document as a Resource--Prior to this change
a
https://github.com/spring-projects/spring-framework
diff --git a/spring-web/src/main/java/org/springframework/remoting/jaxws/LocalJaxWsServiceFactory.java b/spring-web/src/main/java/org/springframework/remoting/jaxws/LocalJaxWsServiceFactory.java index 3e8cf74b9024..7f95b3acd651 100644 --- a/spring-web/src/main/java/org/springframework/remoting/jaxws/LocalJaxWsServiceFactory.java +++ b/spring-web/src/main/java/org/springframework/remoting/jaxws/LocalJaxWsServiceFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2002-2010 the original author or authors. + * Copyright 2002-2012 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,12 +16,14 @@ package org.springframework.remoting.jaxws; +import java.io.IOException; import java.net.URL; import java.util.concurrent.Executor; import javax.xml.namespace.QName; import javax.xml.ws.Service; import javax.xml.ws.handler.HandlerResolver; +import org.springframework.core.io.Resource; import org.springframework.util.Assert; /** @@ -53,11 +55,22 @@ public class LocalJaxWsServiceFactory { /** * Set the URL of the WSDL document that describes the service. + * @see #setWsdlDocumentResource(Resource) */ public void setWsdlDocumentUrl(URL wsdlDocumentUrl) { this.wsdlDocumentUrl = wsdlDocumentUrl; } + /** + * Set the WSDL document URL as a {@link Resource}. + * @throws IOException + * @since 3.2 + */ + public void setWsdlDocumentResource(Resource wsdlDocumentResource) throws IOException { + Assert.notNull(wsdlDocumentResource, "WSDL Resource must not be null."); + this.wsdlDocumentUrl = wsdlDocumentResource.getURL(); + } + /** * Return the URL of the WSDL document that describes the service. */
2c95fae7ad7c87c96fad2e4c80c3580ea3b06cf3
orientdb
Implemented issue 109:- http://code.google.com/p/orient/issues/detail?id=109 About in-memory clusters- inside regular persistent databases.--
a
https://github.com/orientechnologies/orientdb
diff --git a/core/src/main/java/com/orientechnologies/orient/core/config/OStorageConfiguration.java b/core/src/main/java/com/orientechnologies/orient/core/config/OStorageConfiguration.java index 8a282499a36..40b08aa627e 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/config/OStorageConfiguration.java +++ b/core/src/main/java/com/orientechnologies/orient/core/config/OStorageConfiguration.java @@ -30,288 +30,298 @@ import com.orientechnologies.orient.core.storage.OStorage; public class OStorageConfiguration implements OSerializableStream { - public static final int CONFIG_RECORD_NUM = 0; - - public int version = 0; - public String name; - public String schemaRecordId; - public String dictionaryRecordId; - - public String localeLanguage = Locale.getDefault().getLanguage(); - public String localeCountry = Locale.getDefault().getCountry(); - public String dateFormat = "yyyy-MM-dd"; - public String dateTimeFormat = "yyyy-MM-dd hh:mm:ss"; - - public List<OStorageClusterConfiguration> clusters = new ArrayList<OStorageClusterConfiguration>(); - public List<OStorageDataConfiguration> dataSegments = new ArrayList<OStorageDataConfiguration>(); - - public OStorageTxConfiguration txSegment = new OStorageTxConfiguration(); - - public List<OEntryConfiguration> properties = new ArrayList<OEntryConfiguration>(); - - private transient Locale localeInstance; - private transient DateFormat dateFormatInstance; - private transient DateFormat dateTimeFormatInstance; - private transient DecimalFormatSymbols unusualSymbols; - private transient OStorage storage; - private transient byte[] record; - - private static final int FIXED_CONFIG_SIZE = 20000; - - public OStorageConfiguration load() throws IOException { - record = storage.readRecord(null, -1, storage.getClusterIdByName(OStorage.CLUSTER_INTERNAL_NAME), CONFIG_RECORD_NUM, null).buffer; - fromStream(record); - return this; - } - - public void update() throws IOException { - if (record == null) - return; - - record = 
toStream(); - storage.updateRecord(-1, storage.getClusterIdByName(OStorage.CLUSTER_INTERNAL_NAME), 0, record, -1, ORecordBytes.RECORD_TYPE); - } - - public void create() throws IOException { - record = toStream(); - storage.createRecord(storage.getClusterIdByName(OStorage.CLUSTER_INTERNAL_NAME), record, ORecordBytes.RECORD_TYPE); - } - - public OStorageConfiguration(final OStorage iStorage) { - storage = iStorage; - } - - public boolean isEmpty() { - return clusters.isEmpty(); - } - - public Locale getLocaleInstance() { - if (localeInstance == null) - localeInstance = new Locale(localeLanguage, localeCountry); - - return localeInstance; - } - - public DateFormat getDateFormatInstance() { - if (dateFormatInstance == null) { - dateFormatInstance = new SimpleDateFormat(dateFormat); - dateFormatInstance.setLenient(false); - } - return dateFormatInstance; - } - - public DateFormat getDateTimeFormatInstance() { - if (dateTimeFormatInstance == null) { - dateTimeFormatInstance = new SimpleDateFormat(dateTimeFormat); - dateTimeFormatInstance.setLenient(false); - } - return dateTimeFormatInstance; - } - - public DecimalFormatSymbols getUnusualSymbols() { - if (unusualSymbols == null) - unusualSymbols = new DecimalFormatSymbols(getLocaleInstance()); - return unusualSymbols; - } - - public OSerializableStream fromStream(byte[] iStream) throws IOException { - String[] values = new String(iStream).split("\\|"); - int index = 0; - version = Integer.parseInt(read(values[index++])); - name = read(values[index++]); - - schemaRecordId = read(values[index++]); - dictionaryRecordId = read(values[index++]); - - localeLanguage = read(values[index++]); - localeCountry = read(values[index++]); - dateFormat = read(values[index++]); - dateTimeFormat = read(values[index++]); - - int size = Integer.parseInt(read(values[index++])); - String clusterType; - int clusterId; - String clusterName; - - // PREPARE THE LIST OF CLUSTERS - clusters = new ArrayList<OStorageClusterConfiguration>(size); - 
for (int i = 0; i < size; ++i) - clusters.add(null); - - OStoragePhysicalClusterConfiguration phyCluster; - OStorageLogicalClusterConfiguration logCluster; - - for (int i = 0; i < size; ++i) { - clusterId = Integer.parseInt(read(values[index++])); - clusterName = read(values[index++]); - - clusterType = read(values[index++]); - - // PHYSICAL CLUSTER - if (clusterType.equals("p")) { - phyCluster = new OStoragePhysicalClusterConfiguration(this, clusterId); - phyCluster.name = clusterName; - index = phySegmentFromStream(values, index, phyCluster); - phyCluster.holeFile = new OStorageClusterHoleConfiguration(phyCluster, read(values[index++]), read(values[index++]), - read(values[index++])); - clusters.set(clusterId, phyCluster); - } else { - // LOGICAL CLUSTER - logCluster = new OStorageLogicalClusterConfiguration(clusterName, clusterId, Integer.parseInt(read(values[index++])), - new ORecordId(values[index++])); - clusters.set(clusterId, logCluster); - } - } - - // PREPARE THE LIST OF DATA SEGS - size = Integer.parseInt(read(values[index++])); - dataSegments = new ArrayList<OStorageDataConfiguration>(size); - for (int i = 0; i < size; ++i) - dataSegments.add(null); - - int dataId; - String dataName; - OStorageDataConfiguration data; - for (int i = 0; i < size; ++i) { - dataId = Integer.parseInt(read(values[index++])); - dataName = read(values[index++]); - - data = new OStorageDataConfiguration(this, dataName); - index = phySegmentFromStream(values, index, data); - data.holeFile = new OStorageDataHoleConfiguration(data, read(values[index++]), read(values[index++]), read(values[index++])); - dataSegments.set(dataId, data); - } - - txSegment = new OStorageTxConfiguration(read(values[index++]), read(values[index++]), read(values[index++]), - read(values[index++]), read(values[index++])); - - size = Integer.parseInt(read(values[index++])); - properties = new ArrayList<OEntryConfiguration>(size); - for (int i = 0; i < size; ++i) { - properties.add(new 
OEntryConfiguration(read(values[index++]), read(values[index++]))); - } - - return this; - } - - public byte[] toStream() throws IOException { - StringBuilder buffer = new StringBuilder(); - - write(buffer, version); - write(buffer, name); - - write(buffer, schemaRecordId); - write(buffer, dictionaryRecordId); - - write(buffer, localeLanguage); - write(buffer, localeCountry); - write(buffer, dateFormat); - write(buffer, dateTimeFormat); - - write(buffer, clusters.size()); - for (OStorageClusterConfiguration c : clusters) { - if (c == null) - continue; - - write(buffer, c.getId()); - write(buffer, c.getName()); - - if (c instanceof OStoragePhysicalClusterConfiguration) { - write(buffer, "p"); - phySegmentToStream(buffer, (OStoragePhysicalClusterConfiguration) c); - fileToStream(buffer, ((OStoragePhysicalClusterConfiguration) c).holeFile); - } else { - write(buffer, "l"); - logSegmentToStream(buffer, (OStorageLogicalClusterConfiguration) c); - } - } - - write(buffer, dataSegments.size()); - for (OStorageDataConfiguration d : dataSegments) { - if (d == null) - continue; - - write(buffer, d.id); - write(buffer, d.name); - - phySegmentToStream(buffer, d); - fileToStream(buffer, d.holeFile); - } - - fileToStream(buffer, txSegment); - write(buffer, txSegment.isSynchRecord()); - write(buffer, txSegment.isSynchTx()); - - write(buffer, properties.size()); - for (OEntryConfiguration e : properties) - entryToStream(buffer, e); - - if (buffer.length() > FIXED_CONFIG_SIZE) - throw new OConfigurationException("Configuration data exceeded size limit: " + FIXED_CONFIG_SIZE + " bytes"); - - // ALLOCATE ENOUGHT SPACE TO REUSE IT EVERY TIME - buffer.append("|"); - buffer.setLength(FIXED_CONFIG_SIZE); - - return buffer.toString().getBytes(); - } - - private int phySegmentFromStream(final String[] values, int index, final OStorageSegmentConfiguration iSegment) { - iSegment.maxSize = read(values[index++]); - iSegment.fileType = read(values[index++]); - iSegment.fileStartSize = 
read(values[index++]); - iSegment.fileMaxSize = read(values[index++]); - iSegment.fileIncrementSize = read(values[index++]); - iSegment.defrag = read(values[index++]); - - final int size = Integer.parseInt(read(values[index++])); - iSegment.infoFiles = new OStorageFileConfiguration[size]; - for (int i = 0; i < size; ++i) { - iSegment.infoFiles[i] = new OStorageFileConfiguration(iSegment, read(values[index++]), read(values[index++]), - read(values[index++]), iSegment.fileIncrementSize); - } - - return index; - } - - private void phySegmentToStream(final StringBuilder iBuffer, final OStorageSegmentConfiguration iSegment) { - write(iBuffer, iSegment.maxSize); - write(iBuffer, iSegment.fileType); - write(iBuffer, iSegment.fileStartSize); - write(iBuffer, iSegment.fileMaxSize); - write(iBuffer, iSegment.fileIncrementSize); - write(iBuffer, iSegment.defrag); - - write(iBuffer, iSegment.infoFiles.length); - for (OStorageFileConfiguration f : iSegment.infoFiles) - fileToStream(iBuffer, f); - } - - private void logSegmentToStream(final StringBuilder iBuffer, final OStorageLogicalClusterConfiguration iSegment) { - write(iBuffer, iSegment.physicalClusterId); - write(iBuffer, iSegment.map.toString()); - } - - private void fileToStream(final StringBuilder iBuffer, final OStorageFileConfiguration iFile) { - write(iBuffer, iFile.path); - write(iBuffer, iFile.type); - write(iBuffer, iFile.maxSize); - } - - private void entryToStream(final StringBuilder iBuffer, final OEntryConfiguration iEntry) { - write(iBuffer, iEntry.name); - write(iBuffer, iEntry.value); - } - - private String read(final String iValue) { - if (iValue.equals(" ")) - return null; - return iValue; - } - - private void write(final StringBuilder iBuffer, final Object iValue) { - if (iBuffer.length() > 0) - iBuffer.append("|"); - iBuffer.append(iValue != null ? 
iValue.toString() : " "); - } + public static final int CONFIG_RECORD_NUM = 0; + + public int version = 0; + public String name; + public String schemaRecordId; + public String dictionaryRecordId; + + public String localeLanguage = Locale.getDefault().getLanguage(); + public String localeCountry = Locale.getDefault().getCountry(); + public String dateFormat = "yyyy-MM-dd"; + public String dateTimeFormat = "yyyy-MM-dd hh:mm:ss"; + + public List<OStorageClusterConfiguration> clusters = new ArrayList<OStorageClusterConfiguration>(); + public List<OStorageDataConfiguration> dataSegments = new ArrayList<OStorageDataConfiguration>(); + + public OStorageTxConfiguration txSegment = new OStorageTxConfiguration(); + + public List<OEntryConfiguration> properties = new ArrayList<OEntryConfiguration>(); + + private transient Locale localeInstance; + private transient DateFormat dateFormatInstance; + private transient DateFormat dateTimeFormatInstance; + private transient DecimalFormatSymbols unusualSymbols; + private transient OStorage storage; + private transient byte[] record; + + private static final int FIXED_CONFIG_SIZE = 20000; + + public OStorageConfiguration load() throws IOException { + record = storage.readRecord(null, -1, storage.getClusterIdByName(OStorage.CLUSTER_INTERNAL_NAME), CONFIG_RECORD_NUM, null).buffer; + fromStream(record); + return this; + } + + public void update() throws IOException { + if (record == null) + return; + + record = toStream(); + storage.updateRecord(-1, storage.getClusterIdByName(OStorage.CLUSTER_INTERNAL_NAME), 0, record, -1, ORecordBytes.RECORD_TYPE); + } + + public void create() throws IOException { + record = toStream(); + storage.createRecord(storage.getClusterIdByName(OStorage.CLUSTER_INTERNAL_NAME), record, ORecordBytes.RECORD_TYPE); + } + + public OStorageConfiguration(final OStorage iStorage) { + storage = iStorage; + } + + public boolean isEmpty() { + return clusters.isEmpty(); + } + + public Locale getLocaleInstance() { + if 
(localeInstance == null) + localeInstance = new Locale(localeLanguage, localeCountry); + + return localeInstance; + } + + public DateFormat getDateFormatInstance() { + if (dateFormatInstance == null) { + dateFormatInstance = new SimpleDateFormat(dateFormat); + dateFormatInstance.setLenient(false); + } + return dateFormatInstance; + } + + public DateFormat getDateTimeFormatInstance() { + if (dateTimeFormatInstance == null) { + dateTimeFormatInstance = new SimpleDateFormat(dateTimeFormat); + dateTimeFormatInstance.setLenient(false); + } + return dateTimeFormatInstance; + } + + public DecimalFormatSymbols getUnusualSymbols() { + if (unusualSymbols == null) + unusualSymbols = new DecimalFormatSymbols(getLocaleInstance()); + return unusualSymbols; + } + + public OSerializableStream fromStream(byte[] iStream) throws IOException { + String[] values = new String(iStream).split("\\|"); + int index = 0; + version = Integer.parseInt(read(values[index++])); + name = read(values[index++]); + + schemaRecordId = read(values[index++]); + dictionaryRecordId = read(values[index++]); + + localeLanguage = read(values[index++]); + localeCountry = read(values[index++]); + dateFormat = read(values[index++]); + dateTimeFormat = read(values[index++]); + + int size = Integer.parseInt(read(values[index++])); + String clusterType; + int clusterId; + String clusterName; + + // PREPARE THE LIST OF CLUSTERS + clusters = new ArrayList<OStorageClusterConfiguration>(size); + for (int i = 0; i < size; ++i) + clusters.add(null); + + OStoragePhysicalClusterConfiguration phyCluster; + OStorageLogicalClusterConfiguration logCluster; + OStorageMemoryClusterConfiguration memCluster; + + for (int i = 0; i < size; ++i) { + clusterId = Integer.parseInt(read(values[index++])); + clusterName = read(values[index++]); + + clusterType = read(values[index++]); + + // PHYSICAL CLUSTER + if (clusterType.equals("p")) { + phyCluster = new OStoragePhysicalClusterConfiguration(this, clusterId); + phyCluster.name = 
clusterName; + index = phySegmentFromStream(values, index, phyCluster); + phyCluster.holeFile = new OStorageClusterHoleConfiguration(phyCluster, read(values[index++]), read(values[index++]), + read(values[index++])); + clusters.set(clusterId, phyCluster); + } else if (clusterType.equals("l")) { + // LOGICAL CLUSTER + logCluster = new OStorageLogicalClusterConfiguration(clusterName, clusterId, Integer.parseInt(read(values[index++])), + new ORecordId(values[index++])); + clusters.set(clusterId, logCluster); + } else { + // MEMORY CLUSTER + memCluster = new OStorageMemoryClusterConfiguration(clusterName, clusterId); + clusters.set(clusterId, memCluster); + } + } + + // PREPARE THE LIST OF DATA SEGS + size = Integer.parseInt(read(values[index++])); + dataSegments = new ArrayList<OStorageDataConfiguration>(size); + for (int i = 0; i < size; ++i) + dataSegments.add(null); + + int dataId; + String dataName; + OStorageDataConfiguration data; + for (int i = 0; i < size; ++i) { + dataId = Integer.parseInt(read(values[index++])); + dataName = read(values[index++]); + + data = new OStorageDataConfiguration(this, dataName); + index = phySegmentFromStream(values, index, data); + data.holeFile = new OStorageDataHoleConfiguration(data, read(values[index++]), read(values[index++]), read(values[index++])); + dataSegments.set(dataId, data); + } + + txSegment = new OStorageTxConfiguration(read(values[index++]), read(values[index++]), read(values[index++]), + read(values[index++]), read(values[index++])); + + size = Integer.parseInt(read(values[index++])); + properties = new ArrayList<OEntryConfiguration>(size); + for (int i = 0; i < size; ++i) { + properties.add(new OEntryConfiguration(read(values[index++]), read(values[index++]))); + } + + return this; + } + + public byte[] toStream() throws IOException { + StringBuilder buffer = new StringBuilder(); + + write(buffer, version); + write(buffer, name); + + write(buffer, schemaRecordId); + write(buffer, dictionaryRecordId); + + 
write(buffer, localeLanguage); + write(buffer, localeCountry); + write(buffer, dateFormat); + write(buffer, dateTimeFormat); + + write(buffer, clusters.size()); + for (OStorageClusterConfiguration c : clusters) { + if (c == null) + continue; + + write(buffer, c.getId()); + write(buffer, c.getName()); + + if (c instanceof OStoragePhysicalClusterConfiguration) { + // PHYSICAL + write(buffer, "p"); + phySegmentToStream(buffer, (OStoragePhysicalClusterConfiguration) c); + fileToStream(buffer, ((OStoragePhysicalClusterConfiguration) c).holeFile); + } else if (c instanceof OStorageLogicalClusterConfiguration) { + // LOGICAL + write(buffer, "l"); + logSegmentToStream(buffer, (OStorageLogicalClusterConfiguration) c); + } else { + // MEMORY + write(buffer, "m"); + } + } + + write(buffer, dataSegments.size()); + for (OStorageDataConfiguration d : dataSegments) { + if (d == null) + continue; + + write(buffer, d.id); + write(buffer, d.name); + + phySegmentToStream(buffer, d); + fileToStream(buffer, d.holeFile); + } + + fileToStream(buffer, txSegment); + write(buffer, txSegment.isSynchRecord()); + write(buffer, txSegment.isSynchTx()); + + write(buffer, properties.size()); + for (OEntryConfiguration e : properties) + entryToStream(buffer, e); + + if (buffer.length() > FIXED_CONFIG_SIZE) + throw new OConfigurationException("Configuration data exceeded size limit: " + FIXED_CONFIG_SIZE + " bytes"); + + // ALLOCATE ENOUGHT SPACE TO REUSE IT EVERY TIME + buffer.append("|"); + buffer.setLength(FIXED_CONFIG_SIZE); + + return buffer.toString().getBytes(); + } + + private int phySegmentFromStream(final String[] values, int index, final OStorageSegmentConfiguration iSegment) { + iSegment.maxSize = read(values[index++]); + iSegment.fileType = read(values[index++]); + iSegment.fileStartSize = read(values[index++]); + iSegment.fileMaxSize = read(values[index++]); + iSegment.fileIncrementSize = read(values[index++]); + iSegment.defrag = read(values[index++]); + + final int size = 
Integer.parseInt(read(values[index++])); + iSegment.infoFiles = new OStorageFileConfiguration[size]; + for (int i = 0; i < size; ++i) { + iSegment.infoFiles[i] = new OStorageFileConfiguration(iSegment, read(values[index++]), read(values[index++]), + read(values[index++]), iSegment.fileIncrementSize); + } + + return index; + } + + private void phySegmentToStream(final StringBuilder iBuffer, final OStorageSegmentConfiguration iSegment) { + write(iBuffer, iSegment.maxSize); + write(iBuffer, iSegment.fileType); + write(iBuffer, iSegment.fileStartSize); + write(iBuffer, iSegment.fileMaxSize); + write(iBuffer, iSegment.fileIncrementSize); + write(iBuffer, iSegment.defrag); + + write(iBuffer, iSegment.infoFiles.length); + for (OStorageFileConfiguration f : iSegment.infoFiles) + fileToStream(iBuffer, f); + } + + private void logSegmentToStream(final StringBuilder iBuffer, final OStorageLogicalClusterConfiguration iSegment) { + write(iBuffer, iSegment.physicalClusterId); + write(iBuffer, iSegment.map.toString()); + } + + private void fileToStream(final StringBuilder iBuffer, final OStorageFileConfiguration iFile) { + write(iBuffer, iFile.path); + write(iBuffer, iFile.type); + write(iBuffer, iFile.maxSize); + } + + private void entryToStream(final StringBuilder iBuffer, final OEntryConfiguration iEntry) { + write(iBuffer, iEntry.name); + write(iBuffer, iEntry.value); + } + + private String read(final String iValue) { + if (iValue.equals(" ")) + return null; + return iValue; + } + + private void write(final StringBuilder iBuffer, final Object iValue) { + if (iBuffer.length() > 0) + iBuffer.append("|"); + iBuffer.append(iValue != null ? 
iValue.toString() : " "); + } } diff --git a/core/src/main/java/com/orientechnologies/orient/core/config/OStorageMemoryClusterConfiguration.java b/core/src/main/java/com/orientechnologies/orient/core/config/OStorageMemoryClusterConfiguration.java new file mode 100644 index 00000000000..a2b8060568c --- /dev/null +++ b/core/src/main/java/com/orientechnologies/orient/core/config/OStorageMemoryClusterConfiguration.java @@ -0,0 +1,39 @@ +/* + * Copyright 1999-2010 Luca Garulli (l.garulli--at--orientechnologies.com) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.orientechnologies.orient.core.config; + + +public class OStorageMemoryClusterConfiguration implements OStorageClusterConfiguration { + public String name; + public int id; + + public OStorageMemoryClusterConfiguration(final String name, final int id) { + this.name = name; + this.id = id; + } + + public String getName() { + return name; + } + + public int getId() { + return id; + } + + public void setId(final int iId) { + id = iId; + } +} diff --git a/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OStorageLocal.java b/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OStorageLocal.java index 3db5f7a5c73..226b5c61978 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OStorageLocal.java +++ b/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OStorageLocal.java @@ -35,6 +35,7 @@ import com.orientechnologies.orient.core.config.OStorageConfiguration; import com.orientechnologies.orient.core.config.OStorageDataConfiguration; import com.orientechnologies.orient.core.config.OStorageLogicalClusterConfiguration; +import com.orientechnologies.orient.core.config.OStorageMemoryClusterConfiguration; import com.orientechnologies.orient.core.config.OStoragePhysicalClusterConfiguration; import com.orientechnologies.orient.core.config.OStorageSegmentConfiguration; import com.orientechnologies.orient.core.db.record.ODatabaseRecord; @@ -56,6 +57,7 @@ import com.orientechnologies.orient.core.storage.ORecordBrowsingListener; import com.orientechnologies.orient.core.storage.OStorage; import com.orientechnologies.orient.core.storage.OStorageAbstract; +import com.orientechnologies.orient.core.storage.impl.memory.OClusterMemory; import com.orientechnologies.orient.core.tx.OTransaction; public class OStorageLocal extends OStorageAbstract { @@ -338,6 +340,8 @@ public int addCluster(String iClusterName, final String iClusterType, final Obje : (Integer) iParameters[0]); return 
addLogicalCluster(iClusterName, physicalClusterId); + } else if (OClusterMemory.TYPE.equalsIgnoreCase(iClusterType)) { + return addMemoryCluster(iClusterName); } else OLogManager.instance().exception( "Cluster type '" + iClusterType + "' is not supported. Supported types are: " + Arrays.toString(TYPES), null, @@ -1067,6 +1071,18 @@ private int addLogicalCluster(final String iClusterName, final int iPhysicalClus return id; } + private int addMemoryCluster(final String iClusterName) throws IOException { + final OStorageMemoryClusterConfiguration config = new OStorageMemoryClusterConfiguration(iClusterName, clusters.length); + + configuration.clusters.add(config); + + final OClusterMemory cluster = new OClusterMemory(clusters.length, iClusterName); + final int id = registerCluster(cluster); + configuration.update(); + + return id; + } + public ODataLocal[] getDataSegments() { return dataSegments; }
37653501dcfc08114bd23b6f4b9ec6f60b846b7a
Delta Spike
DELTASPIKE-365 add a boot(Map) method This is needed to startup some containers with initial properties if they support it.
c
https://github.com/apache/deltaspike
diff --git a/deltaspike/cdictrl/api/src/main/java/org/apache/deltaspike/cdise/api/CdiContainer.java b/deltaspike/cdictrl/api/src/main/java/org/apache/deltaspike/cdise/api/CdiContainer.java index 3b131afd0..62c789837 100644 --- a/deltaspike/cdictrl/api/src/main/java/org/apache/deltaspike/cdise/api/CdiContainer.java +++ b/deltaspike/cdictrl/api/src/main/java/org/apache/deltaspike/cdise/api/CdiContainer.java @@ -20,6 +20,7 @@ import javax.enterprise.inject.spi.BeanManager; +import java.util.Map; /** @@ -35,9 +36,9 @@ public interface CdiContainer { /** - * <b>Booting the CdiTestContainer will scan the whole classpath + * <p>Booting the CdiTestContainer will scan the whole classpath * for Beans and extensions available. - * The container might throw a DeploymentException or similar on startup.</b> + * The container might throw a DeploymentException or similar on startup.</p> * * <p><b>Note:</b> booting the container does <i>not</i> automatically * start all CDI Contexts! Depending on the underlying CDI container you @@ -46,6 +47,15 @@ public interface CdiContainer * {@link ContextControl#startContexts()}</p> */ void boot(); + + /** + * <p>Like {@link #boot()} but allows to pass in a configuration Map + * for the container.</p> + * <p>Please note that the configuration is container implementation dependent!</p> + * + * @param properties + */ + void boot(Map<?,?> properties); /** * This will shutdown the underlying CDI container and stop all contexts. 
diff --git a/deltaspike/cdictrl/impl-openejb/src/main/java/org/apache/deltaspike/cdise/openejb/OpenEjbContainerControl.java b/deltaspike/cdictrl/impl-openejb/src/main/java/org/apache/deltaspike/cdise/openejb/OpenEjbContainerControl.java index c23cd5db5..aa9dbe6c9 100644 --- a/deltaspike/cdictrl/impl-openejb/src/main/java/org/apache/deltaspike/cdise/openejb/OpenEjbContainerControl.java +++ b/deltaspike/cdictrl/impl-openejb/src/main/java/org/apache/deltaspike/cdise/openejb/OpenEjbContainerControl.java @@ -56,11 +56,17 @@ public BeanManager getBeanManager() @Override public synchronized void boot() + { + boot(null); + } + + @Override + public synchronized void boot(Map<?, ?> properties) { if (openEjbContainer == null) { // this immediately boots the container - openEjbContainer = EJBContainer.createEJBContainer(getConfiguration()); + openEjbContainer = EJBContainer.createEJBContainer(properties); // this magic code performs injection try diff --git a/deltaspike/cdictrl/impl-owb/src/main/java/org/apache/deltaspike/cdise/owb/OpenWebBeansContainerControl.java b/deltaspike/cdictrl/impl-owb/src/main/java/org/apache/deltaspike/cdise/owb/OpenWebBeansContainerControl.java index 9c45aea3e..7f1fa7164 100644 --- a/deltaspike/cdictrl/impl-owb/src/main/java/org/apache/deltaspike/cdise/owb/OpenWebBeansContainerControl.java +++ b/deltaspike/cdictrl/impl-owb/src/main/java/org/apache/deltaspike/cdise/owb/OpenWebBeansContainerControl.java @@ -26,6 +26,7 @@ import javax.enterprise.context.spi.CreationalContext; import javax.enterprise.inject.spi.Bean; import javax.enterprise.inject.spi.BeanManager; +import java.util.Map; import java.util.Set; import java.util.logging.Logger; @@ -67,6 +68,13 @@ public synchronized void boot() lifecycle.startApplication(mockServletContextEvent); } + @Override + public void boot(Map<?, ?> properties) + { + // we do not yet support any configuration. 
+ boot(); + } + @Override public synchronized void shutdown() { diff --git a/deltaspike/cdictrl/impl-weld/src/main/java/org/apache/deltaspike/cdise/weld/WeldContainerControl.java b/deltaspike/cdictrl/impl-weld/src/main/java/org/apache/deltaspike/cdise/weld/WeldContainerControl.java index 65b8fd660..7c1a28aa1 100644 --- a/deltaspike/cdictrl/impl-weld/src/main/java/org/apache/deltaspike/cdise/weld/WeldContainerControl.java +++ b/deltaspike/cdictrl/impl-weld/src/main/java/org/apache/deltaspike/cdise/weld/WeldContainerControl.java @@ -26,6 +26,7 @@ import javax.enterprise.context.spi.CreationalContext; import javax.enterprise.inject.spi.Bean; import javax.enterprise.inject.spi.BeanManager; +import java.util.Map; import java.util.Set; import java.util.logging.Logger; @@ -64,6 +65,14 @@ public synchronized void boot() weldContainer = weld.initialize(); } + @Override + public void boot(Map<?, ?> properties) + { + // no configuration yet. Perform default boot + + boot(); + } + @Override public synchronized void shutdown() {
1c52b6551b63053b261f4ac821093a8a203de596
hadoop
YARN-2705. Fixed bugs in ResourceManager node-label- manager that were causing test-failures: added a dummy in-memory- labels-manager. Contributed by Wangda Tan.--(cherry picked from commit e9c66e8fd2ccb658db2848e1ab911f1502de4de5)-
c
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 0300b2e6a359b..63e0e6c94bda7 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -570,6 +570,10 @@ Release 2.6.0 - UNRELEASED YARN-2699. Fixed a bug in CommonNodeLabelsManager that caused tests to fail when using ephemeral ports on NodeIDs. (Wangda Tan via vinodkv) + YARN-2705. Fixed bugs in ResourceManager node-label manager that were causing + test-failures: added a dummy in-memory labels-manager. (Wangda Tan via + vinodkv) + BREAKDOWN OF YARN-1051 SUBTASKS AND RELATED JIRAS YARN-1707. Introduce APIs to add/remove/resize queues in the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index a5e746451e7e8..03a1f6011e0e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1457,10 +1457,16 @@ public class YarnConfiguration extends Configuration { public static final String NODE_LABELS_PREFIX = YARN_PREFIX + "node-labels."; + /** + * Class for RMNodeLabelsManager Please note this value should be consistent + * in client nodes and RM node(s) + */ + public static final String RM_NODE_LABELS_MANAGER_CLASS = NODE_LABELS_PREFIX + + "manager-class"; + /** URI for NodeLabelManager */ - public static final String FS_NODE_LABELS_STORE_URI = NODE_LABELS_PREFIX - + "fs-store.uri"; - public static final String DEFAULT_FS_NODE_LABELS_STORE_URI = "file:///tmp/"; + public static final String FS_NODE_LABELS_STORE_ROOT_DIR = NODE_LABELS_PREFIX + + "fs-store.root-dir"; public static final String FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC = NODE_LABELS_PREFIX + "fs-store.retry-policy-spec"; 
public static final String DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java index 8bb88f27c6539..d68503555f24d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java @@ -220,9 +220,11 @@ protected void serviceStart() throws Exception { // service init, we don't want to trigger any event handling at that time. initDispatcher(getConfig()); - dispatcher.register(NodeLabelsStoreEventType.class, - new ForwardingEventHandler()); - + if (null != dispatcher) { + dispatcher.register(NodeLabelsStoreEventType.class, + new ForwardingEventHandler()); + } + startDispatcher(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java index 2778c742a2d1a..6e685ee3301d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto; @@ -54,7 +55,7 @@ public FileSystemNodeLabelsStore(CommonNodeLabelsManager mgr) { protected static final Log LOG = LogFactory.getLog(FileSystemNodeLabelsStore.class); - protected static final String ROOT_DIR_NAME = "FSNodeLabelManagerRoot"; + protected static final String DEFAULT_DIR_NAME = "node-labels"; protected static final String MIRROR_FILENAME = "nodelabel.mirror"; protected static final String EDITLOG_FILENAME = "nodelabel.editlog"; @@ -63,22 +64,27 @@ protected enum SerializedLogType { } Path fsWorkingPath; - Path rootDirPath; FileSystem fs; FSDataOutputStream editlogOs; Path editLogPath; + + private String getDefaultFSNodeLabelsRootDir() throws IOException { + // default is in local: /tmp/hadoop-yarn-${user}/node-labels/ + return "file:///tmp/hadoop-yarn-" + + UserGroupInformation.getCurrentUser().getShortUserName() + "/" + + DEFAULT_DIR_NAME; + } @Override public void init(Configuration conf) throws Exception { fsWorkingPath = - new Path(conf.get(YarnConfiguration.FS_NODE_LABELS_STORE_URI, - YarnConfiguration.DEFAULT_FS_NODE_LABELS_STORE_URI)); - rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME); + new Path(conf.get(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR, + getDefaultFSNodeLabelsRootDir())); setFileSystem(conf); // mkdir of root dir path - fs.mkdirs(rootDirPath); + fs.mkdirs(fsWorkingPath); } @Override @@ -159,8 +165,8 @@ public void recover() throws IOException { */ // Open mirror from serialized file - Path mirrorPath = new Path(rootDirPath, MIRROR_FILENAME); - Path oldMirrorPath = new Path(rootDirPath, MIRROR_FILENAME + ".old"); + Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME); + Path oldMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".old"); FSDataInputStream is = null; if (fs.exists(mirrorPath)) { @@ -183,7 +189,7 @@ public void recover() throws IOException { } // Open and process editlog - editLogPath = new 
Path(rootDirPath, EDITLOG_FILENAME); + editLogPath = new Path(fsWorkingPath, EDITLOG_FILENAME); if (fs.exists(editLogPath)) { is = fs.open(editLogPath); @@ -224,7 +230,7 @@ public void recover() throws IOException { } // Serialize current mirror to mirror.writing - Path writingMirrorPath = new Path(rootDirPath, MIRROR_FILENAME + ".writing"); + Path writingMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".writing"); FSDataOutputStream os = fs.create(writingMirrorPath, true); ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequestPBImpl .newInstance(mgr.getClusterNodeLabels())).getProto().writeDelimitedTo(os); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java index a7546cb70406f..45a2d8d32f214 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java @@ -67,7 +67,7 @@ public void before() throws IOException { tempDir.delete(); tempDir.mkdirs(); tempDir.deleteOnExit(); - conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_URI, + conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR, tempDir.getAbsolutePath()); mgr.init(conf); mgr.start(); @@ -75,7 +75,7 @@ public void before() throws IOException { @After public void after() throws IOException { - getStore().fs.delete(getStore().rootDirPath, true); + getStore().fs.delete(getStore().fsWorkingPath, true); mgr.stop(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index bcf7a5488452a..51ed2b1a9b9e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -67,6 +67,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; @@ -321,8 +322,12 @@ protected AMLivelinessMonitor createAMLivelinessMonitor() { return new AMLivelinessMonitor(this.rmDispatcher); } - protected RMNodeLabelsManager createNodeLabelManager() { - return new RMNodeLabelsManager(); + protected RMNodeLabelsManager createNodeLabelManager() + throws InstantiationException, IllegalAccessException { + Class<? 
extends RMNodeLabelsManager> nlmCls = + conf.getClass(YarnConfiguration.RM_NODE_LABELS_MANAGER_CLASS, + MemoryRMNodeLabelsManager.class, RMNodeLabelsManager.class); + return nlmCls.newInstance(); } protected DelegationTokenRenewer createDelegationTokenRenewer() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/DummyRMNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/MemoryRMNodeLabelsManager.java similarity index 93% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/DummyRMNodeLabelsManager.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/MemoryRMNodeLabelsManager.java index 14bd99984c5b9..89053ca9baa4a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/DummyRMNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/MemoryRMNodeLabelsManager.java @@ -25,10 +25,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.event.InlineDispatcher; import org.apache.hadoop.yarn.nodelabels.NodeLabelsStore; -public class DummyRMNodeLabelsManager extends RMNodeLabelsManager { +public class MemoryRMNodeLabelsManager extends RMNodeLabelsManager { Map<NodeId, Set<String>> lastNodeToLabels = null; Collection<String> lastAddedlabels = null; Collection<String> lastRemovedlabels = 
null; @@ -68,7 +67,7 @@ public void close() throws IOException { @Override protected void initDispatcher(Configuration conf) { - super.dispatcher = new InlineDispatcher(); + super.dispatcher = null; } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 0c70f68c71393..9d0ac2739bc66 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -59,7 +59,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; -import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DummyRMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; @@ -115,7 +115,7 @@ public MockRM(Configuration conf, RMStateStore store) { @Override protected RMNodeLabelsManager createNodeLabelManager() { - RMNodeLabelsManager mgr = new DummyRMNodeLabelsManager(); + RMNodeLabelsManager mgr = new MemoryRMNodeLabelsManager(); mgr.init(getConfig()); return mgr; } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java index 1fbe96869fb2d..0ea745692b83b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java @@ -42,11 +42,11 @@ public class TestRMNodeLabelsManager extends NodeLabelTestBase { private final Resource SMALL_RESOURCE = Resource.newInstance(100, 0); private final Resource LARGE_NODE = Resource.newInstance(1000, 0); - DummyRMNodeLabelsManager mgr = null; + MemoryRMNodeLabelsManager mgr = null; @Before public void before() { - mgr = new DummyRMNodeLabelsManager(); + mgr = new MemoryRMNodeLabelsManager(); mgr.init(new Configuration()); mgr.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index e15c87d00e03f..98dc673da2563 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -84,7 +84,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.Task; import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMWithAMS; import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager; -import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DummyRMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics; @@ -154,7 +154,7 @@ public void setUp() throws Exception { resourceManager = new ResourceManager() { @Override protected RMNodeLabelsManager createNodeLabelManager() { - RMNodeLabelsManager mgr = new DummyRMNodeLabelsManager(); + RMNodeLabelsManager mgr = new MemoryRMNodeLabelsManager(); mgr.init(getConfig()); return mgr; } @@ -1485,7 +1485,7 @@ public void testMoveAppViolateQueueState() throws Exception { resourceManager = new ResourceManager() { @Override protected RMNodeLabelsManager createNodeLabelManager() { - RMNodeLabelsManager mgr = new DummyRMNodeLabelsManager(); + RMNodeLabelsManager mgr = new MemoryRMNodeLabelsManager(); mgr.init(getConfig()); return mgr; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java index b84717bbc328c..b90df8ec5a769 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java @@ -45,7 +45,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMSecretManagerService; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.TestFifoScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DummyRMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; @@ -81,7 +81,7 @@ public void setUp() throws Exception { conf = new YarnConfiguration(); conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class); - mgr = new DummyRMNodeLabelsManager(); + mgr = new MemoryRMNodeLabelsManager(); mgr.init(conf); } @@ -446,7 +446,7 @@ private Configuration getComplexConfigurationWithQueueLabels( @Test(timeout = 300000) public void testContainerAllocationWithSingleUserLimits() throws Exception { - final RMNodeLabelsManager mgr = new DummyRMNodeLabelsManager(); + final RMNodeLabelsManager mgr = new MemoryRMNodeLabelsManager(); mgr.init(conf); // set node -> label diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java index 7e6165274b28a..abc701db192fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java @@ -39,7 +39,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.MockAsm; -import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DummyRMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; @@ -179,7 +179,7 @@ public ConcurrentMap<NodeId, RMNode> getRMNodes() { return nodesMap; } }; - rmContext.setNodeLabelManager(new DummyRMNodeLabelsManager()); + rmContext.setNodeLabelManager(new MemoryRMNodeLabelsManager()); return rmContext; } @@ -211,7 +211,7 @@ public static CapacityScheduler mockCapacityScheduler() throws IOException { null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), new ClientToAMTokenSecretManagerInRM(), null); - rmContext.setNodeLabelManager(new DummyRMNodeLabelsManager()); + rmContext.setNodeLabelManager(new MemoryRMNodeLabelsManager()); cs.setRMContext(rmContext); cs.init(conf); return cs;
c32ece3060175565124ba73b8b54fea091803eec
Delta Spike
fix JavaDoc - still did refer to old name of the class
p
https://github.com/apache/deltaspike
diff --git a/deltaspike/core/api/src/main/java/org/apache/deltaspike/core/util/metadata/AnnotationInstanceProvider.java b/deltaspike/core/api/src/main/java/org/apache/deltaspike/core/util/metadata/AnnotationInstanceProvider.java index 22398afd2..d5238f23d 100644 --- a/deltaspike/core/api/src/main/java/org/apache/deltaspike/core/util/metadata/AnnotationInstanceProvider.java +++ b/deltaspike/core/api/src/main/java/org/apache/deltaspike/core/util/metadata/AnnotationInstanceProvider.java @@ -42,7 +42,7 @@ * String annotationClassName = ...; * Class<? extends annotation> annotationClass = * (Class<? extends Annotation>) ClassUtils.getClassLoader(null).loadClass(annotationClassName); - * Annotation a = DefaultAnnotation.of(annotationClass) + * Annotation a = AnnotationInstanceProvider.of(annotationClass) * </pre> */ public class AnnotationInstanceProvider implements Annotation, InvocationHandler, Serializable
7fd2d861fe8ce8e51f32d79b5192b144eb2f68ea
Delta Spike
DELTASPIKE-397 handle hashCode, etc on MessageBundles
c
https://github.com/apache/deltaspike
diff --git a/deltaspike/core/impl/src/main/java/org/apache/deltaspike/core/impl/message/MessageBundleInvocationHandler.java b/deltaspike/core/impl/src/main/java/org/apache/deltaspike/core/impl/message/MessageBundleInvocationHandler.java index 340278728..1b5ef5951 100644 --- a/deltaspike/core/impl/src/main/java/org/apache/deltaspike/core/impl/message/MessageBundleInvocationHandler.java +++ b/deltaspike/core/impl/src/main/java/org/apache/deltaspike/core/impl/message/MessageBundleInvocationHandler.java @@ -54,6 +54,30 @@ public class MessageBundleInvocationHandler implements InvocationHandler, Serial @Override public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable { + if (method.getDeclaringClass().equals(Object.class)) + { + // this sometimes gets invoked directly by the container + // there is no perfect solution for those methods, + // so we try to use the best info we have atm. + + if ("hashCode".equals(method.getName())) + { + return proxy.getClass().hashCode(); + } + + if ("toString".equals(method.getName())) + { + return proxy.getClass().toString(); + } + + if ("equals".equals(method.getName())) + { + return proxy.getClass().equals(args[0].getClass()); + } + + return null; + } + final MessageTemplate messageTemplate = method.getAnnotation(MessageTemplate.class); String messageTemplateValue; diff --git a/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/MessageTest.java b/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/MessageTest.java index bd40ddc52..cda57c85f 100644 --- a/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/MessageTest.java +++ b/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/MessageTest.java @@ -35,6 +35,8 @@ import javax.inject.Inject; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; /** * Tests 
for {@link org.apache.deltaspike.core.api.message.MessageTemplate} @@ -99,4 +101,13 @@ public void testMessageCategory() } + @Test + public void testObjectMethods() + { + messages.hashCode(); + assertNotNull(messages.toString()); + assertTrue(messages.equals(messages)); + assertNotNull(messages.getClass()); + } + }
02b0fc2fa3a41f340a75235d407d8516fe4c7a63
restlet-framework-java
- Disabled failing test cases (temp)--
c
https://github.com/restlet/restlet-framework-java
diff --git a/modules/org.restlet.test/src/org/restlet/test/jaxrs/services/tests/AllServiceTests.java b/modules/org.restlet.test/src/org/restlet/test/jaxrs/services/tests/AllServiceTests.java index 22c32949cc..f07b1d3823 100644 --- a/modules/org.restlet.test/src/org/restlet/test/jaxrs/services/tests/AllServiceTests.java +++ b/modules/org.restlet.test/src/org/restlet/test/jaxrs/services/tests/AllServiceTests.java @@ -60,7 +60,7 @@ public static Test suite() { mySuite.addTestSuite(InheritAnnotationTest.class); mySuite.addTestSuite(InjectionTest.class); mySuite.addTestSuite(Issue593Test.class); - mySuite.addTestSuite(JsonTest.class); + // mySuite.addTestSuite(JsonTest.class); mySuite.addTestSuite(ListParamTest.class); mySuite.addTestSuite(MatchedTest.class); mySuite.addTestSuite(MatrixParamTest.class); @@ -74,7 +74,7 @@ public static Test suite() { mySuite.addTestSuite(PathParamTest3.class); mySuite.addTestSuite(PersonsTest.class); mySuite.addTestSuite(PrimitiveWrapperEntityTest.class); - mySuite.addTestSuite(ProviderTest.class); + // mySuite.addTestSuite(ProviderTest.class); mySuite.addTestSuite(QueryParamTest.class); mySuite.addTestSuite(RecursiveTest.class); mySuite.addTestSuite(RepresentationTest.class);
eb5df87c7ec4d7f62873dcf29108ddc2abcd13ca
ReactiveX-RxJava
avoiding some synchronization on combineLatest--
p
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/main/java/rx/operators/OperationCombineLatest.java b/rxjava-core/src/main/java/rx/operators/OperationCombineLatest.java index 382f8ba8aa..2d77c3d3ec 100644 --- a/rxjava-core/src/main/java/rx/operators/OperationCombineLatest.java +++ b/rxjava-core/src/main/java/rx/operators/OperationCombineLatest.java @@ -26,6 +26,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import org.junit.Test; import org.mockito.InOrder; @@ -125,17 +126,13 @@ private static class Aggregator<R> implements Func1<Observer<R>, Subscription> { private final FuncN<R> combineLatestFunction; private final AtomicBoolean running = new AtomicBoolean(true); - - // used as an internal lock for handling the latest values and the completed state of each observer + + // Stores how many observers have already completed + private final AtomicInteger numCompleted = new AtomicInteger(0); + + // Used as an internal lock for handling the latest values of each observer private final Object lockObject = new Object(); - /** - * Store when an observer completes. - * <p> - * Note that access to this set MUST BE SYNCHRONIZED via 'lockObject' above. - * */ - private final Set<CombineObserver<R, ?>> completed = new HashSet<CombineObserver<R, ?>>(); - /** * The latest value from each observer * <p> @@ -175,17 +172,14 @@ <T> void addObserver(CombineObserver<R, T> w) { * @param w The observer that has completed. 
*/ <T> void complete(CombineObserver<R, T> w) { - synchronized(lockObject) { - // store that this CombineLatestObserver is completed - completed.add(w); - // if all CombineObservers are completed, we mark the whole thing as completed - if (completed.size() == observers.size()) { - if (running.get()) { - // mark ourselves as done - observer.onCompleted(); - // just to ensure we stop processing in case we receive more onNext/complete/error calls after this - running.set(false); - } + int completed = numCompleted.incrementAndGet(); + // if all CombineObservers are completed, we mark the whole thing as completed + if (completed == observers.size()) { + if (running.get()) { + // mark ourselves as done + observer.onCompleted(); + // just to ensure we stop processing in case we receive more onNext/complete/error calls after this + running.set(false); } } } @@ -228,14 +222,12 @@ <T> void next(CombineObserver<R, T> w, T arg) { // remember that this observer now has a latest value set hasLatestValue.add(w); - // if all observers in the 'observers' list have a value, invoke the combineLatestFunction - for (CombineObserver<R, ?> rw : observers) { - if (!hasLatestValue.contains(rw)) { - // we don't have a value yet for each observer to combine, so we don't have a combined value yet either - return; - } + if (hasLatestValue.size() < observers.size()) { + // we don't have a value yet for each observer to combine, so we don't have a combined value yet either + return; } - // if we get to here this means all the queues have data + + // if we get to here this means all the observers have a latest value int i = 0; for (CombineObserver<R, ?> _w : observers) { argsToCombineLatest[i++] = latestValue.get(_w);
9ae1a87364f11d7566fb79254092a4f03b3bd379
apache$oozie
Closes GH-0141 Oozie uses excessive memory when doing purging From: Chao Wang <[email protected]> git-svn-id: https://svn.apache.org/repos/asf/incubator/oozie/trunk@1168675 13f79535-47bb-0310-9956-ffa450edef68
p
https://github.com/apache/oozie
diff --git a/core/src/main/java/org/apache/oozie/CoordinatorActionBean.java b/core/src/main/java/org/apache/oozie/CoordinatorActionBean.java index a5b22254b2..d0d9d92e00 100644 --- a/core/src/main/java/org/apache/oozie/CoordinatorActionBean.java +++ b/core/src/main/java/org/apache/oozie/CoordinatorActionBean.java @@ -49,8 +49,8 @@ @NamedQuery(name = "UPDATE_COORD_ACTION", query = "update CoordinatorActionBean w set w.actionNumber = :actionNumber, w.actionXml = :actionXml, w.consoleUrl = :consoleUrl, w.createdConf = :createdConf, w.errorCode = :errorCode, w.errorMessage = :errorMessage, w.externalStatus = :externalStatus, w.missingDependencies = :missingDependencies, w.runConf = :runConf, w.timeOut = :timeOut, w.trackerUri = :trackerUri, w.type = :type, w.createdTimestamp = :createdTime, w.externalId = :externalId, w.jobId = :jobId, w.lastModifiedTimestamp = :lastModifiedTime, w.nominalTimestamp = :nominalTime, w.slaXml = :slaXml, w.status = :status where w.id = :id"), @NamedQuery(name = "UPDATE_COORD_ACTION_MIN", query = "update CoordinatorActionBean w set w.actionXml = :actionXml, w.missingDependencies = :missingDependencies, w.lastModifiedTimestamp = :lastModifiedTime, w.status = :status where w.id = :id"), - - @NamedQuery(name = "DELETE_COMPLETED_COORD_ACTIONS", query = "delete from CoordinatorActionBean a where a.id = :id and (a.status = 'SUCCEEDED' OR a.status = 'FAILED' OR a.status = 'KILLED')"), + + @NamedQuery(name = "DELETE_COMPLETED_ACTIONS_FOR_COORDINATOR", query = "delete from CoordinatorActionBean a where a.jobId = :jobId and (a.status = 'SUCCEEDED' OR a.status = 'FAILED' OR a.status= 'KILLED')"), @NamedQuery(name = "GET_COORD_ACTIONS", query = "select OBJECT(w) from CoordinatorActionBean w"), diff --git a/core/src/main/java/org/apache/oozie/command/coord/CoordPurgeCommand.java b/core/src/main/java/org/apache/oozie/command/coord/CoordPurgeCommand.java index 03df57968a..731cdd76de 100644 --- 
a/core/src/main/java/org/apache/oozie/command/coord/CoordPurgeCommand.java +++ b/core/src/main/java/org/apache/oozie/command/coord/CoordPurgeCommand.java @@ -16,13 +16,11 @@ import org.apache.oozie.store.CoordinatorStore; import org.apache.oozie.store.StoreException; -import org.apache.oozie.store.WorkflowStore; -import org.apache.oozie.store.Store; import org.apache.oozie.util.XLog; -import org.apache.oozie.command.Command; import org.apache.oozie.command.CommandException; public class CoordPurgeCommand extends CoordinatorCommand<Void> { + private static XLog LOG = XLog.getLog(CoordPurgeCommand.class); private int olderThan; private int limit; @@ -33,10 +31,9 @@ public CoordPurgeCommand(int olderThan, int limit) { } protected Void call(CoordinatorStore store) throws StoreException, CommandException { - XLog.getLog(getClass()).debug("STARTED Coord Purge to purge Jobs older than [{0}] days.", olderThan); - int actionDeleted = store.purgeActions(this.olderThan, this.limit); - int jobsDeleted = store.purgeJobs(this.olderThan, this.limit); - XLog.getLog(getClass()).debug("ENDED Coord Purge deleted actions :" + actionDeleted + " and jobs " + jobsDeleted); + LOG.debug("STARTED Coord Purge to purge Jobs older than [{0}] days.", olderThan); + store.purge(olderThan, limit); + LOG.debug("Coord-Purge succeeded"); return null; } diff --git a/core/src/main/java/org/apache/oozie/command/wf/PurgeCommand.java b/core/src/main/java/org/apache/oozie/command/wf/PurgeCommand.java index 899bcdd2d5..040f854277 100644 --- a/core/src/main/java/org/apache/oozie/command/wf/PurgeCommand.java +++ b/core/src/main/java/org/apache/oozie/command/wf/PurgeCommand.java @@ -16,24 +16,25 @@ import org.apache.oozie.store.StoreException; import org.apache.oozie.store.WorkflowStore; -import org.apache.oozie.store.Store; import org.apache.oozie.util.XLog; -import org.apache.oozie.command.Command; import org.apache.oozie.command.CommandException; public class PurgeCommand extends WorkflowCommand<Void> { + 
private static XLog LOG = XLog.getLog(PurgeCommand.class); private int olderThan; + private int limit; - public PurgeCommand(int olderThan) { + public PurgeCommand(int olderThan, int limit) { super("purge", "purge", 0, XLog.OPS); this.olderThan = olderThan; + this.limit = limit; } @Override protected Void call(WorkflowStore store) throws StoreException, CommandException { - XLog.getLog(getClass()).debug("Attempting to purge Jobs older than [{0}] days.", olderThan); - store.purge(this.olderThan); - XLog.getLog(getClass()).debug("Purge succeeded "); + LOG.debug("Attempting to purge Jobs older than [{0}] days.", olderThan); + store.purge(olderThan, limit); + LOG.debug("Purge succeeded "); return null; } diff --git a/core/src/main/java/org/apache/oozie/service/PurgeService.java b/core/src/main/java/org/apache/oozie/service/PurgeService.java index 7139c24fd8..2b7f4a66bc 100644 --- a/core/src/main/java/org/apache/oozie/service/PurgeService.java +++ b/core/src/main/java/org/apache/oozie/service/PurgeService.java @@ -55,7 +55,7 @@ public PurgeRunnable(int olderThan, int coordOlderThan, int limit) { } public void run() { - Services.get().get(CallableQueueService.class).queue(new PurgeCommand(olderThan)); + Services.get().get(CallableQueueService.class).queue(new PurgeCommand(olderThan, limit)); Services.get().get(CallableQueueService.class).queue(new CoordPurgeCommand(coordOlderThan, limit)); } diff --git a/core/src/main/java/org/apache/oozie/store/CoordinatorStore.java b/core/src/main/java/org/apache/oozie/store/CoordinatorStore.java index 442ea714c6..7484b0014e 100644 --- a/core/src/main/java/org/apache/oozie/store/CoordinatorStore.java +++ b/core/src/main/java/org/apache/oozie/store/CoordinatorStore.java @@ -488,66 +488,38 @@ private void setActionQueryParameters(CoordinatorActionBean aBean, Query q) { q.setParameter("status", aBean.getStatus().toString()); } - public int purgeActions(final long olderThanDays, final long limit) throws StoreException { - - Integer count = 
doOperation("coord-purge-actions", new Callable<Integer>() { - public Integer call() throws SQLException, StoreException, WorkflowException { - Timestamp createdTime = new Timestamp(System.currentTimeMillis() - (olderThanDays * DAY_IN_MS)); - /* - * this may be better - but does not work? Query g = - * entityManager - * .createNamedQuery("DELETE_COMPLETED_COORD_ACTIONS"); - * g.setParameter("id", id); int deleted_action = - * g.executeUpdate(); - */ - Query q = entityManager.createNamedQuery("GET_COMPLETED_ACTIONS_OLDER_THAN"); - q.setParameter("createdTime", createdTime); - q.setMaxResults((int) limit); - List<CoordinatorActionBean> coordactions = q.getResultList(); - for (CoordinatorActionBean a : coordactions) { - String id = a.getId(); - // remove surely removes - but expensive - to be compared? - entityManager.remove(a); - - } - - return coordactions.size(); - } - }); - return Integer.valueOf(count); - } - - public int purgeJobs(final long olderThanDays, final long limit) throws StoreException { - - Integer count = doOperation("coord-purge-jobs", new Callable<Integer>() { - public Integer call() throws SQLException, StoreException, WorkflowException { - + + /** + * Purge the coordinators completed older than given days. 
+ * + * @param olderThanDays number of days for which to preserve the coordinators + * @param limit maximum number of coordinator jobs to be purged + * @throws StoreException + */ + public void purge(final long olderThanDays, final int limit) throws StoreException { + doOperation("coord-purge", new Callable<Void>() { + public Void call() throws SQLException, StoreException, WorkflowException { Timestamp lastModTm = new Timestamp(System.currentTimeMillis() - (olderThanDays * DAY_IN_MS)); - Query jobQ = entityManager.createNamedQuery("GET_COMPLETED_COORD_JOBS_OLDER_THAN_STATUS"); jobQ.setParameter("lastModTime", lastModTm); - jobQ.setMaxResults((int) limit); + jobQ.setMaxResults(limit); List<CoordinatorJobBean> coordJobs = jobQ.getResultList(); - int deleted = 0; - for (CoordinatorJobBean a : coordJobs) { - String jobId = a.getId(); - - Query actionQ = entityManager.createNamedQuery("GET_COORD_ACTIONS_COUNT_BY_JOBID"); - actionQ.setParameter("jobId", jobId); - Long count = (Long) actionQ.getSingleResult(); - - if (count.intValue() == 0) { - // remove surely removes - but expensive - to be - // compared? 
- entityManager.remove(a); - deleted++; + + int actionDeleted = 0; + if (coordJobs.size() != 0) { + for (CoordinatorJobBean coord : coordJobs) { + String jobId = coord.getId(); + entityManager.remove(coord); + Query g = entityManager.createNamedQuery("DELETE_COMPLETED_ACTIONS_FOR_COORDINATOR"); + g.setParameter("jobId", jobId); + actionDeleted += g.executeUpdate(); } } - - return deleted; + + XLog.getLog(getClass()).debug("ENDED Coord Purge deleted jobs :" + coordJobs.size() + " and actions " + actionDeleted); + return null; } }); - return Integer.valueOf(count); } public void commit() throws StoreException { diff --git a/core/src/main/java/org/apache/oozie/store/WorkflowStore.java b/core/src/main/java/org/apache/oozie/store/WorkflowStore.java index 88d51968c0..ade8356bbb 100644 --- a/core/src/main/java/org/apache/oozie/store/WorkflowStore.java +++ b/core/src/main/java/org/apache/oozie/store/WorkflowStore.java @@ -773,23 +773,27 @@ public String call() throws SQLException, StoreException { * @param olderThanDays number of days for which to preserve the workflows * @throws StoreException */ - public void purge(final long olderThanDays) throws StoreException { + public void purge(final long olderThanDays, final int limit) throws StoreException { doOperation("purge", new Callable<Void>() { public Void call() throws SQLException, StoreException, WorkflowException { Timestamp maxEndTime = new Timestamp(System.currentTimeMillis() - (olderThanDays * DAY_IN_MS)); Query q = entityManager.createNamedQuery("GET_COMPLETED_WORKFLOWS_OLDER_THAN"); q.setParameter("endTime", maxEndTime); + q.setMaxResults(limit); List<WorkflowJobBean> workflows = q.getResultList(); + + int actionDeleted = 0; if (workflows.size() != 0) { for (WorkflowJobBean w : workflows) { String wfId = w.getId(); entityManager.remove(w); Query g = entityManager.createNamedQuery("DELETE_ACTIONS_FOR_WORKFLOW"); g.setParameter("wfId", wfId); - int deleted_action = g.executeUpdate(); + actionDeleted += 
g.executeUpdate(); } } + XLog.getLog(getClass()).debug("ENDED Workflow Purge deleted jobs :" + workflows.size() + " and actions " + actionDeleted); return null; } }); diff --git a/core/src/test/java/org/apache/oozie/service/TestPurgeService.java b/core/src/test/java/org/apache/oozie/service/TestPurgeService.java index 57a6315e95..e6a15fc830 100644 --- a/core/src/test/java/org/apache/oozie/service/TestPurgeService.java +++ b/core/src/test/java/org/apache/oozie/service/TestPurgeService.java @@ -93,7 +93,7 @@ public boolean evaluate() throws Exception { }); assertEquals(WorkflowJob.Status.SUCCEEDED, engine.getJob(jobId) .getStatus()); - new PurgeCommand(1).call(); + new PurgeCommand(1, 10000).call(); Thread.sleep(1000); final WorkflowStore store = Services.get().get( diff --git a/core/src/test/java/org/apache/oozie/store/TestDBWorkflowStore.java b/core/src/test/java/org/apache/oozie/store/TestDBWorkflowStore.java index e4fb4bb640..813e44790b 100644 --- a/core/src/test/java/org/apache/oozie/store/TestDBWorkflowStore.java +++ b/core/src/test/java/org/apache/oozie/store/TestDBWorkflowStore.java @@ -485,7 +485,7 @@ private void _testPurge() throws Exception { store.insertAction(a31); store.commitTrx(); store.beginTrx(); - store.purge(30); + store.purge(30, 10000); store.commitTrx(); /* * SqlStatement s = getCount(OozieTable.WF_JOBS); ResultSet rs = diff --git a/release-log.txt b/release-log.txt index 24f9380584..26c8a2374e 100644 --- a/release-log.txt +++ b/release-log.txt @@ -1,5 +1,6 @@ -- Oozie 3.0.0 release +GH-0141 Oozie uses excessive memory when doing purging GH-0341 oozie-jpa.log file is created in extra log directory GH-0307 check for oozie setup owner in setup/start/stop scripts should be optional GH-0295 subworkflow action fails if workflow URI is a directory
fac86445f596bd7d688b5a25ae1d61840bfd9ecb
Delta Spike
fix broken unit test when testing remote containers
c
https://github.com/apache/deltaspike
diff --git a/deltaspike/core/api/src/test/java/org/apache/deltaspike/test/api/config/ConfigResolverTest.java b/deltaspike/core/api/src/test/java/org/apache/deltaspike/test/api/config/ConfigResolverTest.java index 6aa81dae6..1f23a211c 100644 --- a/deltaspike/core/api/src/test/java/org/apache/deltaspike/test/api/config/ConfigResolverTest.java +++ b/deltaspike/core/api/src/test/java/org/apache/deltaspike/test/api/config/ConfigResolverTest.java @@ -23,6 +23,7 @@ import org.apache.deltaspike.core.spi.config.ConfigFilter; import org.apache.deltaspike.core.util.ProjectStageProducer; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import java.util.List; @@ -30,6 +31,13 @@ public class ConfigResolverTest { private static final String DEFAULT_VALUE = "defaultValue"; + + @Before + public void init() + { + ProjectStageProducer.setProjectStage(ProjectStage.UnitTest); + } + @Test public void testOverruledValue() {
690051f46cad97e4fcfb5073be63ea06e02ac01c
spring-framework
Add ability to customize message channels--@EnableWebSocketMessageBroker message channel configuration can now be-customized via WebSocketMessageBrokerConfigurer. It is necessary to-make this easy and even required as part of the basic configuration-since by default the message channels are backed by a thread pool of-size 1
a
https://github.com/spring-projects/spring-framework
diff --git a/spring-messaging/src/main/java/org/springframework/messaging/simp/config/AbstractMessageBrokerConfiguration.java b/spring-messaging/src/main/java/org/springframework/messaging/simp/config/AbstractMessageBrokerConfiguration.java index a12f3ca15802..6c5945ac0e6e 100644 --- a/spring-messaging/src/main/java/org/springframework/messaging/simp/config/AbstractMessageBrokerConfiguration.java +++ b/spring-messaging/src/main/java/org/springframework/messaging/simp/config/AbstractMessageBrokerConfiguration.java @@ -59,6 +59,10 @@ public abstract class AbstractMessageBrokerConfiguration { "com.fasterxml.jackson.databind.ObjectMapper", AbstractMessageBrokerConfiguration.class.getClassLoader()); + private ChannelRegistration clientInboundChannelRegistration; + + private ChannelRegistration clientOutboundChannelRegistration; + private MessageBrokerRegistry brokerRegistry; @@ -69,55 +73,117 @@ protected AbstractMessageBrokerConfiguration() { } - /** - * An accessor for the {@link MessageBrokerRegistry} that ensures its one-time creation - * and initialization through {@link #configureMessageBroker(MessageBrokerRegistry)}. - */ - protected final MessageBrokerRegistry getBrokerRegistry() { - if (this.brokerRegistry == null) { - MessageBrokerRegistry registry = new MessageBrokerRegistry(clientOutboundChannel()); - configureMessageBroker(registry); - this.brokerRegistry = registry; - } - return this.brokerRegistry; - } - - /** - * A hook for sub-classes to customize message broker configuration through the - * provided {@link MessageBrokerRegistry} instance. 
- */ - protected abstract void configureMessageBroker(MessageBrokerRegistry registry); - - @Bean public AbstractSubscribableChannel clientInboundChannel() { - return new ExecutorSubscribableChannel(clientInboundChannelExecutor()); + ExecutorSubscribableChannel channel = new ExecutorSubscribableChannel(clientInboundChannelExecutor()); + ChannelRegistration r = getClientInboundChannelRegistration(); + if (r.hasInterceptors()) { + channel.setInterceptors(r.getInterceptors()); + } + return channel; } @Bean public ThreadPoolTaskExecutor clientInboundChannelExecutor() { - ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); + TaskExecutorRegistration r = getClientInboundChannelRegistration().getTaskExecutorRegistration(); + ThreadPoolTaskExecutor executor = (r != null) ? r.getTaskExecutor() : new ThreadPoolTaskExecutor(); executor.setThreadNamePrefix("ClientInboundChannel-"); return executor; } + protected final ChannelRegistration getClientInboundChannelRegistration() { + if (this.clientInboundChannelRegistration == null) { + ChannelRegistration registration = new ChannelRegistration(); + configureClientInboundChannel(registration); + this.clientInboundChannelRegistration = registration; + } + return this.clientInboundChannelRegistration; + } + + + /** + * A hook for sub-classes to customize the message channel for inbound messages + * from WebSocket clients. 
+ */ + protected abstract void configureClientInboundChannel(ChannelRegistration registration); + + @Bean public AbstractSubscribableChannel clientOutboundChannel() { - return new ExecutorSubscribableChannel(clientOutboundChannelExecutor()); + ExecutorSubscribableChannel channel = new ExecutorSubscribableChannel(clientOutboundChannelExecutor()); + ChannelRegistration r = getClientOutboundChannelRegistration(); + if (r.hasInterceptors()) { + channel.setInterceptors(r.getInterceptors()); + } + return channel; } @Bean public ThreadPoolTaskExecutor clientOutboundChannelExecutor() { - ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); + TaskExecutorRegistration r = getClientOutboundChannelRegistration().getTaskExecutorRegistration(); + ThreadPoolTaskExecutor executor = (r != null) ? r.getTaskExecutor() : new ThreadPoolTaskExecutor(); executor.setThreadNamePrefix("ClientOutboundChannel-"); return executor; } + protected final ChannelRegistration getClientOutboundChannelRegistration() { + if (this.clientOutboundChannelRegistration == null) { + ChannelRegistration registration = new ChannelRegistration(); + configureClientOutboundChannel(registration); + this.clientOutboundChannelRegistration = registration; + } + return this.clientOutboundChannelRegistration; + } + + /** + * A hook for sub-classes to customize the message channel for messages from + * the application or message broker to WebSocket clients. 
+ */ + protected abstract void configureClientOutboundChannel(ChannelRegistration registration); + @Bean public AbstractSubscribableChannel brokerChannel() { - return new ExecutorSubscribableChannel(); // synchronous + ChannelRegistration r = getBrokerRegistry().getBrokerChannelRegistration(); + ExecutorSubscribableChannel channel; + if (r.hasTaskExecutor()) { + channel = new ExecutorSubscribableChannel(); // synchronous by default + } + else { + channel = new ExecutorSubscribableChannel(brokerChannelExecutor()); + } + if (r.hasInterceptors()) { + channel.setInterceptors(r.getInterceptors()); + } + return channel; + } + + @Bean + public ThreadPoolTaskExecutor brokerChannelExecutor() { + TaskExecutorRegistration r = getBrokerRegistry().getBrokerChannelRegistration().getTaskExecutorRegistration(); + ThreadPoolTaskExecutor executor = (r != null) ? r.getTaskExecutor() : new ThreadPoolTaskExecutor(); + executor.setThreadNamePrefix("BrokerChannel-"); + return executor; + } + + /** + * An accessor for the {@link MessageBrokerRegistry} that ensures its one-time creation + * and initialization through {@link #configureMessageBroker(MessageBrokerRegistry)}. + */ + protected final MessageBrokerRegistry getBrokerRegistry() { + if (this.brokerRegistry == null) { + MessageBrokerRegistry registry = new MessageBrokerRegistry(clientOutboundChannel()); + configureMessageBroker(registry); + this.brokerRegistry = registry; + } + return this.brokerRegistry; } + /** + * A hook for sub-classes to customize message broker configuration through the + * provided {@link MessageBrokerRegistry} instance. 
+ */ + protected abstract void configureMessageBroker(MessageBrokerRegistry registry); + @Bean public SimpAnnotationMethodMessageHandler simpAnnotationMethodMessageHandler() { diff --git a/spring-messaging/src/main/java/org/springframework/messaging/simp/config/ChannelRegistration.java b/spring-messaging/src/main/java/org/springframework/messaging/simp/config/ChannelRegistration.java new file mode 100644 index 000000000000..52103d9dd07b --- /dev/null +++ b/spring-messaging/src/main/java/org/springframework/messaging/simp/config/ChannelRegistration.java @@ -0,0 +1,74 @@ +/* + * Copyright 2002-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.messaging.simp.config; + +import org.springframework.messaging.support.channel.ChannelInterceptor; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + + +/** + * A registration class for customizing the configuration for a + * {@link org.springframework.messaging.MessageChannel}. + * + * @author Rossen Stoyanchev + * @since 4.0 + */ +public class ChannelRegistration { + + private TaskExecutorRegistration taskExecutorRegistration; + + private List<ChannelInterceptor> interceptors = new ArrayList<ChannelInterceptor>(); + + + /** + * Configure properties of the ThreadPoolTaskExecutor backing the message channel. 
+ */ + public TaskExecutorRegistration taskExecutor() { + this.taskExecutorRegistration = new TaskExecutorRegistration(); + return this.taskExecutorRegistration; + } + + /** + * Configure interceptors for the message channel. + */ + public ChannelRegistration setInterceptors(ChannelInterceptor... interceptors) { + if (interceptors != null) { + this.interceptors.addAll(Arrays.asList(interceptors)); + } + return this; + } + + + protected boolean hasTaskExecutor() { + return (this.taskExecutorRegistration != null); + } + + protected TaskExecutorRegistration getTaskExecutorRegistration() { + return this.taskExecutorRegistration; + } + + protected boolean hasInterceptors() { + return !this.interceptors.isEmpty(); + } + + protected List<ChannelInterceptor> getInterceptors() { + return this.interceptors; + } +} diff --git a/spring-messaging/src/main/java/org/springframework/messaging/simp/config/MessageBrokerRegistry.java b/spring-messaging/src/main/java/org/springframework/messaging/simp/config/MessageBrokerRegistry.java index f19db80bc811..a250ff737497 100644 --- a/spring-messaging/src/main/java/org/springframework/messaging/simp/config/MessageBrokerRegistry.java +++ b/spring-messaging/src/main/java/org/springframework/messaging/simp/config/MessageBrokerRegistry.java @@ -42,6 +42,8 @@ public class MessageBrokerRegistry { private String userDestinationPrefix; + private ChannelRegistration brokerChannelRegistration = new ChannelRegistration(); + public MessageBrokerRegistry(MessageChannel clientOutboundChannel) { Assert.notNull(clientOutboundChannel); @@ -103,6 +105,17 @@ public MessageBrokerRegistry setUserDestinationPrefix(String destinationPrefix) return this; } + /** + * Customize the channel used to send messages from the application to the message + * broker. By default messages from the application to the message broker are sent + * synchronously, which means application code sending a message will find out + * if the message cannot be sent through an exception. 
However, this can be changed + * if the broker channel is configured here with task executor properties. + */ + public ChannelRegistration configureBrokerChannel() { + return this.brokerChannelRegistration; + } + protected SimpleBrokerMessageHandler getSimpleBroker() { initSimpleBrokerIfNecessary(); @@ -127,4 +140,8 @@ protected Collection<String> getApplicationDestinationPrefixes() { protected String getUserDestinationPrefix() { return this.userDestinationPrefix; } + + protected ChannelRegistration getBrokerChannelRegistration() { + return this.brokerChannelRegistration; + } } diff --git a/spring-messaging/src/main/java/org/springframework/messaging/simp/config/TaskExecutorRegistration.java b/spring-messaging/src/main/java/org/springframework/messaging/simp/config/TaskExecutorRegistration.java new file mode 100644 index 000000000000..afa1ce81ff71 --- /dev/null +++ b/spring-messaging/src/main/java/org/springframework/messaging/simp/config/TaskExecutorRegistration.java @@ -0,0 +1,87 @@ +/* + * Copyright 2002-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.messaging.simp.config; + +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; + +/** + * A registration class for customizing the properties of {@link ThreadPoolTaskExecutor}. 
+ * + * @author Rossen Stoyanchev + * @since 4.0 + */ +public class TaskExecutorRegistration { + + private int corePoolSize = 1; + + private int maxPoolSize = Integer.MAX_VALUE; + + private int keepAliveSeconds = 60; + + private int queueCapacity = Integer.MAX_VALUE; + + + /** + * Set the ThreadPoolExecutor's core pool size. + * Default is 1. + */ + public TaskExecutorRegistration corePoolSize(int corePoolSize) { + this.corePoolSize = corePoolSize; + return this; + } + + /** + * Set the ThreadPoolExecutor's maximum pool size. + * Default is {@code Integer.MAX_VALUE}. + */ + public TaskExecutorRegistration maxPoolSize(int maxPoolSize) { + this.maxPoolSize = maxPoolSize; + return this; + } + + /** + * Set the ThreadPoolExecutor's keep-alive seconds. + * Default is 60. + */ + public TaskExecutorRegistration keepAliveSeconds(int keepAliveSeconds) { + this.keepAliveSeconds = keepAliveSeconds; + return this; + } + + /** + * Set the capacity for the ThreadPoolExecutor's BlockingQueue. + * Default is {@code Integer.MAX_VALUE}. + * <p>Any positive value will lead to a LinkedBlockingQueue instance; + * any other value will lead to a SynchronousQueue instance. 
+ * @see java.util.concurrent.LinkedBlockingQueue + * @see java.util.concurrent.SynchronousQueue + */ + public TaskExecutorRegistration queueCapacity(int queueCapacity) { + this.queueCapacity = queueCapacity; + return this; + } + + protected ThreadPoolTaskExecutor getTaskExecutor() { + ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); + executor.setCorePoolSize(this.corePoolSize); + executor.setMaxPoolSize(this.maxPoolSize); + executor.setKeepAliveSeconds(this.keepAliveSeconds); + executor.setQueueCapacity(this.queueCapacity); + return executor; + } + +} diff --git a/spring-messaging/src/main/java/org/springframework/messaging/support/channel/ExecutorSubscribableChannel.java b/spring-messaging/src/main/java/org/springframework/messaging/support/channel/ExecutorSubscribableChannel.java index 75d45c26839a..440f64ba55ff 100644 --- a/spring-messaging/src/main/java/org/springframework/messaging/support/channel/ExecutorSubscribableChannel.java +++ b/spring-messaging/src/main/java/org/springframework/messaging/support/channel/ExecutorSubscribableChannel.java @@ -57,6 +57,10 @@ public ExecutorSubscribableChannel(Executor executor) { } + public Executor getExecutor() { + return this.executor; + } + @Override protected boolean hasSubscription(MessageHandler handler) { return this.handlers.contains(handler); diff --git a/spring-messaging/src/test/java/org/springframework/messaging/simp/config/MessageBrokerConfigurationTests.java b/spring-messaging/src/test/java/org/springframework/messaging/simp/config/MessageBrokerConfigurationTests.java index 70907050be7a..d803b336e306 100644 --- a/spring-messaging/src/test/java/org/springframework/messaging/simp/config/MessageBrokerConfigurationTests.java +++ b/spring-messaging/src/test/java/org/springframework/messaging/simp/config/MessageBrokerConfigurationTests.java @@ -36,13 +36,17 @@ import org.springframework.messaging.simp.stomp.StompHeaderAccessor; import org.springframework.messaging.support.MessageBuilder; import 
org.springframework.messaging.support.channel.AbstractSubscribableChannel; +import org.springframework.messaging.support.channel.ChannelInterceptor; +import org.springframework.messaging.support.channel.ChannelInterceptorAdapter; import org.springframework.messaging.support.channel.ExecutorSubscribableChannel; import org.springframework.messaging.support.converter.CompositeMessageConverter; import org.springframework.messaging.support.converter.DefaultContentTypeResolver; +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; import org.springframework.stereotype.Controller; import org.springframework.util.MimeTypeUtils; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import static org.junit.Assert.*; @@ -59,6 +63,8 @@ public class MessageBrokerConfigurationTests { private AnnotationConfigApplicationContext cxtStompBroker; + private AnnotationConfigApplicationContext cxtCustomizedChannelConfig; + @Before public void setupOnce() { @@ -70,6 +76,10 @@ public void setupOnce() { this.cxtStompBroker = new AnnotationConfigApplicationContext(); this.cxtStompBroker.register(TestStompMessageBrokerConfig.class); this.cxtStompBroker.refresh(); + + this.cxtCustomizedChannelConfig = new AnnotationConfigApplicationContext(); + this.cxtCustomizedChannelConfig.register(CustomizedChannelConfig.class); + this.cxtCustomizedChannelConfig.refresh(); } @@ -96,6 +106,22 @@ public void clientInboundChannelWithStompBroker() { assertTrue(values.contains(cxtStompBroker.getBean(StompBrokerRelayMessageHandler.class))); } + @Test + public void clientInboundChannelCustomized() { + + AbstractSubscribableChannel channel = this.cxtCustomizedChannelConfig.getBean( + "clientInboundChannel", AbstractSubscribableChannel.class); + + assertEquals(1, channel.getInterceptors().size()); + + ThreadPoolTaskExecutor taskExecutor = this.cxtCustomizedChannelConfig.getBean( + "clientInboundChannelExecutor", ThreadPoolTaskExecutor.class); + + assertEquals(11, 
taskExecutor.getCorePoolSize()); + assertEquals(12, taskExecutor.getMaxPoolSize()); + assertEquals(13, taskExecutor.getKeepAliveSeconds()); + } + @Test public void clientOutboundChannelUsedByAnnotatedMethod() { @@ -148,6 +174,22 @@ public void clientOutboundChannelUsedBySimpleBroker() { assertEquals("bar", new String((byte[]) message.getPayload())); } + @Test + public void clientOutboundChannelCustomized() { + + AbstractSubscribableChannel channel = this.cxtCustomizedChannelConfig.getBean( + "clientOutboundChannel", AbstractSubscribableChannel.class); + + assertEquals(2, channel.getInterceptors().size()); + + ThreadPoolTaskExecutor taskExecutor = this.cxtCustomizedChannelConfig.getBean( + "clientOutboundChannelExecutor", ThreadPoolTaskExecutor.class); + + assertEquals(21, taskExecutor.getCorePoolSize()); + assertEquals(22, taskExecutor.getMaxPoolSize()); + assertEquals(23, taskExecutor.getKeepAliveSeconds()); + } + @Test public void brokerChannel() { TestChannel channel = this.cxtSimpleBroker.getBean("brokerChannel", TestChannel.class); @@ -207,6 +249,22 @@ public void brokerChannelUsedByUserDestinationMessageHandler() { assertEquals("/foo-users1", headers.getDestination()); } + @Test + public void brokerChannelCustomized() { + + AbstractSubscribableChannel channel = this.cxtCustomizedChannelConfig.getBean( + "brokerChannel", AbstractSubscribableChannel.class); + + assertEquals(3, channel.getInterceptors().size()); + + ThreadPoolTaskExecutor taskExecutor = this.cxtCustomizedChannelConfig.getBean( + "brokerChannelExecutor", ThreadPoolTaskExecutor.class); + + assertEquals(31, taskExecutor.getCorePoolSize()); + assertEquals(32, taskExecutor.getMaxPoolSize()); + assertEquals(33, taskExecutor.getKeepAliveSeconds()); + } + @Test public void messageConverter() { CompositeMessageConverter messageConverter = this.cxtStompBroker.getBean( @@ -240,9 +298,6 @@ public TestController subscriptionController() { return new TestController(); } - @Override - protected void 
configureMessageBroker(MessageBrokerRegistry registry) { - } @Override @Bean @@ -250,16 +305,29 @@ public AbstractSubscribableChannel clientInboundChannel() { return new TestChannel(); } + @Override + protected void configureClientInboundChannel(ChannelRegistration registration) { + } + @Override @Bean public AbstractSubscribableChannel clientOutboundChannel() { return new TestChannel(); } + @Override + protected void configureClientOutboundChannel(ChannelRegistration registration) { + } + @Override public AbstractSubscribableChannel brokerChannel() { return new TestChannel(); } + + @Override + protected void configureMessageBroker(MessageBrokerRegistry registry) { + } + } @Configuration @@ -271,6 +339,32 @@ public void configureMessageBroker(MessageBrokerRegistry registry) { } } + @Configuration + static class CustomizedChannelConfig extends AbstractMessageBrokerConfiguration { + + private ChannelInterceptor interceptor = new ChannelInterceptorAdapter(); + + + @Override + protected void configureClientInboundChannel(ChannelRegistration registration) { + registration.setInterceptors(this.interceptor); + registration.taskExecutor().corePoolSize(11).maxPoolSize(12).keepAliveSeconds(13).queueCapacity(14); + } + + @Override + protected void configureClientOutboundChannel(ChannelRegistration registration) { + registration.setInterceptors(this.interceptor, this.interceptor); + registration.taskExecutor().corePoolSize(21).maxPoolSize(22).keepAliveSeconds(23).queueCapacity(24); + } + + @Override + protected void configureMessageBroker(MessageBrokerRegistry registry) { + registry.configureBrokerChannel().setInterceptors(this.interceptor, this.interceptor, this.interceptor); + registry.configureBrokerChannel().taskExecutor() + .corePoolSize(31).maxPoolSize(32).keepAliveSeconds(33).queueCapacity(34); + } + } + private static class TestChannel extends ExecutorSubscribableChannel { diff --git 
a/spring-websocket/src/main/java/org/springframework/web/socket/messaging/config/DelegatingWebSocketMessageBrokerConfiguration.java b/spring-websocket/src/main/java/org/springframework/web/socket/messaging/config/DelegatingWebSocketMessageBrokerConfiguration.java index 3cb0a5e14930..9c84f2c20410 100644 --- a/spring-websocket/src/main/java/org/springframework/web/socket/messaging/config/DelegatingWebSocketMessageBrokerConfiguration.java +++ b/spring-websocket/src/main/java/org/springframework/web/socket/messaging/config/DelegatingWebSocketMessageBrokerConfiguration.java @@ -21,6 +21,7 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.context.annotation.Configuration; +import org.springframework.messaging.simp.config.ChannelRegistration; import org.springframework.messaging.simp.config.MessageBrokerRegistry; import org.springframework.util.CollectionUtils; @@ -58,6 +59,20 @@ protected void registerStompEndpoints(StompEndpointRegistry registry) { } } + @Override + protected void configureClientInboundChannel(ChannelRegistration registration) { + for (WebSocketMessageBrokerConfigurer c : this.configurers) { + c.configureClientInboundChannel(registration); + } + } + + @Override + protected void configureClientOutboundChannel(ChannelRegistration registration) { + for (WebSocketMessageBrokerConfigurer c : this.configurers) { + c.configureClientOutboundChannel(registration); + } + } + @Override protected void configureMessageBroker(MessageBrokerRegistry registry) { for (WebSocketMessageBrokerConfigurer c : this.configurers) { diff --git a/spring-websocket/src/main/java/org/springframework/web/socket/messaging/config/WebSocketMessageBrokerConfigurer.java b/spring-websocket/src/main/java/org/springframework/web/socket/messaging/config/WebSocketMessageBrokerConfigurer.java index 5a3e00b75c96..f7bba9c58fee 100644 --- a/spring-websocket/src/main/java/org/springframework/web/socket/messaging/config/WebSocketMessageBrokerConfigurer.java 
+++ b/spring-websocket/src/main/java/org/springframework/web/socket/messaging/config/WebSocketMessageBrokerConfigurer.java @@ -17,6 +17,7 @@ package org.springframework.web.socket.messaging.config; +import org.springframework.messaging.simp.config.ChannelRegistration; import org.springframework.messaging.simp.config.MessageBrokerRegistry; /** @@ -35,6 +36,22 @@ public interface WebSocketMessageBrokerConfigurer { */ void registerStompEndpoints(StompEndpointRegistry registry); + /** + * Configure the {@link org.springframework.messaging.MessageChannel} used for + * incoming messages from WebSocket clients. By default the channel is backed + * by a thread pool of size 1. It is recommended to customize thread pool + * settings for production use. + */ + void configureClientInboundChannel(ChannelRegistration registration); + + /** + * Configure the {@link org.springframework.messaging.MessageChannel} used for + * incoming messages from WebSocket clients. By default the channel is backed + * by a thread pool of size 1. It is recommended to customize thread pool + * settings for production use. + */ + void configureClientOutboundChannel(ChannelRegistration registration); + /** * Configure message broker options. 
*/ diff --git a/spring-websocket/src/test/java/org/springframework/web/socket/messaging/SimpAnnotationMethodIntegrationTests.java b/spring-websocket/src/test/java/org/springframework/web/socket/messaging/SimpAnnotationMethodIntegrationTests.java index 377a4d1e1adc..27879b0152d5 100644 --- a/spring-websocket/src/test/java/org/springframework/web/socket/messaging/SimpAnnotationMethodIntegrationTests.java +++ b/spring-websocket/src/test/java/org/springframework/web/socket/messaging/SimpAnnotationMethodIntegrationTests.java @@ -36,6 +36,7 @@ import org.springframework.context.annotation.Configuration; import org.springframework.messaging.handler.annotation.MessageExceptionHandler; import org.springframework.messaging.handler.annotation.MessageMapping; +import org.springframework.messaging.simp.config.ChannelRegistration; import org.springframework.messaging.simp.config.MessageBrokerRegistry; import org.springframework.messaging.simp.stomp.StompCommand; import org.springframework.messaging.support.channel.AbstractSubscribableChannel; @@ -215,6 +216,14 @@ public void registerStompEndpoints(StompEndpointRegistry registry) { registry.addEndpoint("/ws").setHandshakeHandler(this.handshakeHandler); } + @Override + public void configureClientInboundChannel(ChannelRegistration registration) { + } + + @Override + public void configureClientOutboundChannel(ChannelRegistration registration) { + } + @Override public void configureMessageBroker(MessageBrokerRegistry configurer) { configurer.setApplicationDestinationPrefixes("/app"); diff --git a/spring-websocket/src/test/java/org/springframework/web/socket/messaging/config/WebSocketMessageBrokerConfigurationSupportTests.java b/spring-websocket/src/test/java/org/springframework/web/socket/messaging/config/WebSocketMessageBrokerConfigurationSupportTests.java index 352f5b002d82..944a44f4c58a 100644 --- a/spring-websocket/src/test/java/org/springframework/web/socket/messaging/config/WebSocketMessageBrokerConfigurationSupportTests.java 
+++ b/spring-websocket/src/test/java/org/springframework/web/socket/messaging/config/WebSocketMessageBrokerConfigurationSupportTests.java @@ -27,6 +27,7 @@ import org.springframework.messaging.handler.annotation.SendTo; import org.springframework.messaging.simp.SimpMessageType; import org.springframework.messaging.simp.annotation.SubscribeMapping; +import org.springframework.messaging.simp.config.ChannelRegistration; import org.springframework.messaging.simp.config.MessageBrokerRegistry; import org.springframework.messaging.simp.stomp.StompCommand; import org.springframework.messaging.simp.stomp.StompHeaderAccessor; @@ -119,20 +120,29 @@ public String handleMessage() { @Configuration static class TestSimpleMessageBrokerConfig implements WebSocketMessageBrokerConfigurer { + @Bean + public TestController subscriptionController() { + return new TestController(); + } + @Override public void registerStompEndpoints(StompEndpointRegistry registry) { registry.addEndpoint("/simpleBroker"); } @Override - public void configureMessageBroker(MessageBrokerRegistry configurer) { - // SimpleBroker used by default + public void configureClientInboundChannel(ChannelRegistration registration) { } - @Bean - public TestController subscriptionController() { - return new TestController(); + @Override + public void configureClientOutboundChannel(ChannelRegistration registration) { } + + @Override + public void configureMessageBroker(MessageBrokerRegistry registry) { + // SimpleBroker used by default + } + } @Configuration
ccedd8f8e0e01c6472cd32d371d8f579f60af9fc
orientdb
fixed cluster id selection in distributed mode.--
c
https://github.com/orientechnologies/orientdb
diff --git a/server/src/main/java/com/orientechnologies/orient/server/distributed/task/OCreateRecordTask.java b/server/src/main/java/com/orientechnologies/orient/server/distributed/task/OCreateRecordTask.java index 8a2b793b258..113f0231f3d 100644 --- a/server/src/main/java/com/orientechnologies/orient/server/distributed/task/OCreateRecordTask.java +++ b/server/src/main/java/com/orientechnologies/orient/server/distributed/task/OCreateRecordTask.java @@ -20,6 +20,8 @@ package com.orientechnologies.orient.server.distributed.task; import com.orientechnologies.orient.core.Orient; +import com.orientechnologies.orient.core.db.ODatabaseDocumentInternal; +import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal; import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx; import com.orientechnologies.orient.core.db.record.OPlaceholder; import com.orientechnologies.orient.core.id.ORID; @@ -28,6 +30,7 @@ import com.orientechnologies.orient.core.record.ORecord; import com.orientechnologies.orient.core.record.ORecordInternal; import com.orientechnologies.orient.core.record.impl.ODocument; +import com.orientechnologies.orient.core.record.impl.ODocumentInternal; import com.orientechnologies.orient.core.version.ORecordVersion; import com.orientechnologies.orient.server.OServer; import com.orientechnologies.orient.server.distributed.ODistributedRequest; @@ -65,11 +68,14 @@ public OCreateRecordTask(final ORecordId iRid, final byte[] iContent, final ORec public OCreateRecordTask(final ORecord record) { this((ORecordId) record.getIdentity(), record.toStream(), record.getRecordVersion(), ORecordInternal.getRecordType(record)); - if (rid.getClusterId() == ORID.CLUSTER_ID_INVALID && record instanceof ODocument) { - final OClass clazz = ((ODocument) record).getSchemaClass(); - if (clazz != null) { + if (rid.getClusterId() == ORID.CLUSTER_ID_INVALID) { + final OClass clazz; + if (record instanceof ODocument && (clazz = 
ODocumentInternal.getImmutableSchemaClass((ODocument) record)) != null) { // PRE-ASSIGN THE CLUSTER ID ON CALLER NODE clusterId = clazz.getClusterSelection().getCluster(clazz, (ODocument) record); + } else { + ODatabaseDocumentInternal db = ODatabaseRecordThreadLocal.INSTANCE.get(); + clusterId = db.getDefaultClusterId(); } } }
6ecd0a21f1545357c3bd8e9b4a161f158c8d11eb
Mylyn Reviews
322734: review type identification fallback
c
https://github.com/eclipse-mylyn/org.eclipse.mylyn.reviews
diff --git a/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewsUtil.java b/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewsUtil.java index eeeadcf5..d1a7e91b 100644 --- a/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewsUtil.java +++ b/org.eclipse.mylyn.reviews.core/src/org/eclipse/mylyn/reviews/core/ReviewsUtil.java @@ -49,16 +49,22 @@ public static List<ReviewSubTask> getReviewSubTasksFor( List<ReviewSubTask> resultList = new ArrayList<ReviewSubTask>(); try { for (ITask subTask : taskContainer.getChildren()) { - + if (!ReviewsUtil.hasReviewMarker(subTask)) { + TaskData taskData = taskDataManager.getTaskData(subTask); + if(getReviewAttachments(repositoryModel, taskData).size()>0){ + ReviewsUtil.markAsReview(subTask); + } + } + if (ReviewsUtil.isMarkedAsReview(subTask)) {//.getSummary().startsWith("Review")) { //$NON-NLS-1$ // change to review data manager for (Review review : getReviewAttachmentFromTask( taskDataManager, repositoryModel, subTask)) { // TODO change to latest etc - if(review.getResult()!=null) - resultList.add(new ReviewSubTask(getPatchFile(review - .getScope()), getPatchCreationDate(review - .getScope()), + if (review.getResult() != null) + resultList.add(new ReviewSubTask( + getPatchFile(review.getScope()), + getPatchCreationDate(review.getScope()), getAuthorString(review.getScope()), subTask .getOwner(), review.getResult() .getRating(), review.getResult() @@ -114,8 +120,8 @@ static List<Review> parseAttachments(TaskAttribute attribute, TaskAttribute.ATTACHMENT_URL).getValue()); ZipInputStream stream = new ZipInputStream(url.openStream()); - while (!stream.getNextEntry().getName().equals( - ReviewConstants.REVIEW_DATA_FILE)) { + while (!stream.getNextEntry().getName() + .equals(ReviewConstants.REVIEW_DATA_FILE)) { } ResourceSet resourceSet = new ResourceSetImpl(); @@ -142,24 +148,36 @@ public static List<Review> getReviewAttachmentFromTask( List<Review> reviews = new 
ArrayList<Review>(); TaskData taskData = taskDataManager.getTaskData(task); if (taskData != null) { - List<TaskAttribute> attributesByType = taskData - .getAttributeMapper().getAttributesByType(taskData, - TaskAttribute.TYPE_ATTACHMENT); - for (TaskAttribute attribute : attributesByType) { - // TODO move RepositoryModel.createTaskAttachment to interface? - ITaskAttachment taskAttachment = ((RepositoryModel) repositoryModel) - .createTaskAttachment(attribute); - if (taskAttachment!=null&&taskAttachment.getFileName().equals( - ReviewConstants.REVIEW_DATA_CONTAINER)) { - reviews.addAll(parseAttachments(attribute, - new NullProgressMonitor())); - } + for (TaskAttribute attribute : getReviewAttachments( + repositoryModel, taskData)) { + reviews.addAll(parseAttachments(attribute, + new NullProgressMonitor())); + } } return reviews; } + public static List<TaskAttribute> getReviewAttachments( + IRepositoryModel repositoryModel, TaskData taskData) { + + List<TaskAttribute> matchingAttributes = new ArrayList<TaskAttribute>(); + List<TaskAttribute> attributesByType = taskData.getAttributeMapper() + .getAttributesByType(taskData, TaskAttribute.TYPE_ATTACHMENT); + for (TaskAttribute attribute : attributesByType) { + // TODO move RepositoryModel.createTaskAttachment to interface? + ITaskAttachment taskAttachment = ((RepositoryModel) repositoryModel) + .createTaskAttachment(attribute); + if (taskAttachment != null + && taskAttachment.getFileName().equals( + ReviewConstants.REVIEW_DATA_CONTAINER)) { + matchingAttributes.add(attribute); + } + } + return matchingAttributes; + } + private static List<ITargetPathStrategy> strategies; static { strategies = new ArrayList<ITargetPathStrategy>(); @@ -167,7 +185,6 @@ public static List<Review> getReviewAttachmentFromTask( strategies.add(new GitPatchPathFindingStrategy()); } - public static List<? 
extends ITargetPathStrategy> getPathFindingStrategies() { return strategies; } @@ -179,6 +196,11 @@ public static boolean isMarkedAsReview(ITask task) { } public static void markAsReview(ITask task) { - task.setAttribute(ReviewConstants.ATTR_REVIEW_FLAG, Boolean.TRUE.toString()); + task.setAttribute(ReviewConstants.ATTR_REVIEW_FLAG, + Boolean.TRUE.toString()); + } + + public static boolean hasReviewMarker(ITask task) { + return task.getAttribute(ReviewConstants.ATTR_REVIEW_FLAG) != null; } } diff --git a/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPartAdvisor.java b/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPartAdvisor.java index 75df46d7..94a62a2d 100644 --- a/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPartAdvisor.java +++ b/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPartAdvisor.java @@ -23,6 +23,7 @@ import org.eclipse.mylyn.reviews.ui.ReviewCommentTaskAttachmentSource; import org.eclipse.mylyn.reviews.ui.ReviewsUiPlugin; import org.eclipse.mylyn.tasks.core.AbstractRepositoryConnector; +import org.eclipse.mylyn.tasks.core.IRepositoryModel; import org.eclipse.mylyn.tasks.core.ITask; import org.eclipse.mylyn.tasks.core.TaskRepository; import org.eclipse.mylyn.tasks.core.data.TaskAttribute; @@ -37,6 +38,21 @@ public class ReviewTaskEditorPartAdvisor implements ITaskEditorPartDescriptorAdvisor { public boolean canCustomize(ITask task) { + if (!ReviewsUtil.hasReviewMarker(task)) { + try { + IRepositoryModel repositoryModel = TasksUi.getRepositoryModel(); + TaskData taskData = TasksUiPlugin.getTaskDataManager() + .getTaskData(task); + if (ReviewsUtil.getReviewAttachments(repositoryModel, taskData) + .size() > 0) { + ReviewsUtil.markAsReview(task); + } + } catch (CoreException e) { + // FIXME + e.printStackTrace(); + } + } + boolean isReview = ReviewsUtil.isMarkedAsReview(task); return isReview; } @@ 
-101,7 +117,7 @@ public void afterSubmit(ITask task) { connector.getTaskAttachmentHandler().postContent( taskRepository, task, attachment, "review result", //$NON-NLS-1$ attachmentAttribute, new NullProgressMonitor()); - + TasksUiInternal.synchronizeTask(connector, task, false, null); } } catch (CoreException e) {
d27180c451f4659c5719e6a2793283bc3c998af6
betterform$betterform
[feature] readded SSL-Functionality for http-components: - enabled 443,8443 as default ports - renamed property from "httpclient.ssl.factory" to "httpclient.ssl.context" - renamed property from "httpclient.ssl.factory.defaultPort" to "httpclient.ssl.context.customPort" - renamed KeyStoreSSLProtocolSocketFactory to KeyStoreSSLContext
p
https://github.com/betterform/betterform
diff --git a/betty/betterform-config.xml b/betty/betterform-config.xml index d7ad51e1b..47b2072ca 100644 --- a/betty/betterform-config.xml +++ b/betty/betterform-config.xml @@ -108,30 +108,32 @@ <!-- Full classpath of SSLProtocolSocketFactory which should be used by httpclient. - For now there is only one implementing Class in betterFORM, which allows verification of a server cert (KeyStoreSSLProtocolSocketFactory). + For now there is only one implementing Class in betterFORM, which allows verification of a server cert (KeyStoreSSLContext). SSLProtocolSocketFactory-Handling will be activated if the property is specified. + ! You have to uncomment this if you would like to use betterFORM with custom SSL-Certificates. ! --> <!-- - <property name="httpclient.ssl.factory" value="de.betterform.connector.http.ssl.KeyStoreSSLProtocolSocketFactory" description="Full classpath of SSLProtocolSocketFactory which should be used by httpclient."/> + <property name="httpclient.ssl.context" value="de.betterform.connector.http.ssl.KeyStoreSSLContext" description="Full classpath of SSLProtocolSocketFactory which should be used by httpclient."/> --> <!-- - Specify defaultPort for SSLProtocolSocketFactory for httpclient. - If not specified 443 will be used. + Specify a customPort for SSLProtocolSocketFactory for httpclient. + Port 443,8443 are the default ports. See: http://hc.apache.org/httpclient-3.x/apidocs/org/apache/commons/httpclient/protocol/Protocol.html --> - <property name="httpclient.ssl.factory.defaultPort" value="DEFAULT-SSL-PORT" description="Specify defaultPort for SSLProtocolSocketFactory for httpclient. 443 if not specified."/> - +<!-- + <property name="httpclient.ssl.context.customPort" value="CUSTOM-SSL-PORT" description="Specify custom Port for SSLProtocolSocketFactory for httpclient. 443,8443 if not specified."/> +--> <!-- Location of JAVA keystore used by httpclient to verify trusted ssl-certs. 
- If / is omitted in the value "user.home" will be used a relative path. + If / (root-dir) is omitted in the value "user.home" will be used as relative path. Otherwise an absolute path is expected. - Used by: KeyStoreSSLProtocolSocketFactory + Used by: KeyStoreSSLContext --> <property name="httpclient.ssl.keystore.path" value="PATH-TO-KEYSTORE" description="Location of the keystore to be used by httpclient."/> <!-- Password to unlock JAVA keystore. - Used by: KeyStoreSSLProtocolSocketFactory + Used by: KeyStoreSSLContext --> <property name="httpclient.ssl.keystore.passwd" value="KEYSTORE-PASSWD" description="Password to unlock keystore."/> diff --git a/core/src/main/java/de/betterform/connector/http/AbstractHTTPConnector.java b/core/src/main/java/de/betterform/connector/http/AbstractHTTPConnector.java index 6ea2cc8f9..f84f41ab1 100644 --- a/core/src/main/java/de/betterform/connector/http/AbstractHTTPConnector.java +++ b/core/src/main/java/de/betterform/connector/http/AbstractHTTPConnector.java @@ -5,41 +5,38 @@ package de.betterform.connector.http; -import de.betterform.connector.ConnectorFactory; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import de.betterform.connector.AbstractConnector; +import de.betterform.connector.ConnectorFactory; +import de.betterform.connector.http.ssl.KeyStoreSSLContext; +import de.betterform.xml.config.Config; import de.betterform.xml.xforms.XFormsConstants; import de.betterform.xml.xforms.exception.XFormsException; import de.betterform.xml.xforms.exception.XFormsInternalSubmitException; import de.betterform.xml.xforms.model.submission.RequestHeader; import de.betterform.xml.xforms.model.submission.RequestHeaders; - +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.http.Header; - import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.HttpResponse; - +import org.apache.http.auth.AuthScope; +import 
org.apache.http.auth.Credentials; +import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.client.AuthCache; - -import org.apache.http.client.HttpClient; import org.apache.http.client.methods.*; import org.apache.http.client.params.ClientPNames; import org.apache.http.client.params.CookiePolicy; import org.apache.http.client.protocol.ClientContext; -import org.apache.http.conn.ClientConnectionManager; +import org.apache.http.conn.scheme.Scheme; +import org.apache.http.conn.ssl.SSLSocketFactory; import org.apache.http.cookie.Cookie; import org.apache.http.cookie.MalformedCookieException; import org.apache.http.entity.StringEntity; import org.apache.http.impl.auth.BasicScheme; import org.apache.http.impl.client.BasicAuthCache; import org.apache.http.impl.client.DefaultHttpClient; -import org.apache.http.auth.Credentials; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.auth.AuthScope; import org.apache.http.impl.cookie.BrowserCompatSpec; -import org.apache.http.message.BasicHeader; import org.apache.http.params.BasicHttpParams; import org.apache.http.params.HttpParams; import org.apache.http.protocol.BasicHttpContext; @@ -66,14 +63,14 @@ public class AbstractHTTPConnector extends AbstractConnector { * Custom-SSL: * Key for storing custom SSL-protocol */ - public static final String SSL_CUSTOM_PROTOCOL = "ssl-protocol"; + public static final String SSL_CUSTOM_SCHEME = "ssl_custom_scheme"; /* * Custom-SSL: * SSL-factory properties, see betterform-config.xml for description. 
*/ - public static final String HTTPCLIENT_SSL_FACTORY= "httpclient.ssl.factory"; - public static final String HTTPCLIENT_SSL_FACTORY_DEFAULTPORT= "httpclient.ssl.factory.defaultPort"; + public static final String HTTPCLIENT_SSL_CONTEXT= "httpclient.ssl.context"; + public static final String HTTPCLIENT_SSL_CONTEXT_CUSTOMPORT= "httpclient.ssl.context.customPort"; public static final String HTTPCLIENT_SSL_KEYSTORE_PATH= "httpclient.ssl.keystore.path"; public static final String HTTPCLIENT_SSL_KEYSTORE_PASSWD= "httpclient.ssl.keystore.passwd"; @@ -244,14 +241,16 @@ protected void execute(HttpRequestBase httpRequestBase) throws Exception{ DefaultHttpClient client = ConnectorFactory.getFactory().getHttpClient(httpParams); -/* - if (! getContext().containsKey(AbstractHTTPConnector.SSL_CUSTOM_PROTOCOL)) { - String factoryPath = Config.getInstance().getProperty(AbstractHTTPConnector.HTTPCLIENT_SSL_FACTORY); - if (factoryPath != null) { - initSSLProtocol(factoryPath); + + if (! getContext().containsKey(AbstractHTTPConnector.SSL_CUSTOM_SCHEME)) { + LOGGER.debug("SSL_CUSTOM_SCHEME"); + LOGGER.debug("SSL_CUSTOM_SCHEME: Factory: " + Config.getInstance().getProperty(AbstractHTTPConnector.HTTPCLIENT_SSL_CONTEXT)); + String contextPath = Config.getInstance().getProperty(AbstractHTTPConnector.HTTPCLIENT_SSL_CONTEXT); + if (contextPath != null) { + initSSLScheme(contextPath); } } -*/ + if (LOGGER.isDebugEnabled()) { LOGGER.debug("context params>>>"); Map map = getContext(); @@ -380,20 +379,18 @@ protected void execute(HttpRequestBase httpRequestBase) throws Exception{ throw new MalformedCookieException("Cookies must be passed as org.apache.commons.httpclient.Cookie objects."); } } - HttpResponse httpResponse = client.execute(httpRequestBase); - try { - /* TODO - if (getContext().containsKey(AbstractHTTPConnector.SSL_CUSTOM_PROTOCOL)) { - LOGGER.debug("Using customSSL-Protocol-Handler"); + if (getContext().containsKey(AbstractHTTPConnector.SSL_CUSTOM_SCHEME)) { + LOGGER.debug("Using 
customSSL-Protocol-Handler"); + Iterator<Scheme> schemes = ((Vector<Scheme>) getContext().get(AbstractHTTPConnector.SSL_CUSTOM_SCHEME)).iterator(); - HostConfiguration hc = new HostConfiguration(); - hc.setHost(httpMethod.getURI().getHost(), httpMethod.getURI().getPort(), (Protocol) getContext().get(AbstractHTTPConnector.SSL_CUSTOM_PROTOCOL)); - client.executeMethod(hc, httpMethod); - } else { - client.executeMethod(httpMethod); - } */ + while (schemes.hasNext()) { + client.getConnectionManager().getSchemeRegistry().register(schemes.next()); + } + } + HttpResponse httpResponse = client.execute(httpRequestBase); + try { if (httpResponse.getStatusLine().getStatusCode() >= 300) { // Allow 302 only if (httpResponse.getStatusLine().getStatusCode() != 302) { @@ -403,6 +400,7 @@ protected void execute(HttpRequestBase httpRequestBase) throws Exception{ this.handleHttpMethod(httpResponse); } catch (Exception e) { + LOGGER.trace("AbstractHTTPConnector Exception: ", e); try { throw new XFormsInternalSubmitException(httpResponse.getStatusLine().getStatusCode(), httpResponse.getStatusLine().getReasonPhrase(), EntityUtils.toString(httpResponse.getEntity()), XFormsConstants.RESOURCE_ERROR); } catch (IOException e1) { @@ -433,35 +431,37 @@ private void configureRequest(HttpEntityEnclosingRequestBase httpMethod, String //httpMethod.setHeader(new BasicHeader("Content-Length", String.valueOf(body.getBytes(encoding).length))); } - /* - private void initSSLProtocol(String factoryPath) throws Exception { - Protocol sslProtocol; - LOGGER.debug("creating sslProtocol ..."); - LOGGER.debug("ProtocolPath: " + factoryPath); - Class sslClass = Class.forName(factoryPath); - Object sslFactory = sslClass.newInstance(); - if (sslFactory instanceof SecureProtocolSocketFactory) { - int defaultPort; - if (Config.getInstance().getProperty(AbstractHTTPConnector.HTTPCLIENT_SSL_FACTORY_DEFAULTPORT) != null) { + + private void initSSLScheme(String contextPath) throws Exception { + LOGGER.debug("creating 
sslScheme ..."); + LOGGER.debug("KeyStoreSSLContext: " + contextPath); + Class contextClass = Class.forName(contextPath); + Object context = contextClass.newInstance(); + Vector<Scheme> schemes = new Vector<Scheme>(); + + if (context instanceof KeyStoreSSLContext) { + int httpSSLPort = 443; + int tomcatSSLPort = 8443; + + SSLSocketFactory socketFactory = new SSLSocketFactory(((KeyStoreSSLContext)context).getSSLContext()); + if (Config.getInstance().getProperty(AbstractHTTPConnector.HTTPCLIENT_SSL_CONTEXT_CUSTOMPORT) != null) { try { - defaultPort = Integer.parseInt(Config.getInstance().getProperty(AbstractHTTPConnector.HTTPCLIENT_SSL_FACTORY_DEFAULTPORT)); + int customPort = Integer.parseInt(Config.getInstance().getProperty(AbstractHTTPConnector.HTTPCLIENT_SSL_CONTEXT_CUSTOMPORT)); + LOGGER.trace("CustomPort: " + customPort); + Scheme sslScheme = new Scheme("https", customPort, socketFactory); + schemes.add(sslScheme); } catch (NumberFormatException nfe) { - LOGGER.warn(AbstractHTTPConnector.HTTPCLIENT_SSL_FACTORY_DEFAULTPORT + " is not parsable as a number. Check your settings in betterform-config.xml!", nfe); - LOGGER.warn("Setting sslPort to 443"); - defaultPort = 443; + LOGGER.warn(AbstractHTTPConnector.HTTPCLIENT_SSL_CONTEXT_CUSTOMPORT + " is not parsable as a number. 
Check your settings in betterform-config.xml!", nfe); } - } else { - defaultPort = 443; } - LOGGER.trace("DefaultPort: " + defaultPort); - sslProtocol = new Protocol("https", (ProtocolSocketFactory) sslFactory, defaultPort); - Protocol.registerProtocol("https", sslProtocol); + Scheme sslScheme1 = new Scheme("https", httpSSLPort, socketFactory); + schemes.add(sslScheme1); + Scheme sslScheme2 = new Scheme("https", tomcatSSLPort, socketFactory); + schemes.add(sslScheme2); - getContext().put(AbstractHTTPConnector.SSL_CUSTOM_PROTOCOL, sslProtocol); + getContext().put(AbstractHTTPConnector.SSL_CUSTOM_SCHEME, schemes); } } - - */ } //end of class diff --git a/core/src/main/java/de/betterform/connector/http/ssl/KeyStoreSSLProtocolSocketFactory.java b/core/src/main/java/de/betterform/connector/http/ssl/KeyStoreSSLContext.java similarity index 71% rename from core/src/main/java/de/betterform/connector/http/ssl/KeyStoreSSLProtocolSocketFactory.java rename to core/src/main/java/de/betterform/connector/http/ssl/KeyStoreSSLContext.java index 04c89a0e1..916444557 100644 --- a/core/src/main/java/de/betterform/connector/http/ssl/KeyStoreSSLProtocolSocketFactory.java +++ b/core/src/main/java/de/betterform/connector/http/ssl/KeyStoreSSLContext.java @@ -62,18 +62,20 @@ import de.betterform.connector.http.AbstractHTTPConnector; import de.betterform.xml.config.Config; import de.betterform.xml.config.XFormsConfigException; - import org.apache.commons.httpclient.contrib.ssl.AuthSSLInitializationError; import org.apache.commons.httpclient.contrib.ssl.AuthSSLX509TrustManager; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import javax.net.ssl.*; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509TrustManager; import java.io.File; import java.io.IOException; import java.io.InputStream; -import java.net.*; +import java.net.MalformedURLException; +import 
java.net.URL; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.KeyStoreException; @@ -83,23 +85,18 @@ import java.security.cert.X509Certificate; import java.util.Enumeration; -import org.apache.http.conn.ConnectTimeoutException; -import org.apache.http.conn.scheme.SocketFactory; -import org.apache.http.conn.ssl.SSLSocketFactory; -import org.apache.http.params.HttpConnectionParams; - /** * @author <a href="mailto:[email protected]">tobi</a> - * @version $Id: KeyStoreSSLProtocolSocketFactory 08.10.2010 tobi $ + * @version $Id: KeyStoreSSLContext 08.10.2010 tobi $ */ -public class KeyStoreSSLProtocolSocketFactory { +public class KeyStoreSSLContext { private static String keyStorePath = null; private static String keyStorePasswd = null; private SSLContext sslcontext = null; - private static Log LOGGER = LogFactory.getLog(KeyStoreSSLProtocolSocketFactory.class); + private static Log LOGGER = LogFactory.getLog(KeyStoreSSLContext.class); - public KeyStoreSSLProtocolSocketFactory() { + public KeyStoreSSLContext() { try { this.keyStorePath = Config.getInstance().getProperty(AbstractHTTPConnector.HTTPCLIENT_SSL_KEYSTORE_PATH, null); this.keyStorePasswd = Config.getInstance().getProperty(AbstractHTTPConnector.HTTPCLIENT_SSL_KEYSTORE_PASSWD , null); @@ -109,13 +106,13 @@ public KeyStoreSSLProtocolSocketFactory() { } private URL getKeyStoreURL() throws AuthSSLInitializationError { - if (KeyStoreSSLProtocolSocketFactory.keyStorePath != null) { + if (KeyStoreSSLContext.keyStorePath != null) { File keystore; - if (KeyStoreSSLProtocolSocketFactory.keyStorePath.startsWith( File.separator)) { - keystore = new File(KeyStoreSSLProtocolSocketFactory.keyStorePath); + if (KeyStoreSSLContext.keyStorePath.startsWith( File.separator)) { + keystore = new File(KeyStoreSSLContext.keyStorePath); } else { - keystore = new File(System.getProperty("user.home") + File.separator + KeyStoreSSLProtocolSocketFactory.keyStorePath); + keystore = new 
File(System.getProperty("user.home") + File.separator + KeyStoreSSLContext.keyStorePath); } try { return keystore.toURI().toURL(); @@ -129,15 +126,15 @@ private URL getKeyStoreURL() throws AuthSSLInitializationError { } private String getKeyStorePasswd() throws AuthSSLInitializationError { - if (KeyStoreSSLProtocolSocketFactory.keyStorePasswd != null) { + if (KeyStoreSSLContext.keyStorePasswd != null) { //TODO: Support encryption of passwd! - return KeyStoreSSLProtocolSocketFactory.keyStorePasswd; + return KeyStoreSSLContext.keyStorePasswd; } throw new AuthSSLInitializationError("You must configure "+ AbstractHTTPConnector.HTTPCLIENT_SSL_KEYSTORE_PASSWD + " in betterform-config.xml!"); } - public static KeyStore createKeyStore(final URL url, final String password) + private KeyStore createKeyStore(final URL url, final String password) throws KeyStoreException, NoSuchAlgorithmException, CertificateException, IOException { if (url == null) { @@ -145,7 +142,7 @@ public static KeyStore createKeyStore(final URL url, final String password) } LOGGER.debug("Initializing key store"); - KeyStore keystore = KeyStore.getInstance("jks"); + KeyStore keystore = KeyStore.getInstance(KeyStore.getDefaultType()); InputStream is = null; try { is = url.openStream(); @@ -217,97 +214,10 @@ private SSLContext createSSLContext() { } } - private SSLContext getSSLContext() { + public SSLContext getSSLContext() { if (this.sslcontext == null) { this.sslcontext = createSSLContext(); } return this.sslcontext; } - - - /** - * Attempts to get a new socket connection to the given host within the given time limit. - * <p> - * To circumvent the limitations of older JREs that do not support connect timeout a - * controller thread is executed. The controller thread attempts to create a new socket - * within the given limit of time. 
If socket constructor does not return until the - - * </p> - * - * @param host the host name/IP - * @param port the port on the host - * @param localAddress the local host name/IP to bind the socket to - * @param localPort the port on the local machine - - * - * @return Socket a new socket - * - * @throws IOException if an I/O error occurs while creating the socket - * @throws java.net.UnknownHostException if the IP address of the host cannot be - * determined - */ - public Socket createSocket( - final String host, - final int port, - final InetAddress localAddress, - final int localPort, - final HttpConnectionParams params - ) throws IOException, UnknownHostException, ConnectTimeoutException { - if (params == null) { - throw new IllegalArgumentException("Parameters may not be null"); - } - int timeout = 0;//params.getConnectionTimeout(params); - SSLSocketFactory socketfactory = new SSLSocketFactory(getSSLContext()); - Socket socket = socketfactory.createSocket(); - SocketAddress localaddr = new InetSocketAddress(localAddress, localPort); - SocketAddress remoteaddr = new InetSocketAddress(host, port); - socket.bind(localaddr); - if (timeout == 0) { - socket.connect(remoteaddr, timeout); - } else { - socket.connect(remoteaddr); - } - return socket; - } - - public Socket createSocket( - String host, - int port, - InetAddress clientHost, - int clientPort) - throws IOException, UnknownHostException - { - return getSSLContext().getSocketFactory().createSocket( - host, - port, - clientHost, - clientPort - ); - } - - - public Socket createSocket(String host, int port) - throws IOException, UnknownHostException - { - return getSSLContext().getSocketFactory().createSocket( - host, - port - ); - } - - - public Socket createSocket( - Socket socket, - String host, - int port, - boolean autoClose) - throws IOException, UnknownHostException - { - return getSSLContext().getSocketFactory().createSocket( - socket, - host, - port, - autoClose - ); - } } diff --git 
a/web/src/main/webapp/WEB-INF/betterform-config.xml b/web/src/main/webapp/WEB-INF/betterform-config.xml index 06d788fb9..234b6cd2a 100644 --- a/web/src/main/webapp/WEB-INF/betterform-config.xml +++ b/web/src/main/webapp/WEB-INF/betterform-config.xml @@ -106,30 +106,32 @@ <!-- Full classpath of SSLProtocolSocketFactory which should be used by httpclient. - For now there is only one implementing Class in betterFORM, which allows verification of a server cert (KeyStoreSSLProtocolSocketFactory). + For now there is only one implementing Class in betterFORM, which allows verification of a server cert (KeyStoreSSLContext). SSLProtocolSocketFactory-Handling will be activated if the property is specified. + ! You have to uncomment this if you would like to use betterFORM with custom SSL-Certificates. ! --> <!-- - <property name="httpclient.ssl.factory" value="de.betterform.connector.http.ssl.KeyStoreSSLProtocolSocketFactory" description="Full classpath of SSLProtocolSocketFactory which should be used by httpclient."/> + <property name="httpclient.ssl.context" value="de.betterform.connector.http.ssl.KeyStoreSSLContext" description="Full classpath of SSLProtocolSocketFactory which should be used by httpclient."/> --> <!-- - Specify defaultPort for SSLProtocolSocketFactory for httpclient. - If not specified 443 will be used. + Specify a customPort for SSLProtocolSocketFactory for httpclient. + Port 443,8443 are the default ports. See: http://hc.apache.org/httpclient-3.x/apidocs/org/apache/commons/httpclient/protocol/Protocol.html --> - <property name="httpclient.ssl.factory.defaultPort" value="DEFAULT-SSL-PORT" description="Specify defaultPort for SSLProtocolSocketFactory for httpclient. 443 if not specified."/> - +<!-- + <property name="httpclient.ssl.context.customPort" value="CUSTOM-SSL-PORT" description="Specify custom Port for SSLProtocolSocketFactory for httpclient. 
443,8443 if not specified."/> +--> <!-- Location of JAVA keystore used by httpclient to verify trusted ssl-certs. - If / is omitted in the value "user.home" will be used a relative path. + If / (root-dir) is omitted in the value "user.home" will be used as relative path. Otherwise an absolute path is expected. - Used by: KeyStoreSSLProtocolSocketFactory + Used by: KeyStoreSSLContext --> <property name="httpclient.ssl.keystore.path" value="PATH-TO-KEYSTORE" description="Location of the keystore to be used by httpclient."/> <!-- Password to unlock JAVA keystore. - Used by: KeyStoreSSLProtocolSocketFactory + Used by: KeyStoreSSLContext --> <property name="httpclient.ssl.keystore.passwd" value="KEYSTORE-PASSWD" description="Password to unlock keystore."/>
17fdbe6c71b97305d5c1d8dd78a6c7b249c9e481
tomakehurst$wiremock
More flexibility with request body matching
p
https://github.com/wiremock/wiremock
diff --git a/docs-v2/_docs/record-playback.md b/docs-v2/_docs/record-playback.md index 4f701d4f25..42ae79074c 100644 --- a/docs-v2/_docs/record-playback.md +++ b/docs-v2/_docs/record-playback.md @@ -191,6 +191,11 @@ POST /__admin/recordings/start "caseInsensitive" : true } }, + "requestBodyPattern" : { + "matcher" : "equalToJson", + "ignoreArrayOrder" : false, + "ignoreExtraElements" : true + }, "extractBodyCriteria" : { "textSizeThreshold" : "2048", "binarySizeThreshold" : "10240" @@ -200,10 +205,6 @@ POST /__admin/recordings/start "transformers" : [ "modify-response-header" ], "transformerParameters" : { "headerValue" : "123" - }, - "jsonMatchingFlags" : { - "ignoreArrayOrder" : false, - "ignoreExtraElements" : true } } ``` @@ -245,6 +246,11 @@ POST /__admin/recordings/snapshot "caseInsensitive" : true } }, + "requestBodyPattern" : { + "matcher" : "equalToJson", + "ignoreArrayOrder" : false, + "ignoreExtraElements" : true + }, "extractBodyCriteria" : { "textSizeThreshold" : "2 kb", "binarySizeThreshold" : "1 Mb" @@ -255,10 +261,6 @@ POST /__admin/recordings/snapshot "transformers" : [ "modify-response-header" ], "transformerParameters" : { "headerValue" : "123" - }, - "jsonMatchingFlags" : { - "ignoreArrayOrder" : false, - "ignoreExtraElements" : true } } ``` @@ -355,20 +357,26 @@ As with other types of WireMock extension, parameters can be supplied. The exact ``` -### JSON matching flags +### Request body matching -When a stub is recorded from a request with a JSON body, its body match operator will be set to `equalToJson` (rather than `equalTo`, the default). -This operator has two optional parameters, indicating whether array order should be ignored, and whether extra elements should be ignored. +By default, the body match operator for a recorded stub is based on the `Content-Type` header of the request. For `*/json` MIME types, the operator will be `equalToJson` with both the `ignoreArrayOrder` and `ignoreExtraElements` options set to `true`. 
For `*/xml` MIME types, it will use `equalToXml`. Otherwise, it will use `equalTo` with the `caseInsensitive` option set to `false`. + + This behavior can be customized via the `requestBodyPattern` parameter, which accepts a `matcher` (either `equalTo`, `equalToJson`, `equalToXml`, or `auto`) and any relevant matcher options (`ignoreArrayOrder`, `ignoreExtraElements`, or `caseInsensitive`). For example, here's how to preserve the default behavior, but set `ignoreArrayOrder` to `false` when `equalToJson` is used: ```json -"jsonMatchingFlags" : { - "ignoreArrayOrder" : false, - "ignoreExtraElements" : true +"requestBodyPattern" : { + "matcher": "auto", + "ignoreArrayOrder" : false } ``` -If not specified, both of these will default to `false`. - +If you want to always match request bodies with `equalTo` case-insensitively, regardless of the MIME type, use: +```json +"requestBodyPattern" : { + "matcher": "equalTo", + "caseInsenstivie" : true + } +``` > **note** > diff --git a/src/main/java/com/github/tomakehurst/wiremock/recording/RecordSpec.java b/src/main/java/com/github/tomakehurst/wiremock/recording/RecordSpec.java index dc834bac28..655f96cf6e 100644 --- a/src/main/java/com/github/tomakehurst/wiremock/recording/RecordSpec.java +++ b/src/main/java/com/github/tomakehurst/wiremock/recording/RecordSpec.java @@ -34,6 +34,8 @@ public class RecordSpec { private final ProxiedServeEventFilters filters; // Headers from the request to include in the stub mapping, if they match the corresponding matcher private final Map<String, CaptureHeadersSpec> captureHeaders; + // Factory for the StringValuePattern that will be used to match request bodies + private final RequestBodyPatternFactory requestBodyPatternFactory; // Criteria for extracting body from responses private final ResponseDefinitionBodyMatcher extractBodyCriteria; // How to format StubMappings in the response body @@ -46,30 +48,29 @@ public class RecordSpec { private final List<String> transformers; // Parameters for 
stub mapping transformers private final Parameters transformerParameters; - private final JsonMatchingFlags jsonMatchingFlags; @JsonCreator public RecordSpec( @JsonProperty("targetBaseUrl") String targetBaseUrl, @JsonProperty("filters") ProxiedServeEventFilters filters, @JsonProperty("captureHeaders") Map<String, CaptureHeadersSpec> captureHeaders, + @JsonProperty("requestBodyPattern") RequestBodyPatternFactory requestBodyPatternFactory, @JsonProperty("extractBodyCriteria") ResponseDefinitionBodyMatcher extractBodyCriteria, @JsonProperty("outputFormat") SnapshotOutputFormatter outputFormat, @JsonProperty("persist") Boolean persist, @JsonProperty("repeatsAsScenarios") Boolean repeatsAsScenarios, @JsonProperty("transformers") List<String> transformers, - @JsonProperty("transformerParameters") Parameters transformerParameters, - @JsonProperty("jsonMatchingFlags") JsonMatchingFlags jsonMatchingFlags) { + @JsonProperty("transformerParameters") Parameters transformerParameters) { this.targetBaseUrl = targetBaseUrl; this.filters = filters == null ? new ProxiedServeEventFilters() : filters; this.captureHeaders = captureHeaders; + this.requestBodyPatternFactory = requestBodyPatternFactory == null ? RequestBodyAutomaticPatternFactory.DEFAULTS : requestBodyPatternFactory; this.extractBodyCriteria = extractBodyCriteria; this.outputFormat = outputFormat == null ? SnapshotOutputFormatter.FULL : outputFormat; this.persist = persist == null ? 
true : persist; this.repeatsAsScenarios = repeatsAsScenarios; this.transformers = transformers; this.transformerParameters = transformerParameters; - this.jsonMatchingFlags = jsonMatchingFlags; } private RecordSpec() { @@ -79,7 +80,7 @@ private RecordSpec() { public static final RecordSpec DEFAULTS = new RecordSpec(); public static RecordSpec forBaseUrl(String targetBaseUrl) { - return new RecordSpec(targetBaseUrl, null, null, null, null, null, true, null, null, null); + return new RecordSpec(targetBaseUrl, null, null, null, null, null, null, true, null, null); } public String getTargetBaseUrl() { @@ -110,7 +111,5 @@ public Boolean getRepeatsAsScenarios() { public ResponseDefinitionBodyMatcher getExtractBodyCriteria() { return extractBodyCriteria; } - public JsonMatchingFlags getJsonMatchingFlags() { - return jsonMatchingFlags; - } + public RequestBodyPatternFactory getRequestBodyPatternFactory() { return requestBodyPatternFactory; } } diff --git a/src/main/java/com/github/tomakehurst/wiremock/recording/RecordSpecBuilder.java b/src/main/java/com/github/tomakehurst/wiremock/recording/RecordSpecBuilder.java index 4fea93623a..862056c43f 100644 --- a/src/main/java/com/github/tomakehurst/wiremock/recording/RecordSpecBuilder.java +++ b/src/main/java/com/github/tomakehurst/wiremock/recording/RecordSpecBuilder.java @@ -16,8 +16,7 @@ package com.github.tomakehurst.wiremock.recording; import com.github.tomakehurst.wiremock.extension.Parameters; -import com.github.tomakehurst.wiremock.matching.RequestPattern; -import com.github.tomakehurst.wiremock.matching.RequestPatternBuilder; +import com.github.tomakehurst.wiremock.matching.*; import java.util.List; import java.util.Map; @@ -32,13 +31,13 @@ public class RecordSpecBuilder { private RequestPatternBuilder filterRequestPatternBuilder; private List<UUID> filterIds; private Map<String, CaptureHeadersSpec> headers = newLinkedHashMap(); + private RequestBodyPatternFactory requestBodyPatternFactory; private long maxTextBodySize = 
ResponseDefinitionBodyMatcher.DEFAULT_MAX_TEXT_SIZE; private long maxBinaryBodySize = ResponseDefinitionBodyMatcher.DEFAULT_MAX_BINARY_SIZE; private boolean persistentStubs = true; private boolean repeatsAsScenarios = true; private List<String> transformerNames; private Parameters transformerParameters; - private JsonMatchingFlags jsonMatchingFlags; public RecordSpecBuilder forTarget(String targetBaseUrl) { this.targetBaseUrl = targetBaseUrl; @@ -98,6 +97,26 @@ public RecordSpecBuilder captureHeader(String key, Boolean caseInsensitive) { return this; } + public RecordSpecBuilder requestBodyAutoPattern(boolean ignoreArrayOrder, boolean ignoreExtraElements, boolean caseInsensitive) { + this.requestBodyPatternFactory = new RequestBodyAutomaticPatternFactory(ignoreArrayOrder, ignoreExtraElements, caseInsensitive); + return this; + } + + public RecordSpecBuilder requestBodyEqualToJsonPattern(boolean ignoreArrayOrder, boolean ignoreExtraElements) { + this.requestBodyPatternFactory = new RequestBodyEqualToJsonPatternFactory(ignoreArrayOrder, ignoreExtraElements); + return this; + } + + public RecordSpecBuilder requestBodyEqualToXmlPattern() { + this.requestBodyPatternFactory = new RequestBodyEqualToXmlPatternFactory(); + return this; + } + + public RecordSpecBuilder requestBodyEqualToPattern(boolean caseInsensitive) { + this.requestBodyPatternFactory = new RequestBodyEqualToPatternFactory(caseInsensitive); + return this; + } + public RecordSpec build() { RequestPattern filterRequestPattern = filterRequestPatternBuilder != null ? filterRequestPatternBuilder.build() : @@ -112,17 +131,12 @@ public RecordSpec build() { targetBaseUrl, filters, headers.isEmpty() ? 
null : headers, + requestBodyPatternFactory, responseDefinitionBodyMatcher, SnapshotOutputFormatter.FULL, persistentStubs, repeatsAsScenarios, transformerNames, - transformerParameters, - jsonMatchingFlags); - } - - public RecordSpecBuilder jsonBodyMatchFlags(boolean ignoreArrayOrder, boolean ignoreExtraElements) { - this.jsonMatchingFlags = new JsonMatchingFlags(ignoreArrayOrder, ignoreExtraElements); - return this; + transformerParameters); } } diff --git a/src/main/java/com/github/tomakehurst/wiremock/recording/Recorder.java b/src/main/java/com/github/tomakehurst/wiremock/recording/Recorder.java index a80b967ced..9bbe627f16 100644 --- a/src/main/java/com/github/tomakehurst/wiremock/recording/Recorder.java +++ b/src/main/java/com/github/tomakehurst/wiremock/recording/Recorder.java @@ -106,7 +106,7 @@ public SnapshotRecordResult takeSnapshot(List<ServeEvent> serveEvents, RecordSpe final List<StubMapping> stubMappings = serveEventsToStubMappings( Lists.reverse(serveEvents), recordSpec.getFilters(), - new SnapshotStubMappingGenerator(recordSpec.getCaptureHeaders(), recordSpec.getJsonMatchingFlags()), + new SnapshotStubMappingGenerator(recordSpec.getCaptureHeaders(), recordSpec.getRequestBodyPatternFactory()), getStubMappingPostProcessor(admin.getOptions(), recordSpec) ); diff --git a/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyAutomaticPatternFactory.java b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyAutomaticPatternFactory.java new file mode 100644 index 0000000000..f0f3c3bc50 --- /dev/null +++ b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyAutomaticPatternFactory.java @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2011 Thomas Akehurst + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.github.tomakehurst.wiremock.recording; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.github.tomakehurst.wiremock.http.ContentTypeHeader; +import com.github.tomakehurst.wiremock.http.Request; +import com.github.tomakehurst.wiremock.matching.EqualToJsonPattern; +import com.github.tomakehurst.wiremock.matching.EqualToPattern; +import com.github.tomakehurst.wiremock.matching.EqualToXmlPattern; +import com.github.tomakehurst.wiremock.matching.StringValuePattern; + +public class RequestBodyAutomaticPatternFactory implements RequestBodyPatternFactory { + + private final Boolean caseInsensitive; + private final Boolean ignoreArrayOrder; + private final Boolean ignoreExtraElements; + + @JsonCreator + public RequestBodyAutomaticPatternFactory( + @JsonProperty("ignoreArrayOrder") Boolean ignoreArrayOrder, + @JsonProperty("ignoreExtraElements") Boolean ignoreExtraElements, + @JsonProperty("caseInsensitive") Boolean caseInsensitive) { + this.ignoreArrayOrder = ignoreArrayOrder == null ? true : ignoreArrayOrder; + this.ignoreExtraElements = ignoreExtraElements == null ? true : ignoreExtraElements; + this.caseInsensitive = caseInsensitive == null ? 
false : caseInsensitive; + } + + private RequestBodyAutomaticPatternFactory() { + this(null, null, null); + } + + public static final RequestBodyAutomaticPatternFactory DEFAULTS = new RequestBodyAutomaticPatternFactory(); + + public Boolean isIgnoreArrayOrder() { + return ignoreArrayOrder; + } + + public Boolean isIgnoreExtraElements() { + return ignoreExtraElements; + } + + public Boolean isCaseInsensitive() { + return caseInsensitive; + } + + /** + * If request body was JSON or XML, use "equalToJson" or "equalToXml" (respectively) in the RequestPattern so it's + * easier to read. Otherwise, just use "equalTo" + */ + @Override + public StringValuePattern forRequest(Request request) { + final ContentTypeHeader contentType = request.getHeaders().getContentTypeHeader(); + if (contentType.mimeTypePart() != null) { + if (contentType.mimeTypePart().contains("json")) { + return new EqualToJsonPattern(request.getBodyAsString(), ignoreArrayOrder, ignoreExtraElements); + } else if (contentType.mimeTypePart().contains("xml")) { + return new EqualToXmlPattern(request.getBodyAsString()); + } + } + + return new EqualToPattern(request.getBodyAsString(), caseInsensitive); + } +} diff --git a/src/main/java/com/github/tomakehurst/wiremock/recording/JsonMatchingFlags.java b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToJsonPatternFactory.java similarity index 62% rename from src/main/java/com/github/tomakehurst/wiremock/recording/JsonMatchingFlags.java rename to src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToJsonPatternFactory.java index 2a7f3c69fe..87b54fc90c 100644 --- a/src/main/java/com/github/tomakehurst/wiremock/recording/JsonMatchingFlags.java +++ b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToJsonPatternFactory.java @@ -15,15 +15,20 @@ */ package com.github.tomakehurst.wiremock.recording; +import com.fasterxml.jackson.annotation.JsonCreator; import 
com.fasterxml.jackson.annotation.JsonProperty; +import com.github.tomakehurst.wiremock.http.Request; +import com.github.tomakehurst.wiremock.matching.EqualToJsonPattern; -public class JsonMatchingFlags { +public class RequestBodyEqualToJsonPatternFactory implements RequestBodyPatternFactory { private final Boolean ignoreArrayOrder; private final Boolean ignoreExtraElements; - public JsonMatchingFlags(@JsonProperty("ignoreArrayOrder") Boolean ignoreArrayOrder, - @JsonProperty("ignoreExtraElements") Boolean ignoreExtraElements) { + @JsonCreator + public RequestBodyEqualToJsonPatternFactory( + @JsonProperty("ignoreArrayOrder") Boolean ignoreArrayOrder, + @JsonProperty("ignoreExtraElements") Boolean ignoreExtraElements) { this.ignoreArrayOrder = ignoreArrayOrder; this.ignoreExtraElements = ignoreExtraElements; } @@ -35,4 +40,9 @@ public Boolean isIgnoreArrayOrder() { public Boolean isIgnoreExtraElements() { return ignoreExtraElements; } + + @Override + public EqualToJsonPattern forRequest(Request request) { + return new EqualToJsonPattern(request.getBodyAsString(), ignoreArrayOrder, ignoreExtraElements); + } } diff --git a/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToPatternFactory.java b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToPatternFactory.java new file mode 100644 index 0000000000..f6c7b9dad0 --- /dev/null +++ b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToPatternFactory.java @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2011 Thomas Akehurst + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.github.tomakehurst.wiremock.recording; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.github.tomakehurst.wiremock.http.Request; +import com.github.tomakehurst.wiremock.matching.EqualToPattern; + +public class RequestBodyEqualToPatternFactory implements RequestBodyPatternFactory { + + private final Boolean caseInsensitive; + + @JsonCreator + public RequestBodyEqualToPatternFactory(@JsonProperty("caseInsensitive") Boolean caseInsensitive) { + this.caseInsensitive = caseInsensitive; + } + + public Boolean isCaseInsensitive() { + return caseInsensitive; + } + + @Override + public EqualToPattern forRequest(Request request) { + return new EqualToPattern(request.getBodyAsString(), caseInsensitive); + } +} diff --git a/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToXmlPatternFactory.java b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToXmlPatternFactory.java new file mode 100644 index 0000000000..0c31cc75c7 --- /dev/null +++ b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToXmlPatternFactory.java @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2011 Thomas Akehurst + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.github.tomakehurst.wiremock.recording; + +import com.github.tomakehurst.wiremock.http.Request; +import com.github.tomakehurst.wiremock.matching.EqualToXmlPattern; + +public class RequestBodyEqualToXmlPatternFactory implements RequestBodyPatternFactory { + + @Override + public EqualToXmlPattern forRequest(Request request) { + return new EqualToXmlPattern(request.getBodyAsString()); + } +} diff --git a/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyPatternFactory.java b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyPatternFactory.java new file mode 100644 index 0000000000..c2f350d292 --- /dev/null +++ b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestBodyPatternFactory.java @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2011 Thomas Akehurst + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.github.tomakehurst.wiremock.recording; + +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.github.tomakehurst.wiremock.http.Request; +import com.github.tomakehurst.wiremock.matching.StringValuePattern; + +/** + * Factory for the StringValuePattern to use in a recorded stub mapping to match request bodies + */ +@JsonTypeInfo( + use = JsonTypeInfo.Id.NAME, + include = JsonTypeInfo.As.PROPERTY, + property = "matcher", + defaultImpl = RequestBodyAutomaticPatternFactory.class +) +@JsonSubTypes({ + @JsonSubTypes.Type(value = RequestBodyAutomaticPatternFactory.class, name = "auto"), + @JsonSubTypes.Type(value = RequestBodyEqualToPatternFactory.class, name = "equalTo"), + @JsonSubTypes.Type(value = RequestBodyEqualToJsonPatternFactory.class, name = "equalToJson"), + @JsonSubTypes.Type(value = RequestBodyEqualToXmlPatternFactory.class, name = "equalToXml") +}) +public interface RequestBodyPatternFactory { + StringValuePattern forRequest(Request request); +} diff --git a/src/main/java/com/github/tomakehurst/wiremock/recording/RequestPatternTransformer.java b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestPatternTransformer.java index 65c7c36afd..dd194e99b7 100644 --- a/src/main/java/com/github/tomakehurst/wiremock/recording/RequestPatternTransformer.java +++ b/src/main/java/com/github/tomakehurst/wiremock/recording/RequestPatternTransformer.java @@ -15,9 +15,6 @@ */ package com.github.tomakehurst.wiremock.recording; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.github.tomakehurst.wiremock.http.ContentTypeHeader; import com.github.tomakehurst.wiremock.http.Request; import com.github.tomakehurst.wiremock.matching.*; import com.google.common.base.Function; @@ -33,13 +30,13 @@ */ public class RequestPatternTransformer implements Function<Request, RequestPatternBuilder> { private final Map<String, 
CaptureHeadersSpec> headers; - private final JsonMatchingFlags jsonMatchingFlags; + private final RequestBodyPatternFactory bodyPatternFactory; - @JsonCreator - public RequestPatternTransformer(@JsonProperty("headers") Map<String, CaptureHeadersSpec> headers, - @JsonProperty("jsonMatchingFlags") JsonMatchingFlags jsonMatchingFlags) { + public RequestPatternTransformer( + Map<String, CaptureHeadersSpec> headers, + RequestBodyPatternFactory bodyPatternFactory) { this.headers = headers; - this.jsonMatchingFlags = jsonMatchingFlags; + this.bodyPatternFactory = bodyPatternFactory; } /** @@ -62,29 +59,10 @@ public RequestPatternBuilder apply(Request request) { } String body = request.getBodyAsString(); - if (body != null && !body.isEmpty()) { - builder.withRequestBody(valuePatternForContentType(request)); + if (bodyPatternFactory != null && body != null && !body.isEmpty()) { + builder.withRequestBody(bodyPatternFactory.forRequest(request)); } return builder; } - - /** - * If request body was JSON or XML, use "equalToJson" or "equalToXml" (respectively) in the RequestPattern so it's - * easier to read. Otherwise, just use "equalTo" - */ - private StringValuePattern valuePatternForContentType(Request request) { - final ContentTypeHeader contentType = request.getHeaders().getContentTypeHeader(); - if (contentType.mimeTypePart() != null) { - if (contentType.mimeTypePart().contains("json")) { - return jsonMatchingFlags == null ? 
- equalToJson(request.getBodyAsString()) : - equalToJson(request.getBodyAsString(), jsonMatchingFlags.isIgnoreArrayOrder(), jsonMatchingFlags.isIgnoreExtraElements()); - } else if (contentType.mimeTypePart().contains("xml")) { - return equalToXml(request.getBodyAsString()); - } - } - - return equalTo(request.getBodyAsString()); - } } diff --git a/src/main/java/com/github/tomakehurst/wiremock/recording/SnapshotStubMappingGenerator.java b/src/main/java/com/github/tomakehurst/wiremock/recording/SnapshotStubMappingGenerator.java index 32e9ecebad..5fb93f6715 100644 --- a/src/main/java/com/github/tomakehurst/wiremock/recording/SnapshotStubMappingGenerator.java +++ b/src/main/java/com/github/tomakehurst/wiremock/recording/SnapshotStubMappingGenerator.java @@ -40,9 +40,9 @@ public SnapshotStubMappingGenerator( this.responseTransformer = responseTransformer; } - public SnapshotStubMappingGenerator(Map<String, CaptureHeadersSpec> captureHeaders, JsonMatchingFlags jsonMatchingFlags) { + public SnapshotStubMappingGenerator(Map<String, CaptureHeadersSpec> captureHeaders, RequestBodyPatternFactory requestBodyPatternFactory) { this( - new RequestPatternTransformer(captureHeaders, jsonMatchingFlags), + new RequestPatternTransformer(captureHeaders, requestBodyPatternFactory), new LoggedResponseDefinitionTransformer() ); } diff --git a/src/main/resources/raml/examples/record-spec.example.json b/src/main/resources/raml/examples/record-spec.example.json index 8fff7ed1bc..fef1992388 100644 --- a/src/main/resources/raml/examples/record-spec.example.json +++ b/src/main/resources/raml/examples/record-spec.example.json @@ -10,6 +10,11 @@ "caseInsensitive" : true } }, + "requestBodyPattern" : { + "matcher" : "equalToJson", + "ignoreArrayOrder" : false, + "ignoreExtraElements" : true + }, "extractBodyCriteria" : { "textSizeThreshold" : "2048", "binarySizeThreshold" : "10240" @@ -19,9 +24,5 @@ "transformers" : [ "modify-response-header" ], "transformerParameters" : { "headerValue" : "123" - 
}, - "jsonMatchingFlags" : { - "ignoreArrayOrder" : false, - "ignoreExtraElements" : true } } \ No newline at end of file diff --git a/src/main/resources/raml/examples/snapshot-spec.example.json b/src/main/resources/raml/examples/snapshot-spec.example.json index 45317b706f..b9a3e18fba 100644 --- a/src/main/resources/raml/examples/snapshot-spec.example.json +++ b/src/main/resources/raml/examples/snapshot-spec.example.json @@ -10,6 +10,11 @@ "caseInsensitive" : true } }, + "requestBodyPattern" : { + "matcher" : "equalToJson", + "ignoreArrayOrder" : false, + "ignoreExtraElements" : true + }, "extractBodyCriteria" : { "textSizeThreshold" : "2 kb", "binarySizeThreshold" : "1 Mb" @@ -20,9 +25,5 @@ "transformers" : [ "modify-response-header" ], "transformerParameters" : { "headerValue" : "123" - }, - "jsonMatchingFlags" : { - "ignoreArrayOrder" : false, - "ignoreExtraElements" : true } } \ No newline at end of file diff --git a/src/main/resources/raml/schemas/record-spec.schema.json b/src/main/resources/raml/schemas/record-spec.schema.json index 4c49d63585..e3db883663 100644 --- a/src/main/resources/raml/schemas/record-spec.schema.json +++ b/src/main/resources/raml/schemas/record-spec.schema.json @@ -18,6 +18,29 @@ } } }, + "requestBodyPattern": { + "type": "object", + "properties": { + "matcher": { + "type": "string", + "enum": [ + "equalTo", + "equalToJson", + "equalToXml", + "auto" + ] + }, + "ignoreArrayOrder": { + "type": "boolean" + }, + "ignoreExtraElements": { + "type": "boolean" + }, + "caseInsensitive": { + "type": "boolean" + } + } + }, "filters": { "type": "object", "properties": { @@ -35,17 +58,6 @@ } } }, - "jsonMatchingFlags": { - "type": "object", - "properties": { - "ignoreArrayOrder": { - "type": "boolean" - }, - "ignoreExtraElements": { - "type": "boolean" - } - } - }, "persist": { "type": "boolean" }, diff --git a/src/test/java/com/github/tomakehurst/wiremock/RecordingDslAcceptanceTest.java 
b/src/test/java/com/github/tomakehurst/wiremock/RecordingDslAcceptanceTest.java index dc3233ed89..e7c592e9c0 100644 --- a/src/test/java/com/github/tomakehurst/wiremock/RecordingDslAcceptanceTest.java +++ b/src/test/java/com/github/tomakehurst/wiremock/RecordingDslAcceptanceTest.java @@ -199,7 +199,7 @@ public void supportsInstanceClientWithSpec() { adminClient.startStubRecording( recordSpec() .forTarget(targetBaseUrl) - .jsonBodyMatchFlags(true, true) + .requestBodyEqualToJsonPattern(true, true) ); client.postJson("/record-this-with-body", "{}"); @@ -218,7 +218,7 @@ public void supportsDirectDslCallsWithSpec() { proxyingService.startRecording( recordSpec() .forTarget(targetBaseUrl) - .jsonBodyMatchFlags(true, true) + .requestBodyEqualToJsonPattern(true, true) ); client.postJson("/record-this-with-body", "{}"); diff --git a/src/test/java/com/github/tomakehurst/wiremock/SnapshotDslAcceptanceTest.java b/src/test/java/com/github/tomakehurst/wiremock/SnapshotDslAcceptanceTest.java index 89129b6cfb..35a96d31bd 100644 --- a/src/test/java/com/github/tomakehurst/wiremock/SnapshotDslAcceptanceTest.java +++ b/src/test/java/com/github/tomakehurst/wiremock/SnapshotDslAcceptanceTest.java @@ -22,9 +22,7 @@ import com.github.tomakehurst.wiremock.extension.StubMappingTransformer; import com.github.tomakehurst.wiremock.http.RequestMethod; import com.github.tomakehurst.wiremock.http.ResponseDefinition; -import com.github.tomakehurst.wiremock.matching.EqualToJsonPattern; -import com.github.tomakehurst.wiremock.matching.EqualToPattern; -import com.github.tomakehurst.wiremock.matching.StringValuePattern; +import com.github.tomakehurst.wiremock.matching.*; import com.github.tomakehurst.wiremock.stubbing.Scenario; import com.github.tomakehurst.wiremock.stubbing.StubMapping; import com.github.tomakehurst.wiremock.testsupport.WireMatchers; @@ -106,8 +104,8 @@ public void snapshotRecordsAllLoggedRequestsWhenNoParametersPassed() throws Exce JSONAssert.assertEquals("{ \"counter\": 55 }", 
bodyPattern.getExpected(), true); EqualToJsonPattern equalToJsonPattern = (EqualToJsonPattern) bodyPattern; - assertThat(equalToJsonPattern.isIgnoreArrayOrder(), nullValue()); - assertThat(equalToJsonPattern.isIgnoreExtraElements(), nullValue()); + assertThat(equalToJsonPattern.isIgnoreArrayOrder(), is(true)); + assertThat(equalToJsonPattern.isIgnoreExtraElements(), is(true)); } @Test @@ -253,27 +251,64 @@ public void appliesTransformerWithParameters() { } @Test - public void supportsConfigurationOfJsonBodyMatching() { + public void supportsConfigurationOfAutoRequestBodyPatternFactory() { client.postJson("/some-json", "{}"); + client.postWithBody("/some-json", "<foo/>", "application/xml", "utf-8"); + client.postWithBody("/some-json", "foo", "application/text", "utf-8"); - List<StubMapping> mappings = snapshotRecord( - recordSpec().jsonBodyMatchFlags(true, true) - ); + List<StubMapping> mappings = snapshotRecord(recordSpec().requestBodyAutoPattern(false, false, true)); + + EqualToJsonPattern jsonBodyPattern = (EqualToJsonPattern) mappings.get(0).getRequest().getBodyPatterns().get(0); + assertThat(jsonBodyPattern.getEqualToJson(), is("{}")); + assertThat(jsonBodyPattern.isIgnoreArrayOrder(), is(false)); + assertThat(jsonBodyPattern.isIgnoreExtraElements(), is(false)); + + EqualToXmlPattern xmlBodyPattern = (EqualToXmlPattern) mappings.get(1).getRequest().getBodyPatterns().get(0); + assertThat(xmlBodyPattern.getEqualToXml(), is("<foo/>")); + + EqualToPattern textBodyPattern = (EqualToPattern) mappings.get(2).getRequest().getBodyPatterns().get(0); + assertThat(textBodyPattern.getEqualTo(), is("foo")); + assertThat(textBodyPattern.getCaseInsensitive(), is(true)); + } + + @Test + public void supportsConfigurationOfRequestBodyPatternFactoryWithEqualToJsonPattern() { + client.postJson("/some-json", "{}"); + + List<StubMapping> mappings = snapshotRecord(recordSpec().requestBodyEqualToJsonPattern(false, true)); EqualToJsonPattern bodyPattern = (EqualToJsonPattern) 
mappings.get(0).getRequest().getBodyPatterns().get(0); - assertThat(bodyPattern.isIgnoreArrayOrder(), is(true)); + assertThat(bodyPattern.isIgnoreArrayOrder(), is(false)); assertThat(bodyPattern.isIgnoreExtraElements(), is(true)); } @Test - public void defaultsToNoJsonBodyMatchingFlags() { + public void supportsConfigurationOfRequestBodyPatternFactoryWithEqualToXmlPattern() { + client.postWithBody("/some-json", "<foo/>", "application/xml", "utf-8"); + + List<StubMapping> mappings = snapshotRecord(recordSpec().requestBodyEqualToXmlPattern()); + + assertThat(mappings.get(0).getRequest().getBodyPatterns().get(0), instanceOf(EqualToXmlPattern.class)); + } + + @Test + public void supportsConfigurationOfRequestBodyPatternFactoryWithEqualToPattern() { + client.postWithBody("/some-json", "foo", "application/text", "utf-8"); + + List<StubMapping> mappings = snapshotRecord(recordSpec().requestBodyEqualToPattern(true)); + + EqualToPattern bodyPattern = (EqualToPattern) mappings.get(0).getRequest().getBodyPatterns().get(0); + assertThat(bodyPattern.getCaseInsensitive(), is(true)); + } + + @Test + public void defaultsToAutomaticRequestBodyPattern() { client.postJson("/some-json", "{}"); List<StubMapping> mappings = snapshotRecord(recordSpec()); EqualToJsonPattern bodyPattern = (EqualToJsonPattern) mappings.get(0).getRequest().getBodyPatterns().get(0); - assertThat(bodyPattern.isIgnoreArrayOrder(), nullValue()); - assertThat(bodyPattern.isIgnoreExtraElements(), nullValue()); + assertThat(bodyPattern, is(new EqualToJsonPattern("{}", true, true))); } @Test diff --git a/src/test/java/com/github/tomakehurst/wiremock/recording/RequestBodyAutomaticPatternFactoryTest.java b/src/test/java/com/github/tomakehurst/wiremock/recording/RequestBodyAutomaticPatternFactoryTest.java new file mode 100644 index 0000000000..a64088f4c6 --- /dev/null +++ b/src/test/java/com/github/tomakehurst/wiremock/recording/RequestBodyAutomaticPatternFactoryTest.java @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2011 
Thomas Akehurst + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.github.tomakehurst.wiremock.recording; + +import com.github.tomakehurst.wiremock.http.Request; +import com.github.tomakehurst.wiremock.matching.*; +import org.junit.Test; + +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; +import static com.github.tomakehurst.wiremock.matching.MockRequest.mockRequest; + +public class RequestBodyAutomaticPatternFactoryTest { + private final static String JSON_TEST_STRING = "{ \"foo\": 1 }"; + private final static String XML_TEST_STRING = "<foo/>"; + + @Test + public void forRequestWithTextBodyIsCaseSensitiveByDefault() { + Request request = mockRequest().body(JSON_TEST_STRING); + EqualToPattern pattern = (EqualToPattern) patternForRequest(request); + + assertThat(pattern.getEqualTo(), is(JSON_TEST_STRING)); + assertThat(pattern.getCaseInsensitive(), is(false)); + } + + @Test + public void forRequestWithTextBodyRespectsCaseInsensitiveOption() { + Request request = mockRequest().body(JSON_TEST_STRING); + RequestBodyAutomaticPatternFactory patternFactory = new RequestBodyAutomaticPatternFactory(false, false, true); + EqualToPattern pattern = (EqualToPattern) patternFactory.forRequest(request); + + assertThat(pattern.getEqualTo(), is(JSON_TEST_STRING)); + assertThat(pattern.getCaseInsensitive(), is(true)); + } + + @Test + public void forRequestWithJsonBodyIgnoresExtraElementsAndArrayOrderByDefault() { + Request 
request = mockRequest() + .header("Content-Type", "application/json") + .body(JSON_TEST_STRING); + EqualToJsonPattern pattern = (EqualToJsonPattern) patternForRequest(request); + + assertThat(pattern.getEqualToJson(), is(JSON_TEST_STRING)); + assertThat(pattern.isIgnoreExtraElements(), is(true)); + assertThat(pattern.isIgnoreArrayOrder(), is(true)); + } + + @Test + public void forRequestWithJsonBodyRespectsOptions() { + RequestBodyAutomaticPatternFactory patternFactory = new RequestBodyAutomaticPatternFactory(false, false, false); + Request request = mockRequest() + .header("Content-Type", "application/json") + .body(JSON_TEST_STRING); + EqualToJsonPattern pattern = (EqualToJsonPattern) patternFactory.forRequest(request); + + assertThat(pattern.getEqualToJson(), is(JSON_TEST_STRING)); + assertThat(pattern.isIgnoreExtraElements(), is(false)); + assertThat(pattern.isIgnoreArrayOrder(), is(false)); + } + + @Test + public void forRequestWithXmlBody() { + Request request = mockRequest() + .header("Content-Type", "application/xml") + .body(XML_TEST_STRING); + EqualToXmlPattern pattern = (EqualToXmlPattern) patternForRequest(request); + + assertThat(pattern.getEqualToXml(), is(XML_TEST_STRING)); + } + + private static StringValuePattern patternForRequest(Request request) { + return RequestBodyAutomaticPatternFactory.DEFAULTS.forRequest(request); + } +} diff --git a/src/test/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToJsonPatternFactoryTest.java b/src/test/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToJsonPatternFactoryTest.java new file mode 100644 index 0000000000..4b30beebef --- /dev/null +++ b/src/test/java/com/github/tomakehurst/wiremock/recording/RequestBodyEqualToJsonPatternFactoryTest.java @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2011 Thomas Akehurst + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.github.tomakehurst.wiremock.recording; + +import com.github.tomakehurst.wiremock.matching.EqualToJsonPattern; +import org.junit.Test; + +import static com.github.tomakehurst.wiremock.matching.MockRequest.mockRequest; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; + +public class RequestBodyEqualToJsonPatternFactoryTest { + + @Test + public void withIgnoreArrayOrder() { + RequestBodyEqualToJsonPatternFactory patternFactory = new RequestBodyEqualToJsonPatternFactory(true, false); + EqualToJsonPattern pattern = (EqualToJsonPattern) patternFactory.forRequest(mockRequest().body("{}")); + + assertThat(pattern.getEqualToJson(), is("{}")); + assertThat(pattern.isIgnoreExtraElements(), is(false)); + assertThat(pattern.isIgnoreArrayOrder(), is(true)); + } + + @Test + public void withIgnoreExtraElements() { + RequestBodyEqualToJsonPatternFactory patternFactory = new RequestBodyEqualToJsonPatternFactory(false, true); + EqualToJsonPattern pattern = (EqualToJsonPattern) patternFactory.forRequest(mockRequest().body("{}")); + + assertThat(pattern.getEqualToJson(), is("{}")); + assertThat(pattern.isIgnoreExtraElements(), is(true)); + assertThat(pattern.isIgnoreArrayOrder(), is(false)); + } +} diff --git a/src/test/java/com/github/tomakehurst/wiremock/recording/RequestBodyPatternFactoryJsonDeserializerTest.java b/src/test/java/com/github/tomakehurst/wiremock/recording/RequestBodyPatternFactoryJsonDeserializerTest.java new file mode 100644 index 0000000000..ee2b6c3021 --- /dev/null +++ 
b/src/test/java/com/github/tomakehurst/wiremock/recording/RequestBodyPatternFactoryJsonDeserializerTest.java @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2011 Thomas Akehurst + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.github.tomakehurst.wiremock.recording; + +import com.github.tomakehurst.wiremock.common.Json; +import com.github.tomakehurst.wiremock.matching.*; +import org.junit.Test; + +import static com.github.tomakehurst.wiremock.matching.MockRequest.mockRequest; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; + +public class RequestBodyPatternFactoryJsonDeserializerTest { + @Test + public void correctlyDeserializesWithEmptyObject() { + RequestBodyPatternFactory bodyPatternFactory = deserializeJson("{}"); + assertThat(bodyPatternFactory, instanceOf(RequestBodyAutomaticPatternFactory.class)); + } + + @Test + public void correctlyDeserializesWithAutoMatcher() { + RequestBodyPatternFactory bodyPatternFactory = deserializeJson("{ \"matcher\": \"auto\" }"); + assertThat(bodyPatternFactory, instanceOf(RequestBodyAutomaticPatternFactory.class)); + } + + @Test + public void correctlyDeserializesWithEqualToMatcher() { + RequestBodyPatternFactory bodyPatternFactory = deserializeJson( + "{ \n" + + " \"matcher\": \"equalTo\", \n" + + " \"caseInsensitive\": true \n" + + "} " + ); + EqualToPattern bodyPattern = (EqualToPattern) 
bodyPatternFactory.forRequest(mockRequest()); + assertThat(bodyPattern.getCaseInsensitive(), is(true)); + } + + @Test + public void correctlyDeserializesWithEqualToJsonMatcher() { + RequestBodyPatternFactory bodyPatternFactory = deserializeJson( + "{ \n" + + " \"matcher\": \"equalToJson\", \n" + + " \"ignoreArrayOrder\": false, \n" + + " \"ignoreExtraElements\": true \n" + + "} " + ); + EqualToJsonPattern bodyPattern = (EqualToJsonPattern) bodyPatternFactory.forRequest(mockRequest().body("1")); + assertThat(bodyPattern.isIgnoreArrayOrder(), is(false)); + assertThat(bodyPattern.isIgnoreExtraElements(), is(true)); + + } + + @Test + public void correctlyDeserializesWithEqualToXmlMatcher() { + RequestBodyPatternFactory bodyPatternFactory = deserializeJson("{ \"matcher\": \"equalToXml\" }"); + assertThat(bodyPatternFactory, instanceOf(RequestBodyEqualToXmlPatternFactory.class)); + } + + private static RequestBodyPatternFactory deserializeJson(String json) { + return Json.read(json, RequestBodyPatternFactory.class); + } +} diff --git a/src/test/java/com/github/tomakehurst/wiremock/recording/RequestPatternTransformerTest.java b/src/test/java/com/github/tomakehurst/wiremock/recording/RequestPatternTransformerTest.java index 007a2d24c2..3bbe1ac1b8 100644 --- a/src/test/java/com/github/tomakehurst/wiremock/recording/RequestPatternTransformerTest.java +++ b/src/test/java/com/github/tomakehurst/wiremock/recording/RequestPatternTransformerTest.java @@ -15,75 +15,50 @@ */ package com.github.tomakehurst.wiremock.recording; -import com.github.tomakehurst.wiremock.recording.CaptureHeadersSpec; -import com.github.tomakehurst.wiremock.recording.RequestPatternTransformer; import com.github.tomakehurst.wiremock.http.Request; import com.github.tomakehurst.wiremock.http.RequestMethod; -import com.github.tomakehurst.wiremock.matching.RequestPatternBuilder; +import com.github.tomakehurst.wiremock.matching.*; +import com.google.common.collect.ImmutableMap; import org.junit.Test; import 
java.util.Map; import static com.github.tomakehurst.wiremock.client.WireMock.*; import static com.github.tomakehurst.wiremock.matching.MockRequest.mockRequest; -import static com.google.common.collect.Maps.newLinkedHashMap; import static org.junit.Assert.assertEquals; public class RequestPatternTransformerTest { @Test - public void applyWithDefaultsAndNoBody() { + public void applyIncludesMethodAndUrlMatchers() { Request request = mockRequest() .url("/foo") .method(RequestMethod.GET) .header("User-Agent", "foo") .header("X-Foo", "bar"); + RequestPatternBuilder expected = new RequestPatternBuilder(RequestMethod.GET, urlEqualTo("/foo")); - // Default is to include method and URL exactly assertEquals(expected.build(), new RequestPatternTransformer(null, null).apply(request).build()); } @Test - public void applyWithUrlAndPlainTextBody() { + public void applyWithHeaders() { Request request = mockRequest() - .url("/foo") - .method(RequestMethod.GET) - .body("HELLO") - .header("Accept", "foo") - .header("User-Agent", "bar"); + .url("/") + .method(RequestMethod.POST) + .header("X-CaseSensitive", "foo") + .header("X-Ignored", "ignored") + .header("X-CaseInsensitive", "Baz"); - RequestPatternBuilder expected = new RequestPatternBuilder(RequestMethod.GET, urlEqualTo("/foo")) - .withRequestBody(equalTo("HELLO")); + RequestPatternBuilder expected = new RequestPatternBuilder(RequestMethod.POST, urlEqualTo("/")) + .withHeader("X-CaseSensitive", equalTo("foo")) + .withHeader("X-CaseInsensitive", equalToIgnoreCase("Baz")); - Map<String, CaptureHeadersSpec> headers = newLinkedHashMap(); + Map<String, CaptureHeadersSpec> headers = ImmutableMap.of( + "X-CaseSensitive", new CaptureHeadersSpec(false), + "X-CaseInsensitive", new CaptureHeadersSpec(true) + ); assertEquals(expected.build(), new RequestPatternTransformer(headers, null).apply(request).build()); } - - @Test - public void applyWithOnlyJsonBody() { - Request request = mockRequest() - .url("/somewhere") - .header("Content-Type", 
"application/json") - .body("['hello']"); - RequestPatternBuilder expected = new RequestPatternBuilder() - .withUrl("/somewhere") - .withRequestBody(equalToJson("['hello']")); - - assertEquals(expected.build(), new RequestPatternTransformer(null, null).apply(request).build()); - } - - @Test - public void applyWithOnlyXmlBody() { - Request request = mockRequest() - .url("/somewhere") - .header("Content-Type", "application/xml") - .body("<foo/>"); - - RequestPatternBuilder expected = new RequestPatternBuilder() - .withUrl("/somewhere") - .withRequestBody(equalToXml("<foo/>")); - - assertEquals(expected.build(), new RequestPatternTransformer(null, null).apply(request).build()); - } } diff --git a/src/test/java/ignored/Examples.java b/src/test/java/ignored/Examples.java index 4dd2fd738b..fc50a248de 100644 --- a/src/test/java/ignored/Examples.java +++ b/src/test/java/ignored/Examples.java @@ -417,7 +417,7 @@ public void recordingDsl() { .ignoreRepeatRequests() .transformers("modify-response-header") .transformerParameters(Parameters.one("headerValue", "123")) - .jsonBodyMatchFlags(false, true) + .requestBodyEqualToJsonPattern(false, true) ); System.out.println(Json.write(recordSpec() @@ -431,7 +431,7 @@ public void recordingDsl() { .ignoreRepeatRequests() .transformers("modify-response-header") .transformerParameters(Parameters.one("headerValue", "123")) - .jsonBodyMatchFlags(false, true) + .requestBodyEqualToJsonPattern(false, true) .build())); } @@ -449,7 +449,7 @@ public void snapshotDsl() { .ignoreRepeatRequests() .transformers("modify-response-header") .transformerParameters(Parameters.one("headerValue", "123")) - .jsonBodyMatchFlags(false, true) + .requestBodyEqualToJsonPattern(false, true) ); System.out.println(Json.write(recordSpec() @@ -463,7 +463,7 @@ public void snapshotDsl() { .ignoreRepeatRequests() .transformers("modify-response-header") .transformerParameters(Parameters.one("headerValue", "123")) - .jsonBodyMatchFlags(false, true) + 
.requestBodyEqualToJsonPattern(false, true) .build())); } }
4f79b07e174ed1f57115a6b0a9f6a6e74e6733ee
hadoop
HADOOP-6932. Namenode start (init) fails because- of invalid kerberos key
c
https://github.com/apache/hadoop
diff --git a/CHANGES.txt b/CHANGES.txt index f43935c87233a..72a1e3e6ffa26 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -220,6 +220,9 @@ Trunk (unreleased changes) HADOOP-6833. IPC leaks call parameters when exceptions thrown. (Todd Lipcon via Eli Collins) + HADOOP-6932. Namenode start (init) fails because of invalid kerberos + key, even when security set to "simple" (boryas) + Release 0.21.0 - Unreleased INCOMPATIBLE CHANGES diff --git a/src/java/org/apache/hadoop/security/SecurityUtil.java b/src/java/org/apache/hadoop/security/SecurityUtil.java index 00187bd6f2401..44ef31ef32989 100644 --- a/src/java/org/apache/hadoop/security/SecurityUtil.java +++ b/src/java/org/apache/hadoop/security/SecurityUtil.java @@ -174,7 +174,7 @@ static String getLocalHostName() throws UnknownHostException { } /** - * If a keytab has been provided, login as that user. Substitute $host in + * Login as a principal specified in config. Substitute $host in * user's Kerberos principal name with a dynamically looked-up fully-qualified * domain name of the current host. * @@ -192,8 +192,9 @@ public static void login(final Configuration conf, } /** - * If a keytab has been provided, login as that user. Substitute $host in - * user's Kerberos principal name with hostname. + * Login as a principal specified in config. Substitute $host in user's Kerberos principal + * name with hostname. If non-secure mode - return. If no keytab available - + * bail out with an exception * * @param conf * conf to use @@ -208,9 +209,14 @@ public static void login(final Configuration conf, public static void login(final Configuration conf, final String keytabFileKey, final String userNameKey, String hostname) throws IOException { - String keytabFilename = conf.get(keytabFileKey); - if (keytabFilename == null) + + if(! 
UserGroupInformation.isSecurityEnabled()) return; + + String keytabFilename = conf.get(keytabFileKey); + if (keytabFilename == null || keytabFilename.length() == 0) { + throw new IOException("Running in secure mode, but config doesn't have a keytab"); + } String principalConfig = conf.get(userNameKey, System .getProperty("user.name")); diff --git a/src/test/core/org/apache/hadoop/security/TestSecurityUtil.java b/src/test/core/org/apache/hadoop/security/TestSecurityUtil.java index 14ec74372d091..d5a3a25f90972 100644 --- a/src/test/core/org/apache/hadoop/security/TestSecurityUtil.java +++ b/src/test/core/org/apache/hadoop/security/TestSecurityUtil.java @@ -16,12 +16,15 @@ */ package org.apache.hadoop.security; -import static org.junit.Assert.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.IOException; import javax.security.auth.kerberos.KerberosPrincipal; +import org.apache.hadoop.conf.Configuration; +import org.junit.Assert; import org.junit.Test; public class TestSecurityUtil { @@ -70,4 +73,23 @@ public void testGetServerPrincipal() throws IOException { verify(shouldNotReplace, hostname, shouldNotReplace); verify(shouldNotReplace, shouldNotReplace, shouldNotReplace); } + + @Test + public void testStartsWithIncorrectSettings() throws IOException { + Configuration conf = new Configuration(); + conf.set( + org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + String keyTabKey="key"; + conf.set(keyTabKey, ""); + UserGroupInformation.setConfiguration(conf); + boolean gotException = false; + try { + SecurityUtil.login(conf, keyTabKey, "", ""); + } catch (IOException e) { + // expected + gotException=true; + } + assertTrue("Exception for empty keytabfile name was expected", gotException); + } }
5dabaf626e0a3493889eadcbd5ebf73d4e145912
camel
CAMEL-1091 - Fix compilation issue on Java 1.5--git-svn-id: https://svn.apache.org/repos/asf/activemq/camel/trunk@718279 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/camel
diff --git a/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/InterfacesTest.java b/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/InterfacesTest.java index b36e2faef83de..337ac66092c4a 100644 --- a/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/InterfacesTest.java +++ b/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/InterfacesTest.java @@ -15,25 +15,27 @@ public class InterfacesTest extends ContextTestSupport { private String remoteInterfaceAddress; - public InterfacesTest() throws SocketException { - // retirieve an address of some remote network interface + public InterfacesTest() throws IOException { + // Retrieve an address of some remote network interface Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces(); while(interfaces.hasMoreElements()) { NetworkInterface interfaze = interfaces.nextElement(); - if (!interfaze.isUp() || interfaze.isLoopback()) { - continue; - } Enumeration<InetAddress> addresses = interfaze.getInetAddresses(); - if(addresses.hasMoreElements()) { - remoteInterfaceAddress = addresses.nextElement().getHostAddress(); + if(addresses.hasMoreElements()) { + InetAddress nextAddress = addresses.nextElement(); + if (nextAddress.isLoopbackAddress() || nextAddress.isReachable(2000)) { + break; + } + remoteInterfaceAddress = nextAddress.getHostAddress(); } }; } public void testLocalInterfaceHandled() throws IOException, InterruptedException { - getMockEndpoint("mock:endpoint").expectedMessageCount(3); + int expectedMessages = (remoteInterfaceAddress != null) ? 
3 : 2; + getMockEndpoint("mock:endpoint").expectedMessageCount(expectedMessages); URL localUrl = new URL("http://localhost:4567/testRoute"); String localResponse = IOUtils.toString(localUrl.openStream()); @@ -44,9 +46,11 @@ public void testLocalInterfaceHandled() throws IOException, InterruptedException localResponse = IOUtils.toString(localUrl.openStream()); assertEquals("local-differentPort", localResponse); - URL url = new URL("http://" + remoteInterfaceAddress + ":4567/testRoute"); - String remoteResponse = IOUtils.toString(url.openStream()); - assertEquals("remote", remoteResponse); + if (remoteInterfaceAddress != null) { + URL url = new URL("http://" + remoteInterfaceAddress + ":4567/testRoute"); + String remoteResponse = IOUtils.toString(url.openStream()); + assertEquals("remote", remoteResponse); + } assertMockEndpointsSatisfied(); } @@ -65,9 +69,11 @@ public void configure() throws Exception { .setBody().constant("local-differentPort") .to("mock:endpoint"); - from("jetty:http://" + remoteInterfaceAddress + ":4567/testRoute") - .setBody().constant("remote") - .to("mock:endpoint"); + if (remoteInterfaceAddress != null) { + from("jetty:http://" + remoteInterfaceAddress + ":4567/testRoute") + .setBody().constant("remote") + .to("mock:endpoint"); + } } }; }
bc79855cf59458c38498b449f3b8ab99fa1191c9
kotlin
Minor: additional logging when failing to read- file in VirtualFileKotlinClass--
p
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/kotlin/VirtualFileKotlinClass.java b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/kotlin/VirtualFileKotlinClass.java index a63970f91bd2d..76305bd1cf1d3 100644 --- a/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/kotlin/VirtualFileKotlinClass.java +++ b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/kotlin/VirtualFileKotlinClass.java @@ -16,6 +16,7 @@ package org.jetbrains.jet.lang.resolve.kotlin; +import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.util.Ref; import com.intellij.openapi.vfs.VirtualFile; import kotlin.Function0; @@ -34,12 +35,12 @@ import org.jetbrains.jet.storage.StorageManager; import org.jetbrains.jet.utils.UtilsPackage; -import java.io.IOException; - import static org.jetbrains.asm4.ClassReader.*; import static org.jetbrains.asm4.Opcodes.ASM4; public class VirtualFileKotlinClass implements KotlinJvmBinaryClass { + private final static Logger LOG = Logger.getInstance(VirtualFileKotlinClass.class); + private final VirtualFile file; private final NotNullLazyValue<JvmClassName> className; private final NullableLazyValue<KotlinClassHeader> classHeader; @@ -80,7 +81,8 @@ public void visit(int version, int access, String name, String signature, String } }, SKIP_CODE | SKIP_DEBUG | SKIP_FRAMES); } - catch (IOException e) { + catch (Throwable e) { + logFileReadingError(e); throw UtilsPackage.rethrow(e); } return classNameRef.get(); @@ -112,7 +114,8 @@ public void visitEnd() { } }, SKIP_CODE | SKIP_DEBUG | SKIP_FRAMES); } - catch (IOException e) { + catch (Throwable e) { + logFileReadingError(e); throw UtilsPackage.rethrow(e); } } @@ -196,7 +199,8 @@ public void visitEnd() { } }, SKIP_CODE | SKIP_DEBUG | SKIP_FRAMES); } - catch (IOException e) { + catch (Throwable e) { + logFileReadingError(e); throw UtilsPackage.rethrow(e); } } @@ -221,4 +225,13 @@ public boolean equals(Object obj) { public String toString() { return 
getClass().getSimpleName() + ": " + file.toString(); } + + private void logFileReadingError(@NotNull Throwable e) { + LOG.error( + "Could not read file: " + file.getPath() + "\n" + + "Size in bytes: " + file.getLength() + "\n" + + "File type: " + file.getFileType().getName(), + e + ); + } } \ No newline at end of file
23f836454d9c5a495111b068f45d6aa89a2a724a
hbase
HADOOP-1424. TestHBaseCluster fails with- IllegalMonitorStateException. Fix regression introduced by HADOOP-1397.--git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@541095 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hbase
diff --git a/CHANGES.txt b/CHANGES.txt index 65fd5cb1c100..092e9a0505a4 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -14,3 +14,5 @@ Trunk (unreleased changes) 'Performance Evaluation', etc. 7. HADOOP-1420, HADOOP-1423. Findbugs changes, remove reference to removed class HLocking. + 8. HADOOP-1424. TestHBaseCluster fails with IllegalMonitorStateException. Fix + regression introduced by HADOOP-1397. diff --git a/src/java/org/apache/hadoop/hbase/HLocking.java b/src/java/org/apache/hadoop/hbase/HLocking.java new file mode 100644 index 000000000000..8031caf99b58 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/HLocking.java @@ -0,0 +1,101 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.util.concurrent.atomic.AtomicInteger; + +/** + * HLocking is a set of lock primitives that does not rely on a + * particular thread holding the monitor for an object. This is + * especially important when a lock must persist over multiple RPC's + * since there is no guarantee that the same Server thread will handle + * all the RPC's until the lock is released. Not requiring that the locker + * thread is same as unlocking thread is the key distinction between this + * class and {@link java.util.concurrent.locks.ReentrantReadWriteLock}. + * + * <p>For each independent entity that needs locking, create a new HLocking + * instance. 
+ */ +public class HLocking { + private Integer mutex; + + // If lockers == 0, the lock is unlocked + // If lockers > 0, locked for read + // If lockers == -1 locked for write + + private AtomicInteger lockers; + + /** Constructor */ + public HLocking() { + this.mutex = new Integer(0); + this.lockers = new AtomicInteger(0); + } + + /** + * Caller needs the nonexclusive read-lock + */ + public void obtainReadLock() { + synchronized(mutex) { + while(lockers.get() < 0) { + try { + mutex.wait(); + } catch(InterruptedException ie) { + } + } + lockers.incrementAndGet(); + mutex.notifyAll(); + } + } + + /** + * Caller is finished with the nonexclusive read-lock + */ + public void releaseReadLock() { + synchronized(mutex) { + if(lockers.decrementAndGet() < 0) { + throw new IllegalStateException("lockers: " + lockers); + } + mutex.notifyAll(); + } + } + + /** + * Caller needs the exclusive write-lock + */ + public void obtainWriteLock() { + synchronized(mutex) { + while(!lockers.compareAndSet(0, -1)) { + try { + mutex.wait(); + } catch (InterruptedException ie) { + } + } + mutex.notifyAll(); + } + } + + /** + * Caller is finished with the write lock + */ + public void releaseWriteLock() { + synchronized(mutex) { + if(!lockers.compareAndSet(-1, 0)) { + throw new IllegalStateException("lockers: " + lockers); + } + mutex.notifyAll(); + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/HMemcache.java b/src/java/org/apache/hadoop/hbase/HMemcache.java index 87616e25f2d1..740caf1d323c 100644 --- a/src/java/org/apache/hadoop/hbase/HMemcache.java +++ b/src/java/org/apache/hadoop/hbase/HMemcache.java @@ -15,14 +15,17 @@ */ package org.apache.hadoop.hbase; -import org.apache.hadoop.io.*; +import java.io.IOException; +import java.util.Iterator; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.Vector; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - -import java.io.*; -import java.util.*; -import 
java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.Text; /******************************************************************************* * The HMemcache holds in-memory modifications to the HRegion. This is really a @@ -39,7 +42,7 @@ public class HMemcache { TreeMap<HStoreKey, BytesWritable> snapshot = null; - ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + private final HLocking lock = new HLocking(); public HMemcache() { super(); @@ -70,7 +73,7 @@ public Snapshot() { public Snapshot snapshotMemcacheForLog(HLog log) throws IOException { Snapshot retval = new Snapshot(); - this.lock.writeLock().lock(); + this.lock.obtainWriteLock(); try { if(snapshot != null) { throw new IOException("Snapshot in progress!"); @@ -99,7 +102,7 @@ public Snapshot snapshotMemcacheForLog(HLog log) throws IOException { return retval; } finally { - this.lock.writeLock().unlock(); + this.lock.releaseWriteLock(); } } @@ -109,7 +112,7 @@ public Snapshot snapshotMemcacheForLog(HLog log) throws IOException { * Modifying the structure means we need to obtain a writelock. */ public void deleteSnapshot() throws IOException { - this.lock.writeLock().lock(); + this.lock.obtainWriteLock(); try { if(snapshot == null) { @@ -135,7 +138,7 @@ public void deleteSnapshot() throws IOException { } } finally { - this.lock.writeLock().unlock(); + this.lock.releaseWriteLock(); } } @@ -145,14 +148,14 @@ public void deleteSnapshot() throws IOException { * Operation uses a write lock. 
*/ public void add(Text row, TreeMap<Text, BytesWritable> columns, long timestamp) { - this.lock.writeLock().lock(); + this.lock.obtainWriteLock(); try { for (Map.Entry<Text, BytesWritable> es: columns.entrySet()) { HStoreKey key = new HStoreKey(row, es.getKey(), timestamp); memcache.put(key, es.getValue()); } } finally { - this.lock.writeLock().unlock(); + this.lock.releaseWriteLock(); } } @@ -163,7 +166,7 @@ public void add(Text row, TreeMap<Text, BytesWritable> columns, long timestamp) */ public BytesWritable[] get(HStoreKey key, int numVersions) { Vector<BytesWritable> results = new Vector<BytesWritable>(); - this.lock.readLock().lock(); + this.lock.obtainReadLock(); try { Vector<BytesWritable> result = get(memcache, key, numVersions-results.size()); results.addAll(0, result); @@ -180,7 +183,7 @@ public BytesWritable[] get(HStoreKey key, int numVersions) { return (results.size() == 0)? null: results.toArray(new BytesWritable[results.size()]); } finally { - this.lock.readLock().unlock(); + this.lock.releaseReadLock(); } } @@ -192,7 +195,7 @@ public BytesWritable[] get(HStoreKey key, int numVersions) { */ public TreeMap<Text, BytesWritable> getFull(HStoreKey key) { TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>(); - this.lock.readLock().lock(); + this.lock.obtainReadLock(); try { internalGetFull(memcache, key, results); for(int i = history.size()-1; i >= 0; i--) { @@ -202,7 +205,7 @@ public TreeMap<Text, BytesWritable> getFull(HStoreKey key) { return results; } finally { - this.lock.readLock().unlock(); + this.lock.releaseReadLock(); } } @@ -275,7 +278,7 @@ public HMemcacheScanner(long timestamp, Text targetCols[], Text firstRow) super(timestamp, targetCols); - lock.readLock().lock(); + lock.obtainReadLock(); try { this.backingMaps = new TreeMap[history.size() + 1]; @@ -367,7 +370,7 @@ public void close() { } } finally { - lock.readLock().unlock(); + lock.releaseReadLock(); scannerClosed = true; } } diff --git 
a/src/java/org/apache/hadoop/hbase/HRegion.java b/src/java/org/apache/hadoop/hbase/HRegion.java index b5d000a19735..3cdb8f4cd0af 100644 --- a/src/java/org/apache/hadoop/hbase/HRegion.java +++ b/src/java/org/apache/hadoop/hbase/HRegion.java @@ -23,7 +23,6 @@ import java.io.*; import java.util.*; -import java.util.concurrent.locks.ReentrantReadWriteLock; /** * HRegion stores data for a certain region of a table. It stores all columns @@ -283,7 +282,7 @@ public WriteState() { int maxUnflushedEntries = 0; int compactionThreshold = 0; - private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + private final HLocking lock = new HLocking(); ////////////////////////////////////////////////////////////////////////////// // Constructor @@ -398,7 +397,7 @@ public void closeAndDelete() throws IOException { * time-sensitive thread. */ public Vector<HStoreFile> close() throws IOException { - lock.writeLock().lock(); + lock.obtainWriteLock(); try { boolean shouldClose = false; synchronized(writestate) { @@ -438,7 +437,7 @@ public Vector<HStoreFile> close() throws IOException { } } } finally { - lock.writeLock().unlock(); + lock.releaseWriteLock(); } } @@ -614,7 +613,7 @@ public FileSystem getFilesystem() { * @return - true if the region should be split */ public boolean needsSplit(Text midKey) { - lock.readLock().lock(); + lock.obtainReadLock(); try { Text key = new Text(); @@ -632,7 +631,7 @@ public boolean needsSplit(Text midKey) { return (maxSize > (DESIRED_MAX_FILE_SIZE + (DESIRED_MAX_FILE_SIZE / 2))); } finally { - lock.readLock().unlock(); + lock.releaseReadLock(); } } @@ -641,7 +640,7 @@ public boolean needsSplit(Text midKey) { */ public boolean needsCompaction() { boolean needsCompaction = false; - lock.readLock().lock(); + lock.obtainReadLock(); try { for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) { if(i.next().getNMaps() > compactionThreshold) { @@ -650,7 +649,7 @@ public boolean needsCompaction() { } } } finally { - 
lock.readLock().unlock(); + lock.releaseReadLock(); } return needsCompaction; } @@ -670,7 +669,7 @@ public boolean needsCompaction() { */ public boolean compactStores() throws IOException { boolean shouldCompact = false; - lock.readLock().lock(); + lock.obtainReadLock(); try { synchronized(writestate) { if((! writestate.writesOngoing) @@ -683,32 +682,30 @@ public boolean compactStores() throws IOException { } } } finally { - lock.readLock().unlock(); + lock.releaseReadLock(); } if(! shouldCompact) { LOG.info("not compacting region " + this.regionInfo.regionName); - return false; - - } else { - lock.writeLock().lock(); - try { - LOG.info("starting compaction on region " + this.regionInfo.regionName); - for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) { - HStore store = it.next(); - store.compact(); - } - LOG.info("compaction completed on region " + this.regionInfo.regionName); - return true; - - } finally { - synchronized(writestate) { - writestate.writesOngoing = false; - recentCommits = 0; - writestate.notifyAll(); - } - lock.writeLock().unlock(); + return false; + } + lock.obtainWriteLock(); + try { + LOG.info("starting compaction on region " + this.regionInfo.regionName); + for (Iterator<HStore> it = stores.values().iterator(); it.hasNext();) { + HStore store = it.next(); + store.compact(); + } + LOG.info("compaction completed on region " + this.regionInfo.regionName); + return true; + + } finally { + synchronized (writestate) { + writestate.writesOngoing = false; + recentCommits = 0; + writestate.notifyAll(); } + lock.releaseWriteLock(); } } @@ -928,7 +925,7 @@ public BytesWritable[] get(Text row, Text column, long timestamp, int numVersion private BytesWritable[] get(HStoreKey key, int numVersions) throws IOException { - lock.readLock().lock(); + lock.obtainReadLock(); try { // Check the memcache @@ -948,7 +945,7 @@ private BytesWritable[] get(HStoreKey key, int numVersions) throws IOException { return targetStore.get(key, numVersions); } 
finally { - lock.readLock().unlock(); + lock.releaseReadLock(); } } @@ -965,7 +962,7 @@ private BytesWritable[] get(HStoreKey key, int numVersions) throws IOException { public TreeMap<Text, BytesWritable> getFull(Text row) throws IOException { HStoreKey key = new HStoreKey(row, System.currentTimeMillis()); - lock.readLock().lock(); + lock.obtainReadLock(); try { TreeMap<Text, BytesWritable> memResult = memcache.getFull(key); for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext(); ) { @@ -976,7 +973,7 @@ public TreeMap<Text, BytesWritable> getFull(Text row) throws IOException { return memResult; } finally { - lock.readLock().unlock(); + lock.releaseReadLock(); } } @@ -985,7 +982,7 @@ public TreeMap<Text, BytesWritable> getFull(Text row) throws IOException { * columns. This Iterator must be closed by the caller. */ public HInternalScannerInterface getScanner(Text[] cols, Text firstRow) throws IOException { - lock.readLock().lock(); + lock.obtainReadLock(); try { TreeSet<Text> families = new TreeSet<Text>(); for(int i = 0; i < cols.length; i++) { @@ -1001,7 +998,7 @@ public HInternalScannerInterface getScanner(Text[] cols, Text firstRow) throws I return new HScanner(cols, firstRow, memcache, storelist); } finally { - lock.readLock().unlock(); + lock.releaseReadLock(); } } @@ -1024,11 +1021,11 @@ public long startUpdate(Text row) throws IOException { // We obtain a per-row lock, so other clients will // block while one client performs an update. 
- lock.readLock().lock(); + lock.obtainReadLock(); try { return obtainLock(row); } finally { - lock.readLock().unlock(); + lock.releaseReadLock(); } } diff --git a/src/java/org/apache/hadoop/hbase/HStore.java b/src/java/org/apache/hadoop/hbase/HStore.java index aa3b64d6cc65..7669747b5219 100644 --- a/src/java/org/apache/hadoop/hbase/HStore.java +++ b/src/java/org/apache/hadoop/hbase/HStore.java @@ -23,7 +23,6 @@ import java.util.Random; import java.util.TreeMap; import java.util.Vector; -import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -64,7 +63,7 @@ public class HStore { Integer compactLock = 0; Integer flushLock = 0; - private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + private final HLocking lock = new HLocking(); TreeMap<Long, MapFile.Reader> maps = new TreeMap<Long, MapFile.Reader>(); TreeMap<Long, HStoreFile> mapFiles = new TreeMap<Long, HStoreFile>(); @@ -237,7 +236,7 @@ public HStore(Path dir, Text regionName, Text colFamily, int maxVersions, /** Turn off all the MapFile readers */ public void close() throws IOException { LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily); - this.lock.writeLock().lock(); + this.lock.obtainWriteLock(); try { for (MapFile.Reader map: maps.values()) { map.close(); @@ -247,7 +246,7 @@ public void close() throws IOException { LOG.info("HStore closed for " + this.regionName + "/" + this.colFamily); } finally { - this.lock.writeLock().unlock(); + this.lock.releaseWriteLock(); } } @@ -319,7 +318,7 @@ Vector<HStoreFile> flushCacheHelper(TreeMap<HStoreKey, BytesWritable> inputCache // C. Finally, make the new MapFile available. 
if(addToAvailableMaps) { - this.lock.writeLock().lock(); + this.lock.obtainWriteLock(); try { maps.put(logCacheFlushId, new MapFile.Reader(fs, mapfile.toString(), conf)); @@ -330,7 +329,7 @@ Vector<HStoreFile> flushCacheHelper(TreeMap<HStoreKey, BytesWritable> inputCache } } finally { - this.lock.writeLock().unlock(); + this.lock.releaseWriteLock(); } } return getAllMapFiles(); @@ -338,12 +337,12 @@ Vector<HStoreFile> flushCacheHelper(TreeMap<HStoreKey, BytesWritable> inputCache } public Vector<HStoreFile> getAllMapFiles() { - this.lock.readLock().lock(); + this.lock.obtainReadLock(); try { return new Vector<HStoreFile>(mapFiles.values()); } finally { - this.lock.readLock().unlock(); + this.lock.releaseReadLock(); } } @@ -385,12 +384,12 @@ void compactHelper(boolean deleteSequenceInfo) throws IOException { // Grab a list of files to compact. Vector<HStoreFile> toCompactFiles = null; - this.lock.writeLock().lock(); + this.lock.obtainWriteLock(); try { toCompactFiles = new Vector<HStoreFile>(mapFiles.values()); } finally { - this.lock.writeLock().unlock(); + this.lock.releaseWriteLock(); } // Compute the max-sequenceID seen in any of the to-be-compacted TreeMaps @@ -627,7 +626,7 @@ void processReadyCompaction() throws IOException { Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily); - this.lock.writeLock().lock(); + this.lock.obtainWriteLock(); try { Path doneFile = new Path(curCompactStore, COMPACTION_DONE); if(! fs.exists(doneFile)) { @@ -744,7 +743,7 @@ void processReadyCompaction() throws IOException { // 7. Releasing the write-lock - this.lock.writeLock().unlock(); + this.lock.releaseWriteLock(); } } @@ -760,7 +759,7 @@ void processReadyCompaction() throws IOException { * The returned object should map column names to byte arrays (byte[]). 
*/ public void getFull(HStoreKey key, TreeMap<Text, BytesWritable> results) throws IOException { - this.lock.readLock().lock(); + this.lock.obtainReadLock(); try { MapFile.Reader[] maparray = maps.values().toArray(new MapFile.Reader[maps.size()]); @@ -789,7 +788,7 @@ public void getFull(HStoreKey key, TreeMap<Text, BytesWritable> results) throws } } finally { - this.lock.readLock().unlock(); + this.lock.releaseReadLock(); } } @@ -805,7 +804,7 @@ public BytesWritable[] get(HStoreKey key, int numVersions) throws IOException { } Vector<BytesWritable> results = new Vector<BytesWritable>(); - this.lock.readLock().lock(); + this.lock.obtainReadLock(); try { MapFile.Reader[] maparray = maps.values().toArray(new MapFile.Reader[maps.size()]); @@ -846,7 +845,7 @@ public BytesWritable[] get(HStoreKey key, int numVersions) throws IOException { } } finally { - this.lock.readLock().unlock(); + this.lock.releaseReadLock(); } } @@ -862,7 +861,7 @@ public long getLargestFileSize(Text midKey) { return maxSize; } - this.lock.readLock().lock(); + this.lock.obtainReadLock(); try { long mapIndex = 0L; @@ -889,7 +888,7 @@ public long getLargestFileSize(Text midKey) { LOG.warn(e); } finally { - this.lock.readLock().unlock(); + this.lock.releaseReadLock(); } return maxSize; } @@ -898,12 +897,12 @@ public long getLargestFileSize(Text midKey) { * @return Returns the number of map files currently in use */ public int getNMaps() { - this.lock.readLock().lock(); + this.lock.obtainReadLock(); try { return maps.size(); } finally { - this.lock.readLock().unlock(); + this.lock.releaseReadLock(); } } @@ -945,7 +944,7 @@ public HStoreScanner(long timestamp, Text[] targetCols, Text firstRow) super(timestamp, targetCols); - lock.readLock().lock(); + lock.obtainReadLock(); try { this.readers = new MapFile.Reader[mapFiles.size()]; @@ -1060,7 +1059,7 @@ public void close() { } } finally { - lock.readLock().unlock(); + lock.releaseReadLock(); scannerClosed = true; } }
ceb0d5e68bc6cbf8015be6d5dd785991fbc81455
hbase
fix spurious 400s produced by test--git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@790486 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hbase
diff --git a/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/TestRowResource.java b/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/TestRowResource.java index 0a8b9b702a51..fa760181a402 100644 --- a/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/TestRowResource.java +++ b/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/TestRowResource.java @@ -317,7 +317,7 @@ public void testMultiCellGetPutXML() throws IOException, JAXBException { Thread.yield(); // make sure the fake row was not actually created - response = client.get(path); + response = client.get(path, MIMETYPE_XML); assertEquals(response.getCode(), 404); // check that all of the values were created @@ -349,7 +349,7 @@ public void testMultiCellGetPutPB() throws IOException { Thread.yield(); // make sure the fake row was not actually created - response = client.get(path); + response = client.get(path, MIMETYPE_PROTOBUF); assertEquals(response.getCode(), 404); // check that all of the values were created
e858bda37b4cc782c38317c8fd792f00a5e3653a
arquillian$arquillian-graphene
ARQGRA-192: Support for enriching also List<WebElement> fields annotated with @FindBy added
a
https://github.com/arquillian/arquillian-graphene
diff --git a/graphene-webdriver/graphene-webdriver-ftest/src/test/java/org/jboss/arquillian/graphene/enricher/TestInitializingPageFragments.java b/graphene-webdriver/graphene-webdriver-ftest/src/test/java/org/jboss/arquillian/graphene/enricher/TestInitializingPageFragments.java index 27bdc9e79..6efa6f824 100644 --- a/graphene-webdriver/graphene-webdriver-ftest/src/test/java/org/jboss/arquillian/graphene/enricher/TestInitializingPageFragments.java +++ b/graphene-webdriver/graphene-webdriver-ftest/src/test/java/org/jboss/arquillian/graphene/enricher/TestInitializingPageFragments.java @@ -25,6 +25,7 @@ import static org.junit.Assert.assertNotNull; import java.net.URL; +import java.util.List; import org.jboss.arquillian.drone.api.annotation.Drone; import org.jboss.arquillian.graphene.enricher.page.TestPage; @@ -48,6 +49,9 @@ public class TestInitializingPageFragments { @FindBy(xpath = "//input") private WebElement input; + @FindBy(className = "divs") + private List<WebElement> divs; + @Page private TestPage testPage; @@ -92,4 +96,16 @@ public void testOtherWebElementsInitialisedCorrectly() { assertEquals("The value of the input is wrong, the element which represents it was not initialised correctly!", input.getAttribute("value"), EXPECTED_VALUE); } + + @Test + public void testInitializeListOfWebElements() { + loadPage(); + assertNotNull("The list of WebElements was not initialized correctly!", divs); + + for (int i = 1; i <= 3; i++) { + WebElement div = divs.get(i - 1); + assertEquals("The WebElement number " + i + " from list was not initialized correctly!", String.valueOf(i), + div.getText()); + } + } } diff --git a/graphene-webdriver/graphene-webdriver-ftest/src/test/resources/org/jboss/arquillian/graphene/ftest/pageFragmentsEnricher/sample.html b/graphene-webdriver/graphene-webdriver-ftest/src/test/resources/org/jboss/arquillian/graphene/ftest/pageFragmentsEnricher/sample.html index cdb50b1c6..744c1b6a6 100644 --- 
a/graphene-webdriver/graphene-webdriver-ftest/src/test/resources/org/jboss/arquillian/graphene/ftest/pageFragmentsEnricher/sample.html +++ b/graphene-webdriver/graphene-webdriver-ftest/src/test/resources/org/jboss/arquillian/graphene/ftest/pageFragmentsEnricher/sample.html @@ -10,5 +10,9 @@ </div> <div class="refByXpath">Some value too</div> <input type="text" id="input"/> + + <div class="divs">1</div> + <div class="divs">2</div> + <div class="divs">3</div> </body> </html> \ No newline at end of file diff --git a/graphene-webdriver/graphene-webdriver-impl/src/main/java/org/jboss/arquillian/graphene/enricher/PageFragmentsEnricher.java b/graphene-webdriver/graphene-webdriver-impl/src/main/java/org/jboss/arquillian/graphene/enricher/PageFragmentsEnricher.java index 0a8c26538..c19f9946b 100644 --- a/graphene-webdriver/graphene-webdriver-impl/src/main/java/org/jboss/arquillian/graphene/enricher/PageFragmentsEnricher.java +++ b/graphene-webdriver/graphene-webdriver-impl/src/main/java/org/jboss/arquillian/graphene/enricher/PageFragmentsEnricher.java @@ -39,9 +39,9 @@ /** * Enricher is a class for injecting into fields initialised <code>WebElement</code> and Page Fragments instances. 
- * + * * @author <a href="mailto:[email protected]">Juraj Huska</a> - * + * */ public class PageFragmentsEnricher implements TestEnricher { @@ -113,9 +113,19 @@ private void initNotPageFragmentsFields(List<Field> fields, Object object) { FindBy findBy = i.getAnnotation(FindBy.class); final By by = Factory.getReferencedBy(findBy); - WebElement element = setUpTheProxy(by); - - setObjectToField(i, object, element); + Class<?> fieldType = i.getType(); + + if (fieldType.equals(WebElement.class)) { + //it is plain WebElement field + WebElement element = setUpTheProxyForWebElement(by); + setObjectToField(i, object, element); + + } else if (fieldType.equals(List.class)) { + //it is List of WebElements + List<WebElement> elements = setUpTheProxyForListOfWebElements(by); + setObjectToField(i, object, elements); + } + } } @@ -129,21 +139,35 @@ private void setObjectToField(Field field, Object objectWithField, Object object field.set(objectWithField, object); } catch (Exception e) { // TODO more grained - throw new RuntimeException("The Page Fragment field can not be initialised!", e); + throw new RuntimeException("The given object" + object + " can not be set to the field " + field + + " of the object which declares it: " + objectWithField + "!", e); } if (!accessible) { field.setAccessible(false); } } - private WebElement setUpTheProxy(final By by) { + private List<WebElement> setUpTheProxyForListOfWebElements(final By by) { + List<WebElement> elements = GrapheneProxy.getProxyForFutureTarget(new GrapheneProxy.FutureTarget() { + + @Override + public Object getTarget() { + WebDriver driver = GrapheneContext.getProxyForInterfaces(HasInputDevices.class); + List<WebElement> elements = driver.findElements(by); + return elements; + } + }, List.class); + return elements; + } + + private WebElement setUpTheProxyForWebElement(final By by) { WebElement e = GrapheneProxy.getProxyForFutureTarget(new GrapheneProxy.FutureTarget() { @Override public Object getTarget() { WebDriver driver = 
GrapheneContext.getProxyForInterfaces(HasInputDevices.class); - WebElement root = driver.findElement(by); - return root; + WebElement element = driver.findElement(by); + return element; } }, WebElement.class); return e; @@ -156,7 +180,7 @@ private void initPageFragmentsFields(List<Field> fields, Object object) { FindBy findBy = pageFragmentField.getAnnotation(FindBy.class); final By by = Factory.getReferencedBy(findBy); - WebElement rootElement = setUpTheProxy(by); + WebElement rootElement = setUpTheProxyForWebElement(by); // initialise Page Fragment Class<?> implementationClass = pageFragmentField.getType(); @@ -167,8 +191,8 @@ private void initPageFragmentsFields(List<Field> fields, Object object) { } /** - * It removes all fields with type <code>WebElement</code> from the given list of fields. - * + * It removes all fields with type <code>WebElement</code> from the given list of fields. + * * @param findByFields * @return */ @@ -182,6 +206,8 @@ private List<Field> removePlainFindBy(List<Field> findByFields) { if (fieldType.equals(WebElement.class)) { i.remove(); + } else if (fieldType.equals(List.class)) { + i.remove(); } }
5a2052fad3f92e6675ba6362028f5f5851934f01
camel
CAMEL-4059: Fixed test on windows--git-svn-id: https://svn.apache.org/repos/asf/camel/trunk@1132659 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/camel
diff --git a/components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsTestConnectionOnStartupTest.java b/components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsTestConnectionOnStartupTest.java index 8b24f76a2c209..045aed17b0efd 100644 --- a/components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsTestConnectionOnStartupTest.java +++ b/components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsTestConnectionOnStartupTest.java @@ -46,7 +46,6 @@ public void configure() throws Exception { context.start(); fail("Should have thrown an exception"); } catch (FailedToCreateConsumerException e) { - // expected assertEquals("Failed to create Consumer for endpoint: Endpoint[activemq://queue:foo?testConnectionOnStartup=true]. " + "Reason: Cannot get JMS Connection on startup for destination foo", e.getMessage()); } @@ -65,12 +64,8 @@ public void configure() throws Exception { context.start(); fail("Should have thrown an exception"); } catch (FailedToCreateProducerException e) { - // expected - assertEquals("Failed to create Producer for endpoint: Endpoint[activemq://queue:foo?testConnectionOnStartup=true]. " - + "Reason: org.apache.camel.FailedToCreateProducerException: Failed to create Producer for endpoint: " - + "Endpoint[activemq://queue:foo?testConnectionOnStartup=true]. Reason: javax.jms.JMSException: " - + "Could not connect to broker URL: tcp://localhost:61111. Reason: java.net.ConnectException: Connection refused", - e.getMessage()); + assertTrue(e.getMessage().startsWith("Failed to create Producer for endpoint: Endpoint[activemq://queue:foo?testConnectionOnStartup=true].")); + assertTrue(e.getMessage().contains("java.net.ConnectException")); } }
859f9b8494cb6c7fec7f9b803bc975e7fb3b8ab4
Mylyn Reviews
322734: Renamed part/section to review
a
https://github.com/eclipse-mylyn/org.eclipse.mylyn.reviews
diff --git a/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPart.java b/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPart.java index 772324ca..a91c237a 100644 --- a/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPart.java +++ b/org.eclipse.mylyn.reviews.ui/src/org/eclipse/mylyn/reviews/ui/editors/ReviewTaskEditorPart.java @@ -73,7 +73,7 @@ public class ReviewTaskEditorPart extends AbstractTaskEditorPart { private Composite composite; public ReviewTaskEditorPart() { - setPartName("Scope"); + setPartName("Review"); setExpandVertically(true); }
93ab055d17bf4663c439424a40a053d7b0255aa7
kotlin
Change Signature: Do not fail on unresolved- PsiMethod -KT-9535 Fixed--
c
https://github.com/JetBrains/kotlin
diff --git a/idea/src/org/jetbrains/kotlin/idea/refactoring/changeSignature/JetChangeSignatureUsageProcessor.java b/idea/src/org/jetbrains/kotlin/idea/refactoring/changeSignature/JetChangeSignatureUsageProcessor.java index c566ef20f2a96..f6b990ffd6901 100644 --- a/idea/src/org/jetbrains/kotlin/idea/refactoring/changeSignature/JetChangeSignatureUsageProcessor.java +++ b/idea/src/org/jetbrains/kotlin/idea/refactoring/changeSignature/JetChangeSignatureUsageProcessor.java @@ -442,7 +442,7 @@ private static void findSAMUsages(ChangeInfo changeInfo, Set<UsageInfo> result) if (((PsiMethod) method).getContainingClass() == null) return; FunctionDescriptor methodDescriptor = JavaResolutionUtils.getJavaMethodDescriptor((PsiMethod) method); - assert methodDescriptor != null; + if (methodDescriptor == null) return; DeclarationDescriptor containingDescriptor = methodDescriptor.getContainingDeclaration(); if (!(containingDescriptor instanceof JavaClassDescriptor)) return;
b09e981f1e236c08194c5871214d5431c98eb260
drools
[BZ-1007977] when returning a cached KieModule from- the KieRepository referring to a snapshot release check if there is a newer- release on the maven repository--
c
https://github.com/kiegroup/drools
diff --git a/drools-compiler/src/main/java/org/drools/compiler/kie/builder/impl/InternalKieScanner.java b/drools-compiler/src/main/java/org/drools/compiler/kie/builder/impl/InternalKieScanner.java index 0b3a11e27eb..3194f75b493 100644 --- a/drools-compiler/src/main/java/org/drools/compiler/kie/builder/impl/InternalKieScanner.java +++ b/drools-compiler/src/main/java/org/drools/compiler/kie/builder/impl/InternalKieScanner.java @@ -14,4 +14,6 @@ public interface InternalKieScanner extends KieScanner { KieModule loadArtifact(ReleaseId releaseId); KieModule loadArtifact(ReleaseId releaseId, InputStream pomXML); + + String getArtifactVersion(ReleaseId releaseId); } diff --git a/drools-compiler/src/main/java/org/drools/compiler/kie/builder/impl/KieRepositoryImpl.java b/drools-compiler/src/main/java/org/drools/compiler/kie/builder/impl/KieRepositoryImpl.java index 5c335a60843..708d0d1adf6 100644 --- a/drools-compiler/src/main/java/org/drools/compiler/kie/builder/impl/KieRepositoryImpl.java +++ b/drools-compiler/src/main/java/org/drools/compiler/kie/builder/impl/KieRepositoryImpl.java @@ -46,14 +46,19 @@ public class KieRepositoryImpl public static final KieRepository INSTANCE = new KieRepositoryImpl(); - private final KieModuleRepo kieModuleRepo = new KieModuleRepo(); + private final KieModuleRepo kieModuleRepo; + + private InternalKieScanner internalKieScanner; + + public KieRepositoryImpl() { + internalKieScanner = getInternalKieScanner(); + kieModuleRepo = new KieModuleRepo(internalKieScanner); + } private final AtomicReference<ReleaseId> defaultGAV = new AtomicReference(new ReleaseIdImpl(DEFAULT_GROUP, DEFAULT_ARTIFACT, DEFAULT_VERSION)); - private InternalKieScanner internalKieScanner; - public void setDefaultGAV(ReleaseId releaseId) { this.defaultGAV.set(releaseId); } @@ -122,10 +127,6 @@ private static class DummyKieScanner implements InternalKieScanner { - public KieModule loadArtifact(ReleaseId releaseId) { - return null; - } - public void start(long 
pollingInterval) { } @@ -138,9 +139,17 @@ public void scanNow() { public void setKieContainer(KieContainer kieContainer) { } + public KieModule loadArtifact(ReleaseId releaseId) { + return null; + } + public KieModule loadArtifact(ReleaseId releaseId, InputStream pomXML) { return null; } + + public String getArtifactVersion(ReleaseId releaseId) { + return null; + } } public KieModule addKieModule(Resource resource, Resource... dependencies) { @@ -195,9 +204,14 @@ public KieModule getKieModule(Resource resource) { private static class KieModuleRepo { + private final InternalKieScanner kieScanner; private final Map<String, TreeMap<ComparableVersion, KieModule>> kieModules = new HashMap<String, TreeMap<ComparableVersion, KieModule>>(); private final Map<ReleaseId, KieModule> oldKieModules = new HashMap<ReleaseId, KieModule>(); + private KieModuleRepo(InternalKieScanner kieScanner) { + this.kieScanner = kieScanner; + } + void store(KieModule kieModule) { ReleaseId releaseId = kieModule.getReleaseId(); String ga = releaseId.getGroupId() + ":" + releaseId.getArtifactId(); @@ -225,12 +239,23 @@ KieModule load(ReleaseId releaseId) { KieModule load(ReleaseId releaseId, VersionRange versionRange) { String ga = releaseId.getGroupId() + ":" + releaseId.getArtifactId(); TreeMap<ComparableVersion, KieModule> artifactMap = kieModules.get(ga); - if (artifactMap == null) { + if ( artifactMap == null ) { return null; } if (versionRange.fixed) { - return artifactMap.get(new ComparableVersion(releaseId.getVersion())); + KieModule kieModule = artifactMap.get(new ComparableVersion(releaseId.getVersion())); + if ( kieModule != null && releaseId.isSnapshot() ) { + String oldSnapshotVersion = ((ReleaseIdImpl)kieModule.getReleaseId()).getSnapshotVersion(); + String currentSnapshotVersion = kieScanner.getArtifactVersion(releaseId); + if ( oldSnapshotVersion != null && currentSnapshotVersion != null && + new ComparableVersion(currentSnapshotVersion).compareTo(new 
ComparableVersion(oldSnapshotVersion)) > 0) { + // if the snapshot currently available on the maven repo is newer than the cached one + // return null to enforce the building of this newer version + return null; + } + } + return kieModule; } if (versionRange.upperBound == null) { @@ -241,11 +266,11 @@ KieModule load(ReleaseId releaseId, VersionRange versionRange) { artifactMap.ceilingEntry(new ComparableVersion(versionRange.upperBound)) : artifactMap.lowerEntry(new ComparableVersion(versionRange.upperBound)); - if (entry == null) { + if ( entry == null ) { return null; } - if (versionRange.lowerBound == null) { + if ( versionRange.lowerBound == null ) { return entry.getValue(); } diff --git a/drools-compiler/src/main/java/org/drools/compiler/kproject/ReleaseIdImpl.java b/drools-compiler/src/main/java/org/drools/compiler/kproject/ReleaseIdImpl.java index 6777edbd6db..58f204c9625 100644 --- a/drools-compiler/src/main/java/org/drools/compiler/kproject/ReleaseIdImpl.java +++ b/drools-compiler/src/main/java/org/drools/compiler/kproject/ReleaseIdImpl.java @@ -12,6 +12,8 @@ public class ReleaseIdImpl implements ReleaseId { private final String artifactId; private final String version; + private String snapshotVersion; + public ReleaseIdImpl(String releaseId) { String[] split = releaseId.split(":"); this.groupId = split[0]; @@ -60,6 +62,10 @@ public String getCompilationCachePathPrefix() { //return "META-INF/maven/" + groupId + "/" + artifactId + "/"; return "META-INF/"; } + + public boolean isSnapshot() { + return version.endsWith("-SNAPSHOT"); + } public static ReleaseId fromPropertiesString(String string) { Properties props = new Properties(); @@ -101,4 +107,12 @@ public int hashCode() { result = 31 * result + (version != null ? 
version.hashCode() : 0); return result; } + + public String getSnapshotVersion() { + return snapshotVersion; + } + + public void setSnapshotVersion(String snapshotVersion) { + this.snapshotVersion = snapshotVersion; + } } diff --git a/kie-ci/src/main/java/org/kie/scanner/KieRepositoryScannerImpl.java b/kie-ci/src/main/java/org/kie/scanner/KieRepositoryScannerImpl.java index 6eef4d1d340..b7917859b5b 100644 --- a/kie-ci/src/main/java/org/kie/scanner/KieRepositoryScannerImpl.java +++ b/kie-ci/src/main/java/org/kie/scanner/KieRepositoryScannerImpl.java @@ -1,5 +1,6 @@ package org.kie.scanner; +import org.drools.compiler.kproject.ReleaseIdImpl; import org.drools.compiler.kproject.models.KieModuleModelImpl; import org.kie.api.builder.ReleaseId; import org.kie.api.builder.KieModule; @@ -87,6 +88,14 @@ public KieModule loadArtifact(ReleaseId releaseId, InputStream pomXml) { Artifact artifact = resolver.resolveArtifact(artifactName); return artifact != null ? buildArtifact(releaseId, artifact, resolver) : loadPomArtifact(releaseId); } + + public String getArtifactVersion(ReleaseId releaseId) { + if (!releaseId.isSnapshot()) { + return releaseId.getVersion(); + } + Artifact artifact = getArtifactResolver().resolveArtifact(releaseId.toString()); + return artifact != null ? 
artifact.getVersion() : null; + } private KieModule loadPomArtifact(ReleaseId releaseId) { ArtifactResolver resolver = getResolverFor(releaseId, false); @@ -101,6 +110,9 @@ private KieModule loadPomArtifact(ReleaseId releaseId) { } private InternalKieModule buildArtifact(ReleaseId releaseId, Artifact artifact, ArtifactResolver resolver) { + if (releaseId.isSnapshot()) { + ((ReleaseIdImpl)releaseId).setSnapshotVersion(artifact.getVersion()); + } ZipKieModule kieModule = createZipKieModule(releaseId, artifact.getFile()); if (kieModule != null) { addDependencies(kieModule, resolver, resolver.getArtifactDependecies(new DependencyDescriptor(artifact).toString())); diff --git a/kie-ci/src/test/java/org/kie/scanner/KieModuleMavenTest.java b/kie-ci/src/test/java/org/kie/scanner/KieModuleMavenTest.java index 636c97cbaf7..3c09126fae0 100644 --- a/kie-ci/src/test/java/org/kie/scanner/KieModuleMavenTest.java +++ b/kie-ci/src/test/java/org/kie/scanner/KieModuleMavenTest.java @@ -24,8 +24,6 @@ import org.kie.api.builder.ReleaseId; import org.kie.api.builder.model.KieBaseModel; import org.kie.api.definition.KiePackage; -import org.kie.api.definition.process.*; -import org.kie.api.definition.process.Process; import org.kie.api.definition.rule.Rule; import org.kie.api.runtime.KieContainer; import org.kie.api.runtime.KieSession; @@ -138,8 +136,9 @@ public void testKieModulePojoDependencies() throws Exception { assertEquals(1, list.size()); } - @Test @Ignore + @Test public void testKieContainerBeforeAndAfterDeployOfSnapshot() throws Exception { + // BZ-1007977 KieServices ks = KieServices.Factory.get(); String group = "org.kie.test"; @@ -180,7 +179,6 @@ public void testKieContainerBeforeAndAfterDeployOfSnapshot() throws Exception { assertEquals(1, packages2.size()); Collection<Rule> rules2 = packages2.iterator().next().getRules(); assertEquals(4, rules2.size()); - } public static String generatePomXml(ReleaseId releaseId, ReleaseId... dependencies) {
12060cb9a634e3b1ae0b7d1508bedd37e3dc0394
hadoop
YARN-3028. Better syntax for replaceLabelsOnNode in- RMAdmin CLI. Contributed by Rohith Sharmaks--(cherry picked from commit fd93e5387b554a78413bc0f14b729e58fea604ea)-
p
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 5422c0eb26dbd..af6a01509c6dd 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -178,6 +178,9 @@ Release 2.7.0 - UNRELEASED YARN-2897. CrossOriginFilter needs more log statements (Mit Desai via jeagles) + YARN-3028. Better syntax for replaceLabelsOnNode in RMAdmin CLI + (Rohith Sharmaks via wangda) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java index 9ea333cab4cba..6f1bbd09d83bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java @@ -100,7 +100,8 @@ public class RMAdminCLI extends HAAdmin { new UsageInfo("[label1,label2,label3] (label splitted by \",\")", "remove from cluster node labels")) .put("-replaceLabelsOnNode", - new UsageInfo("[node1:port,label1,label2 node2:port,label1,label2]", + new UsageInfo( + "[node1[:port]=label1,label2 node2[:port]=label1,label2]", "replace labels on nodes")) .put("-directlyAccessNodeLabelStore", new UsageInfo("", "Directly access node label store, " @@ -199,7 +200,7 @@ private static void printHelp(String cmd, boolean isHAEnabled) { " [-getGroup [username]]" + " [[-addToClusterNodeLabels [label1,label2,label3]]" + " [-removeFromClusterNodeLabels [label1,label2,label3]]" + - " [-replaceLabelsOnNode [node1:port,label1,label2 node2:port,label1]" + + " [-replaceLabelsOnNode [node1[:port]=label1,label2 node2[:port]=label1]" + " [-directlyAccessNodeLabelStore]]"); if (isHAEnabled) { appendHAUsage(summary); @@ -398,8 +399,18 @@ private Map<NodeId, Set<String>> buildNodeLabelsMapFromStr(String args) 
continue; } - String[] splits = nodeToLabels.split(","); + // "," also supported for compatibility + String[] splits = nodeToLabels.split("="); + int index = 0; + if (splits.length != 2) { + splits = nodeToLabels.split(","); + index = 1; + } + String nodeIdStr = splits[0]; + if (index == 0) { + splits = splits[1].split(","); + } if (nodeIdStr.trim().isEmpty()) { throw new IOException("node name cannot be empty"); @@ -408,7 +419,7 @@ private Map<NodeId, Set<String>> buildNodeLabelsMapFromStr(String args) NodeId nodeId = ConverterUtils.toNodeIdWithDefaultPort(nodeIdStr); map.put(nodeId, new HashSet<String>()); - for (int i = 1; i < splits.length; i++) { + for (int i = index; i < splits.length; i++) { if (!splits[i].trim().isEmpty()) { map.get(nodeId).add(splits[i].trim()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java index 92af27dc692ca..1dfeac21d4425 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java @@ -73,7 +73,6 @@ public class TestRMAdminCLI { @Before public void configure() throws IOException, YarnException { remoteAdminServiceAccessed = false; - dummyNodeLabelsManager = new DummyCommonNodeLabelsManager(); admin = mock(ResourceManagerAdministrationProtocol.class); when(admin.addToClusterNodeLabels(any(AddToClusterNodeLabelsRequest.class))) .thenAnswer(new Answer<AddToClusterNodeLabelsResponse>() { @@ -105,6 +104,7 @@ protected HAServiceTarget resolveTarget(String rmId) { return haServiceTarget; } }; + initDummyNodeLabelsManager(); rmAdminCLI.localNodeLabelsManager = dummyNodeLabelsManager; YarnConfiguration conf = new YarnConfiguration(); @@ -124,6 
+124,13 @@ protected HAServiceTarget resolveTarget(String rmId) { }; } + private void initDummyNodeLabelsManager() { + Configuration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true); + dummyNodeLabelsManager = new DummyCommonNodeLabelsManager(); + dummyNodeLabelsManager.init(conf); + } + @Test(timeout=500) public void testRefreshQueues() throws Exception { String[] args = { "-refreshQueues" }; @@ -281,7 +288,7 @@ public void testHelp() throws Exception { "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" + " [username]] [[-addToClusterNodeLabels [label1,label2,label3]]" + " [-removeFromClusterNodeLabels [label1,label2,label3]] [-replaceLabelsOnNode " + - "[node1:port,label1,label2 node2:port,label1] [-directlyAccessNodeLabelStore]] " + + "[node1[:port]=label1,label2 node2[:port]=label1] [-directlyAccessNodeLabelStore]] " + "[-help [cmd]]")); assertTrue(dataOut .toString() @@ -361,7 +368,7 @@ public void testHelp() throws Exception { "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" + " [username]] [[-addToClusterNodeLabels [label1,label2,label3]]" + " [-removeFromClusterNodeLabels [label1,label2,label3]] [-replaceLabelsOnNode " + - "[node1:port,label1,label2 node2:port,label1] [-directlyAccessNodeLabelStore]] " + + "[node1[:port]=label1,label2 node2[:port]=label1] [-directlyAccessNodeLabelStore]] " + "[-transitionToActive [--forceactive] <serviceId>] " + "[-transitionToStandby <serviceId>] [-failover" + " [--forcefence] [--forceactive] <serviceId> <serviceId>] " + @@ -501,24 +508,29 @@ public void testRemoveFromClusterNodeLabels() throws Exception { @Test public void testReplaceLabelsOnNode() throws Exception { // Successfully replace labels - dummyNodeLabelsManager.addToCluserNodeLabels(ImmutableSet.of("x", "Y")); + dummyNodeLabelsManager + .addToCluserNodeLabels(ImmutableSet.of("x", "y", "Y")); String[] args = - { "-replaceLabelsOnNode", "node1,x,Y node2,Y", + { "-replaceLabelsOnNode", + "node1:8000,x,y 
node2:8000=y node3,x,Y node4=Y", "-directlyAccessNodeLabelStore" }; assertEquals(0, rmAdminCLI.run(args)); assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( - NodeId.newInstance("node1", 0))); + NodeId.newInstance("node1", 8000))); assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( - NodeId.newInstance("node2", 0))); - + NodeId.newInstance("node2", 8000))); + assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( + NodeId.newInstance("node3", 0))); + assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( + NodeId.newInstance("node4", 0))); + // no labels, should fail args = new String[] { "-replaceLabelsOnNode" }; assertTrue(0 != rmAdminCLI.run(args)); - + // no labels, should fail args = - new String[] { "-replaceLabelsOnNode", - "-directlyAccessNodeLabelStore" }; + new String[] { "-replaceLabelsOnNode", "-directlyAccessNodeLabelStore" }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail @@ -529,20 +541,6 @@ public void testReplaceLabelsOnNode() throws Exception { assertTrue(0 != rmAdminCLI.run(args)); } - @Test - public void testReplaceLabelsOnNodeWithPort() throws Exception { - // Successfully replace labels - dummyNodeLabelsManager.addToCluserNodeLabels(ImmutableSet.of("x", "y")); - String[] args = - { "-replaceLabelsOnNode", "node1:8000,x,y node2:8000,y", - "-directlyAccessNodeLabelStore" }; - assertEquals(0, rmAdminCLI.run(args)); - assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( - NodeId.newInstance("node1", 8000))); - assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( - NodeId.newInstance("node2", 8000))); - } - private void testError(String[] args, String template, ByteArrayOutputStream data, int resultCode) throws Exception { int actualResultCode = rmAdminCLI.run(args); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java index 242f59caf2271..0ab1115491838 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java @@ -333,23 +333,32 @@ private void assertNodeLabelsDisabledErrorMessage(IOException e) { public void testNodeLabelsDisabled() throws IOException { DummyCommonNodeLabelsManager mgr = new DummyCommonNodeLabelsManager(); Configuration conf = new YarnConfiguration(); - conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true); + conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, false); mgr.init(conf); mgr.start(); + boolean caught = false; // add labels try { mgr.addToCluserNodeLabels(ImmutableSet.of("x")); } catch (IOException e) { assertNodeLabelsDisabledErrorMessage(e); + caught = true; } + // check exception caught + Assert.assertTrue(caught); + caught = false; // remove labels try { mgr.removeFromClusterNodeLabels(ImmutableSet.of("x")); } catch (IOException e) { assertNodeLabelsDisabledErrorMessage(e); + caught = true; } + // check exception caught + Assert.assertTrue(caught); + caught = false; // add labels to node try { @@ -357,7 +366,11 @@ public void testNodeLabelsDisabled() throws IOException { CommonNodeLabelsManager.EMPTY_STRING_SET)); } catch (IOException e) { assertNodeLabelsDisabledErrorMessage(e); + caught = true; } + // check exception caught + Assert.assertTrue(caught); + caught = false; // remove labels from node try { @@ -365,7 +378,11 @@ public void testNodeLabelsDisabled() throws IOException { CommonNodeLabelsManager.EMPTY_STRING_SET)); } catch (IOException e) { assertNodeLabelsDisabledErrorMessage(e); + caught = true; } + // check exception caught + Assert.assertTrue(caught); + caught = false; 
// replace labels on node try { @@ -373,7 +390,11 @@ public void testNodeLabelsDisabled() throws IOException { CommonNodeLabelsManager.EMPTY_STRING_SET)); } catch (IOException e) { assertNodeLabelsDisabledErrorMessage(e); + caught = true; } + // check exception caught + Assert.assertTrue(caught); + caught = false; mgr.close(); }
35889c08e997c54194471256e0760ee29a54bafe
kotlin
Test data paths fixed--
c
https://github.com/JetBrains/kotlin
diff --git a/j2k/test/org/jetbrains/jet/j2k/StandaloneJavaToKotlinConverterTest.java b/j2k/test/org/jetbrains/jet/j2k/StandaloneJavaToKotlinConverterTest.java index a168d150a4654..d153c24160160 100644 --- a/j2k/test/org/jetbrains/jet/j2k/StandaloneJavaToKotlinConverterTest.java +++ b/j2k/test/org/jetbrains/jet/j2k/StandaloneJavaToKotlinConverterTest.java @@ -14,8 +14,6 @@ import java.io.File; import java.io.IOException; -import static org.jetbrains.jet.j2k.TestCaseBuilder.getTestDataPathBase; - /** * @author ignatov */ @@ -33,7 +31,7 @@ public StandaloneJavaToKotlinConverterTest(String dataPath, String name) { @Override protected void runTest() throws Throwable { - String javaPath = "testData" + File.separator + getTestFilePath(); + String javaPath = "j2k/testData" + File.separator + getTestFilePath(); String kotlinPath = javaPath.replace(".jav", ".kt"); final File kotlinFile = new File(kotlinPath); @@ -89,7 +87,7 @@ public String getName() { @NotNull public static Test suite() { TestSuite suite = new TestSuite(); - suite.addTest(TestCaseBuilder.suiteForDirectory(getTestDataPathBase(), "/ast", new TestCaseBuilder.NamedTestFactory() { + suite.addTest(TestCaseBuilder.suiteForDirectory("j2k/testData", "/ast", new TestCaseBuilder.NamedTestFactory() { @NotNull @Override public Test createTest(@NotNull String dataPath, @NotNull String name) {
42fa86735cb89bdde76545946d154c045513c12b
Delta Spike
DELTASPIKE-277 fix JsfMessageProducer
c
https://github.com/apache/deltaspike
diff --git a/deltaspike/modules/jsf/impl/src/main/java/org/apache/deltaspike/jsf/impl/message/JsfMessageProducer.java b/deltaspike/modules/jsf/impl/src/main/java/org/apache/deltaspike/jsf/impl/message/JsfMessageProducer.java index af4bd70e5..6dc58b8a7 100644 --- a/deltaspike/modules/jsf/impl/src/main/java/org/apache/deltaspike/jsf/impl/message/JsfMessageProducer.java +++ b/deltaspike/modules/jsf/impl/src/main/java/org/apache/deltaspike/jsf/impl/message/JsfMessageProducer.java @@ -34,12 +34,12 @@ public class JsfMessageProducer { @Produces @Dependent - public JsfMessage<?> createJsfMessage(InjectionPoint injectionPoint) + public JsfMessage createJsfMessage(InjectionPoint injectionPoint) { return createJsfMessageFor(injectionPoint, ReflectionUtils.getRawType(injectionPoint.getType())); } - private JsfMessage<?> createJsfMessageFor(InjectionPoint injectionPoint, Class<Object> rawType) + private JsfMessage createJsfMessageFor(InjectionPoint injectionPoint, Class<Object> rawType) { //X TODO check if the JsfMessage should get injected into a UIComponent and use #getClientId()
a72d7744322cea57224b426f06d1f81521c027fe
simpleserver$simpleserver
completed custAuth export * made exports resources * added default option * improved stability * closes #117
p
https://github.com/simpleserver/simpleserver
diff --git a/src/simpleserver/Authenticator.java b/src/simpleserver/Authenticator.java index 26fe9d3a..9402a34b 100644 --- a/src/simpleserver/Authenticator.java +++ b/src/simpleserver/Authenticator.java @@ -120,7 +120,11 @@ public void register(String playerName, String password) { server.data.save(); if (server.options.getBoolean("enableCustAuthExport")) { - server.custAuthExport.addEntry(playerName, server.config.players.get(playerName), pwHash); + Integer groupId = server.config.players.get(playerName); + if (groupId == null) { + groupId = server.config.properties.getInt("defaultGroup"); + } + server.custAuthExport.addEntry(playerName, groupId, pwHash); } } diff --git a/src/simpleserver/Server.java b/src/simpleserver/Server.java index 3f2e331d..dbe5bf5a 100644 --- a/src/simpleserver/Server.java +++ b/src/simpleserver/Server.java @@ -48,7 +48,6 @@ import simpleserver.config.xml.Config; import simpleserver.config.xml.GlobalConfig; import simpleserver.export.CustAuthExport; -import simpleserver.export.Export; import simpleserver.lang.Translations; import simpleserver.log.AdminLog; import simpleserver.log.ConnectionLog; @@ -108,7 +107,6 @@ public class Server { private MessageLog messageLog; private SystemInputQueue systemInput; - private List<Export> exports; public CustAuthExport custAuthExport; private MinecraftWrapper minecraft; @@ -272,12 +270,6 @@ public void saveConfig() { globalConfig.save(); } - public void saveExports() { - for (Export export : exports) { - export.save(); - } - } - public String findName(String prefix) { Player i = playerList.findPlayer(prefix); if (i != null) { @@ -412,11 +404,6 @@ private void initialize() { resources.add(data = new GlobalData()); resources.add(docs = new ReadFiles()); - exports = new LinkedList<Export>(); - if (options.getBoolean("enableCustAuthExport")) { - exports.add(custAuthExport = new CustAuthExport(this)); - } - time = new Time(this); bots = new BotController(this); @@ -454,6 +441,11 @@ private void startup() { 
requestTracker = new RequestTracker(this); messager = new Messager(this); + if (options.getBoolean("enableCustAuthExport")) { + resources.add(custAuthExport = new CustAuthExport(this)); + custAuthExport.load(); + } + messageLog = new MessageLog(config.properties.get("logMessageFormat"), config.properties.getBoolean("logMessages")); minecraft = new MinecraftWrapper(this, options, systemInput); @@ -523,7 +515,6 @@ private void shutdown() { requestTracker.stop(); c10t.stop(); saveResources(); - saveExports(); playerList.waitUntilEmpty(); minecraft.stop(); diff --git a/src/simpleserver/command/SetGroupCommand.java b/src/simpleserver/command/SetGroupCommand.java index d2250403..2eeb06ba 100644 --- a/src/simpleserver/command/SetGroupCommand.java +++ b/src/simpleserver/command/SetGroupCommand.java @@ -111,5 +111,9 @@ protected void setGroup(Server server, int group, String target) { server.config.players.set(target, group); server.saveConfig(); System.out.println("[SimpleServer] Player " + target + "'s group was set to " + new Integer(group).toString() + "!"); + + if (server.options.getBoolean("enableCustAuthExport")) { + server.custAuthExport.updateGroup(target, group); + } } } diff --git a/src/simpleserver/export/CustAuthExport.java b/src/simpleserver/export/CustAuthExport.java index a351ac54..4bed8801 100644 --- a/src/simpleserver/export/CustAuthExport.java +++ b/src/simpleserver/export/CustAuthExport.java @@ -22,13 +22,14 @@ import simpleserver.Server; -public class CustAuthExport extends PropertiesExport implements Export { +public class CustAuthExport extends PropertiesExport { private Server server; public CustAuthExport(Server server) { super("custAuthData.txt", 2); this.server = server; - header = "Export of custAuth data in the format: playerName=groupId,pwHash"; + header = "Export of custAuth data in the format: playerName=groupId,pwHash\n" + + "DO NOT MODIFY THIS FILE!"; } @Override @@ -43,14 +44,17 @@ protected void populate() { public void addEntry(String 
playerName, int groupId, byte[] pwHash) { setEntry(playerName, String.valueOf(groupId), hashToHex(pwHash)); + save(); } public void updatePw(String playerName, byte[] pwHash) { updateEntry(playerName, 1, hashToHex(pwHash)); + save(); } public void updateGroup(String playerName, int groupId) { updateEntry(playerName, 0, String.valueOf(groupId)); + save(); } private String hashToHex(byte[] pwHash) { diff --git a/src/simpleserver/export/Export.java b/src/simpleserver/export/Export.java deleted file mode 100644 index b44152b0..00000000 --- a/src/simpleserver/export/Export.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2010 SimpleServer authors (see CONTRIBUTORS) - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -package simpleserver.export; - -public interface Export { - public void save(); -} diff --git a/src/simpleserver/export/PropertiesExport.java b/src/simpleserver/export/PropertiesExport.java index 59625ab0..15adf6f3 100644 --- a/src/simpleserver/export/PropertiesExport.java +++ b/src/simpleserver/export/PropertiesExport.java @@ -21,13 +21,17 @@ package simpleserver.export; import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; +import simpleserver.Resource; import simpleserver.config.SortedProperties; -abstract class PropertiesExport implements Export { +abstract class PropertiesExport implements Resource { private static final String LOCATION = "simpleserver" + File.separator + "export"; protected SortedProperties properties; @@ -52,11 +56,28 @@ protected File getFile() { return file; } - new File(LOCATION).mkdir(); populate(); + new File(LOCATION).mkdir(); return file; } + public void load() { + try { + InputStream stream = new FileInputStream(getFile()); + try { + properties.load(stream); + } finally { + stream.close(); + } + } catch (FileNotFoundException e) { + save(); + } catch (IOException e) { + properties.clear(); + populate(); + save(); + } + } + public void save() { try { OutputStream stream = new FileOutputStream(getFile()); @@ -103,7 +124,7 @@ public static String join(String glue, String[] pieces) { } if (pieces.length > 0) { - return value.substring(0, value.length() - 2); + return value.substring(0, value.length() - 1); } else { return ""; } diff --git a/src/simpleserver/options/defaults/simpleserver.properties b/src/simpleserver/options/defaults/simpleserver.properties index 27ba0026..99b620eb 100644 --- a/src/simpleserver/options/defaults/simpleserver.properties +++ b/src/simpleserver/options/defaults/simpleserver.properties @@ -6,6 +6,7 @@ c10tMins=60 custAuth=false enableRcon=false 
enableTelnet=false +enableCustAuthExport=false gameMode=0 internalPort=25566 ipAddress=0.0.0.0
be724da23f0c63da3e4d517380b9fe8c3916a810
agorava$agorava-core
AGOVA-35 Add support for Weld 2.0 * ProcessBean<X> changed to ProcessBean<? extends X> * Calls to BeanManager.getBeans not allowed until AfterDeploymentValidation Works on both weld-1.x, weld-2.x and owb profiles. Changed to use JavaArchive in @Deployment instead of GenericArchive due to bad assumption in OpenWebBeans container: https://issues.apache.org/jira/browse/OWB-880
a
https://github.com/agorava/agorava-core
diff --git a/agorava-core-impl-cdi/pom.xml b/agorava-core-impl-cdi/pom.xml index b5bf8c0..2d7dde6 100644 --- a/agorava-core-impl-cdi/pom.xml +++ b/agorava-core-impl-cdi/pom.xml @@ -137,9 +137,9 @@ </profile> <profile> - <id>weld</id> + <id>weld-1.x</id> - <!-- use this profile to compile and test Agorava with Weld --> + <!-- use this profile to compile and test Agorava with Weld 1 --> <activation> <activeByDefault>true</activeByDefault> <property> @@ -148,7 +148,6 @@ </property> </activation> - <properties> <arquillian>weld-ee-embedded-1.1</arquillian> </properties> @@ -175,6 +174,42 @@ </profile> + <profile> + <id>weld-2.x</id> + + <!-- use this profile to compile and test Agorava with Weld 2 --> + <activation> + <property> + <name>arquillian</name> + <value>weld-ee-embedded-1.1</value> + </property> + </activation> + + <properties> + <arquillian>weld-ee-embedded-1.1</arquillian> + </properties> + <dependencies> + <dependency> + <groupId>org.jboss.spec</groupId> + <artifactId>jboss-javaee-6.0</artifactId> + <type>pom</type> + </dependency> + <dependency> + <groupId>org.jboss.arquillian.container</groupId> + <artifactId>arquillian-weld-ee-embedded-1.1</artifactId> + </dependency> + <dependency> + <groupId>org.jboss.weld</groupId> + <artifactId>weld-core</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-simple</artifactId> + </dependency> + </dependencies> + + </profile> </profiles> <build> diff --git a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/AgoravaExtension.java b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/AgoravaExtension.java index 0eb6134..21a6e7a 100644 --- a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/AgoravaExtension.java +++ b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/AgoravaExtension.java @@ -16,22 +16,10 @@ package org.agorava.core.cdi; -import com.google.common.collect.BiMap; -import com.google.common.collect.HashBiMap; -import 
com.google.common.collect.Iterables; -import org.agorava.core.api.*; -import org.agorava.core.api.exception.AgoravaException; -import org.agorava.core.api.oauth.OAuthAppSettings; -import org.agorava.core.oauth.OAuthSessionImpl; -import org.agorava.core.oauth.scribe.OAuthProviderScribe; -import org.apache.deltaspike.core.util.bean.BeanBuilder; -import org.apache.deltaspike.core.util.metadata.builder.AnnotatedTypeBuilder; +import static com.google.common.collect.Sets.newHashSet; +import static java.util.logging.Level.INFO; +import static java.util.logging.Level.WARNING; -import javax.enterprise.context.ApplicationScoped; -import javax.enterprise.context.Dependent; -import javax.enterprise.context.spi.CreationalContext; -import javax.enterprise.event.Observes; -import javax.enterprise.inject.spi.*; import java.io.Serializable; import java.lang.annotation.Annotation; import java.lang.reflect.Type; @@ -41,9 +29,45 @@ import java.util.Set; import java.util.logging.Logger; -import static com.google.common.collect.Sets.newHashSet; -import static java.util.logging.Level.INFO; -import static java.util.logging.Level.WARNING; +import javax.enterprise.context.ApplicationScoped; +import javax.enterprise.context.Dependent; +import javax.enterprise.context.spi.CreationalContext; +import javax.enterprise.event.Observes; +import javax.enterprise.inject.Any; +import javax.enterprise.inject.spi.AfterBeanDiscovery; +import javax.enterprise.inject.spi.AfterDeploymentValidation; +import javax.enterprise.inject.spi.Annotated; +import javax.enterprise.inject.spi.AnnotatedConstructor; +import javax.enterprise.inject.spi.AnnotatedField; +import javax.enterprise.inject.spi.AnnotatedMember; +import javax.enterprise.inject.spi.AnnotatedMethod; +import javax.enterprise.inject.spi.AnnotatedType; +import javax.enterprise.inject.spi.Bean; +import javax.enterprise.inject.spi.BeanManager; +import javax.enterprise.inject.spi.BeforeBeanDiscovery; +import javax.enterprise.inject.spi.Extension; +import 
javax.enterprise.inject.spi.ProcessAnnotatedType; +import javax.enterprise.inject.spi.ProcessBean; +import javax.enterprise.inject.spi.ProcessProducer; +import javax.enterprise.inject.spi.ProcessProducerMethod; +import javax.enterprise.inject.spi.Producer; +import javax.enterprise.util.AnnotationLiteral; + +import org.agorava.core.api.ApplyQualifier; +import org.agorava.core.api.GenericRoot; +import org.agorava.core.api.Injectable; +import org.agorava.core.api.RemoteApi; +import org.agorava.core.api.ServiceRelated; +import org.agorava.core.api.exception.AgoravaException; +import org.agorava.core.api.oauth.OAuthAppSettings; +import org.agorava.core.oauth.OAuthSessionImpl; +import org.agorava.core.oauth.scribe.OAuthProviderScribe; +import org.apache.deltaspike.core.util.bean.BeanBuilder; +import org.apache.deltaspike.core.util.metadata.builder.AnnotatedTypeBuilder; + +import com.google.common.collect.BiMap; +import com.google.common.collect.HashBiMap; +import com.google.common.collect.Iterables; /** * Agorava CDI extension to discover existing module and configured modules @@ -52,6 +76,8 @@ */ public class AgoravaExtension implements Extension, Serializable { + private static final long serialVersionUID = 1L; + private static final Set<Annotation> servicesQualifiersConfigured = newHashSet(); private static Logger log = Logger.getLogger(AgoravaExtension.class.getName()); private static BiMap<String, Annotation> servicesToQualifier = HashBiMap.create(); @@ -104,8 +130,12 @@ public static void setMultiSession(boolean ms) { * no matching meta-annotation was found. */ public static Set<Annotation> getAnnotationsWithMeta(Annotated element, final Class<? extends Annotation> metaAnnotationType) { + return getAnnotationsWithMeta(element.getAnnotations(), metaAnnotationType); + } + + public static Set<Annotation> getAnnotationsWithMeta(Set<Annotation> qualifiers, final Class<? 
extends Annotation> metaAnnotationType) { Set<Annotation> annotations = new HashSet<Annotation>(); - for (Annotation annotation : element.getAnnotations()) { + for (Annotation annotation : qualifiers) { if (annotation.annotationType().isAnnotationPresent(metaAnnotationType)) { annotations.add(annotation); } @@ -185,19 +215,20 @@ private <T> void processGenericAnnotatedType(ProcessAnnotatedType<T> pat) { pat.setAnnotatedType(atb.create()); } - } else + } else { pat.veto(); + } } - public void processGenericOauthService(@Observes ProcessAnnotatedType<OAuthServiceImpl> pat) { + public void processGenericOauthService(@Observes ProcessAnnotatedType<? extends OAuthServiceImpl> pat) { processGenericAnnotatedType(pat); } - public void processGenericOauthProvider(@Observes ProcessAnnotatedType<OAuthProviderScribe> pat) { + public void processGenericOauthProvider(@Observes ProcessAnnotatedType<? extends OAuthProviderScribe> pat) { processGenericAnnotatedType(pat); } - public void processGenericSession(@Observes ProcessAnnotatedType<OAuthSessionImpl> pat) { + public void processGenericSession(@Observes ProcessAnnotatedType<? extends OAuthSessionImpl> pat) { processGenericAnnotatedType(pat); } @@ -242,30 +273,24 @@ public void processOAuthSettingsProducer(@Observes final ProcessProducer<?, OAut //----------------- Process Bean Phase ---------------------------------- - private void CommonsProcessRemoteService(ProcessBean<RemoteApi> pb, BeanManager beanManager) { - CreationalContext ctx = beanManager.createCreationalContext(null); + /* + * This does practically not do much anymore after the discovery was moved + * to AfterDeploymentValidation. see https://issues.jboss.org/browse/CDI-274 + * Kept around to do simple deployment validation of ServiceRelated qualifier. + */ + private void CommonsProcessRemoteService(ProcessBean<? 
extends RemoteApi> pb) { Annotated annotated = pb.getAnnotated(); Set<Annotation> qualifiers = AgoravaExtension.getAnnotationsWithMeta(annotated, ServiceRelated.class); if (qualifiers.size() != 1) throw new AgoravaException("A RemoteService bean should have one and only one service related Qualifier : " + pb.getAnnotated().toString()); - Annotation qual = Iterables.getOnlyElement(qualifiers); - log.log(INFO, "Found new service related qualifier : {0}", qual); - - Bean<?> beanSoc = pb.getBean(); - - final RemoteApi smah = (RemoteApi) beanManager.getReference(beanSoc, RemoteApi.class, ctx); - String name = smah.getServiceName(); - servicesToQualifier.put(name, qual); - - ctx.release(); } - public void processRemoteServiceRoot(@Observes ProcessBean<RemoteApi> pb, BeanManager beanManager) { - CommonsProcessRemoteService(pb, beanManager); + public void processRemoteServiceRoot(@Observes ProcessBean<? extends RemoteApi> pb) { + CommonsProcessRemoteService(pb); } - public void processRemoteServiceRoot(@Observes ProcessProducerMethod<RemoteApi, ?> pb, BeanManager beanManager) { - CommonsProcessRemoteService((ProcessBean<RemoteApi>) pb, beanManager); + public void processRemoteServiceRoot(@Observes ProcessProducerMethod<? extends RemoteApi, ?> pb) { + CommonsProcessRemoteService((ProcessBean<? 
extends RemoteApi>) pb); } @@ -320,9 +345,28 @@ public void registerGenericBeans(@Observes AfterBeanDiscovery abd, BeanManager b //--------------------- After Deployment validation phase - public void endOfExtension(@Observes AfterDeploymentValidation adv) { + public void endOfExtension(@Observes AfterDeploymentValidation adv, BeanManager beanManager) { + + registerServiceNames(beanManager); + log.info("Agorava initialization complete"); } + private void registerServiceNames(BeanManager beanManager) { + Set<Bean<?>> beans = beanManager.getBeans(RemoteApi.class, new AnyLiteral()); + + for(Bean<?> bean : beans) { + Set<Annotation> qualifiers = getAnnotationsWithMeta(bean.getQualifiers(), ServiceRelated.class); + Annotation qual = Iterables.getOnlyElement(qualifiers); + CreationalContext<?> ctx = beanManager.createCreationalContext(null); + final RemoteApi smah = (RemoteApi) beanManager.getReference(bean, RemoteApi.class, ctx); + String name = smah.getServiceName(); + servicesToQualifier.put(name, qual); + ctx.release(); + } + } + public static class AnyLiteral extends AnnotationLiteral<Any> implements Any { + private static final long serialVersionUID = 1L; + } } diff --git a/agorava-core-impl-cdi/src/test/java/org/agorava/core/cdi/test/AgoravaTestDeploy.java b/agorava-core-impl-cdi/src/test/java/org/agorava/core/cdi/test/AgoravaTestDeploy.java index ac2da49..418debf 100644 --- a/agorava-core-impl-cdi/src/test/java/org/agorava/core/cdi/test/AgoravaTestDeploy.java +++ b/agorava-core-impl-cdi/src/test/java/org/agorava/core/cdi/test/AgoravaTestDeploy.java @@ -16,15 +16,18 @@ package org.agorava.core.cdi.test; +import java.io.FileNotFoundException; + import org.jboss.arquillian.container.test.api.Deployment; -import org.jboss.shrinkwrap.api.*; +import org.jboss.shrinkwrap.api.Archive; +import org.jboss.shrinkwrap.api.ArchivePath; +import org.jboss.shrinkwrap.api.Filter; +import org.jboss.shrinkwrap.api.ShrinkWrap; import org.jboss.shrinkwrap.api.asset.EmptyAsset; import 
org.jboss.shrinkwrap.api.spec.JavaArchive; import org.jboss.shrinkwrap.api.spec.WebArchive; import org.jboss.shrinkwrap.resolver.api.maven.Maven; -import java.io.FileNotFoundException; - /** * Created with IntelliJ IDEA. * User: antoine @@ -45,10 +48,10 @@ public boolean include(ArchivePath path) { .addAsResource("META-INF/services/javax.enterprise.inject.spi.Extension") .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"); - GenericArchive[] libs = Maven.resolver() + JavaArchive[] libs = Maven.resolver() .loadPomFromFile("pom.xml") .resolve("org.apache.deltaspike.core:deltaspike-core-impl") - .withTransitivity().as(GenericArchive.class); + .withTransitivity().as(JavaArchive.class); WebArchive ret = ShrinkWrap
6be649b286d7b3e2a1193cbe808f8a46fa1ae4c4
internetarchive$heritrix3
[HER-1783] BloomFilter64bit bit-length bug prevents full bitfield from being used; premature saturation * BloomFilter64bit.java include the split-to-subarrays (for larger bitfields) and round-up-to-power-of-2 (for performance) options previously in largely-redundant classes fit a number of problems with int/long overflow and bitwise ops add methods for reporting/testing * BloomFilter.java add methods for reporting/testing * BloomFilterTest.java, BloomFilter64bitTest.java more extensive tests, including two lengthy tests of default/oversized blooms usually disabled by renaming * BloomFilter32bit.java, BloomFilter32bitSplit.java, BloomFilter32bp2.java, BloomFilter32bp2Split.java deleted as buggy or redundant * BenchmarkBlooms.java move to test source dir * BloomUriUniqFilter.java change to accept filter instance (rather than parameters) for added configuration flexibility fix comments * BloomUriUniqFilterTest.java supply filter not paramters
p
https://github.com/internetarchive/heritrix3
diff --git a/commons/src/main/java/org/archive/util/BloomFilter.java b/commons/src/main/java/org/archive/util/BloomFilter.java index c83fbc5e7..17de30d2d 100644 --- a/commons/src/main/java/org/archive/util/BloomFilter.java +++ b/commons/src/main/java/org/archive/util/BloomFilter.java @@ -1,15 +1,11 @@ /* BloomFilter * -* $Id$ -* -* Created on Jun 30, 2005 -* -* Copyright (C) 2005 Internet Archive; an adaptation of +* Copyright (C) 2010 Internet Archive; an adaptation of * LGPL work (C) Sebastiano Vigna * * This file is part of the Heritrix web crawler (crawler.archive.org). * -* Heritrix is free software; you can redistribute it and/or modify +* This class is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser Public License as published by * the Free Software Foundation; either version 2.1 of the License, or * any later version. @@ -27,13 +23,13 @@ package org.archive.util; /** - * Common interface for different Bloom filter - * implementations + * Common interface for different Bloom filter implementations * * @author Gordon Mohr */ public interface BloomFilter { - /** The number of character sequences in the filter. + /** The number of character sequences in the filter (considered to be the + * number of add()s that returned 'true') * * @return the number of character sequences in the filter (but see {@link #contains(CharSequence)}). */ @@ -67,4 +63,23 @@ public interface BloomFilter { * @return memory used by bloom bitfield, in bytes */ public abstract long getSizeBytes(); + + /** + * Report the number of expected inserts used at instantiation time to + * calculate the bitfield size. + * + * @return long number of inserts expected at instantiation + */ + public abstract long getExpectedInserts(); + + /** + * Report the number of internal independent hash function (and thus the + * number of bits set/checked for each item presented). 
+ * + * @return long count of hash functions + */ + public abstract long getHashCount(); + + // public for white-box unit testing + public boolean getBit(long bitIndex); } \ No newline at end of file diff --git a/commons/src/main/java/org/archive/util/BloomFilter32bit.java b/commons/src/main/java/org/archive/util/BloomFilter32bit.java deleted file mode 100644 index 7a52ce392..000000000 --- a/commons/src/main/java/org/archive/util/BloomFilter32bit.java +++ /dev/null @@ -1,223 +0,0 @@ -/* BloomFilter32bit -* -* $Id$ -* -* Created on Jun 21, 2005 -* -* Copyright (C) 2005 Internet Archive; a slight adaptation of -* LGPL work (C) Sebastiano Vigna -* -* This file is part of the Heritrix web crawler (crawler.archive.org). -* -* Heritrix is free software; you can redistribute it and/or modify -* it under the terms of the GNU Lesser Public License as published by -* the Free Software Foundation; either version 2.1 of the License, or -* any later version. -* -* Heritrix is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -* GNU Lesser Public License for more details. -* -* You should have received a copy of the GNU Lesser Public License -* along with Heritrix; if not, write to the Free Software -* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package org.archive.util; - -import java.io.Serializable; -import java.security.SecureRandom; - -/** A Bloom filter. - * - * SLIGHTLY ADAPTED VERSION OF MG4J it.unimi.dsi.mg4j.util.BloomFilter - * - * <p>KEY CHANGES: - * - * <ul> - * <li>Adapted to use 32bit ops as much as possible... 
may be slightly - * faster on 32bit hardware/OS</li> - * <li>NUMBER_OF_WEIGHTS is 2083, to better avoid collisions between - * similar strings</li> - * <li>Removed dependence on cern.colt MersenneTwister (replaced with - * SecureRandom) and QuickBitVector (replaced with local methods).</li> - * </ul> - * - * <hr> - * - * <P>Instances of this class represent a set of character sequences (with false positives) - * using a Bloom filter. Because of the way Bloom filters work, - * you cannot remove elements. - * - * <P>Bloom filters have an expected error rate, depending on the number - * of hash functions used, on the filter size and on the number of elements in the filter. This implementation - * uses a variable optimal number of hash functions, depending on the expected - * number of elements. More precisely, a Bloom - * filter for <var>n</var> character sequences with <var>d</var> hash functions will use - * ln 2 <var>d</var><var>n</var> &#8776; 1.44 <var>d</var><var>n</var> bits; - * false positives will happen with probability 2<sup>-<var>d</var></sup>. - * - * <P>Hash functions are generated at creation time using universal hashing. Each hash function - * uses {@link #NUMBER_OF_WEIGHTS} random integers, which are cyclically multiplied by - * the character codes in a character sequence. The resulting integers are XOR-ed together. - * - * <P>This class exports access methods that are very similar to those of {@link java.util.Set}, - * but it does not implement that interface, as too many non-optional methods - * would be unimplementable (e.g., iterators). - * - * @author Sebastiano Vigna - */ -public class BloomFilter32bit implements Serializable, BloomFilter { - - private static final long serialVersionUID = -1567837798979475689L; - - /** The number of weights used to create hash functions. */ - final public static int NUMBER_OF_WEIGHTS = 2083; // CHANGED FROM 16 - /** The number of bits in this filter. 
*/ - final public long m; - /** The number of hash functions used by this filter. */ - final public int d; - /** The underlying bit vectorS. */ - final private int[] bits; - /** The random integers used to generate the hash functions. */ - final private int[][] weight; - - /** The number of elements currently in the filter. It may be - * smaller than the actual number of additions of distinct character - * sequences because of false positives. - */ - private int size; - - /** The natural logarithm of 2, used in the computation of the number of bits. */ - private final static double NATURAL_LOG_OF_2 = Math.log( 2 ); - - private final static boolean DEBUG = false; - - /** Creates a new Bloom filter with given number of hash functions and expected number of elements. - * - * @param n the expected number of elements. - * @param d the number of hash functions; if the filter add not more than <code>n</code> elements, - * false positives will happen with probability 2<sup>-<var>d</var></sup>. - */ - public BloomFilter32bit( final int n, final int d ) { - this.d = d; - int len = - (int)Math.ceil( ( (long)n * (long)d / NATURAL_LOG_OF_2 ) / 32 ); - this.m = len*32L; - if ( m >= 1L<<32 ) { - throw new IllegalArgumentException( "This filter would require " + m + " bits" ); - } - bits = new int[ len ]; - - if ( DEBUG ) System.err.println( "Number of bits: " + m ); - - // seeded for reproduceable behavior in repeated runs; BUT: - // SecureRandom's default implementation (as of 1.5) - // seems to mix in its own seeding. - final SecureRandom random = new SecureRandom(new byte[] {19,96}); - weight = new int[ d ][]; - for( int i = 0; i < d; i++ ) { - weight[ i ] = new int[ NUMBER_OF_WEIGHTS ]; - for( int j = 0; j < NUMBER_OF_WEIGHTS; j++ ) - weight[ i ][ j ] = random.nextInt(); - } - } - - /** The number of character sequences in the filter. - * - * @return the number of character sequences in the filter (but see {@link #contains(CharSequence)}). 
- */ - - public int size() { - return size; - } - - /** Hashes the given sequence with the given hash function. - * - * @param s a character sequence. - * @param l the length of <code>s</code>. - * @param k a hash function index (smaller than {@link #d}). - * @return the position in the filter corresponding to <code>s</code> for the hash function <code>k</code>. - */ - private long hash( final CharSequence s, final int l, final int k ) { - final int[] w = weight[ k ]; - int h = 0, i = l; - while( i-- != 0 ) h ^= s.charAt( i ) * w[ i % NUMBER_OF_WEIGHTS ]; - return ((long)h-Integer.MIN_VALUE) % m; - } - - /** Checks whether the given character sequence is in this filter. - * - * <P>Note that this method may return true on a character sequence that is has - * not been added to the filter. This will happen with probability 2<sub>-<var>d</var></sub>, - * where <var>d</var> is the number of hash functions specified at creation time, if - * the number of the elements in the filter is less than <var>n</var>, the number - * of expected elements specified at creation time. - * - * @param s a character sequence. - * @return true if the sequence is in the filter (or if a sequence with the - * same hash sequence is in the filter). - */ - - public boolean contains( final CharSequence s ) { - int i = d, l = s.length(); - while( i-- != 0 ) if ( ! getBit( hash( s, l, i ) ) ) return false; - return true; - } - - /** Adds a character sequence to the filter. - * - * @param s a character sequence. - * @return true if the character sequence was not in the filter (but see {@link #contains(CharSequence)}). - */ - - public boolean add( final CharSequence s ) { - boolean result = false; - int i = d, l = s.length(); - long h; - while( i-- != 0 ) { - h = hash( s, l, i ); - if ( ! 
getBit( h ) ) result = true; - setBit( h ); - } - if ( result ) size++; - return result; - } - - protected final static long ADDRESS_BITS_PER_UNIT = 5; // 32=2^5 - protected final static long BIT_INDEX_MASK = 31; // = BITS_PER_UNIT - 1; - - /** - * Returns from the local bitvector the value of the bit with - * the specified index. The value is <tt>true</tt> if the bit - * with the index <tt>bitIndex</tt> is currently set; otherwise, - * returns <tt>false</tt>. - * - * (adapted from cern.colt.bitvector.QuickBitVector) - * - * @param bitIndex the bit index. - * @return the value of the bit with the specified index. - */ - protected boolean getBit(long bitIndex) { - return ((bits[(int)(bitIndex >> ADDRESS_BITS_PER_UNIT)] & (1 << (bitIndex & BIT_INDEX_MASK))) != 0); - } - - /** - * Changes the bit with index <tt>bitIndex</tt> in local bitvector. - * - * (adapted from cern.colt.bitvector.QuickBitVector) - * - * @param bitIndex the index of the bit to be set. - */ - protected void setBit(long bitIndex) { - bits[(int)(bitIndex >> ADDRESS_BITS_PER_UNIT)] |= 1 << (bitIndex & BIT_INDEX_MASK); - } - - /* (non-Javadoc) - * @see org.archive.util.BloomFilter#getSizeBytes() - */ - public long getSizeBytes() { - return bits.length*4; - } -} diff --git a/commons/src/main/java/org/archive/util/BloomFilter32bitSplit.java b/commons/src/main/java/org/archive/util/BloomFilter32bitSplit.java deleted file mode 100644 index fd71c8847..000000000 --- a/commons/src/main/java/org/archive/util/BloomFilter32bitSplit.java +++ /dev/null @@ -1,251 +0,0 @@ -/* BloomFilter32bit -* -* $Id$ -* -* Created on Jun 21, 2005 -* -* Copyright (C) 2005 Internet Archive; a slight adaptation of -* LGPL work (C) Sebastiano Vigna -* -* This file is part of the Heritrix web crawler (crawler.archive.org). 
-* -* Heritrix is free software; you can redistribute it and/or modify -* it under the terms of the GNU Lesser Public License as published by -* the Free Software Foundation; either version 2.1 of the License, or -* any later version. -* -* Heritrix is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -* GNU Lesser Public License for more details. -* -* You should have received a copy of the GNU Lesser Public License -* along with Heritrix; if not, write to the Free Software -* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package org.archive.util; - -import java.io.Serializable; -import java.security.SecureRandom; - -/** A Bloom filter. - * - * SLIGHTLY ADAPTED VERSION OF MG4J it.unimi.dsi.mg4j.util.BloomFilter - * - * <p>KEY CHANGES: - * - * <ul> - * <li>Adapted to use 32bit ops as much as possible... may be slightly - * faster on 32bit hardware/OS</li> - * <li>NUMBER_OF_WEIGHTS is 2083, to better avoid collisions between - * similar strings</li> - * <li>Removed dependence on cern.colt MersenneTwister (replaced with - * SecureRandom) and QuickBitVector (replaced with local methods).</li> - * </ul> - * - * <hr> - * - * <P>Instances of this class represent a set of character sequences (with false positives) - * using a Bloom filter. Because of the way Bloom filters work, - * you cannot remove elements. - * - * <P>Bloom filters have an expected error rate, depending on the number - * of hash functions used, on the filter size and on the number of elements in the filter. This implementation - * uses a variable optimal number of hash functions, depending on the expected - * number of elements. 
More precisely, a Bloom - * filter for <var>n</var> character sequences with <var>d</var> hash functions will use - * ln 2 <var>d</var><var>n</var> &#8776; 1.44 <var>d</var><var>n</var> bits; - * false positives will happen with probability 2<sup>-<var>d</var></sup>. - * - * <P>Hash functions are generated at creation time using universal hashing. Each hash function - * uses {@link #NUMBER_OF_WEIGHTS} random integers, which are cyclically multiplied by - * the character codes in a character sequence. The resulting integers are XOR-ed together. - * - * <P>This class exports access methods that are very similar to those of {@link java.util.Set}, - * but it does not implement that interface, as too many non-optional methods - * would be unimplementable (e.g., iterators). - * - * @author Sebastiano Vigna - */ -public class BloomFilter32bitSplit implements Serializable, BloomFilter { - - private static final long serialVersionUID = -164106965277863971L; - - /** The number of weights used to create hash functions. */ - final public static int NUMBER_OF_WEIGHTS = 2083; // CHANGED FROM 16 - /** The number of bits in this filter. */ - final public long m; - /** The number of hash functions used by this filter. */ - final public int d; - /** The underlying bit vectorS. */ -// final private int[] bits; - final private int[][] bits; - /** The random integers used to generate the hash functions. */ - final private int[][] weight; - - /** The number of elements currently in the filter. It may be - * smaller than the actual number of additions of distinct character - * sequences because of false positives. - */ - private int size; - - /** The natural logarithm of 2, used in the computation of the number of bits. */ - private final static double NATURAL_LOG_OF_2 = Math.log( 2 ); - - /** number of ints in 1MB. 
*/ - private final static int ONE_MB_INTS = 1 << 18; // - - private final static boolean DEBUG = false; - - /** Creates a new Bloom filter with given number of hash functions and expected number of elements. - * - * @param n the expected number of elements. - * @param d the number of hash functions; if the filter add not more than <code>n</code> elements, - * false positives will happen with probability 2<sup>-<var>d</var></sup>. - */ - public BloomFilter32bitSplit( final int n, final int d ) { - this.d = d; - int len = - (int)Math.ceil( ( (long)n * (long)d / NATURAL_LOG_OF_2 ) / 32 ); - // round up to ensure divisible into 1MiB chunks - len = ((len / ONE_MB_INTS)+1)*ONE_MB_INTS; - this.m = len*32L; - if ( m >= 1L<<54 ) { - throw new IllegalArgumentException( "This filter would require " + m + " bits" ); - } -// bits = new int[ len ]; - bits = new int[ len/ONE_MB_INTS ][ONE_MB_INTS]; - - if ( DEBUG ) System.err.println( "Number of bits: " + m ); - - // seeded for reproduceable behavior in repeated runs; BUT: - // SecureRandom's default implementation (as of 1.5) - // seems to mix in its own seeding. - final SecureRandom random = new SecureRandom(new byte[] {19,96}); - weight = new int[ d ][]; - for( int i = 0; i < d; i++ ) { - weight[ i ] = new int[ NUMBER_OF_WEIGHTS ]; - for( int j = 0; j < NUMBER_OF_WEIGHTS; j++ ) - weight[ i ][ j ] = random.nextInt(); - } - } - - /** The number of character sequences in the filter. - * - * @return the number of character sequences in the filter (but see {@link #contains(CharSequence)}). - */ - - public int size() { - return size; - } - - /** Hashes the given sequence with the given hash function. - * - * @param s a character sequence. - * @param l the length of <code>s</code>. - * @param k a hash function index (smaller than {@link #d}). - * @return the position in the filter corresponding to <code>s</code> for the hash function <code>k</code>. 
- */ - private long hash( final CharSequence s, final int l, final int k ) { - final int[] w = weight[ k ]; - int h = 0, i = l; - while( i-- != 0 ) h ^= s.charAt( i ) * w[ i % NUMBER_OF_WEIGHTS ]; - return ((long)h-Integer.MIN_VALUE) % m; - } - - /** Checks whether the given character sequence is in this filter. - * - * <P>Note that this method may return true on a character sequence that is has - * not been added to the filter. This will happen with probability 2<sub>-<var>d</var></sub>, - * where <var>d</var> is the number of hash functions specified at creation time, if - * the number of the elements in the filter is less than <var>n</var>, the number - * of expected elements specified at creation time. - * - * @param s a character sequence. - * @return true if the sequence is in the filter (or if a sequence with the - * same hash sequence is in the filter). - */ - - public boolean contains( final CharSequence s ) { - int i = d, l = s.length(); - while( i-- != 0 ) if ( ! getBit( hash( s, l, i ) ) ) return false; - return true; - } - - /** Adds a character sequence to the filter. - * - * @param s a character sequence. - * @return true if the character sequence was not in the filter (but see {@link #contains(CharSequence)}). - */ - - public boolean add( final CharSequence s ) { - boolean result = false; - int i = d, l = s.length(); - long h; - while( i-- != 0 ) { - h = hash( s, l, i ); - if ( ! setGetBit( h ) ) result = true; - } - if ( result ) size++; - return result; - } - - protected final static long ADDRESS_BITS_PER_UNIT = 5; // 32=2^5 - protected final static long BIT_INDEX_MASK = 31; // = BITS_PER_UNIT - 1; - - /** - * Returns from the local bitvector the value of the bit with - * the specified index. The value is <tt>true</tt> if the bit - * with the index <tt>bitIndex</tt> is currently set; otherwise, - * returns <tt>false</tt>. - * - * (adapted from cern.colt.bitvector.QuickBitVector) - * - * @param bitIndex the bit index. 
- * @return the value of the bit with the specified index. - */ - protected boolean getBit(long bitIndex) { - long intIndex = (bitIndex >>> ADDRESS_BITS_PER_UNIT); - return ((bits[(int)(intIndex / ONE_MB_INTS)][(int)(intIndex % ONE_MB_INTS)] - & (1 << (bitIndex & BIT_INDEX_MASK))) != 0); - } - - /** - * Changes the bit with index <tt>bitIndex</tt> in local bitvector. - * - * (adapted from cern.colt.bitvector.QuickBitVector) - * - * @param bitIndex the index of the bit to be set. - */ - protected void setBit(long bitIndex) { - long intIndex = (bitIndex >>> ADDRESS_BITS_PER_UNIT); - bits[(int)(intIndex / ONE_MB_INTS)][(int)(intIndex % ONE_MB_INTS)] - |= 1 << (bitIndex & BIT_INDEX_MASK); - } - - /** - * Sets the bit with index <tt>bitIndex</tt> in local bitvector -- - * returning the old value. - * - * (adapted from cern.colt.bitvector.QuickBitVector) - * - * @param bitIndex the index of the bit to be set. - */ - protected boolean setGetBit(long bitIndex) { - long intIndex = (int) (bitIndex >>> ADDRESS_BITS_PER_UNIT); - int a = (int)(intIndex / ONE_MB_INTS); - int b = (int)(intIndex % ONE_MB_INTS); - int mask = 1 << (bitIndex & BIT_INDEX_MASK); - boolean ret = ((bits[a][b] & (mask)) != 0); - bits[a][b] |= mask; - return ret; - } - - /* (non-Javadoc) - * @see org.archive.util.BloomFilter#getSizeBytes() - */ - public long getSizeBytes() { - return bits.length*bits[0].length*4; - } -} diff --git a/commons/src/main/java/org/archive/util/BloomFilter32bp2.java b/commons/src/main/java/org/archive/util/BloomFilter32bp2.java deleted file mode 100644 index ffa64d667..000000000 --- a/commons/src/main/java/org/archive/util/BloomFilter32bp2.java +++ /dev/null @@ -1,235 +0,0 @@ -/* BloomFilter -* -* $Id$ -* -* Created on Jun 21, 2005 -* -* Copyright (C) 2005 Internet Archive; a slight adaptation of -* LGPL work (C) Sebastiano Vigna -* -* This file is part of the Heritrix web crawler (crawler.archive.org). 
-* -* Heritrix is free software; you can redistribute it and/or modify -* it under the terms of the GNU Lesser Public License as published by -* the Free Software Foundation; either version 2.1 of the License, or -* any later version. -* -* Heritrix is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -* GNU Lesser Public License for more details. -* -* You should have received a copy of the GNU Lesser Public License -* along with Heritrix; if not, write to the Free Software -* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package org.archive.util; - -import java.io.Serializable; -import java.security.SecureRandom; - -/** A Bloom filter. - * - * SLIGHTLY ADAPTED VERSION OF MG4J it.unimi.dsi.mg4j.util.BloomFilter - * - * <p>KEY CHANGES: - * - * <ul> - * <li>Adapted to use 32bit ops as much as possible... may be slightly - * faster on 32bit hardware/OS</li> - * <li>Changed to use bitfield that is a power-of-two in size, allowing - * hash() to use bitshifting rather than modulus... may be slightly - * faster</li> - * <li>NUMBER_OF_WEIGHTS is 2083, to better avoid collisions between - * similar strings</li> - * <li>Removed dependence on cern.colt MersenneTwister (replaced with - * SecureRandom) and QuickBitVector (replaced with local methods).</li> - * </ul> - * - * <hr> - * - * <P>Instances of this class represent a set of character sequences (with false positives) - * using a Bloom filter. Because of the way Bloom filters work, - * you cannot remove elements. - * - * <P>Bloom filters have an expected error rate, depending on the number - * of hash functions used, on the filter size and on the number of elements in the filter. This implementation - * uses a variable optimal number of hash functions, depending on the expected - * number of elements. 
More precisely, a Bloom - * filter for <var>n</var> character sequences with <var>d</var> hash functions will use - * ln 2 <var>d</var><var>n</var> &#8776; 1.44 <var>d</var><var>n</var> bits; - * false positives will happen with probability 2<sup>-<var>d</var></sup>. - * - * <P>Hash functions are generated at creation time using universal hashing. Each hash function - * uses {@link #NUMBER_OF_WEIGHTS} random integers, which are cyclically multiplied by - * the character codes in a character sequence. The resulting integers are XOR-ed together. - * - * <P>This class exports access methods that are very similar to those of {@link java.util.Set}, - * but it does not implement that interface, as too many non-optional methods - * would be unimplementable (e.g., iterators). - * - * @author Sebastiano Vigna - */ -public class BloomFilter32bp2 implements Serializable, BloomFilter { - - private static final long serialVersionUID = -2292902803681146635L; - - /** The number of weights used to create hash functions. */ - final public static int NUMBER_OF_WEIGHTS = 2083; // CHANGED FROM 16 - /** The number of bits in this filter. */ - final public long m; - /** the power-of-two that m is */ - final public long power; // 1<<power == m - /** The number of hash functions used by this filter. */ - final public int d; - /** The underlying bit vectorS. */ - final private int[] bits; - /** The random integers used to generate the hash functions. */ - final private int[][] weight; - - /** The number of elements currently in the filter. It may be - * smaller than the actual number of additions of distinct character - * sequences because of false positives. - */ - private int size; - - /** The natural logarithm of 2, used in the computation of the number of bits. */ - private final static double NATURAL_LOG_OF_2 = Math.log( 2 ); - - private final static boolean DEBUG = false; - - /** Creates a new Bloom filter with given number of hash functions and expected number of elements. 
- * - * @param n the expected number of elements. - * @param d the number of hash functions; if the filter add not more than <code>n</code> elements, - * false positives will happen with probability 2<sup>-<var>d</var></sup>. - */ - public BloomFilter32bp2( final int n, final int d ) { - this.d = d; - long minBits = (long) ((long)n * (long)d / NATURAL_LOG_OF_2); - long pow = 0; - while((1L<<pow) < minBits) { - pow++; - } - this.power = pow; - this.m = 1L<<pow; - int len = (int) (m / 32); - if ( m > 1L<<32 ) { - throw new IllegalArgumentException( "This filter would require " + m + " bits" ); - } - System.out.println("power "+power+" bits "+m+" len "+len); - - bits = new int[ len ]; - - if ( DEBUG ) System.err.println( "Number of bits: " + m ); - - // seeded for reproduceable behavior in repeated runs; BUT: - // SecureRandom's default implementation (as of 1.5) - // seems to mix in its own seeding. - final SecureRandom random = new SecureRandom(new byte[] {19,96}); - weight = new int[ d ][]; - for( int i = 0; i < d; i++ ) { - weight[ i ] = new int[ NUMBER_OF_WEIGHTS ]; - for( int j = 0; j < NUMBER_OF_WEIGHTS; j++ ) - weight[ i ][ j ] = random.nextInt(); - } - } - - /** The number of character sequences in the filter. - * - * @return the number of character sequences in the filter (but see {@link #contains(CharSequence)}). - */ - - public int size() { - return size; - } - - /** Hashes the given sequence with the given hash function. - * - * @param s a character sequence. - * @param l the length of <code>s</code>. - * @param k a hash function index (smaller than {@link #d}). - * @return the position in the filter corresponding to <code>s</code> for the hash function <code>k</code>. 
- */ - private int hash( final CharSequence s, final int l, final int k ) { - final int[] w = weight[ k ]; - int h = 0, i = l; - while( i-- != 0 ) h ^= s.charAt( i ) * w[ i % NUMBER_OF_WEIGHTS ]; - return h >>> (32-power); - } - - /** Checks whether the given character sequence is in this filter. - * - * <P>Note that this method may return true on a character sequence that is has - * not been added to the filter. This will happen with probability 2<sub>-<var>d</var></sub>, - * where <var>d</var> is the number of hash functions specified at creation time, if - * the number of the elements in the filter is less than <var>n</var>, the number - * of expected elements specified at creation time. - * - * @param s a character sequence. - * @return true if the sequence is in the filter (or if a sequence with the - * same hash sequence is in the filter). - */ - - public boolean contains( final CharSequence s ) { - int i = d, l = s.length(); - while( i-- != 0 ) if ( ! getBit( hash( s, l, i ) ) ) return false; - return true; - } - - /** Adds a character sequence to the filter. - * - * @param s a character sequence. - * @return true if the character sequence was not in the filter (but see {@link #contains(CharSequence)}). - */ - - public boolean add( final CharSequence s ) { - boolean result = false; - int i = d, l = s.length(); - int h; - while( i-- != 0 ) { - h = hash( s, l, i ); - if ( ! getBit( h ) ) result = true; - setBit( h ); - } - if ( result ) size++; - return result; - } - - protected final static int ADDRESS_BITS_PER_UNIT = 5; // 32=2^5 - protected final static int BIT_INDEX_MASK = 31; // = BITS_PER_UNIT - 1; - - /** - * Returns from the local bitvector the value of the bit with - * the specified index. The value is <tt>true</tt> if the bit - * with the index <tt>bitIndex</tt> is currently set; otherwise, - * returns <tt>false</tt>. - * - * (adapted from cern.colt.bitvector.QuickBitVector) - * - * @param bitIndex the bit index. 
- * @return the value of the bit with the specified index. - */ - protected boolean getBit(int bitIndex) { - return ((bits[(int)(bitIndex >>> ADDRESS_BITS_PER_UNIT)] & (1 << (bitIndex & BIT_INDEX_MASK))) != 0); - } - - /** - * Changes the bit with index <tt>bitIndex</tt> in local bitvector. - * - * (adapted from cern.colt.bitvector.QuickBitVector) - * - * @param bitIndex the index of the bit to be set. - */ - protected void setBit(int bitIndex) { - bits[(int)(bitIndex >>> ADDRESS_BITS_PER_UNIT)] |= 1 << (bitIndex & BIT_INDEX_MASK); - } - - /* (non-Javadoc) - * @see org.archive.util.BloomFilter#getSizeBytes() - */ - public long getSizeBytes() { - return bits.length*4; - } -} diff --git a/commons/src/main/java/org/archive/util/BloomFilter32bp2Split.java b/commons/src/main/java/org/archive/util/BloomFilter32bp2Split.java deleted file mode 100644 index aba45f75f..000000000 --- a/commons/src/main/java/org/archive/util/BloomFilter32bp2Split.java +++ /dev/null @@ -1,262 +0,0 @@ -/* BloomFilter -* -* $Id$ -* -* Created on Jun 21, 2005 -* -* Copyright (C) 2005 Internet Archive; a slight adaptation of -* LGPL work (C) Sebastiano Vigna -* -* This file is part of the Heritrix web crawler (crawler.archive.org). -* -* Heritrix is free software; you can redistribute it and/or modify -* it under the terms of the GNU Lesser Public License as published by -* the Free Software Foundation; either version 2.1 of the License, or -* any later version. -* -* Heritrix is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -* GNU Lesser Public License for more details. 
-* -* You should have received a copy of the GNU Lesser Public License -* along with Heritrix; if not, write to the Free Software -* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package org.archive.util; - -import java.io.Serializable; -import java.security.SecureRandom; - -/** A Bloom filter. - * - * SLIGHTLY ADAPTED VERSION OF MG4J it.unimi.dsi.mg4j.util.BloomFilter - * - * <p>KEY CHANGES: - * - * <ul> - * <li>Adapted to use 32bit ops as much as possible... may be slightly - * faster on 32bit hardware/OS</li> - * <li>Changed to use bitfield that is a power-of-two in size, allowing - * hash() to use bitshifting rather than modulus... may be slightly - * faster</li> - * <li>NUMBER_OF_WEIGHTS is 2083, to better avoid collisions between - * similar strings</li> - * <li>Removed dependence on cern.colt MersenneTwister (replaced with - * SecureRandom) and QuickBitVector (replaced with local methods).</li> - * </ul> - * - * <hr> - * - * <P>Instances of this class represent a set of character sequences (with false positives) - * using a Bloom filter. Because of the way Bloom filters work, - * you cannot remove elements. - * - * <P>Bloom filters have an expected error rate, depending on the number - * of hash functions used, on the filter size and on the number of elements in the filter. This implementation - * uses a variable optimal number of hash functions, depending on the expected - * number of elements. More precisely, a Bloom - * filter for <var>n</var> character sequences with <var>d</var> hash functions will use - * ln 2 <var>d</var><var>n</var> &#8776; 1.44 <var>d</var><var>n</var> bits; - * false positives will happen with probability 2<sup>-<var>d</var></sup>. - * - * <P>Hash functions are generated at creation time using universal hashing. Each hash function - * uses {@link #NUMBER_OF_WEIGHTS} random integers, which are cyclically multiplied by - * the character codes in a character sequence. 
The resulting integers are XOR-ed together. - * - * <P>This class exports access methods that are very similar to those of {@link java.util.Set}, - * but it does not implement that interface, as too many non-optional methods - * would be unimplementable (e.g., iterators). - * - * @author Sebastiano Vigna - */ -public class BloomFilter32bp2Split implements Serializable, BloomFilter { - - private static final long serialVersionUID = -1504889954381695129L; - - /** The number of weights used to create hash functions. */ - final public static int NUMBER_OF_WEIGHTS = 2083; // CHANGED FROM 16 - /** The number of bits in this filter. */ - final public long m; - /** the power-of-two that m is */ - final public long power; // 1<<power == m - /** The number of hash functions used by this filter. */ - final public int d; - /** The underlying bit vectorS. */ - final private int[][] bits; - /** Bitshift to get first index */ - final private int aShift; - /** Mask to get second index */ - final private int bMask; - /** The random integers used to generate the hash functions. */ - final private int[][] weight; - - /** The number of elements currently in the filter. It may be - * smaller than the actual number of additions of distinct character - * sequences because of false positives. - */ - private int size; - - /** The natural logarithm of 2, used in the computation of the number of bits. */ - private final static double NATURAL_LOG_OF_2 = Math.log( 2 ); - - private final static boolean DEBUG = false; - - /** Creates a new Bloom filter with given number of hash functions and expected number of elements. - * - * @param n the expected number of elements. - * @param d the number of hash functions; if the filter add not more than <code>n</code> elements, - * false positives will happen with probability 2<sup>-<var>d</var></sup>. 
- */ - public BloomFilter32bp2Split( final int n, final int d ) { - this.d = d; - long minBits = (long) ((long)n * (long)d / NATURAL_LOG_OF_2); - long pow = 0; - while((1L<<pow) < minBits) { - pow++; - } - this.power = pow; - this.m = 1L<<pow; - int len = (int) (m / 32); - if ( m > 1L<<32 ) { - throw new IllegalArgumentException( "This filter would require " + m + " bits" ); - } - - aShift = (int) (pow - ADDRESS_BITS_PER_UNIT - 8); - bMask = (1<<aShift) - 1; - bits = new int[256][ 1<<aShift ]; - - System.out.println("power "+power+" bits "+m+" len "+len); - System.out.println("aShift "+aShift+" bMask "+bMask); - - if ( DEBUG ) System.err.println( "Number of bits: " + m ); - - // seeded for reproduceable behavior in repeated runs; BUT: - // SecureRandom's default implementation (as of 1.5) - // seems to mix in its own seeding. - final SecureRandom random = new SecureRandom(new byte[] {19,96}); - weight = new int[ d ][]; - for( int i = 0; i < d; i++ ) { - weight[ i ] = new int[ NUMBER_OF_WEIGHTS ]; - for( int j = 0; j < NUMBER_OF_WEIGHTS; j++ ) - weight[ i ][ j ] = random.nextInt(); - } - } - - /** The number of character sequences in the filter. - * - * @return the number of character sequences in the filter (but see {@link #contains(CharSequence)}). - */ - - public int size() { - return size; - } - - /** Hashes the given sequence with the given hash function. - * - * @param s a character sequence. - * @param l the length of <code>s</code>. - * @param k a hash function index (smaller than {@link #d}). - * @return the position in the filter corresponding to <code>s</code> for the hash function <code>k</code>. - */ - private int hash( final CharSequence s, final int l, final int k ) { - final int[] w = weight[ k ]; - int h = 0, i = l; - while( i-- != 0 ) h ^= s.charAt( i ) * w[ i % NUMBER_OF_WEIGHTS ]; - return h >>> (32-power); - } - - /** Checks whether the given character sequence is in this filter. 
- * - * <P>Note that this method may return true on a character sequence that is has - * not been added to the filter. This will happen with probability 2<sub>-<var>d</var></sub>, - * where <var>d</var> is the number of hash functions specified at creation time, if - * the number of the elements in the filter is less than <var>n</var>, the number - * of expected elements specified at creation time. - * - * @param s a character sequence. - * @return true if the sequence is in the filter (or if a sequence with the - * same hash sequence is in the filter). - */ - - public boolean contains( final CharSequence s ) { - int i = d, l = s.length(); - while( i-- != 0 ) if ( ! getBit( hash( s, l, i ) ) ) return false; - return true; - } - - /** Adds a character sequence to the filter. - * - * @param s a character sequence. - * @return true if the character sequence was not in the filter (but see {@link #contains(CharSequence)}). - */ - - public boolean add( final CharSequence s ) { - boolean result = false; - int i = d, l = s.length(); - int h; - while( i-- != 0 ) { - h = hash( s, l, i ); - if ( ! setGetBit( h ) ) result = true; - } - if ( result ) size++; - return result; - } - - protected final static int ADDRESS_BITS_PER_UNIT = 5; // 32=2^5 - protected final static int BIT_INDEX_MASK = 31; // = BITS_PER_UNIT - 1; - - /** - * Returns from the local bitvector the value of the bit with - * the specified index. The value is <tt>true</tt> if the bit - * with the index <tt>bitIndex</tt> is currently set; otherwise, - * returns <tt>false</tt>. - * - * (adapted from cern.colt.bitvector.QuickBitVector) - * - * @param bitIndex the bit index. - * @return the value of the bit with the specified index. - */ - protected boolean getBit(int bitIndex) { - int intIndex = (int)(bitIndex >>> ADDRESS_BITS_PER_UNIT); - return ((bits[intIndex>>>aShift][intIndex&bMask] & (1 << (bitIndex & BIT_INDEX_MASK))) != 0); - } - - /** - * Changes the bit with index <tt>bitIndex</tt> in local bitvector. 
- * - * (adapted from cern.colt.bitvector.QuickBitVector) - * - * @param bitIndex the index of the bit to be set. - */ - protected void setBit(int bitIndex) { - int intIndex = (int)(bitIndex >>> ADDRESS_BITS_PER_UNIT); - bits[intIndex>>>aShift][intIndex&bMask] |= 1 << (bitIndex & BIT_INDEX_MASK); - } - - /** - * Sets the bit with index <tt>bitIndex</tt> in local bitvector -- - * returning the old value. - * - * (adapted from cern.colt.bitvector.QuickBitVector) - * - * @param bitIndex the index of the bit to be set. - */ - protected boolean setGetBit(int bitIndex) { - int intIndex = (int)(bitIndex >>> ADDRESS_BITS_PER_UNIT); - int a = intIndex>>>aShift; - int b = intIndex&bMask; - int mask = 1 << (bitIndex & BIT_INDEX_MASK); - boolean ret = ((bits[a][b] & (mask)) != 0); - bits[a][b] |= mask; - return ret; - } - - /* (non-Javadoc) - * @see org.archive.util.BloomFilter#getSizeBytes() - */ - public long getSizeBytes() { - return bits.length*bits[0].length*4; - } -} diff --git a/commons/src/main/java/org/archive/util/BloomFilter64bit.java b/commons/src/main/java/org/archive/util/BloomFilter64bit.java index 97b0d06e1..aeca26ed2 100644 --- a/commons/src/main/java/org/archive/util/BloomFilter64bit.java +++ b/commons/src/main/java/org/archive/util/BloomFilter64bit.java @@ -28,104 +28,165 @@ import java.io.Serializable; import java.security.SecureRandom; +import java.util.Random; /** A Bloom filter. 
* - * SLIGHTLY ADAPTED VERSION OF MG4J it.unimi.dsi.mg4j.util.BloomFilter + * ADAPTED/IMPROVED VERSION OF MG4J it.unimi.dsi.mg4j.util.BloomFilter * * <p>KEY CHANGES: * * <ul> * <li>NUMBER_OF_WEIGHTS is 2083, to better avoid collisions between - * similar strings</li> + * similar strings (common in the domain of URIs)</li> + * * <li>Removed dependence on cern.colt MersenneTwister (replaced with * SecureRandom) and QuickBitVector (replaced with local methods).</li> - * <li>Adapted to allow long bit indices so long as the index/64 (used - * an array index in bit vector) fits within Integer.MAX_VALUE. (Thus - * it supports filters up to 64*Integer.MAX_VALUE bits in size, or - * 16GiB.)</li> + * + * <li>Adapted to allow long bit indices</li> + * + * <li>Stores bitfield in an array of up to 2^22 arrays of 2^26 longs. Thus, + * bitfield may grow to 2^48 longs in size -- 2PiB, 2*54 bitfield indexes. + * (I expect this will outstrip available RAM for the next few years.)</li> * </ul> * * <hr> * - * <P>Instances of this class represent a set of character sequences (with false positives) - * using a Bloom filter. Because of the way Bloom filters work, + * <P>Instances of this class represent a set of character sequences (with + * false positives) using a Bloom filter. Because of the way Bloom filters work, * you cannot remove elements. * * <P>Bloom filters have an expected error rate, depending on the number - * of hash functions used, on the filter size and on the number of elements in the filter. This implementation - * uses a variable optimal number of hash functions, depending on the expected - * number of elements. More precisely, a Bloom - * filter for <var>n</var> character sequences with <var>d</var> hash functions will use - * ln 2 <var>d</var><var>n</var> &#8776; 1.44 <var>d</var><var>n</var> bits; - * false positives will happen with probability 2<sup>-<var>d</var></sup>. 
+ * of hash functions used, on the filter size and on the number of elements in + * the filter. This implementation uses a variable optimal number of hash + * functions, depending on the expected number of elements. More precisely, a + * Bloom filter for <var>n</var> character sequences with <var>d</var> hash + * functions will use ln 2 <var>d</var><var>n</var> &#8776; + * 1.44 <var>d</var><var>n</var> bits; false positives will happen with + * probability 2<sup>-<var>d</var></sup>. * - * <P>Hash functions are generated at creation time using universal hashing. Each hash function - * uses {@link #NUMBER_OF_WEIGHTS} random integers, which are cyclically multiplied by - * the character codes in a character sequence. The resulting integers are XOR-ed together. + * <P>Hash functions are generated at creation time using universal hashing. + * Each hash function uses {@link #NUMBER_OF_WEIGHTS} random integers, which + * are cyclically multiplied by the character codes in a character sequence. + * The resulting integers are XOR-ed together. * - * <P>This class exports access methods that are very similar to those of {@link java.util.Set}, - * but it does not implement that interface, as too many non-optional methods - * would be unimplementable (e.g., iterators). + * <P>This class exports access methods that are very similar to those of + * {@link java.util.Set}, but it does not implement that interface, as too + * many non-optional methods would be unimplementable (e.g., iterators). * * @author Sebastiano Vigna + * @contributor Gordon Mohr */ public class BloomFilter64bit implements Serializable, BloomFilter { - - private static final long serialVersionUID = 2317000663009608403L; + private static final long serialVersionUID = 2L; /** The number of weights used to create hash functions. */ - final public static int NUMBER_OF_WEIGHTS = 2083; // CHANGED FROM 16 + final static int NUMBER_OF_WEIGHTS = 2083; // CHANGED FROM 16 /** The number of bits in this filter. 
*/ - final public long m; + final protected long m; + /** if bitfield is an exact power of 2 in length, it is this power */ + protected int power = -1; + /** The expected number of inserts; determines calculated size */ + final protected long expectedInserts; /** The number of hash functions used by this filter. */ - final public int d; - /** The underlying bit vector. package access for testing */ - final long[] bits; + final protected int d; + /** The underlying bit vector */ + final protected long[][] bits; /** The random integers used to generate the hash functions. */ - final long[][] weight; + final protected long[][] weight; /** The number of elements currently in the filter. It may be * smaller than the actual number of additions of distinct character * sequences because of false positives. */ - private int size; + int size; /** The natural logarithm of 2, used in the computation of the number of bits. */ - private final static double NATURAL_LOG_OF_2 = Math.log( 2 ); + final static double NATURAL_LOG_OF_2 = Math.log( 2 ); + + /** power-of-two to use as maximum size of bitfield subarrays */ + protected final static int SUBARRAY_POWER_OF_TWO = 26; // 512MiB of longs + /** number of longs in one subarray */ + protected final static int SUBARRAY_LENGTH_IN_LONGS = 1 << SUBARRAY_POWER_OF_TWO; + /** mask for lowest SUBARRAY_POWER_OF_TWO bits */ + protected final static int SUBARRAY_MASK = SUBARRAY_LENGTH_IN_LONGS - 1; //0x0FFFFFFF - private final static boolean DEBUG = false; + final static boolean DEBUG = false; - /** Creates a new Bloom filter with given number of hash functions and expected number of elements. + /** Creates a new Bloom filter with given number of hash functions and + * expected number of elements. + * + * @param n the expected number of elements. + * @param d the number of hash functions; if the filter add not more + * than <code>n</code> elements, false positives will happen with + * probability 2<sup>-<var>d</var></sup>. 
+ */ + public BloomFilter64bit( final long n, final int d) { + this(n,d, new SecureRandom(), false); + } + + public BloomFilter64bit( final long n, final int d, boolean roundUp) { + this(n,d, new SecureRandom(), roundUp); + } + + /** Creates a new Bloom filter with given number of hash functions and + * expected number of elements. * * @param n the expected number of elements. - * @param d the number of hash functions; if the filter add not more than <code>n</code> elements, - * false positives will happen with probability 2<sup>-<var>d</var></sup>. + * @param d the number of hash functions; if the filter add not more + * than <code>n</code> elements, false positives will happen with + * probability 2<sup>-<var>d</var></sup>. + * @param Random weightsGenerator may provide a seeded Random for reproducible + * internal universal hash function weighting + * @param roundUp if true, round bit size up to next-nearest-power-of-2 */ - public BloomFilter64bit( final int n, final int d ) { + public BloomFilter64bit( final long n, final int d, Random weightsGenerator, boolean roundUp ) { + this.expectedInserts = n; this.d = d; - int len = (int)Math.ceil( ( (long)n * (long)d / NATURAL_LOG_OF_2 ) / 64L ); - if ( len/64 > Integer.MAX_VALUE ) throw new IllegalArgumentException( "This filter would require " + len * 64L + " bits" ); - bits = new long[ len ]; - m = bits.length * 64L; + long lenInLongs = (long)Math.ceil( ( (long)n * (long)d / NATURAL_LOG_OF_2 ) / 64L ); + if ( lenInLongs > (1L<<48) ) { + throw new IllegalArgumentException( + "This filter would require " + lenInLongs + " longs, " + + "greater than this classes maximum of 2^48 longs (2PiB)." 
); + } + long lenInBits = lenInLongs * 64L; + + if(roundUp) { + int pow = 0; + while((1L<<pow) < lenInBits) { + pow++; + } + this.power = pow; + this.m = 1L<<pow; + lenInLongs = m/64L; + } else { + this.m = lenInBits; + } + + + int arrayOfArraysLength = (int)((lenInLongs+SUBARRAY_LENGTH_IN_LONGS-1)/SUBARRAY_LENGTH_IN_LONGS); + bits = new long[ (int)(arrayOfArraysLength) ][]; + // ensure last subarray is no longer than necessary + long lenInLongsRemaining = lenInLongs; + for(int i = 0; i < bits.length; i++) { + bits[i] = new long[(int)Math.min(lenInLongsRemaining,SUBARRAY_LENGTH_IN_LONGS)]; + lenInLongsRemaining -= bits[i].length; + } if ( DEBUG ) System.err.println( "Number of bits: " + m ); - // seeded for reproduceable behavior in repeated runs; BUT: - // SecureRandom's default implementation (as of 1.5) - // seems to mix in its own seeding. - final SecureRandom random = new SecureRandom(new byte[] {19,96}); weight = new long[ d ][]; for( int i = 0; i < d; i++ ) { weight[ i ] = new long[ NUMBER_OF_WEIGHTS ]; for( int j = 0; j < NUMBER_OF_WEIGHTS; j++ ) - weight[ i ][ j ] = random.nextLong(); + weight[ i ][ j ] = weightsGenerator.nextLong(); } } /** The number of character sequences in the filter. * - * @return the number of character sequences in the filter (but see {@link #contains(CharSequence)}). + * @return the number of character sequences in the filter (but + * see {@link #contains(CharSequence)}). */ public int size() { @@ -139,15 +200,29 @@ public int size() { * @param k a hash function index (smaller than {@link #d}). * @return the position in the filter corresponding to <code>s</code> for the hash function <code>k</code>. 
*/ - - private long hash( final CharSequence s, final int l, final int k ) { + protected long hash( final CharSequence s, final int l, final int k ) { final long[] w = weight[ k ]; long h = 0; int i = l; while( i-- != 0 ) h ^= s.charAt( i ) * w[ i % NUMBER_OF_WEIGHTS ]; - return ( h & 0x7FFFFFFFFFFFFFFFL ) % m; + long retVal; + if(power>0) { + retVal = h >>> (64-power); + } else { + // ####----####---- + retVal = ( h & 0x7FFFFFFFFFFFFFFFL ) % m; + } + return retVal; } - + + public long[] bitIndexesFor(CharSequence s) { + long[] ret = new long[d]; + for(int i = 0; i < d; i++) { + ret[i] = hash(s,s.length(),i); + } + return ret; + } + /** Checks whether the given character sequence is in this filter. * * <P>Note that this method may return true on a character sequence that is has @@ -179,9 +254,8 @@ public boolean add( final CharSequence s ) { long h; while( i-- != 0 ) { h = hash( s, l, i ); - if ( ! getBit( h ) ) { + if ( ! setGetBit( h ) ) { result = true; - setBit( h ); } } if ( result ) size++; @@ -189,7 +263,7 @@ public boolean add( final CharSequence s ) { } protected final static long ADDRESS_BITS_PER_UNIT = 6; // 64=2^6 - protected final static long BIT_INDEX_MASK = 63; // = BITS_PER_UNIT - 1; + protected final static long BIT_INDEX_MASK = (1<<6)-1; // = 63 = 2^BITS_PER_UNIT - 1; /** * Returns from the local bitvector the value of the bit with @@ -202,8 +276,11 @@ public boolean add( final CharSequence s ) { * @param bitIndex the bit index. * @return the value of the bit with the specified index. 
*/ - protected boolean getBit(long bitIndex) { - return ((bits[(int)(bitIndex >> ADDRESS_BITS_PER_UNIT)] & (1L << (bitIndex & BIT_INDEX_MASK))) != 0); + public boolean getBit(long bitIndex) { + long longIndex = bitIndex >>> ADDRESS_BITS_PER_UNIT; + int arrayIndex = (int) (longIndex >>> SUBARRAY_POWER_OF_TWO); + int subarrayIndex = (int) (longIndex & SUBARRAY_MASK); + return ((bits[arrayIndex][subarrayIndex] & (1L << (bitIndex & BIT_INDEX_MASK))) != 0); } /** @@ -214,13 +291,45 @@ protected boolean getBit(long bitIndex) { * @param bitIndex the index of the bit to be set. */ protected void setBit( long bitIndex) { - bits[(int)(bitIndex >> ADDRESS_BITS_PER_UNIT)] |= 1L << (bitIndex & BIT_INDEX_MASK); + long longIndex = bitIndex >>> ADDRESS_BITS_PER_UNIT; + int arrayIndex = (int) (longIndex >>> SUBARRAY_POWER_OF_TWO); + int subarrayIndex = (int) (longIndex & SUBARRAY_MASK); + bits[arrayIndex][subarrayIndex] |= (1L << (bitIndex & BIT_INDEX_MASK)); + } + + /** + * Sets the bit with index <tt>bitIndex</tt> in local bitvector -- + * returning the old value. + * + * (adapted from cern.colt.bitvector.QuickBitVector) + * + * @param bitIndex the index of the bit to be set. 
+ */ + protected boolean setGetBit( long bitIndex) { + long longIndex = bitIndex >>> ADDRESS_BITS_PER_UNIT; + int arrayIndex = (int) (longIndex >>> SUBARRAY_POWER_OF_TWO); + int subarrayIndex = (int) (longIndex & SUBARRAY_MASK); + long mask = 1L << (bitIndex & BIT_INDEX_MASK); + boolean ret = (bits[arrayIndex][subarrayIndex] & mask)!=0; + bits[arrayIndex][subarrayIndex] |= mask; + return ret; } /* (non-Javadoc) * @see org.archive.util.BloomFilter#getSizeBytes() */ public long getSizeBytes() { - return bits.length*8; + // account for ragged-sized last array + return 8*(((bits.length-1)*bits[0].length)+bits[bits.length-1].length); } + + @Override + public long getExpectedInserts() { + return expectedInserts; + } + + @Override + public long getHashCount() { + return d; + } } diff --git a/commons/src/main/java/org/archive/util/BenchmarkBlooms.java b/commons/src/test/java/org/archive/util/BenchmarkBlooms.java similarity index 55% rename from commons/src/main/java/org/archive/util/BenchmarkBlooms.java rename to commons/src/test/java/org/archive/util/BenchmarkBlooms.java index 535564f9a..b3f206787 100644 --- a/commons/src/main/java/org/archive/util/BenchmarkBlooms.java +++ b/commons/src/test/java/org/archive/util/BenchmarkBlooms.java @@ -49,35 +49,34 @@ public void instanceMain(String[] args) { int d_hashes = (args.length > 2) ? Integer.parseInt(args[2]) : 22; int adds = - (args.length > 3) ? Integer.parseInt(args[3]) : 5000000; + (args.length > 3) ? Integer.parseInt(args[3]) : 10000000; + int contains = + (args.length > 4) ? Integer.parseInt(args[4]) : 8000000; String prefix = - (args.length > 4) ? args[4] : "http://www.archive.org/"; + (args.length > 5) ? 
args[5] : "http://www.archive.org/"; System.out.println( "reps="+reps+" n_expected="+n_expected+ - " d_hashes="+d_hashes+" adds="+adds+" prefix="+prefix); + " d_hashes="+d_hashes+" adds="+adds+ + " contains="+contains+" prefix="+prefix); - BloomFilter bloom64; - BloomFilter bloom32; - BloomFilter bloom32split; - BloomFilter bloom32p2; - BloomFilter bloom32p2split; + BloomFilter64bit bloom64; +// BloomFilter bloom32; +// BloomFilter bloom32split; for (int r=0;r<reps;r++) { - bloom32 = new BloomFilter32bit(n_expected,d_hashes); - testBloom(bloom32,adds,prefix); - bloom32=null; - bloom32split = new BloomFilter32bitSplit(n_expected,d_hashes); - testBloom(bloom32split,adds,prefix); - bloom32split=null; - bloom64 = new BloomFilter64bit(n_expected,d_hashes); - testBloom(bloom64,adds,prefix); - bloom64=null; - bloom32p2 = new BloomFilter32bp2(n_expected,d_hashes); - testBloom(bloom32p2,adds,prefix); - bloom32p2=null; - bloom32p2split = new BloomFilter32bp2Split(n_expected,d_hashes); - testBloom(bloom32p2split,adds,prefix); - bloom32p2split=null; +// bloom32 = new BloomFilter32bit(n_expected,d_hashes); +// testBloom(bloom32,adds,contains,prefix); +// bloom32=null; +// bloom32split = new BloomFilter32bitSplit(n_expected,d_hashes); +// testBloom(bloom32split,adds,contains,prefix); +// bloom32split=null; + bloom64 = new BloomFilter64bit(n_expected,d_hashes); + testBloom(null, bloom64,adds,contains,prefix); + bloom64=null; + // rounded up to power-of-2 bits size + bloom64 = new BloomFilter64bit(n_expected,d_hashes,true); + testBloom("bitsize rounded up",bloom64,adds,contains,prefix); + bloom64=null; } } @@ -87,19 +86,29 @@ public void instanceMain(String[] args) { * @param adds * @param d_hashes */ - private void testBloom(BloomFilter bloom, int adds, String prefix) { + private void testBloom(String note, BloomFilter bloom, int adds, int contains, String prefix) { System.gc(); long startTime = System.currentTimeMillis(); - long falsePositives = 0; - for(int i = 0; i<adds; i++) 
{ + long falsePositivesAdds = 0; + int i = 0; + for(; i<adds; i++) { if(!bloom.add(prefix+Integer.toString(i))) { - falsePositives++; + falsePositivesAdds++; } } + long falsPositivesContains = 0; + for(; i<(adds+contains); i++) { + if(bloom.contains(prefix+Integer.toString(i))) { + falsPositivesContains++; + } + } long finishTime = System.currentTimeMillis(); - System.out.println(bloom.getClass().getName()+": " + System.out.println(bloom.getClass().getName() + +((note!=null)?" ("+note+")" : "") + +":\n " +(finishTime-startTime)+"ms " +bloom.getSizeBytes()+"bytes " - +falsePositives+"false"); + +falsePositivesAdds+" falseDuringAdds " + +falsPositivesContains+" falseDuringContains "); } } diff --git a/commons/src/test/java/org/archive/util/BloomFilter64bitTest.java b/commons/src/test/java/org/archive/util/BloomFilter64bitTest.java index e3ba2f5ae..35b0a64ab 100644 --- a/commons/src/test/java/org/archive/util/BloomFilter64bitTest.java +++ b/commons/src/test/java/org/archive/util/BloomFilter64bitTest.java @@ -19,7 +19,7 @@ package org.archive.util; - +import java.util.Random; /** * BloomFilter64 tests @@ -28,32 +28,8 @@ * @version $Date: 2009-11-19 14:39:53 -0800 (Thu, 19 Nov 2009) $, $Revision: 6674 $ */ public class BloomFilter64bitTest extends BloomFilterTest { - - protected void setUp() throws Exception { - // test at default size of BloomUriUniqFilter -- but don't depend on that - // 'engine'-subproject class for values - bloom = new BloomFilter64bit(125000000,22); - } - - public void testDistributionOfSetBits() { - // prelaod - testBasics(); - - BloomFilter64bit bloom64 = (BloomFilter64bit)bloom; - for(int i = 0; i<bloom64.bits.length; i++) { - // verify that first set bit is in first 20% of bitfield - if(bloom64.bits[i]>0) { - assertTrue("set bits not as expected in early positions",(i/(double)bloom64.bits.length)<0.2d); - break; - } - } - for(int i = bloom64.bits.length-1; i>=0; i--) { - // verify that first set bit is in first 20% of bitfield - 
if(bloom64.bits[i]>0) { - assertTrue("set bits not as expected in late positions",(i/(double)bloom64.bits.length)>0.8d); - break; - } - } - + @Override + BloomFilter createBloom(long n, int d, Random weightsGenerator) { + return new BloomFilter64bit(n, d, weightsGenerator, false); } } diff --git a/commons/src/test/java/org/archive/util/BloomFilterTest.java b/commons/src/test/java/org/archive/util/BloomFilterTest.java index d61ecfaef..769b8cefe 100644 --- a/commons/src/test/java/org/archive/util/BloomFilterTest.java +++ b/commons/src/test/java/org/archive/util/BloomFilterTest.java @@ -19,39 +19,155 @@ package org.archive.util; +import java.security.SecureRandom; +import java.util.Random; + import junit.framework.TestCase; /** - * BloomFilter tests + * BloomFilter tests. * * @contributor gojomo * @version $Date: 2009-11-19 14:39:53 -0800 (Thu, 19 Nov 2009) $, $Revision: 6674 $ */ public abstract class BloomFilterTest extends TestCase { - protected BloomFilter bloom; - - protected abstract void setUp() throws Exception; - public void testBasics() { - // require initial additions to return 'true' (for 'added') - assertTrue(bloom.add("abracadabra")); - assertTrue(bloom.add("foobar")); - assertTrue(bloom.add("rumplestiltskin")); - assertTrue(bloom.add("buckaroobanzai")); - assertTrue(bloom.add("scheherazade")); + abstract BloomFilter createBloom(long n, int d, Random random); + + protected void trialWithParameters(long targetSize, int hashCount, long addCount, long containsCount) { + BloomFilter bloom = createBloom(targetSize,hashCount,new Random(1996L)); + + int addFalsePositives = checkAdds(bloom,addCount); + checkDistribution(bloom); + // this is a *very* rough and *very* lenient upper bound for adds <= targetSize + long maxTolerableDuringAdds = addCount / (1<<hashCount); + assertTrue( + "excessive false positives ("+addFalsePositives+">"+maxTolerableDuringAdds+") during adds", + addFalsePositives<10); - // require readdition to return 'false' (not added because 
already present) - assertFalse(bloom.add("abracadabra")); - assertFalse(bloom.add("foobar")); - assertFalse(bloom.add("rumplestiltskin")); - assertFalse(bloom.add("buckaroobanzai")); - assertFalse(bloom.add("scheherazade")); + if(containsCount==0) { + return; + } + int containsFalsePositives = checkContains(bloom,containsCount); + // expect at least 0 if bloom wasn't saturated in add phase + // if was saturated, expect at least 1/4th of the theoretical 1-in-every-(2<<hashCount) + long minTolerableDuringContains = (addCount < targetSize) ? 0 : containsCount / ((1<<hashCount) * 4); + // expect no more than 4 times the theoretical-at-saturation + long maxTolerableDuringContains = containsCount * 4 / (1<<hashCount); + assertTrue( + "excessive false positives ("+containsFalsePositives+">"+maxTolerableDuringContains+") during contains", + containsFalsePositives<=maxTolerableDuringContains); // no more than double expected 1-in-4mil + assertTrue( + "missing false positives ("+containsFalsePositives+"<"+minTolerableDuringContains+") during contains", + containsFalsePositives>=minTolerableDuringContains); // should be at least a couple + } + + /** + * Test very-large (almost 800MB, spanning more than Integer.MAX_VALUE bit + * indexes) bloom at saturation for expected behavior and level of + * false-positives. + * + * Renamed to non-'test' name so not automatically run, because can + * take 15+ minutes to complete. + */ + public void testOversized() { + trialWithParameters(200000000,22,200000000,32000000); + } + + /** + * Test large (495MB), default-sized bloom at saturation for + * expected behavior and level of false-positives. + * + * Renamed to non-'test' name so not automatically run, because can + * take 15+ minutes to complete. 
+ */ + public void testDefaultFull() { + trialWithParameters(125000000,22,125000000,34000000); + } + + public void testDefaultAbbreviated() { + trialWithParameters(125000000,22,17000000,0); + } + + public void testSmall() { + trialWithParameters(10000000, 20, 10000000, 10000000); + } + + /** + * Check that the given filter behaves properly as a large number of + * constructed unique strings are added: responding positively to + * contains, and negatively to redundant adds. Assuming that the filter + * was empty before it was called, any add()s that report the string was + * already present are false-positives; report the total of same so the + * caller can evaluate if that level was suspiciously out of the expected + * error rate. + * + * @param bloom BloomFilter to check + * @param count int number of unique strings to check + * @return + */ + protected int checkAdds(BloomFilter bloom, long count) { + int falsePositives = 0; + for(int i = 0; i < count; i++) { + String str = "add"+Integer.toString(i); + if(!bloom.add(str)) { + falsePositives++; + } + assertTrue(bloom.contains(str)); + assertFalse(str+" not present on re-add",bloom.add(str)); + } + return falsePositives; + } + + /** + * Check if the given filter contains any of the given constructed + * strings. Since the previously-added strings (of checkAdds) were + * different from these, *any* positive contains results are + * false-positives. Return the total count so that the calling method + * can determine if the false-positive rate is outside the expected + * range. 
+ * + * @param bloom BloomFilter to check + * @param count int number of unique strings to check + * @return + */ + protected int checkContains(BloomFilter bloom, long count) { + int falsePositives = 0; + for(int i = 0; i < count; i++) { + String str = "contains"+Integer.toString(i); + if(bloom.contains(str)) { + falsePositives++; + } + } + return falsePositives; } - @Override - protected void tearDown() throws Exception { - super.tearDown(); - bloom = null; + /** + * Check that the given bloom filter, assumed to have already had a + * significant number of items added, has bits set in the lower and upper + * 10% of its bit field. + * + * (This would have caught previous int/long bugs in the filter hashing + * or conversion of bit indexes into array indexes and bit masks.) + * + * @param bloom BloomFilter to check + */ + public void checkDistribution(BloomFilter bloom) { + long bitLength = bloom.getSizeBytes() * 8L; + for(long i = 0; i<bitLength; i++) { + // verify that first set bit is in first 20% of bitfield + if(bloom.getBit(i)) { + assertTrue("set bits not as expected in early positions",(i/(double)bitLength)<0.1d); + break; + } + } + for(long i = bitLength-1; i>=0; i--) { + // verify that first set bit is in first 20% of bitfield + if(bloom.getBit(i)) { + assertTrue("set bits not as expected in late positions",(i/(double)bitLength)>0.1d); + break; + } + } } } diff --git a/engine/src/main/java/org/archive/crawler/util/BloomUriUniqFilter.java b/engine/src/main/java/org/archive/crawler/util/BloomUriUniqFilter.java index 2b15f5d2d..98120a6e9 100644 --- a/engine/src/main/java/org/archive/crawler/util/BloomUriUniqFilter.java +++ b/engine/src/main/java/org/archive/crawler/util/BloomUriUniqFilter.java @@ -47,25 +47,8 @@ * through 125 million unique inserts, which creates a filter structure * about 495MB in size. 
* - * You may use the following system properties to tune the size and - * false-positive rate of the bloom filter structure used by this class: - * - * org.archive.crawler.util.BloomUriUniqFilter.expected-size (default 125000000) - * org.archive.crawler.util.BloomUriUniqFilter.hash-count (default 22) - * - * The resulting filter will take up approximately... - * - * 1.44 * expected-size * hash-count / 8 - * - * ...bytes. - * - * The BloomFilter64bit implementation class supports filters up to - * 16GiB in size. - * - * (If you only need a filter up to 512MiB in size, the - * BloomFilter32bitSplit *might* offer better performance, on 32bit - * JVMs or with respect to heap-handling of giant arrays. The only - * current way to swap in this class is by editing the source.) + * You may swap in an differently-configured BloomFilter class to alter + * these tradeoffs. * * @author gojomo * @version $Date$, $Revision$ @@ -78,28 +61,13 @@ public class BloomUriUniqFilter extends SetBasedUriUniqFilter Logger.getLogger(BloomUriUniqFilter.class.getName()); BloomFilter bloom; // package access for testing convenience - - // these defaults create a bloom filter that is - // 1.44*125mil*22/8 ~= 495MB in size, and at full - // capacity will give a false contained indication - // 1/(2^22) ~= 1 in every 4 million probes - protected int expectedInserts= 125000000; // default 125 million; - public int getExpectedInserts() { - return expectedInserts; + public BloomFilter getBloomFilter() { + return bloom; } - public void setExpectedInserts(int expectedInserts) { - this.expectedInserts = expectedInserts; + public void setBloomFilter(BloomFilter filter) { + bloom = filter; } - protected int hashCount = 22; // 1 in 4 million false pos - public int getHashCount() { - return hashCount; - } - public void setHashCount(int hashCount) { - this.hashCount = hashCount; - } - - /** * Default constructor */ @@ -109,14 +77,17 @@ public BloomUriUniqFilter() { /** * Initializer. 
- * - * @param n the expected number of elements. - * @param d the number of hash functions; if the filter adds not more - * than <code>n</code> elements, false positives will happen with - * probability 2<sup>-<var>d</var></sup>. */ public void afterPropertiesSet() { - bloom = new BloomFilter64bit(expectedInserts,hashCount); + if(bloom==null) { + // configure default bloom filter if operator hasn't already + + // these defaults create a bloom filter that is + // 1.44*125mil*22/8 ~= 495MB in size, and at full + // capacity will give a false contained indication + // 1/(2^22) ~= 1 in every 4 million probes + bloom = new BloomFilter64bit(125000000,22); + } } public void forget(String canonical, CrawlURI item) { @@ -128,8 +99,11 @@ protected boolean setAdd(CharSequence uri) { boolean added = bloom.add(uri); // warn if bloom has reached its expected size (and its false-pos // rate will now exceed the theoretical/designed level) - if( added && (count() == expectedInserts)) { - LOGGER.warning("Bloom has reached expected limit "+expectedInserts); + if( added && (count() == bloom.getExpectedInserts())) { + LOGGER.warning( + "Bloom has reached expected limit "+bloom.getExpectedInserts()+ + "; false-positive rate will now rise above goal of "+ + "1-in-(2^"+bloom.getHashCount()); } return added; } diff --git a/engine/src/test/java/org/archive/crawler/util/BloomUriUniqFilterTest.java b/engine/src/test/java/org/archive/crawler/util/BloomUriUniqFilterTest.java index e00511e46..28390704e 100644 --- a/engine/src/test/java/org/archive/crawler/util/BloomUriUniqFilterTest.java +++ b/engine/src/test/java/org/archive/crawler/util/BloomUriUniqFilterTest.java @@ -32,6 +32,7 @@ import org.archive.modules.CrawlURI; import org.archive.net.UURI; import org.archive.net.UURIFactory; +import org.archive.util.BloomFilter64bit; /** @@ -53,8 +54,7 @@ public class BloomUriUniqFilterTest extends TestCase protected void setUp() throws Exception { super.setUp(); this.filter = new BloomUriUniqFilter(); 
- this.filter.setExpectedInserts(2000); - this.filter.setHashCount(24); + this.filter.setBloomFilter(new BloomFilter64bit(2000, 24)); this.filter.afterPropertiesSet(); this.filter.setDestination(this); }
c3a1bbd206678a74f06a2ee2320b3547e26c3d43
drools
JBRULES-1520: moving test to correct test file--git-svn-id: https://svn.jboss.org/repos/labs/labs/jbossrules/trunk@19375 c60d74c8-e8f6-0310-9e8f-d4a2fc68ab70-
p
https://github.com/kiegroup/drools
diff --git a/drools-compiler/src/test/java/org/drools/integrationtests/FirstOrderLogicTest.java b/drools-compiler/src/test/java/org/drools/integrationtests/FirstOrderLogicTest.java index 6faeee4fdc9..0f4aa59cbbc 100644 --- a/drools-compiler/src/test/java/org/drools/integrationtests/FirstOrderLogicTest.java +++ b/drools-compiler/src/test/java/org/drools/integrationtests/FirstOrderLogicTest.java @@ -82,6 +82,34 @@ public void testCollect() throws Exception { results.get( 0 ).getClass().getName() ); } + public void testCollectNodeSharing() throws Exception { + final PackageBuilder builder = new PackageBuilder(); + builder.addPackageFromDrl( new InputStreamReader( getClass().getResourceAsStream( "test_collectNodeSharing.drl" ) ) ); + final Package pkg = builder.getPackage(); + + RuleBase ruleBase = getRuleBase(); + ruleBase.addPackage( pkg ); + ruleBase = SerializationHelper.serializeObject(ruleBase); + final WorkingMemory workingMemory = ruleBase.newStatefulSession(); + + final List list = new ArrayList(); + workingMemory.setGlobal( "results", + list ); + + workingMemory.insert( new Cheese( "stilton", + 10 ) ); + workingMemory.insert( new Cheese( "brie", + 15 ) ); + + workingMemory.fireAllRules(); + + assertEquals( 1, + list.size() ); + + assertEquals( 2, + ((List) list.get( 0 )).size() ); + } + public void testCollectModify() throws Exception { // read in the source final Reader reader = new InputStreamReader( getClass().getResourceAsStream( "test_Collect.drl" ) ); diff --git a/drools-compiler/src/test/java/org/drools/integrationtests/MiscTest.java b/drools-compiler/src/test/java/org/drools/integrationtests/MiscTest.java index 7e8b54a5575..74a51f95268 100644 --- a/drools-compiler/src/test/java/org/drools/integrationtests/MiscTest.java +++ b/drools-compiler/src/test/java/org/drools/integrationtests/MiscTest.java @@ -3065,34 +3065,6 @@ public void testContainsInArray() throws Exception { list.get( 1 ) ); } - public void testCollectNodeSharing() throws Exception { - 
final PackageBuilder builder = new PackageBuilder(); - builder.addPackageFromDrl( new InputStreamReader( getClass().getResourceAsStream( "test_collectNodeSharing.drl" ) ) ); - final Package pkg = builder.getPackage(); - - RuleBase ruleBase = getRuleBase(); - ruleBase.addPackage( pkg ); - ruleBase = SerializationHelper.serializeObject(ruleBase); - final WorkingMemory workingMemory = ruleBase.newStatefulSession(); - - final List list = new ArrayList(); - workingMemory.setGlobal( "results", - list ); - - workingMemory.insert( new Cheese( "stilton", - 10 ) ); - workingMemory.insert( new Cheese( "brie", - 15 ) ); - - workingMemory.fireAllRules(); - - assertEquals( 1, - list.size() ); - - assertEquals( 2, - ((List) list.get( 0 )).size() ); - } - public void testNodeSharingNotExists() throws Exception { final PackageBuilder builder = new PackageBuilder(); builder.addPackageFromDrl( new InputStreamReader( getClass().getResourceAsStream( "test_nodeSharingNotExists.drl" ) ) );
328b8d4d2ee831d64f8347072f95cf70a07e24a1
artificerrepo$artificer
Moved the test maven projects. Updated the s-ramp wagon's "get" functionality to bring it in-line with put.
p
https://github.com/artificerrepo/artificer
diff --git a/s-ramp-wagon/src/main/java/org/overlord/sramp/wagon/SrampWagon.java b/s-ramp-wagon/src/main/java/org/overlord/sramp/wagon/SrampWagon.java index c0caeb408..7c3f588e8 100644 --- a/s-ramp-wagon/src/main/java/org/overlord/sramp/wagon/SrampWagon.java +++ b/s-ramp-wagon/src/main/java/org/overlord/sramp/wagon/SrampWagon.java @@ -15,20 +15,13 @@ */ package org.overlord.sramp.wagon; -import java.io.ByteArrayInputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; -import java.io.StringWriter; -import java.security.MessageDigest; import javax.xml.bind.JAXBException; -import javax.xml.transform.Transformer; -import javax.xml.transform.TransformerFactory; -import javax.xml.transform.dom.DOMSource; -import javax.xml.transform.stream.StreamResult; import org.apache.commons.io.IOUtils; import org.apache.http.conn.HttpHostConnectException; @@ -59,9 +52,7 @@ import org.overlord.sramp.client.SrampServerException; import org.overlord.sramp.wagon.models.MavenGavInfo; import org.overlord.sramp.wagon.util.DevNullOutputStream; -import org.overlord.sramp.wagon.util.PomGenerator; import org.s_ramp.xmlns._2010.s_ramp.BaseArtifactType; -import org.w3c.dom.Document; /** * Implements a wagon provider that uses the S-RAMP Atom API. @@ -127,11 +118,65 @@ public void closeConnection() throws ConnectionException { public void fillInputData(InputData inputData) throws TransferFailedException, ResourceDoesNotExistException, AuthorizationException { Resource resource = inputData.getResource(); - + // Skip maven-metadata.xml files - they are not (yet?) 
supported if (resource.getName().contains("maven-metadata.xml")) throw new ResourceDoesNotExistException("Could not find file: '" + resource + "'"); logger.debug("Looking up resource from s-ramp repository: " + resource); + + MavenGavInfo gavInfo = MavenGavInfo.fromResource(resource); + if (gavInfo.isHash()) { + doGetHash(gavInfo, inputData); + } else { + doGetArtifact(gavInfo, inputData); + } + + } + + /** + * Gets the hash data from the s-ramp repository and stores it in the {@link InputData} for + * use by Maven. + * @param gavInfo + * @param inputData + * @throws TransferFailedException + * @throws ResourceDoesNotExistException + * @throws AuthorizationException + */ + private void doGetHash(MavenGavInfo gavInfo, InputData inputData) throws TransferFailedException, + ResourceDoesNotExistException, AuthorizationException { + String artyPath = gavInfo.getFullName(); + String hashPropName; + if (gavInfo.getType().endsWith(".md5")) { + hashPropName = "maven.hash.md5"; + artyPath = artyPath.substring(0, artyPath.length() - 4); + } else { + hashPropName = "maven.hash.sha1"; + artyPath = artyPath.substring(0, artyPath.length() - 5); + } + SrampArchiveEntry entry = this.archive.getEntry(artyPath); + if (entry == null) { + throw new ResourceDoesNotExistException("Failed to find resource hash: " + gavInfo.getName()); + } + BaseArtifactType metaData = entry.getMetaData(); + + String hashValue = SrampModelUtils.getCustomProperty(metaData, hashPropName); + if (hashValue == null) { + throw new ResourceDoesNotExistException("Failed to find resource hash: " + gavInfo.getName()); + } + inputData.setInputStream(IOUtils.toInputStream(hashValue)); + } + + /*** + * Gets the artifact content from the s-ramp repository and stores it in the {@link InputData} + * object for use by Maven. 
+ * @param gavInfo + * @param inputData + * @throws TransferFailedException + * @throws ResourceDoesNotExistException + * @throws AuthorizationException + */ + private void doGetArtifact(MavenGavInfo gavInfo, InputData inputData) throws TransferFailedException, + ResourceDoesNotExistException, AuthorizationException { // RESTEasy uses the current thread's context classloader to load its logger class. This // fails in Maven because the context classloader is the wagon plugin's classloader, which // doesn't know about any of the RESTEasy JARs. So here we're temporarily setting the @@ -140,43 +185,19 @@ public void fillInputData(InputData inputData) throws TransferFailedException, ClassLoader oldCtxCL = Thread.currentThread().getContextClassLoader(); Thread.currentThread().setContextClassLoader(SrampWagon.class.getClassLoader()); try { - MavenGavInfo gavInfo = MavenGavInfo.fromResource(resource); String endpoint = getSrampEndpoint(); SrampAtomApiClient client = new SrampAtomApiClient(endpoint); // Query the artifact meta data using GAV info BaseArtifactType artifact = findExistingArtifact(client, gavInfo); if (artifact == null) - throw new ResourceDoesNotExistException("Artifact not found in s-ramp repository: '" + resource + "'"); + throw new ResourceDoesNotExistException("Artifact not found in s-ramp repository: '" + gavInfo.getName() + "'"); + this.archive.addEntry(gavInfo.getFullName(), artifact, null); ArtifactType type = ArtifactType.valueOf(artifact); - if ("pom".equals(gavInfo.getType())) { - String serializedPom = generatePom(artifact); - inputData.setInputStream(new ByteArrayInputStream(serializedPom.getBytes("UTF-8"))); - return; - } else if ("pom.sha1".equals(gavInfo.getType())) { - // Generate a SHA1 hash on the fly for the POM - String serializedPom = generatePom(artifact); - MessageDigest md = MessageDigest.getInstance("SHA1"); - md.update(serializedPom.getBytes("UTF-8")); - byte[] mdbytes = md.digest(); - StringBuilder sb = new StringBuilder(); - for 
(int i = 0; i < mdbytes.length; i++) { - sb.append(Integer.toString((mdbytes[i] & 0xff) + 0x100, 16).substring(1)); - } - inputData.setInputStream(new ByteArrayInputStream(sb.toString().getBytes("UTF-8"))); - return; - } else if (gavInfo.getType().endsWith(".sha1")) { - InputStream artifactContent = client.getArtifactContent(type, artifact.getUuid()); - String sha1Hash = generateSHA1Hash(artifactContent); - inputData.setInputStream(new ByteArrayInputStream(sha1Hash.getBytes("UTF-8"))); - return; - } else { - // Get the artifact content as an input stream - InputStream artifactContent = client.getArtifactContent(type, artifact.getUuid()); - inputData.setInputStream(artifactContent); - return; - } + // Get the artifact content as an input stream + InputStream artifactContent = client.getArtifactContent(type, artifact.getUuid()); + inputData.setInputStream(artifactContent); } catch (ResourceDoesNotExistException e) { throw e; } catch (SrampClientException e) { @@ -189,64 +210,6 @@ public void fillInputData(InputData inputData) throws TransferFailedException, } finally { Thread.currentThread().setContextClassLoader(oldCtxCL); } - throw new ResourceDoesNotExistException("Could not find file: '" + resource + "'"); - } - - /** - * Generates a SHA1 hash for the given binary content. 
- * @param artifactContent an s-ramp artifact input stream - * @return a SHA1 hash - */ - private String generateSHA1Hash(InputStream artifactContent) { - try { - MessageDigest md = MessageDigest.getInstance("SHA1"); - byte[] buff = new byte[2048]; - int count = artifactContent.read(buff); - while (count != -1) { - md.update(buff, 0, count); - count = artifactContent.read(buff); - } - byte[] mdbytes = md.digest(); - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < mdbytes.length; i++) { - sb.append(Integer.toString((mdbytes[i] & 0xff) + 0x100, 16).substring(1)); - } - return sb.toString(); - } catch (Exception e) { - throw new RuntimeException(e); - } finally { - IOUtils.closeQuietly(artifactContent); - } - } - - /** - * Generates a POM for the artifact. - * @param artifact - * @throws Exception - */ - private String generatePom(BaseArtifactType artifact) throws Exception { - ArtifactType type = ArtifactType.valueOf(artifact); - PomGenerator pomGenerator = new PomGenerator(); - Document pomDoc = pomGenerator.generatePom(artifact, type); - String serializedPom = serializeDocument(pomDoc); - return serializedPom; - } - - /** - * Serialize a document to a string. 
- * @param document - */ - private String serializeDocument(Document document) { - try { - StringWriter writer = new StringWriter(); - Transformer transformer = TransformerFactory.newInstance().newTransformer(); - transformer.setOutputProperty(javax.xml.transform.OutputKeys.OMIT_XML_DECLARATION, "no"); - transformer.setOutputProperty(javax.xml.transform.OutputKeys.INDENT, "yes"); - transformer.transform(new DOMSource(document), new StreamResult(writer)); - return writer.toString(); - } catch (Exception e) { - throw new RuntimeException(e); - } } /** @@ -503,7 +466,8 @@ private BaseArtifactType findExistingArtifactByGAV(SrampAtomApiClient client, Ma * @throws SrampServerException * @throws JAXBException */ - private BaseArtifactType findExistingArtifactByUniversal(SrampAtomApiClient client, MavenGavInfo gavInfo) throws SrampServerException, SrampClientException, JAXBException { + private BaseArtifactType findExistingArtifactByUniversal(SrampAtomApiClient client, MavenGavInfo gavInfo) + throws SrampServerException, SrampClientException, JAXBException { String artifactType = gavInfo.getGroupId().substring(gavInfo.getGroupId().indexOf('.') + 1); String uuid = gavInfo.getArtifactId(); Entry entry = null; diff --git a/s-ramp-wagon/src/main/resources/org/overlord/sramp/wagon/util/pom.template b/s-ramp-wagon/src/main/resources/org/overlord/sramp/wagon/util/pom.template deleted file mode 100644 index f512f2053..000000000 --- a/s-ramp-wagon/src/main/resources/org/overlord/sramp/wagon/util/pom.template +++ /dev/null @@ -1,18 +0,0 @@ -<?xml version="1.0"?> -<project - xmlns="http://maven.apache.org/POM/4.0.0" - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> - - <modelVersion>4.0.0</modelVersion> - <groupId /> - <artifactId /> - <version /> - <name /> - <description /> - <type /> - - <dependencies> - </dependencies> - -</project> \ No newline at end of file diff --git 
a/s-ramp-wagon/src/test/resources/test-wagon-pull/.gitignore b/s-ramp-wagon/src/test/maven-projects/test-wagon-pull/.gitignore similarity index 100% rename from s-ramp-wagon/src/test/resources/test-wagon-pull/.gitignore rename to s-ramp-wagon/src/test/maven-projects/test-wagon-pull/.gitignore diff --git a/s-ramp-wagon/src/test/resources/test-wagon-pull/pom.xml b/s-ramp-wagon/src/test/maven-projects/test-wagon-pull/pom.xml similarity index 92% rename from s-ramp-wagon/src/test/resources/test-wagon-pull/pom.xml rename to s-ramp-wagon/src/test/maven-projects/test-wagon-pull/pom.xml index 1f9d097bd..71ea9bf15 100644 --- a/s-ramp-wagon/src/test/resources/test-wagon-pull/pom.xml +++ b/s-ramp-wagon/src/test/maven-projects/test-wagon-pull/pom.xml @@ -5,6 +5,7 @@ <artifactId>test-wagon-pull</artifactId> <version>0.0.1-SNAPSHOT</version> <name>test-wagon-pull</name> + <repositories> <repository> <id>local-sramp-repo</id> @@ -25,7 +26,11 @@ <groupId>org.overlord.sramp.test</groupId> <artifactId>test-wagon-push</artifactId> <version>0.0.1-SNAPSHOT</version> - <type>xsd</type> + </dependency> + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <version>4.10</version> </dependency> </dependencies> diff --git a/s-ramp-wagon/src/test/maven-projects/test-wagon-pull/src/main/java/org/overlord/sramp/wagontest/TestPullDependency.java b/s-ramp-wagon/src/test/maven-projects/test-wagon-pull/src/main/java/org/overlord/sramp/wagontest/TestPullDependency.java new file mode 100644 index 000000000..0aed61c75 --- /dev/null +++ b/s-ramp-wagon/src/test/maven-projects/test-wagon-pull/src/main/java/org/overlord/sramp/wagontest/TestPullDependency.java @@ -0,0 +1,43 @@ +/* + * Copyright 2012 JBoss Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.overlord.sramp.wagontest; + +import org.overlord.sramp.test.wagon.Widget; + +/** + * Tests the ability to create a new instance of the generated class found in + * the test-wagon-push project. + * + * @author [email protected] + */ +public class TestPullDependency { + + /** + * Constructor. + */ + public TestPullDependency() { + } + + /** + * Do something with a Widget. + */ + public void doit() { + Widget widget = new Widget(); + System.out.println("It's done!"); + System.out.println(widget); + } + +} diff --git a/s-ramp-wagon/src/test/maven-projects/test-wagon-pull/src/test/java/org/overlord/sramp/wagontest/TestPullDependencyTest.java b/s-ramp-wagon/src/test/maven-projects/test-wagon-pull/src/test/java/org/overlord/sramp/wagontest/TestPullDependencyTest.java new file mode 100644 index 000000000..fa54b9f5f --- /dev/null +++ b/s-ramp-wagon/src/test/maven-projects/test-wagon-pull/src/test/java/org/overlord/sramp/wagontest/TestPullDependencyTest.java @@ -0,0 +1,37 @@ +/* + * Copyright 2012 JBoss Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.overlord.sramp.wagontest; + +import org.junit.Test; + +/** + * Unit test. + * + * @author [email protected] + */ +public class TestPullDependencyTest { + + /** + * Test method for {@link org.overlord.sramp.wagontest.TestPullDependency#doit()}. + */ + @Test + public void testDoit() { + TestPullDependency tpd = new TestPullDependency(); + tpd.doit(); + System.out.println("DONE: success"); + } + +} diff --git a/s-ramp-wagon/src/test/resources/test-wagon-push/.gitignore b/s-ramp-wagon/src/test/maven-projects/test-wagon-push/.gitignore similarity index 100% rename from s-ramp-wagon/src/test/resources/test-wagon-push/.gitignore rename to s-ramp-wagon/src/test/maven-projects/test-wagon-push/.gitignore diff --git a/s-ramp-wagon/src/test/resources/test-wagon-push/pom.xml b/s-ramp-wagon/src/test/maven-projects/test-wagon-push/pom.xml similarity index 97% rename from s-ramp-wagon/src/test/resources/test-wagon-push/pom.xml rename to s-ramp-wagon/src/test/maven-projects/test-wagon-push/pom.xml index 595c057ff..0706da5d9 100644 --- a/s-ramp-wagon/src/test/resources/test-wagon-push/pom.xml +++ b/s-ramp-wagon/src/test/maven-projects/test-wagon-push/pom.xml @@ -10,7 +10,7 @@ <repository> <id>local-sramp-repo</id> <name>Local S-RAMP Repository</name> - <url>sramp://localhost:9090/s-ramp-atom/s-ramp/</url> + <url>sramp://localhost:8080/s-ramp-atom/s-ramp/</url> <layout>default</layout> <releases> <enabled>true</enabled> diff --git a/s-ramp-wagon/src/test/resources/test-wagon-push/src/main/resources/META-INF/schemas/widget.xsd b/s-ramp-wagon/src/test/maven-projects/test-wagon-push/src/main/resources/META-INF/schemas/widget.xsd similarity index 100% rename from s-ramp-wagon/src/test/resources/test-wagon-push/src/main/resources/META-INF/schemas/widget.xsd rename to s-ramp-wagon/src/test/maven-projects/test-wagon-push/src/main/resources/META-INF/schemas/widget.xsd
0dd95079938476608211f34c414b90f9eca45f77
camel
CAMEL-1712 Upgraded the camel-ibatis to JUnit4--git-svn-id: https://svn.apache.org/repos/asf/camel/trunk@785119 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/camel
diff --git a/components/camel-ibatis/pom.xml b/components/camel-ibatis/pom.xml index e512bfbff1e42..d243937f4e6e0 100644 --- a/components/camel-ibatis/pom.xml +++ b/components/camel-ibatis/pom.xml @@ -57,8 +57,7 @@ <!-- testing --> <dependency> <groupId>org.apache.camel</groupId> - <artifactId>camel-core</artifactId> - <type>test-jar</type> + <artifactId>camel-test</artifactId> <scope>test</scope> </dependency> <dependency> diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisBatchConsumerTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisBatchConsumerTest.java index c8869b8c8e4ea..c9ba157fd8338 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisBatchConsumerTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisBatchConsumerTest.java @@ -19,12 +19,14 @@ import org.apache.camel.Exchange; import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; +import org.junit.Test; /** * @version $Revision$ */ public class IBatisBatchConsumerTest extends IBatisTestSupport { + @Test public void testBatchConsumer() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(2); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisPollingDelayRouteTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisPollingDelayRouteTest.java index c6845708750d1..6bf1ef0aaf749 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisPollingDelayRouteTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisPollingDelayRouteTest.java @@ -19,15 +19,19 @@ import java.sql.Connection; import java.sql.Statement; -import org.apache.camel.ContextTestSupport; import org.apache.camel.builder.RouteBuilder; import 
org.apache.camel.component.mock.MockEndpoint; +import org.apache.camel.test.junit4.CamelTestSupport; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** * @version $Revision$ */ -public class IBatisPollingDelayRouteTest extends ContextTestSupport { +public class IBatisPollingDelayRouteTest extends CamelTestSupport { + @Test public void testSendAccountBean() throws Exception { createTestData(); @@ -67,7 +71,8 @@ public void configure() throws Exception { } @Override - protected void setUp() throws Exception { + @Before + public void setUp() throws Exception { super.setUp(); // lets create the database... @@ -78,7 +83,8 @@ protected void setUp() throws Exception { } @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { Connection connection = createConnection(); Statement statement = connection.createStatement(); statement.execute("drop table ACCOUNT"); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForDeleteTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForDeleteTest.java index 2b2327d0d7d13..953bfb04cba74 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForDeleteTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForDeleteTest.java @@ -18,12 +18,14 @@ import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; +import org.junit.Test; /** * @version $Revision$ */ public class IBatisQueryForDeleteTest extends IBatisTestSupport { + @Test public void testDelete() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(1); @@ -42,7 +44,8 @@ public void testDelete() throws Exception { rows = template.requestBody("ibatis:count?statementType=QueryForObject", null, Integer.class); assertEquals("There should be 0 rows", 0, 
rows.intValue()); } - + + @Test public void testDeleteNotFound() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(1); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForInsertTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForInsertTest.java index 3a0d6e58ada09..b232df1c05cb1 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForInsertTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForInsertTest.java @@ -18,12 +18,14 @@ import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; +import org.junit.Test; /** * @version $Revision$ */ public class IBatisQueryForInsertTest extends IBatisTestSupport { + @Test public void testInsert() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(1); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForListTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForListTest.java index 4286be054de30..ffdd7883105de 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForListTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForListTest.java @@ -20,12 +20,14 @@ import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; +import org.junit.Test; /** * @version $Revision$ */ public class IBatisQueryForListTest extends IBatisTestSupport { + @Test public void testQueryForList() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(1); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForListWithSplitTest.java 
b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForListWithSplitTest.java index e6d1883088eae..c74ad19e7cd27 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForListWithSplitTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForListWithSplitTest.java @@ -18,12 +18,14 @@ import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; +import org.junit.Test; /** * @version $Revision$ */ public class IBatisQueryForListWithSplitTest extends IBatisTestSupport { + @Test public void testQueryForList() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(2); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForObjectTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForObjectTest.java index b5e8efb07fe50..8229dde693060 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForObjectTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForObjectTest.java @@ -18,12 +18,14 @@ import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; +import org.junit.Test; /** * @version $Revision$ */ public class IBatisQueryForObjectTest extends IBatisTestSupport { + @Test public void testQueryForObject() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(1); @@ -37,6 +39,7 @@ public void testQueryForObject() throws Exception { assertEquals("Claus", account.getFirstName()); } + @Test public void testQueryForNotFound() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(1); diff --git 
a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForUpdateTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForUpdateTest.java index a5318c6d5ab71..838c3421265e5 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForUpdateTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueryForUpdateTest.java @@ -18,12 +18,14 @@ import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; +import org.junit.Test; /** * @version $Revision$ */ public class IBatisQueryForUpdateTest extends IBatisTestSupport { + @Test public void testUpdate() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(1); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueueTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueueTest.java index 89b74ccc022c1..85ed3088868e1 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueueTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisQueueTest.java @@ -20,12 +20,16 @@ import java.sql.Statement; import java.util.List; -import org.apache.camel.ContextTestSupport; import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; +import org.apache.camel.test.junit4.CamelTestSupport; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; -public class IBatisQueueTest extends ContextTestSupport { +public class IBatisQueueTest extends CamelTestSupport { + @Test public void testConsume() throws Exception { MockEndpoint endpoint = getMockEndpoint("mock:results"); @@ -76,7 +80,8 @@ public void configure() throws Exception { } @Override - protected void setUp() throws Exception { + @Before + public void setUp() 
throws Exception { super.setUp(); // lets create the database... @@ -88,7 +93,8 @@ protected void setUp() throws Exception { } @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { super.tearDown(); IBatisEndpoint endpoint = resolveMandatoryEndpoint("ibatis:Account", IBatisEndpoint.class); Connection connection = endpoint.getSqlMapClient().getDataSource().getConnection(); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisRouteEmptyResultSetTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisRouteEmptyResultSetTest.java index 52141b52714d8..ba443f5a2327b 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisRouteEmptyResultSetTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisRouteEmptyResultSetTest.java @@ -20,15 +20,19 @@ import java.sql.Statement; import java.util.ArrayList; -import org.apache.camel.ContextTestSupport; import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; +import org.apache.camel.test.junit4.CamelTestSupport; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** * @version $Revision$ */ -public class IBatisRouteEmptyResultSetTest extends ContextTestSupport { +public class IBatisRouteEmptyResultSetTest extends CamelTestSupport { + @Test public void testRouteEmptyResultSet() throws Exception { MockEndpoint endpoint = getMockEndpoint("mock:results"); endpoint.expectedMinimumMessageCount(1); @@ -51,7 +55,8 @@ public void configure() throws Exception { } @Override - protected void setUp() throws Exception { + @Before + public void setUp() throws Exception { super.setUp(); // lets create the database... 
@@ -62,7 +67,8 @@ protected void setUp() throws Exception { } @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { Connection connection = createConnection(); Statement statement = connection.createStatement(); statement.execute("drop table ACCOUNT"); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisRouteTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisRouteTest.java index d7591985f8683..a964ab6679ffb 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisRouteTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisRouteTest.java @@ -20,15 +20,19 @@ import java.sql.Statement; import java.util.List; -import org.apache.camel.ContextTestSupport; import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; +import org.apache.camel.test.junit4.CamelTestSupport; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** * @version $Revision$ */ -public class IBatisRouteTest extends ContextTestSupport { +public class IBatisRouteTest extends CamelTestSupport { + @Test public void testSendAccountBean() throws Exception { MockEndpoint endpoint = getMockEndpoint("mock:results"); endpoint.expectedMinimumMessageCount(1); @@ -68,7 +72,8 @@ public void configure() throws Exception { } @Override - protected void setUp() throws Exception { + @Before + public void setUp() throws Exception { super.setUp(); // lets create the database... 
@@ -79,7 +84,8 @@ protected void setUp() throws Exception { } @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { Connection connection = createConnection(); Statement statement = connection.createStatement(); statement.execute("drop table ACCOUNT"); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisTestSupport.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisTestSupport.java index 6c0190c24ce37..518cb205dd70b 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisTestSupport.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisTestSupport.java @@ -19,12 +19,16 @@ import java.sql.Connection; import java.sql.Statement; -import org.apache.camel.ContextTestSupport; +import org.apache.camel.test.junit4.CamelTestSupport; +import org.junit.After; +import org.junit.Before; -public class IBatisTestSupport extends ContextTestSupport { + +public class IBatisTestSupport extends CamelTestSupport { @Override - protected void setUp() throws Exception { + @Before + public void setUp() throws Exception { super.setUp(); // lets create the database... 
@@ -50,7 +54,8 @@ protected void setUp() throws Exception { } @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { Connection connection = createConnection(); Statement statement = connection.createStatement(); statement.execute("drop table ACCOUNT"); diff --git a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisUnknownStatementTypeTest.java b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisUnknownStatementTypeTest.java index 91f415313f91d..2e00582db406a 100644 --- a/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisUnknownStatementTypeTest.java +++ b/components/camel-ibatis/src/test/java/org/apache/camel/component/ibatis/IBatisUnknownStatementTypeTest.java @@ -17,15 +17,17 @@ package org.apache.camel.component.ibatis; import org.apache.camel.CamelExecutionException; -import org.apache.camel.ContextTestSupport; import org.apache.camel.FailedToCreateProducerException; import org.apache.camel.builder.RouteBuilder; +import org.apache.camel.test.junit4.CamelTestSupport; +import org.junit.Test; /** * @version $Revision$ */ -public class IBatisUnknownStatementTypeTest extends ContextTestSupport { +public class IBatisUnknownStatementTypeTest extends CamelTestSupport { + @Test public void testStatementTypeNotSet() throws Exception { try { template.sendBody("direct:start", "Hello");
6ebf6a1c3a48f4b3f18bfe4e42f86e1b3b8398a2
orientdb
HTTP static content now supports single file as- configuration--
a
https://github.com/orientechnologies/orientdb
diff --git a/server/src/main/java/com/orientechnologies/orient/server/network/protocol/http/command/get/OServerCommandGetStaticContent.java b/server/src/main/java/com/orientechnologies/orient/server/network/protocol/http/command/get/OServerCommandGetStaticContent.java index d1ce5c3bea7..6c36d1286f2 100644 --- a/server/src/main/java/com/orientechnologies/orient/server/network/protocol/http/command/get/OServerCommandGetStaticContent.java +++ b/server/src/main/java/com/orientechnologies/orient/server/network/protocol/http/command/get/OServerCommandGetStaticContent.java @@ -38,11 +38,14 @@ public class OServerCommandGetStaticContent extends OServerCommandConfigurableAb "GET|*.swf", "GET|favicon.ico", "GET|robots.txt" }; private static final String CONFIG_HTTP_CACHE = "http.cache:"; + private static final String CONFIG_ROOT_PATH = "root.path"; + private static final String CONFIG_FILE_PATH = "file.path"; private Map<String, OStaticContentCachedEntry> cacheContents; private Map<String, String> cacheHttp = new HashMap<String, String>(); private String cacheHttpDefault = "Cache-Control: max-age=3000"; - private String wwwPath; + private String rootPath; + private String filePath; public OServerCommandGetStaticContent() { super(DEF_PATTERN); @@ -63,7 +66,11 @@ else if (filter.length() > 0) { cacheHttp.put(f, par.value); } } - } + } else if (par.name.startsWith(CONFIG_ROOT_PATH)) + rootPath = par.value; + else if (par.name.startsWith(CONFIG_FILE_PATH)) + filePath = par.value; + } } @@ -72,14 +79,23 @@ public boolean execute(final OHttpRequest iRequest) throws Exception { iRequest.data.commandInfo = "Get static content"; iRequest.data.commandDetail = iRequest.url; - if (wwwPath == null) { - wwwPath = iRequest.configuration.getValueAsString("orientdb.www.path", "src/site"); + if (filePath == null && rootPath == null) { + // GET GLOBAL CONFIG + rootPath = iRequest.configuration.getValueAsString("orientdb.www.path", "src/site"); + if (rootPath == null) { + 
OLogManager.instance().warn(this, + "No path configured. Specify the 'root.path', 'file.path' or the global 'orientdb.www.path' variable", rootPath); + return false; + } + } - final File wwwPathDirectory = new File(wwwPath); + if (filePath == null) { + // CHECK DIRECTORY + final File wwwPathDirectory = new File(rootPath); if (!wwwPathDirectory.exists()) - OLogManager.instance().warn(this, "orientdb.www.path variable points to '%s' but it doesn't exists", wwwPath); + OLogManager.instance().warn(this, "path variable points to '%s' but it doesn't exists", rootPath); if (!wwwPathDirectory.isDirectory()) - OLogManager.instance().warn(this, "orientdb.www.path variable points to '%s' but it isn't a directory", wwwPath); + OLogManager.instance().warn(this, "path variable points to '%s' but it isn't a directory", rootPath); } if (cacheContents == null && OGlobalConfiguration.SERVER_CACHE_FILE_STATIC.getValueAsBoolean()) @@ -91,18 +107,22 @@ public boolean execute(final OHttpRequest iRequest) throws Exception { String type = null; try { - final String url = getResource(iRequest); - - String filePath; - // REPLACE WWW WITH REAL PATH - if (url.startsWith("/www")) - filePath = wwwPath + url.substring("/www".length(), url.length()); - else - filePath = wwwPath + url; + String path; + if (filePath != null) + // SINGLE FILE + path = filePath; + else { + // GET FROM A DIRECTORY + final String url = getResource(iRequest); + if (url.startsWith("/www")) + path = rootPath + url.substring("/www".length(), url.length()); + else + path = rootPath + url; + } if (cacheContents != null) { synchronized (cacheContents) { - final OStaticContentCachedEntry cachedEntry = cacheContents.get(filePath); + final OStaticContentCachedEntry cachedEntry = cacheContents.get(path); if (cachedEntry != null) { is = new ByteArrayInputStream(cachedEntry.content); contentSize = cachedEntry.size; @@ -112,38 +132,38 @@ public boolean execute(final OHttpRequest iRequest) throws Exception { } if (is == null) { - 
File inputFile = new File(filePath); + File inputFile = new File(path); if (!inputFile.exists()) { - OLogManager.instance().debug(this, "Static resource not found: %s", filePath); + OLogManager.instance().debug(this, "Static resource not found: %s", path); sendBinaryContent(iRequest, 404, "File not found", null, null, 0); return false; } - if (inputFile.isDirectory()) { - inputFile = new File(filePath + "/index.htm"); + if (filePath == null && inputFile.isDirectory()) { + inputFile = new File(path + "/index.htm"); if (inputFile.exists()) - filePath = url + "/index.htm"; + path = path + "/index.htm"; else { - inputFile = new File(url + "/index.html"); + inputFile = new File(path + "/index.html"); if (inputFile.exists()) - filePath = url + "/index.html"; + path = path + "/index.html"; } } - if (filePath.endsWith(".htm") || filePath.endsWith(".html")) + if (path.endsWith(".htm") || path.endsWith(".html")) type = "text/html"; - else if (filePath.endsWith(".png")) + else if (path.endsWith(".png")) type = "image/png"; - else if (filePath.endsWith(".jpeg")) + else if (path.endsWith(".jpeg")) type = "image/jpeg"; - else if (filePath.endsWith(".js")) + else if (path.endsWith(".js")) type = "application/x-javascript"; - else if (filePath.endsWith(".css")) + else if (path.endsWith(".css")) type = "text/css"; - else if (filePath.endsWith(".ico")) + else if (path.endsWith(".ico")) type = "image/x-icon"; - else if (filePath.endsWith(".otf")) + else if (path.endsWith(".otf")) type = "font/opentype"; else type = "text/plain"; @@ -162,7 +182,7 @@ else if (filePath.endsWith(".otf")) cachedEntry.size = contentSize; cachedEntry.type = type; - cacheContents.put(url, cachedEntry); + cacheContents.put(path, cachedEntry); is = new ByteArrayInputStream(cachedEntry.content); }
d9dbcbd78c093f4d7d326babab7d64262c2e0280
drools
JBRULES-340 core implementation for 'from' -The- from node is now added -ReteooBuilder is added
a
https://github.com/kiegroup/drools
diff --git a/drools-core/src/main/java/org/drools/common/AbstractWorkingMemory.java b/drools-core/src/main/java/org/drools/common/AbstractWorkingMemory.java index b10c07473da..f6cfd376433 100644 --- a/drools-core/src/main/java/org/drools/common/AbstractWorkingMemory.java +++ b/drools-core/src/main/java/org/drools/common/AbstractWorkingMemory.java @@ -95,7 +95,7 @@ public abstract class AbstractWorkingMemory protected final AgendaEventSupport agendaEventSupport = new AgendaEventSupport( this ); /** The <code>RuleBase</code> with which this memory is associated. */ - protected transient InternalRuleBase ruleBase; + protected transient InternalRuleBase ruleBase; protected final FactHandleFactory handleFactory; @@ -156,40 +156,75 @@ public AbstractWorkingMemory(final int id, void setRuleBase(InternalRuleBase ruleBase) { this.ruleBase = ruleBase; } - + public void addEventListener(final WorkingMemoryEventListener listener) { - this.workingMemoryEventSupport.addEventListener( listener ); + try { + lock.lock(); + this.workingMemoryEventSupport.addEventListener( listener ); + } finally { + lock.unlock(); + } } public void removeEventListener(final WorkingMemoryEventListener listener) { - this.workingMemoryEventSupport.removeEventListener( listener ); + try { + lock.lock(); + this.workingMemoryEventSupport.removeEventListener( listener ); + } finally { + lock.unlock(); + } } public List getWorkingMemoryEventListeners() { - return this.workingMemoryEventSupport.getEventListeners(); + try { + lock.lock(); + return this.workingMemoryEventSupport.getEventListeners(); + } finally { + lock.unlock(); + } } public void addEventListener(final AgendaEventListener listener) { - this.agendaEventSupport.addEventListener( listener ); + try { + lock.lock(); + this.agendaEventSupport.addEventListener( listener ); + } finally { + lock.unlock(); + } } public void removeEventListener(final AgendaEventListener listener) { - this.agendaEventSupport.removeEventListener( listener ); + try { + 
lock.lock(); + this.agendaEventSupport.removeEventListener( listener ); + } finally { + lock.unlock(); + } } - public FactHandleFactory getFactHandleFactory() { - return this.handleFactory; + public List getAgendaEventListeners() { + try { + lock.lock(); + return this.agendaEventSupport.getEventListeners(); + } finally { + lock.unlock(); + } } - public List getAgendaEventListeners() { - return this.agendaEventSupport.getEventListeners(); + public FactHandleFactory getFactHandleFactory() { + return this.handleFactory; } /** * @see WorkingMemory */ public Map getGlobals() { - return this.globals; + try { + lock.lock(); + return this.globals; + } finally { + lock.unlock(); + } } /** @@ -197,20 +232,25 @@ public Map getGlobals() { */ public void setGlobal(final String name, final Object value) { - // Make sure the global has been declared in the RuleBase - final Map globalDefintions = this.ruleBase.getGlobals(); - final Class type = (Class) globalDefintions.get( name ); - if ( (type == null) ) { - throw new RuntimeException( "Unexpected global [" + name + "]" ); - } else if ( !type.isInstance( value ) ) { - throw new RuntimeException( "Illegal class for global. " + "Expected [" + type.getName() + "], " + "found [" + value.getClass().getName() + "]." ); + try { + lock.lock(); + // Make sure the global has been declared in the RuleBase + final Map globalDefintions = this.ruleBase.getGlobals(); + final Class type = (Class) globalDefintions.get( name ); + if ( (type == null) ) { + throw new RuntimeException( "Unexpected global [" + name + "]" ); + } else if ( !type.isInstance( value ) ) { + throw new RuntimeException( "Illegal class for global. " + "Expected [" + type.getName() + "], " + "found [" + value.getClass().getName() + "]." 
); - } else { - this.globals.put( name, - value ); + } else { + this.globals.put( name, + value ); + } + } finally { + lock.unlock(); } } - + public long getId() { return this.id; } @@ -219,8 +259,13 @@ public long getId() { * @see WorkingMemory */ public Object getGlobal(final String name) { - final Object object = this.globals.get( name ); - return object; + try { + lock.lock(); + final Object object = this.globals.get( name ); + return object; + } finally { + lock.unlock(); + } } /** @@ -293,14 +338,19 @@ public synchronized void fireAllRules(final AgendaFilter agendaFilter) throws Fa * */ public Object getObject(final FactHandle handle) { - // you must always take the value from the assertMap, incase the handle - // is not from this WorkingMemory - InternalFactHandle factHandle = (InternalFactHandle) this.assertMap.get( handle ); - if ( factHandle != null ) { - return factHandle.getObject(); - } + try { + lock.lock(); + // you must always take the value from the assertMap, incase the handle + // is not from this WorkingMemory + InternalFactHandle factHandle = (InternalFactHandle) this.assertMap.get( handle ); + if ( factHandle != null ) { + return factHandle.getObject(); + } - return null; + return null; + } finally { + lock.unlock(); + } } @@ -308,13 +358,23 @@ public Object getObject(final FactHandle handle) { * @see WorkingMemory */ public FactHandle getFactHandle(final Object object) { - final FactHandle factHandle = (FactHandle) this.assertMap.get( object ); + try { + lock.lock(); + final FactHandle factHandle = (FactHandle) this.assertMap.get( object ); - return factHandle; + return factHandle; + } finally { + lock.unlock(); + } } public List getFactHandles() { - return new ArrayList( this.assertMap.values() ); + try { + lock.lock(); + return new ArrayList( this.assertMap.values() ); + } finally { + lock.unlock(); + } } /** @@ -422,8 +482,8 @@ public FactHandle assertObject(final Object object, return null; } InternalFactHandle handle = null; - 
this.lock.lock(); try { + this.lock.lock(); // check if the object already exists in the WM handle = (InternalFactHandle) this.assertMap.get( object ); @@ -656,8 +716,8 @@ public void retractObject(final FactHandle factHandle, final boolean updateEqualsMap, final Rule rule, final Activation activation) throws FactException { - this.lock.lock(); try { + this.lock.lock(); final InternalFactHandle handle = (InternalFactHandle) factHandle; if ( handle.getId() == -1 ) { // can't retract an already retracted handle diff --git a/drools-core/src/main/java/org/drools/common/BetaNodeBinder.java b/drools-core/src/main/java/org/drools/common/BetaNodeBinder.java index 5b7ea409544..326442b2592 100644 --- a/drools-core/src/main/java/org/drools/common/BetaNodeBinder.java +++ b/drools-core/src/main/java/org/drools/common/BetaNodeBinder.java @@ -60,7 +60,7 @@ public boolean isAllowed(final InternalFactHandle handle, } for ( int i = 0; i < this.constraints.length; i++ ) { - if ( !this.constraints[i].isAllowed( handle, + if ( !this.constraints[i].isAllowed( handle.getObject(), tuple, workingMemory ) ) { return false; diff --git a/drools-core/src/main/java/org/drools/common/InstanceEqualsConstraint.java b/drools-core/src/main/java/org/drools/common/InstanceEqualsConstraint.java index 52cacdf05e9..61008d66b87 100644 --- a/drools-core/src/main/java/org/drools/common/InstanceEqualsConstraint.java +++ b/drools-core/src/main/java/org/drools/common/InstanceEqualsConstraint.java @@ -48,10 +48,10 @@ public Declaration[] getRequiredDeclarations() { return this.declarations; } - public boolean isAllowed(final InternalFactHandle handle, + public boolean isAllowed(final Object object, final Tuple tuple, final WorkingMemory workingMemory) { - return (tuple.get( this.otherColumn ).getObject() == handle.getObject()); + return (tuple.get( this.otherColumn ).getObject() == object); } public String toString() { diff --git a/drools-core/src/main/java/org/drools/common/InstanceNotEqualsConstraint.java 
b/drools-core/src/main/java/org/drools/common/InstanceNotEqualsConstraint.java index 89cb4eb38b5..2be1baefb7c 100644 --- a/drools-core/src/main/java/org/drools/common/InstanceNotEqualsConstraint.java +++ b/drools-core/src/main/java/org/drools/common/InstanceNotEqualsConstraint.java @@ -41,10 +41,10 @@ public Declaration[] getRequiredDeclarations() { return this.declarations; } - public boolean isAllowed(final InternalFactHandle handle, + public boolean isAllowed(final Object object, final Tuple tuple, final WorkingMemory workingMemory) { - return !(tuple.get( this.otherColumn ).getObject() == handle.getObject()); + return !(tuple.get( this.otherColumn ).getObject() == object); } public String toString() { diff --git a/drools-core/src/main/java/org/drools/leaps/AlphaMemory.java b/drools-core/src/main/java/org/drools/leaps/AlphaMemory.java index 3d2ee8ec637..ec11da7389a 100644 --- a/drools-core/src/main/java/org/drools/leaps/AlphaMemory.java +++ b/drools-core/src/main/java/org/drools/leaps/AlphaMemory.java @@ -19,7 +19,7 @@ boolean checkAlpha( final FieldConstraint alpha, final WorkingMemory workingMemory ) { Boolean ret = (Boolean) this.alphaChecks.get( factHandle ); if (ret == null) { - ret = new Boolean( alpha.isAllowed( factHandle, tuple, workingMemory ) ); + ret = new Boolean( alpha.isAllowed( factHandle.getObject(), tuple, workingMemory ) ); this.alphaChecks.put( factHandle, ret ); } diff --git a/drools-core/src/main/java/org/drools/leaps/ColumnConstraints.java b/drools-core/src/main/java/org/drools/leaps/ColumnConstraints.java index 1b3f6f03be6..99de09d18ab 100644 --- a/drools-core/src/main/java/org/drools/leaps/ColumnConstraints.java +++ b/drools-core/src/main/java/org/drools/leaps/ColumnConstraints.java @@ -85,7 +85,7 @@ public final boolean isAllowedAlpha(final InternalFactHandle factHandle, if ( this.alphaPresent ) { for ( int i = 0, length = this.alphaConstraints.length; i < length; i++ ) { // escape immediately if some condition does not match - if ( 
!this.alphaConstraints[i].isAllowed( factHandle, + if ( !this.alphaConstraints[i].isAllowed( factHandle.getObject(), tuple, workingMemory ) ) { return false; diff --git a/drools-core/src/main/java/org/drools/leaps/LeapsWorkingMemory.java b/drools-core/src/main/java/org/drools/leaps/LeapsWorkingMemory.java index af7d4513e68..32bee42bb93 100644 --- a/drools-core/src/main/java/org/drools/leaps/LeapsWorkingMemory.java +++ b/drools-core/src/main/java/org/drools/leaps/LeapsWorkingMemory.java @@ -71,7 +71,7 @@ class LeapsWorkingMemory extends AbstractWorkingMemory implements EventSupport, PropertyChangeListener { - private static final long serialVersionUID = -2524904474925421759L; + private static final long serialVersionUID = 320; private final Map queryResults; @@ -326,8 +326,8 @@ public void modifyObject(final FactHandle factHandle, final Object object, final Rule rule, final Activation activation) throws FactException { - this.getLock().lock(); try { + this.getLock().lock(); final PropagationContext propagationContext = new PropagationContextImpl( this.propagationIdCounter++, PropagationContext.MODIFICATION, rule, diff --git a/drools-core/src/main/java/org/drools/reteoo/AlphaNode.java b/drools-core/src/main/java/org/drools/reteoo/AlphaNode.java index 709baa87638..b65c2fd9f76 100644 --- a/drools-core/src/main/java/org/drools/reteoo/AlphaNode.java +++ b/drools-core/src/main/java/org/drools/reteoo/AlphaNode.java @@ -131,7 +131,7 @@ public void assertObject(final DefaultFactHandle handle, final PropagationContext context, final ReteooWorkingMemory workingMemory) throws FactException { final Set memory = (Set) workingMemory.getNodeMemory( this ); - if ( this.constraint.isAllowed( handle, + if ( this.constraint.isAllowed( handle.getObject(), null, workingMemory ) ) { memory.add( handle ); @@ -157,7 +157,7 @@ public void modifyObject(final DefaultFactHandle handle, final ReteooWorkingMemory workingMemory) { final Set memory = (Set) workingMemory.getNodeMemory( this ); - if 
( this.constraint.isAllowed( handle, + if ( this.constraint.isAllowed( handle.getObject(), null, workingMemory ) ) { if ( memory.add( handle ) ) { diff --git a/drools-core/src/main/java/org/drools/reteoo/FromNode.java b/drools-core/src/main/java/org/drools/reteoo/FromNode.java new file mode 100644 index 00000000000..d0b263655ae --- /dev/null +++ b/drools-core/src/main/java/org/drools/reteoo/FromNode.java @@ -0,0 +1,236 @@ +package org.drools.reteoo; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import org.drools.RuleBaseConfiguration; +import org.drools.common.BetaNodeBinder; +import org.drools.common.DefaultFactHandle; +import org.drools.common.InternalFactHandle; +import org.drools.common.NodeMemory; +import org.drools.common.PropagationContextImpl; +import org.drools.rule.Declaration; +import org.drools.rule.EvalCondition; +import org.drools.rule.From; +import org.drools.spi.Constraint; +import org.drools.spi.DataProvider; +import org.drools.spi.FieldConstraint; +import org.drools.spi.PropagationContext; + +public class FromNode extends TupleSource + implements + TupleSink, + NodeMemory { + /** + * + */ + private static final long serialVersionUID = 320; + + private DataProvider dataProvider; + private TupleSource tupleSource; + private FieldConstraint[] constraints; + private BetaNodeBinder binder; + + public FromNode(final int id, + final DataProvider dataProvider, + final TupleSource tupleSource, + final FieldConstraint[] constraints, + final BetaNodeBinder binder) { + super( id ); + this.dataProvider = dataProvider; + this.tupleSource = tupleSource; + this.constraints = constraints; + if ( binder == null ) { + this.binder = new BetaNodeBinder(); + } else { + this.binder = binder; + } + } + + /** + * This method isn't as efficient as it could be, as its using the standard join node mechanisms - so everything is bidirectionally + * linked. 
As FactHandle's are never retracted, this relationship does not need to be maintined - but as this optimisation would + * need refactoring, I've used the standard join node mechanism for now. + * + */ + public void assertTuple(ReteTuple leftTuple, + PropagationContext context, + ReteooWorkingMemory workingMemory) { + final BetaMemory memory = (BetaMemory) workingMemory.getNodeMemory( this ); + + memory.add( workingMemory, + leftTuple ); + + for ( Iterator it = this.dataProvider.getResults( leftTuple ); it.hasNext(); ) { + Object object = it.next(); + + // First alpha node filters + boolean isAllowed = true; + for ( int i = 0, length = this.constraints.length; i < length; i++ ) { + if ( !this.constraints[i].isAllowed( object, leftTuple, workingMemory ) ) { + isAllowed = false; + break; + } + } + + if ( !isAllowed ) { + continue; + } + + final InternalFactHandle handle = workingMemory.getFactHandleFactory().newFactHandle( object ); + final ObjectMatches objectMatches = new ObjectMatches( (DefaultFactHandle) handle ); + + if ( binder.isAllowed( handle, + leftTuple, + workingMemory ) ) { + final TupleMatch tupleMatch = new TupleMatch( leftTuple, + objectMatches ); + + leftTuple.addTupleMatch( (DefaultFactHandle) handle, + tupleMatch ); + + propagateAssertTuple( new ReteTuple( leftTuple, + (DefaultFactHandle) handle ), + tupleMatch, + context, + workingMemory ); + } + } + } + + /** + * This could be made more intelligent by finding out if the modified Fact is depended upon by the requiredDeclarations. + * If it isn't then we can continue to just propagate as a normal modify, without having to retrieve and check values + * from the DataProvider. 
+ */ + public void modifyTuple(ReteTuple leftTuple, + PropagationContext context, + ReteooWorkingMemory workingMemory) { + final BetaMemory memory = (BetaMemory) workingMemory.getNodeMemory( this ); + + // We remove the tuple as now its modified it needs to go to the top of + // the stack, which is added back in else where + memory.remove( workingMemory, + leftTuple ); + + final Map matches = leftTuple.getTupleMatches(); + + if ( matches.isEmpty() ) { + // No child propagations, so try as a new assert, will ensure the + // tuple is added to the top of the memory + assertTuple( leftTuple, + context, + workingMemory ); + } else { + // first purge the network of all future uses of the 'from' facts + for ( final Iterator it = matches.values().iterator(); it.hasNext(); ) { + final TupleMatch tupleMatch = (TupleMatch) it.next(); + workingMemory.getFactHandleFactory().destroyFactHandle( tupleMatch.getObjectMatches().getFactHandle() ); + propagateRetractTuple( tupleMatch, + context, + workingMemory ); + } + + // now all existing matches must now be cleared and the DataProvider re-processed. 
+ leftTuple.clearTupleMatches(); + + assertTuple( leftTuple, + context, + workingMemory ); + + } + } + + public void retractTuple(ReteTuple leftTuple, + PropagationContext context, + ReteooWorkingMemory workingMemory) { + final BetaMemory memory = (BetaMemory) workingMemory.getNodeMemory( this ); + memory.remove( workingMemory, + leftTuple ); + + final Map matches = leftTuple.getTupleMatches(); + + if ( !matches.isEmpty() ) { + for ( final Iterator it = matches.values().iterator(); it.hasNext(); ) { + final TupleMatch tupleMatch = (TupleMatch) it.next(); + workingMemory.getFactHandleFactory().destroyFactHandle( tupleMatch.getObjectMatches().getFactHandle() ); + propagateRetractTuple( tupleMatch, + context, + workingMemory ); + } + } + } + + public List getPropagatedTuples(ReteooWorkingMemory workingMemory, + TupleSink sink) { + // TODO Auto-generated method stub + return null; + } + + public void attach() { + this.tupleSource.addTupleSink( this ); + } + + public void attach(ReteooWorkingMemory[] workingMemories) { + attach(); + + for ( int i = 0, length = workingMemories.length; i < length; i++ ) { + final ReteooWorkingMemory workingMemory = workingMemories[i]; + final PropagationContext propagationContext = new PropagationContextImpl( workingMemory.getNextPropagationIdCounter(), + PropagationContext.RULE_ADDITION, + null, + null ); + this.tupleSource.updateNewNode( workingMemory, + propagationContext ); + } + } + + public void remove(BaseNode node, + ReteooWorkingMemory[] workingMemories) { + getTupleSinks().remove( node ); + removeShare(); + + if ( this.sharedCount < 0 ) { + for ( int i = 0, length = workingMemories.length; i < length; i++ ) { + workingMemories[i].clearNodeMemory( this ); + } + this.tupleSource.remove( this, + workingMemories ); + } + } + + public void updateNewNode(ReteooWorkingMemory workingMemory, + PropagationContext context) { + this.attachingNewNode = true; + + final BetaMemory memory = (BetaMemory) workingMemory.getNodeMemory( this ); + + 
// @todo:as there is no right memory + + // for ( final Iterator it = memory.getRightObjectMemory().iterator(); it.hasNext(); ) { + // final ObjectMatches objectMatches = (ObjectMatches) it.next(); + // final DefaultFactHandle handle = objectMatches.getFactHandle(); + // for ( TupleMatch tupleMatch = objectMatches.getFirstTupleMatch(); tupleMatch != null; tupleMatch = (TupleMatch) tupleMatch.getNext() ) { + // final ReteTuple tuple = new ReteTuple( tupleMatch.getTuple(), + // handle ); + // final TupleSink sink = (TupleSink) this.tupleSinks.get( this.tupleSinks.size() - 1 ); + // if ( sink != null ) { + // tupleMatch.addJoinedTuple( tuple ); + // sink.assertTuple( tuple, + // context, + // workingMemory ); + // } else { + // throw new RuntimeException( "Possible BUG: trying to propagate an assert to a node that was the last added node" ); + // } + // } + // } + + this.attachingNewNode = false; + } + + public Object createMemory(RuleBaseConfiguration config) { + return new BetaMemory( config, + this.binder ); + } +} diff --git a/drools-core/src/main/java/org/drools/reteoo/ReteTuple.java b/drools-core/src/main/java/org/drools/reteoo/ReteTuple.java index 7a2ed20cf43..2b04de5b13d 100644 --- a/drools-core/src/main/java/org/drools/reteoo/ReteTuple.java +++ b/drools-core/src/main/java/org/drools/reteoo/ReteTuple.java @@ -203,6 +203,10 @@ public void clearLinkedTuple() { this.linkedTuples.clear(); } + public void clearTupleMatches() { + this.matches.clear(); + } + public void addTupleMatch(final DefaultFactHandle handle, final TupleMatch node) { if ( this.matches == Collections.EMPTY_MAP ) { diff --git a/drools-core/src/main/java/org/drools/reteoo/ReteooBuilder.java b/drools-core/src/main/java/org/drools/reteoo/ReteooBuilder.java index 53dd1363397..4cf8a5bb231 100644 --- a/drools-core/src/main/java/org/drools/reteoo/ReteooBuilder.java +++ b/drools-core/src/main/java/org/drools/reteoo/ReteooBuilder.java @@ -41,6 +41,7 @@ import org.drools.rule.Declaration; import 
org.drools.rule.EvalCondition; import org.drools.rule.Exists; +import org.drools.rule.From; import org.drools.rule.GroupElement; import org.drools.rule.InvalidPatternException; import org.drools.rule.LiteralConstraint; @@ -555,6 +556,63 @@ private TupleSource attachNode(final TupleSource candidate) { return node; } + + private void attachFrom(final TupleSource tupleSource, + final From from) { + Column column = from.getColumn(); + + // Adjusting offset in case a previous Initial-Fact was added to the network + column.adjustOffset( this.currentOffsetAdjustment ); + + final List constraints = column.getConstraints(); + + // Check if the Column is bound + if ( column.getDeclaration() != null ) { + final Declaration declaration = column.getDeclaration(); + // Add the declaration the map of previously bound declarations + this.declarations.put( declaration.getIdentifier(), + declaration ); + } + + final List predicateConstraints = new ArrayList(); + final List alphaNodeConstraints = new ArrayList(); + + for ( final Iterator it = constraints.iterator(); it.hasNext(); ) { + final Object object = it.next(); + // Check if its a declaration + if ( object instanceof Declaration ) { + final Declaration declaration = (Declaration) object; + // Add the declaration the map of previously bound declarations + this.declarations.put( declaration.getIdentifier(), + declaration ); + continue; + } + + final FieldConstraint fieldConstraint = (FieldConstraint) object; + if ( fieldConstraint instanceof LiteralConstraint ) { + alphaNodeConstraints.add( fieldConstraint ); + } else { + checkUnboundDeclarations( fieldConstraint.getRequiredDeclarations() ); + predicateConstraints.add( fieldConstraint ); + } + } + + + BetaNodeBinder binder; + + if ( !predicateConstraints.isEmpty() ) { + binder = new BetaNodeBinder( (FieldConstraint[]) predicateConstraints.toArray( new FieldConstraint[predicateConstraints.size()] ) ); + } else { + binder = new BetaNodeBinder(); + } + + FromNode node = new 
FromNode( id, + from.getDataProvider(), + this.tupleSource, + ( FieldConstraint[] ) alphaNodeConstraints.toArray( new FieldConstraint[ alphaNodeConstraints.size() ] ), + binder ); + + } private ObjectSource attachNode(final ObjectSource candidate) { ObjectSource node = (ObjectSource) this.attachedNodes.get( candidate ); diff --git a/drools-core/src/main/java/org/drools/reteoo/ReteooWorkingMemory.java b/drools-core/src/main/java/org/drools/reteoo/ReteooWorkingMemory.java index 6e58b189b2c..d0fc775bfb5 100644 --- a/drools-core/src/main/java/org/drools/reteoo/ReteooWorkingMemory.java +++ b/drools-core/src/main/java/org/drools/reteoo/ReteooWorkingMemory.java @@ -48,7 +48,7 @@ public class ReteooWorkingMemory extends AbstractWorkingMemory { /** * */ - private static final long serialVersionUID = -5107074490638575715L; + private static final long serialVersionUID = 320; /** * Construct. @@ -87,8 +87,8 @@ public void modifyObject(final FactHandle factHandle, final Object object, final Rule rule, final Activation activation) throws FactException { - this.lock.lock(); try { + this.lock.lock(); final int status = ((InternalFactHandle) factHandle).getEqualityKey().getStatus(); final InternalFactHandle handle = (InternalFactHandle) factHandle; final Object originalObject = handle.getObject(); diff --git a/drools-core/src/main/java/org/drools/reteoo/TerminalNode.java b/drools-core/src/main/java/org/drools/reteoo/TerminalNode.java index 1b20118858c..2d28ed3cbc9 100644 --- a/drools-core/src/main/java/org/drools/reteoo/TerminalNode.java +++ b/drools-core/src/main/java/org/drools/reteoo/TerminalNode.java @@ -52,7 +52,7 @@ final class TerminalNode extends BaseNode /** * */ - private static final long serialVersionUID = -4172639826881353001L; + private static final long serialVersionUID = 320; /** The rule to invoke upon match. 
*/ private final Rule rule; private final TupleSource tupleSource; diff --git a/drools-core/src/main/java/org/drools/rule/AndCompositeRestriction.java b/drools-core/src/main/java/org/drools/rule/AndCompositeRestriction.java index 0c2fe45e803..1fd2668ef83 100644 --- a/drools-core/src/main/java/org/drools/rule/AndCompositeRestriction.java +++ b/drools-core/src/main/java/org/drools/rule/AndCompositeRestriction.java @@ -4,7 +4,6 @@ import java.util.Set; import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.spi.Restriction; import org.drools.spi.Tuple; @@ -17,13 +16,11 @@ public AndCompositeRestriction(Restriction[] restriction) { } public boolean isAllowed(final Object object, - final InternalFactHandle handle, final Tuple tuple, final WorkingMemory workingMemory) { for ( int i = 0, ilength = this.restrictions.length; i < ilength; i++ ) { if ( !restrictions[i].isAllowed( object, - handle, tuple, workingMemory ) ) { return false; diff --git a/drools-core/src/main/java/org/drools/rule/Column.java b/drools-core/src/main/java/org/drools/rule/Column.java index 6358c5140aa..e05f550552b 100644 --- a/drools-core/src/main/java/org/drools/rule/Column.java +++ b/drools-core/src/main/java/org/drools/rule/Column.java @@ -33,7 +33,7 @@ public class Column /** * */ - private static final long serialVersionUID = 9167552040211010022L; + private static final long serialVersionUID = 320; private final ObjectType objectType; private List constraints = Collections.EMPTY_LIST; final Declaration declaration; diff --git a/drools-core/src/main/java/org/drools/rule/From.java b/drools-core/src/main/java/org/drools/rule/From.java new file mode 100644 index 00000000000..1d34d086dc5 --- /dev/null +++ b/drools-core/src/main/java/org/drools/rule/From.java @@ -0,0 +1,25 @@ +package org.drools.rule; + +import java.io.Serializable; + +import org.drools.spi.DataProvider; + +public class From implements Serializable{ + private Column column; + + private 
DataProvider dataProvider; + + public From(final Column column, + final DataProvider dataProvider) { + this.column = column; + this.dataProvider = dataProvider; + } + + public Column getColumn() { + return column; + } + + public DataProvider getDataProvider() { + return dataProvider; + } +} diff --git a/drools-core/src/main/java/org/drools/rule/LiteralConstraint.java b/drools-core/src/main/java/org/drools/rule/LiteralConstraint.java index 8091b853a65..e84e1abea08 100644 --- a/drools-core/src/main/java/org/drools/rule/LiteralConstraint.java +++ b/drools-core/src/main/java/org/drools/rule/LiteralConstraint.java @@ -17,7 +17,6 @@ */ import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.spi.Evaluator; import org.drools.spi.FieldConstraint; import org.drools.spi.FieldExtractor; @@ -71,10 +70,10 @@ public Declaration[] getRequiredDeclarations() { return this.restriction.getRequiredDeclarations(); } - public boolean isAllowed(final InternalFactHandle handle, + public boolean isAllowed(final Object object, final Tuple tuple, final WorkingMemory workingMemory) { - return this.restriction.isAllowed( this.extractor.getValue( handle.getObject() ), handle, tuple, workingMemory ); + return this.restriction.isAllowed( this.extractor.getValue( object ), tuple, workingMemory ); } public String toString() { diff --git a/drools-core/src/main/java/org/drools/rule/LiteralRestriction.java b/drools-core/src/main/java/org/drools/rule/LiteralRestriction.java index 5e2f85db22b..c422d889941 100644 --- a/drools-core/src/main/java/org/drools/rule/LiteralRestriction.java +++ b/drools-core/src/main/java/org/drools/rule/LiteralRestriction.java @@ -17,7 +17,6 @@ */ import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.spi.Evaluator; import org.drools.spi.FieldConstraint; import org.drools.spi.FieldExtractor; @@ -64,7 +63,6 @@ public Declaration[] getRequiredDeclarations() { } public boolean isAllowed(final Object 
object, - final InternalFactHandle handle, final Tuple tuple, final WorkingMemory workingMemory) { return this.evaluator.evaluate( object, diff --git a/drools-core/src/main/java/org/drools/rule/MultiRestrictionFieldConstraint.java b/drools-core/src/main/java/org/drools/rule/MultiRestrictionFieldConstraint.java index 143690164fd..ddca8293c45 100644 --- a/drools-core/src/main/java/org/drools/rule/MultiRestrictionFieldConstraint.java +++ b/drools-core/src/main/java/org/drools/rule/MultiRestrictionFieldConstraint.java @@ -4,7 +4,6 @@ import java.util.Set; import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.spi.Evaluator; import org.drools.spi.Extractor; import org.drools.spi.FieldConstraint; @@ -40,11 +39,10 @@ public Declaration[] getRequiredDeclarations() { return this.restrictions.getRequiredDeclarations(); } - public boolean isAllowed(final InternalFactHandle handle, + public boolean isAllowed(final Object object, final Tuple tuple, final WorkingMemory workingMemory) { - return this.restrictions.isAllowed( this.extractor.getValue( handle.getObject() ), - handle, + return this.restrictions.isAllowed( this.extractor.getValue( object ), tuple, workingMemory ); } diff --git a/drools-core/src/main/java/org/drools/rule/OrCompositeRestriction.java b/drools-core/src/main/java/org/drools/rule/OrCompositeRestriction.java index 56fb080a103..6f8e19f36bb 100644 --- a/drools-core/src/main/java/org/drools/rule/OrCompositeRestriction.java +++ b/drools-core/src/main/java/org/drools/rule/OrCompositeRestriction.java @@ -5,7 +5,6 @@ import java.util.Set; import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.spi.Restriction; import org.drools.spi.Tuple; @@ -18,13 +17,11 @@ public OrCompositeRestriction(Restriction[] restriction) { } public boolean isAllowed(final Object object, - final InternalFactHandle handle, final Tuple tuple, final WorkingMemory workingMemory) { for ( int i = 0, ilength = 
this.restrictions.length; i < ilength; i++ ) { if ( restrictions[i].isAllowed( object, - handle, tuple, workingMemory ) ) { return true; diff --git a/drools-core/src/main/java/org/drools/rule/PredicateConstraint.java b/drools-core/src/main/java/org/drools/rule/PredicateConstraint.java index ea108f33a21..a0e5e40bf93 100644 --- a/drools-core/src/main/java/org/drools/rule/PredicateConstraint.java +++ b/drools-core/src/main/java/org/drools/rule/PredicateConstraint.java @@ -18,7 +18,6 @@ import org.drools.RuntimeDroolsException; import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.spi.FieldConstraint; import org.drools.spi.PredicateExpression; import org.drools.spi.Tuple; @@ -85,12 +84,12 @@ public String toString() { return "[PredicateConstraint declarations=" + this.requiredDeclarations + "]"; } - public boolean isAllowed(final InternalFactHandle handle, + public boolean isAllowed(final Object object, final Tuple tuple, final WorkingMemory workingMemory) { try { - return this.expression.evaluate( tuple, - handle, + return this.expression.evaluate( object, + tuple, this.declaration, this.requiredDeclarations, workingMemory ); diff --git a/drools-core/src/main/java/org/drools/rule/ReturnValueConstraint.java b/drools-core/src/main/java/org/drools/rule/ReturnValueConstraint.java index 66ab234668a..26fe31fc289 100644 --- a/drools-core/src/main/java/org/drools/rule/ReturnValueConstraint.java +++ b/drools-core/src/main/java/org/drools/rule/ReturnValueConstraint.java @@ -18,7 +18,6 @@ import org.drools.RuntimeDroolsException; import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.spi.Evaluator; import org.drools.spi.FieldConstraint; import org.drools.spi.FieldExtractor; @@ -77,11 +76,10 @@ public Evaluator getEvaluator() { return this.restriction.getEvaluator(); } - public boolean isAllowed(final InternalFactHandle handle, + public boolean isAllowed(final Object object, final Tuple tuple, 
final WorkingMemory workingMemory) { - return this.restriction.isAllowed( this.fieldExtractor.getValue( handle.getObject() ), - handle, + return this.restriction.isAllowed( this.fieldExtractor.getValue( object ), tuple, workingMemory ); } diff --git a/drools-core/src/main/java/org/drools/rule/ReturnValueRestriction.java b/drools-core/src/main/java/org/drools/rule/ReturnValueRestriction.java index bf37691e79c..53f23a2e946 100644 --- a/drools-core/src/main/java/org/drools/rule/ReturnValueRestriction.java +++ b/drools-core/src/main/java/org/drools/rule/ReturnValueRestriction.java @@ -20,7 +20,6 @@ import org.drools.RuntimeDroolsException; import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.spi.Evaluator; import org.drools.spi.FieldConstraint; import org.drools.spi.FieldExtractor; @@ -93,7 +92,6 @@ public Evaluator getEvaluator() { } public boolean isAllowed(final Object object, - final InternalFactHandle handle, final Tuple tuple, final WorkingMemory workingMemory) { try { diff --git a/drools-core/src/main/java/org/drools/rule/VariableConstraint.java b/drools-core/src/main/java/org/drools/rule/VariableConstraint.java index d8acc9e768e..6b138d7b516 100644 --- a/drools-core/src/main/java/org/drools/rule/VariableConstraint.java +++ b/drools-core/src/main/java/org/drools/rule/VariableConstraint.java @@ -17,7 +17,6 @@ */ import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.spi.Evaluator; import org.drools.spi.FieldConstraint; import org.drools.spi.FieldExtractor; @@ -61,11 +60,10 @@ public Evaluator getEvaluator() { return this.restriction.getEvaluator(); } - public boolean isAllowed(final InternalFactHandle handle, + public boolean isAllowed(final Object object, final Tuple tuple, final WorkingMemory workingMemory) { - return this.restriction.isAllowed( this.fieldExtractor.getValue( handle.getObject() ), - handle, + return this.restriction.isAllowed( this.fieldExtractor.getValue( 
object ), tuple, workingMemory ); } diff --git a/drools-core/src/main/java/org/drools/rule/VariableRestriction.java b/drools-core/src/main/java/org/drools/rule/VariableRestriction.java index 574e18bae04..483c7e36b6f 100644 --- a/drools-core/src/main/java/org/drools/rule/VariableRestriction.java +++ b/drools-core/src/main/java/org/drools/rule/VariableRestriction.java @@ -19,7 +19,6 @@ import java.util.Arrays; import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.spi.Evaluator; import org.drools.spi.FieldConstraint; import org.drools.spi.FieldExtractor; @@ -57,7 +56,6 @@ public Evaluator getEvaluator() { } public boolean isAllowed(final Object object, - final InternalFactHandle handle, final Tuple tuple, final WorkingMemory workingMemory) { return this.evaluator.evaluate( object, diff --git a/drools-core/src/main/java/org/drools/spi/DataProvider.java b/drools-core/src/main/java/org/drools/spi/DataProvider.java new file mode 100644 index 00000000000..d8ec24c4eb1 --- /dev/null +++ b/drools-core/src/main/java/org/drools/spi/DataProvider.java @@ -0,0 +1,13 @@ +package org.drools.spi; + +import java.util.Iterator; +import java.util.List; + +import org.drools.rule.Declaration; + +public interface DataProvider { + + public Declaration[] getRequiredDeclarations(); + + public Iterator getResults(Tuple tuple); +} diff --git a/drools-core/src/main/java/org/drools/spi/FieldConstraint.java b/drools-core/src/main/java/org/drools/spi/FieldConstraint.java index db3b3bb67e2..b6382ce41a8 100644 --- a/drools-core/src/main/java/org/drools/spi/FieldConstraint.java +++ b/drools-core/src/main/java/org/drools/spi/FieldConstraint.java @@ -17,13 +17,12 @@ */ import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.rule.Declaration; public interface FieldConstraint extends Constraint { - public boolean isAllowed(InternalFactHandle handle, + public boolean isAllowed(Object object, Tuple tuple, WorkingMemory 
workingMemory); diff --git a/drools-core/src/main/java/org/drools/spi/PredicateExpression.java b/drools-core/src/main/java/org/drools/spi/PredicateExpression.java index 32b43c48525..e1b420a2bea 100644 --- a/drools-core/src/main/java/org/drools/spi/PredicateExpression.java +++ b/drools-core/src/main/java/org/drools/spi/PredicateExpression.java @@ -23,8 +23,8 @@ public interface PredicateExpression extends Invoker { - public boolean evaluate(Tuple tuple, - FactHandle factHandle, + public boolean evaluate(Object object, + Tuple tuple, Declaration declaration, Declaration[] requiredDeclarations, WorkingMemory workingMemory) throws Exception; diff --git a/drools-core/src/main/java/org/drools/spi/Restriction.java b/drools-core/src/main/java/org/drools/spi/Restriction.java index 5a8db16e415..e90b1681c7e 100644 --- a/drools-core/src/main/java/org/drools/spi/Restriction.java +++ b/drools-core/src/main/java/org/drools/spi/Restriction.java @@ -3,14 +3,12 @@ import java.io.Serializable; import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.rule.Declaration; public interface Restriction extends Serializable { Declaration[] getRequiredDeclarations(); public boolean isAllowed(Object object, - InternalFactHandle handle, Tuple tuple, WorkingMemory workingMemory); } diff --git a/drools-core/src/test/java/org/drools/reteoo/FromNodeTest.java b/drools-core/src/test/java/org/drools/reteoo/FromNodeTest.java new file mode 100644 index 00000000000..f97eafda904 --- /dev/null +++ b/drools-core/src/test/java/org/drools/reteoo/FromNodeTest.java @@ -0,0 +1,423 @@ +package org.drools.reteoo; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; + +import org.drools.Cheese; +import org.drools.FactHandle; +import org.drools.RuleBaseFactory; +import org.drools.base.ClassFieldExtractor; +import org.drools.base.ClassObjectType; +import org.drools.base.ValueType; +import 
org.drools.base.evaluators.Operator; +import org.drools.common.DefaultFactHandle; +import org.drools.common.InternalFactHandle; +import org.drools.common.PropagationContextImpl; +import org.drools.rule.Column; +import org.drools.rule.Declaration; +import org.drools.rule.From; +import org.drools.rule.LiteralConstraint; +import org.drools.rule.VariableConstraint; +import org.drools.spi.DataProvider; +import org.drools.spi.Evaluator; +import org.drools.spi.FieldConstraint; +import org.drools.spi.FieldValue; +import org.drools.spi.MockField; +import org.drools.spi.PropagationContext; +import org.drools.spi.Tuple; + +import junit.framework.TestCase; + +public class FromNodeTest extends TestCase { + + public void testAlphaNode() { + final PropagationContext context = new PropagationContextImpl( 0, + PropagationContext.ASSERTION, + null, + null ); + final ReteooWorkingMemory workingMemory = new ReteooWorkingMemory( 1, + (ReteooRuleBase) RuleBaseFactory.newRuleBase() ); + final ClassFieldExtractor extractor = new ClassFieldExtractor( Cheese.class, + "type" ); + + final FieldValue field = new MockField( "stilton" ); + final LiteralConstraint constraint = new LiteralConstraint( extractor, + ValueType.STRING_TYPE.getEvaluator( Operator.EQUAL ), + field ); + + List list = new ArrayList(); + Cheese cheese1 = new Cheese( "cheddar", + 20 ); + Cheese cheese2 = new Cheese( "brie", + 20 ); + list.add( cheese1 ); + list.add( cheese2 ); + MockDataProvider dataProvider = new MockDataProvider( list ); + + FromNode from = new FromNode( 3, + dataProvider, + null, + new FieldConstraint[]{constraint}, + null ); + MockTupleSink sink = new MockTupleSink( 5 ); + from.addTupleSink( sink ); + + Person person1 = new Person( "xxx1", + 30 ); + FactHandle person1Handle = workingMemory.assertObject( person1 ); + ReteTuple tuple1 = new ReteTuple( (DefaultFactHandle) person1Handle ); + from.assertTuple( tuple1, + context, + workingMemory ); + + // nothing should be asserted, as cheese1 is cheddar and 
we are filtering on stilton + assertEquals( 0, + sink.getAsserted().size() ); + + //Set cheese1 to stilton and it should now propagate + cheese1.setType( "stilton" ); + Person person2 = new Person( "xxx2", + 30 ); + FactHandle person2Handle = workingMemory.assertObject( person2 ); + ReteTuple tuple2 = new ReteTuple( (DefaultFactHandle) person2Handle ); + from.assertTuple( tuple2, + context, + workingMemory ); + + List asserted = sink.getAsserted(); + assertEquals( 1, + asserted.size() ); + ReteTuple tuple = (ReteTuple) ((Object[]) asserted.get( 0 ))[0]; + assertSame( person2, + tuple.getFactHandles()[0].getObject() ); + assertSame( cheese1, + tuple.getFactHandles()[1].getObject() ); + + cheese2.setType( "stilton" ); + Person person3 = new Person( "xxx2", + 30 ); + FactHandle person3Handle = workingMemory.assertObject( person3 ); + ReteTuple tuple3 = new ReteTuple( (DefaultFactHandle) person3Handle ); + from.assertTuple( tuple3, + context, + workingMemory ); + + assertEquals( 3, + asserted.size() ); + tuple = (ReteTuple) ((Object[]) asserted.get( 1 ))[0]; + assertSame( person3, + tuple.getFactHandles()[0].getObject() ); + assertSame( cheese1, + tuple.getFactHandles()[1].getObject() ); + tuple = (ReteTuple) ((Object[]) asserted.get( 2 ))[0]; + assertSame( person3, + tuple.getFactHandles()[0].getObject() ); + assertSame( cheese2, + tuple.getFactHandles()[1].getObject() ); + + assertNotSame( cheese1, + cheese2 ); + } + + public void testBetaNode() { + final PropagationContext context = new PropagationContextImpl( 0, + PropagationContext.ASSERTION, + null, + null ); + + final ReteooWorkingMemory workingMemory = new ReteooWorkingMemory( 1, + (ReteooRuleBase) RuleBaseFactory.newRuleBase() ); + + final ClassFieldExtractor priceExtractor = new ClassFieldExtractor( Cheese.class, + "price" ); + + final ClassFieldExtractor ageExtractor = new ClassFieldExtractor( Person.class, + "age" ); + + Declaration declaration = new Declaration( "age", + ageExtractor, + 0 ); + + 
VariableConstraint variableConstraint = new VariableConstraint( priceExtractor, + declaration, + ValueType.INTEGER_TYPE.getEvaluator( Operator.EQUAL ) ); + + List list = new ArrayList(); + Cheese cheese1 = new Cheese( "cheddar", + 18 ); + Cheese cheese2 = new Cheese( "brie", + 12 ); + list.add( cheese1 ); + list.add( cheese2 ); + MockDataProvider dataProvider = new MockDataProvider( list ); + + FromNode from = new FromNode( 3, + dataProvider, + null, + new FieldConstraint[]{variableConstraint}, + null ); + MockTupleSink sink = new MockTupleSink( 5 ); + from.addTupleSink( sink ); + + Person person1 = new Person( "xxx1", + 30 ); + FactHandle person1Handle = workingMemory.assertObject( person1 ); + ReteTuple tuple1 = new ReteTuple( (DefaultFactHandle) person1Handle ); + from.assertTuple( tuple1, + context, + workingMemory ); + + // nothing should be asserted, as cheese1 is cheddar and we are filtering on stilton + assertEquals( 0, + sink.getAsserted().size() ); + + //Set cheese1 to stilton and it should now propagate + cheese1.setPrice( 30 ); + Person person2 = new Person( "xxx2", + 30 ); + FactHandle person2Handle = workingMemory.assertObject( person2 ); + ReteTuple tuple2 = new ReteTuple( (DefaultFactHandle) person2Handle ); + from.assertTuple( tuple2, + context, + workingMemory ); + + List asserted = sink.getAsserted(); + assertEquals( 1, + asserted.size() ); + ReteTuple tuple = (ReteTuple) ((Object[]) asserted.get( 0 ))[0]; + assertSame( person2, + tuple.getFactHandles()[0].getObject() ); + assertSame( cheese1, + tuple.getFactHandles()[1].getObject() ); + + cheese2.setPrice( 30 ); + Person person3 = new Person( "xxx2", + 30 ); + FactHandle person3Handle = workingMemory.assertObject( person3 ); + ReteTuple tuple3 = new ReteTuple( (DefaultFactHandle) person3Handle ); + from.assertTuple( tuple3, + context, + workingMemory ); + + assertEquals( 3, + asserted.size() ); + tuple = (ReteTuple) ((Object[]) asserted.get( 1 ))[0]; + assertSame( person3, + 
tuple.getFactHandles()[0].getObject() ); + assertSame( cheese1, + tuple.getFactHandles()[1].getObject() ); + tuple = (ReteTuple) ((Object[]) asserted.get( 2 ))[0]; + assertSame( person3, + tuple.getFactHandles()[0].getObject() ); + assertSame( cheese2, + tuple.getFactHandles()[1].getObject() ); + + assertNotSame( cheese1, + cheese2 ); + } + + public void testRestract() { + final PropagationContext context = new PropagationContextImpl( 0, + PropagationContext.ASSERTION, + null, + null ); + final ReteooWorkingMemory workingMemory = new ReteooWorkingMemory( 1, + (ReteooRuleBase) RuleBaseFactory.newRuleBase() ); + final ClassFieldExtractor extractor = new ClassFieldExtractor( Cheese.class, + "type" ); + + final FieldValue field = new MockField( "stilton" ); + final LiteralConstraint constraint = new LiteralConstraint( extractor, + ValueType.STRING_TYPE.getEvaluator( Operator.EQUAL ), + field ); + + List list = new ArrayList(); + Cheese cheese1 = new Cheese( "stilton", + 5 ); + Cheese cheese2 = new Cheese( "stilton", + 15 ); + list.add( cheese1 ); + list.add( cheese2 ); + MockDataProvider dataProvider = new MockDataProvider( list ); + + FromNode from = new FromNode( 3, + dataProvider, + null, + new FieldConstraint[]{constraint}, + null ); + MockTupleSink sink = new MockTupleSink( 5 ); + from.addTupleSink( sink ); + + List asserted = sink.getAsserted(); + + Person person1 = new Person( "xxx2", + 30 ); + FactHandle person1Handle = workingMemory.assertObject( person1 ); + ReteTuple tuple = new ReteTuple( (DefaultFactHandle) person1Handle ); + from.assertTuple( tuple, + context, + workingMemory ); + + assertEquals( 2, + asserted.size() ); + + BetaMemory memory = (BetaMemory) workingMemory.getNodeMemory( from ); + assertEquals( 1, + memory.getLeftTupleMemory().size() ); + assertEquals( 0, + memory.getRightObjectMemory().size() ); + assertEquals( 2, + tuple.getTupleMatches().size() ); + + list = new ArrayList(); + for ( Iterator it = 
tuple.getTupleMatches().values().iterator(); it.hasNext(); ) { + TupleMatch tupleMatch = (TupleMatch) it.next(); + list.add( tupleMatch.getObjectMatches().getFactHandle().getObject() ); + } + assertEquals( 2, + list.size() ); + assertTrue( list.contains( cheese1 ) ); + assertTrue( list.contains( cheese2 ) ); + + from.retractTuple( tuple, + context, + workingMemory ); + assertEquals( 0, + memory.getLeftTupleMemory().size() ); + assertEquals( 0, + memory.getRightObjectMemory().size() ); + } + + public void testModify() { + final PropagationContext context = new PropagationContextImpl( 0, + PropagationContext.ASSERTION, + null, + null ); + final ReteooWorkingMemory workingMemory = new ReteooWorkingMemory( 1, + (ReteooRuleBase) RuleBaseFactory.newRuleBase() ); + final ClassFieldExtractor extractor = new ClassFieldExtractor( Cheese.class, + "type" ); + + final FieldValue field = new MockField( "stilton" ); + final LiteralConstraint constraint = new LiteralConstraint( extractor, + ValueType.STRING_TYPE.getEvaluator( Operator.EQUAL ), + field ); + + List list = new ArrayList(); + Cheese cheese1 = new Cheese( "cheddar", + 20 ); + Cheese cheese2 = new Cheese( "brie", + 20 ); + list.add( cheese1 ); + list.add( cheese2 ); + MockDataProvider dataProvider = new MockDataProvider( list ); + + FromNode from = new FromNode( 3, + dataProvider, + null, + new FieldConstraint[]{constraint}, + null ); + MockTupleSink sink = new MockTupleSink( 5 ); + from.addTupleSink( sink ); + + Person person1 = new Person( "xxx1", + 30 ); + FactHandle person1Handle = workingMemory.assertObject( person1 ); + ReteTuple tuple1 = new ReteTuple( (DefaultFactHandle) person1Handle ); + from.assertTuple( tuple1, + context, + workingMemory ); + + // nothing should be asserted, as cheese1 is cheddar and we are filtering on stilton + assertEquals( 0, + sink.getAsserted().size() ); + + //Set cheese1 to stilton and it should now propagate + cheese1.setType( "stilton" ); + from.modifyTuple( tuple1, + context, + 
workingMemory ); + List asserted = sink.getAsserted(); + assertEquals( 1, + asserted.size() ); + ReteTuple tuple = (ReteTuple) ((Object[]) asserted.get( 0 ))[0]; + assertSame( person1, + tuple.getFactHandles()[0].getObject() ); + assertSame( cheese1, + tuple.getFactHandles()[1].getObject() ); + + cheese2.setType( "stilton" ); + from.modifyTuple( tuple1, + context, + workingMemory ); + + // A modify when using from involves a retract and an assert - so make sure there was a retraction and no modify propagations + assertEquals( 0 , sink.getModified().size() ); + assertEquals( 1, sink.getRetracted().size() ); + + assertEquals( 3, + asserted.size() ); + tuple = (ReteTuple) ((Object[]) asserted.get( 1 ))[0]; + assertSame( person1, + tuple.getFactHandles()[0].getObject() ); + assertSame( cheese1, + tuple.getFactHandles()[1].getObject() ); + + tuple = (ReteTuple) ((Object[]) asserted.get( 2 ))[0]; + assertSame( person1, + tuple.getFactHandles()[0].getObject() ); + assertSame( cheese2, + tuple.getFactHandles()[1].getObject() ); + + // Double check the nodes memory + BetaMemory memory = (BetaMemory) workingMemory.getNodeMemory( from ); + assertEquals( 1, + memory.getLeftTupleMemory().size() ); + assertEquals( 0, + memory.getRightObjectMemory().size() ); + assertEquals( 2, + tuple1.getTupleMatches().size() ); + } + + public static class MockDataProvider + implements + DataProvider { + + private Collection collection; + + public Declaration[] getRequiredDeclarations() { + return null; + } + + public MockDataProvider(Collection collection) { + this.collection = collection; + } + + public Iterator getResults(Tuple tuple) { + return this.collection.iterator(); + } + } + + public static class Person { + private String name; + private int age; + + public Person(String name, + int age) { + super(); + this.name = name; + this.age = age; + } + + public int getAge() { + return age; + } + + public String getName() { + return name; + } + } +} diff --git 
a/drools-core/src/test/java/org/drools/reteoo/MockTupleSource.java b/drools-core/src/test/java/org/drools/reteoo/MockTupleSource.java index 4a7f82b6189..2d0b59099d4 100644 --- a/drools-core/src/test/java/org/drools/reteoo/MockTupleSource.java +++ b/drools-core/src/test/java/org/drools/reteoo/MockTupleSource.java @@ -38,7 +38,6 @@ public MockTupleSource(final int id) { public void attach() { this.attached++; - } public int getAttached() { diff --git a/drools-core/src/test/java/org/drools/rule/FieldConstraintTest.java b/drools-core/src/test/java/org/drools/rule/FieldConstraintTest.java index 0e7eafd6cb4..bdba597c349 100644 --- a/drools-core/src/test/java/org/drools/rule/FieldConstraintTest.java +++ b/drools-core/src/test/java/org/drools/rule/FieldConstraintTest.java @@ -78,7 +78,7 @@ public void testLiteralConstraint() throws IntrospectionException { final InternalFactHandle cheddarHandle = (InternalFactHandle) workingMemory.assertObject( cheddar ); // check constraint - assertTrue( constraint.isAllowed( cheddarHandle, + assertTrue( constraint.isAllowed( cheddarHandle.getObject(), null, workingMemory ) ); @@ -88,7 +88,7 @@ public void testLiteralConstraint() throws IntrospectionException { final InternalFactHandle stiltonHandle = (InternalFactHandle) workingMemory.assertObject( stilton ); // check constraint - assertFalse( constraint.isAllowed( stiltonHandle, + assertFalse( constraint.isAllowed( stiltonHandle.getObject(), null, workingMemory ) ); } @@ -131,13 +131,13 @@ public void testPredicateConstraint() throws IntrospectionException { */ private static final long serialVersionUID = -7805842671538257493L; - public boolean evaluate(Tuple tuple, - FactHandle factHandle, + public boolean evaluate(Object object, + Tuple tuple, Declaration declaration, Declaration[] declarations, WorkingMemory workingMemory) { int price1 = ((Integer) declarations[0].getValue( workingMemory.getObject( tuple.get( declarations[0] ) ) )).intValue(); - int price2 = ((Integer) 
declaration.getValue( workingMemory.getObject( factHandle ) )).intValue(); + int price2 = ((Integer) declaration.getValue( object )).intValue(); return (price2 == (price1 * 2)); @@ -160,7 +160,7 @@ public boolean evaluate(Tuple tuple, tuple = new InstrumentedReteTuple( tuple, f1 ); - assertTrue( constraint1.isAllowed( f1, + assertTrue( constraint1.isAllowed( f1.getObject(), tuple, workingMemory ) ); } @@ -228,11 +228,11 @@ public Object evaluate(Tuple tuple, // ?price tuple = new InstrumentedReteTuple( tuple, f1 ); - assertTrue( constraint1.isAllowed( f1, + assertTrue( constraint1.isAllowed( f1.getObject(), tuple, workingMemory ) ); - assertFalse( constraint2.isAllowed( f1, + assertFalse( constraint2.isAllowed( f1.getObject(), tuple, workingMemory ) ); @@ -241,7 +241,7 @@ public Object evaluate(Tuple tuple, // ?price final InternalFactHandle f2 = (InternalFactHandle) workingMemory.assertObject( cheddar2 ); - assertTrue( constraint2.isAllowed( f2, + assertTrue( constraint2.isAllowed( f2.getObject(), tuple, workingMemory ) ); } diff --git a/drools-core/src/test/java/org/drools/spi/MockConstraint.java b/drools-core/src/test/java/org/drools/spi/MockConstraint.java index c9fd203b045..7cc2f2a23e6 100644 --- a/drools-core/src/test/java/org/drools/spi/MockConstraint.java +++ b/drools-core/src/test/java/org/drools/spi/MockConstraint.java @@ -17,7 +17,6 @@ */ import org.drools.WorkingMemory; -import org.drools.common.InternalFactHandle; import org.drools.rule.Declaration; public class MockConstraint @@ -33,7 +32,7 @@ public class MockConstraint public boolean isAllowed = true; - public boolean isAllowed(final InternalFactHandle handle, + public boolean isAllowed(final Object object, final Tuple tuple, final WorkingMemory workingMemory) { return this.isAllowed;
175d222bfc03ad84023cefb40e48b27356148ec5
hadoop
YARN-2830. Add backwords compatible- ContainerId.newInstance constructor. Contributed by Jonathan Eagles.--(cherry picked from commit 43cd07b408c6613d2c9aa89203cfa3110d830538)-
a
https://github.com/apache/hadoop
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java index 19efe171356e9..74dfb39af4966 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java @@ -140,7 +140,7 @@ public void handle(ContainerAllocatorEvent event) { LOG.info("Processing the event " + event.toString()); // Assign the same container ID as the AM ContainerId cID = - ContainerId.newInstance(getContext().getApplicationAttemptId(), + ContainerId.newContainerId(getContext().getApplicationAttemptId(), this.containerId.getContainerId()); Container container = recordFactory.newRecordInstance(Container.class); container.setId(cID); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java index 1edadb9742e07..de35d840b9473 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java @@ -716,7 +716,7 @@ private class TestParams { ApplicationId appId = ApplicationId.newInstance(200, 1); 
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); TaskID taskID = TaskID.forName("task_200707121733_0003_m_000005"); TaskAttemptID taskAttemptID = new TaskAttemptID(taskID, 0); JobId jobId = MRBuilderUtils.newJobId(appId, 1); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java index 9885582d88a0d..3100d12ce1499 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java @@ -179,7 +179,7 @@ private static ContainerId getContainerId(ApplicationId applicationId, ApplicationAttemptId appAttemptId = getApplicationAttemptId(applicationId, startCount); ContainerId containerId = - ContainerId.newInstance(appAttemptId, startCount); + ContainerId.newContainerId(appAttemptId, startCount); return containerId; } @@ -565,7 +565,7 @@ protected class MRAppContainerAllocator @Override public void handle(ContainerAllocatorEvent event) { ContainerId cId = - ContainerId.newInstance(getContext().getApplicationAttemptId(), + ContainerId.newContainerId(getContext().getApplicationAttemptId(), containerCount++); NodeId nodeId = NodeId.newInstance(NM_HOST, NM_PORT); Resource resource = Resource.newInstance(1234, 2); @@ -773,7 +773,7 @@ public static ContainerId newContainerId(int appId, int appAttemptId, ApplicationId applicationId = ApplicationId.newInstance(timestamp, appId); ApplicationAttemptId applicationAttemptId = 
ApplicationAttemptId.newInstance(applicationId, appAttemptId); - return ContainerId.newInstance(applicationAttemptId, containerId); + return ContainerId.newContainerId(applicationAttemptId, containerId); } public static ContainerTokenIdentifier newContainerTokenIdentifier( diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java index 160303289568e..744ca103affba 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java @@ -140,7 +140,7 @@ public void run() { if (concurrentRunningTasks < maxConcurrentRunningTasks) { event = eventQueue.take(); ContainerId cId = - ContainerId.newInstance(getContext() + ContainerId.newContainerId(getContext() .getApplicationAttemptId(), containerCount++); //System.out.println("Allocating " + containerCount); @@ -233,7 +233,7 @@ public AllocateResponse allocate(AllocateRequest request) int numContainers = req.getNumContainers(); for (int i = 0; i < numContainers; i++) { ContainerId containerId = - ContainerId.newInstance( + ContainerId.newContainerId( getContext().getApplicationAttemptId(), request.getResponseId() + i); containers.add(Container.newInstance(containerId, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java index 19ac0db98dbb8..fd9c094901ae4 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java @@ -183,7 +183,7 @@ public static TaskReport newTaskReport(TaskId id) { public static TaskAttemptReport newTaskAttemptReport(TaskAttemptId id) { ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( id.getTaskId().getJobId().getAppId(), 0); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 0); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0); TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class); report.setTaskAttemptId(id); report @@ -315,7 +315,7 @@ public ContainerId getAssignedContainerID() { ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(taid.getTaskId().getJobId() .getAppId(), 0); - ContainerId id = ContainerId.newInstance(appAttemptId, 0); + ContainerId id = ContainerId.newContainerId(appAttemptId, 0); return id; } @@ -640,7 +640,7 @@ public void setQueueName(String queueName) { private static AMInfo createAMInfo(int attempt) { ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(100, 1), attempt); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); return MRBuilderUtils.newAMInfo(appAttemptId, System.currentTimeMillis(), containerId, NM_HOST, NM_PORT, NM_HTTP_PORT); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java index d356eca623d2e..70437c1ba36cf 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java @@ -382,7 +382,7 @@ public void testMRAppMasterCredentials() throws Exception { ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId containerId = - ContainerId.newInstance(applicationAttemptId, 546); + ContainerId.newContainerId(applicationAttemptId, 546); String userName = UserGroupInformation.getCurrentUser().getShortUserName(); // Create staging dir, so MRAppMaster doesn't barf. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java index 1037e7c2ba3dd..fc64996a8e676 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java @@ -253,7 +253,7 @@ private class TestMRApp extends MRAppMaster { public TestMRApp(ApplicationAttemptId applicationAttemptId, ContainerAllocator allocator) { - super(applicationAttemptId, ContainerId.newInstance( + super(applicationAttemptId, ContainerId.newContainerId( applicationAttemptId, 1), "testhost", 2222, 3333, System.currentTimeMillis()); this.allocator = allocator; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 13303449c879e..1807c1c3e099f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -359,7 +359,7 @@ public void testLaunchFailedWhileKilling() throws Exception { new SystemClock(), null); NodeId nid = NodeId.newInstance("127.0.0.1", 0); - ContainerId contId = ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); @@ -415,7 +415,7 @@ public void testContainerCleanedWhileRunning() throws Exception { new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.2", 0); - ContainerId contId = ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); @@ -472,7 +472,7 @@ public void testContainerCleanedWhileCommitting() throws Exception { new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.1", 0); - ContainerId contId = ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); @@ -532,7 +532,7 @@ public void testDoubleTooManyFetchFailure() throws Exception { new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.1", 0); - ContainerId contId = 
ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); @@ -599,7 +599,7 @@ public void testAppDiognosticEventOnUnassignedTask() throws Exception { new Token(), new Credentials(), new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.1", 0); - ContainerId contId = ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); @@ -649,7 +649,7 @@ public void testTooManyFetchFailureAfterKill() throws Exception { new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.1", 0); - ContainerId contId = ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); @@ -714,7 +714,7 @@ public void testAppDiognosticEventOnNewTask() throws Exception { new Token(), new Credentials(), new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.1", 0); - ContainerId contId = ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); @@ -760,7 +760,7 @@ public void testFetchFailureAttemptFinishTime() throws Exception{ new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.1", 0); - ContainerId contId = ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); 
when(container.getNodeId()).thenReturn(nid); @@ -830,7 +830,7 @@ public void testContainerKillAfterAssigned() throws Exception { new Credentials(), new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.2", 0); - ContainerId contId = ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); @@ -884,7 +884,7 @@ public void testContainerKillWhileRunning() throws Exception { new Credentials(), new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.2", 0); - ContainerId contId = ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); @@ -941,7 +941,7 @@ public void testContainerKillWhileCommitPending() throws Exception { new Credentials(), new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.2", 0); - ContainerId contId = ContainerId.newInstance(appAttemptId, 3); + ContainerId contId = ContainerId.newContainerId(appAttemptId, 3); Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java index f2c1841e1b0a2..dc1d72f89f0f7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java @@ -115,7 +115,7 @@ public void testPoolSize() throws InterruptedException { containerLauncher.expectedCorePoolSize = ContainerLauncherImpl.INITIAL_POOL_SIZE; for (int i = 0; i < 10; i++) { - ContainerId containerId = ContainerId.newInstance(appAttemptId, i); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, i); TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, i); containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId, containerId, "host" + i + ":1234", null, @@ -137,7 +137,7 @@ public void testPoolSize() throws InterruptedException { Assert.assertEquals(10, containerLauncher.numEventsProcessed.get()); containerLauncher.finishEventHandling = false; for (int i = 0; i < 10; i++) { - ContainerId containerId = ContainerId.newInstance(appAttemptId, + ContainerId containerId = ContainerId.newContainerId(appAttemptId, i + 10); TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, i + 10); @@ -154,7 +154,7 @@ public void testPoolSize() throws InterruptedException { // Core pool size should be 21 but the live pool size should be only 11. 
containerLauncher.expectedCorePoolSize = 11 + ContainerLauncherImpl.INITIAL_POOL_SIZE; containerLauncher.finishEventHandling = false; - ContainerId containerId = ContainerId.newInstance(appAttemptId, 21); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 21); TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 21); containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId, containerId, "host11:1234", null, @@ -174,7 +174,7 @@ public void testPoolLimits() throws InterruptedException { JobId jobId = MRBuilderUtils.newJobId(appId, 8); TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP); TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 10); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 10); AppContext context = mock(AppContext.class); CustomContainerLauncher containerLauncher = new CustomContainerLauncher( diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java index 74e532a2b4d46..184f1b244d549 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java @@ -139,7 +139,7 @@ public void waitForPoolToIdle() throws InterruptedException { public static ContainerId makeContainerId(long ts, int appId, int attemptId, int id) { - return ContainerId.newInstance( + return ContainerId.newContainerId( ApplicationAttemptId.newInstance( 
ApplicationId.newInstance(ts, appId), attemptId), id); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java index b8e13ff96bd57..3a7343c07b2d9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java @@ -688,7 +688,7 @@ public void testReportedAppProgress() throws Exception { rm.sendAMLaunched(appAttemptId); rmDispatcher.await(); - MRApp mrApp = new MRApp(appAttemptId, ContainerId.newInstance( + MRApp mrApp = new MRApp(appAttemptId, ContainerId.newContainerId( appAttemptId, 0), 10, 10, false, this.getClass().getName(), true, 1) { @Override protected Dispatcher createDispatcher() { @@ -840,7 +840,7 @@ public void testReportedAppProgressWithOnlyMaps() throws Exception { rm.sendAMLaunched(appAttemptId); rmDispatcher.await(); - MRApp mrApp = new MRApp(appAttemptId, ContainerId.newInstance( + MRApp mrApp = new MRApp(appAttemptId, ContainerId.newContainerId( appAttemptId, 0), 10, 0, false, this.getClass().getName(), true, 1) { @Override protected Dispatcher createDispatcher() { @@ -2021,7 +2021,7 @@ public void testCompletedContainerEvent() { ApplicationId applicationId = ApplicationId.newInstance(1, 1); ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance( applicationId, 1); - ContainerId containerId = ContainerId.newInstance(applicationAttemptId, 1); + ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, 1); ContainerStatus status = ContainerStatus.newInstance( 
containerId, ContainerState.RUNNING, "", 0); @@ -2038,7 +2038,7 @@ public void testCompletedContainerEvent() { abortedStatus, attemptId); Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType()); - ContainerId containerId2 = ContainerId.newInstance(applicationAttemptId, 2); + ContainerId containerId2 = ContainerId.newContainerId(applicationAttemptId, 2); ContainerStatus status2 = ContainerStatus.newInstance(containerId2, ContainerState.RUNNING, "", 0); @@ -2077,7 +2077,7 @@ public void testUnregistrationOnlyIfRegistered() throws Exception { rmDispatcher.await(); MRApp mrApp = - new MRApp(appAttemptId, ContainerId.newInstance(appAttemptId, 0), 10, + new MRApp(appAttemptId, ContainerId.newContainerId(appAttemptId, 0), 10, 0, false, this.getClass().getName(), true, 1) { @Override protected Dispatcher createDispatcher() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java index 82d578aa12a79..723136769e61f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java @@ -133,7 +133,7 @@ public void testAttemptsBlock() { ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 5); ApplicationAttemptId appAttemptId = ApplicationAttemptIdPBImpl.newInstance(appId, 1); - ContainerId containerId = ContainerIdPBImpl.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerIdPBImpl.newContainerId(appAttemptId, 1); when(attempt.getAssignedContainerID()).thenReturn(containerId); when(attempt.getAssignedContainerMgrAddress()).thenReturn( diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java index 9fba91dbb1ac2..f9236a926ae83 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java @@ -169,7 +169,7 @@ private void verifyJobReport(JobReport jobReport, JobId jobId) { Assert.assertEquals(1, amInfos.size()); AMInfo amInfo = amInfos.get(0); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(jobId.getAppId(), 1); - ContainerId amContainerId = ContainerId.newInstance(appAttemptId, 1); + ContainerId amContainerId = ContainerId.newContainerId(appAttemptId, 1); Assert.assertEquals(appAttemptId, amInfo.getAppAttemptId()); Assert.assertEquals(amContainerId, amInfo.getContainerId()); Assert.assertTrue(jobReport.getSubmitTime() > 0); diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 2245a79531bba..78d501096b2ff 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -895,6 +895,9 @@ Release 2.6.0 - UNRELEASED YARN-2607. Fixed issues in TestDistributedShell. (Wangda Tan via vinodkv) + YARN-2830. Add backwords compatible ContainerId.newInstance constructor. 
+ (jeagles via acmurthy) + Release 2.5.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java index 5499a19646e73..5d0d65a966a4d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java @@ -42,7 +42,7 @@ public abstract class ContainerId implements Comparable<ContainerId>{ @Private @Unstable - public static ContainerId newInstance(ApplicationAttemptId appAttemptId, + public static ContainerId newContainerId(ApplicationAttemptId appAttemptId, long containerId) { ContainerId id = Records.newRecord(ContainerId.class); id.setContainerId(containerId); @@ -51,6 +51,18 @@ public static ContainerId newInstance(ApplicationAttemptId appAttemptId, return id; } + @Private + @Deprecated + @Unstable + public static ContainerId newInstance(ApplicationAttemptId appAttemptId, + int containerId) { + ContainerId id = Records.newRecord(ContainerId.class); + id.setContainerId(containerId); + id.setApplicationAttemptId(appAttemptId); + id.build(); + return id; + } + /** * Get the <code>ApplicationAttemptId</code> of the application to which the * <code>Container</code> was assigned. 
@@ -214,7 +226,7 @@ public static ContainerId fromString(String containerIdStr) { } long id = Long.parseLong(it.next()); long cid = (epoch << 40) | id; - ContainerId containerId = ContainerId.newInstance(appAttemptID, cid); + ContainerId containerId = ContainerId.newContainerId(appAttemptID, cid); return containerId; } catch (NumberFormatException n) { throw new IllegalArgumentException("Invalid ContainerId: " diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java index 2414a6777f736..d41434e94dcae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java @@ -214,7 +214,7 @@ public void launchAM(ApplicationAttemptId attemptId) if(!setClasspath && classpath!=null) { envAMList.add("CLASSPATH="+classpath); } - ContainerId containerId = ContainerId.newInstance(attemptId, 0); + ContainerId containerId = ContainerId.newContainerId(attemptId, 0); String hostname = InetAddress.getLocalHost().getHostName(); envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java index ec00d45a2f13d..da7d50529ac9f 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java @@ -667,7 +667,7 @@ public ApplicationAttemptId createFakeApplicationAttemptId() { } public ContainerId createFakeContainerId() { - return ContainerId.newInstance(createFakeApplicationAttemptId(), 0); + return ContainerId.newContainerId(createFakeApplicationAttemptId(), 0); } public YarnClusterMetrics createFakeYarnClusterMetrics() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java index b00598a5d2ec9..74d4aa47cbcde 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java @@ -402,7 +402,7 @@ public static ContainerId newContainerId(int appId, int appAttemptId, ApplicationId applicationId = ApplicationId.newInstance(timestamp, appId); ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, appAttemptId); - return ContainerId.newInstance(applicationAttemptId, containerId); + return ContainerId.newContainerId(applicationAttemptId, containerId); } private class TestCallbackHandler implements AMRMClientAsync.CallbackHandler { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java index 
0e059d7540d5e..6f9d41d8d5090 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java @@ -547,7 +547,7 @@ private Container mockContainer(int i) { ApplicationId.newInstance(System.currentTimeMillis(), 1); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(attemptId, i); + ContainerId containerId = ContainerId.newContainerId(attemptId, i); nodeId = NodeId.newInstance("localhost", 0); // Create an empty record containerToken = recordFactory.newRecordInstance(Token.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java index d3c182b9cf423..a88189e5c0d84 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java @@ -157,9 +157,9 @@ public void testGetContainers() throws YarnException, IOException { List<ContainerReport> reports = client.getContainers(appAttemptId); Assert.assertNotNull(reports); Assert.assertEquals(reports.get(0).getContainerId(), - (ContainerId.newInstance(appAttemptId, 1))); + (ContainerId.newContainerId(appAttemptId, 1))); Assert.assertEquals(reports.get(1).getContainerId(), - (ContainerId.newInstance(appAttemptId, 2))); + (ContainerId.newContainerId(appAttemptId, 2))); client.stop(); } @@ -176,11 +176,11 @@ public void testGetContainerReport() throws YarnException, IOException { ApplicationId applicationId = 
ApplicationId.newInstance(1234, 5); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); ContainerReport report = client.getContainerReport(containerId); Assert.assertNotNull(report); Assert.assertEquals(report.getContainerId().toString(), (ContainerId - .newInstance(expectedReports.get(0).getCurrentApplicationAttemptId(), 1)) + .newContainerId(expectedReports.get(0).getCurrentApplicationAttemptId(), 1)) .toString()); client.stop(); } @@ -349,7 +349,7 @@ private void createAppReports() { "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, - ContainerId.newInstance( + ContainerId.newContainerId( newApplicationReport.getCurrentApplicationAttemptId(), 1)); appAttempts.add(attempt); ApplicationAttemptReport attempt1 = @@ -361,7 +361,7 @@ private void createAppReports() { "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, - ContainerId.newInstance( + ContainerId.newContainerId( newApplicationReport.getCurrentApplicationAttemptId(), 2)); appAttempts.add(attempt1); attempts.put(applicationId, appAttempts); @@ -369,14 +369,14 @@ private void createAppReports() { List<ContainerReport> containerReports = new ArrayList<ContainerReport>(); ContainerReport container = ContainerReport.newInstance( - ContainerId.newInstance(attempt.getApplicationAttemptId(), 1), + ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE); containerReports.add(container); ContainerReport container1 = ContainerReport.newInstance( - ContainerId.newInstance(attempt.getApplicationAttemptId(), 2), + ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "logURL", 0, 
ContainerState.COMPLETE); containerReports.add(container1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java index ce3086f57020c..108ad377c6b02 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java @@ -352,7 +352,7 @@ public void testAMRMClientForUnregisterAMOnRMRestart() throws Exception { // new NM to represent NM re-register nm1 = new MockNM("h1:1234", 10240, rm2.getResourceTrackerService()); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); NMContainerStatus containerReport = NMContainerStatus.newInstance(containerId, ContainerState.RUNNING, Resource.newInstance(1024, 1), "recover container", 0, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index ca7c50a1270af..02f2882155541 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -348,9 +348,9 @@ public void testGetContainers() throws YarnException, IOException { List<ContainerReport> reports = client.getContainers(appAttemptId); Assert.assertNotNull(reports); Assert.assertEquals(reports.get(0).getContainerId(), - 
(ContainerId.newInstance(appAttemptId, 1))); + (ContainerId.newContainerId(appAttemptId, 1))); Assert.assertEquals(reports.get(1).getContainerId(), - (ContainerId.newInstance(appAttemptId, 2))); + (ContainerId.newContainerId(appAttemptId, 2))); client.stop(); } @@ -367,11 +367,11 @@ public void testGetContainerReport() throws YarnException, IOException { ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( applicationId, 1); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); ContainerReport report = client.getContainerReport(containerId); Assert.assertNotNull(report); Assert.assertEquals(report.getContainerId().toString(), - (ContainerId.newInstance(expectedReports.get(0) + (ContainerId.newContainerId(expectedReports.get(0) .getCurrentApplicationAttemptId(), 1)).toString()); client.stop(); } @@ -481,7 +481,7 @@ private List<ApplicationReport> createAppReports() { "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, - ContainerId.newInstance( + ContainerId.newContainerId( newApplicationReport.getCurrentApplicationAttemptId(), 1)); appAttempts.add(attempt); ApplicationAttemptReport attempt1 = ApplicationAttemptReport.newInstance( @@ -492,20 +492,20 @@ private List<ApplicationReport> createAppReports() { "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, - ContainerId.newInstance( + ContainerId.newContainerId( newApplicationReport.getCurrentApplicationAttemptId(), 2)); appAttempts.add(attempt1); attempts.put(applicationId, appAttempts); List<ContainerReport> containerReports = new ArrayList<ContainerReport>(); ContainerReport container = ContainerReport.newInstance( - ContainerId.newInstance(attempt.getApplicationAttemptId(), 1), null, + ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, 
"diagnosticInfo", "logURL", 0, ContainerState.COMPLETE); containerReports.add(container); ContainerReport container1 = ContainerReport.newInstance( - ContainerId.newInstance(attempt.getApplicationAttemptId(), 2), null, + ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE); containerReports.add(container1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java index 5ed839847f06b..ef9439d1a77bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java @@ -172,9 +172,9 @@ public void testFetchApplictionLogs() throws Exception { ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptIdPBImpl.newInstance(appId, 1); - ContainerId containerId0 = ContainerIdPBImpl.newInstance(appAttemptId, 0); - ContainerId containerId1 = ContainerIdPBImpl.newInstance(appAttemptId, 1); - ContainerId containerId2 = ContainerIdPBImpl.newInstance(appAttemptId, 2); + ContainerId containerId0 = ContainerIdPBImpl.newContainerId(appAttemptId, 0); + ContainerId containerId1 = ContainerIdPBImpl.newContainerId(appAttemptId, 1); + ContainerId containerId2 = ContainerIdPBImpl.newContainerId(appAttemptId, 2); NodeId nodeId = NodeId.newInstance("localhost", 1234); // create local logs diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index d87277a7a7ad9..9d9a86a310000 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -146,7 +146,7 @@ public void testGetApplicationAttemptReport() throws Exception { applicationId, 1); ApplicationAttemptReport attemptReport = ApplicationAttemptReport .newInstance(attemptId, "host", 124, "url", "oUrl", "diagnostics", - YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( + YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId( attemptId, 1)); when( client @@ -182,11 +182,11 @@ public void testGetApplicationAttempts() throws Exception { applicationId, 2); ApplicationAttemptReport attemptReport = ApplicationAttemptReport .newInstance(attemptId, "host", 124, "url", "oUrl", "diagnostics", - YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( + YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId( attemptId, 1)); ApplicationAttemptReport attemptReport1 = ApplicationAttemptReport .newInstance(attemptId1, "host", 124, "url", "oUrl", "diagnostics", - YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( + YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId( attemptId1, 1)); List<ApplicationAttemptReport> reports = new ArrayList<ApplicationAttemptReport>(); reports.add(attemptReport); @@ -223,7 +223,7 @@ public void testGetContainerReport() throws Exception { ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance( applicationId, 1); - ContainerId containerId = ContainerId.newInstance(attemptId, 1); + ContainerId containerId = ContainerId.newContainerId(attemptId, 1); ContainerReport container = 
ContainerReport.newInstance(containerId, null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE); @@ -255,8 +255,8 @@ public void testGetContainers() throws Exception { ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance( applicationId, 1); - ContainerId containerId = ContainerId.newInstance(attemptId, 1); - ContainerId containerId1 = ContainerId.newInstance(attemptId, 2); + ContainerId containerId = ContainerId.newContainerId(attemptId, 1); + ContainerId containerId1 = ContainerId.newContainerId(attemptId, 2); ContainerReport container = ContainerReport.newInstance(containerId, null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE); @@ -766,7 +766,7 @@ public void testContainersHelpCommand() throws Exception { sysOutStream.toString()); sysOutStream.reset(); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 7); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 7); result = cli.run( new String[] { "container", "-status", containerId.toString(), "args" }); verify(spyCli).printUsage(any(String.class), any(Options.class)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java index 45b2a06429a0b..e2071ddc494e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java @@ -97,7 +97,7 @@ private void testRPCTimeout(String rpcClass) throws Exception { ApplicationAttemptId applicationAttemptId = 
ApplicationAttemptId.newInstance(applicationId, 0); ContainerId containerId = - ContainerId.newInstance(applicationAttemptId, 100); + ContainerId.newContainerId(applicationAttemptId, 100); NodeId nodeId = NodeId.newInstance("localhost", 1234); Resource resource = Resource.newInstance(1234, 2); ContainerTokenIdentifier containerTokenIdentifier = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java index 8271713e26fad..39e616229de17 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java @@ -124,7 +124,7 @@ private void test(String rpcClass) throws Exception { ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 0); ContainerId containerId = - ContainerId.newInstance(applicationAttemptId, 100); + ContainerId.newContainerId(applicationAttemptId, 100); NodeId nodeId = NodeId.newInstance("localhost", 1234); Resource resource = Resource.newInstance(1234, 2); ContainerTokenIdentifier containerTokenIdentifier = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java index 2259294bc0fae..1643301072b81 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java @@ -79,6 +79,6 @@ public static ContainerId newContainerId(int appId, int appAttemptId, ApplicationId applicationId = ApplicationId.newInstance(timestamp, appId); ApplicationAttemptId 
applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, appAttemptId); - return ContainerId.newInstance(applicationAttemptId, containerId); + return ContainerId.newContainerId(applicationAttemptId, containerId); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java index f497d27a0ca18..29b0ffe38f27d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java @@ -33,7 +33,7 @@ public class TestContainerResourceDecrease { @Test public void testResourceDecreaseContext() { ContainerId containerId = ContainerId - .newInstance(ApplicationAttemptId.newInstance( + .newContainerId(ApplicationAttemptId.newInstance( ApplicationId.newInstance(1234, 3), 3), 7); Resource resource = Resource.newInstance(1023, 3); ContainerResourceDecrease ctx = ContainerResourceDecrease.newInstance( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java index d307e390afb37..932d5a7a87ccf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java @@ -38,7 +38,7 @@ public void testResourceIncreaseContext() { byte[] identifier = new byte[] { 1, 2, 3, 4 }; Token token = Token.newInstance(identifier, "", "".getBytes(), ""); ContainerId 
containerId = ContainerId - .newInstance(ApplicationAttemptId.newInstance( + .newContainerId(ApplicationAttemptId.newInstance( ApplicationId.newInstance(1234, 3), 3), 7); Resource resource = Resource.newInstance(1023, 3); ContainerResourceIncrease ctx = ContainerResourceIncrease.newInstance( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java index 0acad00d7e96f..cf4dabf71bede 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java @@ -33,7 +33,7 @@ public class TestContainerResourceIncreaseRequest { @Test public void ContainerResourceIncreaseRequest() { ContainerId containerId = ContainerId - .newInstance(ApplicationAttemptId.newInstance( + .newContainerId(ApplicationAttemptId.newInstance( ApplicationId.newInstance(1234, 3), 3), 7); Resource resource = Resource.newInstance(1023, 3); ContainerResourceIncreaseRequest context = ContainerResourceIncreaseRequest diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java index 405cb3d52a500..4301bc9eee7d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java @@ -295,7 +295,7 @@ public void testContainerLogsFileAccess() throws 
IOException { ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 1); ContainerId testContainerId1 = - ContainerId.newInstance(applicationAttemptId, 1); + ContainerId.newContainerId(applicationAttemptId, 1); Path appDir = new Path(srcFileRoot, testContainerId1.getApplicationAttemptId() .getApplicationId().toString()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java index 0a17433c44fca..2a5762c30228a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java @@ -207,7 +207,7 @@ private void writeLog(Configuration configuration, String user) throws Exception { ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptIdPBImpl.newInstance(appId, 1); - ContainerId containerId = ContainerIdPBImpl.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerIdPBImpl.newContainerId(appAttemptId, 1); String path = "target/logs/" + user + "/logs/application_0_0001/localhost_1234"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java index dc4f9e2a41dd8..834dcf131c498 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java @@ -134,7 +134,7 @@ public void testClientToAMTokenIdentifier() throws IOException { @Test public void testContainerTokenIdentifier() throws IOException { - ContainerId containerID = ContainerId.newInstance( + ContainerId containerID = ContainerId.newContainerId( ApplicationAttemptId.newInstance(ApplicationId.newInstance( 1, 1), 1), 1); String hostName = "host0"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java index 1708da250f901..de4051a494c4f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java @@ -58,7 +58,7 @@ protected void writeApplicationAttemptStartData( ApplicationAttemptId appAttemptId) throws IOException { store.applicationAttemptStarted(ApplicationAttemptStartData.newInstance( appAttemptId, appAttemptId.toString(), 0, - ContainerId.newInstance(appAttemptId, 1))); + ContainerId.newContainerId(appAttemptId, 1))); } protected void writeApplicationAttemptFinishData( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java index 60027e9283a83..7c2593d9e0aca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java @@ -142,7 +142,7 @@ public void testContainerReport() throws IOException, YarnException { ApplicationId appId = ApplicationId.newInstance(0, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); GetContainerReportRequest request = GetContainerReportRequest.newInstance(containerId); GetContainerReportResponse response = @@ -160,8 +160,8 @@ public void testContainers() throws IOException, YarnException { ApplicationId appId = ApplicationId.newInstance(0, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); - ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 2); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); + ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 2); GetContainersRequest request = GetContainersRequest.newInstance(appAttemptId); GetContainersResponse response = diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java index 856b88d28e65c..a093f19f9eaf1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java @@ -141,7 +141,7 @@ private static void prepareTimelineStore(TimelineStore store, int scale) store.put(entities); for (int k = 1; k <= scale; ++k) { entities = new TimelineEntities(); - ContainerId containerId = ContainerId.newInstance(appAttemptId, k); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, k); entities.addEntity(createContainerEntity(containerId)); store.put(entities); } @@ -238,7 +238,7 @@ public ApplicationAttemptReport run() throws Exception { } Assert.assertNotNull(appAttempt); Assert.assertEquals(appAttemptId, appAttempt.getApplicationAttemptId()); - Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1), + Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1), appAttempt.getAMContainerId()); Assert.assertEquals("test host", appAttempt.getHost()); Assert.assertEquals(100, appAttempt.getRpcPort()); @@ -253,7 +253,7 @@ public ApplicationAttemptReport run() throws Exception { @Test public void testGetContainerReport() throws Exception { final ContainerId containerId = - 
ContainerId.newInstance(ApplicationAttemptId.newInstance( + ContainerId.newContainerId(ApplicationAttemptId.newInstance( ApplicationId.newInstance(0, 1), 1), 1); ContainerReport container; if (callerUGI == null) { @@ -466,7 +466,7 @@ private static TimelineEntity createAppAttemptTimelineEntity( eventInfo.put(AppAttemptMetricsConstants.HOST_EVENT_INFO, "test host"); eventInfo.put(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO, 100); eventInfo.put(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO, - ContainerId.newInstance(appAttemptId, 1)); + ContainerId.newContainerId(appAttemptId, 1)); tEvent.setEventInfo(eventInfo); entity.addEvent(tEvent); tEvent = new TimelineEvent(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java index 3a75d9e275733..c91d9f5a6d5ad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java @@ -121,7 +121,7 @@ private void testWriteHistoryData( } // write container history data for (int k = 1; k <= num; ++k) { - ContainerId containerId = ContainerId.newInstance(appAttemptId, k); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, k); writeContainerStartData(containerId); if (missingContainer && k == num) { continue; @@ -172,7 +172,7 @@ private void testReadHistoryData( // 
read container history data Assert.assertEquals(num, store.getContainers(appAttemptId).size()); for (int k = 1; k <= num; ++k) { - ContainerId containerId = ContainerId.newInstance(appAttemptId, k); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, k); ContainerHistoryData containerData = store.getContainer(containerId); Assert.assertNotNull(containerData); Assert.assertEquals(Priority.newInstance(containerId.getId()), @@ -187,7 +187,7 @@ private void testReadHistoryData( ContainerHistoryData masterContainer = store.getAMContainer(appAttemptId); Assert.assertNotNull(masterContainer); - Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1), + Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1), masterContainer.getContainerId()); } } @@ -215,7 +215,7 @@ public void testWriteAfterApplicationFinish() throws IOException { Assert.assertTrue(e.getMessage().contains("is not opened")); } // write container history data - ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); try { writeContainerStartData(containerId); Assert.fail(); @@ -240,7 +240,7 @@ public void testMassiveWriteContainerHistoryData() throws IOException { ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); for (int i = 1; i <= 100000; ++i) { - ContainerId containerId = ContainerId.newInstance(appAttemptId, i); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, i); writeContainerStartData(containerId); writeContainerFinishData(containerId); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java index 6e9e242637a17..556db2beaf4b8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java @@ -137,7 +137,7 @@ public void testReadWriteContainerHistory() throws Exception { ApplicationId appId = ApplicationId.newInstance(0, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); try { writeContainerFinishData(containerId); Assert.fail(); @@ -149,14 +149,14 @@ public void testReadWriteContainerHistory() throws Exception { writeApplicationAttemptStartData(appAttemptId); int numContainers = 5; for (int i = 1; i <= numContainers; ++i) { - containerId = ContainerId.newInstance(appAttemptId, i); + containerId = ContainerId.newContainerId(appAttemptId, i); writeContainerStartData(containerId); writeContainerFinishData(containerId); } Assert .assertEquals(numContainers, store.getContainers(appAttemptId).size()); for (int i = 1; i <= numContainers; ++i) { - containerId = ContainerId.newInstance(appAttemptId, i); + containerId = ContainerId.newContainerId(appAttemptId, i); ContainerHistoryData data = store.getContainer(containerId); Assert.assertNotNull(data); Assert.assertEquals(Priority.newInstance(containerId.getId()), @@ -165,11 +165,11 @@ public void testReadWriteContainerHistory() throws Exception { } ContainerHistoryData 
masterContainer = store.getAMContainer(appAttemptId); Assert.assertNotNull(masterContainer); - Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1), + Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1), masterContainer.getContainerId()); writeApplicationAttemptFinishData(appAttemptId); // Write again - containerId = ContainerId.newInstance(appAttemptId, 1); + containerId = ContainerId.newContainerId(appAttemptId, 1); try { writeContainerStartData(containerId); Assert.fail(); @@ -194,7 +194,7 @@ public void testMassiveWriteContainerHistory() throws IOException { ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); for (int i = 1; i <= numContainers; ++i) { - ContainerId containerId = ContainerId.newInstance(appAttemptId, i); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, i); writeContainerStartData(containerId); writeContainerFinishData(containerId); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java index 82c42766b6694..7bac6f265c256 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java @@ -134,7 +134,7 @@ public void testContainerPage() throws Exception { containerPageInstance.set( YarnWebParams.CONTAINER_ID, ContainerId - .newInstance( + .newContainerId( 
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1), 1).toString()); containerPageInstance.render(); @@ -153,7 +153,7 @@ ApplicationHistoryManager mockApplicationHistoryManager(int numApps, ApplicationAttemptId.newInstance(appId, j); writeApplicationAttemptStartData(appAttemptId); for (int k = 1; k <= numContainers; ++k) { - ContainerId containerId = ContainerId.newInstance(appAttemptId, k); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, k); writeContainerStartData(containerId); writeContainerFinishData(containerId); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java index da39ce3928bd0..76bf8c3c75594 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java @@ -338,7 +338,7 @@ public void testSingleContainer() throws Exception { ApplicationId appId = ApplicationId.newInstance(0, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("applicationhistory").path("apps") diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index 8f042a87aa36c..a7e5d9cd82081 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -139,7 +139,7 @@ public static ApplicationId convert(long clustertimestamp, CharSequence id) { public static ContainerId newContainerId(ApplicationAttemptId appAttemptId, long containerId) { - return ContainerId.newInstance(appAttemptId, containerId); + return ContainerId.newContainerId(appAttemptId, containerId); } public static ContainerId newContainerId(int appId, int appAttemptId, @@ -164,7 +164,7 @@ public static Token newContainerToken(ContainerId cId, String host, public static ContainerId newContainerId(RecordFactory recordFactory, ApplicationId appId, ApplicationAttemptId appAttemptId, int containerId) { - return ContainerId.newInstance(appAttemptId, containerId); + return ContainerId.newContainerId(appAttemptId, containerId); } public static NodeId newNodeId(String host, int port) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java index da25aa275a543..20983b6109ffb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java @@ -223,7 +223,7 @@ private ApplicationAttemptId getApplicationAttemptId(int appAttemptId) { } private ContainerId getContainerId(int containerID, int appAttemptId) { - ContainerId containerId = ContainerIdPBImpl.newInstance( + ContainerId containerId = ContainerIdPBImpl.newContainerId( getApplicationAttemptId(appAttemptId), containerID); return containerId; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java index ed902baa7ead2..86e49f05e1de5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java @@ -51,7 +51,7 @@ public class TestProtocolRecords { public void testNMContainerStatus() { ApplicationId appId = ApplicationId.newInstance(123456789, 1); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(attemptId, 1); + ContainerId containerId = ContainerId.newContainerId(attemptId, 1); Resource resource = Resource.newInstance(1000, 200); NMContainerStatus report = @@ -76,7 +76,7 @@ public void testNMContainerStatus() { public void testRegisterNodeManagerRequest() { ApplicationId appId = ApplicationId.newInstance(123456789, 1); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(attemptId, 1); 
+ ContainerId containerId = ContainerId.newContainerId(attemptId, 1); NMContainerStatus containerReport = NMContainerStatus.newInstance(containerId, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java index fdacd924d9b1c..947dec19745f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java @@ -38,7 +38,7 @@ public void testRegisterNodeManagerRequest() { RegisterNodeManagerRequest.newInstance( NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0), "version", Arrays.asList(NMContainerStatus.newInstance( - ContainerId.newInstance( + ContainerId.newContainerId( ApplicationAttemptId.newInstance( ApplicationId.newInstance(1234L, 1), 1), 1), ContainerState.RUNNING, Resource.newInstance(1024, 1), "good", -1, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java index fabb03bf3f6fb..d2caefe88fed7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java @@ -139,7 +139,7 @@ public long getRMIdentifier() { ApplicationId applicationId = ApplicationId.newInstance(0, 0); ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 0); - ContainerId cID = ContainerId.newInstance(applicationAttemptId, 0); + ContainerId cID = ContainerId.newContainerId(applicationAttemptId, 0); String user = "testing"; StartContainerRequest scRequest = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java index ff477a38ec704..f837bbc72d293 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java @@ -431,7 +431,7 @@ public void testPostExecuteAfterReacquisition() throws Exception { ApplicationId appId = ApplicationId.newInstance(12345, 67890); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 54321); - ContainerId cid = ContainerId.newInstance(attemptId, 9876); + ContainerId cid = ContainerId.newContainerId(attemptId, 9876); Configuration conf = new YarnConfiguration(); conf.setClass(YarnConfiguration.NM_LINUX_CONTAINER_RESOURCES_HANDLER, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java index e9aea0ef6c43d..41c16a9d8fc0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java @@ -290,7 +290,7 @@ private ContainerId createContainerId() { ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 0); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0); return containerId; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java index 85bafb3dee585..a58294fe48156 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java @@ -592,7 +592,7 @@ public static NMContainerStatus createNMContainerStatus(int id, ApplicationId applicationId = ApplicationId.newInstance(0, 1); ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 1); - ContainerId containerId = ContainerId.newInstance(applicationAttemptId, id); + ContainerId containerId = 
ContainerId.newContainerId(applicationAttemptId, id); NMContainerStatus containerReport = NMContainerStatus.newInstance(containerId, containerState, Resource.newInstance(1024, 1), "recover container", 0, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java index 11575b8373399..c079006ac10aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java @@ -260,7 +260,7 @@ public static ContainerId createContainerId() { ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 0); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0); return containerId; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 925a249ed1cbc..b34262b461d78 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -224,7 +224,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) ApplicationAttemptId appAttemptID = ApplicationAttemptId.newInstance(appId1, 0); ContainerId firstContainerID = - ContainerId.newInstance(appAttemptID, heartBeatID); + ContainerId.newContainerId(appAttemptID, heartBeatID); ContainerLaunchContext launchContext = recordFactory .newRecordInstance(ContainerLaunchContext.class); Resource resource = BuilderUtils.newResource(2, 1); @@ -254,7 +254,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) ApplicationAttemptId appAttemptID = ApplicationAttemptId.newInstance(appId2, 0); ContainerId secondContainerID = - ContainerId.newInstance(appAttemptID, heartBeatID); + ContainerId.newContainerId(appAttemptID, heartBeatID); ContainerLaunchContext launchContext = recordFactory .newRecordInstance(ContainerLaunchContext.class); long currentTime = System.currentTimeMillis(); @@ -818,7 +818,7 @@ public void testRecentlyFinishedContainers() throws Exception { ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0); - ContainerId cId = ContainerId.newInstance(appAttemptId, 0); + ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); nm.getNMContext().getApplications().putIfAbsent(appId, mock(Application.class)); nm.getNMContext().getContainers().putIfAbsent(cId, mock(Container.class)); @@ -855,7 +855,7 @@ public void testRemovePreviousCompletedContainersFromContext() throws Exception ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0); - ContainerId cId = ContainerId.newInstance(appAttemptId, 1); + ContainerId cId = ContainerId.newContainerId(appAttemptId, 1); Token containerToken = BuilderUtils.newContainerToken(cId, "anyHost", 1234, 
"anyUser", BuilderUtils.newResource(1024, 1), 0, 123, @@ -876,7 +876,7 @@ public org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont }; ContainerId runningContainerId = - ContainerId.newInstance(appAttemptId, 3); + ContainerId.newContainerId(appAttemptId, 3); Token runningContainerToken = BuilderUtils.newContainerToken(runningContainerId, "anyHost", 1234, "anyUser", BuilderUtils.newResource(1024, 1), 0, 123, @@ -936,7 +936,7 @@ public void testCleanedupApplicationContainerCleanup() throws IOException { ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0); - ContainerId cId = ContainerId.newInstance(appAttemptId, 1); + ContainerId cId = ContainerId.newContainerId(appAttemptId, 1); Token containerToken = BuilderUtils.newContainerToken(cId, "anyHost", 1234, "anyUser", BuilderUtils.newResource(1024, 1), 0, 123, @@ -1494,7 +1494,7 @@ public static ContainerStatus createContainerStatus(int id, ApplicationId applicationId = ApplicationId.newInstance(0, 1); ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 1); - ContainerId contaierId = ContainerId.newInstance(applicationAttemptId, id); + ContainerId contaierId = ContainerId.newContainerId(applicationAttemptId, id); ContainerStatus containerStatus = BuilderUtils.newContainerStatus(contaierId, containerState, "test_containerStatus: id=" + id + ", containerState: " diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java index 59cc947e3b5d0..757cdc8f3ee63 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java @@ -189,7 +189,7 @@ public void testAuxEventDispatch() { ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId1, 1); ContainerTokenIdentifier cti = new ContainerTokenIdentifier( - ContainerId.newInstance(attemptId, 1), "", "", + ContainerId.newContainerId(attemptId, 1), "", "", Resource.newInstance(1, 1), 0,0,0, Priority.newInstance(0), 0); Container container = new ContainerImpl(null, null, null, null, null, null, cti); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java index 45d9925a61c87..86cc4dcedeb8b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java @@ -104,7 +104,7 @@ private ContainerId createContainerId(int id) { ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId containerId = ContainerId.newInstance(appAttemptId, id); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, id); return containerId; } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java index 007fc36fcde75..a73d58341bbdf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java @@ -111,7 +111,7 @@ public void testApplicationRecovery() throws Exception { ApplicationId appId = ApplicationId.newInstance(0, 1); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId cid = ContainerId.newInstance(attemptId, 1); + ContainerId cid = ContainerId.newContainerId(attemptId, 1); Map<String, LocalResource> localResources = Collections.emptyMap(); Map<String, String> containerEnv = Collections.emptyMap(); List<String> containerCmds = Collections.emptyList(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index 001643b434ed9..cbc41c411ed3e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -385,7 +385,7 @@ public void testContainerEnvVariables() throws Exception { ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId cId = ContainerId.newInstance(appAttemptId, 0); + ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); Map<String, String> userSetEnv = new HashMap<String, String>(); userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id"); userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST"); @@ -634,7 +634,7 @@ private void internalKillTest(boolean delayed) throws Exception { ApplicationId appId = ApplicationId.newInstance(1, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId cId = ContainerId.newInstance(appAttemptId, 0); + ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); File processStartFile = new File(tmpDir, "pid.txt").getAbsoluteFile(); @@ -771,7 +771,7 @@ public void testImmediateKill() throws Exception { @Test (timeout = 10000) public void testCallFailureWithNullLocalizedResources() { Container container = mock(Container.class); - when(container.getContainerId()).thenReturn(ContainerId.newInstance( + when(container.getContainerId()).thenReturn(ContainerId.newContainerId( ApplicationAttemptId.newInstance(ApplicationId.newInstance( System.currentTimeMillis(), 1), 1), 1)); ContainerLaunchContext clc = mock(ContainerLaunchContext.class); @@ -980,7 +980,7 @@ public void testKillProcessGroup() throws Exception { ApplicationId appId = ApplicationId.newInstance(2, 2); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId cId = ContainerId.newInstance(appAttemptId, 0); + ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); File processStartFile = new File(tmpDir, 
"pid.txt").getAbsoluteFile(); File childProcessStartFile = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java index 99d722f9c7a38..1f2d0677c5f7a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java @@ -206,7 +206,7 @@ public void testContainerKillOnMemoryOverflow() throws IOException, // ////// Construct the Container-id ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - ContainerId cId = ContainerId.newInstance(appAttemptId, 0); + ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); int port = 12345; URL resource_alpha = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java index db377f5f0c7ca..438cec3a793a7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java @@ -226,7 +226,7 @@ public void testContainerStorage() throws IOException { ApplicationId appId = ApplicationId.newInstance(1234, 3); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 4); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 5); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 5); LocalResource lrsrc = LocalResource.newInstance( URL.newInstance("hdfs", "somehost", 12345, "/some/path/to/rsrc"), LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, 123L, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java index c07882da0ad8f..891130f0f7616 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java @@ -378,7 +378,7 @@ public void testAppCleanupWhenRMRestartedBeforeAppFinished() throws Exception { // nm1/nm2 register to rm2, and do a heartbeat nm1.setResourceTrackerService(rm2.getResourceTrackerService()); nm1.registerNode(Arrays.asList(NMContainerStatus.newInstance( - ContainerId.newInstance(am0.getApplicationAttemptId(), 1), + ContainerId.newContainerId(am0.getApplicationAttemptId(), 1), ContainerState.COMPLETE, Resource.newInstance(1024, 1), "", 0, Priority.newInstance(0), 1234)), 
Arrays.asList(app0.getApplicationId())); nm2.setResourceTrackerService(rm2.getResourceTrackerService()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 5652b6ea68295..15aca428268e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -404,7 +404,7 @@ public void testGetContainerReport() throws YarnException, IOException { .newRecordInstance(GetContainerReportRequest.class); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(123456, 1), 1); - ContainerId containerId = ContainerId.newInstance(attemptId, 1); + ContainerId containerId = ContainerId.newContainerId(attemptId, 1); request.setContainerId(containerId); try { @@ -425,7 +425,7 @@ public void testGetContainers() throws YarnException, IOException { .newRecordInstance(GetContainersRequest.class); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(123456, 1), 1); - ContainerId containerId = ContainerId.newInstance(attemptId, 1); + ContainerId containerId = ContainerId.newContainerId(attemptId, 1); request.setApplicationAttemptId(attemptId); try { GetContainersResponse response = rmService.getContainers(request); @@ -1213,7 +1213,7 @@ public ApplicationReport createAndGetApplicationReport( RMAppAttemptImpl rmAppAttemptImpl = spy(new RMAppAttemptImpl(attemptId, rmContext, yarnScheduler, null, asContext, config, false, 
null)); Container container = Container.newInstance( - ContainerId.newInstance(attemptId, 1), null, "", null, null, null); + ContainerId.newContainerId(attemptId, 1), null, "", null, null, null); RMContainerImpl containerimpl = spy(new RMContainerImpl(container, attemptId, null, "", rmContext)); Map<ApplicationAttemptId, RMAppAttempt> attempts = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java index c200df46c70fd..b9397bf070f20 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java @@ -207,7 +207,7 @@ public void testUsageWithMultipleContainersAndRMRestart() throws Exception { // usage metrics. This will cause the attempt to fail, and, since the max // attempt retries is 1, the app will also fail. This is intentional so // that all containers will complete prior to saving. - ContainerId cId = ContainerId.newInstance(attempt0.getAppAttemptId(), 1); + ContainerId cId = ContainerId.newContainerId(attempt0.getAppAttemptId(), 1); nm.nodeHeartbeat(attempt0.getAppAttemptId(), cId.getContainerId(), ContainerState.COMPLETE); rm0.waitForState(nm, cId, RMContainerState.COMPLETED); @@ -289,7 +289,7 @@ private void amRestartTests(boolean keepRunningContainers) // launch the 2nd container. 
ContainerId containerId2 = - ContainerId.newInstance(am0.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am0.getApplicationAttemptId(), 2); nm.nodeHeartbeat(am0.getApplicationAttemptId(), containerId2.getContainerId(), ContainerState.RUNNING); rm.waitForState(nm, containerId2, RMContainerState.RUNNING); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java index a9683f13dfc13..a0f86272b782d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java @@ -1963,7 +1963,7 @@ private void writeToHostsFile(String... 
hosts) throws IOException { public static NMContainerStatus createNMContainerStatus( ApplicationAttemptId appAttemptId, int id, ContainerState containerState) { - ContainerId containerId = ContainerId.newInstance(appAttemptId, id); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, id); NMContainerStatus containerReport = NMContainerStatus.newInstance(containerId, containerState, Resource.newInstance(1024, 1), "recover container", 0, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java index 28d1d6383d37a..7c128481c73b8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java @@ -510,7 +510,7 @@ public void testHandleContainerStatusInvalidCompletions() throws Exception { // Case 1.1: AppAttemptId is null NMContainerStatus report = NMContainerStatus.newInstance( - ContainerId.newInstance( + ContainerId.newContainerId( ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1), ContainerState.COMPLETE, Resource.newInstance(1024, 1), "Dummy Completed", 0, Priority.newInstance(10), 1234); @@ -522,7 +522,7 @@ public void testHandleContainerStatusInvalidCompletions() throws Exception { (RMAppAttemptImpl) app.getCurrentAppAttempt(); currentAttempt.setMasterContainer(null); report = NMContainerStatus.newInstance( - ContainerId.newInstance(currentAttempt.getAppAttemptId(), 0), + 
ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0), ContainerState.COMPLETE, Resource.newInstance(1024, 1), "Dummy Completed", 0, Priority.newInstance(10), 1234); rm.getResourceTrackerService().handleNMContainerStatus(report, null); @@ -533,7 +533,7 @@ public void testHandleContainerStatusInvalidCompletions() throws Exception { // Case 2.1: AppAttemptId is null report = NMContainerStatus.newInstance( - ContainerId.newInstance( + ContainerId.newContainerId( ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1), ContainerState.COMPLETE, Resource.newInstance(1024, 1), "Dummy Completed", 0, Priority.newInstance(10), 1234); @@ -549,7 +549,7 @@ public void testHandleContainerStatusInvalidCompletions() throws Exception { (RMAppAttemptImpl) app.getCurrentAppAttempt(); currentAttempt.setMasterContainer(null); report = NMContainerStatus.newInstance( - ContainerId.newInstance(currentAttempt.getAppAttemptId(), 0), + ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0), ContainerState.COMPLETE, Resource.newInstance(1024, 1), "Dummy Completed", 0, Priority.newInstance(10), 1234); try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java index 536dbd77d318c..2f0a839e9e95c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java @@ -849,7 +849,7 @@ public void testReleasedContainerNotRecovered() throws Exception { // 
try to release a container before the container is actually recovered. final ContainerId runningContainer = - ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); am1.allocate(null, Arrays.asList(runningContainer)); // send container statuses to recover the containers diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java index 78077d4fa3416..f827bf4285d69 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java @@ -153,7 +153,7 @@ private static RMAppAttempt createRMAppAttempt( when(appAttempt.getRpcPort()).thenReturn(-100); Container container = mock(Container.class); when(container.getId()) - .thenReturn(ContainerId.newInstance(appAttemptId, 1)); + .thenReturn(ContainerId.newContainerId(appAttemptId, 1)); when(appAttempt.getMasterContainer()).thenReturn(container); when(appAttempt.getDiagnostics()).thenReturn("test diagnostics info"); when(appAttempt.getTrackingUrl()).thenReturn("test url"); @@ -254,7 +254,7 @@ public void testWriteApplicationAttempt() throws Exception { Assert.assertNotNull(appAttemptHD); Assert.assertEquals("test host", appAttemptHD.getHost()); Assert.assertEquals(-100, appAttemptHD.getRPCPort()); - Assert.assertEquals(ContainerId.newInstance( + Assert.assertEquals(ContainerId.newContainerId( 
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1), 1), appAttemptHD.getMasterContainerId()); @@ -281,14 +281,14 @@ public void testWriteApplicationAttempt() throws Exception { @Test public void testWriteContainer() throws Exception { RMContainer container = - createRMContainer(ContainerId.newInstance( + createRMContainer(ContainerId.newContainerId( ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1), 1)); writer.containerStarted(container); ContainerHistoryData containerHD = null; for (int i = 0; i < MAX_RETRIES; ++i) { containerHD = - store.getContainer(ContainerId.newInstance(ApplicationAttemptId + store.getContainer(ContainerId.newContainerId(ApplicationAttemptId .newInstance(ApplicationId.newInstance(0, 1), 1), 1)); if (containerHD != null) { break; @@ -307,7 +307,7 @@ public void testWriteContainer() throws Exception { writer.containerFinished(container); for (int i = 0; i < MAX_RETRIES; ++i) { containerHD = - store.getContainer(ContainerId.newInstance(ApplicationAttemptId + store.getContainer(ContainerId.newContainerId(ApplicationAttemptId .newInstance(ApplicationId.newInstance(0, 1), 1), 1)); if (containerHD.getContainerState() != null) { break; @@ -337,7 +337,7 @@ public void testParallelWrite() throws Exception { RMAppAttempt appAttempt = createRMAppAttempt(appAttemptId); writer.applicationAttemptStarted(appAttempt); for (int k = 1; k <= 10; ++k) { - ContainerId containerId = ContainerId.newInstance(appAttemptId, k); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, k); RMContainer container = createRMContainer(containerId); writer.containerStarted(container); writer.containerFinished(container); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java index 800f65baaae68..62e3e5c8b9d0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java @@ -189,7 +189,7 @@ public static RMApp newApplication(int i) { final ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(newAppID(i), 0); final Container masterContainer = Records.newRecord(Container.class); - ContainerId containerId = ContainerId.newInstance(appAttemptId, 0); + ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0); masterContainer.setId(containerId); masterContainer.setNodeHttpAddress("node:port"); final String user = newUserName(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java index fcb4e450b0890..a93123e91871d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java @@ -101,20 +101,20 @@ public void testAMRestartWithExistingContainers() throws Exception { // launch the 2nd 
container, for testing running container transferred. nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING); ContainerId containerId2 = - ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING); // launch the 3rd container, for testing container allocated by previous // attempt is completed by the next new attempt/ nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 3, ContainerState.RUNNING); ContainerId containerId3 = - ContainerId.newInstance(am1.getApplicationAttemptId(), 3); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 3); rm1.waitForState(nm1, containerId3, RMContainerState.RUNNING); // 4th container still in AQUIRED state. for testing Acquired container is // always killed. ContainerId containerId4 = - ContainerId.newInstance(am1.getApplicationAttemptId(), 4); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 4); rm1.waitForState(nm1, containerId4, RMContainerState.ACQUIRED); // 5th container is in Allocated state. for testing allocated container is @@ -122,14 +122,14 @@ public void testAMRestartWithExistingContainers() throws Exception { am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>()); nm1.nodeHeartbeat(true); ContainerId containerId5 = - ContainerId.newInstance(am1.getApplicationAttemptId(), 5); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 5); rm1.waitForContainerAllocated(nm1, containerId5); rm1.waitForState(nm1, containerId5, RMContainerState.ALLOCATED); // 6th container is in Reserved state. 
am1.allocate("127.0.0.1", 6000, 1, new ArrayList<ContainerId>()); ContainerId containerId6 = - ContainerId.newInstance(am1.getApplicationAttemptId(), 6); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 6); nm1.nodeHeartbeat(true); SchedulerApplicationAttempt schedulerAttempt = ((AbstractYarnScheduler) rm1.getResourceScheduler()) @@ -295,12 +295,12 @@ public void testNMTokensRebindOnAMRestart() throws Exception { // launch the container-2 nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING); ContainerId containerId2 = - ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING); // launch the container-3 nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 3, ContainerState.RUNNING); ContainerId containerId3 = - ContainerId.newInstance(am1.getApplicationAttemptId(), 3); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 3); rm1.waitForState(nm1, containerId3, RMContainerState.RUNNING); // fail am1 @@ -335,7 +335,7 @@ public void testNMTokensRebindOnAMRestart() throws Exception { } nm1.nodeHeartbeat(am2.getApplicationAttemptId(), 2, ContainerState.RUNNING); ContainerId am2ContainerId2 = - ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); rm1.waitForState(nm1, am2ContainerId2, RMContainerState.RUNNING); // fail am2. 
@@ -379,7 +379,7 @@ public void testShouldNotCountFailureToMaxAttemptRetry() throws Exception { CapacityScheduler scheduler = (CapacityScheduler) rm1.getResourceScheduler(); ContainerId amContainer = - ContainerId.newInstance(am1.getApplicationAttemptId(), 1); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); // Preempt the first attempt; scheduler.killContainer(scheduler.getRMContainer(amContainer)); @@ -396,7 +396,7 @@ public void testShouldNotCountFailureToMaxAttemptRetry() throws Exception { // Preempt the second attempt. ContainerId amContainer2 = - ContainerId.newInstance(am2.getApplicationAttemptId(), 1); + ContainerId.newContainerId(am2.getApplicationAttemptId(), 1); scheduler.killContainer(scheduler.getRMContainer(amContainer2)); am2.waitForState(RMAppAttemptState.FAILED); @@ -487,7 +487,7 @@ public void testPreemptedAMRestartOnRMRestart() throws Exception { CapacityScheduler scheduler = (CapacityScheduler) rm1.getResourceScheduler(); ContainerId amContainer = - ContainerId.newInstance(am1.getApplicationAttemptId(), 1); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); // Forcibly preempt the am container; scheduler.killContainer(scheduler.getRMContainer(amContainer)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java index bc509a0e2508f..65c8547218097 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java @@ -250,7 +250,7 @@ public void testPublishAppAttemptMetrics() throws Exception { @Test(timeout = 10000) public void testPublishContainerMetrics() throws Exception { ContainerId containerId = - ContainerId.newInstance(ApplicationAttemptId.newInstance( + ContainerId.newContainerId(ApplicationAttemptId.newInstance( ApplicationId.newInstance(0, 1), 1), 1); RMContainer container = createRMContainer(containerId); metricsPublisher.containerCreated(container, container.getCreationTime()); @@ -347,7 +347,7 @@ private static RMAppAttempt createRMAppAttempt( when(appAttempt.getRpcPort()).thenReturn(-100); Container container = mock(Container.class); when(container.getId()) - .thenReturn(ContainerId.newInstance(appAttemptId, 1)); + .thenReturn(ContainerId.newContainerId(appAttemptId, 1)); when(appAttempt.getMasterContainer()).thenReturn(container); when(appAttempt.getDiagnostics()).thenReturn("test diagnostics info"); when(appAttempt.getTrackingUrl()).thenReturn("test tracking url"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java index a0c2b01607ba7..24e70bb4c9e08 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java @@ -728,7 +728,7 @@ FiCaSchedulerApp mockApp(int qid, int id, int used, int pending, int reserved, RMContainer mockContainer(ApplicationAttemptId appAttId, int id, Resource r, int priority) { - ContainerId cId = ContainerId.newInstance(appAttId, id); + ContainerId cId = ContainerId.newContainerId(appAttId, id); Container c = mock(Container.class); when(c.getResource()).thenReturn(r); when(c.getPriority()).thenReturn(Priority.create(priority)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index e5daf6fd320ad..2b5c2b882b9ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -1395,7 +1395,7 @@ public void testFailedToFailed() { // failed attempt captured the container finished event. 
assertEquals(0, applicationAttempt.getJustFinishedContainers().size()); ContainerStatus cs2 = - ContainerStatus.newInstance(ContainerId.newInstance(appAttemptId, 2), + ContainerStatus.newInstance(ContainerId.newContainerId(appAttemptId, 2), ContainerState.COMPLETE, "", 0); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent( appAttemptId, cs2, anyNodeId)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java index 553587ed17447..76cdcaeb0b24a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java @@ -249,7 +249,7 @@ public void testExistenceOfResourceRequestInRMContainer() throws Exception { // request a container. 
am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>()); - ContainerId containerId2 = ContainerId.newInstance( + ContainerId containerId2 = ContainerId.newContainerId( am1.getApplicationAttemptId(), 2); rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java index c168b955c1e73..c648b83ad4bf5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java @@ -138,7 +138,7 @@ private RMContainer createReservedRMContainer(ApplicationAttemptId appAttId, private RMContainer createRMContainer(ApplicationAttemptId appAttId, int id, Resource resource) { - ContainerId containerId = ContainerId.newInstance(appAttId, id); + ContainerId containerId = ContainerId.newContainerId(appAttId, id); RMContainer rmContainer = mock(RMContainer.class); Container container = mock(Container.class); when(container.getResource()).thenReturn(resource); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java index 
c3ae38c364a97..c9e81eebb9714 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java @@ -560,7 +560,7 @@ public void testComparePriorities(){ @Test public void testCreateAbnormalContainerStatus() { ContainerStatus cd = SchedulerUtils.createAbnormalContainerStatus( - ContainerId.newInstance(ApplicationAttemptId.newInstance( + ContainerId.newContainerId(ApplicationAttemptId.newInstance( ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x"); Assert.assertEquals(ContainerExitStatus.ABORTED, cd.getExitStatus()); } @@ -568,7 +568,7 @@ public void testCreateAbnormalContainerStatus() { @Test public void testCreatePreemptedContainerStatus() { ContainerStatus cd = SchedulerUtils.createPreemptedContainerStatus( - ContainerId.newInstance(ApplicationAttemptId.newInstance( + ContainerId.newContainerId(ApplicationAttemptId.newInstance( ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x"); Assert.assertEquals(ContainerExitStatus.PREEMPTED, cd.getExitStatus()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 98dc673da2563..2aa57a0d79524 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -1085,7 +1085,7 @@ public void testRecoverRequestAfterPreemption() throws Exception { // request a container. am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>()); - ContainerId containerId1 = ContainerId.newInstance( + ContainerId containerId1 = ContainerId.newContainerId( am1.getApplicationAttemptId(), 2); rm1.waitForState(nm1, containerId1, RMContainerState.ALLOCATED); @@ -1122,7 +1122,7 @@ public void testRecoverRequestAfterPreemption() throws Exception { } // New container will be allocated and will move to ALLOCATED state - ContainerId containerId2 = ContainerId.newInstance( + ContainerId containerId2 = ContainerId.newContainerId( am1.getApplicationAttemptId(), 3); rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java index 0c32c0cecf716..ad834ac1b3c39 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java @@ -164,7 +164,7 @@ public void testContainerTokenGeneratedOnPullRequest() throws Exception { // request a container. 
am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>()); ContainerId containerId2 = - ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED); RMContainer container = @@ -194,7 +194,7 @@ public void testNormalContainerAllocationWhenDNSUnavailable() throws Exception{ // request a container. am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>()); ContainerId containerId2 = - ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED); // acquire the container. @@ -247,7 +247,7 @@ private LogAggregationContext getLogAggregationContextFromContainerToken( // request a container. am2.allocate("127.0.0.1", 512, 1, new ArrayList<ContainerId>()); ContainerId containerId = - ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); rm1.waitForState(nm1, containerId, RMContainerState.ALLOCATED); // acquire the container. 
@@ -480,13 +480,13 @@ public RMNodeLabelsManager createNodeLabelManager() { // A has only 10% of x, so it can only allocate one container in label=empty ContainerId containerId = - ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), ""); Assert.assertTrue(rm1.waitForState(nm3, containerId, RMContainerState.ALLOCATED, 10 * 1000)); // Cannot allocate 2nd label=empty container containerId = - ContainerId.newInstance(am1.getApplicationAttemptId(), 3); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 3); am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), ""); Assert.assertFalse(rm1.waitForState(nm3, containerId, RMContainerState.ALLOCATED, 10 * 1000)); @@ -495,7 +495,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // We can allocate floor(8000 / 1024) = 7 containers for (int id = 3; id <= 8; id++) { containerId = - ContainerId.newInstance(am1.getApplicationAttemptId(), id); + ContainerId.newContainerId(am1.getApplicationAttemptId(), id); am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x"); Assert.assertTrue(rm1.waitForState(nm1, containerId, RMContainerState.ALLOCATED, 10 * 1000)); @@ -571,7 +571,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // request a container (label = x && y). 
can only allocate on nm2 am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x && y"); containerId = - ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); Assert.assertFalse(rm1.waitForState(nm1, containerId, RMContainerState.ALLOCATED, 10 * 1000)); Assert.assertTrue(rm1.waitForState(nm2, containerId, @@ -588,7 +588,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // and now b1's queue capacity will be used, cannot allocate more containers // (Maximum capacity reached) am2.allocate("*", 1024, 1, new ArrayList<ContainerId>()); - containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); Assert.assertFalse(rm1.waitForState(nm4, containerId, RMContainerState.ALLOCATED, 10 * 1000)); Assert.assertFalse(rm1.waitForState(nm5, containerId, @@ -601,7 +601,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // request a container. try to allocate on nm1 (label = x) and nm3 (label = // y,z). Will successfully allocate on nm3 am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y"); - containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2); + containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2); Assert.assertFalse(rm1.waitForState(nm1, containerId, RMContainerState.ALLOCATED, 10 * 1000)); Assert.assertTrue(rm1.waitForState(nm3, containerId, @@ -612,7 +612,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // try to allocate container (request label = y && z) on nm3 (label = y) and // nm4 (label = y,z). Will sucessfully allocate on nm4 only. 
am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y && z"); - containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 3); + containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 3); Assert.assertFalse(rm1.waitForState(nm3, containerId, RMContainerState.ALLOCATED, 10 * 1000)); Assert.assertTrue(rm1.waitForState(nm4, containerId, @@ -654,7 +654,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // request a container. am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x"); containerId = - ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); Assert.assertFalse(rm1.waitForState(nm2, containerId, RMContainerState.ALLOCATED, 10 * 1000)); Assert.assertTrue(rm1.waitForState(nm1, containerId, @@ -669,7 +669,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // request a container. am2.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y"); - containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); Assert.assertFalse(rm1.waitForState(nm1, containerId, RMContainerState.ALLOCATED, 10 * 1000)); Assert.assertTrue(rm1.waitForState(nm2, containerId, @@ -684,7 +684,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // request a container. am3.allocate("*", 1024, 1, new ArrayList<ContainerId>()); - containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2); + containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2); Assert.assertFalse(rm1.waitForState(nm2, containerId, RMContainerState.ALLOCATED, 10 * 1000)); Assert.assertTrue(rm1.waitForState(nm3, containerId, @@ -730,7 +730,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // request a container. 
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>()); containerId = - ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); Assert.assertFalse(rm1.waitForState(nm3, containerId, RMContainerState.ALLOCATED, 10 * 1000)); Assert.assertTrue(rm1.waitForState(nm1, containerId, @@ -745,7 +745,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // request a container. am2.allocate("*", 1024, 1, new ArrayList<ContainerId>()); - containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); Assert.assertFalse(rm1.waitForState(nm3, containerId, RMContainerState.ALLOCATED, 10 * 1000)); Assert.assertTrue(rm1.waitForState(nm2, containerId, @@ -760,7 +760,7 @@ public RMNodeLabelsManager createNodeLabelManager() { // request a container. am3.allocate("*", 1024, 1, new ArrayList<ContainerId>()); - containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2); + containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2); Assert.assertFalse(rm1.waitForState(nm2, containerId, RMContainerState.ALLOCATED, 10 * 1000)); Assert.assertTrue(rm1.waitForState(nm3, containerId, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 519425145a89e..ca0e954e7290c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -3530,7 +3530,7 @@ public void testRecoverRequestAfterPreemption() throws Exception { // ResourceRequest will be empty once NodeUpdate is completed Assert.assertNull(app.getResourceRequest(priority, host)); - ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 1); + ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1); RMContainer rmContainer = app.getRMContainer(containerId1); // Create a preempt event and register for preemption diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java index de8d3029778d2..f0dcb562a234c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java @@ -231,7 +231,7 @@ private void testNMTokens(Configuration conf) throws Exception { ApplicationAttemptId.newInstance(appId, 1); ContainerId validContainerId = - ContainerId.newInstance(validAppAttemptId, 0); + ContainerId.newContainerId(validAppAttemptId, 0); NodeId validNode = yarnCluster.getNodeManager(0).getNMContext().getNodeId(); NodeId invalidNode = NodeId.newInstance("InvalidHost", 1234); @@ -311,7 +311,7 @@ private void testNMTokens(Configuration conf) throws Exception { ApplicationAttemptId.newInstance(appId, 2); ContainerId validContainerId2 = - ContainerId.newInstance(validAppAttemptId2, 0); + ContainerId.newContainerId(validAppAttemptId2, 
0); org.apache.hadoop.yarn.api.records.Token validContainerToken2 = containerTokenSecretManager.createContainerToken(validContainerId2, @@ -401,7 +401,7 @@ private void testNMTokens(Configuration conf) throws Exception { .createNMToken(validAppAttemptId, validNode, user); org.apache.hadoop.yarn.api.records.Token newContainerToken = containerTokenSecretManager.createContainerToken( - ContainerId.newInstance(attempt2, 1), validNode, user, r, + ContainerId.newContainerId(attempt2, 1), validNode, user, r, Priority.newInstance(0), 0); Assert.assertTrue(testStartContainer(rpc, attempt2, validNode, newContainerToken, attempt1NMToken, false).isEmpty()); @@ -638,7 +638,7 @@ private void testContainerToken(Configuration conf) throws IOException, ApplicationId appId = ApplicationId.newInstance(1, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0); - ContainerId cId = ContainerId.newInstance(appAttemptId, 0); + ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); NodeManager nm = yarnCluster.getNodeManager(0); NMTokenSecretManagerInNM nmTokenSecretManagerInNM = nm.getNMContext().getNMTokenSecretManager(); @@ -691,7 +691,7 @@ private void testContainerToken(Configuration conf) throws IOException, } while (containerTokenSecretManager.getCurrentKey().getKeyId() == tamperedContainerTokenSecretManager.getCurrentKey().getKeyId()); - ContainerId cId2 = ContainerId.newInstance(appAttemptId, 1); + ContainerId cId2 = ContainerId.newContainerId(appAttemptId, 1); // Creating modified containerToken Token containerToken2 = tamperedContainerTokenSecretManager.createContainerToken(cId2, nodeId, @@ -733,7 +733,7 @@ private void testContainerTokenWithEpoch(Configuration conf) ApplicationId appId = ApplicationId.newInstance(1, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0); - ContainerId cId = ContainerId.newInstance(appAttemptId, (5L << 40) | 3L); + ContainerId cId = ContainerId.newContainerId(appAttemptId, (5L << 
40) | 3L); NodeManager nm = yarnCluster.getNodeManager(0); NMTokenSecretManagerInNM nmTokenSecretManagerInNM = nm.getNMContext().getNMTokenSecretManager();
ede53193d6eba06b1f3ee657cb5777f4ca2f8e76
Delta Spike
DELTASPIKE-403 make MessageBundles PassivationCapable
c
https://github.com/apache/deltaspike
diff --git a/deltaspike/core/impl/src/main/java/org/apache/deltaspike/core/impl/message/MessageBundleExtension.java b/deltaspike/core/impl/src/main/java/org/apache/deltaspike/core/impl/message/MessageBundleExtension.java index 213c0f417..6835c84ec 100644 --- a/deltaspike/core/impl/src/main/java/org/apache/deltaspike/core/impl/message/MessageBundleExtension.java +++ b/deltaspike/core/impl/src/main/java/org/apache/deltaspike/core/impl/message/MessageBundleExtension.java @@ -194,14 +194,16 @@ protected void installMessageBundleProducerBeans(@Observes AfterBeanDiscovery ab } } - private static <T> Bean<T> createMessageBundleBean(Bean<Object> delegate, - AnnotatedType<T> annotatedType, - BeanManager beanManager) + private <T> Bean<T> createMessageBundleBean(Bean<Object> delegate, + AnnotatedType<T> annotatedType, + BeanManager beanManager) { WrappingBeanBuilder<T> beanBuilder = new WrappingBeanBuilder<T>(delegate, beanManager) .readFromType(annotatedType); //X TODO re-visit type.getBaseType() in combination with #addQualifier beanBuilder.types(annotatedType.getJavaClass(), Object.class, Serializable.class); + beanBuilder.passivationCapable(true); + beanBuilder.id("MessageBundleBean#" + annotatedType.getJavaClass().getName()); return beanBuilder.create(); } diff --git a/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/MessageUser.java b/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/MessageUser.java new file mode 100644 index 000000000..d91d32d20 --- /dev/null +++ b/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/MessageUser.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.deltaspike.test.core.api.message; + +import javax.enterprise.context.SessionScoped; +import javax.inject.Inject; +import java.io.Serializable; + +/** + * A SessionScoped (passivating!) user which uses a DeltaSpike message + */ +@SessionScoped +public class MessageUser implements Serializable +{ + @Inject + private SimpleMessage msg; + + public SimpleMessage getMsg() { + return msg; + } +} diff --git a/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/SimpleMessageTest.java b/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/SimpleMessageTest.java index 5ee43d738..e5dadd8c2 100644 --- a/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/SimpleMessageTest.java +++ b/deltaspike/core/impl/src/test/java/org/apache/deltaspike/test/core/api/message/SimpleMessageTest.java @@ -52,6 +52,10 @@ public class SimpleMessageTest @Inject private LocaleResolver localeResolver; + @Inject + private MessageUser messageUser; + + /** * X TODO creating a WebArchive is only a workaround because JavaArchive * cannot contain other archives. 
@@ -103,10 +107,18 @@ public void testNullMessage() } @Test - public void testMessageSerialisation() { + public void testMessageSerialisation() + { Serializer<SimpleMessage> simpleMessageSerializer = new Serializer<SimpleMessage>(); SimpleMessage sm2 = simpleMessageSerializer.roundTrip(simpleMessage); assertNotNull(sm2); } + + @Test + public void testPassivationCapability() + { + assertEquals("Welcome to DeltaSpike", messageUser.getMsg().welcomeToDeltaSpike()); + assertEquals("Welcome to DeltaSpike", messageUser.getMsg().welcomeWithStringVariable("DeltaSpike")); + } }
4405698ee99fe26d0ac9317a2df96096f2731a7b
hbase
HBASE-7703 Eventually all online snapshots fail- due to Timeout at same regionserver.--Online snapshot attempts would fail due to timeout because a rowlock could not be obtained. Prior to this a-cancellation occurred which likely grabbed the lock without cleaning it properly. The fix here is to use nice cancel-instead of interrupting cancel on failures.----git-svn-id: https://svn.apache.org/repos/asf/hbase/branches/hbase-7290@1445866 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hbase
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index 3e5238e7b281..1282585d52eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -347,7 +347,11 @@ void cancelTasks() throws InterruptedException { Collection<Future<Void>> tasks = futures; LOG.debug("cancelling " + tasks.size() + " tasks for snapshot " + name); for (Future<Void> f: tasks) { - f.cancel(true); + // TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there + // are places in the HBase code where row/region locks are taken and not released in a + // finally block. Thus we cancel without interrupting. Cancellations will be slower to + // complete but we won't suffer from unreleased locks due to poor code discipline. + f.cancel(false); } // evict remaining tasks and futures from taskPool.
9da487e0fdbf657f9b401e62d165ab13105488a0
hadoop
YARN-3853. Add docker container runtime support to- LinuxContainterExecutor. Contributed by Sidharta Seethana.--(cherry picked from commit 3e6fce91a471b4a5099de109582e7c6417e8a822)--Conflicts:-
a
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 993828da00951..724ddd0eea536 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -98,6 +98,10 @@ Release 2.8.0 - UNRELEASED YARN-3852. Add docker container support to container-executor (Abin Shahab via vvasudev) + YARN-3853. Add docker container runtime support to LinuxContainterExecutor. + (Sidharta Seethana via vvasudev) + + IMPROVEMENTS YARN-644. Basic null check is not performed on passed in arguments before diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 79f9b0d2910f0..68bfbbfdd148b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -24,8 +24,10 @@ import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -39,6 +41,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -160,7 +163,7 @@ public abstract 
void deleteAsUser(DeletionAsUserContext ctx) * @return true if container is still alive * @throws IOException */ - public abstract boolean isContainerProcessAlive(ContainerLivenessContext ctx) + public abstract boolean isContainerAlive(ContainerLivenessContext ctx) throws IOException; /** @@ -174,6 +177,7 @@ public abstract boolean isContainerProcessAlive(ContainerLivenessContext ctx) */ public int reacquireContainer(ContainerReacquisitionContext ctx) throws IOException, InterruptedException { + Container container = ctx.getContainer(); String user = ctx.getUser(); ContainerId containerId = ctx.getContainerId(); @@ -193,10 +197,11 @@ public int reacquireContainer(ContainerReacquisitionContext ctx) LOG.info("Reacquiring " + containerId + " with pid " + pid); ContainerLivenessContext livenessContext = new ContainerLivenessContext .Builder() + .setContainer(container) .setUser(user) .setPid(pid) .build(); - while(isContainerProcessAlive(livenessContext)) { + while(isContainerAlive(livenessContext)) { Thread.sleep(1000); } @@ -243,9 +248,20 @@ public void writeLaunchEnv(OutputStream out, Map<String, String> environment, Map<Path, List<String>> resources, List<String> command) throws IOException{ ContainerLaunch.ShellScriptBuilder sb = ContainerLaunch.ShellScriptBuilder.create(); + Set<String> whitelist = new HashSet<String>(); + whitelist.add(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME); + whitelist.add(ApplicationConstants.Environment.HADOOP_YARN_HOME.name()); + whitelist.add(ApplicationConstants.Environment.HADOOP_COMMON_HOME.name()); + whitelist.add(ApplicationConstants.Environment.HADOOP_HDFS_HOME.name()); + whitelist.add(ApplicationConstants.Environment.HADOOP_CONF_DIR.name()); + whitelist.add(ApplicationConstants.Environment.JAVA_HOME.name()); if (environment != null) { for (Map.Entry<String,String> env : environment.entrySet()) { - sb.env(env.getKey().toString(), env.getValue().toString()); + if (!whitelist.contains(env.getKey())) { + 
sb.env(env.getKey().toString(), env.getValue().toString()); + } else { + sb.whitelistedEnv(env.getKey().toString(), env.getValue().toString()); + } } } if (resources != null) { @@ -492,6 +508,7 @@ public void run() { try { Thread.sleep(delay); containerExecutor.signalContainer(new ContainerSignalContext.Builder() + .setContainer(container) .setUser(user) .setPid(pid) .setSignal(signal) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java index b9be2b110e2d0..5819f2378d669 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java @@ -430,7 +430,7 @@ public boolean signalContainer(ContainerSignalContext ctx) } @Override - public boolean isContainerProcessAlive(ContainerLivenessContext ctx) + public boolean isContainerAlive(ContainerLivenessContext ctx) throws IOException { String pid = ctx.getPid(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java index d3b5d0a7dda2e..9dffff3037b34 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java @@ -413,7 +413,7 @@ public boolean signalContainer(ContainerSignalContext ctx) } @Override - public boolean isContainerProcessAlive(ContainerLivenessContext ctx) + public boolean isContainerAlive(ContainerLivenessContext ctx) throws IOException { String pid = ctx.getPid(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java index c60b80b8e97c2..f8e58c1ddddd9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java @@ -20,15 +20,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; - -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.regex.Pattern; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -46,10 +37,14 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; -import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DelegatingLinuxContainerRuntime; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; @@ -60,6 +55,22 @@ import org.apache.hadoop.yarn.server.nodemanager.util.LCEResourcesHandler; import org.apache.hadoop.yarn.util.ConverterUtils; +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.regex.Pattern; + +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; + +/** Container execution for Linux. 
Provides linux-specific localization + * mechanisms, resource management via cgroups and can switch between multiple + * container runtimes - e.g Standard "Process Tree", Docker etc + */ + public class LinuxContainerExecutor extends ContainerExecutor { private static final Log LOG = LogFactory @@ -73,6 +84,15 @@ public class LinuxContainerExecutor extends ContainerExecutor { private int containerSchedPriorityAdjustment = 0; private boolean containerLimitUsers; private ResourceHandler resourceHandlerChain; + private LinuxContainerRuntime linuxContainerRuntime; + + public LinuxContainerExecutor() { + } + + // created primarily for testing + public LinuxContainerExecutor(LinuxContainerRuntime linuxContainerRuntime) { + this.linuxContainerRuntime = linuxContainerRuntime; + } @Override public void setConf(Configuration conf) { @@ -87,8 +107,8 @@ public void setConf(Configuration conf) { if (conf.get(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY) != null) { containerSchedPriorityIsSet = true; containerSchedPriorityAdjustment = conf - .getInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, - YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY); + .getInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, + YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY); } nonsecureLocalUser = conf.get( YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, @@ -122,48 +142,6 @@ String getRunAsUser(String user) { } } - - - /** - * List of commands that the setuid script will execute. - */ - enum Commands { - INITIALIZE_CONTAINER(0), - LAUNCH_CONTAINER(1), - SIGNAL_CONTAINER(2), - DELETE_AS_USER(3); - - private int value; - Commands(int value) { - this.value = value; - } - int getValue() { - return value; - } - } - - /** - * Result codes returned from the C container-executor. - * These must match the values in container-executor.h. 
- */ - enum ResultCode { - OK(0), - INVALID_USER_NAME(2), - UNABLE_TO_EXECUTE_CONTAINER_SCRIPT(7), - INVALID_CONTAINER_PID(9), - INVALID_CONTAINER_EXEC_PERMISSIONS(22), - INVALID_CONFIG_FILE(24), - WRITE_CGROUP_FAILED(27); - - private final int value; - ResultCode(int value) { - this.value = value; - } - int getValue() { - return value; - } - } - protected String getContainerExecutorExecutablePath(Configuration conf) { String yarnHomeEnvVar = System.getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key()); @@ -205,9 +183,9 @@ public void init() throws IOException { + " (error=" + exitCode + ")", e); } - try { - Configuration conf = super.getConf(); + Configuration conf = super.getConf(); + try { resourceHandlerChain = ResourceHandlerModule .getConfiguredResourceHandlerChain(conf); if (resourceHandlerChain != null) { @@ -218,9 +196,20 @@ public void init() throws IOException { throw new IOException("Failed to bootstrap configured resource subsystems!"); } + try { + if (linuxContainerRuntime == null) { + LinuxContainerRuntime runtime = new DelegatingLinuxContainerRuntime(); + + runtime.initialize(conf); + this.linuxContainerRuntime = runtime; + } + } catch (ContainerExecutionException e) { + throw new IOException("Failed to initialize linux container runtime(s)!"); + } + resourcesHandler.init(this); } - + @Override public void startLocalizer(LocalizerStartContext ctx) throws IOException, InterruptedException { @@ -240,7 +229,7 @@ public void startLocalizer(LocalizerStartContext ctx) command.addAll(Arrays.asList(containerExecutorExe, runAsUser, user, - Integer.toString(Commands.INITIALIZE_CONTAINER.getValue()), + Integer.toString(PrivilegedOperation.RunAsUserCommand.INITIALIZE_CONTAINER.getValue()), appId, nmPrivateContainerTokensPath.toUri().getPath().toString(), StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, @@ -296,6 +285,7 @@ public int launchContainer(ContainerStartContext ctx) throws IOException { Path containerWorkDir = 
ctx.getContainerWorkDir(); List<String> localDirs = ctx.getLocalDirs(); List<String> logDirs = ctx.getLogDirs(); + Map<Path, List<String>> localizedResources = ctx.getLocalizedResources(); verifyUsernamePattern(user); String runAsUser = getRunAsUser(user); @@ -353,50 +343,48 @@ public int launchContainer(ContainerStartContext ctx) throws IOException { throw new IOException("ResourceHandlerChain.preStart() failed!"); } - ShellCommandExecutor shExec = null; - try { Path pidFilePath = getPidFilePath(containerId); if (pidFilePath != null) { - List<String> command = new ArrayList<String>(); - addSchedPriorityCommand(command); - command.addAll(Arrays.asList( - containerExecutorExe, runAsUser, user, Integer - .toString(Commands.LAUNCH_CONTAINER.getValue()), appId, - containerIdStr, containerWorkDir.toString(), - nmPrivateContainerScriptPath.toUri().getPath().toString(), - nmPrivateTokensPath.toUri().getPath().toString(), - pidFilePath.toString(), - StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, - localDirs), - StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, - logDirs), - resourcesOptions)); + List<String> prefixCommands= new ArrayList<>(); + ContainerRuntimeContext.Builder builder = new ContainerRuntimeContext + .Builder(container); + + addSchedPriorityCommand(prefixCommands); + if (prefixCommands.size() > 0) { + builder.setExecutionAttribute(CONTAINER_LAUNCH_PREFIX_COMMANDS, + prefixCommands); + } + + builder.setExecutionAttribute(LOCALIZED_RESOURCES, localizedResources) + .setExecutionAttribute(RUN_AS_USER, runAsUser) + .setExecutionAttribute(USER, user) + .setExecutionAttribute(APPID, appId) + .setExecutionAttribute(CONTAINER_ID_STR, containerIdStr) + .setExecutionAttribute(CONTAINER_WORK_DIR, containerWorkDir) + .setExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH, + nmPrivateContainerScriptPath) + .setExecutionAttribute(NM_PRIVATE_TOKENS_PATH, nmPrivateTokensPath) + .setExecutionAttribute(PID_FILE_PATH, pidFilePath) + 
.setExecutionAttribute(LOCAL_DIRS, localDirs) + .setExecutionAttribute(LOG_DIRS, logDirs) + .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions); if (tcCommandFile != null) { - command.add(tcCommandFile); + builder.setExecutionAttribute(TC_COMMAND_FILE, tcCommandFile); } - String[] commandArray = command.toArray(new String[command.size()]); - shExec = new ShellCommandExecutor(commandArray, null, // NM's cwd - container.getLaunchContext().getEnvironment()); // sanitized env - if (LOG.isDebugEnabled()) { - LOG.debug("launchContainer: " + Arrays.toString(commandArray)); - } - shExec.execute(); - if (LOG.isDebugEnabled()) { - logOutput(shExec.getOutput()); - } + linuxContainerRuntime.launchContainer(builder.build()); } else { LOG.info("Container was marked as inactive. Returning terminated error"); return ExitCode.TERMINATED.getExitCode(); } - } catch (ExitCodeException e) { - int exitCode = shExec.getExitCode(); + } catch (ContainerExecutionException e) { + int exitCode = e.getExitCode(); LOG.warn("Exit code from container " + containerId + " is : " + exitCode); // 143 (SIGTERM) and 137 (SIGKILL) exit codes means the container was // terminated/killed forcefully. 
In all other cases, log the - // container-executor's output + // output if (exitCode != ExitCode.FORCE_KILLED.getExitCode() && exitCode != ExitCode.TERMINATED.getExitCode()) { LOG.warn("Exception from container-launch with container ID: " @@ -406,13 +394,13 @@ public int launchContainer(ContainerStartContext ctx) throws IOException { builder.append("Exception from container-launch.\n"); builder.append("Container id: " + containerId + "\n"); builder.append("Exit code: " + exitCode + "\n"); - if (!Optional.fromNullable(e.getMessage()).or("").isEmpty()) { - builder.append("Exception message: " + e.getMessage() + "\n"); + if (!Optional.fromNullable(e.getErrorOutput()).or("").isEmpty()) { + builder.append("Exception message: " + e.getErrorOutput() + "\n"); } builder.append("Stack trace: " + StringUtils.stringifyException(e) + "\n"); - if (!shExec.getOutput().isEmpty()) { - builder.append("Shell output: " + shExec.getOutput() + "\n"); + if (!e.getOutput().isEmpty()) { + builder.append("Shell output: " + e.getOutput() + "\n"); } String diagnostics = builder.toString(); logOutput(diagnostics); @@ -435,10 +423,7 @@ public int launchContainer(ContainerStartContext ctx) throws IOException { "containerId: " + containerId + ". 
Exception: " + e); } } - if (LOG.isDebugEnabled()) { - LOG.debug("Output from LinuxContainerExecutor's launchContainer follows:"); - logOutput(shExec.getOutput()); - } + return 0; } @@ -476,6 +461,7 @@ public int reacquireContainer(ContainerReacquisitionContext ctx) @Override public boolean signalContainer(ContainerSignalContext ctx) throws IOException { + Container container = ctx.getContainer(); String user = ctx.getUser(); String pid = ctx.getPid(); Signal signal = ctx.getSignal(); @@ -483,30 +469,27 @@ public boolean signalContainer(ContainerSignalContext ctx) verifyUsernamePattern(user); String runAsUser = getRunAsUser(user); - String[] command = - new String[] { containerExecutorExe, - runAsUser, - user, - Integer.toString(Commands.SIGNAL_CONTAINER.getValue()), - pid, - Integer.toString(signal.getValue()) }; - ShellCommandExecutor shExec = new ShellCommandExecutor(command); - if (LOG.isDebugEnabled()) { - LOG.debug("signalContainer: " + Arrays.toString(command)); - } + ContainerRuntimeContext runtimeContext = new ContainerRuntimeContext + .Builder(container) + .setExecutionAttribute(RUN_AS_USER, runAsUser) + .setExecutionAttribute(USER, user) + .setExecutionAttribute(PID, pid) + .setExecutionAttribute(SIGNAL, signal) + .build(); + try { - shExec.execute(); - } catch (ExitCodeException e) { - int ret_code = shExec.getExitCode(); - if (ret_code == ResultCode.INVALID_CONTAINER_PID.getValue()) { + linuxContainerRuntime.signalContainer(runtimeContext); + } catch (ContainerExecutionException e) { + int retCode = e.getExitCode(); + if (retCode == PrivilegedOperation.ResultCode.INVALID_CONTAINER_PID.getValue()) { return false; } LOG.warn("Error in signalling container " + pid + " with " + signal - + "; exit = " + ret_code, e); - logOutput(shExec.getOutput()); + + "; exit = " + retCode, e); + logOutput(e.getOutput()); throw new IOException("Problem signalling container " + pid + " with " - + signal + "; output: " + shExec.getOutput() + " and exitCode: " - + ret_code, 
e); + + signal + "; output: " + e.getOutput() + " and exitCode: " + + retCode, e); } return true; } @@ -526,7 +509,8 @@ public void deleteAsUser(DeletionAsUserContext ctx) { Arrays.asList(containerExecutorExe, runAsUser, user, - Integer.toString(Commands.DELETE_AS_USER.getValue()), + Integer.toString(PrivilegedOperation. + RunAsUserCommand.DELETE_AS_USER.getValue()), dirString)); List<String> pathsToDelete = new ArrayList<String>(); if (baseDirs == null || baseDirs.size() == 0) { @@ -560,13 +544,15 @@ public void deleteAsUser(DeletionAsUserContext ctx) { } @Override - public boolean isContainerProcessAlive(ContainerLivenessContext ctx) + public boolean isContainerAlive(ContainerLivenessContext ctx) throws IOException { String user = ctx.getUser(); String pid = ctx.getPid(); + Container container = ctx.getContainer(); // Send a test signal to the process as the user to see if it's alive return signalContainer(new ContainerSignalContext.Builder() + .setContainer(container) .setUser(user) .setPid(pid) .setSignal(Signal.NULL) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java index af168c5943bd6..bf00d74dce7ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java @@ -303,6 +303,7 @@ public Integer call() { exec.activateContainer(containerID, pidFilePath); ret = exec.launchContainer(new 
ContainerStartContext.Builder() .setContainer(container) + .setLocalizedResources(localResources) .setNmPrivateContainerScriptPath(nmPrivateContainerScriptPath) .setNmPrivateTokensPath(nmPrivateTokensPath) .setUser(user) @@ -427,6 +428,7 @@ public void cleanupContainer() throws IOException { boolean result = exec.signalContainer( new ContainerSignalContext.Builder() + .setContainer(container) .setUser(user) .setPid(processId) .setSignal(signal) @@ -528,6 +530,8 @@ public static ShellScriptBuilder create() { public abstract void command(List<String> command) throws IOException; + public abstract void whitelistedEnv(String key, String value) throws IOException; + public abstract void env(String key, String value) throws IOException; public final void symlink(Path src, Path dst) throws IOException { @@ -585,6 +589,11 @@ public void command(List<String> command) { errorCheck(); } + @Override + public void whitelistedEnv(String key, String value) { + line("export ", key, "=${", key, ":-", "\"", value, "\"}"); + } + @Override public void env(String key, String value) { line("export ", key, "=\"", value, "\""); @@ -626,6 +635,12 @@ public void command(List<String> command) throws IOException { errorCheck(); } + @Override + public void whitelistedEnv(String key, String value) throws IOException { + lineWithLenCheck("@set ", key, "=", value); + errorCheck(); + } + @Override public void env(String key, String value) throws IOException { lineWithLenCheck("@set ", key, "=", value); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java index f220cbd98f7cd..cbbf7a80c0b00 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java @@ -45,10 +45,12 @@ public enum OperationType { LAUNCH_CONTAINER(""), //no CLI switch supported yet SIGNAL_CONTAINER(""), //no CLI switch supported yet DELETE_AS_USER(""), //no CLI switch supported yet + LAUNCH_DOCKER_CONTAINER(""), //no CLI switch supported yet TC_MODIFY_STATE("--tc-modify-state"), TC_READ_STATE("--tc-read-state"), TC_READ_STATS("--tc-read-stats"), - ADD_PID_TO_CGROUP(""); //no CLI switch supported yet. + ADD_PID_TO_CGROUP(""), //no CLI switch supported yet. + RUN_DOCKER_CMD("--run-docker"); private final String option; @@ -62,6 +64,7 @@ public String getOption() { } public static final String CGROUP_ARG_PREFIX = "cgroups="; + public static final String CGROUP_ARG_NO_TASKS = "none"; private final OperationType opType; private final List<String> args; @@ -117,4 +120,45 @@ public boolean equals(Object other) { public int hashCode() { return opType.hashCode() + 97 * args.hashCode(); } + + /** + * List of commands that the container-executor will execute. + */ + public enum RunAsUserCommand { + INITIALIZE_CONTAINER(0), + LAUNCH_CONTAINER(1), + SIGNAL_CONTAINER(2), + DELETE_AS_USER(3), + LAUNCH_DOCKER_CONTAINER(4); + + private int value; + RunAsUserCommand(int value) { + this.value = value; + } + public int getValue() { + return value; + } + } + + /** + * Result codes returned from the C container-executor. + * These must match the values in container-executor.h. 
+ */ + public enum ResultCode { + OK(0), + INVALID_USER_NAME(2), + UNABLE_TO_EXECUTE_CONTAINER_SCRIPT(7), + INVALID_CONTAINER_PID(9), + INVALID_CONTAINER_EXEC_PERMISSIONS(22), + INVALID_CONFIG_FILE(24), + WRITE_CGROUP_FAILED(27); + + private final int value; + ResultCode(int value) { + this.value = value; + } + public int getValue() { + return value; + } + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java index 20c234d984a0e..3622489a499f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java @@ -24,6 +24,9 @@ public class PrivilegedOperationException extends YarnException { private static final long serialVersionUID = 1L; + private Integer exitCode; + private String output; + private String errorOutput; public PrivilegedOperationException() { super(); @@ -33,11 +36,36 @@ public PrivilegedOperationException(String message) { super(message); } + public PrivilegedOperationException(String message, Integer exitCode, + String output, String errorOutput) { + super(message); + this.exitCode = exitCode; + this.output = output; + this.errorOutput = errorOutput; + } + public PrivilegedOperationException(Throwable cause) { super(cause); } + public PrivilegedOperationException(Throwable cause, Integer 
exitCode, String + output, String errorOutput) { + super(cause); + this.exitCode = exitCode; + this.output = output; + this.errorOutput = errorOutput; + } public PrivilegedOperationException(String message, Throwable cause) { super(message, cause); } -} + + public Integer getExitCode() { + return exitCode; + } + + public String getOutput() { + return output; + } + + public String getErrorOutput() { return errorOutput; } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java index 6fe0f5ce47f3d..1d71938ba2626 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java @@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -101,7 +102,13 @@ public String[] getPrivilegedOperationExecutionCommand(List<String> } fullCommand.add(containerExecutorExe); - fullCommand.add(operation.getOperationType().getOption()); + + String cliSwitch = operation.getOperationType().getOption(); + + if (!cliSwitch.isEmpty()) { + fullCommand.add(cliSwitch); + } + 
fullCommand.addAll(operation.getArguments()); String[] fullCommandArray = @@ -142,6 +149,8 @@ public String executePrivilegedOperation(List<String> prefixCommands, try { exec.execute(); if (LOG.isDebugEnabled()) { + LOG.debug("command array:"); + LOG.debug(Arrays.toString(fullCommandArray)); LOG.debug("Privileged Execution Operation Output:"); LOG.debug(exec.getOutput()); } @@ -152,7 +161,11 @@ public String executePrivilegedOperation(List<String> prefixCommands, .append(System.lineSeparator()).append(exec.getOutput()).toString(); LOG.warn(logLine); - throw new PrivilegedOperationException(e); + + //stderr from shell executor seems to be stuffed into the exception + //'message' - so, we have to extract it and set it as the error out + throw new PrivilegedOperationException(e, e.getExitCode(), + exec.getOutput(), e.getMessage()); } catch (IOException e) { LOG.warn("IOException executing command: ", e); throw new PrivilegedOperationException(e); @@ -202,7 +215,7 @@ public String executePrivilegedOperation(PrivilegedOperation operation, StringBuffer finalOpArg = new StringBuffer(PrivilegedOperation .CGROUP_ARG_PREFIX); - boolean noneArgsOnly = true; + boolean noTasks = true; for (PrivilegedOperation op : ops) { if (!op.getOperationType() @@ -227,23 +240,24 @@ public String executePrivilegedOperation(PrivilegedOperation operation, throw new PrivilegedOperationException("Invalid argument: " + arg); } - if (tasksFile.equals("none")) { + if (tasksFile.equals(PrivilegedOperation.CGROUP_ARG_NO_TASKS)) { //Don't append to finalOpArg continue; } - if (noneArgsOnly == false) { + if (noTasks == false) { //We have already appended at least one tasks file. 
finalOpArg.append(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR); finalOpArg.append(tasksFile); } else { finalOpArg.append(tasksFile); - noneArgsOnly = false; + noTasks = false; } } - if (noneArgsOnly) { - finalOpArg.append("none"); //there were no tasks file to append + if (noTasks) { + finalOpArg.append(PrivilegedOperation.CGROUP_ARG_NO_TASKS); //there + // were no tasks file to append } PrivilegedOperation finalOp = new PrivilegedOperation( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java index 70dc8181de888..6020bc1379fd1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java @@ -78,6 +78,14 @@ public String createCGroup(CGroupController controller, String cGroupId) public void deleteCGroup(CGroupController controller, String cGroupId) throws ResourceHandlerException; + /** + * Gets the relative path for the cgroup, independent of a controller, for a + * given cgroup id. + * @param cGroupId - id of the cgroup + * @return path for the cgroup relative to the root of (any) controller. 
+ */ + public String getRelativePathForCGroup(String cGroupId); + /** * Gets the full path for the cgroup, given a controller and a cgroup id * @param controller - controller type for the cgroup diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java index ff5612133990e..0d71a9da83cf5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java @@ -147,9 +147,9 @@ static Map<CGroupController, String> initializeControllerPathsFromMtab( } else { String error = new StringBuffer("Mount point Based on mtab file: ") - .append(mtab) - .append(". Controller mount point not writable for: ") - .append(name).toString(); + .append(mtab) + .append(". 
Controller mount point not writable for: ") + .append(name).toString(); LOG.error(error); throw new ResourceHandlerException(error); @@ -271,6 +271,12 @@ public void mountCGroupController(CGroupController controller) } } + @Override + public String getRelativePathForCGroup(String cGroupId) { + return new StringBuffer(cGroupPrefix).append("/") + .append(cGroupId).toString(); + } + @Override public String getPathForCGroup(CGroupController controller, String cGroupId) { return new StringBuffer(getControllerPath(controller)) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java new file mode 100644 index 0000000000000..633fa668ae411 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java @@ -0,0 +1,148 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; + +import java.util.List; + +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; + [email protected] [email protected] +public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime { + private static final Log LOG = LogFactory + .getLog(DefaultLinuxContainerRuntime.class); + private Configuration conf; + private final PrivilegedOperationExecutor privilegedOperationExecutor; + + public DefaultLinuxContainerRuntime(PrivilegedOperationExecutor + privilegedOperationExecutor) { + this.privilegedOperationExecutor = privilegedOperationExecutor; + } + + @Override + public void initialize(Configuration conf) + throws ContainerExecutionException { + this.conf = conf; + } + + @Override + public void prepareContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + 
//nothing to do here at the moment. + } + + @Override + public void launchContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + Container container = ctx.getContainer(); + PrivilegedOperation launchOp = new PrivilegedOperation( + PrivilegedOperation.OperationType.LAUNCH_CONTAINER, (String) null); + + //All of these arguments are expected to be available in the runtime context + launchOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER), + ctx.getExecutionAttribute(USER), + Integer.toString(PrivilegedOperation. + RunAsUserCommand.LAUNCH_CONTAINER.getValue()), + ctx.getExecutionAttribute(APPID), + ctx.getExecutionAttribute(CONTAINER_ID_STR), + ctx.getExecutionAttribute(CONTAINER_WORK_DIR).toString(), + ctx.getExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH).toUri() + .getPath(), + ctx.getExecutionAttribute(NM_PRIVATE_TOKENS_PATH).toUri().getPath(), + ctx.getExecutionAttribute(PID_FILE_PATH).toString(), + StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, + ctx.getExecutionAttribute(LOCAL_DIRS)), + StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, + ctx.getExecutionAttribute(LOG_DIRS)), + ctx.getExecutionAttribute(RESOURCES_OPTIONS)); + + String tcCommandFile = ctx.getExecutionAttribute(TC_COMMAND_FILE); + + if (tcCommandFile != null) { + launchOp.appendArgs(tcCommandFile); + } + + //List<String> -> stored as List -> fetched/converted to List<String> + //we can't do better here thanks to type-erasure + @SuppressWarnings("unchecked") + List<String> prefixCommands = (List<String>) ctx.getExecutionAttribute( + CONTAINER_LAUNCH_PREFIX_COMMANDS); + + try { + privilegedOperationExecutor.executePrivilegedOperation(prefixCommands, + launchOp, null, container.getLaunchContext().getEnvironment(), + false); + } catch (PrivilegedOperationException e) { + LOG.warn("Launch container failed. 
Exception: ", e); + + throw new ContainerExecutionException("Launch container failed", e + .getExitCode(), e.getOutput(), e.getErrorOutput()); + } + } + + @Override + public void signalContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + Container container = ctx.getContainer(); + PrivilegedOperation signalOp = new PrivilegedOperation( + PrivilegedOperation.OperationType.SIGNAL_CONTAINER, (String) null); + + signalOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER), + ctx.getExecutionAttribute(USER), + Integer.toString(PrivilegedOperation.RunAsUserCommand + .SIGNAL_CONTAINER.getValue()), + ctx.getExecutionAttribute(PID), + Integer.toString(ctx.getExecutionAttribute(SIGNAL).getValue())); + + try { + PrivilegedOperationExecutor executor = PrivilegedOperationExecutor + .getInstance(conf); + + executor.executePrivilegedOperation(null, + signalOp, null, container.getLaunchContext().getEnvironment(), + false); + } catch (PrivilegedOperationException e) { + LOG.warn("Signal container failed. 
Exception: ", e); + + throw new ContainerExecutionException("Signal container failed", e + .getExitCode(), e.getOutput(), e.getErrorOutput()); + } + } + + @Override + public void reapContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java new file mode 100644 index 0000000000000..a59415fff3fcd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java @@ -0,0 +1,110 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; + +import java.util.Map; + [email protected] [email protected] +public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime { + private static final Log LOG = LogFactory + .getLog(DelegatingLinuxContainerRuntime.class); + private DefaultLinuxContainerRuntime defaultLinuxContainerRuntime; + private DockerLinuxContainerRuntime dockerLinuxContainerRuntime; + + @Override + public void initialize(Configuration conf) + throws ContainerExecutionException { + PrivilegedOperationExecutor privilegedOperationExecutor = + PrivilegedOperationExecutor.getInstance(conf); + + defaultLinuxContainerRuntime = new DefaultLinuxContainerRuntime( + privilegedOperationExecutor); + defaultLinuxContainerRuntime.initialize(conf); + dockerLinuxContainerRuntime = new DockerLinuxContainerRuntime( + privilegedOperationExecutor); + dockerLinuxContainerRuntime.initialize(conf); + } + + private LinuxContainerRuntime pickContainerRuntime(Container container) { + Map<String, String> env = container.getLaunchContext().getEnvironment(); + LinuxContainerRuntime runtime; + + if (DockerLinuxContainerRuntime.isDockerContainerRequested(env)){ + runtime = dockerLinuxContainerRuntime; + } else { + runtime = defaultLinuxContainerRuntime; + } + + if 
(LOG.isInfoEnabled()) { + LOG.info("Using container runtime: " + runtime.getClass() + .getSimpleName()); + } + + return runtime; + } + + @Override + public void prepareContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + Container container = ctx.getContainer(); + LinuxContainerRuntime runtime = pickContainerRuntime(container); + + runtime.prepareContainer(ctx); + } + + @Override + public void launchContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + Container container = ctx.getContainer(); + LinuxContainerRuntime runtime = pickContainerRuntime(container); + + runtime.launchContainer(ctx); + } + + @Override + public void signalContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + Container container = ctx.getContainer(); + LinuxContainerRuntime runtime = pickContainerRuntime(container); + + runtime.signalContainer(ctx); + } + + @Override + public void reapContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + Container container = ctx.getContainer(); + LinuxContainerRuntime runtime = pickContainerRuntime(container); + + runtime.reapContainer(ctx); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java new file mode 100644 index 0000000000000..2430a7878e82f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java @@ -0,0 +1,273 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * 
or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; 
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerClient; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRunCommand; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; + + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; + [email protected] [email protected] +public class DockerLinuxContainerRuntime implements LinuxContainerRuntime { + private static final Log LOG = LogFactory.getLog( + DockerLinuxContainerRuntime.class); + + @InterfaceAudience.Private + public static final String ENV_DOCKER_CONTAINER_IMAGE = + "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE"; + @InterfaceAudience.Private + public static final String ENV_DOCKER_CONTAINER_IMAGE_FILE = + "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE_FILE"; + @InterfaceAudience.Private + public static final String ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE = + "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE"; + + + private Configuration conf; + private DockerClient dockerClient; + private PrivilegedOperationExecutor privilegedOperationExecutor; + + public static boolean isDockerContainerRequested( + Map<String, String> env) { + if (env == null) { + return false; + } + + String type = env.get(ContainerRuntimeConstants.ENV_CONTAINER_TYPE); + + return type != null && type.equals("docker"); + } + + public DockerLinuxContainerRuntime(PrivilegedOperationExecutor + privilegedOperationExecutor) { + this.privilegedOperationExecutor = privilegedOperationExecutor; + } + + @Override + public void initialize(Configuration conf) + throws 
ContainerExecutionException { + this.conf = conf; + dockerClient = new DockerClient(conf); + } + + @Override + public void prepareContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + + } + + public void addCGroupParentIfRequired(String resourcesOptions, + String containerIdStr, DockerRunCommand runCommand) + throws ContainerExecutionException { + if (resourcesOptions.equals( + (PrivilegedOperation.CGROUP_ARG_PREFIX + PrivilegedOperation + .CGROUP_ARG_NO_TASKS))) { + if (LOG.isInfoEnabled()) { + LOG.info("no resource restrictions specified. not using docker's " + + "cgroup options"); + } + } else { + if (LOG.isInfoEnabled()) { + LOG.info("using docker's cgroups options"); + } + + try { + CGroupsHandler cGroupsHandler = ResourceHandlerModule + .getCGroupsHandler(conf); + String cGroupPath = "/" + cGroupsHandler.getRelativePathForCGroup( + containerIdStr); + + if (LOG.isInfoEnabled()) { + LOG.info("using cgroup parent: " + cGroupPath); + } + + runCommand.setCGroupParent(cGroupPath); + } catch (ResourceHandlerException e) { + LOG.warn("unable to use cgroups handler. 
Exception: ", e); + throw new ContainerExecutionException(e); + } + } + } + + + @Override + public void launchContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + Container container = ctx.getContainer(); + Map<String, String> environment = container.getLaunchContext() + .getEnvironment(); + String imageName = environment.get(ENV_DOCKER_CONTAINER_IMAGE); + + if (imageName == null) { + throw new ContainerExecutionException(ENV_DOCKER_CONTAINER_IMAGE + + " not set!"); + } + + String containerIdStr = container.getContainerId().toString(); + String runAsUser = ctx.getExecutionAttribute(RUN_AS_USER); + Path containerWorkDir = ctx.getExecutionAttribute(CONTAINER_WORK_DIR); + //List<String> -> stored as List -> fetched/converted to List<String> + //we can't do better here thanks to type-erasure + @SuppressWarnings("unchecked") + List<String> localDirs = ctx.getExecutionAttribute(LOCAL_DIRS); + @SuppressWarnings("unchecked") + List<String> logDirs = ctx.getExecutionAttribute(LOG_DIRS); + @SuppressWarnings("unchecked") + DockerRunCommand runCommand = new DockerRunCommand(containerIdStr, + runAsUser, imageName) + .detachOnRun() + .setContainerWorkDir(containerWorkDir.toString()) + .setNetworkType("host") + .addMountLocation("/etc/passwd", "/etc/password:ro"); + List<String> allDirs = new ArrayList<>(localDirs); + + allDirs.add(containerWorkDir.toString()); + allDirs.addAll(logDirs); + for (String dir: allDirs) { + runCommand.addMountLocation(dir, dir); + } + + String resourcesOpts = ctx.getExecutionAttribute(RESOURCES_OPTIONS); + + /** Disabling docker's cgroup parent support for the time being. Docker + * needs to use a more recent libcontainer that supports net_cls. In + * addition we also need to revisit current cgroup creation in YARN. 
+ */ + //addCGroupParentIfRequired(resourcesOpts, containerIdStr, runCommand); + + Path nmPrivateContainerScriptPath = ctx.getExecutionAttribute( + NM_PRIVATE_CONTAINER_SCRIPT_PATH); + + String disableOverride = environment.get( + ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE); + + if (disableOverride != null && disableOverride.equals("true")) { + if (LOG.isInfoEnabled()) { + LOG.info("command override disabled"); + } + } else { + List<String> overrideCommands = new ArrayList<>(); + Path launchDst = + new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT); + + overrideCommands.add("bash"); + overrideCommands.add(launchDst.toUri().getPath()); + runCommand.setOverrideCommandWithArgs(overrideCommands); + } + + String commandFile = dockerClient.writeCommandToTempFile(runCommand, + containerIdStr); + PrivilegedOperation launchOp = new PrivilegedOperation( + PrivilegedOperation.OperationType.LAUNCH_DOCKER_CONTAINER, (String) + null); + + launchOp.appendArgs(runAsUser, ctx.getExecutionAttribute(USER), + Integer.toString(PrivilegedOperation + .RunAsUserCommand.LAUNCH_DOCKER_CONTAINER.getValue()), + ctx.getExecutionAttribute(APPID), + containerIdStr, containerWorkDir.toString(), + nmPrivateContainerScriptPath.toUri().getPath(), + ctx.getExecutionAttribute(NM_PRIVATE_TOKENS_PATH).toUri().getPath(), + ctx.getExecutionAttribute(PID_FILE_PATH).toString(), + StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, + localDirs), + StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, + logDirs), + commandFile, + resourcesOpts); + + String tcCommandFile = ctx.getExecutionAttribute(TC_COMMAND_FILE); + + if (tcCommandFile != null) { + launchOp.appendArgs(tcCommandFile); + } + + try { + privilegedOperationExecutor.executePrivilegedOperation(null, + launchOp, null, container.getLaunchContext().getEnvironment(), + false); + } catch (PrivilegedOperationException e) { + LOG.warn("Launch container failed. 
Exception: ", e); + + throw new ContainerExecutionException("Launch container failed", e + .getExitCode(), e.getOutput(), e.getErrorOutput()); + } + } + + @Override + public void signalContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + Container container = ctx.getContainer(); + PrivilegedOperation signalOp = new PrivilegedOperation( + PrivilegedOperation.OperationType.SIGNAL_CONTAINER, (String) null); + + signalOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER), + ctx.getExecutionAttribute(USER), + Integer.toString(PrivilegedOperation + .RunAsUserCommand.SIGNAL_CONTAINER.getValue()), + ctx.getExecutionAttribute(PID), + Integer.toString(ctx.getExecutionAttribute(SIGNAL).getValue())); + + try { + PrivilegedOperationExecutor executor = PrivilegedOperationExecutor + .getInstance(conf); + + executor.executePrivilegedOperation(null, + signalOp, null, container.getLaunchContext().getEnvironment(), + false); + } catch (PrivilegedOperationException e) { + LOG.warn("Signal container failed. 
Exception: ", e); + + throw new ContainerExecutionException("Signal container failed", e + .getExitCode(), e.getOutput(), e.getErrorOutput()); + } + } + + @Override + public void reapContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java new file mode 100644 index 0000000000000..38aea9d7ba899 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java @@ -0,0 +1,38 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime; + +/** Linux-specific container runtime implementations must implement this + * interface. + */ + [email protected] [email protected] +public interface LinuxContainerRuntime extends ContainerRuntime { + void initialize(Configuration conf) throws ContainerExecutionException; +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java new file mode 100644 index 0000000000000..d2069a9356697 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java @@ -0,0 +1,69 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext.Attribute; + +import java.util.List; +import java.util.Map; + +public final class LinuxContainerRuntimeConstants { + private LinuxContainerRuntimeConstants() { + } + + public static final Attribute<Map> LOCALIZED_RESOURCES = Attribute + .attribute(Map.class, "localized_resources"); + public static final Attribute<List> CONTAINER_LAUNCH_PREFIX_COMMANDS = + Attribute.attribute(List.class, "container_launch_prefix_commands"); + public static final Attribute<String> RUN_AS_USER = + Attribute.attribute(String.class, "run_as_user"); + public static final Attribute<String> USER = Attribute.attribute(String.class, + "user"); + public static final Attribute<String> APPID = + Attribute.attribute(String.class, "appid"); + public static final Attribute<String> CONTAINER_ID_STR = Attribute + .attribute(String.class, "container_id_str"); + public static final Attribute<Path> CONTAINER_WORK_DIR = Attribute + .attribute(Path.class, "container_work_dir"); + public static final Attribute<Path> NM_PRIVATE_CONTAINER_SCRIPT_PATH = + Attribute.attribute(Path.class, "nm_private_container_script_path"); + public static final Attribute<Path> NM_PRIVATE_TOKENS_PATH = Attribute + .attribute(Path.class, "nm_private_tokens_path"); + public static final Attribute<Path> PID_FILE_PATH = 
Attribute.attribute( + Path.class, "pid_file_path"); + public static final Attribute<List> LOCAL_DIRS = Attribute.attribute( + List.class, "local_dirs"); + public static final Attribute<List> LOG_DIRS = Attribute.attribute( + List.class, "log_dirs"); + public static final Attribute<String> RESOURCES_OPTIONS = Attribute.attribute( + String.class, "resources_options"); + public static final Attribute<String> TC_COMMAND_FILE = Attribute.attribute( + String.class, "tc_command_file"); + public static final Attribute<String> CGROUP_RELATIVE_PATH = Attribute + .attribute(String.class, "cgroup_relative_path"); + + public static final Attribute<String> PID = Attribute.attribute( + String.class, "pid"); + public static final Attribute<ContainerExecutor.Signal> SIGNAL = Attribute + .attribute(ContainerExecutor.Signal.class, "signal"); +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java new file mode 100644 index 0000000000000..faf955f8eea61 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java @@ -0,0 +1,82 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.io.Writer; + [email protected] [email protected] +public final class DockerClient { + private static final Log LOG = LogFactory.getLog(DockerClient.class); + private static final String TMP_FILE_PREFIX = "docker."; + private static final String TMP_FILE_SUFFIX = ".cmd"; + private final String tmpDirPath; + + public DockerClient(Configuration conf) throws ContainerExecutionException { + + String tmpDirBase = conf.get("hadoop.tmp.dir"); + if (tmpDirBase == null) { + throw new ContainerExecutionException("hadoop.tmp.dir not set!"); + } + tmpDirPath = tmpDirBase + "/nm-docker-cmds"; + + File tmpDir = new File(tmpDirPath); + if (!(tmpDir.exists() || tmpDir.mkdirs())) { + LOG.warn("Unable to create directory: " + tmpDirPath); + throw new ContainerExecutionException("Unable to create directory: " + + tmpDirPath); + } + } + + public 
String writeCommandToTempFile(DockerCommand cmd, String filePrefix) + throws ContainerExecutionException { + File dockerCommandFile = null; + try { + dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix, + TMP_FILE_SUFFIX, new + File(tmpDirPath)); + + Writer writer = new OutputStreamWriter(new FileOutputStream(dockerCommandFile), + "UTF-8"); + PrintWriter printWriter = new PrintWriter(writer); + printWriter.print(cmd.getCommandWithArguments()); + printWriter.close(); + + return dockerCommandFile.getAbsolutePath(); + } catch (IOException e) { + LOG.warn("Unable to write docker command to temporary file!"); + throw new ContainerExecutionException(e); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java new file mode 100644 index 0000000000000..3b76a5cca40ce --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java @@ -0,0 +1,66 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.StringUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + [email protected] [email protected] + +/** Represents a docker sub-command + * e.g 'run', 'load', 'inspect' etc., + */ + +public abstract class DockerCommand { + private final String command; + private final List<String> commandWithArguments; + + protected DockerCommand(String command) { + this.command = command; + this.commandWithArguments = new ArrayList<>(); + commandWithArguments.add(command); + } + + /** Returns the docker sub-command string being used + * e.g 'run' + */ + public final String getCommandOption() { + return this.command; + } + + /** Add command commandWithArguments - this method is only meant for use by + * sub-classes + * @param arguments to be added + */ + protected final void addCommandArguments(String... 
arguments) { + this.commandWithArguments.addAll(Arrays.asList(arguments)); + } + + public String getCommandWithArguments() { + return StringUtils.join(" ", commandWithArguments); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerLoadCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerLoadCommand.java new file mode 100644 index 0000000000000..e4d92e08bc168 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerLoadCommand.java @@ -0,0 +1,30 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker; + +public class DockerLoadCommand extends DockerCommand { + private static final String LOAD_COMMAND = "load"; + + public DockerLoadCommand(String localImageFile) { + super(LOAD_COMMAND); + super.addCommandArguments("--i=" + localImageFile); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java new file mode 100644 index 0000000000000..f9a890e9d3041 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java @@ -0,0 +1,107 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker; + +import org.apache.hadoop.util.StringUtils; + +import java.util.ArrayList; +import java.util.List; + +public class DockerRunCommand extends DockerCommand { + private static final String RUN_COMMAND = "run"; + private final String image; + private List<String> overrrideCommandWithArgs; + + /** The following are mandatory: */ + public DockerRunCommand(String containerId, String user, String image) { + super(RUN_COMMAND); + super.addCommandArguments("--name=" + containerId, "--user=" + user); + this.image = image; + } + + public DockerRunCommand removeContainerOnExit() { + super.addCommandArguments("--rm"); + return this; + } + + public DockerRunCommand detachOnRun() { + super.addCommandArguments("-d"); + return this; + } + + public DockerRunCommand setContainerWorkDir(String workdir) { + super.addCommandArguments("--workdir=" + workdir); + return this; + } + + public DockerRunCommand setNetworkType(String type) { + super.addCommandArguments("--net=" + type); + return this; + } + + public DockerRunCommand addMountLocation(String sourcePath, String + destinationPath) { + super.addCommandArguments("-v", sourcePath + ":" + destinationPath); + return this; + } + + public DockerRunCommand setCGroupParent(String parentPath) { + super.addCommandArguments("--cgroup-parent=" + parentPath); + return this; + } + + public DockerRunCommand addDevice(String sourceDevice, String + destinationDevice) { + super.addCommandArguments("--device=" + sourceDevice + ":" + + destinationDevice); + return this; + } + + public DockerRunCommand enableDetach() { + super.addCommandArguments("--detach=true"); + return this; + } + + public DockerRunCommand disableDetach() { + super.addCommandArguments("--detach=false"); + return this; + } + + public DockerRunCommand setOverrideCommandWithArgs( + List<String> overrideCommandWithArgs) { + this.overrrideCommandWithArgs = overrideCommandWithArgs; + 
return this; + } + + @Override + public String getCommandWithArguments() { + List<String> argList = new ArrayList<>(); + + argList.add(super.getCommandWithArguments()); + argList.add(image); + + if (overrrideCommandWithArgs != null) { + argList.addAll(overrrideCommandWithArgs); + } + + return StringUtils.join(" ", argList); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java new file mode 100644 index 0000000000000..1fbece2205e27 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java @@ -0,0 +1,85 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.exceptions.YarnException; + +/** Exception caused in a container runtime impl. 'Runtime' is not used in + * the class name to avoid confusion with a java RuntimeException + */ + [email protected] [email protected] +public class ContainerExecutionException extends YarnException { + private static final long serialVersionUID = 1L; + private static final Integer EXIT_CODE_UNSET = -1; + private static final String OUTPUT_UNSET = "<unknown>"; + + private Integer exitCode; + private String output; + private String errorOutput; + + public ContainerExecutionException(String message) { + super(message); + exitCode = EXIT_CODE_UNSET; + output = OUTPUT_UNSET; + errorOutput = OUTPUT_UNSET; + } + + public ContainerExecutionException(Throwable throwable) { + super(throwable); + exitCode = EXIT_CODE_UNSET; + output = OUTPUT_UNSET; + errorOutput = OUTPUT_UNSET; + } + + + public ContainerExecutionException(String message, Integer exitCode, String + output, String errorOutput) { + super(message); + this.exitCode = exitCode; + this.output = output; + this.errorOutput = errorOutput; + } + + public ContainerExecutionException(Throwable cause, Integer exitCode, String + output, String errorOutput) { + super(cause); + this.exitCode = exitCode; + this.output = output; + this.errorOutput = errorOutput; + } + + public Integer getExitCode() { + return exitCode; + } + + public String getOutput() { + return output; + } + + public String getErrorOutput() { + return errorOutput; + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java new file mode 100644 index 0000000000000..e05f3fcb12c52 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java @@ -0,0 +1,50 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** An abstraction for various container runtime implementations. Examples + * include Process Tree, Docker, Appc runtimes etc., These implementations + * are meant for low-level OS container support - dependencies on + * higher-level nodemananger constructs should be avoided. 
+ */ + [email protected] [email protected] +public interface ContainerRuntime { + /** Prepare a container to be ready for launch */ + void prepareContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException; + + /** Launch a container. */ + void launchContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException; + + /** Signal a container - request to terminate, status check etc., */ + void signalContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException; + + /** Any container cleanup that may be required. */ + void reapContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException; +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java new file mode 100644 index 0000000000000..4473856a2d6ef --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java @@ -0,0 +1,33 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime; + +import org.apache.hadoop.classification.InterfaceAudience.Private; + +public class ContainerRuntimeConstants { + + /* Switch container runtimes. Work in progress: These + * parameters may be changed/removed in the future. */ + + @Private + public static final String ENV_CONTAINER_TYPE = + "YARN_CONTAINER_RUNTIME_TYPE"; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java new file mode 100644 index 0000000000000..4194b99300683 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java @@ -0,0 +1,105 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + [email protected] [email protected] +public final class ContainerRuntimeContext { + private final Container container; + private final Map<Attribute<?>, Object> executionAttributes; + + /** An attribute class that attempts to provide better type safety as compared + * with using a map of string to object. 
+ * @param <T> + */ + public static final class Attribute<T> { + private final Class<T> valueClass; + private final String id; + + private Attribute(Class<T> valueClass, String id) { + this.valueClass = valueClass; + this.id = id; + } + + @Override + public int hashCode() { + return valueClass.hashCode() + 31 * id.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || !(obj instanceof Attribute)){ + return false; + } + + Attribute<?> attribute = (Attribute<?>) obj; + + return valueClass.equals(attribute.valueClass) && id.equals(attribute.id); + } + public static <T> Attribute<T> attribute(Class<T> valueClass, String id) { + return new Attribute<T>(valueClass, id); + } + } + + public static final class Builder { + private final Container container; + private Map<Attribute<?>, Object> executionAttributes; + + public Builder(Container container) { + executionAttributes = new HashMap<>(); + this.container = container; + } + + public <E> Builder setExecutionAttribute(Attribute<E> attribute, E value) { + this.executionAttributes.put(attribute, attribute.valueClass.cast(value)); + return this; + } + + public ContainerRuntimeContext build() { + return new ContainerRuntimeContext(this); + } + } + + private ContainerRuntimeContext(Builder builder) { + this.container = builder.container; + this.executionAttributes = builder.executionAttributes; + } + + public Container getContainer() { + return this.container; + } + + public Map<Attribute<?>, Object> getExecutionAttributes() { + return Collections.unmodifiableMap(this.executionAttributes); + } + + public <E> E getExecutionAttribute(Attribute<E> attribute) { + return attribute.valueClass.cast(executionAttributes.get(attribute)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerLivenessContext.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerLivenessContext.java index acadae9e957c1..43113efb88dd6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerLivenessContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerLivenessContext.java @@ -22,6 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; /** * Encapsulates information required for container liveness checks. @@ -30,16 +31,23 @@ @InterfaceAudience.Private @InterfaceStability.Unstable public final class ContainerLivenessContext { + private final Container container; private final String user; private final String pid; public static final class Builder { + private Container container; private String user; private String pid; public Builder() { } + public Builder setContainer(Container container) { + this.container = container; + return this; + } + public Builder setUser(String user) { this.user = user; return this; @@ -56,10 +64,15 @@ public ContainerLivenessContext build() { } private ContainerLivenessContext(Builder builder) { + this.container = builder.container; this.user = builder.user; this.pid = builder.pid; } + public Container getContainer() { + return this.container; + } + public String getUser() { return this.user; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerReacquisitionContext.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerReacquisitionContext.java index 8adcab7bf4160..d93cdafd768e2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerReacquisitionContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerReacquisitionContext.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; /** * Encapsulates information required for container reacquisition. @@ -31,16 +32,23 @@ @InterfaceAudience.Private @InterfaceStability.Unstable public final class ContainerReacquisitionContext { + private final Container container; private final String user; private final ContainerId containerId; public static final class Builder { + private Container container; private String user; private ContainerId containerId; public Builder() { } + public Builder setContainer(Container container) { + this.container = container; + return this; + } + public Builder setUser(String user) { this.user = user; return this; @@ -57,10 +65,15 @@ public ContainerReacquisitionContext build() { } private ContainerReacquisitionContext(Builder builder) { + this.container = builder.container; this.user = builder.user; this.containerId = builder.containerId; } + public Container getContainer() { + return this.container; + } + public String getUser() { return this.user; } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java index cc40af534291a..56b571bb23cc1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; /** * Encapsulates information required for container signaling. 
@@ -31,11 +32,13 @@ @InterfaceAudience.Private @InterfaceStability.Unstable public final class ContainerSignalContext { + private final Container container; private final String user; private final String pid; private final Signal signal; public static final class Builder { + private Container container; private String user; private String pid; private Signal signal; @@ -43,6 +46,11 @@ public static final class Builder { public Builder() { } + public Builder setContainer(Container container) { + this.container = container; + return this; + } + public Builder setUser(String user) { this.user = user; return this; @@ -64,11 +72,16 @@ public ContainerSignalContext build() { } private ContainerSignalContext(Builder builder) { + this.container = builder.container; this.user = builder.user; this.pid = builder.pid; this.signal = builder.signal; } + public Container getContainer() { + return this.container; + } + public String getUser() { return this.user; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java index 7dfff02626af4..ffcc519f8b7ba 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java @@ -25,7 +25,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import java.util.Collections; import java.util.List; +import java.util.Map; /** * Encapsulates information required for 
starting/launching containers. @@ -35,6 +37,7 @@ @InterfaceStability.Unstable public final class ContainerStartContext { private final Container container; + private final Map<Path, List<String>> localizedResources; private final Path nmPrivateContainerScriptPath; private final Path nmPrivateTokensPath; private final String user; @@ -45,6 +48,7 @@ public final class ContainerStartContext { public static final class Builder { private Container container; + private Map<Path, List<String>> localizedResources; private Path nmPrivateContainerScriptPath; private Path nmPrivateTokensPath; private String user; @@ -61,6 +65,12 @@ public Builder setContainer(Container container) { return this; } + public Builder setLocalizedResources(Map<Path, + List<String>> localizedResources) { + this.localizedResources = localizedResources; + return this; + } + public Builder setNmPrivateContainerScriptPath( Path nmPrivateContainerScriptPath) { this.nmPrivateContainerScriptPath = nmPrivateContainerScriptPath; @@ -104,6 +114,7 @@ public ContainerStartContext build() { private ContainerStartContext(Builder builder) { this.container = builder.container; + this.localizedResources = builder.localizedResources; this.nmPrivateContainerScriptPath = builder.nmPrivateContainerScriptPath; this.nmPrivateTokensPath = builder.nmPrivateTokensPath; this.user = builder.user; @@ -117,6 +128,14 @@ public Container getContainer() { return this.container; } + public Map<Path, List<String>> getLocalizedResources() { + if (this.localizedResources != null) { + return Collections.unmodifiableMap(this.localizedResources); + } else { + return null; + } + } + public Path getNmPrivateContainerScriptPath() { return this.nmPrivateContainerScriptPath; } @@ -138,10 +157,10 @@ public Path getContainerWorkDir() { } public List<String> getLocalDirs() { - return this.localDirs; + return Collections.unmodifiableList(this.localDirs); } public List<String> getLogDirs() { - return this.logDirs; + return 
Collections.unmodifiableList(this.logDirs); } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java index 30c0392b69914..0ef788bcd9819 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java @@ -32,6 +32,8 @@ import java.io.IOException; import java.io.LineNumberReader; import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -50,6 +52,10 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DefaultLinuxContainerRuntime; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext; import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext; @@ -61,11 +67,19 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; + public class TestLinuxContainerExecutorWithMocks { private static final Log LOG = LogFactory .getLog(TestLinuxContainerExecutorWithMocks.class); + private static final String MOCK_EXECUTOR = + "./src/test/resources/mock-container-executor"; + private static final String MOCK_EXECUTOR_WITH_ERROR = + "./src/test/resources/mock-container-executer-with-error"; + + private String tmpMockExecutor; private LinuxContainerExecutor mockExec = null; private final File mockParamFile = new File("./params.txt"); private LocalDirsHandlerService dirsHandler; @@ -88,20 +102,42 @@ private List<String> readMockParams() throws IOException { reader.close(); return ret; } - + + private void setupMockExecutor(String executorPath, Configuration conf) + throws IOException { + //we'll always use the tmpMockExecutor - since + // PrivilegedOperationExecutor can only be initialized once. 
+ + Files.copy(Paths.get(executorPath), Paths.get(tmpMockExecutor), + REPLACE_EXISTING); + + File executor = new File(tmpMockExecutor); + + if (!FileUtil.canExecute(executor)) { + FileUtil.setExecutable(executor, true); + } + String executorAbsolutePath = executor.getAbsolutePath(); + conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, + executorAbsolutePath); + } + @Before - public void setup() { + public void setup() throws IOException, ContainerExecutionException { assumeTrue(!Path.WINDOWS); - File f = new File("./src/test/resources/mock-container-executor"); - if(!FileUtil.canExecute(f)) { - FileUtil.setExecutable(f, true); - } - String executorPath = f.getAbsolutePath(); + + tmpMockExecutor = System.getProperty("test.build.data") + + "/tmp-mock-container-executor"; + Configuration conf = new Configuration(); - conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath); - mockExec = new LinuxContainerExecutor(); + LinuxContainerRuntime linuxContainerRuntime; + + setupMockExecutor(MOCK_EXECUTOR, conf); + linuxContainerRuntime = new DefaultLinuxContainerRuntime( + PrivilegedOperationExecutor.getInstance(conf)); dirsHandler = new LocalDirsHandlerService(); dirsHandler.init(conf); + linuxContainerRuntime.initialize(conf); + mockExec = new LinuxContainerExecutor(linuxContainerRuntime); mockExec.setConf(conf); } @@ -114,7 +150,7 @@ public void tearDown() { public void testContainerLaunch() throws IOException { String appSubmitter = "nobody"; String cmd = String.valueOf( - LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue()); + PrivilegedOperation.RunAsUserCommand.LAUNCH_CONTAINER.getValue()); String appId = "APP_ID"; String containerId = "CONTAINER_ID"; Container container = mock(Container.class); @@ -161,13 +197,8 @@ public void testContainerLaunch() throws IOException { public void testContainerLaunchWithPriority() throws IOException { // set the scheduler priority to make sure still works with nice -n prio - File f = new 
File("./src/test/resources/mock-container-executor"); - if (!FileUtil.canExecute(f)) { - FileUtil.setExecutable(f, true); - } - String executorPath = f.getAbsolutePath(); Configuration conf = new Configuration(); - conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath); + setupMockExecutor(MOCK_EXECUTOR, conf); conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, 2); mockExec.setConf(conf); @@ -231,20 +262,25 @@ public void testStartLocalizer() throws IOException { @Test - public void testContainerLaunchError() throws IOException { + public void testContainerLaunchError() + throws IOException, ContainerExecutionException { // reinitialize executer - File f = new File("./src/test/resources/mock-container-executer-with-error"); - if (!FileUtil.canExecute(f)) { - FileUtil.setExecutable(f, true); - } - String executorPath = f.getAbsolutePath(); Configuration conf = new Configuration(); - conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath); + setupMockExecutor(MOCK_EXECUTOR_WITH_ERROR, conf); conf.set(YarnConfiguration.NM_LOCAL_DIRS, "file:///bin/echo"); conf.set(YarnConfiguration.NM_LOG_DIRS, "file:///dev/null"); - mockExec = spy(new LinuxContainerExecutor()); + + LinuxContainerExecutor exec; + LinuxContainerRuntime linuxContainerRuntime = new + DefaultLinuxContainerRuntime(PrivilegedOperationExecutor.getInstance + (conf)); + + linuxContainerRuntime.initialize(conf); + exec = new LinuxContainerExecutor(linuxContainerRuntime); + + mockExec = spy(exec); doAnswer( new Answer() { @Override @@ -263,7 +299,7 @@ public Object answer(InvocationOnMock invocationOnMock) String appSubmitter = "nobody"; String cmd = String - .valueOf(LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue()); + .valueOf(PrivilegedOperation.RunAsUserCommand.LAUNCH_CONTAINER.getValue()); String appId = "APP_ID"; String containerId = "CONTAINER_ID"; Container container = mock(Container.class); @@ -299,6 +335,7 @@ public Object 
answer(InvocationOnMock invocationOnMock) Path pidFile = new Path(workDir, "pid.txt"); mockExec.activateContainer(cId, pidFile); + int ret = mockExec.launchContainer(new ContainerStartContext.Builder() .setContainer(container) .setNmPrivateContainerScriptPath(scriptPath) @@ -330,16 +367,23 @@ public void testInit() throws Exception { } - @Test public void testContainerKill() throws IOException { String appSubmitter = "nobody"; String cmd = String.valueOf( - LinuxContainerExecutor.Commands.SIGNAL_CONTAINER.getValue()); + PrivilegedOperation.RunAsUserCommand.SIGNAL_CONTAINER.getValue()); ContainerExecutor.Signal signal = ContainerExecutor.Signal.QUIT; String sigVal = String.valueOf(signal.getValue()); - + + Container container = mock(Container.class); + ContainerId cId = mock(ContainerId.class); + ContainerLaunchContext context = mock(ContainerLaunchContext.class); + + when(container.getContainerId()).thenReturn(cId); + when(container.getLaunchContext()).thenReturn(context); + mockExec.signalContainer(new ContainerSignalContext.Builder() + .setContainer(container) .setUser(appSubmitter) .setPid("1000") .setSignal(signal) @@ -353,7 +397,7 @@ public void testContainerKill() throws IOException { public void testDeleteAsUser() throws IOException { String appSubmitter = "nobody"; String cmd = String.valueOf( - LinuxContainerExecutor.Commands.DELETE_AS_USER.getValue()); + PrivilegedOperation.RunAsUserCommand.DELETE_AS_USER.getValue()); Path dir = new Path("/tmp/testdir"); Path testFile = new Path("testfile"); Path baseDir0 = new Path("/grid/0/BaseDir"); @@ -395,14 +439,9 @@ public void testDeleteAsUser() throws IOException { Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER, appSubmitter, cmd, "", baseDir0.toString(), baseDir1.toString()), readMockParams()); - - File f = new File("./src/test/resources/mock-container-executer-with-error"); - if (!FileUtil.canExecute(f)) { - FileUtil.setExecutable(f, true); - } - String executorPath = f.getAbsolutePath(); 
+ ; Configuration conf = new Configuration(); - conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath); + setupMockExecutor(MOCK_EXECUTOR, conf); mockExec.setConf(conf); mockExec.deleteAsUser(new DeletionAsUserContext.Builder() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java index 8f297ede75a75..849dbabf20ae2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java @@ -118,7 +118,7 @@ public void testExecutionCommand() { PrivilegedOperationExecutor exec = PrivilegedOperationExecutor .getInstance(confWithExecutorPath); PrivilegedOperation op = new PrivilegedOperation(PrivilegedOperation - .OperationType.LAUNCH_CONTAINER, (String) null); + .OperationType.TC_MODIFY_STATE, (String) null); String[] cmdArray = exec.getPrivilegedOperationExecutionCommand(null, op); //No arguments added - so the resulting array should consist of @@ -127,10 +127,8 @@ public void testExecutionCommand() { Assert.assertEquals(customExecutorPath, cmdArray[0]); Assert.assertEquals(op.getOperationType().getOption(), cmdArray[1]); - //other (dummy) arguments to launch container - String[] additionalArgs = { "test_user", "yarn", "1", "app_01", - "container_01", "workdir", "launch_script.sh", "tokens", "pidfile", - 
"nm-local-dirs", "nm-log-dirs", "resource-spec" }; + //other (dummy) arguments to tc modify state + String[] additionalArgs = { "cmd_file_1", "cmd_file_2", "cmd_file_3"}; op.appendArgs(additionalArgs); cmdArray = exec.getPrivilegedOperationExecutionCommand(null, op); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java new file mode 100644 index 0000000000000..31ed4963341fb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java @@ -0,0 +1,219 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.*; + +public class TestDockerContainerRuntime { + private Configuration conf; + PrivilegedOperationExecutor mockExecutor; + String containerId; + Container container; + ContainerId cId; + ContainerLaunchContext context; + HashMap<String, String> env; + String image; + String runAsUser; + String user; + 
String appId; + String containerIdStr = containerId; + Path containerWorkDir; + Path nmPrivateContainerScriptPath; + Path nmPrivateTokensPath; + Path pidFilePath; + List<String> localDirs; + List<String> logDirs; + String resourcesOptions; + + @Before + public void setup() { + String tmpPath = new StringBuffer(System.getProperty("test.build.data")) + .append + ('/').append("hadoop.tmp.dir").toString(); + + conf = new Configuration(); + conf.set("hadoop.tmp.dir", tmpPath); + + mockExecutor = Mockito + .mock(PrivilegedOperationExecutor.class); + containerId = "container_id"; + container = mock(Container.class); + cId = mock(ContainerId.class); + context = mock(ContainerLaunchContext.class); + env = new HashMap<String, String>(); + image = "busybox:latest"; + + env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_IMAGE, image); + when(container.getContainerId()).thenReturn(cId); + when(cId.toString()).thenReturn(containerId); + when(container.getLaunchContext()).thenReturn(context); + when(context.getEnvironment()).thenReturn(env); + + runAsUser = "run_as_user"; + user = "user"; + appId = "app_id"; + containerIdStr = containerId; + containerWorkDir = new Path("/test_container_work_dir"); + nmPrivateContainerScriptPath = new Path("/test_script_path"); + nmPrivateTokensPath = new Path("/test_private_tokens_path"); + pidFilePath = new Path("/test_pid_file_path"); + localDirs = new ArrayList<>(); + logDirs = new ArrayList<>(); + resourcesOptions = "cgroups:none"; + + localDirs.add("/test_local_dir"); + logDirs.add("/test_log_dir"); + } + + @Test + public void testSelectDockerContainerType() { + Map<String, String> envDockerType = new HashMap<>(); + Map<String, String> envOtherType = new HashMap<>(); + + envDockerType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, "docker"); + envOtherType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, "other"); + + Assert.assertEquals(false, DockerLinuxContainerRuntime + .isDockerContainerRequested(null)); + 
Assert.assertEquals(true, DockerLinuxContainerRuntime + .isDockerContainerRequested(envDockerType)); + Assert.assertEquals(false, DockerLinuxContainerRuntime + .isDockerContainerRequested(envOtherType)); + } + + @Test + @SuppressWarnings("unchecked") + public void testDockerContainerLaunch() + throws ContainerExecutionException, PrivilegedOperationException, + IOException { + DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime( + mockExecutor); + runtime.initialize(conf); + + ContainerRuntimeContext.Builder builder = new ContainerRuntimeContext + .Builder(container); + + builder.setExecutionAttribute(RUN_AS_USER, runAsUser) + .setExecutionAttribute(USER, user) + .setExecutionAttribute(APPID, appId) + .setExecutionAttribute(CONTAINER_ID_STR, containerIdStr) + .setExecutionAttribute(CONTAINER_WORK_DIR, containerWorkDir) + .setExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH, + nmPrivateContainerScriptPath) + .setExecutionAttribute(NM_PRIVATE_TOKENS_PATH, nmPrivateTokensPath) + .setExecutionAttribute(PID_FILE_PATH, pidFilePath) + .setExecutionAttribute(LOCAL_DIRS, localDirs) + .setExecutionAttribute(LOG_DIRS, logDirs) + .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions); + + runtime.launchContainer(builder.build()); + + ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass( + PrivilegedOperation.class); + + //single invocation expected + //due to type erasure + mocking, this verification requires a suppress + // warning annotation on the entire method + verify(mockExecutor, times(1)) + .executePrivilegedOperation(anyList(), opCaptor.capture(), any( + File.class), any(Map.class), eq(false)); + + PrivilegedOperation op = opCaptor.getValue(); + + Assert.assertEquals(PrivilegedOperation.OperationType + .LAUNCH_DOCKER_CONTAINER, op.getOperationType()); + + List<String> args = op.getArguments(); + + //This invocation of container-executor should use 13 arguments in a + // specific order (sigh.) 
+ Assert.assertEquals(13, args.size()); + + //verify arguments + Assert.assertEquals(runAsUser, args.get(0)); + Assert.assertEquals(user, args.get(1)); + Assert.assertEquals(Integer.toString(PrivilegedOperation.RunAsUserCommand + .LAUNCH_DOCKER_CONTAINER.getValue()), args.get(2)); + Assert.assertEquals(appId, args.get(3)); + Assert.assertEquals(containerId, args.get(4)); + Assert.assertEquals(containerWorkDir.toString(), args.get(5)); + Assert.assertEquals(nmPrivateContainerScriptPath.toUri() + .toString(), args.get(6)); + Assert.assertEquals(nmPrivateTokensPath.toUri().getPath(), args.get(7)); + Assert.assertEquals(pidFilePath.toString(), args.get(8)); + Assert.assertEquals(localDirs.get(0), args.get(9)); + Assert.assertEquals(logDirs.get(0), args.get(10)); + Assert.assertEquals(resourcesOptions, args.get(12)); + + String dockerCommandFile = args.get(11); + + //This is the expected docker invocation for this case + StringBuffer expectedCommandTemplate = new StringBuffer("run --name=%1$s ") + .append("--user=%2$s -d ") + .append("--workdir=%3$s ") + .append("--net=host -v /etc/passwd:/etc/password:ro ") + .append("-v %4$s:%4$s ") + .append("-v %5$s:%5$s ") + .append("-v %6$s:%6$s ") + .append("%7$s ") + .append("bash %8$s/launch_container.sh"); + + String expectedCommand = String.format(expectedCommandTemplate.toString(), + containerId, runAsUser, containerWorkDir, localDirs.get(0), + containerWorkDir, logDirs.get(0), image, containerWorkDir); + + List<String> dockerCommands = Files.readAllLines(Paths.get + (dockerCommandFile), Charset.forName("UTF-8")); + + Assert.assertEquals(1, dockerCommands.size()); + Assert.assertEquals(expectedCommand, dockerCommands.get(0)); + } +}
64bed0460e0bab9157e71192a18b2285bf1ef536
hadoop
YARN-1063. Augmented Hadoop common winutils to have- the ability to create containers as domain users. Contributed by Remus- Rusanu. Committed as a YARN patch even though all the code changes are in- common.--(cherry picked from commit 5ca97f1e60b8a7848f6eadd15f6c08ed390a8cda)-
a
https://github.com/apache/hadoop
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/chown.c b/hadoop-common-project/hadoop-common/src/main/winutils/chown.c index bc2aefc79eeb1..1be81216974a5 100644 --- a/hadoop-common-project/hadoop-common/src/main/winutils/chown.c +++ b/hadoop-common-project/hadoop-common/src/main/winutils/chown.c @@ -63,11 +63,11 @@ static DWORD ChangeFileOwnerBySid(__in LPCWSTR path, // SID is not contained in the caller's token, and have the SE_GROUP_OWNER // permission enabled. // - if (!EnablePrivilege(L"SeTakeOwnershipPrivilege")) + if (EnablePrivilege(L"SeTakeOwnershipPrivilege") != ERROR_SUCCESS) { fwprintf(stdout, L"INFO: The user does not have SeTakeOwnershipPrivilege.\n"); } - if (!EnablePrivilege(L"SeRestorePrivilege")) + if (EnablePrivilege(L"SeRestorePrivilege") != ERROR_SUCCESS) { fwprintf(stdout, L"INFO: The user does not have SeRestorePrivilege.\n"); } diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h b/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h index 1c0007a6da922..bae754c9b6e25 100644 --- a/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h +++ b/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h @@ -27,6 +27,8 @@ #include <accctrl.h> #include <strsafe.h> #include <lm.h> +#include <ntsecapi.h> +#include <userenv.h> enum EXIT_CODE { @@ -153,6 +155,26 @@ DWORD ChangeFileModeByMask(__in LPCWSTR path, INT mode); DWORD GetLocalGroupsForUser(__in LPCWSTR user, __out LPLOCALGROUP_USERS_INFO_0 *groups, __out LPDWORD entries); -BOOL EnablePrivilege(__in LPCWSTR privilegeName); - void GetLibraryName(__in LPCVOID lpAddress, __out LPWSTR *filename); + +DWORD EnablePrivilege(__in LPCWSTR privilegeName); + +void AssignLsaString(__inout LSA_STRING * target, __in const char *strBuf); + +DWORD RegisterWithLsa(__in const char *logonProcessName, __out HANDLE * lsaHandle); + +void UnregisterWithLsa(__in HANDLE lsaHandle); + +DWORD 
LookupKerberosAuthenticationPackageId(__in HANDLE lsaHandle, __out ULONG * packageId); + +DWORD CreateLogonForUser(__in HANDLE lsaHandle, + __in const char * tokenSourceName, + __in const char * tokenOriginName, + __in ULONG authnPkgId, + __in const wchar_t* principalName, + __out HANDLE *tokenHandle); + +DWORD LoadUserProfileForLogon(__in HANDLE logonHandle, __out PROFILEINFO * pi); + +DWORD UnloadProfileForLogon(__in HANDLE logonHandle, __in PROFILEINFO * pi); + diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c b/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c index 391247fccd47d..da16ff5b081c4 100644 --- a/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c +++ b/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c @@ -17,6 +17,8 @@ #pragma comment(lib, "authz.lib") #pragma comment(lib, "netapi32.lib") +#pragma comment(lib, "Secur32.lib") +#pragma comment(lib, "Userenv.lib") #include "winutils.h" #include <authz.h> #include <sddl.h> @@ -797,7 +799,6 @@ DWORD FindFileOwnerAndPermission( __out_opt PINT pMask) { DWORD dwRtnCode = 0; - PSECURITY_DESCRIPTOR pSd = NULL; PSID psidOwner = NULL; @@ -1638,11 +1639,12 @@ DWORD GetLocalGroupsForUser( // to the process's access token. 
// // Returns: -// TRUE: on success +// ERROR_SUCCESS on success +// GetLastError() on error // // Notes: // -BOOL EnablePrivilege(__in LPCWSTR privilegeName) +DWORD EnablePrivilege(__in LPCWSTR privilegeName) { HANDLE hToken = INVALID_HANDLE_VALUE; TOKEN_PRIVILEGES tp = { 0 }; @@ -1651,28 +1653,31 @@ BOOL EnablePrivilege(__in LPCWSTR privilegeName) if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken)) { - ReportErrorCode(L"OpenProcessToken", GetLastError()); - return FALSE; + dwErrCode = GetLastError(); + ReportErrorCode(L"OpenProcessToken", dwErrCode); + return dwErrCode; } tp.PrivilegeCount = 1; if (!LookupPrivilegeValueW(NULL, privilegeName, &(tp.Privileges[0].Luid))) { - ReportErrorCode(L"LookupPrivilegeValue", GetLastError()); + dwErrCode = GetLastError(); + ReportErrorCode(L"LookupPrivilegeValue", dwErrCode); CloseHandle(hToken); - return FALSE; + return dwErrCode; } tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; // As stated on MSDN, we need to use GetLastError() to check if // AdjustTokenPrivileges() adjusted all of the specified privileges. // - AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL); + if( !AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL) ) { dwErrCode = GetLastError(); + } CloseHandle(hToken); - return dwErrCode == ERROR_SUCCESS; + return dwErrCode; } //---------------------------------------------------------------------------- @@ -1716,9 +1721,6 @@ void ReportErrorCode(LPCWSTR func, DWORD err) // Description: // Given an address, get the file name of the library from which it was loaded. // -// Returns: -// None -// // Notes: // - The function allocates heap memory and points the filename out parameter to // the newly allocated memory, which will contain the name of the file. 
@@ -1757,3 +1759,290 @@ void GetLibraryName(LPCVOID lpAddress, LPWSTR *filename) *filename = NULL; } } + +// Function: AssignLsaString +// +// Description: +// fills in values of LSA_STRING struct to point to a string buffer +// +// Returns: +// None +// +// IMPORTANT*** strBuf is not copied. It must be globally immutable +// +void AssignLsaString(__inout LSA_STRING * target, __in const char *strBuf) +{ + target->Length = (USHORT)(sizeof(char)*strlen(strBuf)); + target->MaximumLength = target->Length; + target->Buffer = (char *)(strBuf); +} + +//---------------------------------------------------------------------------- +// Function: RegisterWithLsa +// +// Description: +// Registers with local security authority and sets handle for use in later LSA +// operations +// +// Returns: +// ERROR_SUCCESS on success +// Other error code on failure +// +// Notes: +// +DWORD RegisterWithLsa(__in const char *logonProcessName, __out HANDLE * lsaHandle) +{ + LSA_STRING processName; + LSA_OPERATIONAL_MODE o_mode; // never useful as per msdn docs + NTSTATUS registerStatus; + *lsaHandle = 0; + + AssignLsaString(&processName, logonProcessName); + registerStatus = LsaRegisterLogonProcess(&processName, lsaHandle, &o_mode); + + return LsaNtStatusToWinError( registerStatus ); +} + +//---------------------------------------------------------------------------- +// Function: UnregisterWithLsa +// +// Description: +// Closes LSA handle allocated by RegisterWithLsa() +// +// Returns: +// None +// +// Notes: +// +void UnregisterWithLsa(__in HANDLE lsaHandle) +{ + LsaClose(lsaHandle); +} + +//---------------------------------------------------------------------------- +// Function: LookupKerberosAuthenticationPackageId +// +// Description: +// Looks of the current id (integer index) of the Kerberos authentication package on the local +// machine. 
+// +// Returns: +// ERROR_SUCCESS on success +// Other error code on failure +// +// Notes: +// +DWORD LookupKerberosAuthenticationPackageId(__in HANDLE lsaHandle, __out ULONG * packageId) +{ + NTSTATUS lookupStatus; + LSA_STRING pkgName; + + AssignLsaString(&pkgName, MICROSOFT_KERBEROS_NAME_A); + lookupStatus = LsaLookupAuthenticationPackage(lsaHandle, &pkgName, packageId); + return LsaNtStatusToWinError( lookupStatus ); +} + +//---------------------------------------------------------------------------- +// Function: CreateLogonForUser +// +// Description: +// Contacts the local LSA and performs a logon without credential for the +// given principal. This logon token will be local machine only and have no +// network credentials attached. +// +// Returns: +// ERROR_SUCCESS on success +// Other error code on failure +// +// Notes: +// This call assumes that all required privileges have already been enabled (TCB etc). +// IMPORTANT **** tokenOriginName must be immutable! +// +DWORD CreateLogonForUser(__in HANDLE lsaHandle, + __in const char * tokenSourceName, + __in const char * tokenOriginName, // must be immutable, will not be copied! + __in ULONG authnPkgId, + __in const wchar_t* principalName, + __out HANDLE *tokenHandle) +{ + DWORD logonStatus = ERROR_ASSERTION_FAILURE; // Failure to set status should trigger error + TOKEN_SOURCE tokenSource; + LSA_STRING originName; + void * profile = NULL; + + // from MSDN: + // The ClientUpn and ClientRealm members of the KERB_S4U_LOGON + // structure must point to buffers in memory that are contiguous + // to the structure itself. The value of the + // AuthenticationInformationLength parameter must take into + // account the length of these buffers. 
+ const int principalNameBufLen = lstrlen(principalName)*sizeof(*principalName); + const int totalAuthInfoLen = sizeof(KERB_S4U_LOGON) + principalNameBufLen; + KERB_S4U_LOGON* s4uLogonAuthInfo = (KERB_S4U_LOGON*)calloc(totalAuthInfoLen, 1); + if (s4uLogonAuthInfo == NULL ) { + logonStatus = ERROR_NOT_ENOUGH_MEMORY; + goto done; + } + s4uLogonAuthInfo->MessageType = KerbS4ULogon; + s4uLogonAuthInfo->ClientUpn.Buffer = (wchar_t*)((char*)s4uLogonAuthInfo + sizeof *s4uLogonAuthInfo); + CopyMemory(s4uLogonAuthInfo->ClientUpn.Buffer, principalName, principalNameBufLen); + s4uLogonAuthInfo->ClientUpn.Length = (USHORT)principalNameBufLen; + s4uLogonAuthInfo->ClientUpn.MaximumLength = (USHORT)principalNameBufLen; + + AllocateLocallyUniqueId(&tokenSource.SourceIdentifier); + StringCchCopyA(tokenSource.SourceName, TOKEN_SOURCE_LENGTH, tokenSourceName ); + AssignLsaString(&originName, tokenOriginName); + + { + DWORD cbProfile = 0; + LUID logonId; + QUOTA_LIMITS quotaLimits; + NTSTATUS subStatus; + + NTSTATUS logonNtStatus = LsaLogonUser(lsaHandle, + &originName, + Batch, // SECURITY_LOGON_TYPE + authnPkgId, + s4uLogonAuthInfo, + totalAuthInfoLen, + 0, + &tokenSource, + &profile, + &cbProfile, + &logonId, + tokenHandle, + &quotaLimits, + &subStatus); + logonStatus = LsaNtStatusToWinError( logonNtStatus ); + } +done: + // clean up + if (s4uLogonAuthInfo != NULL) { + free(s4uLogonAuthInfo); + } + if (profile != NULL) { + LsaFreeReturnBuffer(profile); + } + return logonStatus; +} + +// NOTE: must free allocatedName +DWORD GetNameFromLogonToken(__in HANDLE logonToken, __out wchar_t **allocatedName) +{ + DWORD userInfoSize = 0; + PTOKEN_USER user = NULL; + DWORD userNameSize = 0; + wchar_t * userName = NULL; + DWORD domainNameSize = 0; + wchar_t * domainName = NULL; + SID_NAME_USE sidUse = SidTypeUnknown; + DWORD getNameStatus = ERROR_ASSERTION_FAILURE; // Failure to set status should trigger error + BOOL tokenInformation = FALSE; + + // call for sid size then alloc and call for sid 
+ tokenInformation = GetTokenInformation(logonToken, TokenUser, NULL, 0, &userInfoSize); + assert (FALSE == tokenInformation); + + // last call should have failed and filled in allocation size + if ((getNameStatus = GetLastError()) != ERROR_INSUFFICIENT_BUFFER) + { + goto done; + } + user = (PTOKEN_USER)calloc(userInfoSize,1); + if (user == NULL) + { + getNameStatus = ERROR_NOT_ENOUGH_MEMORY; + goto done; + } + if (!GetTokenInformation(logonToken, TokenUser, user, userInfoSize, &userInfoSize)) { + getNameStatus = GetLastError(); + goto done; + } + LookupAccountSid( NULL, user->User.Sid, NULL, &userNameSize, NULL, &domainNameSize, &sidUse ); + // last call should have failed and filled in allocation size + if ((getNameStatus = GetLastError()) != ERROR_INSUFFICIENT_BUFFER) + { + goto done; + } + userName = (wchar_t *)calloc(userNameSize, sizeof(wchar_t)); + if (userName == NULL) { + getNameStatus = ERROR_NOT_ENOUGH_MEMORY; + goto done; + } + domainName = (wchar_t *)calloc(domainNameSize, sizeof(wchar_t)); + if (domainName == NULL) { + getNameStatus = ERROR_NOT_ENOUGH_MEMORY; + goto done; + } + if (!LookupAccountSid( NULL, user->User.Sid, userName, &userNameSize, domainName, &domainNameSize, &sidUse )) { + getNameStatus = GetLastError(); + goto done; + } + + getNameStatus = ERROR_SUCCESS; + *allocatedName = userName; + userName = NULL; +done: + if (user != NULL) { + free( user ); + user = NULL; + } + if (userName != NULL) { + free( userName ); + userName = NULL; + } + if (domainName != NULL) { + free( domainName ); + domainName = NULL; + } + return getNameStatus; +} + +DWORD LoadUserProfileForLogon(__in HANDLE logonHandle, __out PROFILEINFO * pi) +{ + wchar_t *userName = NULL; + DWORD loadProfileStatus = ERROR_ASSERTION_FAILURE; // Failure to set status should trigger error + + loadProfileStatus = GetNameFromLogonToken( logonHandle, &userName ); + if (loadProfileStatus != ERROR_SUCCESS) { + goto done; + } + + assert(pi); + + ZeroMemory( pi, sizeof(*pi) ); + pi->dwSize 
= sizeof(*pi); + pi->lpUserName = userName; + pi->dwFlags = PI_NOUI; + + // if the profile does not exist it will be created + if ( !LoadUserProfile( logonHandle, pi ) ) { + loadProfileStatus = GetLastError(); + goto done; + } + + loadProfileStatus = ERROR_SUCCESS; +done: + return loadProfileStatus; +} + +DWORD UnloadProfileForLogon(__in HANDLE logonHandle, __in PROFILEINFO * pi) +{ + DWORD touchProfileStatus = ERROR_ASSERTION_FAILURE; // Failure to set status should trigger error + + assert(pi); + + if ( !UnloadUserProfile(logonHandle, pi->hProfile ) ) { + touchProfileStatus = GetLastError(); + goto done; + } + if (pi->lpUserName != NULL) { + free(pi->lpUserName); + pi->lpUserName = NULL; + } + ZeroMemory( pi, sizeof(*pi) ); + + touchProfileStatus = ERROR_SUCCESS; +done: + return touchProfileStatus; +} diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/symlink.c b/hadoop-common-project/hadoop-common/src/main/winutils/symlink.c index ea372cc06dc53..02acd4d2a40e8 100644 --- a/hadoop-common-project/hadoop-common/src/main/winutils/symlink.c +++ b/hadoop-common-project/hadoop-common/src/main/winutils/symlink.c @@ -77,7 +77,7 @@ int Symlink(__in int argc, __in_ecount(argc) wchar_t *argv[]) // This is just an additional step to do the privilege check by not using // error code from CreateSymbolicLink() method. 
// - if (!EnablePrivilege(L"SeCreateSymbolicLinkPrivilege")) + if (EnablePrivilege(L"SeCreateSymbolicLinkPrivilege") != ERROR_SUCCESS) { fwprintf(stderr, L"No privilege to create symbolic links.\n"); diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/task.c b/hadoop-common-project/hadoop-common/src/main/winutils/task.c index 19bda96a1e6ed..783f162322bd7 100644 --- a/hadoop-common-project/hadoop-common/src/main/winutils/task.c +++ b/hadoop-common-project/hadoop-common/src/main/winutils/task.c @@ -18,6 +18,7 @@ #include "winutils.h" #include <errno.h> #include <psapi.h> +#include <malloc.h> #define PSAPI_VERSION 1 #pragma comment(lib, "psapi.lib") @@ -28,12 +29,18 @@ // process exits with 128 + signal. For SIGKILL, this would be 128 + 9 = 137. #define KILLED_PROCESS_EXIT_CODE 137 +// Name for tracking this logon process when registering with LSA +static const char *LOGON_PROCESS_NAME="Hadoop Container Executor"; +// Name for token source, must be less or eq to TOKEN_SOURCE_LENGTH (currently 8) chars +static const char *TOKEN_SOURCE_NAME = "HadoopEx"; + // List of different task related command line options supported by // winutils. typedef enum TaskCommandOptionType { TaskInvalid, TaskCreate, + TaskCreateAsUser, TaskIsAlive, TaskKill, TaskProcessList @@ -86,37 +93,53 @@ static BOOL ParseCommandLine(__in int argc, } } + if (argc >= 6) { + if (wcscmp(argv[1], L"createAsUser") == 0) + { + *command = TaskCreateAsUser; + return TRUE; + } + } + return FALSE; } //---------------------------------------------------------------------------- -// Function: createTask +// Function: CreateTaskImpl // // Description: // Creates a task via a jobobject. Outputs the // appropriate information to stdout on success, or stderr on failure. 
+// logonHandle may be NULL, in this case the current logon will be utilized for the +// created process // // Returns: // ERROR_SUCCESS: On success // GetLastError: otherwise -DWORD createTask(__in PCWSTR jobObjName,__in PWSTR cmdLine) +DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PWSTR cmdLine) { - DWORD err = ERROR_SUCCESS; + DWORD dwErrorCode = ERROR_SUCCESS; DWORD exitCode = EXIT_FAILURE; + DWORD currDirCnt = 0; STARTUPINFO si; PROCESS_INFORMATION pi; HANDLE jobObject = NULL; JOBOBJECT_EXTENDED_LIMIT_INFORMATION jeli = { 0 }; + void * envBlock = NULL; + BOOL createProcessResult = FALSE; + + wchar_t* curr_dir = NULL; + FILE *stream = NULL; // Create un-inheritable job object handle and set job object to terminate // when last handle is closed. So winutils.exe invocation has the only open // job object handle. Exit of winutils.exe ensures termination of job object. // Either a clean exit of winutils or crash or external termination. jobObject = CreateJobObject(NULL, jobObjName); - err = GetLastError(); - if(jobObject == NULL || err == ERROR_ALREADY_EXISTS) + dwErrorCode = GetLastError(); + if(jobObject == NULL || dwErrorCode == ERROR_ALREADY_EXISTS) { - return err; + return dwErrorCode; } jeli.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; if(SetInformationJobObject(jobObject, @@ -124,36 +147,102 @@ DWORD createTask(__in PCWSTR jobObjName,__in PWSTR cmdLine) &jeli, sizeof(jeli)) == 0) { - err = GetLastError(); + dwErrorCode = GetLastError(); CloseHandle(jobObject); - return err; + return dwErrorCode; } if(AssignProcessToJobObject(jobObject, GetCurrentProcess()) == 0) { - err = GetLastError(); + dwErrorCode = GetLastError(); CloseHandle(jobObject); - return err; + return dwErrorCode; } // the child JVM uses this env var to send the task OS process identifier // to the TaskTracker. We pass the job object name. 
if(SetEnvironmentVariable(L"JVM_PID", jobObjName) == 0) { - err = GetLastError(); - CloseHandle(jobObject); - return err; + dwErrorCode = GetLastError(); + // We have to explictly Terminate, passing in the error code + // simply closing the job would kill our own process with success exit status + TerminateJobObject(jobObject, dwErrorCode); + return dwErrorCode; } ZeroMemory( &si, sizeof(si) ); si.cb = sizeof(si); ZeroMemory( &pi, sizeof(pi) ); - if (CreateProcess(NULL, cmdLine, NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi) == 0) - { - err = GetLastError(); - CloseHandle(jobObject); - return err; + if( logonHandle != NULL ) { + // create user environment for this logon + if(!CreateEnvironmentBlock(&envBlock, + logonHandle, + TRUE )) { + dwErrorCode = GetLastError(); + // We have to explictly Terminate, passing in the error code + // simply closing the job would kill our own process with success exit status + TerminateJobObject(jobObject, dwErrorCode); + return dwErrorCode; + } + } + + // Get the required buffer size first + currDirCnt = GetCurrentDirectory(0, NULL); + if (0 < currDirCnt) { + curr_dir = (wchar_t*) alloca(currDirCnt * sizeof(wchar_t)); + assert(curr_dir); + currDirCnt = GetCurrentDirectory(currDirCnt, curr_dir); + } + + if (0 == currDirCnt) { + dwErrorCode = GetLastError(); + // We have to explictly Terminate, passing in the error code + // simply closing the job would kill our own process with success exit status + TerminateJobObject(jobObject, dwErrorCode); + return dwErrorCode; + } + + if (logonHandle == NULL) { + createProcessResult = CreateProcess( + NULL, // ApplicationName + cmdLine, // command line + NULL, // process security attributes + NULL, // thread security attributes + TRUE, // inherit handles + 0, // creation flags + NULL, // environment + curr_dir, // current directory + &si, // startup info + &pi); // process info + } + else { + createProcessResult = CreateProcessAsUser( + logonHandle, // logon token handle + NULL, // Application 
handle + cmdLine, // command line + NULL, // process security attributes + NULL, // thread security attributes + FALSE, // inherit handles + CREATE_UNICODE_ENVIRONMENT, // creation flags + envBlock, // environment + curr_dir, // current directory + &si, // startup info + &pi); // process info + } + + if (FALSE == createProcessResult) { + dwErrorCode = GetLastError(); + if( envBlock != NULL ) { + DestroyEnvironmentBlock( envBlock ); + envBlock = NULL; + } + // We have to explictly Terminate, passing in the error code + // simply closing the job would kill our own process with success exit status + TerminateJobObject(jobObject, dwErrorCode); + + // This is tehnically dead code, we cannot reach this condition + return dwErrorCode; } CloseHandle(pi.hThread); @@ -162,10 +251,15 @@ DWORD createTask(__in PCWSTR jobObjName,__in PWSTR cmdLine) WaitForSingleObject( pi.hProcess, INFINITE ); if(GetExitCodeProcess(pi.hProcess, &exitCode) == 0) { - err = GetLastError(); + dwErrorCode = GetLastError(); } CloseHandle( pi.hProcess ); + if( envBlock != NULL ) { + DestroyEnvironmentBlock( envBlock ); + envBlock = NULL; + } + // Terminate job object so that all spawned processes are also killed. // This is needed because once this process closes the handle to the job // object and none of the spawned objects have the handle open (via @@ -173,21 +267,134 @@ DWORD createTask(__in PCWSTR jobObjName,__in PWSTR cmdLine) // program (say winutils task kill) to terminate this job object via its name. 
if(TerminateJobObject(jobObject, exitCode) == 0) { - err = GetLastError(); + dwErrorCode = GetLastError(); } - // comes here only on failure or TerminateJobObject + // comes here only on failure of TerminateJobObject CloseHandle(jobObject); - if(err != ERROR_SUCCESS) + if(dwErrorCode != ERROR_SUCCESS) { - return err; + return dwErrorCode; } return exitCode; } //---------------------------------------------------------------------------- -// Function: isTaskAlive +// Function: CreateTask +// +// Description: +// Creates a task via a jobobject. Outputs the +// appropriate information to stdout on success, or stderr on failure. +// +// Returns: +// ERROR_SUCCESS: On success +// GetLastError: otherwise +DWORD CreateTask(__in PCWSTR jobObjName,__in PWSTR cmdLine) +{ + // call with null logon in order to create tasks utilizing the current logon + return CreateTaskImpl( NULL, jobObjName, cmdLine ); +} +//---------------------------------------------------------------------------- +// Function: CreateTask +// +// Description: +// Creates a task via a jobobject. Outputs the +// appropriate information to stdout on success, or stderr on failure. 
+// +// Returns: +// ERROR_SUCCESS: On success +// GetLastError: otherwise +DWORD CreateTaskAsUser(__in PCWSTR jobObjName,__in PWSTR user, __in PWSTR pidFilePath, __in PWSTR cmdLine) +{ + DWORD err = ERROR_SUCCESS; + DWORD exitCode = EXIT_FAILURE; + ULONG authnPkgId; + HANDLE lsaHandle = INVALID_HANDLE_VALUE; + PROFILEINFO pi; + BOOL profileIsLoaded = FALSE; + FILE* pidFile = NULL; + + DWORD retLen = 0; + HANDLE logonHandle = NULL; + + err = EnablePrivilege(SE_TCB_NAME); + if( err != ERROR_SUCCESS ) { + fwprintf(stdout, L"INFO: The user does not have SE_TCB_NAME.\n"); + goto done; + } + err = EnablePrivilege(SE_ASSIGNPRIMARYTOKEN_NAME); + if( err != ERROR_SUCCESS ) { + fwprintf(stdout, L"INFO: The user does not have SE_ASSIGNPRIMARYTOKEN_NAME.\n"); + goto done; + } + err = EnablePrivilege(SE_INCREASE_QUOTA_NAME); + if( err != ERROR_SUCCESS ) { + fwprintf(stdout, L"INFO: The user does not have SE_INCREASE_QUOTA_NAME.\n"); + goto done; + } + err = EnablePrivilege(SE_RESTORE_NAME); + if( err != ERROR_SUCCESS ) { + fwprintf(stdout, L"INFO: The user does not have SE_RESTORE_NAME.\n"); + goto done; + } + + err = RegisterWithLsa(LOGON_PROCESS_NAME ,&lsaHandle); + if( err != ERROR_SUCCESS ) goto done; + + err = LookupKerberosAuthenticationPackageId( lsaHandle, &authnPkgId ); + if( err != ERROR_SUCCESS ) goto done; + + err = CreateLogonForUser(lsaHandle, + LOGON_PROCESS_NAME, + TOKEN_SOURCE_NAME, + authnPkgId, + user, + &logonHandle); + if( err != ERROR_SUCCESS ) goto done; + + err = LoadUserProfileForLogon(logonHandle, &pi); + if( err != ERROR_SUCCESS ) goto done; + profileIsLoaded = TRUE; + + // Create the PID file + + if (!(pidFile = _wfopen(pidFilePath, "w"))) { + err = GetLastError(); + goto done; + } + + if (0 > fprintf_s(pidFile, "%ls", jobObjName)) { + err = GetLastError(); + } + + fclose(pidFile); + + if (err != ERROR_SUCCESS) { + goto done; + } + + err = CreateTaskImpl(logonHandle, jobObjName, cmdLine); + +done: + if( profileIsLoaded ) { + UnloadProfileForLogon( 
logonHandle, &pi ); + profileIsLoaded = FALSE; + } + if( logonHandle != NULL ) { + CloseHandle(logonHandle); + } + + if (INVALID_HANDLE_VALUE != lsaHandle) { + UnregisterWithLsa(lsaHandle); + } + + return err; +} + + +//---------------------------------------------------------------------------- +// Function: IsTaskAlive // // Description: // Checks if a task is alive via a jobobject. Outputs the @@ -196,7 +403,7 @@ DWORD createTask(__in PCWSTR jobObjName,__in PWSTR cmdLine) // Returns: // ERROR_SUCCESS: On success // GetLastError: otherwise -DWORD isTaskAlive(const WCHAR* jobObjName, int* isAlive, int* procsInJob) +DWORD IsTaskAlive(const WCHAR* jobObjName, int* isAlive, int* procsInJob) { PJOBOBJECT_BASIC_PROCESS_ID_LIST procList; HANDLE jobObject = NULL; @@ -247,7 +454,7 @@ DWORD isTaskAlive(const WCHAR* jobObjName, int* isAlive, int* procsInJob) } //---------------------------------------------------------------------------- -// Function: killTask +// Function: KillTask // // Description: // Kills a task via a jobobject. 
Outputs the @@ -256,7 +463,7 @@ DWORD isTaskAlive(const WCHAR* jobObjName, int* isAlive, int* procsInJob) // Returns: // ERROR_SUCCESS: On success // GetLastError: otherwise -DWORD killTask(PCWSTR jobObjName) +DWORD KillTask(PCWSTR jobObjName) { HANDLE jobObject = OpenJobObject(JOB_OBJECT_TERMINATE, FALSE, jobObjName); if(jobObject == NULL) @@ -280,7 +487,7 @@ DWORD killTask(PCWSTR jobObjName) } //---------------------------------------------------------------------------- -// Function: printTaskProcessList +// Function: PrintTaskProcessList // // Description: // Prints resource usage of all processes in the task jobobject @@ -288,7 +495,7 @@ DWORD killTask(PCWSTR jobObjName) // Returns: // ERROR_SUCCESS: On success // GetLastError: otherwise -DWORD printTaskProcessList(const WCHAR* jobObjName) +DWORD PrintTaskProcessList(const WCHAR* jobObjName) { DWORD i; PJOBOBJECT_BASIC_PROCESS_ID_LIST procList; @@ -372,6 +579,21 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[]) { DWORD dwErrorCode = ERROR_SUCCESS; TaskCommandOption command = TaskInvalid; + wchar_t* cmdLine = NULL; + wchar_t buffer[16*1024] = L""; // 32K max command line + size_t charCountBufferLeft = sizeof (buffer)/sizeof(wchar_t); + int crtArgIndex = 0; + size_t argLen = 0; + size_t wscatErr = 0; + wchar_t* insertHere = NULL; + + enum { + ARGC_JOBOBJECTNAME = 2, + ARGC_USERNAME, + ARGC_PIDFILE, + ARGC_COMMAND, + ARGC_COMMAND_ARGS + }; if (!ParseCommandLine(argc, argv, &command)) { dwErrorCode = ERROR_INVALID_COMMAND_LINE; @@ -385,10 +607,57 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[]) { // Create the task jobobject // - dwErrorCode = createTask(argv[2], argv[3]); + dwErrorCode = CreateTask(argv[2], argv[3]); + if (dwErrorCode != ERROR_SUCCESS) + { + ReportErrorCode(L"CreateTask", dwErrorCode); + goto TaskExit; + } + } else if (command == TaskCreateAsUser) + { + // Create the task jobobject as a domain user + // createAsUser accepts an open list of arguments. 
All arguments after the command are + // to be passed as argumrnts to the command itself.Here we're concatenating all + // arguments after the command into a single arg entry. + // + cmdLine = argv[ARGC_COMMAND]; + if (argc > ARGC_COMMAND_ARGS) { + crtArgIndex = ARGC_COMMAND; + insertHere = buffer; + while (crtArgIndex < argc) { + argLen = wcslen(argv[crtArgIndex]); + wscatErr = wcscat_s(insertHere, charCountBufferLeft, argv[crtArgIndex]); + switch (wscatErr) { + case 0: + // 0 means success; + break; + case EINVAL: + dwErrorCode = ERROR_INVALID_PARAMETER; + goto TaskExit; + case ERANGE: + dwErrorCode = ERROR_INSUFFICIENT_BUFFER; + goto TaskExit; + default: + // This case is not MSDN documented. + dwErrorCode = ERROR_GEN_FAILURE; + goto TaskExit; + } + insertHere += argLen; + charCountBufferLeft -= argLen; + insertHere[0] = L' '; + insertHere += 1; + charCountBufferLeft -= 1; + insertHere[0] = 0; + ++crtArgIndex; + } + cmdLine = buffer; + } + + dwErrorCode = CreateTaskAsUser( + argv[ARGC_JOBOBJECTNAME], argv[ARGC_USERNAME], argv[ARGC_PIDFILE], cmdLine); if (dwErrorCode != ERROR_SUCCESS) { - ReportErrorCode(L"createTask", dwErrorCode); + ReportErrorCode(L"CreateTaskAsUser", dwErrorCode); goto TaskExit; } } else if (command == TaskIsAlive) @@ -397,10 +666,10 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[]) // int isAlive; int numProcs; - dwErrorCode = isTaskAlive(argv[2], &isAlive, &numProcs); + dwErrorCode = IsTaskAlive(argv[2], &isAlive, &numProcs); if (dwErrorCode != ERROR_SUCCESS) { - ReportErrorCode(L"isTaskAlive", dwErrorCode); + ReportErrorCode(L"IsTaskAlive", dwErrorCode); goto TaskExit; } @@ -412,27 +681,27 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[]) else { dwErrorCode = ERROR_TASK_NOT_ALIVE; - ReportErrorCode(L"isTaskAlive returned false", dwErrorCode); + ReportErrorCode(L"IsTaskAlive returned false", dwErrorCode); goto TaskExit; } } else if (command == TaskKill) { // Check if task jobobject // - dwErrorCode = 
killTask(argv[2]); + dwErrorCode = KillTask(argv[2]); if (dwErrorCode != ERROR_SUCCESS) { - ReportErrorCode(L"killTask", dwErrorCode); + ReportErrorCode(L"KillTask", dwErrorCode); goto TaskExit; } } else if (command == TaskProcessList) { // Check if task jobobject // - dwErrorCode = printTaskProcessList(argv[2]); + dwErrorCode = PrintTaskProcessList(argv[2]); if (dwErrorCode != ERROR_SUCCESS) { - ReportErrorCode(L"printTaskProcessList", dwErrorCode); + ReportErrorCode(L"PrintTaskProcessList", dwErrorCode); goto TaskExit; } } else @@ -453,10 +722,12 @@ void TaskUsage() // ProcessTree.isSetsidSupported() fwprintf(stdout, L"\ Usage: task create [TASKNAME] [COMMAND_LINE] |\n\ + task createAsUser [TASKNAME] [USERNAME] [PIDFILE] [COMMAND_LINE] |\n\ task isAlive [TASKNAME] |\n\ task kill [TASKNAME]\n\ task processList [TASKNAME]\n\ Creates a new task jobobject with taskname\n\ + Creates a new task jobobject with taskname as the user provided\n\ Checks if task jobobject is alive\n\ Kills task jobobject\n\ Prints to stdout a list of processes in the task\n\ diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java index 588b21761ca81..953039d937a07 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java @@ -20,10 +20,12 @@ import static org.junit.Assert.*; import static org.junit.Assume.assumeTrue; +import static org.junit.matchers.JUnitMatchers.containsString; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; +import java.io.FileWriter; import java.io.IOException; import org.apache.commons.io.FileUtils; @@ -33,7 +35,7 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.junit.Assume.*; + import static 
org.hamcrest.CoreMatchers.*; /** @@ -521,4 +523,26 @@ public void testReadLink() throws IOException { assertThat(ece.getExitCode(), is(1)); } } + + @SuppressWarnings("deprecation") + @Test(timeout=10000) + public void testTaskCreate() throws IOException { + File batch = new File(TEST_DIR, "testTaskCreate.cmd"); + File proof = new File(TEST_DIR, "testTaskCreate.out"); + FileWriter fw = new FileWriter(batch); + String testNumber = String.format("%f", Math.random()); + fw.write(String.format("echo %s > \"%s\"", testNumber, proof.getAbsolutePath())); + fw.close(); + + assertFalse(proof.exists()); + + Shell.execCommand(Shell.WINUTILS, "task", "create", "testTaskCreate" + testNumber, + batch.getAbsolutePath()); + + assertTrue(proof.exists()); + + String outNumber = FileUtils.readFileToString(proof); + + assertThat(outNumber, containsString(testNumber)); + } } diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 47a7e2cb45b17..e8db12ddc5e57 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -88,6 +88,9 @@ Release 2.6.0 - UNRELEASED YARN-2581. Passed LogAggregationContext to NM via ContainerTokenIdentifier. (Xuan Gong via zjshen) + YARN-1063. Augmented Hadoop common winutils to have the ability to create + containers as domain users. (Remus Rusanu via vinodkv) + IMPROVEMENTS YARN-2242. Improve exception information on AM launch crashes. (Li Lu
f0a32dff9b9cf53707758226f6598ab32cca6b06
hbase
HBASE-11370 SSH doesn't need to scan meta if not- using ZK for assignment--
p
https://github.com/apache/hbase
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 87433d3006f0..66edd66dd601 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -458,10 +458,10 @@ void joinCluster() throws IOException, // need to be handled. // Scan hbase:meta to build list of existing regions, servers, and assignment - // Returns servers who have not checked in (assumed dead) and their regions - Map<ServerName, List<HRegionInfo>> deadServers; + // Returns servers who have not checked in (assumed dead) that some regions + // were assigned to (according to the meta) + Set<ServerName> deadServers = rebuildUserRegions(); - deadServers = rebuildUserRegions(); // This method will assign all user regions if a clean server startup or // it will reconstruct master state and cleanup any leftovers from // previous master process. @@ -489,8 +489,8 @@ void joinCluster() throws IOException, * @throws InterruptedException */ boolean processDeadServersAndRegionsInTransition( - final Map<ServerName, List<HRegionInfo>> deadServers) - throws KeeperException, IOException, InterruptedException, CoordinatedStateException { + final Set<ServerName> deadServers) throws KeeperException, + IOException, InterruptedException, CoordinatedStateException { List<String> nodes = ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode); @@ -2702,13 +2702,12 @@ boolean waitUntilNoRegionsInTransition(final long timeout) /** * Rebuild the list of user regions and assignment information. * <p> - * Returns a map of servers that are not found to be online and the regions - * they were hosting. - * @return map of servers not online to their assigned regions, as stored - * in META + * Returns a set of servers that are not found to be online that hosted + * some regions. 
+ * @return set of servers not online that hosted some regions per meta * @throws IOException */ - Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws + Set<ServerName> rebuildUserRegions() throws IOException, KeeperException, CoordinatedStateException { Set<TableName> disabledOrEnablingTables = tableStateManager.getTablesInStates( ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.ENABLING); @@ -2722,16 +2721,16 @@ Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws List<Result> results = MetaReader.fullScan(this.catalogTracker); // Get any new but slow to checkin region server that joined the cluster Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet(); - // Map of offline servers and their regions to be returned - Map<ServerName, List<HRegionInfo>> offlineServers = - new TreeMap<ServerName, List<HRegionInfo>>(); + // Set of offline servers to be returned + Set<ServerName> offlineServers = new HashSet<ServerName>(); // Iterate regions in META for (Result result : results) { HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(result); if (regionInfo == null) continue; State state = RegionStateStore.getRegionState(result); + ServerName lastHost = HRegionInfo.getServerName(result); ServerName regionLocation = RegionStateStore.getRegionServer(result); - regionStates.createRegionState(regionInfo, state, regionLocation); + regionStates.createRegionState(regionInfo, state, regionLocation, lastHost); if (!regionStates.isRegionInState(regionInfo, State.OPEN)) { // Region is not open (either offline or in transition), skip continue; @@ -2739,13 +2738,10 @@ Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws TableName tableName = regionInfo.getTable(); if (!onlineServers.contains(regionLocation)) { // Region is located on a server that isn't online - List<HRegionInfo> offlineRegions = offlineServers.get(regionLocation); - if (offlineRegions == null) { - offlineRegions = new ArrayList<HRegionInfo>(1); - 
offlineServers.put(regionLocation, offlineRegions); + offlineServers.add(regionLocation); + if (useZKForAssignment) { + regionStates.regionOffline(regionInfo); } - regionStates.regionOffline(regionInfo); - offlineRegions.add(regionInfo); } else if (!disabledOrEnablingTables.contains(tableName)) { // Region is being served and on an active server // add only if region not in disabled or enabling table @@ -2838,13 +2834,9 @@ private void recoverTableInEnablingState() * @throws KeeperException */ private void processDeadServersAndRecoverLostRegions( - Map<ServerName, List<HRegionInfo>> deadServers) - throws IOException, KeeperException { - if (deadServers != null) { - for (Map.Entry<ServerName, List<HRegionInfo>> server: deadServers.entrySet()) { - ServerName serverName = server.getKey(); - // We need to keep such info even if the server is known dead - regionStates.setLastRegionServerOfRegions(serverName, server.getValue()); + Set<ServerName> deadServers) throws IOException, KeeperException { + if (deadServers != null && !deadServers.isEmpty()) { + for (ServerName serverName: deadServers) { if (!serverManager.isServerDead(serverName)) { serverManager.expireServer(serverName); // Let SSH do region re-assign } @@ -3420,7 +3412,7 @@ private String onRegionSplit(ServerName sn, TransitionCode code, } } else if (code == TransitionCode.SPLIT_PONR) { try { - regionStateStore.splitRegion(p, a, b, sn); + regionStates.splitRegion(p, a, b, sn); } catch (IOException ioe) { LOG.info("Failed to record split region " + p.getShortNameToLog()); return "Failed to record the splitting in meta"; @@ -3469,7 +3461,7 @@ private String onRegionMerge(ServerName sn, TransitionCode code, } } else if (code == TransitionCode.MERGE_PONR) { try { - regionStateStore.mergeRegions(p, a, b, sn); + regionStates.mergeRegions(p, a, b, sn); } catch (IOException ioe) { LOG.info("Failed to record merged region " + p.getShortNameToLog()); return "Failed to record the merging in meta"; diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 153ffcbf4794..360996508179 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; -import java.util.NavigableMap; import java.util.Set; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -47,14 +46,11 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.backup.HFileArchiver; -import org.apache.hadoop.hbase.catalog.MetaReader; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.wal.HLog; -import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -340,30 +336,6 @@ private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOExcept return logDirs; } - /** - * Mark regions in recovering state when distributedLogReplay are set true - * @param serverNames Set of ServerNames to be replayed wals in order to recover changes contained - * in them - * @throws IOException - */ - public void prepareLogReplay(Set<ServerName> serverNames) throws IOException { - if (!this.distributedLogReplay) { - return; - } - // mark regions in recovering state - for (ServerName serverName : serverNames) { - NavigableMap<HRegionInfo, 
Result> regions = this.getServerUserRegions(serverName); - if (regions == null) { - continue; - } - try { - this.splitLogManager.markRegionsRecoveringInZK(serverName, regions.keySet()); - } catch (KeeperException e) { - throw new IOException(e); - } - } - } - /** * Mark regions in recovering state when distributedLogReplay are set true * @param serverName Failed region server whose wals to be replayed @@ -675,19 +647,6 @@ public HTableDescriptor addColumn(TableName tableName, HColumnDescriptor hcd) return htd; } - private NavigableMap<HRegionInfo, Result> getServerUserRegions(ServerName serverName) - throws IOException { - if (!this.master.isStopped()) { - try { - this.master.getCatalogTracker().waitForMeta(); - return MetaReader.getServerUserRegions(this.master.getCatalogTracker(), serverName); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - } - return null; - } - /** * The function is used in SSH to set recovery mode based on configuration after all outstanding * log split tasks drained. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index 85677af8cbd6..fd75b3ffa976 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; /** @@ -248,16 +249,22 @@ public void createRegionStates( * no effect, and the original state is returned. 
*/ public RegionState createRegionState(final HRegionInfo hri) { - return createRegionState(hri, null, null); + return createRegionState(hri, null, null, null); } /** * Add a region to RegionStates with the specified state. * If the region is already in RegionStates, this call has * no effect, and the original state is returned. - */ - public synchronized RegionState createRegionState( - final HRegionInfo hri, State newState, ServerName serverName) { + * + * @param hri the region info to create a state for + * @param newState the state to the region in set to + * @param serverName the server the region is transitioning on + * @param lastHost the last server that hosts the region + * @return the current state + */ + public synchronized RegionState createRegionState(final HRegionInfo hri, + State newState, ServerName serverName, ServerName lastHost) { if (newState == null || (newState == State.OPEN && serverName == null)) { newState = State.OFFLINE; } @@ -274,16 +281,24 @@ public synchronized RegionState createRegionState( regionState = new RegionState(hri, newState, serverName); regionStates.put(encodedName, regionState); if (newState == State.OPEN) { - regionAssignments.put(hri, serverName); - lastAssignments.put(encodedName, serverName); - Set<HRegionInfo> regions = serverHoldings.get(serverName); + if (!serverName.equals(lastHost)) { + LOG.warn("Open region's last host " + lastHost + + " should be the same as the current one " + serverName + + ", ignored the last and used the current one"); + lastHost = serverName; + } + lastAssignments.put(encodedName, lastHost); + regionAssignments.put(hri, lastHost); + } else if (!regionState.isUnassignable()) { + regionsInTransition.put(encodedName, regionState); + } + if (lastHost != null && newState != State.SPLIT) { + Set<HRegionInfo> regions = serverHoldings.get(lastHost); if (regions == null) { regions = new HashSet<HRegionInfo>(); - serverHoldings.put(serverName, regions); + serverHoldings.put(lastHost, regions); } 
regions.add(hri); - } else if (!regionState.isUnassignable()) { - regionsInTransition.put(encodedName, regionState); } } return regionState; @@ -590,6 +605,31 @@ public void tableDeleted(final TableName tableName) { } } + /** + * Get a copy of all regions assigned to a server + */ + public synchronized Set<HRegionInfo> getServerRegions(ServerName serverName) { + Set<HRegionInfo> regions = serverHoldings.get(serverName); + if (regions == null) return null; + return new HashSet<HRegionInfo>(regions); + } + + /** + * Remove a region from all state maps. + */ + @VisibleForTesting + public synchronized void deleteRegion(final HRegionInfo hri) { + String encodedName = hri.getEncodedName(); + regionsInTransition.remove(encodedName); + regionStates.remove(encodedName); + lastAssignments.remove(encodedName); + ServerName sn = regionAssignments.remove(hri); + if (sn != null) { + Set<HRegionInfo> regions = serverHoldings.get(sn); + regions.remove(hri); + } + } + /** * Checking if a region was assigned to a server which is not online now. * If so, we should hold re-assign this region till SSH has split its hlogs. @@ -651,6 +691,38 @@ synchronized void setLastRegionServerOfRegion( lastAssignments.put(encodedName, serverName); } + void splitRegion(HRegionInfo p, + HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException { + regionStateStore.splitRegion(p, a, b, sn); + synchronized (this) { + // After PONR, split is considered to be done. + // Update server holdings to be aligned with the meta. + Set<HRegionInfo> regions = serverHoldings.get(sn); + if (regions == null) { + throw new IllegalStateException(sn + " should host some regions"); + } + regions.remove(p); + regions.add(a); + regions.add(b); + } + } + + void mergeRegions(HRegionInfo p, + HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException { + regionStateStore.mergeRegions(p, a, b, sn); + synchronized (this) { + // After PONR, merge is considered to be done. 
+ // Update server holdings to be aligned with the meta. + Set<HRegionInfo> regions = serverHoldings.get(sn); + if (regions == null) { + throw new IllegalStateException(sn + " should host some regions"); + } + regions.remove(a); + regions.remove(b); + regions.add(p); + } + } + /** * At cluster clean re/start, mark all user regions closed except those of tables * that are excluded, such as disabled/disabling/enabling tables. All user regions @@ -661,8 +733,11 @@ synchronized Map<HRegionInfo, ServerName> closeAllUserRegions(Set<TableName> exc Set<HRegionInfo> toBeClosed = new HashSet<HRegionInfo>(regionStates.size()); for(RegionState state: regionStates.values()) { HRegionInfo hri = state.getRegion(); + if (state.isSplit() || hri.isSplit()) { + continue; + } TableName tableName = hri.getTable(); - if (!TableName.META_TABLE_NAME.equals(tableName) && !hri.isSplit() + if (!TableName.META_TABLE_NAME.equals(tableName) && (noExcludeTables || !excludedTables.contains(tableName))) { toBeClosed.add(hri); } @@ -859,19 +934,4 @@ private RegionState updateRegionState(final HRegionInfo hri, } return regionState; } - - /** - * Remove a region from all state maps. 
- */ - private synchronized void deleteRegion(final HRegionInfo hri) { - String encodedName = hri.getEncodedName(); - regionsInTransition.remove(encodedName); - regionStates.remove(encodedName); - lastAssignments.remove(encodedName); - ServerName sn = regionAssignments.remove(hri); - if (sn != null) { - Set<HRegionInfo> regions = serverHoldings.get(sn); - regions.remove(hri); - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java index 50e09add5e66..70648e1c6cb3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; +import org.apache.hadoop.hbase.util.ConfigUtil; import org.apache.hadoop.hbase.zookeeper.ZKAssign; import org.apache.zookeeper.KeeperException; @@ -162,8 +163,16 @@ public void process() throws IOException { this.server.getCatalogTracker().waitForMeta(); // Skip getting user regions if the server is stopped. 
if (!this.server.isStopped()) { - hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(), - this.serverName).keySet(); + if (ConfigUtil.useZKForAssignment(server.getConfiguration())) { + hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(), + this.serverName).keySet(); + } else { + // Not using ZK for assignment, regionStates has everything we want + hris = am.getRegionStates().getServerRegions(serverName); + if (hris != null) { + hris.remove(HRegionInfo.FIRST_META_REGIONINFO); + } + } } break; } catch (InterruptedException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 10c23356e7cf..b4a780f6a44c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -418,7 +418,7 @@ public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx AssignmentManager.TEST_SKIP_SPLIT_HANDLING = true; // Now try splitting and it should work. split(hri, server, regionCount); - // Assert the ephemeral node is up in zk. 
+ String path = ZKAssign.getNodeName(TESTING_UTIL.getZooKeeperWatcher(), hri.getEncodedName()); RegionTransition rt = null; @@ -437,7 +437,7 @@ public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx } LOG.info("EPHEMERAL NODE BEFORE SERVER ABORT, path=" + path + ", stats=" + stats); assertTrue(rt != null && rt.getEventType().equals(EventType.RS_ZK_REGION_SPLIT)); - // Now crash the server + // Now crash the server, for ZK-less assignment, the server is auto aborted cluster.abortRegionServer(tableRegionIndex); } waitUntilRegionServerDead(); @@ -1329,12 +1329,12 @@ private void printOutRegions(final HRegionServer hrs, final String prefix) private void waitUntilRegionServerDead() throws InterruptedException, InterruptedIOException { // Wait until the master processes the RS shutdown for (int i=0; cluster.getMaster().getClusterStatus(). - getServers().size() == NB_SERVERS && i<100; i++) { + getServers().size() > NB_SERVERS && i<100; i++) { LOG.info("Waiting on server to go down"); Thread.sleep(100); } assertFalse("Waited too long for RS to die", cluster.getMaster().getClusterStatus(). - getServers().size() == NB_SERVERS); + getServers().size() > NB_SERVERS); } private void awaitDaughters(byte[] tableName, int numDaughters) throws InterruptedException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index 0a2e8fd4855a..bd0fbd3347a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -1426,6 +1426,12 @@ public void testSplitDaughtersNotInMeta() throws Exception { meta.delete(new Delete(daughters.getSecond().getRegionName())); meta.flushCommits(); + // Remove daughters from regionStates + RegionStates regionStates = TEST_UTIL.getMiniHBaseCluster().getMaster(). 
+ getAssignmentManager().getRegionStates(); + regionStates.deleteRegion(daughters.getFirst()); + regionStates.deleteRegion(daughters.getSecond()); + HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); //no LINGERING_SPLIT_PARENT
5f9b4443194d3aa3948d76956897c0a1d918d546
spring-framework
bean properties of type enum array/collection can- be populated with comma-separated String (SPR-6547)--
c
https://github.com/spring-projects/spring-framework
diff --git a/org.springframework.beans/src/main/java/org/springframework/beans/TypeConverterDelegate.java b/org.springframework.beans/src/main/java/org/springframework/beans/TypeConverterDelegate.java index 85100b836c2d..049a0694385f 100644 --- a/org.springframework.beans/src/main/java/org/springframework/beans/TypeConverterDelegate.java +++ b/org.springframework.beans/src/main/java/org/springframework/beans/TypeConverterDelegate.java @@ -202,6 +202,13 @@ private <T> T convertIfNecessary(String propertyName, Object oldValue, Object ne // Value not of required type? if (editor != null || (requiredType != null && !ClassUtils.isAssignableValue(requiredType, convertedValue))) { + if (requiredType != null && Collection.class.isAssignableFrom(requiredType) && + convertedValue instanceof String && typeDescriptor.getMethodParameter() != null) { + Class elementType = GenericCollectionTypeResolver.getCollectionParameterType(typeDescriptor.getMethodParameter()); + if (elementType != null && Enum.class.isAssignableFrom(elementType)) { + convertedValue = StringUtils.commaDelimitedListToStringArray((String) convertedValue); + } + } if (editor == null) { editor = findDefaultEditor(requiredType, typeDescriptor); } @@ -214,6 +221,9 @@ private <T> T convertIfNecessary(String propertyName, Object oldValue, Object ne if (convertedValue != null) { if (requiredType.isArray()) { // Array required -> apply appropriate conversion of elements. 
+ if (convertedValue instanceof String && Enum.class.isAssignableFrom(requiredType.getComponentType())) { + convertedValue = StringUtils.commaDelimitedListToStringArray((String) convertedValue); + } return (T) convertToTypedArray(convertedValue, propertyName, requiredType.getComponentType()); } else if (convertedValue instanceof Collection) { diff --git a/org.springframework.beans/src/test/java/org/springframework/beans/BeanWrapperEnumTests.java b/org.springframework.beans/src/test/java/org/springframework/beans/BeanWrapperEnumTests.java index 43182fe69acd..d41aa7a215e6 100644 --- a/org.springframework.beans/src/test/java/org/springframework/beans/BeanWrapperEnumTests.java +++ b/org.springframework.beans/src/test/java/org/springframework/beans/BeanWrapperEnumTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2002-2008 the original author or authors. + * Copyright 2002-2009 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,9 +17,7 @@ package org.springframework.beans; import static org.junit.Assert.*; - import org.junit.Test; - import test.beans.CustomEnum; import test.beans.GenericBean; @@ -53,4 +51,62 @@ public void testCustomEnumWithEmptyString() { assertEquals(null, gb.getCustomEnum()); } + @Test + public void testCustomEnumArrayWithSingleValue() { + GenericBean<?> gb = new GenericBean<Object>(); + BeanWrapper bw = new BeanWrapperImpl(gb); + bw.setPropertyValue("customEnumArray", "VALUE_1"); + assertEquals(1, gb.getCustomEnumArray().length); + assertEquals(CustomEnum.VALUE_1, gb.getCustomEnumArray()[0]); + } + + @Test + public void testCustomEnumArrayWithMultipleValues() { + GenericBean<?> gb = new GenericBean<Object>(); + BeanWrapper bw = new BeanWrapperImpl(gb); + bw.setPropertyValue("customEnumArray", new String[] {"VALUE_1", "VALUE_2"}); + assertEquals(2, gb.getCustomEnumArray().length); + assertEquals(CustomEnum.VALUE_1, gb.getCustomEnumArray()[0]); + assertEquals(CustomEnum.VALUE_2, gb.getCustomEnumArray()[1]); + } + + @Test + public void testCustomEnumArrayWithMultipleValuesAsCsv() { + GenericBean<?> gb = new GenericBean<Object>(); + BeanWrapper bw = new BeanWrapperImpl(gb); + bw.setPropertyValue("customEnumArray", "VALUE_1,VALUE_2"); + assertEquals(2, gb.getCustomEnumArray().length); + assertEquals(CustomEnum.VALUE_1, gb.getCustomEnumArray()[0]); + assertEquals(CustomEnum.VALUE_2, gb.getCustomEnumArray()[1]); + } + + @Test + public void testCustomEnumSetWithSingleValue() { + GenericBean<?> gb = new GenericBean<Object>(); + BeanWrapper bw = new BeanWrapperImpl(gb); + bw.setPropertyValue("customEnumSet", "VALUE_1"); + assertEquals(1, gb.getCustomEnumSet().size()); + assertTrue(gb.getCustomEnumSet().contains(CustomEnum.VALUE_1)); + } + + @Test + public void testCustomEnumSetWithMultipleValues() { + GenericBean<?> gb = new GenericBean<Object>(); + BeanWrapper bw = new BeanWrapperImpl(gb); + bw.setPropertyValue("customEnumSet", new String[] {"VALUE_1", "VALUE_2"}); + 
assertEquals(2, gb.getCustomEnumSet().size()); + assertTrue(gb.getCustomEnumSet().contains(CustomEnum.VALUE_1)); + assertTrue(gb.getCustomEnumSet().contains(CustomEnum.VALUE_2)); + } + + @Test + public void testCustomEnumSetWithMultipleValuesAsCsv() { + GenericBean<?> gb = new GenericBean<Object>(); + BeanWrapper bw = new BeanWrapperImpl(gb); + bw.setPropertyValue("customEnumSet", "VALUE_1,VALUE_2"); + assertEquals(2, gb.getCustomEnumSet().size()); + assertTrue(gb.getCustomEnumSet().contains(CustomEnum.VALUE_1)); + assertTrue(gb.getCustomEnumSet().contains(CustomEnum.VALUE_2)); + } + } diff --git a/org.springframework.beans/src/test/java/test/beans/GenericBean.java b/org.springframework.beans/src/test/java/test/beans/GenericBean.java index 25f61c4aa756..acb9bdb76e5a 100644 --- a/org.springframework.beans/src/test/java/test/beans/GenericBean.java +++ b/org.springframework.beans/src/test/java/test/beans/GenericBean.java @@ -60,11 +60,14 @@ public class GenericBean<T> { private CustomEnum customEnum; + private CustomEnum[] customEnumArray; + + private Set<CustomEnum> customEnumSet; + private T genericProperty; private List<T> genericListProperty; - public GenericBean() { } @@ -225,6 +228,22 @@ public void setCustomEnum(CustomEnum customEnum) { this.customEnum = customEnum; } + public CustomEnum[] getCustomEnumArray() { + return customEnumArray; + } + + public void setCustomEnumArray(CustomEnum[] customEnum) { + this.customEnumArray = customEnum; + } + + public Set<CustomEnum> getCustomEnumSet() { + return customEnumSet; + } + + public void setCustomEnumSet(Set<CustomEnum> customEnumSet) { + this.customEnumSet = customEnumSet; + } + public static GenericBean createInstance(Set<Integer> integerSet) { return new GenericBean(integerSet); }
83d5b1e6a0280cc78625bacc2d3f7d1676c7385e
kotlin
Supported propagation for subclass of- j.u.Collection and similar classes.--
a
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaToKotlinMethodMap.java b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaToKotlinMethodMap.java new file mode 100644 index 0000000000000..cea6587792692 --- /dev/null +++ b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaToKotlinMethodMap.java @@ -0,0 +1,149 @@ +/* + * Copyright 2010-2012 JetBrains s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.jetbrains.jet.lang.resolve.java; + +import com.google.common.collect.*; +import com.intellij.openapi.util.Pair; +import com.intellij.psi.PsiMethod; +import com.intellij.psi.util.PsiFormatUtil; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.jet.lang.descriptors.ClassDescriptor; +import org.jetbrains.jet.lang.descriptors.FunctionDescriptor; +import org.jetbrains.jet.lang.resolve.DescriptorUtils; +import org.jetbrains.jet.lang.resolve.name.Name; +import org.jetbrains.jet.lang.types.JetType; +import org.jetbrains.jet.lang.types.TypeUtils; +import org.jetbrains.jet.lang.types.lang.KotlinBuiltIns; +import org.jetbrains.jet.resolve.DescriptorRenderer; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class JavaToKotlinMethodMap { + public static final JavaToKotlinMethodMap INSTANCE = new JavaToKotlinMethodMap(); + + private final JavaToKotlinMethodMapGenerated mapContainer = new 
JavaToKotlinMethodMapGenerated(); + + private JavaToKotlinMethodMap() { + } + + @NotNull + private static Set<ClassDescriptor> getAllSuperClasses(@NotNull ClassDescriptor klass) { + Set<JetType> allSupertypes = TypeUtils.getAllSupertypes(klass.getDefaultType()); + Set<ClassDescriptor> allSuperclasses = Sets.newHashSet(); + for (JetType supertype : allSupertypes) { + ClassDescriptor superclass = TypeUtils.getClassDescriptor(supertype); + assert superclass != null; + allSuperclasses.add(superclass); + } + return allSuperclasses; + } + + @NotNull + public List<FunctionDescriptor> getFunctions(@NotNull PsiMethod psiMethod, @NotNull ClassDescriptor containingClass) { + ImmutableCollection<ClassData> classDatas = mapContainer.map.get(psiMethod.getContainingClass().getQualifiedName()); + + List<FunctionDescriptor> result = Lists.newArrayList(); + + Set<ClassDescriptor> allSuperClasses = getAllSuperClasses(containingClass); + + String serializedPsiMethod = serializePsiMethod(psiMethod); + for (ClassData classData : classDatas) { + String expectedSerializedFunction = classData.method2Function.get(serializedPsiMethod); + if (expectedSerializedFunction == null) continue; + + ClassDescriptor kotlinClass = classData.kotlinClass; + if (!allSuperClasses.contains(kotlinClass)) continue; + + + Collection<FunctionDescriptor> functions = + kotlinClass.getDefaultType().getMemberScope().getFunctions(Name.identifier(psiMethod.getName())); + + for (FunctionDescriptor function : functions) { + if (expectedSerializedFunction.equals(serializeFunction(function))) { + result.add(function); + } + } + } + + return result; + } + + @NotNull + public static String serializePsiMethod(@NotNull PsiMethod psiMethod) { + String externalName = PsiFormatUtil.getExternalName(psiMethod); + assert externalName != null : "couldn't find external name for " + psiMethod.getText(); + return externalName; + } + + @NotNull + public static String serializeFunction(@NotNull FunctionDescriptor fun) { + return 
DescriptorRenderer.TEXT.render(fun); + } + + // used in generated code + static Pair<String, String> pair(String a, String b) { + return Pair.create(a, b); + } + + // used in generated code + static void put( + ImmutableMultimap.Builder<String, ClassData> builder, + String javaFqName, + String kotlinQualifiedName, + Pair<String, String>... methods2Functions + ) { + ImmutableMap<String, String> methods2FunctionsMap = pairs2Map(methods2Functions); + + ClassDescriptor kotlinClass; + if (kotlinQualifiedName.contains(".")) { // Map.Entry and MutableMap.MutableEntry + String[] kotlinNames = kotlinQualifiedName.split("\\."); + assert kotlinNames.length == 2 : "unexpected qualified name " + kotlinQualifiedName; + + ClassDescriptor outerClass = KotlinBuiltIns.getInstance().getBuiltInClassByName(Name.identifier(kotlinNames[0])); + kotlinClass = DescriptorUtils.getInnerClassByName(outerClass, kotlinNames[1]); + assert kotlinClass != null : "Class not found: " + kotlinQualifiedName; + } + else { + kotlinClass = KotlinBuiltIns.getInstance().getBuiltInClassByName(Name.identifier(kotlinQualifiedName)); + } + + builder.put(javaFqName, new ClassData(kotlinClass, methods2FunctionsMap)); + } + + private static ImmutableMap<String, String> pairs2Map(Pair<String, String>[] pairs) { + ImmutableMap.Builder<String, String> builder = ImmutableMap.builder(); + for (Pair<String, String> pair : pairs) { + builder.put(pair.first, pair.second); + } + return builder.build(); + } + + static class ClassData { + @NotNull + public final ClassDescriptor kotlinClass; + @NotNull + public Map<String, String> method2Function; + + public ClassData(@NotNull ClassDescriptor kotlinClass, @NotNull Map<String, String> method2Function) { + this.kotlinClass = kotlinClass; + this.method2Function = method2Function; + } + } +} diff --git a/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaToKotlinMethodMapGenerated.java 
b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaToKotlinMethodMapGenerated.java new file mode 100644 index 0000000000000..70de02d0f8a10 --- /dev/null +++ b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaToKotlinMethodMapGenerated.java @@ -0,0 +1,245 @@ +/* + * Copyright 2010-2012 JetBrains s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.jetbrains.jet.lang.resolve.java; + +import com.google.common.collect.ImmutableMultimap; + +import static org.jetbrains.jet.lang.resolve.java.JavaToKotlinMethodMap.*; + +/* This file is generated by org.jetbrains.jet.generators.GenerateJavaToKotlinMethodMap. DO NOT EDIT! */ +@SuppressWarnings("unchecked") +class JavaToKotlinMethodMapGenerated { + final ImmutableMultimap<String, JavaToKotlinMethodMap.ClassData> map; + + JavaToKotlinMethodMapGenerated() { + ImmutableMultimap.Builder<String, JavaToKotlinMethodMap.ClassData> b = ImmutableMultimap.builder(); + + put(b, "java.lang.String", "String", + pair("java.lang.String int compareTo(java.lang.String)", "public open fun compareTo(that : jet.String) : jet.Int defined in jet.String"), + pair("java.lang.String boolean equals(java.lang.Object)", "public final fun equals(other : jet.Any?) 
: jet.Boolean defined in jet.String"), + pair("java.lang.String java.lang.String toString()", "public open fun toString() : jet.String defined in jet.String") + ); + + put(b, "java.lang.CharSequence", "CharSequence", + pair("java.lang.CharSequence java.lang.String toString()", "public abstract fun toString() : jet.String defined in jet.CharSequence") + ); + + put(b, "java.lang.Throwable", "Throwable", + pair("java.lang.Throwable java.lang.Throwable getCause()", "public final fun getCause() : jet.Throwable? defined in jet.Throwable"), + pair("java.lang.Throwable java.lang.String getMessage()", "public final fun getMessage() : jet.String? defined in jet.Throwable"), + pair("java.lang.Throwable void printStackTrace()", "public final fun printStackTrace() : Unit defined in jet.Throwable") + ); + + put(b, "java.lang.Comparable", "Comparable", + pair("java.lang.Comparable int compareTo(T)", "public abstract fun compareTo(other : T) : jet.Int defined in jet.Comparable") + ); + + put(b, "java.lang.Enum", "Enum", + pair("java.lang.Enum java.lang.String name()", "public final fun name() : jet.String defined in jet.Enum"), + pair("java.lang.Enum int ordinal()", "public final fun ordinal() : jet.Int defined in jet.Enum") + ); + + put(b, "java.lang.Iterable", "Iterable", + pair("java.lang.Iterable java.util.Iterator<T> iterator()", "public abstract fun iterator() : jet.Iterator<T> defined in jet.Iterable") + ); + + put(b, "java.lang.Iterable", "MutableIterable", + pair("java.lang.Iterable java.util.Iterator<T> iterator()", "public abstract fun iterator() : jet.MutableIterator<T> defined in jet.MutableIterable") + ); + + put(b, "java.util.Iterator", "Iterator", + pair("java.util.Iterator boolean hasNext()", "public abstract fun hasNext() : jet.Boolean defined in jet.Iterator"), + pair("java.util.Iterator E next()", "public abstract fun next() : T defined in jet.Iterator") + ); + + put(b, "java.util.Iterator", "MutableIterator", + pair("java.util.Iterator boolean hasNext()", 
"public abstract fun hasNext() : jet.Boolean defined in jet.MutableIterator"), + pair("java.util.Iterator E next()", "public abstract fun next() : T defined in jet.MutableIterator"), + pair("java.util.Iterator void remove()", "public abstract fun remove() : Unit defined in jet.MutableIterator") + ); + + put(b, "java.util.Collection", "Collection", + pair("java.util.Collection boolean contains(java.lang.Object)", "public abstract fun contains(o : jet.Any?) : jet.Boolean defined in jet.Collection"), + pair("java.util.Collection boolean containsAll(java.util.Collection<?>)", "public abstract fun containsAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.Collection"), + pair("java.util.Collection boolean equals(java.lang.Object)", "public abstract fun equals(other : jet.Any?) : jet.Boolean defined in jet.Collection"), + pair("java.util.Collection int hashCode()", "public abstract fun hashCode() : jet.Int defined in jet.Collection"), + pair("java.util.Collection boolean isEmpty()", "public abstract fun isEmpty() : jet.Boolean defined in jet.Collection"), + pair("java.util.Collection java.util.Iterator<E> iterator()", "public abstract fun iterator() : jet.Iterator<E> defined in jet.Collection"), + pair("java.util.Collection int size()", "public abstract fun size() : jet.Int defined in jet.Collection"), + pair("java.util.Collection T[] toArray(T[])", "public abstract fun <T> toArray(a : jet.Array<out T>) : jet.Array<T> defined in jet.Collection"), + pair("java.util.Collection java.lang.Object[] toArray()", "public abstract fun toArray() : jet.Array<jet.Any?> defined in jet.Collection") + ); + + put(b, "java.util.Collection", "MutableCollection", + pair("java.util.Collection boolean add(E)", "public abstract fun add(e : E) : jet.Boolean defined in jet.MutableCollection"), + pair("java.util.Collection boolean addAll(java.util.Collection<? 
extends E>)", "public abstract fun addAll(c : jet.Collection<E>) : jet.Boolean defined in jet.MutableCollection"), + pair("java.util.Collection void clear()", "public abstract fun clear() : Unit defined in jet.MutableCollection"), + pair("java.util.Collection boolean contains(java.lang.Object)", "public abstract fun contains(o : jet.Any?) : jet.Boolean defined in jet.MutableCollection"), + pair("java.util.Collection boolean containsAll(java.util.Collection<?>)", "public abstract fun containsAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.MutableCollection"), + pair("java.util.Collection boolean equals(java.lang.Object)", "public abstract fun equals(other : jet.Any?) : jet.Boolean defined in jet.MutableCollection"), + pair("java.util.Collection int hashCode()", "public abstract fun hashCode() : jet.Int defined in jet.MutableCollection"), + pair("java.util.Collection boolean isEmpty()", "public abstract fun isEmpty() : jet.Boolean defined in jet.MutableCollection"), + pair("java.util.Collection java.util.Iterator<E> iterator()", "public abstract fun iterator() : jet.MutableIterator<E> defined in jet.MutableCollection"), + pair("java.util.Collection boolean remove(java.lang.Object)", "public abstract fun remove(o : jet.Any?) 
: jet.Boolean defined in jet.MutableCollection"), + pair("java.util.Collection boolean removeAll(java.util.Collection<?>)", "public abstract fun removeAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.MutableCollection"), + pair("java.util.Collection boolean retainAll(java.util.Collection<?>)", "public abstract fun retainAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.MutableCollection"), + pair("java.util.Collection int size()", "public abstract fun size() : jet.Int defined in jet.MutableCollection"), + pair("java.util.Collection T[] toArray(T[])", "public abstract fun <T> toArray(a : jet.Array<out T>) : jet.Array<T> defined in jet.MutableCollection"), + pair("java.util.Collection java.lang.Object[] toArray()", "public abstract fun toArray() : jet.Array<jet.Any?> defined in jet.MutableCollection") + ); + + put(b, "java.util.List", "List", + pair("java.util.List boolean contains(java.lang.Object)", "public abstract fun contains(o : jet.Any?) : jet.Boolean defined in jet.List"), + pair("java.util.List boolean containsAll(java.util.Collection<?>)", "public abstract fun containsAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.List"), + pair("java.util.List boolean equals(java.lang.Object)", "public abstract fun equals(other : jet.Any?) : jet.Boolean defined in jet.List"), + pair("java.util.List E get(int)", "public abstract fun get(index : jet.Int) : E defined in jet.List"), + pair("java.util.List int hashCode()", "public abstract fun hashCode() : jet.Int defined in jet.List"), + pair("java.util.List int indexOf(java.lang.Object)", "public abstract fun indexOf(o : jet.Any?) 
: jet.Int defined in jet.List"), + pair("java.util.List boolean isEmpty()", "public abstract fun isEmpty() : jet.Boolean defined in jet.List"), + pair("java.util.List java.util.Iterator<E> iterator()", "public abstract fun iterator() : jet.Iterator<E> defined in jet.List"), + pair("java.util.List int lastIndexOf(java.lang.Object)", "public abstract fun lastIndexOf(o : jet.Any?) : jet.Int defined in jet.List"), + pair("java.util.List java.util.ListIterator<E> listIterator()", "public abstract fun listIterator() : jet.ListIterator<E> defined in jet.List"), + pair("java.util.List java.util.ListIterator<E> listIterator(int)", "public abstract fun listIterator(index : jet.Int) : jet.ListIterator<E> defined in jet.List"), + pair("java.util.List int size()", "public abstract fun size() : jet.Int defined in jet.List"), + pair("java.util.List java.util.List<E> subList(int, int)", "public abstract fun subList(fromIndex : jet.Int, toIndex : jet.Int) : jet.List<E> defined in jet.List"), + pair("java.util.List T[] toArray(T[])", "public abstract fun <T> toArray(a : jet.Array<out T>) : jet.Array<T> defined in jet.List"), + pair("java.util.List java.lang.Object[] toArray()", "public abstract fun toArray() : jet.Array<jet.Any?> defined in jet.List") + ); + + put(b, "java.util.List", "MutableList", + pair("java.util.List boolean add(E)", "public abstract fun add(e : E) : jet.Boolean defined in jet.MutableList"), + pair("java.util.List void add(int, E)", "public abstract fun add(index : jet.Int, element : E) : Unit defined in jet.MutableList"), + pair("java.util.List boolean addAll(int, java.util.Collection<? extends E>)", "public abstract fun addAll(index : jet.Int, c : jet.Collection<E>) : jet.Boolean defined in jet.MutableList"), + pair("java.util.List boolean addAll(java.util.Collection<? 
extends E>)", "public abstract fun addAll(c : jet.Collection<E>) : jet.Boolean defined in jet.MutableList"), + pair("java.util.List void clear()", "public abstract fun clear() : Unit defined in jet.MutableList"), + pair("java.util.List boolean contains(java.lang.Object)", "public abstract fun contains(o : jet.Any?) : jet.Boolean defined in jet.MutableList"), + pair("java.util.List boolean containsAll(java.util.Collection<?>)", "public abstract fun containsAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.MutableList"), + pair("java.util.List boolean equals(java.lang.Object)", "public abstract fun equals(other : jet.Any?) : jet.Boolean defined in jet.MutableList"), + pair("java.util.List E get(int)", "public abstract fun get(index : jet.Int) : E defined in jet.MutableList"), + pair("java.util.List int hashCode()", "public abstract fun hashCode() : jet.Int defined in jet.MutableList"), + pair("java.util.List int indexOf(java.lang.Object)", "public abstract fun indexOf(o : jet.Any?) : jet.Int defined in jet.MutableList"), + pair("java.util.List boolean isEmpty()", "public abstract fun isEmpty() : jet.Boolean defined in jet.MutableList"), + pair("java.util.List java.util.Iterator<E> iterator()", "public abstract fun iterator() : jet.Iterator<E> defined in jet.MutableList"), + pair("java.util.List int lastIndexOf(java.lang.Object)", "public abstract fun lastIndexOf(o : jet.Any?) 
: jet.Int defined in jet.MutableList"), + pair("java.util.List java.util.ListIterator<E> listIterator()", "public abstract fun listIterator() : jet.MutableListIterator<E> defined in jet.MutableList"), + pair("java.util.List java.util.ListIterator<E> listIterator(int)", "public abstract fun listIterator(index : jet.Int) : jet.MutableListIterator<E> defined in jet.MutableList"), + pair("java.util.List E remove(int)", "public abstract fun remove(index : jet.Int) : E defined in jet.MutableList"), + pair("java.util.List boolean remove(java.lang.Object)", "public abstract fun remove(o : jet.Any?) : jet.Boolean defined in jet.MutableList"), + pair("java.util.List boolean removeAll(java.util.Collection<?>)", "public abstract fun removeAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.MutableList"), + pair("java.util.List boolean retainAll(java.util.Collection<?>)", "public abstract fun retainAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.MutableList"), + pair("java.util.List E set(int, E)", "public abstract fun set(index : jet.Int, element : E) : E defined in jet.MutableList"), + pair("java.util.List int size()", "public abstract fun size() : jet.Int defined in jet.MutableList"), + pair("java.util.List java.util.List<E> subList(int, int)", "public abstract fun subList(fromIndex : jet.Int, toIndex : jet.Int) : jet.MutableList<E> defined in jet.MutableList"), + pair("java.util.List T[] toArray(T[])", "public abstract fun <T> toArray(a : jet.Array<out T>) : jet.Array<T> defined in jet.MutableList"), + pair("java.util.List java.lang.Object[] toArray()", "public abstract fun toArray() : jet.Array<jet.Any?> defined in jet.MutableList") + ); + + put(b, "java.util.Set", "Set", + pair("java.util.Set boolean contains(java.lang.Object)", "public abstract fun contains(o : jet.Any?) 
: jet.Boolean defined in jet.Set"), + pair("java.util.Set boolean containsAll(java.util.Collection<?>)", "public abstract fun containsAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.Set"), + pair("java.util.Set boolean equals(java.lang.Object)", "public abstract fun equals(other : jet.Any?) : jet.Boolean defined in jet.Set"), + pair("java.util.Set int hashCode()", "public abstract fun hashCode() : jet.Int defined in jet.Set"), + pair("java.util.Set boolean isEmpty()", "public abstract fun isEmpty() : jet.Boolean defined in jet.Set"), + pair("java.util.Set java.util.Iterator<E> iterator()", "public abstract fun iterator() : jet.Iterator<E> defined in jet.Set"), + pair("java.util.Set int size()", "public abstract fun size() : jet.Int defined in jet.Set"), + pair("java.util.Set T[] toArray(T[])", "public abstract fun <T> toArray(a : jet.Array<out T>) : jet.Array<T> defined in jet.Set"), + pair("java.util.Set java.lang.Object[] toArray()", "public abstract fun toArray() : jet.Array<jet.Any?> defined in jet.Set") + ); + + put(b, "java.util.Set", "MutableSet", + pair("java.util.Set boolean add(E)", "public abstract fun add(e : E) : jet.Boolean defined in jet.MutableSet"), + pair("java.util.Set boolean addAll(java.util.Collection<? extends E>)", "public abstract fun addAll(c : jet.Collection<E>) : jet.Boolean defined in jet.MutableSet"), + pair("java.util.Set void clear()", "public abstract fun clear() : Unit defined in jet.MutableSet"), + pair("java.util.Set boolean contains(java.lang.Object)", "public abstract fun contains(o : jet.Any?) : jet.Boolean defined in jet.MutableSet"), + pair("java.util.Set boolean containsAll(java.util.Collection<?>)", "public abstract fun containsAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.MutableSet"), + pair("java.util.Set boolean equals(java.lang.Object)", "public abstract fun equals(other : jet.Any?) 
: jet.Boolean defined in jet.MutableSet"), + pair("java.util.Set int hashCode()", "public abstract fun hashCode() : jet.Int defined in jet.MutableSet"), + pair("java.util.Set boolean isEmpty()", "public abstract fun isEmpty() : jet.Boolean defined in jet.MutableSet"), + pair("java.util.Set java.util.Iterator<E> iterator()", "public abstract fun iterator() : jet.MutableIterator<E> defined in jet.MutableSet"), + pair("java.util.Set boolean remove(java.lang.Object)", "public abstract fun remove(o : jet.Any?) : jet.Boolean defined in jet.MutableSet"), + pair("java.util.Set boolean removeAll(java.util.Collection<?>)", "public abstract fun removeAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.MutableSet"), + pair("java.util.Set boolean retainAll(java.util.Collection<?>)", "public abstract fun retainAll(c : jet.Collection<jet.Any?>) : jet.Boolean defined in jet.MutableSet"), + pair("java.util.Set int size()", "public abstract fun size() : jet.Int defined in jet.MutableSet"), + pair("java.util.Set T[] toArray(T[])", "public abstract fun <T> toArray(a : jet.Array<out T>) : jet.Array<T> defined in jet.MutableSet"), + pair("java.util.Set java.lang.Object[] toArray()", "public abstract fun toArray() : jet.Array<jet.Any?> defined in jet.MutableSet") + ); + + put(b, "java.util.Map", "Map", + pair("java.util.Map boolean containsKey(java.lang.Object)", "public abstract fun containsKey(key : jet.Any?) : jet.Boolean defined in jet.Map"), + pair("java.util.Map boolean containsValue(java.lang.Object)", "public abstract fun containsValue(value : jet.Any?) : jet.Boolean defined in jet.Map"), + pair("java.util.Map java.util.Set<java.util.Map.Entry<K,V>> entrySet()", "public abstract fun entrySet() : jet.Set<jet.Map.Entry<K, V>> defined in jet.Map"), + pair("java.util.Map V get(java.lang.Object)", "public abstract fun get(key : jet.Any?) : V? 
defined in jet.Map"), + pair("java.util.Map boolean isEmpty()", "public abstract fun isEmpty() : jet.Boolean defined in jet.Map"), + pair("java.util.Map java.util.Set<K> keySet()", "public abstract fun keySet() : jet.Set<K> defined in jet.Map"), + pair("java.util.Map int size()", "public abstract fun size() : jet.Int defined in jet.Map"), + pair("java.util.Map java.util.Collection<V> values()", "public abstract fun values() : jet.Collection<V> defined in jet.Map") + ); + + put(b, "java.util.Map", "MutableMap", + pair("java.util.Map void clear()", "public abstract fun clear() : Unit defined in jet.MutableMap"), + pair("java.util.Map boolean containsKey(java.lang.Object)", "public abstract fun containsKey(key : jet.Any?) : jet.Boolean defined in jet.MutableMap"), + pair("java.util.Map boolean containsValue(java.lang.Object)", "public abstract fun containsValue(value : jet.Any?) : jet.Boolean defined in jet.MutableMap"), + pair("java.util.Map java.util.Set<java.util.Map.Entry<K,V>> entrySet()", "public abstract fun entrySet() : jet.MutableSet<jet.MutableMap.MutableEntry<K, V>> defined in jet.MutableMap"), + pair("java.util.Map V get(java.lang.Object)", "public abstract fun get(key : jet.Any?) : V? defined in jet.MutableMap"), + pair("java.util.Map boolean isEmpty()", "public abstract fun isEmpty() : jet.Boolean defined in jet.MutableMap"), + pair("java.util.Map java.util.Set<K> keySet()", "public abstract fun keySet() : jet.MutableSet<K> defined in jet.MutableMap"), + pair("java.util.Map V put(K, V)", "public abstract fun put(key : K, value : V) : V? defined in jet.MutableMap"), + pair("java.util.Map void putAll(java.util.Map<? extends K,? extends V>)", "public abstract fun putAll(m : jet.Map<out K, out V>) : Unit defined in jet.MutableMap"), + pair("java.util.Map V remove(java.lang.Object)", "public abstract fun remove(key : jet.Any?) : V? 
defined in jet.MutableMap"), + pair("java.util.Map int size()", "public abstract fun size() : jet.Int defined in jet.MutableMap"), + pair("java.util.Map java.util.Collection<V> values()", "public abstract fun values() : jet.MutableCollection<V> defined in jet.MutableMap") + ); + + put(b, "java.util.Map.Entry", "Map.Entry", + pair("java.util.Map.Entry boolean equals(java.lang.Object)", "public abstract fun equals(other : jet.Any?) : jet.Boolean defined in jet.Map.Entry"), + pair("java.util.Map.Entry K getKey()", "public abstract fun getKey() : K defined in jet.Map.Entry"), + pair("java.util.Map.Entry V getValue()", "public abstract fun getValue() : V defined in jet.Map.Entry"), + pair("java.util.Map.Entry int hashCode()", "public abstract fun hashCode() : jet.Int defined in jet.Map.Entry") + ); + + put(b, "java.util.Map.Entry", "MutableMap.MutableEntry", + pair("java.util.Map.Entry boolean equals(java.lang.Object)", "public abstract fun equals(other : jet.Any?) : jet.Boolean defined in jet.MutableMap.MutableEntry"), + pair("java.util.Map.Entry K getKey()", "public abstract fun getKey() : K defined in jet.MutableMap.MutableEntry"), + pair("java.util.Map.Entry V getValue()", "public abstract fun getValue() : V defined in jet.MutableMap.MutableEntry"), + pair("java.util.Map.Entry int hashCode()", "public abstract fun hashCode() : jet.Int defined in jet.MutableMap.MutableEntry"), + pair("java.util.Map.Entry V setValue(V)", "public abstract fun setValue(value : V) : V defined in jet.MutableMap.MutableEntry") + ); + + put(b, "java.util.ListIterator", "ListIterator", + pair("java.util.ListIterator boolean hasNext()", "public abstract fun hasNext() : jet.Boolean defined in jet.ListIterator"), + pair("java.util.ListIterator boolean hasPrevious()", "public abstract fun hasPrevious() : jet.Boolean defined in jet.ListIterator"), + pair("java.util.ListIterator E next()", "public abstract fun next() : T defined in jet.ListIterator"), + pair("java.util.ListIterator int 
nextIndex()", "public abstract fun nextIndex() : jet.Int defined in jet.ListIterator"), + pair("java.util.ListIterator E previous()", "public abstract fun previous() : T defined in jet.ListIterator"), + pair("java.util.ListIterator int previousIndex()", "public abstract fun previousIndex() : jet.Int defined in jet.ListIterator") + ); + + put(b, "java.util.ListIterator", "MutableListIterator", + pair("java.util.ListIterator void add(E)", "public abstract fun add(e : T) : Unit defined in jet.MutableListIterator"), + pair("java.util.ListIterator boolean hasNext()", "public abstract fun hasNext() : jet.Boolean defined in jet.MutableListIterator"), + pair("java.util.ListIterator boolean hasPrevious()", "public abstract fun hasPrevious() : jet.Boolean defined in jet.MutableListIterator"), + pair("java.util.ListIterator E next()", "public abstract fun next() : T defined in jet.MutableListIterator"), + pair("java.util.ListIterator int nextIndex()", "public abstract fun nextIndex() : jet.Int defined in jet.MutableListIterator"), + pair("java.util.ListIterator E previous()", "public abstract fun previous() : T defined in jet.MutableListIterator"), + pair("java.util.ListIterator int previousIndex()", "public abstract fun previousIndex() : jet.Int defined in jet.MutableListIterator"), + pair("java.util.ListIterator void remove()", "public abstract fun remove() : Unit defined in jet.MutableListIterator"), + pair("java.util.ListIterator void set(E)", "public abstract fun set(e : T) : Unit defined in jet.MutableListIterator") + ); + + map = b.build(); + } +} diff --git a/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/kotlinSignature/SignaturesPropagationData.java b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/kotlinSignature/SignaturesPropagationData.java index 4eecef9f415d4..238e5343ed568 100644 --- a/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/kotlinSignature/SignaturesPropagationData.java +++ 
b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/kotlinSignature/SignaturesPropagationData.java @@ -28,7 +28,10 @@ import org.jetbrains.jet.lang.resolve.BindingTrace; import org.jetbrains.jet.lang.resolve.java.CollectionClassMapping; import org.jetbrains.jet.lang.resolve.java.JavaDescriptorResolver; +import org.jetbrains.jet.lang.resolve.java.JavaToKotlinClassMap; +import org.jetbrains.jet.lang.resolve.java.JavaToKotlinMethodMap; import org.jetbrains.jet.lang.resolve.java.wrapper.PsiMethodWrapper; +import org.jetbrains.jet.lang.resolve.name.FqName; import org.jetbrains.jet.lang.resolve.name.FqNameUnsafe; import org.jetbrains.jet.lang.resolve.scopes.JetScope; import org.jetbrains.jet.lang.types.*; @@ -50,13 +53,14 @@ public class SignaturesPropagationData { private final Map<TypeParameterDescriptor, TypeParameterDescriptorImpl> autoTypeParameterToModified; public SignaturesPropagationData( + @NotNull ClassDescriptor containingClass, @NotNull JetType autoReturnType, // type built by JavaTypeTransformer from Java signature and @NotNull annotations @NotNull JavaDescriptorResolver.ValueParameterDescriptors autoValueParameters, // descriptors built by parameters resolver @NotNull List<TypeParameterDescriptor> autoTypeParameters, // descriptors built by signature resolver @NotNull PsiMethodWrapper method, @NotNull BindingTrace trace ) { - superFunctions = getSuperFunctionsForMethod(method, trace); + superFunctions = getSuperFunctionsForMethod(method, trace, containingClass); autoTypeParameterToModified = SignaturesUtil.recreateTypeParametersAndReturnMapping(autoTypeParameters); @@ -187,7 +191,8 @@ public JetType fun(FunctionDescriptor superFunction) { private static List<FunctionDescriptor> getSuperFunctionsForMethod( @NotNull PsiMethodWrapper method, - @NotNull BindingTrace trace + @NotNull BindingTrace trace, + @NotNull ClassDescriptor containingClass ) { List<FunctionDescriptor> superFunctions = Lists.newArrayList(); for (HierarchicalMethodSignature 
superSignature : method.getPsiMethod().getHierarchicalMethodSignature().getSuperSignatures()) { @@ -196,15 +201,22 @@ private static List<FunctionDescriptor> getSuperFunctionsForMethod( superFunctions.add(((FunctionDescriptor) superFun)); } else { - // TODO assert is temporarily disabled - // It fails because of bug in IDEA on Mac: it adds invalid roots to JDK classpath and it leads to the problem that - // getHierarchicalMethodSignature() returns elements from invalid virtual files - - // Function descriptor can't be find iff superclass is java.lang.Collection or similar (translated to jet.* collections) - //assert !JavaToKotlinClassMap.getInstance().mapPlatformClass( - // new FqName(superSignature.getMethod().getContainingClass().getQualifiedName())).isEmpty(): - // "Can't find super function for " + method.getPsiMethod() + " defined in " - // + method.getPsiMethod().getContainingClass(); + String fqName = superSignature.getMethod().getContainingClass().getQualifiedName(); + assert fqName != null; + Collection<ClassDescriptor> platformClasses = JavaToKotlinClassMap.getInstance().mapPlatformClass(new FqName(fqName)); + if (platformClasses.isEmpty()) { + // TODO assert is temporarily disabled + // It fails because of bug in IDEA on Mac: it adds invalid roots to JDK classpath and it leads to the problem that + // getHierarchicalMethodSignature() returns elements from invalid virtual files + + //assert false : "Can't find super function for " + method.getPsiMethod() + + // " defined in " + method.getPsiMethod().getContainingClass() + } + else { + List<FunctionDescriptor> funsFromMap = + JavaToKotlinMethodMap.INSTANCE.getFunctions(superSignature.getMethod(), containingClass); + superFunctions.addAll(funsFromMap); + } } } diff --git a/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/resolver/JavaFunctionResolver.java b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/resolver/JavaFunctionResolver.java index fb1c73aca457c..a528a0577ec0c 
100644 --- a/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/resolver/JavaFunctionResolver.java +++ b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/resolver/JavaFunctionResolver.java @@ -144,8 +144,8 @@ private SimpleFunctionDescriptor resolveMethodToFunctionDescriptor( List<FunctionDescriptor> superFunctions; if (ownerDescriptor instanceof ClassDescriptor) { - SignaturesPropagationData signaturesPropagationData = - new SignaturesPropagationData(returnType, valueParameterDescriptors, methodTypeParameters, method, trace); + SignaturesPropagationData signaturesPropagationData = new SignaturesPropagationData( + (ClassDescriptor) ownerDescriptor, returnType, valueParameterDescriptors, methodTypeParameters, method, trace); superFunctions = signaturesPropagationData.getSuperFunctions(); returnType = signaturesPropagationData.getModifiedReturnType(); @@ -214,6 +214,9 @@ private static void checkFunctionsOverrideCorrectly( ((ClassDescriptor) functionDescriptor.getContainingDeclaration()).getDefaultType()); FunctionDescriptor superFunctionSubstituted = superFunction.substitute(substitutor); + assert superFunctionSubstituted != null : + "Couldn't substitute super function: " + superFunction + ", substitutor = " + substitutor; + OverrideCompatibilityInfo.Result overridableResult = isOverridableBy(superFunctionSubstituted, functionDescriptor).getResult(); boolean paramsOk = overridableResult == OverrideCompatibilityInfo.Result.OVERRIDABLE; diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/types/lang/KotlinBuiltIns.java b/compiler/frontend/src/org/jetbrains/jet/lang/types/lang/KotlinBuiltIns.java index 75e97187252d0..c8d2153ce2534 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/types/lang/KotlinBuiltIns.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/types/lang/KotlinBuiltIns.java @@ -334,12 +334,17 @@ public JetScope getBuiltInsScope() { 
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @NotNull - private ClassDescriptor getBuiltInClassByName(@NotNull String simpleName) { - ClassifierDescriptor classifier = getBuiltInsScope().getClassifier(Name.identifier(simpleName)); + public ClassDescriptor getBuiltInClassByName(@NotNull Name simpleName) { + ClassifierDescriptor classifier = getBuiltInsScope().getClassifier(simpleName); assert classifier instanceof ClassDescriptor : "Must be a class descriptor " + simpleName + ", but was " + classifier; return (ClassDescriptor) classifier; } + @NotNull + private ClassDescriptor getBuiltInClassByName(@NotNull String simpleName) { + return getBuiltInClassByName(Name.identifier(simpleName)); + } + // Special @NotNull diff --git a/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.java b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.java new file mode 100644 index 0000000000000..d5e36c9d1635b --- /dev/null +++ b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.java @@ -0,0 +1,8 @@ +package test; + +import java.util.*; + +public interface SubclassOfCollection<E> extends Collection<E> { + Iterator<E> iterator(); + +} diff --git a/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.kt b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.kt new file mode 100644 index 0000000000000..d4159b0dd5dc3 --- /dev/null +++ b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.kt @@ -0,0 +1,5 @@ +package test + +public trait SubclassOfCollection<E>: MutableCollection<E> { + override fun iterator() : MutableIterator<E> +} diff --git a/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.txt 
b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.txt new file mode 100644 index 0000000000000..c5005631e02fd --- /dev/null +++ b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.txt @@ -0,0 +1,17 @@ +namespace test + +public abstract trait test.SubclassOfCollection</*0*/ E : jet.Any?> : jet.MutableCollection<E> { + public abstract override /*1*/ /*fake_override*/ fun add(/*0*/ e: E): jet.Boolean + public abstract override /*1*/ /*fake_override*/ fun addAll(/*0*/ c: jet.Collection<E>): jet.Boolean + public abstract override /*1*/ /*fake_override*/ fun clear(): jet.Tuple0 + public abstract override /*1*/ /*fake_override*/ fun contains(/*0*/ o: jet.Any?): jet.Boolean + public abstract override /*1*/ /*fake_override*/ fun containsAll(/*0*/ c: jet.Collection<jet.Any?>): jet.Boolean + public abstract override /*1*/ /*fake_override*/ fun isEmpty(): jet.Boolean + public abstract override /*1*/ fun iterator(): jet.MutableIterator<E> + public abstract override /*1*/ /*fake_override*/ fun remove(/*0*/ o: jet.Any?): jet.Boolean + public abstract override /*1*/ /*fake_override*/ fun removeAll(/*0*/ c: jet.Collection<jet.Any?>): jet.Boolean + public abstract override /*1*/ /*fake_override*/ fun retainAll(/*0*/ c: jet.Collection<jet.Any?>): jet.Boolean + public abstract override /*1*/ /*fake_override*/ fun size(): jet.Int + public abstract override /*1*/ /*fake_override*/ fun toArray(): jet.Array<jet.Any?> + public abstract override /*1*/ /*fake_override*/ fun </*0*/ T : jet.Any?>toArray(/*0*/ a: jet.Array<out T>): jet.Array<T> +} diff --git a/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.java b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.java new file mode 100644 index 0000000000000..0924783402ae3 --- /dev/null +++ b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.java @@ -0,0 +1,7 @@ +package test; + 
+import java.util.*; + +public interface SubclassOfMapEntry<K, V> extends Map.Entry<K, V> { + V setValue(V v); +} diff --git a/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.kt b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.kt new file mode 100644 index 0000000000000..73127c5cab896 --- /dev/null +++ b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.kt @@ -0,0 +1,5 @@ +package test + +public trait SubclassOfMapEntry<K, V>: MutableMap.MutableEntry<K, V> { + override fun setValue(p0: V) : V +} diff --git a/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.txt b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.txt new file mode 100644 index 0000000000000..0c728cfaf3dc9 --- /dev/null +++ b/compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.txt @@ -0,0 +1,7 @@ +namespace test + +public abstract trait test.SubclassOfMapEntry</*0*/ K : jet.Any?, /*1*/ V : jet.Any?> : jet.MutableMap.MutableEntry<K, V> { + public abstract override /*1*/ /*fake_override*/ fun getKey(): K + public abstract override /*1*/ /*fake_override*/ fun getValue(): V + public abstract override /*1*/ fun setValue(/*0*/ p0: V): V +} diff --git a/compiler/testData/loadJava/modality/ModalityOfFakeOverrides.txt b/compiler/testData/loadJava/modality/ModalityOfFakeOverrides.txt index 0f79be88c2f99..aaf74fc13a53b 100644 --- a/compiler/testData/loadJava/modality/ModalityOfFakeOverrides.txt +++ b/compiler/testData/loadJava/modality/ModalityOfFakeOverrides.txt @@ -18,7 +18,7 @@ public open class test.ModalityOfFakeOverrides : java.util.AbstractList<jet.Stri public open override /*1*/ /*fake_override*/ fun listIterator(/*0*/ p0: jet.Int): jet.MutableListIterator<jet.String> protected final override /*1*/ /*fake_override*/ var modCount: jet.Int public open override /*1*/ /*fake_override*/ fun remove(/*0*/ p0: jet.Any?): 
jet.Boolean - public open override /*1*/ /*fake_override*/ fun remove(/*0*/ p0: jet.Int): jet.String? + public open override /*1*/ /*fake_override*/ fun remove(/*0*/ p0: jet.Int): jet.String public open override /*1*/ /*fake_override*/ fun removeAll(/*0*/ p0: jet.Collection<jet.Any?>): jet.Boolean protected open override /*1*/ /*fake_override*/ fun removeRange(/*0*/ p0: jet.Int, /*1*/ p1: jet.Int): jet.Tuple0 public open override /*1*/ /*fake_override*/ fun retainAll(/*0*/ p0: jet.Collection<jet.Any?>): jet.Boolean diff --git a/compiler/tests/org/jetbrains/jet/jvm/compiler/LoadJavaTestGenerated.java b/compiler/tests/org/jetbrains/jet/jvm/compiler/LoadJavaTestGenerated.java index 09f489924fed1..cd3dd08bbb775 100644 --- a/compiler/tests/org/jetbrains/jet/jvm/compiler/LoadJavaTestGenerated.java +++ b/compiler/tests/org/jetbrains/jet/jvm/compiler/LoadJavaTestGenerated.java @@ -649,6 +649,16 @@ public void testSameProjectionKind() throws Exception { doTest("compiler/testData/loadJava/kotlinSignature/propagation/return/SameProjectionKind.java"); } + @TestMetadata("SubclassOfCollection.java") + public void testSubclassOfCollection() throws Exception { + doTest("compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.java"); + } + + @TestMetadata("SubclassOfMapEntry.java") + public void testSubclassOfMapEntry() throws Exception { + doTest("compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.java"); + } + @TestMetadata("TwoSuperclassesConflictingProjectionKinds.java") public void testTwoSuperclassesConflictingProjectionKinds() throws Exception { doTest("compiler/testData/loadJava/kotlinSignature/propagation/return/TwoSuperclassesConflictingProjectionKinds.java"); diff --git a/compiler/tests/org/jetbrains/jet/lang/resolve/lazy/LazyResolveNamespaceComparingTestGenerated.java b/compiler/tests/org/jetbrains/jet/lang/resolve/lazy/LazyResolveNamespaceComparingTestGenerated.java index c22111f4f4093..736054fa0d33d 100644 --- 
a/compiler/tests/org/jetbrains/jet/lang/resolve/lazy/LazyResolveNamespaceComparingTestGenerated.java +++ b/compiler/tests/org/jetbrains/jet/lang/resolve/lazy/LazyResolveNamespaceComparingTestGenerated.java @@ -1539,6 +1539,16 @@ public void testSameProjectionKind() throws Exception { doTestSinglePackage("compiler/testData/loadJava/kotlinSignature/propagation/return/SameProjectionKind.kt"); } + @TestMetadata("SubclassOfCollection.kt") + public void testSubclassOfCollection() throws Exception { + doTestSinglePackage("compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfCollection.kt"); + } + + @TestMetadata("SubclassOfMapEntry.kt") + public void testSubclassOfMapEntry() throws Exception { + doTestSinglePackage("compiler/testData/loadJava/kotlinSignature/propagation/return/SubclassOfMapEntry.kt"); + } + @TestMetadata("TwoSuperclassesConflictingProjectionKinds.kt") public void testTwoSuperclassesConflictingProjectionKinds() throws Exception { doTestSinglePackage("compiler/testData/loadJava/kotlinSignature/propagation/return/TwoSuperclassesConflictingProjectionKinds.kt"); diff --git a/generators/generators.iml b/generators/generators.iml index a02cc242438e9..06ad1f8bf7f10 100644 --- a/generators/generators.iml +++ b/generators/generators.iml @@ -14,6 +14,7 @@ <orderEntry type="module" module-name="frontend" scope="TEST" /> <orderEntry type="module" module-name="frontend.java" scope="TEST" /> <orderEntry type="module" module-name="injector-generator" scope="TEST" /> + <orderEntry type="module" module-name="cli" scope="TEST" /> </component> </module> diff --git a/generators/org/jetbrains/jet/generators/GenerateJavaToKotlinMethodMap.java b/generators/org/jetbrains/jet/generators/GenerateJavaToKotlinMethodMap.java new file mode 100644 index 0000000000000..7cfbc1d08c962 --- /dev/null +++ b/generators/org/jetbrains/jet/generators/GenerateJavaToKotlinMethodMap.java @@ -0,0 +1,248 @@ +/* + * Copyright 2010-2012 JetBrains s.r.o. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.jetbrains.jet.generators; + +import com.google.common.collect.Lists; +import com.intellij.openapi.components.ServiceManager; +import com.intellij.openapi.project.Project; +import com.intellij.openapi.util.Pair; +import com.intellij.openapi.util.io.FileUtil; +import com.intellij.psi.PsiClass; +import com.intellij.psi.PsiMethod; +import com.intellij.psi.impl.file.impl.JavaFileManager; +import com.intellij.psi.search.GlobalSearchScope; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.jetbrains.jet.CompileCompilerDependenciesTest; +import org.jetbrains.jet.ConfigurationKind; +import org.jetbrains.jet.TestJdkKind; +import org.jetbrains.jet.cli.jvm.compiler.CompileEnvironmentUtil; +import org.jetbrains.jet.cli.jvm.compiler.JetCoreEnvironment; +import org.jetbrains.jet.lang.descriptors.ClassDescriptor; +import org.jetbrains.jet.lang.descriptors.DeclarationDescriptor; +import org.jetbrains.jet.lang.descriptors.FunctionDescriptor; +import org.jetbrains.jet.lang.resolve.DescriptorUtils; +import org.jetbrains.jet.lang.resolve.java.JavaToKotlinClassMapBuilder; +import org.jetbrains.jet.lang.types.lang.KotlinBuiltIns; +import org.jetbrains.jet.resolve.DescriptorRenderer; +import org.jetbrains.jet.utils.Printer; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.util.Collections; +import java.util.Comparator; 
+import java.util.List; + +import static org.jetbrains.jet.lang.resolve.java.JavaToKotlinMethodMap.serializeFunction; +import static org.jetbrains.jet.lang.resolve.java.JavaToKotlinMethodMap.serializePsiMethod; + +public class GenerateJavaToKotlinMethodMap { + + public static final String BUILTINS_FQNAME_PREFIX = KotlinBuiltIns.BUILT_INS_PACKAGE_FQ_NAME.getFqName() + "."; + + public static void main(String[] args) throws IOException { + JetCoreEnvironment coreEnvironment = new JetCoreEnvironment( + CompileEnvironmentUtil.createMockDisposable(), + CompileCompilerDependenciesTest.compilerConfigurationForTests(ConfigurationKind.JDK_ONLY, TestJdkKind.FULL_JDK)); + + StringBuilder buf = new StringBuilder(); + Printer printer = new Printer(buf); + + printer.print(FileUtil.loadFile(new File("injector-generator/copyright.txt"))) + .println() + .println("package org.jetbrains.jet.lang.resolve.java;") + .println() + .println("import com.google.common.collect.ImmutableMultimap;") + .println() + .println("import static org.jetbrains.jet.lang.resolve.java.JavaToKotlinMethodMap.*;") + .println() + .println("/* This file is generated by ", GenerateJavaToKotlinMethodMap.class.getName(), ". DO NOT EDIT! 
*/") + .println("@SuppressWarnings(\"unchecked\")") + .println("class JavaToKotlinMethodMapGenerated {").pushIndent() + .println("final ImmutableMultimap<String, JavaToKotlinMethodMap.ClassData> map;") + .println() + .println("JavaToKotlinMethodMapGenerated() {").pushIndent() + .println("ImmutableMultimap.Builder<String, JavaToKotlinMethodMap.ClassData> b = ImmutableMultimap.builder();") + .println(); + + MyMapBuilder builder = new MyMapBuilder(coreEnvironment.getProject()); + printer.printWithNoIndent(builder.toString()); + + printer.println("map = b.build();"); + printer.popIndent().println("}"); + printer.popIndent().println("}"); + + //noinspection IOResourceOpenedButNotSafelyClosed + FileWriter out = + new FileWriter("compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaToKotlinMethodMapGenerated.java"); + + out.write(buf.toString()); + out.close(); + } + + private static class MyMapBuilder extends JavaToKotlinClassMapBuilder { + private final Project project; + private final StringBuilder buf = new StringBuilder(); + private final Printer printer = new Printer(buf).pushIndent().pushIndent(); + + public MyMapBuilder(@NotNull Project project) { + this.project = project; + init(); + } + + @Override + protected void register(@NotNull Class<?> javaClass, @NotNull ClassDescriptor kotlinDescriptor, @NotNull Direction direction) { + processClass(javaClass, kotlinDescriptor); + } + + @Override + protected void register(@NotNull Class<?> javaClass, + @NotNull ClassDescriptor kotlinDescriptor, + @NotNull ClassDescriptor kotlinMutableDescriptor, + @NotNull Direction direction) { + processClass(javaClass, kotlinDescriptor); + processClass(javaClass, kotlinMutableDescriptor); + } + + private void processClass(@NotNull Class<?> javaClass, @NotNull ClassDescriptor kotlinClass) { + JavaFileManager javaFileManager = ServiceManager.getService(project, JavaFileManager.class); + PsiClass psiClass = javaFileManager.findClass(javaClass.getCanonicalName(), 
GlobalSearchScope.allScope(project)); + assert psiClass != null; + + List<Pair<PsiMethod, FunctionDescriptor>> methods2Functions = getClassMethods2Functions(kotlinClass, psiClass); + if (!methods2Functions.isEmpty()) { + appendBeforeClass(kotlinClass, psiClass); + appendClass(methods2Functions); + appendAfterClass(); + } + } + + private static List<Pair<PsiMethod, FunctionDescriptor>> getClassMethods2Functions( + @NotNull ClassDescriptor kotlinClass, + @NotNull PsiClass psiClass + ) { + PsiMethod[] methods = psiClass.getMethods(); + + List<Pair<PsiMethod, FunctionDescriptor>> result = Lists.newArrayList(); + + for (DeclarationDescriptor member : kotlinClass.getDefaultType().getMemberScope().getAllDescriptors()) { + if (!(member instanceof FunctionDescriptor) || member.getContainingDeclaration() != kotlinClass) { + continue; + } + + FunctionDescriptor fun = (FunctionDescriptor) member; + PsiMethod foundMethod = findMethod(methods, fun); + if (foundMethod != null) { + result.add(Pair.create(foundMethod, fun)); + } + } + + Collections.sort(result, new Comparator<Pair<PsiMethod, FunctionDescriptor>>() { + @Override + public int compare(Pair<PsiMethod, FunctionDescriptor> pair1, Pair<PsiMethod, FunctionDescriptor> pair2) { + PsiMethod method1 = pair1.first; + PsiMethod method2 = pair2.first; + + String name1 = method1.getName(); + String name2 = method2.getName(); + if (!name1.equals(name2)) { + return name1.compareTo(name2); + } + + String serialized1 = serializePsiMethod(method1); + String serialized2 = serializePsiMethod(method2); + return serialized1.compareTo(serialized2); + } + }); + return result; + } + + private static boolean match(@NotNull PsiMethod method, @NotNull FunctionDescriptor fun) { + // Compare method an function by name and parameters count. For all methods except one (List.remove) it is enough. 
+ // If this changes, there will be assertion error in findMethod() + if (method.getName().equals(fun.getName().getIdentifier()) + && method.getParameterList().getParametersCount() == fun.getValueParameters().size()) { + + // "special case": remove(Int) and remove(Any?) in MutableList + if (method.getName().equals("remove") && method.getContainingClass().getName().equals("List")) { + String psiType = method.getParameterList().getParameters()[0].getType().getPresentableText(); + String jetType = DescriptorRenderer.TEXT.renderTypeWithShortNames(fun.getValueParameters().get(0).getType()); + String string = psiType + "|" + jetType; + + return "int|Int".equals(string) || "Object|Any?".equals(string); + } + + return true; + } + return false; + } + + @Nullable + private static PsiMethod findMethod(@NotNull PsiMethod[] methods, @NotNull FunctionDescriptor fun) { + PsiMethod found = null; + for (PsiMethod method : methods) { + if (match(method, fun)) { + if (found != null) { + throw new AssertionError("Duplicate for " + fun); + } + + found = method; + } + } + + return found; + } + + private void appendBeforeClass(@NotNull ClassDescriptor kotlinClass, @NotNull PsiClass psiClass) { + String psiFqName = psiClass.getQualifiedName(); + String kotlinFqName = DescriptorUtils.getFQName(kotlinClass).toSafe().getFqName(); + + assert kotlinFqName.startsWith(BUILTINS_FQNAME_PREFIX); + String kotlinSubQualifiedName = kotlinFqName.substring(BUILTINS_FQNAME_PREFIX.length()); + printer.println("put(b, \"", psiFqName, "\", \"", kotlinSubQualifiedName, "\",").pushIndent(); + } + + private void appendClass(@NotNull List<Pair<PsiMethod, FunctionDescriptor>> methods2Functions) { + int index = 0; + for (Pair<PsiMethod, FunctionDescriptor> method2Function : methods2Functions) { + printer.print("pair(\"", serializePsiMethod(method2Function.first), "\", \"", serializeFunction(method2Function.second), + "\")"); + + if (index != methods2Functions.size() - 1) { + printer.printWithNoIndent(","); + } + 
+ printer.println(); + + index++; + } + } + + private void appendAfterClass() { + printer.popIndent().println(");").println(); + } + + + public String toString() { + return buf.toString(); + } + } + + private GenerateJavaToKotlinMethodMap() { + } +} diff --git a/jdk-annotations/java/util/annotations.xml b/jdk-annotations/java/util/annotations.xml index 3251649264e78..c1f62eced16e3 100644 --- a/jdk-annotations/java/util/annotations.xml +++ b/jdk-annotations/java/util/annotations.xml @@ -1,4 +1,10 @@ <root> + <item name='java.util.Dictionary V put(K, V)'> + <annotation name='jet.runtime.typeinfo.KotlinSignature'> + <val name="value" val="&quot;fun put(key : K, value : V) : V?&quot;"/> + </annotation> + </item> + <item name='java.util.AbstractList boolean add(E)'> <annotation name='jet.runtime.typeinfo.KotlinSignature'> <val name="value" val="&quot;fun add(e : E) : Boolean&quot;"/> @@ -679,12 +685,12 @@ </item> <item name='java.util.AbstractMap java.util.Set<java.util.Map.Entry<K,V>> entrySet()'> <annotation name='jet.runtime.typeinfo.KotlinSignature'> - <val name="value" val="&quot;fun entrySet() : Set&lt;Map.Entry&lt;K, V&gt;&gt;&quot;"/> + <val name="value" val="&quot;fun entrySet() : MutableSet&lt;MutableMap.MutableEntry&lt;K, V&gt;&gt;&quot;"/> </annotation> </item> <item name='java.util.AbstractMap java.util.Set<K> keySet()'> <annotation name='jet.runtime.typeinfo.KotlinSignature'> - <val name="value" val="&quot;fun keySet() : Set&lt;K&gt;&quot;"/> + <val name="value" val="&quot;fun keySet() : MutableSet&lt;K&gt;&quot;"/> </annotation> </item> <item name='java.util.AbstractMap V put(K, V)'> @@ -699,7 +705,7 @@ </item> <item name='java.util.AbstractMap java.util.Collection<V> values()'> <annotation name='jet.runtime.typeinfo.KotlinSignature'> - <val name="value" val="&quot;fun values() : Collection&lt;V&gt;&quot;"/> + <val name="value" val="&quot;fun values() : MutableCollection&lt;V&gt;&quot;"/> </annotation> </item> <item name='java.util.AbstractSequentialList 
void add(int, E)'>
50ccd7ec86dc105c4c6030cd152423ec7b1483a2
restlet-framework-java
JAX-RS-Extension: - added javadoc to util methods.--
p
https://github.com/restlet/restlet-framework-java
diff --git a/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/util/Util.java b/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/util/Util.java index 1378510e3c..0df57b574c 100644 --- a/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/util/Util.java +++ b/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/util/Util.java @@ -41,7 +41,6 @@ import java.util.Map; import java.util.NoSuchElementException; import java.util.Set; -import java.util.TreeSet; import java.util.logging.Logger; import javax.ws.rs.Path; @@ -334,19 +333,6 @@ public static <A> Set<A> createSet(A... objects) { return set; } - /** - * @param <A> - * @param collection - * @param comparator - * @return - */ - public static <A> Collection<A> createTreeSet(Collection<A> collection, - Comparator<A> comparator) { - Collection<A> coll2 = new TreeSet<A>(comparator); - coll2.addAll(collection); - return coll2; - } - /** * Check if the given objects are equal. Can deal with null references. if * both elements are null, than the result is true. @@ -362,7 +348,7 @@ public static boolean equals(Object object1, Object object2) { } /** - * Converte the given Date into a String. Copied from + * Converts the given Date into a String. Copied from * {@link com.noelios.restlet.HttpCall}. * * @param date @@ -380,16 +366,19 @@ public static String formatDate(Date date, boolean cookie) { } /** + * Returns the first element of the given collection. Throws an exception if + * the collection is empty. + * * @param coll * @param <A> * @return Returns the first Element of the collection - * @throws IndexOutOfBoundsException - * If the list is empty + * @throws NoSuchElementException + * If the collection is empty. 
*/ public static <A> A getFirstElement(Collection<A> coll) - throws IndexOutOfBoundsException { + throws NoSuchElementException { if (coll.isEmpty()) - throw new IndexOutOfBoundsException( + throw new NoSuchElementException( "The Collection is empty; you can't get the first element of it."); if (coll instanceof LinkedList) return ((LinkedList<A>) coll).getFirst(); @@ -399,14 +388,17 @@ public static <A> A getFirstElement(Collection<A> coll) } /** + * Returns the first element of the given {@link Iterable}. Throws an + * exception if the {@link Iterable} is empty. + * * @param coll * @param <A> * @return Returns the first Element of the collection - * @throws IndexOutOfBoundsException - * If the list is empty + * @throws NoSuchElementException + * If the collection is empty */ public static <A> A getFirstElement(Iterable<A> coll) - throws IndexOutOfBoundsException { + throws NoSuchElementException { if (coll instanceof LinkedList) return ((LinkedList<A>) coll).getFirst(); if (coll instanceof List) @@ -415,6 +407,9 @@ public static <A> A getFirstElement(Iterable<A> coll) } /** + * Returns the first element of the {@link List}. Throws an exception if + * the list is empty. + * * @param list * @param <A> * @return Returns the first Element of the collection @@ -432,14 +427,15 @@ public static <A> A getFirstElement(List<A> list) } /** + * Returns the first element of the given {@link Iterable}. Returns null, + * if the {@link Iterable} is empty. + * * @param coll * @param <A> - * @return Returns the first Element of the collection - * @throws IndexOutOfBoundsException - * If the list is empty + * @return the first element of the collection, or null if the iterable is + * empty. 
*/ - public static <A> A getFirstElementOrNull(Iterable<A> coll) - throws IndexOutOfBoundsException { + public static <A> A getFirstElementOrNull(Iterable<A> coll) { if (coll instanceof LinkedList) { LinkedList<A> linkedList = ((LinkedList<A>) coll); if (linkedList.isEmpty()) @@ -460,12 +456,13 @@ public static <A> A getFirstElementOrNull(Iterable<A> coll) } /** + * Returns the first entry of the given {@link Map}. Throws an exception if + * the Map is empty. + * * @param map * @param <K> * @param <V> - * @return Returns the first element, returned by the iterator over the - * map.entrySet() - * + * @return the first entry of the given {@link Map}. * @throws NoSuchElementException * If the map is empty. */ @@ -475,12 +472,13 @@ public static <K, V> Map.Entry<K, V> getFirstEntry(Map<K, V> map) } /** - * @return Returns the first element, returned by the iterator over the - * map.keySet() + * Returns the key of the first entry of the given {@link Map}. Throws an + * exception if the Map is empty. * * @param map * @param <K> * @param <V> + * @return the key of the first entry of the given {@link Map}. * @throws NoSuchElementException * If the map is empty. */ @@ -490,11 +488,13 @@ public static <K, V> K getFirstKey(Map<K, V> map) } /** - * @return Returns the first element, returned by the iterator over the - * map.values() + * Returns the value of the first entry of the given {@link Map}. Throws an + * exception if the Map is empty. + * * @param map * @param <K> * @param <V> + * @return the value of the first entry of the given {@link Map}. * @throws NoSuchElementException * If the map is empty. */ @@ -504,8 +504,10 @@ public static <K, V> V getFirstValue(Map<K, V> map) } /** + * Returns the HTTP headers of the Restlet {@link Request} as {@link Form}. + * * @param request - * @return Returns the HTTP-Headers-Form from the Request. + * @return Returns the HTTP headers of the Request. 
*/ public static Form getHttpHeaders(Request request) { Form headers = (Form) request.getAttributes().get( @@ -518,9 +520,10 @@ public static Form getHttpHeaders(Request request) { } /** + * Returns the HTTP headers of the Restlet {@link Response} as {@link Form}. + * * @param response - * a Restlet response - * @return Returns the HTTP-Headers-Form from the Response. + * @return Returns the HTTP headers of the Response. */ public static Form getHttpHeaders(Response response) { Form headers = (Form) response.getAttributes().get( @@ -553,9 +556,12 @@ public static MultivaluedMap<String, String> getJaxRsHttpHeaders( } /** + * Returns the last element of the given {@link Iterable}. Throws an + * exception if the given iterable is empty. + * * @param iterable * @param <A> - * @return Returns the last Element of the {@link Iterable} + * @return Returns the last element of the {@link Iterable} * @throws IndexOutOfBoundsException * If the {@link Iterable} is a {@link List} and its is * empty. @@ -575,9 +581,12 @@ public static <A> A getLastElement(Iterable<A> iterable) } /** + * Returns the last element of the given {@link Iterator}. Throws an + * exception if the given iterator is empty. + * * @param iter * @param <A> - * @return Returns the last Element of the {@link Iterator}. + * @return Returns the last element of the {@link Iterator}. * @throws NoSuchElementException * If the {@link Iterator} is empty. */ @@ -590,9 +599,12 @@ public static <A> A getLastElement(Iterator<A> iter) } /** + * Returns the last element of the given {@link List}. Throws an exception + * if the given list is empty. + * * @param list * @param <A> - * @return Returns the last Element of the list + * @return Returns the last element of the list * @throws IndexOutOfBoundsException * If the list is empty */ @@ -604,7 +616,8 @@ public static <A> A getLastElement(List<A> list) } /** - * Returns the last element of the given Iterable, or null, if it is empty. 
+ * Returns the last element of the given {@link Iterable}, or null, if the + * iterable is empty. Returns null, if the iterable is empty. * * @param iterable * @param <A> @@ -632,11 +645,12 @@ public static <A> A getLastElementOrNull(Iterable<A> iterable) { } /** + * Returns the last element of the given {@link Iterator}, or null, if the + * iterator is empty. Returns null, if the iterator is empty. + * * @param iter * @param <A> * @return Returns the last Element of the {@link Iterator}. - * @throws NoSuchElementException - * If the {@link Iterator} is empty. */ public static <A> A getLastElementOrNull(Iterator<A> iter) { A e = null; @@ -716,9 +730,13 @@ public static String getOnlyMetadataName(List<? extends Metadata> metadatas) { } /** + * Returns the &#64;{@link Path} annotation of the given root resource + * class. + * * @param jaxRsClass - * @return the path annotation or null, if no is present and requirePath is - * false. + * the root resource class. + * @return the &#64;{@link Path} annotation of the given root resource + * class. * @throws MissingAnnotationException * if the path annotation is missing * @throws IllegalArgumentException @@ -737,9 +755,12 @@ public static Path getPathAnnotation(Class<?> jaxRsClass) } /** + * Returns the &#64;{@link Path} annotation of the given sub resource + * locator. Throws an exception if no &#64;{@link Path} annotation is + * available. + * * @param method * the java method to get the &#64;Path from - * @param pathRequired * @return the &#64;Path annotation. * @throws IllegalArgumentException * if null was given. @@ -759,6 +780,9 @@ public static Path getPathAnnotation(Method method) } /** + * Returns the &#64;{@link Path} annotation of the given sub resource + * locator. Returns null if no &#64;{@link Path} annotation is available. + * * @param method * the java method to get the &#64;Path from * @return the &#64;Path annotation or null, if not present. 
@@ -774,6 +798,8 @@ public static Path getPathAnnotationOrNull(Method method) } /** + * Returns the perhaps decoded template of the path annotation. + * * @param resource * @return Returns the path template as String. Never returns null. * @throws IllegalPathOnClassException @@ -932,7 +958,7 @@ public Object run() throws Exception { } /** - * Checks, if the list is empty. + * Checks, if the list is empty or null. * * @param list * @return true, if the list is empty or null, or false, if the list @@ -944,7 +970,7 @@ public static boolean isEmpty(List<?> list) { } /** - * Tests, if the given array is empty. Will not throw a + * Tests, if the given array is empty or null. Will not throw a * NullPointerException. * * @param array @@ -959,11 +985,12 @@ public static boolean isEmpty(Object[] array) { } /** - * Tests, if the given String is empty or "/". Will not throw a + * Tests, if the given String is null, empty or "/". Will not throw a * NullPointerException. * * @param string - * @return Returns true, if the given string ist null, empty or equals "/" + * @return Returns true, if the given string ist null, empty or equals "/", + * otherwise false. 
*/ public static boolean isEmptyOrSlash(String string) { return string == null || string.length() == 0 || string.equals("/"); diff --git a/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/wrappers/IntoRrcInjector.java b/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/wrappers/IntoRrcInjector.java index 7020117cc8..6fb83146e9 100644 --- a/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/wrappers/IntoRrcInjector.java +++ b/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/wrappers/IntoRrcInjector.java @@ -179,12 +179,11 @@ protected void inject(ResourceObject resourceObject, } for (Field ppf : this.injectFieldsPathParam) { PathParam headerParam = ppf.getAnnotation(PathParam.class); - DefaultValue defaultValue = ppf.getAnnotation(DefaultValue.class); + // REQUEST forbid @DefaultValue on @PathParam Class<?> convTo = ppf.getType(); Type paramGenericType = ppf.getGenericType(); Object value = WrapperUtil.getPathParamValue(convTo, - paramGenericType, headerParam, leaveEncoded, defaultValue, - callContext); + paramGenericType, headerParam, leaveEncoded, callContext); Util.inject(jaxRsResObj, ppf, value); } for (Field cpf : this.injectFieldsQueryParam) { diff --git a/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/wrappers/WrapperUtil.java b/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/wrappers/WrapperUtil.java index c362ef193e..7b0e25733b 100644 --- a/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/wrappers/WrapperUtil.java +++ b/modules/org.restlet.ext.jaxrs_1.0/src/org/restlet/ext/jaxrs/internal/wrappers/WrapperUtil.java @@ -134,6 +134,8 @@ public void remove() { private static final Collection<Class<? extends Annotation>> VALID_ANNOTATIONS = createValidAnnotations(); /** + * Checks, if the given annotation is annotated with at least one JAX-RS + * related annotation. 
* * @param javaMethod * Java method, class or something like that. @@ -143,7 +145,7 @@ public void remove() { static boolean checkForJaxRsAnnotations(Method javaMethod) { for (Annotation annotation : javaMethod.getAnnotations()) { Class<? extends Annotation> annoType = annotation.annotationType(); - if (annoType.getName().startsWith(WrapperUtil.JAX_RS_PACKAGE_PREFIX)) + if (annoType.getName().startsWith(JAX_RS_PACKAGE_PREFIX)) return true; if (annoType.isAnnotationPresent(HttpMethod.class)) return true; @@ -217,7 +219,7 @@ private static boolean checkParameterAnnotation( } /** - * converts the given value without any decoding. + * Converts the given value without any decoding. * * @param paramClass * @param paramValue @@ -419,8 +421,12 @@ static List<MediaType> convertToMediaTypes(String[] mimes) { } /** + * Creates the collection for the given + * {@link ParameterizedType parametrized Type}.<br> + * If the given type do not represent an collection, null is returned. + * * @param type - * @return + * @return the created collection or null. */ private static <A> Collection<A> createColl(ParameterizedType type) { Type rawType = type.getRawType(); @@ -432,28 +438,37 @@ else if (rawType.equals(SortedSet.class)) return new TreeSet<A>(); else if (rawType.equals(Collection.class)) { Logger logger = Logger.getAnonymousLogger(); - logger.config(WrapperUtil.COLL_PARAM_NOT_DEFAULT); + logger.config(COLL_PARAM_NOT_DEFAULT); return new ArrayList<A>(); } return null; } /** + * Creates a concrete instance of the given {@link Representation} subtype. + * It must contain a constructor with one parameter of type + * {@link Representation}. + * + * @param representationType + * the class to instantiate * @param entity + * the Representation to use for the constructor. + * @param logger + * the logger to use * @return the created representation, or null, if it could not be * converted. 
- * @throws ConvertParameterException + * @throws ConvertRepresentationException */ private static Object createConcreteRepresentationInstance( - Class<?> paramType, Representation entity, Logger logger) + Class<?> representationType, Representation entity, Logger logger) throws ConvertRepresentationException { - if (paramType.equals(Representation.class)) + if (representationType.equals(Representation.class)) return entity; Constructor<?> constr; try { - constr = paramType.getConstructor(Representation.class); + constr = representationType.getConstructor(Representation.class); } catch (SecurityException e) { - logger.warning("The constructor " + paramType + logger.warning("The constructor " + representationType + "(Representation) is not accessable."); return null; } catch (NoSuchMethodException e) { @@ -462,7 +477,7 @@ private static Object createConcreteRepresentationInstance( try { return constr.newInstance(entity); } catch (Exception e) { - throw ConvertRepresentationException.object(paramType, + throw ConvertRepresentationException.object(representationType, "the message body", e); } } @@ -518,17 +533,14 @@ static Object createInstance(Constructor<?> constructor, try { return constructor.newInstance(args); } catch (IllegalArgumentException e) { - throw new InstantiateException( - "Could not instantiate " + constructor.getDeclaringClass(), - e); + throw new InstantiateException("Could not instantiate " + + constructor.getDeclaringClass(), e); } catch (InstantiationException e) { - throw new InstantiateException( - "Could not instantiate " + constructor.getDeclaringClass(), - e); + throw new InstantiateException("Could not instantiate " + + constructor.getDeclaringClass(), e); } catch (IllegalAccessException e) { - throw new InstantiateException( - "Could not instantiate " + constructor.getDeclaringClass(), - e); + throw new InstantiateException("Could not instantiate " + + constructor.getDeclaringClass(), e); } } @@ -540,11 +552,12 @@ static Collection<Class<? 
extends Annotation>> createValidAnnotations() { } /** + * Finds the constructor to use by the JAX-RS runtime. + * * @param jaxRsClass * @return Returns the constructor to use for the given root resource class - * (See JSR-311-Spec, section 2.3). If no constructor could be - * found, null is returned. Than try {@link Class#newInstance()} - * @throws IllegalTypeException + * or provider. If no constructor could be found, null is returned. + * Than try {@link Class#newInstance()} */ static Constructor<?> findJaxRsConstructor(Class<?> jaxRsClass) { Constructor<?> constructor = null; @@ -608,6 +621,8 @@ static javax.ws.rs.ext.ContextResolver<?> getContextResolver(Field field, } /** + * Creates the value of a cookie as the given type. + * * @param paramClass * the class to convert to * @param paramGenericType @@ -674,10 +689,10 @@ static Object getCookieParamValue(Class<?> paramClass, return convertParamValuesFromParam(paramClass, paramGenericType, new ParamValueIter((Series) cookies.subList(cookieName)), getValue(cookies.getFirst(cookieName)), defaultValue, true); + // leaveEncoded = true -> not change } catch (ConvertParameterException e) { throw new ConvertCookieParamException(e); } - // leaveEncoded = true -> not change } /** @@ -707,6 +722,12 @@ static Object getHeaderParamValue(Class<?> paramClass, } } + /** + * Returns the HTTP method related to the given java method. + * + * @param javaMethod + * @return + */ static org.restlet.data.Method getHttpMethod(Method javaMethod) { for (Annotation annotation : javaMethod.getAnnotations()) { Class<? 
extends Annotation> annoType = annotation.annotationType(); @@ -812,8 +833,7 @@ else if (paramClass.equals(Conditions.class)) } if (annoType.equals(PathParam.class)) { return getPathParamValue(paramClass, paramGenericType, - (PathParam) annotation, leaveEncoded, defaultValue, - callContext); + (PathParam) annotation, leaveEncoded, callContext); } if (annoType.equals(MatrixParam.class)) { return getMatrixParamValue(paramClass, paramGenericType, @@ -992,15 +1012,13 @@ private static Object getParamValueForPrimitive(Class<?> paramClass, * the generic type to convert to * @param pathParam * @param leaveEncoded - * @param defaultValue * @param callContext * @param logger * @return * @throws ConvertPathParamException */ static Object getPathParamValue(Class<?> paramClass, Type paramGenericType, - PathParam pathParam, boolean leaveEncoded, - DefaultValue defaultValue, CallContext callContext) + PathParam pathParam, boolean leaveEncoded, CallContext callContext) throws ConvertPathParamException { // LATER testen Path-Param: List<String> (see PathParamTest.testGet3()) // TODO @PathParam("x") PathSegment allowed. @@ -1008,10 +1026,13 @@ static Object getPathParamValue(Class<?> paramClass, Type paramGenericType, String pathParamValue = callContext.getLastPathParamEnc(pathParam); Iterator<String> pathParamValueIter = callContext .pathParamEncIter(pathParam); + // REQUEST What should happens, if no PathParam could be found? + // Internal Server Error? It could be that someone request a qPathParam + // value of a prior @Path, but this is not good IMO. + // perhaps add another attribute to @PathParam, which allows it. try { return convertParamValuesFromParam(paramClass, paramGenericType, - pathParamValueIter, pathParamValue, defaultValue, - leaveEncoded); + pathParamValueIter, pathParamValue, null, leaveEncoded); } catch (ConvertParameterException e) { throw new ConvertPathParamException(e); }
9aa093303a2580c5cd165e95b0d59062ec9ec835
restlet-framework-java
- Initial code for new default HTTP connector and- SIP connector.--
a
https://github.com/restlet/restlet-framework-java
diff --git a/modules/org.restlet/src/org/restlet/engine/http/connector/ClientConnection.java b/modules/org.restlet/src/org/restlet/engine/http/connector/ClientConnection.java index 0cadd7f228..faafd5fbb2 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/connector/ClientConnection.java +++ b/modules/org.restlet/src/org/restlet/engine/http/connector/ClientConnection.java @@ -297,6 +297,8 @@ protected void writeMessageHeadLine(Response message, headStream.write(' '); headStream.write(getRequestUri(request.getResourceRef()).getBytes()); headStream.write(' '); + headStream.write(request.getProtocol().getName().getBytes()); + headStream.write('/'); headStream.write(request.getProtocol().getVersion().getBytes()); HeaderUtils.writeCRLF(getOutboundStream()); }
950225ed644937af40685339d143bd5da1c1e96d
Delta Spike
DELTASPIKE-312 add JavaDoc warning for @Alternative scenarios
p
https://github.com/apache/deltaspike
diff --git a/deltaspike/core/api/src/main/java/org/apache/deltaspike/core/api/provider/BeanProvider.java b/deltaspike/core/api/src/main/java/org/apache/deltaspike/core/api/provider/BeanProvider.java index 51dff71ee..b1a9de4a7 100644 --- a/deltaspike/core/api/src/main/java/org/apache/deltaspike/core/api/provider/BeanProvider.java +++ b/deltaspike/core/api/src/main/java/org/apache/deltaspike/core/api/provider/BeanProvider.java @@ -229,6 +229,9 @@ private static <T> T getContextualReference(Class<T> type, BeanManager beanManag * * <p><b>Attention:</b> please see the notes on manually resolving &#064;Dependent bean * in {@link #getContextualReference(Class, boolean, java.lang.annotation.Annotation...)}!</p> + * <p><b>Attention:</b> This will also return instances for beans which where an Alternative + * exists for! The &#064;Alternative resolving is only done via {@link BeanManager#resolve(java.util.Set)} + * which we cannot use in this case!</p> * * @param type the type of the bean in question * @param optional if <code>true</code> it will return an empty list if no bean could be found or created. @@ -245,6 +248,11 @@ public static <T> List<T> getContextualReferences(Class<T> type, boolean optiona * <p>Get a list of Contextual References by it's type independent of the qualifier. * * Further details are available at {@link #getContextualReferences(Class, boolean)} + * <p><b>Attention:</b> please see the notes on manually resolving &#064;Dependent bean + * in {@link #getContextualReference(Class, boolean, java.lang.annotation.Annotation...)}!</p> + * <p><b>Attention:</b> This will also return instances for beans which where an Alternative + * exists for! The &#064;Alternative resolving is only done via {@link BeanManager#resolve(java.util.Set)} + * which we cannot use in this case!</p> * * @param type the type of the bean in question * @param optional if <code>true</code> it will return an empty list if no bean could be found or created.
ef619fcd7aa5e84de31405431b244cdd3de2e3e0
adangel$pmd
Changes from Brian Remedios. Refactored UseIndexOfChar to extract common functionality into AbstractPoorMethodCall. git-svn-id: https://pmd.svn.sourceforge.net/svnroot/pmd/trunk@4442 51baf565-9d33-0410-a72c-fc3788e3496d
p
https://github.com/adangel/pmd
diff --git a/pmd/etc/changelog.txt b/pmd/etc/changelog.txt index 0d299f5c608..ba05b673c34 100644 --- a/pmd/etc/changelog.txt +++ b/pmd/etc/changelog.txt @@ -11,6 +11,7 @@ Fixed a bug in AvoidProtectedFieldInFinalClass - it no longer reports false posi Fixed a bug in the C++ grammar - the tokenizer now properly recognizes macro definitions which are followed by a multiline comment. Applied patch 1481024 (fulfilling RFE 1490181)- NOPMD messages can now be reported with a user specified msg, e.g., //NOPMD - this is expected Added JSP support to the copy/paste detector. +Refactored UseIndexOfChar to extract common functionality into AbstractPoorMethodCall. June 1, 2006 - 3.7: New rules: diff --git a/pmd/src/net/sourceforge/pmd/rules/AbstractPoorMethodCall.java b/pmd/src/net/sourceforge/pmd/rules/AbstractPoorMethodCall.java new file mode 100644 index 00000000000..a35d912bd9e --- /dev/null +++ b/pmd/src/net/sourceforge/pmd/rules/AbstractPoorMethodCall.java @@ -0,0 +1,120 @@ +package net.sourceforge.pmd.rules; + +import net.sourceforge.pmd.AbstractRule; +import net.sourceforge.pmd.ast.ASTAdditiveExpression; +import net.sourceforge.pmd.ast.ASTLiteral; +import net.sourceforge.pmd.ast.ASTPrimaryExpression; +import net.sourceforge.pmd.ast.ASTVariableDeclaratorId; +import net.sourceforge.pmd.ast.SimpleNode; +import net.sourceforge.pmd.symboltable.NameOccurrence; + +import java.util.Iterator; +import java.util.List; + +/** + * Detects and flags the occurrences of specific method calls against an instance of + * a designated class. I.e. String.indexOf. The goal is to be able to suggest more + * efficient/modern ways of implementing the same function. + * + * Concrete subclasses are expected to provide the name of the target class and an + * array of method names that we are looking for. We then pass judgement on any literal + * arguments we find in the subclass as well. 
+ * + * @author Brian Remedios + * @version $Revision$ + */ +public abstract class AbstractPoorMethodCall extends AbstractRule { + + + /** + * The name of the type the method will be invoked against. + * @return String + */ + protected abstract String targetTypename(); + + /** + * Return the names of all the methods we are scanning for, no brackets or + * argument types. + * + * @return String[] + */ + protected abstract String[] methodNames(); + + /** + * Returns whether the string argument at the stated position being sent to + * the method is ok or not. Return true if you want to record the method call + * as a violation, false otherwise. + * + * @param argIndex int + * @param arg String + * @return boolean + */ + protected abstract boolean isViolationArgument(int argIndex, String arg); + + /** + * Returns whether the name occurrence is one of the method calls + * we are interested in. + * + * @param occurrence NameOccurrence + * @return boolean + */ + private boolean isNotedMethod(NameOccurrence occurrence) { + + if (occurrence == null) return false; + + String methodCall = occurrence.getImage(); + String[] methodNames = methodNames(); + + for (int i=0; i<methodNames.length; i++) { + if (methodCall.indexOf(methodNames[i]) != -1) return true; + } + return false; + } + + /** + * Returns whether the value argument is a single character string. + * + * @param value String + * @return boolean + */ + public static boolean isSingleCharAsString(String value) { + return value.length() == 3 && value.charAt(0) == '\"'; + } + + /** + * Method visit. 
+ * @param node ASTVariableDeclaratorId + * @param data Object + * @return Object + * @see net.sourceforge.pmd.ast.JavaParserVisitor#visit(ASTVariableDeclaratorId, Object) + */ + public Object visit(ASTVariableDeclaratorId node, Object data) { + + if (!node.getNameDeclaration().getTypeImage().equals(targetTypename())) { + return data; + } + + for (Iterator i = node.getUsages().iterator(); i.hasNext();) { + NameOccurrence occ = (NameOccurrence) i.next(); + if (isNotedMethod(occ.getNameForWhichThisIsAQualifier())) { + SimpleNode parent = (SimpleNode)occ.getLocation().jjtGetParent().jjtGetParent(); + if (parent instanceof ASTPrimaryExpression) { + // bail out if it's something like indexOf("a" + "b") + List additives = parent.findChildrenOfType(ASTAdditiveExpression.class); + if (!additives.isEmpty()) { + return data; + } + List literals = parent.findChildrenOfType(ASTLiteral.class); + for (int l=0; l<literals.size(); l++) { + ASTLiteral literal = (ASTLiteral)literals.get(l); + if (isViolationArgument(l, literal.getImage())) { + addViolation(data, occ.getLocation()); + } + } + } + } + } + return data; + } +} + diff --git a/pmd/src/net/sourceforge/pmd/rules/strings/UseIndexOfChar.java b/pmd/src/net/sourceforge/pmd/rules/strings/UseIndexOfChar.java index e928d2d1725..c298736d892 100644 --- a/pmd/src/net/sourceforge/pmd/rules/strings/UseIndexOfChar.java +++ b/pmd/src/net/sourceforge/pmd/rules/strings/UseIndexOfChar.java @@ -1,44 +1,43 @@ package net.sourceforge.pmd.rules.strings; -import net.sourceforge.pmd.AbstractRule; -import net.sourceforge.pmd.ast.ASTAdditiveExpression; -import net.sourceforge.pmd.ast.ASTLiteral; -import net.sourceforge.pmd.ast.ASTPrimaryExpression; -import net.sourceforge.pmd.ast.ASTVariableDeclaratorId; -import net.sourceforge.pmd.ast.SimpleNode; -import net.sourceforge.pmd.symboltable.NameOccurrence; +import net.sourceforge.pmd.rules.AbstractPoorMethodCall; -import java.util.Iterator; -import java.util.List; +/** + */ +public class UseIndexOfChar 
extends AbstractPoorMethodCall { -public class UseIndexOfChar extends AbstractRule { - public Object visit(ASTVariableDeclaratorId node, Object data) { - if (!node.getNameDeclaration().getTypeImage().equals("String")) { - return data; - } - for (Iterator i = node.getUsages().iterator(); i.hasNext();) { - NameOccurrence occ = (NameOccurrence) i.next(); - if (occ.getNameForWhichThisIsAQualifier() != null && - (occ.getNameForWhichThisIsAQualifier().getImage().indexOf("indexOf") != -1 || - occ.getNameForWhichThisIsAQualifier().getImage().indexOf("lastIndexOf") != -1)) { - SimpleNode parent = (SimpleNode)occ.getLocation().jjtGetParent().jjtGetParent(); - if (parent instanceof ASTPrimaryExpression) { - // bail out if it's something like indexOf("a" + "b") - List additives = parent.findChildrenOfType(ASTAdditiveExpression.class); - if (!additives.isEmpty()) { - return data; - } - List literals = parent.findChildrenOfType(ASTLiteral.class); - for (Iterator j = literals.iterator(); j.hasNext();) { - ASTLiteral literal = (ASTLiteral)j.next(); - if (literal.getImage().length() == 3 && literal.getImage().charAt(0) == '\"') { - addViolation(data, occ.getLocation()); - } - } - } - } - } - return data; + private static final String targetTypeName = "String"; + private static final String[] methodNames = new String[] { "indexOf", "lastIndexOf" }; + + public UseIndexOfChar() { + super(); + } + + /** + * Method targetTypeName. + * @return String + */ + protected String targetTypename() { + return targetTypeName; + } + + /** + * Method methodNames. + * @return String[] + */ + protected String[] methodNames() { + return methodNames; + } + + /** + * Method isViolationArgument. 
+ * @param argIndex int + * @param arg String + * @return boolean + */ + protected boolean isViolationArgument(int argIndex, String arg) { + + return isSingleCharAsString(arg); } -} +} diff --git a/pmd/xdocs/credits.xml b/pmd/xdocs/credits.xml index 5f1e1b78526..30beee2044e 100644 --- a/pmd/xdocs/credits.xml +++ b/pmd/xdocs/credits.xml @@ -105,7 +105,7 @@ <li>Raja Rajan - 2 bug reports for CompareObjectswithEquals</li> <li>Jeff Chamblee - suggested better message for UnnecessaryCaseChange, bug report for CompareObjectsWithEquals</li> <li>Dave Brosius - suggested MisleadingVariableName rule, a couple of nice patches to clean up some string handling inefficiencies, non-static class usages, and unclosed streams/readers - found with Findbugs, I daresay :-)</li> - <li>Brian Remedios - code improvements to Eclipse plugin</li> + <li>Brian Remedios - code improvements to Eclipse plugin, Created AbstractPoorMethodCall & Refactored UseIndexOfChar</li> <li>Chris Grindstaff - fixed SWTException when Eclipse plugin is run on a file with syntax error</li> <li>Eduard Naum - fixed JDK 1.3 runtime problems in Eclipse plugin</li> <li>Jacques Lebourgeois - fix for UTF8 characters in Eclipse plugin</li>
7d5346ed863608b18045d9688b1abd41f0bfa832
agorava$agorava-core
AGOVA-7 Create API and common IMPL for JSR 330 compliant framework Adding support of @OAuthApplication annotation to produce settings
a
https://github.com/agorava/agorava-core
diff --git a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/MultiSessionServiceImpl.java b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/MultiSessionServiceImpl.java index d03f509..6497e55 100644 --- a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/MultiSessionServiceImpl.java +++ b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/MultiSessionServiceImpl.java @@ -22,6 +22,7 @@ import org.agorava.core.api.oauth.OAuthService; import org.agorava.core.api.oauth.OAuthSession; import org.agorava.core.api.service.MultiSessionService; +import org.agorava.core.cdi.extensions.AgoravaExtension; import javax.annotation.PostConstruct; import javax.enterprise.context.SessionScoped; @@ -40,7 +41,7 @@ import static com.google.common.collect.Iterables.getLast; import static com.google.common.collect.Lists.newArrayList; import static com.google.common.collect.Sets.newHashSet; -import static org.agorava.core.cdi.AgoravaExtension.getServicesToQualifier; +import static org.agorava.core.cdi.extensions.AgoravaExtension.getServicesToQualifier; /** * {@inheritDoc} diff --git a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/OAuthLiteral.java b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/OAuthLiteral.java deleted file mode 100644 index 9b8a1c6..0000000 --- a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/OAuthLiteral.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2013 Agorava - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.agorava.core.cdi; - -import org.agorava.core.api.atinject.OAuth; - -import javax.enterprise.util.AnnotationLiteral; - -/** - * @author Antoine Sabot-Durand - */ -public class OAuthLiteral extends AnnotationLiteral<OAuth> implements OAuth { - - private OAuthVersion version; - - public OAuthLiteral(OAuthVersion version) { - this.version = version; - } - - @Override - public OAuthVersion value() { - return version; - } -} diff --git a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/OAuthServiceImpl.java b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/OAuthServiceImpl.java index 5fb0a48..4de0a7f 100644 --- a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/OAuthServiceImpl.java +++ b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/OAuthServiceImpl.java @@ -16,7 +16,6 @@ package org.agorava.core.cdi; -import org.agorava.core.api.atinject.Current; import org.agorava.core.api.atinject.GenericBean; import org.agorava.core.api.atinject.InjectWithQualifier; import org.agorava.core.api.event.OAuthComplete; @@ -31,12 +30,12 @@ import org.agorava.core.api.rest.Response; import org.agorava.core.api.rest.Verb; import org.agorava.core.api.service.JsonMapperService; +import org.agorava.core.cdi.extensions.AgoravaExtension; import javax.annotation.PostConstruct; import javax.enterprise.event.Event; import javax.enterprise.inject.Any; import javax.enterprise.inject.Instance; -import javax.enterprise.util.AnnotationLiteral; import javax.inject.Inject; import java.lang.annotation.Annotation; import java.text.MessageFormat; @@ -60,9 +59,6 @@ public class OAuthServiceImpl implements OAuthService { private static final long serialVersionUID = -8423894021913341674L; - private static Annotation currentLiteral = new AnnotationLiteral<Current>() { - private static final long serialVersionUID = -2929657732814790025L; - }; @Inject 
protected JsonMapperService jsonService; @@ -220,7 +216,7 @@ public void setAccessToken(String token, String secret) { public OAuthSession getSession() { OAuthSession res = null; - Instance<OAuthSession> currentSession = sessions.select(currentLiteral); + Instance<OAuthSession> currentSession = sessions.select(CurrentLiteral.INSTANCE); if (currentSession.isAmbiguous()) { currentSession = currentSession.select(qualifier); diff --git a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/AgoravaExtension.java b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/AgoravaExtension.java similarity index 88% rename from agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/AgoravaExtension.java rename to agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/AgoravaExtension.java index 55c6767..c21c521 100644 --- a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/AgoravaExtension.java +++ b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/AgoravaExtension.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package org.agorava.core.cdi; +package org.agorava.core.cdi.extensions; import com.google.common.collect.BiMap; import com.google.common.collect.HashBiMap; @@ -26,9 +26,11 @@ import org.agorava.core.api.atinject.TierServiceRelated; import org.agorava.core.api.exception.AgoravaException; import org.agorava.core.api.oauth.OAuthAppSettings; +import org.agorava.core.api.oauth.OAuthAppSettingsBuilder; import org.agorava.core.api.oauth.OAuthApplication; import org.agorava.core.api.oauth.OAuthProvider; import org.agorava.core.api.oauth.OAuthService; +import org.agorava.core.cdi.OAuthServiceImpl; import org.agorava.core.oauth.OAuthSessionImpl; import org.agorava.core.spi.TierConfigOauth; import org.apache.deltaspike.core.api.literal.AnyLiteral; @@ -61,6 +63,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Map; +import java.util.NoSuchElementException; import java.util.Set; import java.util.logging.Logger; @@ -260,32 +263,53 @@ public void processGenericSession(@Observes ProcessAnnotatedType<? 
extends OAuth */ public void processOAuthSettingsProducer(@Observes final ProcessProducer<?, OAuthAppSettings> pp) { final AnnotatedMember<OAuthAppSettings> annotatedMember = (AnnotatedMember<OAuthAppSettings>) pp.getAnnotatedMember(); - final Annotation qual = Iterables.getLast(AgoravaExtension.getAnnotationsWithMeta(annotatedMember, - TierServiceRelated.class)); - final Producer<OAuthAppSettings> oldProducer = pp.getProducer(); + + Annotation qual = null; + try { + qual = Iterables.getLast(AgoravaExtension.getAnnotationsWithMeta(annotatedMember, + TierServiceRelated.class)); + } catch (NoSuchElementException e) { + pp.addDefinitionError(new AgoravaException("OAuthAppSettings producers should be annotated with a Service " + + "Provider on " + annotatedMember.getJavaMember().getName() + " in " + annotatedMember.getJavaMember() + .getDeclaringClass())); + } + if (annotatedMember.isAnnotationPresent(OAuthApplication.class)) { - /* TODO:CODE below for future support of OAuthAppSettings creation via annotation + if (annotatedMember instanceof AnnotatedField) { - final OAuthApplication app = annotatedMember.getAnnotation(OAuthApplication.class); + final OAuthApplication app = annotatedMember.getAnnotation(OAuthApplication.class); - Class<? extends OAuthAppSettingsBuilder> builderClass = app.builder(); - OAuthAppSettingsBuilder builderOAuthApp = null; - try { - builderOAuthApp = builderClass.newInstance(); - } catch (Exception e) { - throw new AgoravaException("Unable to create Settings Builder with class " + builderClass, e); - } + Class<? extends OAuthAppSettingsBuilder> builderClass = null; + try { + builderClass = (Class<? 
extends OAuthAppSettingsBuilder>) Class.forName(app.builder()); + } catch (Exception e) { + pp.addDefinitionError(e); + } + OAuthAppSettingsBuilder builderOAuthApp = null; + try { + builderOAuthApp = builderClass.newInstance(); + } catch (Exception e) { + pp.addDefinitionError(new AgoravaException("Unable to create Settings Builder with class " + + builderClass, e)); + } + + builderOAuthApp.qualifier(qual) + .params(app.params()); - final OAuthAppSettingsBuilder finalBuilderOAuthApp = builderOAuthApp; */ + pp.setProducer(new OAuthAppSettingsProducerWithBuilder(builderOAuthApp, qual)); + } else + pp.addDefinitionError(new AgoravaException("@OAuthApplication are only supported on Field. Agorava cannot " + + "process producer " + annotatedMember.getJavaMember().getName() + " in class " + annotatedMember + .getJavaMember().getDeclaringClass())); + } else { + final Producer<OAuthAppSettings> oldProducer = pp.getProducer(); + pp.setProducer(new OAuthAppSettingsProducerDecorator(oldProducer, qual)); } - pp.setProducer(new OAuthAppSettingsProducerDecorator(oldProducer, qual)); log.log(INFO, "Found settings for {0}", qual); servicesQualifiersConfigured.add(qual); - - //settings = builderOAuthApp.name(servicesHub.getSocialMediaName()).params(app.params()).build(); } @@ -316,7 +340,6 @@ public void processRemoteServiceRoot(@Observes ProcessBean<? extends TierConfigO CommonsProcessOAuthTier(pb); } - private void captureGenericOAuthService(@Observes ProcessBean<? extends OAuthService> pb) { Bean<? 
extends OAuthService> bean = pb.getBean(); if (bean.getQualifiers().contains(GenericBeanLiteral.INSTANCE)) { diff --git a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/GenericBeanLiteral.java b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/GenericBeanLiteral.java similarity index 86% rename from agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/GenericBeanLiteral.java rename to agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/GenericBeanLiteral.java index 5efd5f6..4a91f9a 100644 --- a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/GenericBeanLiteral.java +++ b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/GenericBeanLiteral.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package org.agorava.core.cdi; +package org.agorava.core.cdi.extensions; import org.agorava.core.api.atinject.GenericBean; @@ -23,6 +23,6 @@ /** * @author Antoine Sabot-Durand */ -public class GenericBeanLiteral extends AnnotationLiteral<GenericBean> implements GenericBean { +class GenericBeanLiteral extends AnnotationLiteral<GenericBean> implements GenericBean { public static GenericBeanLiteral INSTANCE = new GenericBeanLiteral(); } diff --git a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/InjectLiteral.java b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/InjectLiteral.java similarity index 87% rename from agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/InjectLiteral.java rename to agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/InjectLiteral.java index cb37765..cc12b75 100644 --- a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/InjectLiteral.java +++ b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/InjectLiteral.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package org.agorava.core.cdi; +package org.agorava.core.cdi.extensions; import javax.enterprise.util.AnnotationLiteral; import javax.inject.Inject; @@ -22,7 +22,7 @@ /** * @author Antoine Sabot-Durand */ -public class InjectLiteral extends AnnotationLiteral<Inject> implements Inject { +class InjectLiteral extends AnnotationLiteral<Inject> implements Inject { public static InjectLiteral instance = new InjectLiteral(); } diff --git a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/InjectWithQualifierLiteral.java b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/InjectWithQualifierLiteral.java similarity index 83% rename from agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/InjectWithQualifierLiteral.java rename to agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/InjectWithQualifierLiteral.java index 5ca48a6..dc4fc30 100644 --- a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/InjectWithQualifierLiteral.java +++ b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/InjectWithQualifierLiteral.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package org.agorava.core.cdi; +package org.agorava.core.cdi.extensions; import org.agorava.core.api.atinject.InjectWithQualifier; @@ -23,7 +23,7 @@ /** * @author Antoine Sabot-Durand */ -public class InjectWithQualifierLiteral extends AnnotationLiteral<InjectWithQualifierLiteral> implements InjectWithQualifier { +class InjectWithQualifierLiteral extends AnnotationLiteral<InjectWithQualifierLiteral> implements InjectWithQualifier { public static InjectWithQualifierLiteral instance = new InjectWithQualifierLiteral(); } diff --git a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/OAuthAppSettingsProducerDecorator.java b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/OAuthAppSettingsProducerDecorator.java similarity index 97% rename from agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/OAuthAppSettingsProducerDecorator.java rename to agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/OAuthAppSettingsProducerDecorator.java index a2c9f07..1b96b67 100644 --- a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/OAuthAppSettingsProducerDecorator.java +++ b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/OAuthAppSettingsProducerDecorator.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package org.agorava.core.cdi; +package org.agorava.core.cdi.extensions; import org.agorava.core.api.oauth.OAuthAppSettings; import org.agorava.core.oauth.SimpleOAuthAppSettingsBuilder; diff --git a/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/OAuthAppSettingsProducerWithBuilder.java b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/OAuthAppSettingsProducerWithBuilder.java new file mode 100644 index 0000000..008f644 --- /dev/null +++ b/agorava-core-impl-cdi/src/main/java/org/agorava/core/cdi/extensions/OAuthAppSettingsProducerWithBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2013 Agorava + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.agorava.core.cdi.extensions; + +import org.agorava.core.api.oauth.OAuthAppSettings; +import org.agorava.core.api.oauth.OAuthAppSettingsBuilder; + +import javax.enterprise.context.spi.CreationalContext; +import javax.enterprise.inject.spi.InjectionPoint; +import javax.enterprise.inject.spi.Producer; +import java.lang.annotation.Annotation; +import java.util.HashSet; +import java.util.Set; + +import static org.agorava.core.cdi.extensions.AgoravaExtension.getServicesToQualifier; + +/** + * @author Antoine Sabot-Durand + */ +class OAuthAppSettingsProducerWithBuilder implements Producer<OAuthAppSettings> { + + private OAuthAppSettingsBuilder builder; + + private Annotation qual; + + + OAuthAppSettingsProducerWithBuilder(OAuthAppSettingsBuilder builder, Annotation qual) { + this.builder = builder; + this.qual = qual; + } + + @Override + public OAuthAppSettings produce(CreationalContext<OAuthAppSettings> ctx) { + builder.name(getServicesToQualifier().inverse().get(qual)); + OAuthAppSettings newSettings = builder.build(); + ctx.push(newSettings); + return newSettings; + } + + @Override + public void dispose(OAuthAppSettings instance) { + } + + @Override + public Set<InjectionPoint> getInjectionPoints() { + return new HashSet<InjectionPoint>(); + } +} diff --git a/agorava-core-impl-cdi/src/main/resources/META-INF/services/javax.enterprise.inject.spi.Extension b/agorava-core-impl-cdi/src/main/resources/META-INF/services/javax.enterprise.inject.spi.Extension index 0d0a2f8..affdf8d 100644 --- a/agorava-core-impl-cdi/src/main/resources/META-INF/services/javax.enterprise.inject.spi.Extension +++ b/agorava-core-impl-cdi/src/main/resources/META-INF/services/javax.enterprise.inject.spi.Extension @@ -1 +1 @@ -org.agorava.core.cdi.AgoravaExtension \ No newline at end of file +org.agorava.core.cdi.extensions.AgoravaExtension \ No newline at end of file
16cb7d8d59add7b3ef513bba0de6a4fc07e3bc52
aeshell$aesh
search is more complete, support forward/reverse in emacs mode more complete readline support in emacs mode
a
https://github.com/aeshell/aesh
diff --git a/src/main/java/org/jboss/jreadline/console/Console.java b/src/main/java/org/jboss/jreadline/console/Console.java index 673a9e1fb..a24060ca1 100644 --- a/src/main/java/org/jboss/jreadline/console/Console.java +++ b/src/main/java/org/jboss/jreadline/console/Console.java @@ -19,9 +19,11 @@ import org.jboss.jreadline.edit.EditMode; import org.jboss.jreadline.edit.EmacsEditMode; import org.jboss.jreadline.edit.PasteManager; +import org.jboss.jreadline.edit.ViEditMode; import org.jboss.jreadline.edit.actions.*; import org.jboss.jreadline.history.History; import org.jboss.jreadline.history.InMemoryHistory; +import org.jboss.jreadline.history.SearchDirection; import org.jboss.jreadline.terminal.POSIXTerminal; import org.jboss.jreadline.terminal.Terminal; import org.jboss.jreadline.undo.UndoAction; @@ -132,29 +134,40 @@ else if(action == Action.SEARCH) { switch (operation.getMovement()) { //init a previous search case PREV: + history.setSearchDirection(SearchDirection.REVERSE); searchTerm = new StringBuilder(buffer.getLine()); if (searchTerm.length() > 0) { - result = history.searchPrevious(searchTerm.toString()); + result = history.search(searchTerm.toString()); + } + break; + + case NEXT: + history.setSearchDirection(SearchDirection.FORWARD); + searchTerm = new StringBuilder(buffer.getLine()); + if (searchTerm.length() > 0) { + result = history.search(searchTerm.toString()); } break; case PREV_WORD: - result = history.searchPrevious(searchTerm.toString()); + history.setSearchDirection(SearchDirection.REVERSE); + result = history.search(searchTerm.toString()); + break; + case NEXT_WORD: + history.setSearchDirection(SearchDirection.FORWARD); + result = history.search(searchTerm.toString()); break; case PREV_BIG_WORD: - - if (searchTerm.length() > 0) { + if (searchTerm.length() > 0) searchTerm.deleteCharAt(searchTerm.length() - 1); - } - break; // new search input, append to search case ALL: searchTerm.appendCodePoint(c); //check if the new searchTerm will find 
anything - StringBuilder tmpResult = history.searchPrevious(searchTerm.toString()); + StringBuilder tmpResult = history.search(searchTerm.toString()); // if(tmpResult == null) { searchTerm.deleteCharAt(searchTerm.length()-1); @@ -258,6 +271,12 @@ else if(action == Action.PASTE) { else doPaste(0, false); } + else if(action == Action.CHANGE_EDITMODE) { + if(operation.getMovement() == Movement.PREV) + editMode = new EmacsEditMode(); + else if(operation.getMovement() == Movement.NEXT) + editMode = new ViEditMode(); + } else if(action == Action.NO_ACTION) { //atm do nothing } @@ -438,7 +457,11 @@ private void printSearch(String searchTerm, String result) throws IOException { //cursor should be placed at the index of searchTerm int cursor = result.indexOf(searchTerm); - StringBuilder out = new StringBuilder("(reverse-i-search) `"); + StringBuilder out = null; + if(history.getSearchDirection() == SearchDirection.REVERSE) + out = new StringBuilder("(reverse-i-search) `"); + else + out = new StringBuilder("(forward-i-search) `"); out.append(searchTerm).append("': "); cursor += out.length(); out.append(result); //.append("\u001b[K"); diff --git a/src/main/java/org/jboss/jreadline/edit/EmacsEditMode.java b/src/main/java/org/jboss/jreadline/edit/EmacsEditMode.java index ce8e46031..2814b106a 100644 --- a/src/main/java/org/jboss/jreadline/edit/EmacsEditMode.java +++ b/src/main/java/org/jboss/jreadline/edit/EmacsEditMode.java @@ -24,6 +24,9 @@ * TODO: * - add support for different os key values (mainly windows) * + * Trying to follow the gnu readline impl found here: + * http://cnswww.cns.cwru.edu/php/chet/readline/readline.html + * * @author Ståle W. 
Pedersen <[email protected]> */ public class EmacsEditMode implements EditMode { @@ -45,20 +48,27 @@ public class EmacsEditMode implements EditMode { private final static short CTRL_N = 14; private final static short CTRL_P = 16; private final static short CTRL_R = 18; + private final static short CTRL_S = 19; private final static short CTRL_U = 21; private final static short CTRL_V = 22; private final static short CTRL_W = 23; - private final static short CTRL_X = 24; // prev word + private final static short CTRL_X = 24; + private final static short CTRL_Y = 25; // yank private final static short ESCAPE = 27; + private final static short CTRL__ = 31; private final static short ARROW_START = 91; private final static short LEFT = 68; private final static short RIGHT = 67; private final static short UP = 65; private final static short DOWN = 66; private final static short BACKSPACE = 127; + private final static short F = 102; // needed to handle M-f + private final static short B = 98; // needed to handle M-b + private final static short D = 100; // needed to handle M-d private boolean arrowStart = false; private boolean arrowPrefix = false; + private boolean ctrl_xState = false; private Action mode = Action.EDIT; @@ -75,6 +85,9 @@ public Operation parseInput(int input) { else if(input == CTRL_R) { return new Operation(Movement.PREV_WORD, Action.SEARCH); } + else if(input == CTRL_S) { + return new Operation(Movement.NEXT_WORD, Action.SEARCH); + } else if(input == BACKSPACE) { return new Operation(Movement.PREV_BIG_WORD, Action.SEARCH); } @@ -99,7 +112,7 @@ else if(input == BACKSPACE) else if(input == CTRL_B) return new Operation(Movement.PREV, Action.MOVE); else if(input == CTRL_D) - return new Operation(Movement.PREV, Action.EXIT); + return new Operation(Movement.NEXT, Action.DELETE); else if(input == CTRL_E) return new Operation(Movement.END, Action.MOVE); else if(input == CTRL_F) @@ -111,33 +124,75 @@ else if(input == CTRL_H) else if(input == CTRL_I) return new 
Operation(Movement.PREV, Action.COMPLETE); else if(input == CTRL_K) - return new Operation(Movement.ALL, Action.DELETE); + return new Operation(Movement.END, Action.DELETE); else if(input == CTRL_L) return new Operation(Movement.ALL, Action.DELETE); //TODO: should change to clear screen else if(input == CTRL_N) return new Operation(Movement.NEXT, Action.HISTORY); else if(input == CTRL_P) return new Operation(Movement.PREV, Action.HISTORY); - else if(input == CTRL_U) - return new Operation(Movement.BEGINNING, Action.DELETE); + else if(input == CTRL__) + return new Operation(Action.UNDO); + + else if(input == CTRL_U) { + //only undo if C-x have been pressed first + if(ctrl_xState) { + ctrl_xState = false; + return new Operation(Action.UNDO); + } + else + return new Operation(Movement.BEGINNING, Action.DELETE); + } else if(input == CTRL_V) return new Operation(Movement.NEXT, Action.PASTE_FROM_CLIPBOARD); + // Kill from the cursor to the previous whitespace else if(input == CTRL_W) - return new Operation(Movement.PREV_WORD, Action.DELETE); - else if(input == CTRL_X) - return new Operation(Movement.PREV_WORD, Action.MOVE); + return new Operation(Movement.PREV_BIG_WORD, Action.DELETE); + + // Yank the most recently killed text back into the buffer at the cursor. 
+ else if(input == CTRL_Y) + return new Operation(Movement.NEXT, Action.PASTE); else if(input == CR) return new Operation(Movement.BEGINNING, Action.MOVE); // search else if(input == CTRL_R) { mode = Action.SEARCH; - return new Operation(Movement.PREV, Action.SEARCH); } + else if(input == CTRL_S) { + mode = Action.SEARCH; + return new Operation(Movement.NEXT, Action.SEARCH); + } + + //enter C-x state + else if(input == CTRL_X) { + ctrl_xState = true; + return new Operation(Action.NO_ACTION); + } + + // handle meta keys + else if(input == F && arrowStart) { + arrowStart = false; + return new Operation(Movement.NEXT_WORD, Action.MOVE); + } + else if(input == B && arrowStart) { + arrowStart = false; + return new Operation(Movement.PREV_WORD, Action.MOVE); + } + else if(input == D && arrowStart) { + arrowStart = false; + return new Operation(Movement.NEXT_WORD, Action.DELETE); + } // handle arrow keys else if(input == ESCAPE) { + // if we've already gotten a escape + if(arrowStart) { + arrowStart = false; + return new Operation(Action.NO_ACTION); + } + //new escape, set status as arrowStart if(!arrowPrefix && !arrowStart) { arrowStart = true; return new Operation(Action.NO_ACTION); diff --git a/src/main/java/org/jboss/jreadline/edit/ViEditMode.java b/src/main/java/org/jboss/jreadline/edit/ViEditMode.java index 0ec257323..7c746b5be 100644 --- a/src/main/java/org/jboss/jreadline/edit/ViEditMode.java +++ b/src/main/java/org/jboss/jreadline/edit/ViEditMode.java @@ -46,6 +46,7 @@ public class ViEditMode implements EditMode { private static final short VI_SHIFT_I = 73; private static final short VI_TILDE = 126; private static final short VI_Y = 121; + private static final short CTRL_E = 5; //movement private static final short VI_H = 104; @@ -272,6 +273,8 @@ else if(c == VI_Y) { else mode = Action.YANK; } + else if(c == CTRL_E) + return new Operation(Movement.PREV, Action.CHANGE_EDITMODE); return new Operation(Movement.BEGINNING, Action.NO_ACTION); } diff --git 
a/src/main/java/org/jboss/jreadline/edit/actions/Action.java b/src/main/java/org/jboss/jreadline/edit/actions/Action.java index a2fc01001..dea889024 100644 --- a/src/main/java/org/jboss/jreadline/edit/actions/Action.java +++ b/src/main/java/org/jboss/jreadline/edit/actions/Action.java @@ -38,5 +38,6 @@ public enum Action { CASE, EXIT, ABORT, + CHANGE_EDITMODE, NO_ACTION; } diff --git a/src/main/java/org/jboss/jreadline/history/History.java b/src/main/java/org/jboss/jreadline/history/History.java index b3eaf9bc7..c2956aaa3 100644 --- a/src/main/java/org/jboss/jreadline/history/History.java +++ b/src/main/java/org/jboss/jreadline/history/History.java @@ -30,13 +30,15 @@ public interface History { int size(); + void setSearchDirection(SearchDirection direction); + + SearchDirection getSearchDirection(); + StringBuilder getNextFetch(); StringBuilder getPreviousFetch(); - StringBuilder searchNext(String search); - - StringBuilder searchPrevious(String search); + StringBuilder search(String search); void setCurrent(StringBuilder line); diff --git a/src/main/java/org/jboss/jreadline/history/InMemoryHistory.java b/src/main/java/org/jboss/jreadline/history/InMemoryHistory.java index 5b67831c4..55696be8e 100644 --- a/src/main/java/org/jboss/jreadline/history/InMemoryHistory.java +++ b/src/main/java/org/jboss/jreadline/history/InMemoryHistory.java @@ -30,6 +30,7 @@ public class InMemoryHistory implements History { private int lastFetchedId = -1; private int lastSearchedId = 0; private StringBuilder current; + private SearchDirection searchDirection = SearchDirection.REVERSE; @Override public void push(StringBuilder entry) { @@ -60,6 +61,16 @@ public int size() { return historyList.size(); } + @Override + public void setSearchDirection(SearchDirection direction) { + searchDirection = direction; + } + + @Override + public SearchDirection getSearchDirection() { + return searchDirection; + } + @Override public StringBuilder getPreviousFetch() { if(size() < 1) @@ -73,7 +84,14 @@ 
public StringBuilder getPreviousFetch() { } @Override - public StringBuilder searchNext(String search) { + public StringBuilder search(String search) { + if(searchDirection == SearchDirection.REVERSE) + return searchReverse(search); + else + return searchForward(search); + } + + private StringBuilder searchForward(String search) { for(; lastSearchedId < size(); lastSearchedId++) { if(historyList.get(lastSearchedId).indexOf(search) != -1) return get(lastSearchedId); @@ -83,9 +101,8 @@ public StringBuilder searchNext(String search) { return null; } - @Override - public StringBuilder searchPrevious(String search) { - if(lastSearchedId < 1) + private StringBuilder searchReverse(String search) { + if(lastSearchedId < 1 || lastSearchedId >= size()) lastSearchedId = size()-1; for(; lastSearchedId >= 0; lastSearchedId-- ) { diff --git a/src/main/java/org/jboss/jreadline/history/SearchDirection.java b/src/main/java/org/jboss/jreadline/history/SearchDirection.java new file mode 100644 index 000000000..8a701ca6e --- /dev/null +++ b/src/main/java/org/jboss/jreadline/history/SearchDirection.java @@ -0,0 +1,26 @@ + /* + * JBoss, Home of Professional Open Source + * Copyright 2010, Red Hat Middleware LLC, and individual contributors + * by the @authors tag. See the copyright.txt in the distribution for a + * full listing of individual contributors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.jboss.jreadline.history; + +/** + * + * @author <a href="mailto:[email protected]">Ståle W. Pedersen</a> + */ +public enum SearchDirection { + REVERSE, + FORWARD; +} diff --git a/src/main/java/org/jboss/jreadline/terminal/POSIXTerminal.java b/src/main/java/org/jboss/jreadline/terminal/POSIXTerminal.java index 0e95e20d9..33b399201 100644 --- a/src/main/java/org/jboss/jreadline/terminal/POSIXTerminal.java +++ b/src/main/java/org/jboss/jreadline/terminal/POSIXTerminal.java @@ -76,7 +76,8 @@ public void init() { //checkBackspace(); // set the console to be character-buffered instead of line-buffered - stty("-icanon min 1"); + // -ixon will give access to ctrl-s/ctrl-q + stty("-ixon -icanon min 1"); // disable character echoing stty("-echo");
b6aa74ef6393229a9d08ace867cdccdc63a91c64
hbase
HBASE-8299 ExploringCompactionPolicy can get- stuck in rare cases.--git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1475966 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hbase
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java index 5330085ac415..e7784ab2bd3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java @@ -26,10 +26,10 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; +import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; import org.apache.hadoop.hbase.util.ReflectionUtils; -import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy; /** * Default StoreEngine creates the default compactor, policy, and store file manager, or diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 59a463f32e2c..9ea31777753b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -280,6 +280,10 @@ public long getStoreFileTtl() { public long getMemstoreFlushSize() { return this.region.memstoreFlushSize; } + + public long getBlockingFileCount() { + return blockingFileCount; + } /* End implementation of StoreConfigInformation */ /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java index d38d70975724..e7b7774bf877 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java @@ -40,4 +40,9 @@ public interface StoreConfigInformation { * Gets the cf-specific time-to-live for store files. */ public long getStoreFileTtl(); + + /** + * The number of files required before flushes for this store will be blocked. + */ + public long getBlockingFileCount(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java index be9a2128f4b5..d27e0b99d3d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.regionserver.compactions; - import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -29,80 +28,128 @@ import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; import org.apache.hadoop.hbase.regionserver.StoreFile; +/** + * Class to pick which files if any to compact together. + * + * This class will search all possibilities for different and if it gets stuck it will choose + * the smallest set of files to compact. + */ @InterfaceAudience.Private public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { - public ExploringCompactionPolicy(Configuration conf, - StoreConfigInformation storeConfigInfo) { + /** Computed number of files that are needed to assume compactions are stuck. */ + private final long filesNeededToForce; + + /** + * Constructor for ExploringCompactionPolicy. + * @param conf The configuration object + * @param storeConfigInfo An object to provide info about the store. 
+ */ + public ExploringCompactionPolicy(final Configuration conf, + final StoreConfigInformation storeConfigInfo) { super(conf, storeConfigInfo); + filesNeededToForce = storeConfigInfo.getBlockingFileCount(); } @Override - ArrayList<StoreFile> applyCompactionPolicy(ArrayList<StoreFile> candidates, - boolean mayUseOffPeak) throws IOException { + final ArrayList<StoreFile> applyCompactionPolicy(final ArrayList<StoreFile> candidates, + final boolean mayUseOffPeak) throws IOException { // Start off choosing nothing. List<StoreFile> bestSelection = new ArrayList<StoreFile>(0); + List<StoreFile> smallest = new ArrayList<StoreFile>(0); long bestSize = 0; + long smallestSize = Long.MAX_VALUE; + + boolean mightBeStuck = candidates.size() >= filesNeededToForce; // Consider every starting place. for (int start = 0; start < candidates.size(); start++) { // Consider every different sub list permutation in between start and end with min files. - for(int currentEnd = start + comConf.getMinFilesToCompact() - 1; + for (int currentEnd = start + comConf.getMinFilesToCompact() - 1; currentEnd < candidates.size(); currentEnd++) { - List<StoreFile> potentialMatchFiles = candidates.subList(start, currentEnd+1); + List<StoreFile> potentialMatchFiles = candidates.subList(start, currentEnd + 1); // Sanity checks - if (potentialMatchFiles.size() < comConf.getMinFilesToCompact()) continue; - if (potentialMatchFiles.size() > comConf.getMaxFilesToCompact()) continue; - if (!filesInRatio(potentialMatchFiles, mayUseOffPeak)) continue; + if (potentialMatchFiles.size() < comConf.getMinFilesToCompact()) { + continue; + } + if (potentialMatchFiles.size() > comConf.getMaxFilesToCompact()) { + continue; + } // Compute the total size of files that will // have to be read if this set of files is compacted. - long size = 0; + long size = getTotalStoreSize(potentialMatchFiles); + + // Store the smallest set of files. This stored set of files will be used + // if it looks like the algorithm is stuck. 
+ if (size < smallestSize) { + smallest = potentialMatchFiles; + smallestSize = size; + } + + if (size >= comConf.getMinCompactSize() + && !filesInRatio(potentialMatchFiles, mayUseOffPeak)) { + continue; + } - for (StoreFile s:potentialMatchFiles) { - size += s.getReader().length(); + if (size > comConf.getMaxCompactSize()) { + continue; } // Keep if this gets rid of more files. Or the same number of files for less io. - if (potentialMatchFiles.size() > bestSelection.size() || - (potentialMatchFiles.size() == bestSelection.size() && size < bestSize)) { + if (potentialMatchFiles.size() > bestSelection.size() + || (potentialMatchFiles.size() == bestSelection.size() && size < bestSize)) { bestSelection = potentialMatchFiles; bestSize = size; } } } - + if (bestSelection.size() == 0 && mightBeStuck) { + return new ArrayList<StoreFile>(smallest); + } return new ArrayList<StoreFile>(bestSelection); } /** - * Check that all files satisfy the r - * @param files - * @return + * Find the total size of a list of store files. + * @param potentialMatchFiles StoreFile list. + * @return Sum of StoreFile.getReader().length(); */ - private boolean filesInRatio(List<StoreFile> files, boolean isOffPeak) { + private long getTotalStoreSize(final List<StoreFile> potentialMatchFiles) { + long size = 0; + + for (StoreFile s:potentialMatchFiles) { + size += s.getReader().length(); + } + return size; + } + + /** + * Check that all files satisfy the constraint + * FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i) ) * Ratio. + * + * @param files List of store files to consider as a compaction candidate. + * @param isOffPeak should the offPeak compaction ratio be used ? + * @return a boolean if these files satisfy the ratio constraints. + */ + private boolean filesInRatio(final List<StoreFile> files, final boolean isOffPeak) { if (files.size() < 2) { return true; } - double currentRatio = isOffPeak ? 
- comConf.getCompactionRatioOffPeak() : comConf.getCompactionRatio(); + final double currentRatio = + isOffPeak ? comConf.getCompactionRatioOffPeak() : comConf.getCompactionRatio(); - long totalFileSize = 0; - for (int i = 0; i < files.size(); i++) { - totalFileSize += files.get(i).getReader().length(); - } - for (int i = 0; i < files.size(); i++) { - long singleFileSize = files.get(i).getReader().length(); - long sumAllOtherFilesize = totalFileSize - singleFileSize; + long totalFileSize = getTotalStoreSize(files); - if (( singleFileSize > sumAllOtherFilesize * currentRatio) - && (sumAllOtherFilesize >= comConf.getMinCompactSize())){ + for (StoreFile file : files) { + long singleFileSize = file.getReader().length(); + long sumAllOtherFileSizes = totalFileSize - singleFileSize; + + if (singleFileSize > sumAllOtherFileSizes * currentRatio) { return false; } } - return true; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java index 78fd9da9c039..c1bc17000dad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java @@ -114,6 +114,14 @@ public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles, candidateSelection = checkMinFilesCriteria(candidateSelection); } candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, majorCompaction); + + if (candidateSelection.size() == 0 + && candidateFiles.size() >= storeConfigInfo.getBlockingFileCount()) { + candidateSelection = new ArrayList<StoreFile>(candidateFiles); + candidateSelection + .subList(0, Math.max(0,candidateSelection.size() - comConf.getMinFilesToCompact())) + .clear(); + } CompactionRequest result = new 
CompactionRequest(candidateSelection); result.setOffPeak(!candidateSelection.isEmpty() && !majorCompaction && mayUseOffPeak); return result; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java index 20c388bdb6b6..b647ff8a838b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java @@ -62,7 +62,7 @@ public class TestDefaultCompactSelection extends TestCase { protected static final int maxFiles = 5; protected static final long minSize = 10; - protected static final long maxSize = 1000; + protected static final long maxSize = 2100; private HLog hlog; private HRegion region; @@ -269,12 +269,8 @@ public void testCompactionRatio() throws IOException { // big size + threshold compactEquals(sfCreate(tooBig, tooBig, 700,700) /* empty */); // small files = don't care about ratio - compactEquals(sfCreate(8,3,1), 8,3,1); - /* TODO: add sorting + unit test back in when HBASE-2856 is fixed - // sort first so you don't include huge file the tail end. - // happens with HFileOutputFormat bulk migration - compactEquals(sfCreate(100,50,23,12,12, 500), 23, 12, 12); - */ + compactEquals(sfCreate(7,1,1), 7,1,1); + // don't exceed max file compact threshold // note: file selection starts with largest to smallest. 
compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1); @@ -285,6 +281,15 @@ public void testCompactionRatio() throws IOException { compactEquals(sfCreate(251, 253, 251, maxSize -1), 251, 253, 251); + compactEquals(sfCreate(maxSize -1,maxSize -1,maxSize -1) /* empty */); + + // Always try and compact something to get below blocking storefile count + this.conf.setLong("hbase.hstore.compaction.min.size", 1); + store.storeEngine.getCompactionPolicy().setConf(conf); + compactEquals(sfCreate(512,256,128,64,32,16,8,4,2,1), 4,2,1); + this.conf.setLong("hbase.hstore.compaction.min.size", minSize); + store.storeEngine.getCompactionPolicy().setConf(conf); + /* MAJOR COMPACTION */ // if a major compaction has been forced, then compact everything compactEquals(sfCreate(50,25,12,12), true, 50, 25, 12, 12); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java new file mode 100644 index 000000000000..68d57afd7ca8 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver.compactions; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.hbase.regionserver.StoreFile; + +/** + * Class to generate several lists of StoreFiles that are all the same size. + */ +class ConstantSizeFileListGenerator extends StoreFileListGenerator { + + /** How many mb's mock storefiles should be. */ + private static final int FILESIZE = 5; + + ConstantSizeFileListGenerator() { + super(ConstantSizeFileListGenerator.class); + } + + @Override + public final Iterator<List<StoreFile>> iterator() { + return new Iterator<List<StoreFile>>() { + private int count = 0; + + @Override + public boolean hasNext() { + return count < MAX_FILE_GEN_ITERS; + } + + @Override + public List<StoreFile> next() { + count += 1; + ArrayList<StoreFile> files = new ArrayList<StoreFile>(NUM_FILES_GEN); + for (int i = 0; i < NUM_FILES_GEN; i++) { + files.add(createMockStoreFile(FILESIZE)); + } + return files; + } + + @Override + public void remove() { + + } + }; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java new file mode 100644 index 000000000000..5265d8abd0bb --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver.compactions; + + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; +import org.apache.hadoop.hbase.regionserver.StoreFile; + +/** + * Test Policy to compact everything every time. + */ +public class EverythingPolicy extends RatioBasedCompactionPolicy { + /** + * Constructor. + * + * @param conf The Conf. + * @param storeConfigInfo Info about the store. + */ + public EverythingPolicy(final Configuration conf, + final StoreConfigInformation storeConfigInfo) { + super(conf, storeConfigInfo); + } + + @Override + final ArrayList<StoreFile> applyCompactionPolicy(final ArrayList<StoreFile> candidates, + final boolean mayUseOffPeak) throws IOException { + + if (candidates.size() < comConf.getMinFilesToCompact()) { + return new ArrayList<StoreFile>(0); + } + + return new ArrayList<StoreFile>(candidates); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ExplicitFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ExplicitFileListGenerator.java new file mode 100644 index 000000000000..24302b81b688 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ExplicitFileListGenerator.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver.compactions; + +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.hbase.regionserver.StoreFile; + +/** + * Class to create list of mock storefiles of specified length. + * This is great for testing edge cases. + */ +class ExplicitFileListGenerator extends StoreFileListGenerator { + /** The explicit files size lists to return. 
*/ + private int[][] fileSizes = new int[][]{ + {1000, 350, 200, 100, 20, 10, 10}, + {1000, 450, 200, 100, 20, 10, 10}, + {1000, 550, 200, 100, 20, 10, 10}, + {1000, 650, 200, 100, 20, 10, 10}, + {1, 1, 600, 1, 1, 1, 1}, + {1, 1, 600, 600, 600, 600, 600, 1, 1, 1, 1}, + {1, 1, 600, 600, 600, 1, 1, 1, 1}, + {1000, 250, 25, 25, 25, 25, 25, 25}, + {25, 25, 25, 25, 25, 25, 500}, + {1000, 1000, 1000, 1000, 900}, + {107, 50, 10, 10, 10, 10}, + {2000, 107, 50, 10, 10, 10, 10}, + {9, 8, 7, 6, 5, 4, 3, 2, 1}, + {11, 18, 9, 8, 7, 6, 5, 4, 3, 2, 1}, + {110, 18, 18, 18, 18, 9, 8, 7, 6, 5, 4, 3, 2, 1}, + {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15} + }; + + ExplicitFileListGenerator() { + super(ExplicitFileListGenerator.class); + } + + @Override + public final Iterator<List<StoreFile>> iterator() { + return new Iterator<List<StoreFile>>() { + private int nextIndex = 0; + @Override + public boolean hasNext() { + return nextIndex < fileSizes.length; + } + + @Override + public List<StoreFile> next() { + List<StoreFile> files = createStoreFileList(fileSizes[nextIndex]); + nextIndex += 1; + return files; + } + + @Override + public void remove() { + } + }; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java new file mode 100644 index 000000000000..a19e9ad8dc87 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver.compactions; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.commons.math.random.GaussianRandomGenerator; +import org.apache.commons.math.random.MersenneTwister; +import org.apache.hadoop.hbase.regionserver.StoreFile; + +class GaussianFileListGenerator extends StoreFileListGenerator { + + GaussianFileListGenerator() { + super(GaussianFileListGenerator.class); + } + + @Override + public Iterator<List<StoreFile>> iterator() { + return new Iterator<List<StoreFile>>() { + private GaussianRandomGenerator gen = + new GaussianRandomGenerator(new MersenneTwister(random.nextInt())); + private int count = 0; + + @Override + public boolean hasNext() { + return count < MAX_FILE_GEN_ITERS; + } + + @Override + public List<StoreFile> next() { + count += 1; + ArrayList<StoreFile> files = new ArrayList<StoreFile>(NUM_FILES_GEN); + for (int i = 0; i < NUM_FILES_GEN; i++) { + files.add(createMockStoreFile( + (int) Math.ceil(Math.max(0, gen.nextNormalizedDouble() * 32 + 32))) + ); + } + + return files; + } + + @Override + public void remove() { + } + }; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java new file mode 100644 index 000000000000..076073647ee5 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java @@ -0,0 
+1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver.compactions; + +import java.util.LinkedList; +import java.util.List; +import java.util.Random; + +import com.google.common.base.Objects; +import com.google.common.io.Files; +import org.apache.commons.lang.RandomStringUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.util.StringUtils; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Base class of objects that can create mock store files with a given size. + */ +class MockStoreFileGenerator { + /** How many chars long the store file name will be. */ + private static final int FILENAME_LENGTH = 10; + /** The random number generator. 
*/ + protected Random random; + + MockStoreFileGenerator(Class klass) { + random = new Random(klass.getSimpleName().hashCode()); + } + + protected List<StoreFile> createStoreFileList(final int[] fs) { + List<StoreFile> storeFiles = new LinkedList<StoreFile>(); + for (int fileSize : fs) { + storeFiles.add(createMockStoreFile(fileSize)); + } + return storeFiles; + } + + protected StoreFile createMockStoreFile(final long size) { + return createMockStoreFile(size * 1024 * 1024, -1L); + } + + protected StoreFile createMockStoreFileBytes(final long size) { + return createMockStoreFile(size, -1L); + } + + protected StoreFile createMockStoreFile(final long sizeInBytes, final long seqId) { + StoreFile mockSf = mock(StoreFile.class); + StoreFile.Reader reader = mock(StoreFile.Reader.class); + String stringPath = "/hbase/testTable/regionA/" + + RandomStringUtils.random(FILENAME_LENGTH, 0, 0, true, true, null, random); + Path path = new Path(stringPath); + + + when(reader.getSequenceID()).thenReturn(seqId); + when(reader.getTotalUncompressedBytes()).thenReturn(sizeInBytes); + when(reader.length()).thenReturn(sizeInBytes); + + when(mockSf.getPath()).thenReturn(path); + when(mockSf.excludeFromMinorCompaction()).thenReturn(false); + when(mockSf.isReference()).thenReturn(false); // TODO come back to + // this when selection takes this into account + when(mockSf.getReader()).thenReturn(reader); + String toString = Objects.toStringHelper("MockStoreFile") + .add("isReference", false) + .add("fileSize", StringUtils.humanReadableInt(sizeInBytes)) + .add("seqId", seqId) + .add("path", stringPath).toString(); + when(mockSf.toString()).thenReturn(toString); + + return mockSf; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index e26af1774899..8dc6550c0261 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -18,14 +18,11 @@ package org.apache.hadoop.hbase.regionserver.compactions; -import com.google.common.base.Objects; -import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; import org.apache.hadoop.hbase.regionserver.StoreFile; @@ -37,130 +34,135 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.LinkedList; import java.util.List; -import java.util.Random; import static org.mockito.Mockito.mock; + import static org.mockito.Mockito.when; -@Category(SmallTests.class) +@Category(MediumTests.class) @RunWith(Parameterized.class) -public class PerfTestCompactionPolicies { +public class PerfTestCompactionPolicies extends MockStoreFileGenerator { + - static final Log LOG = LogFactory.getLog(PerfTestCompactionPolicies.class); + private static final Log LOG = LogFactory.getLog(PerfTestCompactionPolicies.class); private final RatioBasedCompactionPolicy cp; + private final StoreFileListGenerator generator; + private final HStore store; + private Class<? 
extends StoreFileListGenerator> fileGenClass; private final int max; private final int min; private final float ratio; private long written = 0; - private long fileDiff = 0; - private Random random; @Parameterized.Parameters public static Collection<Object[]> data() { - return Arrays.asList(new Object[][] { - {RatioBasedCompactionPolicy.class, 3, 2, 1.2f}, - {ExploringCompactionPolicy.class, 3, 2, 1.2f}, - {RatioBasedCompactionPolicy.class, 4, 2, 1.2f}, - {ExploringCompactionPolicy.class, 4, 2, 1.2f}, - {RatioBasedCompactionPolicy.class, 5, 2, 1.2f}, - {ExploringCompactionPolicy.class, 5, 2, 1.2f}, - {RatioBasedCompactionPolicy.class, 4, 2, 1.3f}, - {ExploringCompactionPolicy.class, 4, 2, 1.3f}, - {RatioBasedCompactionPolicy.class, 4, 2, 1.4f}, - {ExploringCompactionPolicy.class, 4, 2, 1.4f}, - - }); + + + + Class[] policyClasses = new Class[]{ + EverythingPolicy.class, + RatioBasedCompactionPolicy.class, + ExploringCompactionPolicy.class, + }; + + Class[] fileListGenClasses = new Class[]{ + ExplicitFileListGenerator.class, + ConstantSizeFileListGenerator.class, + SemiConstantSizeFileListGenerator.class, + GaussianFileListGenerator.class, + SinusoidalFileListGenerator.class, + SpikyFileListGenerator.class + }; + + int[] maxFileValues = new int[] {10}; + int[] minFilesValues = new int[] {3}; + float[] ratioValues = new float[] {1.2f}; + + List<Object[]> params = new ArrayList<Object[]>( + maxFileValues.length + * minFilesValues.length + * fileListGenClasses.length + * policyClasses.length); + + + for (Class policyClass : policyClasses) { + for (Class genClass: fileListGenClasses) { + for (int maxFile:maxFileValues) { + for (int minFile:minFilesValues) { + for (float ratio:ratioValues) { + params.add(new Object[] {policyClass, genClass, maxFile, minFile, ratio}); + } + } + } + } + } + + return params; } /** - * Test the perf of a CompactionPolicy with settings - * @param cp The compaction policy to test - * @param max The maximum number of file to compact - * @param 
min The min number of files to compact - * @param ratio The ratio that files must be under to be compacted. + * Test the perf of a CompactionPolicy with settings. + * @param cpClass The compaction policy to test + * @param inMmax The maximum number of file to compact + * @param inMin The min number of files to compact + * @param inRatio The ratio that files must be under to be compacted. */ - public PerfTestCompactionPolicies(Class<? extends CompactionPolicy> cpClass, - int max, int min, float ratio) { - this.max = max; - this.min = min; - this.ratio = ratio; - - //Hide lots of logging so the sysout is usable as a tab delimited file. + public PerfTestCompactionPolicies( + final Class<? extends CompactionPolicy> cpClass, + final Class<? extends StoreFileListGenerator> fileGenClass, + final int inMmax, + final int inMin, + final float inRatio) throws IllegalAccessException, InstantiationException { + super(PerfTestCompactionPolicies.class); + this.fileGenClass = fileGenClass; + this.max = inMmax; + this.min = inMin; + this.ratio = inRatio; + + // Hide lots of logging so the system out is usable as a tab delimited file. org.apache.log4j.Logger.getLogger(CompactionConfiguration.class). setLevel(org.apache.log4j.Level.ERROR); + org.apache.log4j.Logger.getLogger(RatioBasedCompactionPolicy.class). + setLevel(org.apache.log4j.Level.ERROR); org.apache.log4j.Logger.getLogger(cpClass).setLevel(org.apache.log4j.Level.ERROR); + Configuration configuration = HBaseConfiguration.create(); - //Make sure that this doesn't include every file. + // Make sure that this doesn't include every file. 
configuration.setInt("hbase.hstore.compaction.max", max); configuration.setInt("hbase.hstore.compaction.min", min); configuration.setFloat("hbase.hstore.compaction.ratio", ratio); - HStore store = createMockStore(); + store = createMockStore(); this.cp = ReflectionUtils.instantiateWithCustomCtor(cpClass.getName(), - new Class[] { Configuration.class, StoreConfigInformation.class }, - new Object[] { configuration, store }); + new Class[] {Configuration.class, StoreConfigInformation.class }, + new Object[] {configuration, store }); - //Used for making paths - random = new Random(42); + this.generator = fileGenClass.newInstance(); + // Used for making paths } @Test - public void testSelection() throws Exception { - //Some special cases. To simulate bulk loading patterns. - int[][] fileSizes = new int[][]{ - {1000, 350, 200, 100, 20, 10, 10}, - {1000, 450, 200, 100, 20, 10, 10}, - {1000, 550, 200, 100, 20, 10, 10}, - {1000, 650, 200, 100, 20, 10, 10}, - {1000, 250, 25, 25, 25, 25, 25, 25}, - {25, 25, 25, 25, 25, 25, 500}, - {1000, 1000, 1000, 1000, 900}, - {107, 50, 10, 10, 10, 10}, - {2000, 107, 50, 10, 10, 10, 10}, - {9, 8, 7, 6, 5, 4, 3, 2, 1}, - {11, 18, 9, 8, 7, 6, 5, 4, 3, 2, 1}, - {110, 18, 18, 18, 18, 9, 8, 7, 6, 5, 4, 3, 2, 1}, - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15} - }; - - for (int[] fs : fileSizes) { - List<StoreFile> storeFiles = createStoreFileList(fs); - storeFiles = runIteration(storeFiles); - runIteration(storeFiles); - } - - for (int i = 0; i < 100; i++) { - List<StoreFile> storeFiles = new LinkedList<StoreFile>(); - - //Add some files to start with so that things are more normal - storeFiles.add(createMockStoreFile(random.nextInt(1700) + 500)); - storeFiles.add(createMockStoreFile(random.nextInt(700) + 400)); - storeFiles.add(createMockStoreFile(random.nextInt(400) + 300)); - storeFiles.add(createMockStoreFile(random.nextInt(400) + 200)); - - for (int x = 0; x < 50; x++) { - storeFiles.add(createMockStoreFile(random.nextInt(90) + 10)); - 
storeFiles.add(createMockStoreFile(random.nextInt(90) + 10)); - storeFiles.add(createMockStoreFile(random.nextInt(90) + 10)); - storeFiles.add(createMockStoreFile(random.nextInt(90) + 10)); - storeFiles.add(createMockStoreFile(random.nextInt(90) + 10)); - storeFiles.add(createMockStoreFile(random.nextInt(90) + 10)); - storeFiles = runIteration(storeFiles); - storeFiles = runIteration(storeFiles); + public final void testSelection() throws Exception { + long fileDiff = 0; + for (List<StoreFile> storeFileList : generator) { + List<StoreFile> currentFiles = new ArrayList<StoreFile>(18); + for (StoreFile file : storeFileList) { + currentFiles.add(file); + currentFiles = runIteration(currentFiles); } + fileDiff += (storeFileList.size() - currentFiles.size()); } - //print out tab delimited so that it can be used in excel/gdocs. + // print out tab delimited so that it can be used in excel/gdocs. System.out.println( - cp.getClass().getSimpleName() + cp.getClass().getSimpleName() + + "\t" + fileGenClass.getSimpleName() + "\t" + max + "\t" + min + "\t" + ratio @@ -175,7 +177,7 @@ private List<StoreFile> runIteration(List<StoreFile> startingStoreFiles) throws List<StoreFile> storeFiles = new ArrayList<StoreFile>(startingStoreFiles); CompactionRequest req = cp.selectCompaction( storeFiles, new ArrayList<StoreFile>(), false, false, false); - int newFileSize = 0; + long newFileSize = 0; Collection<StoreFile> filesToCompact = req.getFiles(); @@ -188,55 +190,17 @@ private List<StoreFile> runIteration(List<StoreFile> startingStoreFiles) throws newFileSize += storeFile.getReader().length(); } - storeFiles.add(createMockStoreFile(newFileSize)); + storeFiles.add(createMockStoreFileBytes(newFileSize)); } written += newFileSize; - fileDiff += storeFiles.size() - startingStoreFiles.size(); return storeFiles; } - private List<StoreFile> createStoreFileList(int[] fs) { - List<StoreFile> storeFiles = new LinkedList<StoreFile>(); - for (int fileSize : fs) { - 
storeFiles.add(createMockStoreFile(fileSize)); - } - return storeFiles; - } - - private StoreFile createMockStoreFile(int sizeMb) { - return createMockStoreFile(sizeMb, -1l); - } - - - private StoreFile createMockStoreFile(int sizeMb, long seqId) { - StoreFile mockSf = mock(StoreFile.class); - StoreFile.Reader reader = mock(StoreFile.Reader.class); - String stringPath = "/hbase/" + RandomStringUtils.random(10, 0, 0, true, true, null, random); - Path path = new Path(stringPath); - - when(reader.getSequenceID()).thenReturn(seqId); - when(reader.getTotalUncompressedBytes()).thenReturn(Long.valueOf(sizeMb)); - when(reader.length()).thenReturn(Long.valueOf(sizeMb)); - - when(mockSf.getPath()).thenReturn(path); - when(mockSf.excludeFromMinorCompaction()).thenReturn(false); - when(mockSf.isReference()).thenReturn(false); // TODO come back to - // this when selection takes this into account - when(mockSf.getReader()).thenReturn(reader); - String toString = Objects.toStringHelper("MockStoreFile") - .add("isReference", false) - .add("fileSize", sizeMb) - .add("seqId", seqId) - .add("path", stringPath).toString(); - when(mockSf.toString()).thenReturn(toString); - - return mockSf; - } - private HStore createMockStore() { HStore s = mock(HStore.class); when(s.getStoreFileTtl()).thenReturn(Long.MAX_VALUE); + when(s.getBlockingFileCount()).thenReturn(7L); return s; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java new file mode 100644 index 000000000000..ed4531a3101f --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver.compactions; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.hbase.regionserver.StoreFile; + +class SemiConstantSizeFileListGenerator extends StoreFileListGenerator { + SemiConstantSizeFileListGenerator() { + super(SemiConstantSizeFileListGenerator.class); + } + + @Override + public Iterator<List<StoreFile>> iterator() { + return new Iterator<List<StoreFile>>() { + private int count = 0; + + @Override + public boolean hasNext() { + return count < MAX_FILE_GEN_ITERS; + } + + @Override + public List<StoreFile> next() { + count += 1; + ArrayList<StoreFile> files = new ArrayList<StoreFile>(NUM_FILES_GEN); + for (int i = 0; i < NUM_FILES_GEN; i++) { + files.add(createMockStoreFile(random.nextInt(5) + 30)); + } + return files; + } + + @Override + public void remove() { + + } + }; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java new file mode 100644 index 000000000000..6afbb2f2de65 --- /dev/null +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver.compactions; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.hbase.regionserver.StoreFile; + +class SinusoidalFileListGenerator extends StoreFileListGenerator { + SinusoidalFileListGenerator() { + super(SinusoidalFileListGenerator.class); + } + + @Override + public Iterator<List<StoreFile>> iterator() { + + + return new Iterator<List<StoreFile>>() { + private int count = 0; + @Override + public boolean hasNext() { + return count < MAX_FILE_GEN_ITERS; + } + + @Override + public List<StoreFile> next() { + count += 1; + ArrayList<StoreFile> files = new ArrayList<StoreFile>(NUM_FILES_GEN); + for (int x = 0; x < NUM_FILES_GEN; x++) { + int fileSize = (int) Math.abs(64 * Math.sin((Math.PI * x) / 50.0)) + 1; + files.add(createMockStoreFile(fileSize)); + } + return files; + } + + @Override + public void remove() { + } + }; + } +} diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java new file mode 100644 index 000000000000..ebaa7115a143 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver.compactions; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.hbase.regionserver.StoreFile; + +class SpikyFileListGenerator extends StoreFileListGenerator { + + SpikyFileListGenerator() { + super(SpikyFileListGenerator.class); + } + + @Override + public Iterator<List<StoreFile>> iterator() { + return new Iterator<List<StoreFile>>() { + private int count = 0; + + @Override + public boolean hasNext() { + return count < (MAX_FILE_GEN_ITERS); + } + + @Override + public List<StoreFile> next() { + count += 1; + ArrayList<StoreFile> files = new ArrayList<StoreFile>(NUM_FILES_GEN); + for (int x = 0; x < NUM_FILES_GEN; x++) { + int fileSize = random.nextInt(5) + 1; + if ( x % 10 == 0) { + fileSize = random.nextInt(5) + 50; + } + files.add(createMockStoreFile(fileSize)); + } + return files; + } + + @Override + public void remove() { + } + }; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/StoreFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/StoreFileListGenerator.java new file mode 100644 index 000000000000..643f7714fd27 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/StoreFileListGenerator.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver.compactions; + +import java.util.List; + +import org.apache.hadoop.hbase.regionserver.StoreFile; + +public abstract class StoreFileListGenerator + extends MockStoreFileGenerator implements Iterable<List<StoreFile>> { + + public static final int MAX_FILE_GEN_ITERS = 10; + public static final int NUM_FILES_GEN = 1000; + + StoreFileListGenerator(final Class klass) { + super(klass); + } +}
f75570734bb41a61d66b10922b6fc1ed48119067
hadoop
YARN-2582. Fixed Log CLI and Web UI for showing- aggregated logs of LRS. Contributed Xuan Gong.--(cherry picked from commit e90718fa5a0e7c18592af61534668acebb9db51b)-
c
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 92512356d7e15..ccfc1db7d70bc 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -346,6 +346,9 @@ Release 2.6.0 - UNRELEASED YARN-2673. Made timeline client put APIs retry if ConnectException happens. (Li Lu via zjshen) + YARN-2582. Fixed Log CLI and Web UI for showing aggregated logs of LRS. (Xuan + Gong via zjshen) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java index eb6169cf36868..0b34a46281baf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java @@ -31,7 +31,6 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Tool; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -39,8 +38,6 @@ import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat; -import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers; import org.apache.hadoop.yarn.util.ConverterUtils; @@ -113,17 +110,16 @@ public int run(String[] args) throws Exception { System.err.println("Invalid ApplicationId specified"); return -1; } - + try { int resultCode = verifyApplicationState(appId); if (resultCode != 0) { - 
System.out.println("Application has not completed." + - " Logs are only available after an application completes"); + System.out.println("Logs are not avaiable right now."); return resultCode; } } catch (Exception e) { - System.err.println("Unable to get ApplicationState." + - " Attempting to fetch logs directly from the filesystem."); + System.err.println("Unable to get ApplicationState." + + " Attempting to fetch logs directly from the filesystem."); } LogCLIHelpers logCliHelper = new LogCLIHelpers(); @@ -141,18 +137,9 @@ public int run(String[] args) throws Exception { printHelpMessage(printOpts); resultCode = -1; } else { - Path remoteRootLogDir = - new Path(getConf().get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, - YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR)); - AggregatedLogFormat.LogReader reader = - new AggregatedLogFormat.LogReader(getConf(), - LogAggregationUtils.getRemoteNodeLogFileForApp( - remoteRootLogDir, - appId, - appOwner, - ConverterUtils.toNodeId(nodeAddress), - LogAggregationUtils.getRemoteNodeLogDirSuffix(getConf()))); - resultCode = logCliHelper.dumpAContainerLogs(containerIdStr, reader, System.out); + resultCode = + logCliHelper.dumpAContainersLogs(appIdStr, containerIdStr, + nodeAddress, appOwner); } return resultCode; @@ -167,10 +154,10 @@ private int verifyApplicationState(ApplicationId appId) throws IOException, switch (appReport.getYarnApplicationState()) { case NEW: case NEW_SAVING: - case ACCEPTED: case SUBMITTED: - case RUNNING: return -1; + case ACCEPTED: + case RUNNING: case FAILED: case FINISHED: case KILLED: diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java index f02f3358a25f5..132dca245c5d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java @@ -25,21 +25,38 @@ import static org.mockito.Mockito.mock; import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileWriter; import java.io.IOException; import java.io.PrintStream; import java.io.PrintWriter; +import java.io.Writer; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import org.junit.Assert; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.ApplicationAccessType; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat; +import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers; import org.junit.Before; import org.junit.Test; @@ -138,6 +155,116 @@ public void testHelpMessage() throws Exception { Assert.assertEquals(appReportStr, sysOutStream.toString()); } + @Test (timeout = 15000) + public void testFetchApplictionLogs() throws Exception { + String 
remoteLogRootDir = "target/logs/"; + Configuration configuration = new Configuration(); + configuration.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true); + configuration + .set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir); + configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); + configuration.set(YarnConfiguration.YARN_ADMIN_ACL, "admin"); + FileSystem fs = FileSystem.get(configuration); + + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 1); + ApplicationAttemptId appAttemptId = + ApplicationAttemptIdPBImpl.newInstance(appId, 1); + ContainerId containerId1 = ContainerIdPBImpl.newInstance(appAttemptId, 1); + ContainerId containerId2 = ContainerIdPBImpl.newInstance(appAttemptId, 2); + + NodeId nodeId = NodeId.newInstance("localhost", 1234); + + // create local logs + String rootLogDir = "target/LocalLogs"; + Path rootLogDirPath = new Path(rootLogDir); + if (fs.exists(rootLogDirPath)) { + fs.delete(rootLogDirPath, true); + } + assertTrue(fs.mkdirs(rootLogDirPath)); + + Path appLogsDir = new Path(rootLogDirPath, appId.toString()); + if (fs.exists(appLogsDir)) { + fs.delete(appLogsDir, true); + } + assertTrue(fs.mkdirs(appLogsDir)); + List<String> rootLogDirs = Arrays.asList(rootLogDir); + + // create container logs in localLogDir + createContainerLogInLocalDir(appLogsDir, containerId1, fs); + createContainerLogInLocalDir(appLogsDir, containerId2, fs); + + Path path = + new Path(remoteLogRootDir + ugi.getShortUserName() + + "/logs/application_0_0001"); + if (fs.exists(path)) { + fs.delete(path, true); + } + assertTrue(fs.mkdirs(path)); + // upload container logs into remote directory + uploadContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId, + containerId1, path, fs); + uploadContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId, + containerId2, path, fs); + + YarnClient mockYarnClient = + 
createMockYarnClient(YarnApplicationState.FINISHED); + LogsCLI cli = new LogsCLIForTest(mockYarnClient); + cli.setConf(configuration); + + int exitCode = cli.run(new String[] { "-applicationId", appId.toString() }); + assertTrue(exitCode == 0); + assertTrue(sysOutStream.toString().contains( + "Hello container_0_0001_01_000001!")); + assertTrue(sysOutStream.toString().contains( + "Hello container_0_0001_01_000002!")); + sysOutStream.reset(); + + exitCode = + cli.run(new String[] { "-applicationId", appId.toString(), + "-nodeAddress", nodeId.toString(), "-containerId", + containerId1.toString() }); + assertTrue(exitCode == 0); + assertTrue(sysOutStream.toString().contains( + "Hello container_0_0001_01_000001!")); + + fs.delete(new Path(remoteLogRootDir), true); + fs.delete(new Path(rootLogDir), true); + } + + private static void createContainerLogInLocalDir(Path appLogsDir, + ContainerId containerId, FileSystem fs) throws Exception { + Path containerLogsDir = new Path(appLogsDir, containerId.toString()); + if (fs.exists(containerLogsDir)) { + fs.delete(containerLogsDir, true); + } + assertTrue(fs.mkdirs(containerLogsDir)); + Writer writer = + new FileWriter(new File(containerLogsDir.toString(), "sysout")); + writer.write("Hello " + containerId + "!"); + writer.close(); + } + + private static void uploadContainerLogIntoRemoteDir(UserGroupInformation ugi, + Configuration configuration, List<String> rootLogDirs, NodeId nodeId, + ContainerId containerId, Path appDir, FileSystem fs) throws Exception { + Path path = + new Path(appDir, LogAggregationUtils.getNodeString(nodeId) + + System.currentTimeMillis()); + AggregatedLogFormat.LogWriter writer = + new AggregatedLogFormat.LogWriter(configuration, path, ugi); + writer.writeApplicationOwner(ugi.getUserName()); + + Map<ApplicationAccessType, String> appAcls = + new HashMap<ApplicationAccessType, String>(); + appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName()); + writer.writeApplicationACLs(appAcls); + 
writer.append(new AggregatedLogFormat.LogKey(containerId), + new AggregatedLogFormat.LogValue(rootLogDirs, containerId, + UserGroupInformation.getCurrentUser().getShortUserName())); + writer.close(); + } + private YarnClient createMockYarnClient(YarnApplicationState appState) throws YarnException, IOException { YarnClient mockClient = mock(YarnClient.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java index fe4983e70b2b1..34c9100cc8ba7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java @@ -110,4 +110,9 @@ public static String getRemoteNodeLogDirSuffix(Configuration conf) { public static String getNodeString(NodeId nodeId) { return nodeId.toString().replace(":", "_"); } + + @VisibleForTesting + public static String getNodeString(String nodeId) { + return nodeId.toString().replace(":", "_"); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java index 3bafdb35438af..9efdef891d2e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java @@ -52,19 +52,47 @@ public int dumpAContainersLogs(String appId, String containerId, YarnConfiguration.NM_REMOTE_APP_LOG_DIR, YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR)); String 
suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(getConf()); - Path logPath = LogAggregationUtils.getRemoteNodeLogFileForApp( + Path remoteAppLogDir = LogAggregationUtils.getRemoteAppLogDir( remoteRootLogDir, ConverterUtils.toApplicationId(appId), jobOwner, - ConverterUtils.toNodeId(nodeId), suffix); - AggregatedLogFormat.LogReader reader; + suffix); + RemoteIterator<FileStatus> nodeFiles; try { - reader = new AggregatedLogFormat.LogReader(getConf(), logPath); - } catch (FileNotFoundException fnfe) { - System.out.println("Logs not available at " + logPath.toString()); - System.out - .println("Log aggregation has not completed or is not enabled."); + Path qualifiedLogDir = + FileContext.getFileContext(getConf()).makeQualified( + remoteAppLogDir); + nodeFiles = + FileContext.getFileContext(qualifiedLogDir.toUri(), getConf()) + .listStatus(remoteAppLogDir); + } catch (FileNotFoundException fnf) { + logDirNotExist(remoteAppLogDir.toString()); + return -1; + } + boolean foundContainerLogs = false; + while (nodeFiles.hasNext()) { + FileStatus thisNodeFile = nodeFiles.next(); + String fileName = thisNodeFile.getPath().getName(); + if (fileName.contains(LogAggregationUtils.getNodeString(nodeId)) + && !fileName.endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) { + AggregatedLogFormat.LogReader reader = null; + try { + reader = + new AggregatedLogFormat.LogReader(getConf(), + thisNodeFile.getPath()); + if (dumpAContainerLogs(containerId, reader, System.out) > -1) { + foundContainerLogs = true; + } + } finally { + if (reader != null) { + reader.close(); + } + } + } + } + if (!foundContainerLogs) { + containerLogNotFound(containerId); return -1; } - return dumpAContainerLogs(containerId, reader, System.out); + return 0; } @Private @@ -81,8 +109,7 @@ public int dumpAContainerLogs(String containerIdStr, } if (valueStream == null) { - System.out.println("Logs for container " + containerIdStr - + " are not present in this log-file."); + containerLogNotFound(containerIdStr); 
return -1; } @@ -114,42 +141,49 @@ public int dumpAllContainersLogs(ApplicationId appId, String appOwner, nodeFiles = FileContext.getFileContext(qualifiedLogDir.toUri(), getConf()).listStatus(remoteAppLogDir); } catch (FileNotFoundException fnf) { - System.out.println("Logs not available at " + remoteAppLogDir.toString()); - System.out - .println("Log aggregation has not completed or is not enabled."); + logDirNotExist(remoteAppLogDir.toString()); return -1; } + boolean foundAnyLogs = false; while (nodeFiles.hasNext()) { FileStatus thisNodeFile = nodeFiles.next(); - AggregatedLogFormat.LogReader reader = new AggregatedLogFormat.LogReader( - getConf(), new Path(remoteAppLogDir, thisNodeFile.getPath().getName())); - try { + if (!thisNodeFile.getPath().getName() + .endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) { + AggregatedLogFormat.LogReader reader = + new AggregatedLogFormat.LogReader(getConf(), thisNodeFile.getPath()); + try { + + DataInputStream valueStream; + LogKey key = new LogKey(); + valueStream = reader.next(key); - DataInputStream valueStream; - LogKey key = new LogKey(); - valueStream = reader.next(key); - - while (valueStream != null) { - String containerString = "\n\nContainer: " + key + " on " - + thisNodeFile.getPath().getName(); - out.println(containerString); - out.println(StringUtils.repeat("=", containerString.length())); - while (true) { - try { - LogReader.readAContainerLogsForALogType(valueStream, out); - } catch (EOFException eof) { - break; + while (valueStream != null) { + String containerString = + "\n\nContainer: " + key + " on " + thisNodeFile.getPath().getName(); + out.println(containerString); + out.println(StringUtils.repeat("=", containerString.length())); + while (true) { + try { + LogReader.readAContainerLogsForALogType(valueStream, out); + foundAnyLogs = true; + } catch (EOFException eof) { + break; + } } - } - // Next container - key = new LogKey(); - valueStream = reader.next(key); + // Next container + key = new LogKey(); + 
valueStream = reader.next(key); + } + } finally { + reader.close(); } - } finally { - reader.close(); } } + if (! foundAnyLogs) { + emptyLogDir(remoteAppLogDir.toString()); + return -1; + } return 0; } @@ -162,4 +196,18 @@ public void setConf(Configuration conf) { public Configuration getConf() { return this.conf; } + + private static void containerLogNotFound(String containerId) { + System.out.println("Logs for container " + containerId + + " are not present in this log-file."); + } + + private static void logDirNotExist(String remoteAppLogDir) { + System.out.println(remoteAppLogDir + "does not exist."); + System.out.println("Log aggregation has not completed or is not enabled."); + } + + private static void emptyLogDir(String remoteAppLogDir) { + System.out.println(remoteAppLogDir + "does not have any log files."); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java index 2b83e6941e4f8..16e635994b533 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java @@ -30,7 +30,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -59,113 +62,127 @@ public class AggregatedLogsBlock extends HtmlBlock { @Override protected void render(Block html) { - 
AggregatedLogFormat.LogReader reader = null; - try { - ContainerId containerId = verifyAndGetContainerId(html); - NodeId nodeId = verifyAndGetNodeId(html); - String appOwner = verifyAndGetAppOwner(html); - LogLimits logLimits = verifyAndGetLogLimits(html); - if (containerId == null || nodeId == null || appOwner == null - || appOwner.isEmpty() || logLimits == null) { - return; - } - - ApplicationId applicationId = containerId.getApplicationAttemptId() - .getApplicationId(); - String logEntity = $(ENTITY_STRING); - if (logEntity == null || logEntity.isEmpty()) { - logEntity = containerId.toString(); - } + ContainerId containerId = verifyAndGetContainerId(html); + NodeId nodeId = verifyAndGetNodeId(html); + String appOwner = verifyAndGetAppOwner(html); + LogLimits logLimits = verifyAndGetLogLimits(html); + if (containerId == null || nodeId == null || appOwner == null + || appOwner.isEmpty() || logLimits == null) { + return; + } - if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, - YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) { - html.h1() - ._("Aggregation is not enabled. Try the nodemanager at " + nodeId) - ._(); - return; - } + ApplicationId applicationId = containerId.getApplicationAttemptId() + .getApplicationId(); + String logEntity = $(ENTITY_STRING); + if (logEntity == null || logEntity.isEmpty()) { + logEntity = containerId.toString(); + } - Path remoteRootLogDir = new Path(conf.get( - YarnConfiguration.NM_REMOTE_APP_LOG_DIR, - YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR)); + if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, + YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) { + html.h1() + ._("Aggregation is not enabled. 
Try the nodemanager at " + nodeId) + ._(); + return; + } - try { - reader = new AggregatedLogFormat.LogReader(conf, - LogAggregationUtils.getRemoteNodeLogFileForApp(remoteRootLogDir, - applicationId, appOwner, nodeId, - LogAggregationUtils.getRemoteNodeLogDirSuffix(conf))); - } catch (FileNotFoundException e) { - // ACLs not available till the log file is opened. - html.h1() - ._("Logs not available for " + logEntity - + ". Aggregation may not be complete, " - + "Check back later or try the nodemanager at " + nodeId)._(); - return; - } catch (IOException e) { - html.h1()._("Error getting logs for " + logEntity)._(); - LOG.error("Error getting logs for " + logEntity, e); - return; - } + Path remoteRootLogDir = new Path(conf.get( + YarnConfiguration.NM_REMOTE_APP_LOG_DIR, + YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR)); + Path remoteAppDir = LogAggregationUtils.getRemoteAppLogDir( + remoteRootLogDir, applicationId, appOwner, + LogAggregationUtils.getRemoteNodeLogDirSuffix(conf)); + RemoteIterator<FileStatus> nodeFiles; + try { + Path qualifiedLogDir = + FileContext.getFileContext(conf).makeQualified( + remoteAppDir); + nodeFiles = + FileContext.getFileContext(qualifiedLogDir.toUri(), conf) + .listStatus(remoteAppDir); + } catch (FileNotFoundException fnf) { + html.h1() + ._("Logs not available for " + logEntity + + ". 
Aggregation may not be complete, " + + "Check back later or try the nodemanager at " + nodeId)._(); + return; + } catch (Exception ex) { + html.h1() + ._("Error getting logs at " + nodeId)._(); + return; + } - String owner = null; - Map<ApplicationAccessType, String> appAcls = null; - try { - owner = reader.getApplicationOwner(); - appAcls = reader.getApplicationAcls(); - } catch (IOException e) { - html.h1()._("Error getting logs for " + logEntity)._(); - LOG.error("Error getting logs for " + logEntity, e); - return; - } - ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf); - aclsManager.addApplication(applicationId, appAcls); + boolean foundLog = false; + String desiredLogType = $(CONTAINER_LOG_TYPE); + try { + while (nodeFiles.hasNext()) { + AggregatedLogFormat.LogReader reader = null; + try { + FileStatus thisNodeFile = nodeFiles.next(); + if (!thisNodeFile.getPath().getName() + .contains(LogAggregationUtils.getNodeString(nodeId)) + || thisNodeFile.getPath().getName() + .endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) { + continue; + } + reader = + new AggregatedLogFormat.LogReader(conf, thisNodeFile.getPath()); + + String owner = null; + Map<ApplicationAccessType, String> appAcls = null; + try { + owner = reader.getApplicationOwner(); + appAcls = reader.getApplicationAcls(); + } catch (IOException e) { + LOG.error("Error getting logs for " + logEntity, e); + continue; + } + ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf); + aclsManager.addApplication(applicationId, appAcls); - String remoteUser = request().getRemoteUser(); - UserGroupInformation callerUGI = null; - if (remoteUser != null) { - callerUGI = UserGroupInformation.createRemoteUser(remoteUser); - } - if (callerUGI != null - && !aclsManager.checkAccess(callerUGI, + String remoteUser = request().getRemoteUser(); + UserGroupInformation callerUGI = null; + if (remoteUser != null) { + callerUGI = UserGroupInformation.createRemoteUser(remoteUser); + } + if (callerUGI 
!= null && !aclsManager.checkAccess(callerUGI, ApplicationAccessType.VIEW_APP, owner, applicationId)) { - html.h1() - ._("User [" + remoteUser - + "] is not authorized to view the logs for " + logEntity)._(); - return; - } + html.h1() + ._("User [" + remoteUser + + "] is not authorized to view the logs for " + logEntity + + " in log file [" + thisNodeFile.getPath().getName() + "]")._(); + LOG.error("User [" + remoteUser + + "] is not authorized to view the logs for " + logEntity); + continue; + } - String desiredLogType = $(CONTAINER_LOG_TYPE); - try { - AggregatedLogFormat.ContainerLogsReader logReader = reader + AggregatedLogFormat.ContainerLogsReader logReader = reader .getContainerLogsReader(containerId); - if (logReader == null) { - html.h1() - ._("Logs not available for " + logEntity - + ". Could be caused by the rentention policy")._(); - return; - } - - boolean foundLog = readContainerLogs(html, logReader, logLimits, - desiredLogType); - - if (!foundLog) { - if (desiredLogType.isEmpty()) { - html.h1("No logs available for container " + containerId.toString()); - } else { - html.h1("Unable to locate '" + desiredLogType - + "' log for container " + containerId.toString()); + if (logReader == null) { + continue; } - return; + + foundLog = readContainerLogs(html, logReader, logLimits, + desiredLogType); + } catch (IOException ex) { + LOG.error("Error getting logs for " + logEntity, ex); + continue; + } finally { + if (reader != null) + reader.close(); } - } catch (IOException e) { - html.h1()._("Error getting logs for " + logEntity)._(); - LOG.error("Error getting logs for " + logEntity, e); - return; } - } finally { - if (reader != null) { - reader.close(); + if (!foundLog) { + if (desiredLogType.isEmpty()) { + html.h1("No logs available for container " + containerId.toString()); + } else { + html.h1("Unable to locate '" + desiredLogType + + "' log for container " + containerId.toString()); + } } + } catch (IOException e) { + html.h1()._("Error getting logs 
for " + logEntity)._(); + LOG.error("Error getting logs for " + logEntity, e); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java index 502d2dc2b584a..0a17433c44fca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java @@ -47,7 +47,6 @@ import org.apache.hadoop.yarn.webapp.view.BlockForTest; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.HtmlBlockForTest; -import org.junit.Ignore; import org.junit.Test; import static org.mockito.Mockito.*; @@ -149,10 +148,8 @@ public void testAggregatedLogsBlock() throws Exception { } /** * Log files was deleted. - * TODO: YARN-2582: fix log web ui for Long Running application * @throws Exception */ - @Ignore @Test public void testNoLogs() throws Exception {
e106a9c058ec31a3048be99359aa35f1dfb16d69
aeshell$aesh
further completion support
a
https://github.com/aeshell/aesh
diff --git a/src/main/java/Example.java b/src/main/java/Example.java index 175af4841..88866babd 100644 --- a/src/main/java/Example.java +++ b/src/main/java/Example.java @@ -1,7 +1,10 @@ +import org.jboss.jreadline.complete.Completion; import org.jboss.jreadline.console.Console; import java.io.IOException; import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.List; /** * @author <a href="mailto:[email protected]">Ståle W. Pedersen</a> @@ -15,6 +18,20 @@ public static void main(String[] args) throws IOException { PrintWriter out = new PrintWriter(System.out); + Completion completer = new Completion() { + @Override + public List<String> complete(String line, int cursor) { + // very simple completor + List<String> commands = new ArrayList<String>(); + if(line.length() < 1 || line.startsWith("f") || line.startsWith("fo")) + commands.add("foo"); + + return commands; + } + }; + + console.addCompletion(completer); + String line; while ((line = console.read("> ")) != null) { console.pushToConsole("======>\"" + line + "\n"); diff --git a/src/main/java/org/jboss/jreadline/console/Console.java b/src/main/java/org/jboss/jreadline/console/Console.java index 0aa5815d0..f8e01f292 100644 --- a/src/main/java/org/jboss/jreadline/console/Console.java +++ b/src/main/java/org/jboss/jreadline/console/Console.java @@ -266,7 +266,7 @@ else if(action == Action.CASE) { changeCase(); } else if(action == Action.COMPLETE) { - //complete(); + complete(); } else if(action == Action.EXIT) { //deleteCurrentCharacter(); @@ -544,7 +544,7 @@ private boolean undo() throws IOException { return false; } - private void complete() { + private void complete() throws IOException { if(completionList.size() < 1) return; @@ -560,21 +560,45 @@ private void complete() { return; // only one hit, do a completion else if(possibleCompletions.size() == 1) - doCompletion(possibleCompletions.get(0)); + displayCompletion(possibleCompletions.get(0)); // more than one hit... 
else { - //TODO: implement this + String startsWith = buffer.findStartsWith(possibleCompletions); + if(startsWith.length() > 0) + displayCompletion(startsWith); + else { + displayCompletions(possibleCompletions); + } } } /** * TODO: insert the completion into the buffer + * 1. go back a word + * 2. insert the word * * @param completion + * @throws java.io.IOException stream */ - private void doCompletion(String completion) { + private void displayCompletion(String completion) throws IOException { + performAction(new PrevWordAction(buffer.getCursor(), Action.DELETE)); + buffer.write(completion); + outStream.write(completion); + redrawLineFromCursor(); } + private void displayCompletions(List<String> possibleCompletions) { + if(possibleCompletions.size() > 50) { + // display ask... + } + // display all + else { + + } + + } + + }
258a0004d516ffeeb9ea49ac6a9726af79027389
tapiji
Minor clean up.
p
https://github.com/tapiji/tapiji
diff --git a/org.eclipse.babel.core/src/org/eclipse/babel/core/message/manager/RBManager.java b/org.eclipse.babel.core/src/org/eclipse/babel/core/message/manager/RBManager.java index b2d18e40..55ae250d 100644 --- a/org.eclipse.babel.core/src/org/eclipse/babel/core/message/manager/RBManager.java +++ b/org.eclipse.babel.core/src/org/eclipse/babel/core/message/manager/RBManager.java @@ -61,7 +61,6 @@ public class RBManager { private static final String TAPIJI_NATURE = "org.eclipse.babel.tapiji.tools.core.nature"; - // TODO: use logger of MessagesEditorPlugin private static Logger logger = Logger.getLogger(RBManager.class .getSimpleName());
590e82afc18b5d5663f9ad08f3b49d802698b95e
aeshell$aesh
[AESH-303] added support for help info for group commands
a
https://github.com/aeshell/aesh
diff --git a/src/main/java/AeshExample.java b/src/main/java/AeshExample.java index f20290926..33fa74927 100644 --- a/src/main/java/AeshExample.java +++ b/src/main/java/AeshExample.java @@ -474,23 +474,37 @@ public InputStream getManualDocument(String commandName) { } } - @GroupCommandDefinition(name = "group", description = "", groupCommands = {Child1.class, Child2.class}) + @GroupCommandDefinition(name = "group", description = "This is a group command", + groupCommands = {Child1.class, Child2.class}) public static class GroupCommand implements Command { + + @Option(hasValue = false, description = "display this help option") + private boolean help; + @Override public CommandResult execute(CommandInvocation commandInvocation) throws IOException, InterruptedException { - commandInvocation.getShell().out().println("only executed group, it doesnt do much..."); + if(help) + commandInvocation.getShell().out().println(commandInvocation.getHelpInfo("group")); + else + commandInvocation.getShell().out().println("only executed group, it doesnt do much..."); return CommandResult.SUCCESS; } } @CommandDefinition(name = "child1", description = "") public static class Child1 implements Command { - @Option + + @Option(description = "set foo") private String foo; + @Option(hasValue = false, description = "display this help option") + private boolean help; @Override public CommandResult execute(CommandInvocation commandInvocation) throws IOException, InterruptedException { - commandInvocation.getShell().out().println("foo is set to: "+foo); + if(help) + commandInvocation.getShell().out().println(commandInvocation.getHelpInfo("group child1")); + else + commandInvocation.getShell().out().println("foo is set to: "+foo); return CommandResult.SUCCESS; } } diff --git a/src/main/java/org/jboss/aesh/cl/parser/AeshCommandLineParser.java b/src/main/java/org/jboss/aesh/cl/parser/AeshCommandLineParser.java index b1417d322..ac6fb91b2 100644 --- 
a/src/main/java/org/jboss/aesh/cl/parser/AeshCommandLineParser.java +++ b/src/main/java/org/jboss/aesh/cl/parser/AeshCommandLineParser.java @@ -24,6 +24,7 @@ import org.jboss.aesh.cl.internal.ProcessedCommand; import org.jboss.aesh.cl.internal.ProcessedOption; import org.jboss.aesh.cl.populator.CommandPopulator; +import org.jboss.aesh.console.Config; import org.jboss.aesh.console.command.Command; import org.jboss.aesh.parser.AeshLine; import org.jboss.aesh.parser.Parser; @@ -114,7 +115,20 @@ public CommandPopulator getCommandPopulator() { */ @Override public String printHelp() { - return processedCommand.printHelp(); + if(childParsers != null && childParsers.size() > 0) { + StringBuilder sb = new StringBuilder(); + sb.append(processedCommand.printHelp()) + .append(Config.getLineSeparator()) + .append(processedCommand.getName()) + .append(" commands:") + .append(Config.getLineSeparator()); + for(CommandLineParser child : childParsers) + sb.append(" ").append(child.getProcessedCommand().getName()).append(Config.getLineSeparator()); + + return sb.toString(); + } + else + return processedCommand.printHelp(); } /** diff --git a/src/main/java/org/jboss/aesh/console/AeshConsoleImpl.java b/src/main/java/org/jboss/aesh/console/AeshConsoleImpl.java index 98af4aee8..09a145302 100644 --- a/src/main/java/org/jboss/aesh/console/AeshConsoleImpl.java +++ b/src/main/java/org/jboss/aesh/console/AeshConsoleImpl.java @@ -138,7 +138,7 @@ public void clear() { public String getHelpInfo(String commandName) { try (CommandContainer commandContainer = registry.getCommand(commandName, "")) { if (commandContainer != null) - return commandContainer.getParser().printHelp(); + return commandContainer.printHelp(commandName); } catch (Exception e) { // ignored } diff --git a/src/main/java/org/jboss/aesh/console/command/container/CommandContainer.java b/src/main/java/org/jboss/aesh/console/command/container/CommandContainer.java index 22ec5a878..4cd3386d3 100644 --- 
a/src/main/java/org/jboss/aesh/console/command/container/CommandContainer.java +++ b/src/main/java/org/jboss/aesh/console/command/container/CommandContainer.java @@ -52,6 +52,12 @@ public interface CommandContainer<T extends Command> extends AutoCloseable { */ boolean haveBuildError(); + /** + * @param childCommandName (for group commands) + * @return help info + */ + String printHelp(String childCommandName); + /** * @return error message */ diff --git a/src/main/java/org/jboss/aesh/console/command/container/DefaultCommandContainer.java b/src/main/java/org/jboss/aesh/console/command/container/DefaultCommandContainer.java index 647a9536a..04fa79750 100644 --- a/src/main/java/org/jboss/aesh/console/command/container/DefaultCommandContainer.java +++ b/src/main/java/org/jboss/aesh/console/command/container/DefaultCommandContainer.java @@ -20,6 +20,7 @@ package org.jboss.aesh.console.command.container; import org.jboss.aesh.cl.CommandLine; +import org.jboss.aesh.cl.parser.CommandLineParser; import org.jboss.aesh.cl.parser.CommandLineParserException; import org.jboss.aesh.cl.validator.CommandValidatorException; import org.jboss.aesh.cl.validator.OptionValidatorException; @@ -53,4 +54,19 @@ public CommandContainerResult executeCommand(AeshLine line, InvocationProviders return new CommandContainerResult(commandLine.getParser().getProcessedCommand().getResultHandler(), result); } + + @Override + public String printHelp(String childCommandName) { + if(getParser().isGroupCommand() && childCommandName.contains(" ")) { + String[] names = childCommandName.split(" "); + if(names.length > 1 && names[1].length() > 0) { + CommandLineParser child = getParser().getChildParser(names[1]); + if(child != null) + return child.printHelp(); + } + return "Child command "+names[1]+" not found."; + } + else + return getParser().printHelp(); + } } diff --git a/src/main/java/org/jboss/aesh/console/command/registry/MutableCommandRegistry.java 
b/src/main/java/org/jboss/aesh/console/command/registry/MutableCommandRegistry.java index 2c4824ca0..0d2abe506 100644 --- a/src/main/java/org/jboss/aesh/console/command/registry/MutableCommandRegistry.java +++ b/src/main/java/org/jboss/aesh/console/command/registry/MutableCommandRegistry.java @@ -50,6 +50,14 @@ public void setCommandContainerBuilder(CommandContainerBuilder containerBuilder) public CommandContainer getCommand(String name, String line) throws CommandNotFoundException { if(registry.containsKey(name)) return registry.get(name); + //group command + else if(name.contains(" ")) { + String[] names = name.split(" "); + if(registry.containsKey(names[0])) { + return registry.get(names[0]); + } + throw new CommandNotFoundException("Command: "+names[0]+" was not found."); + } else throw new CommandNotFoundException("Command: "+name+" was not found."); } diff --git a/src/test/java/org/jboss/aesh/cl/CommandLineFormatterTest.java b/src/test/java/org/jboss/aesh/cl/CommandLineFormatterTest.java index 5117dd0ba..7ac962038 100644 --- a/src/test/java/org/jboss/aesh/cl/CommandLineFormatterTest.java +++ b/src/test/java/org/jboss/aesh/cl/CommandLineFormatterTest.java @@ -19,7 +19,6 @@ */ package org.jboss.aesh.cl; -import junit.framework.TestCase; import org.jboss.aesh.cl.internal.ProcessedCommandBuilder; import org.jboss.aesh.cl.internal.ProcessedOptionBuilder; import org.jboss.aesh.cl.parser.CommandLineParserException; @@ -27,17 +26,18 @@ import org.jboss.aesh.cl.parser.CommandLineParserBuilder; import org.jboss.aesh.console.Config; import org.jboss.aesh.util.ANSI; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; /** * @author <a href="mailto:[email protected]">Ståle W. 
Pedersen</a> */ -public class CommandLineFormatterTest extends TestCase { +public class CommandLineFormatterTest { - public CommandLineFormatterTest(String name) { - super(name); - } - public void testFormatter() throws CommandLineParserException { + @Test + public void formatter() throws CommandLineParserException { ProcessedCommandBuilder pb = new ProcessedCommandBuilder().name("man").description("[OPTION...]"); pb.addOption( @@ -68,7 +68,8 @@ public void testFormatter() throws CommandLineParserException { clp.printHelp()); } - public void testFormatter2() throws CommandLineParserException { + @Test + public void formatter2() throws CommandLineParserException { ProcessedCommandBuilder pb = new ProcessedCommandBuilder().name("man").description("[OPTION...]"); pb.addOption( @@ -114,4 +115,58 @@ public void testFormatter2() throws CommandLineParserException { clp.printHelp()); } + @Test + public void groupFormatter() throws CommandLineParserException { + ProcessedCommandBuilder git = new ProcessedCommandBuilder().name("git").description("[OPTION...]"); + git.addOption( + new ProcessedOptionBuilder() + .shortName('h') + .name("help") + .description("display help info") + .type(boolean.class) + .create() + ); + + ProcessedCommandBuilder rebase = new ProcessedCommandBuilder().name("rebase").description("[OPTION...]"); + rebase.addOption( + new ProcessedOptionBuilder() + .shortName('f') + .name("foo") + .required(true) + .description("reset all options to their default values") + .type(String.class) + .create() + ); + + ProcessedCommandBuilder branch = new ProcessedCommandBuilder().name("branch").description("branching"); + branch.addOption( + new ProcessedOptionBuilder() + .shortName('b') + .name("bar") + .required(true) + .description("reset all options to their default values") + .type(String.class) + .create() + ); + + + CommandLineParser clpGit = new CommandLineParserBuilder().processedCommand(git.create()).create(); + CommandLineParser clpBranch = new 
CommandLineParserBuilder().processedCommand(branch.create()).create(); + CommandLineParser clpRebase = new CommandLineParserBuilder().processedCommand(rebase.create()).create(); + + clpGit.addChildParser(clpBranch); + clpGit.addChildParser(clpRebase); + + assertEquals("Usage: git [OPTION...]" + Config.getLineSeparator() + + Config.getLineSeparator() + + "Options:" + Config.getLineSeparator() + + " -h, --help display help info" + Config.getLineSeparator() + + Config.getLineSeparator()+"git commands:"+Config.getLineSeparator()+ + " branch"+Config.getLineSeparator()+ + " rebase"+Config.getLineSeparator(), + clpGit.printHelp()); + + + } + }
0ace17c659c5e73c9de4003718ba4860bfa3be43
restlet-framework-java
- Initial code for new default HTTP connector and- SIP connector.--
a
https://github.com/restlet/restlet-framework-java
diff --git a/modules/org.restlet/src/org/restlet/engine/http/connector/AcceptorTask.java b/modules/org.restlet/src/org/restlet/engine/http/connector/AcceptorTask.java index 4d9b6e0d2a..5ab1b6022d 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/connector/AcceptorTask.java +++ b/modules/org.restlet/src/org/restlet/engine/http/connector/AcceptorTask.java @@ -99,9 +99,8 @@ public void run() { if ((getHelper().getMaxTotalConnections() == -1) || (connectionsCount <= getHelper() .getMaxTotalConnections())) { - final Connection<?> connection = getHelper() - .createConnection(getHelper(), - client.socket()); + Connection<?> connection = getHelper().createConnection( + getHelper(), client.socket()); connection.open(); getHelper().getConnections().add(connection); } else { diff --git a/modules/org.restlet/src/org/restlet/engine/http/connector/ControllerTask.java b/modules/org.restlet/src/org/restlet/engine/http/connector/ControllerTask.java index 1a9eb2dc42..200a8b2377 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/connector/ControllerTask.java +++ b/modules/org.restlet/src/org/restlet/engine/http/connector/ControllerTask.java @@ -146,6 +146,10 @@ public void run() { } }); } + + if (conn.getState() == ConnectionState.CLOSED) { + getHelper().getConnections().remove(conn); + } } // Control if there are some pending requests that could
b2b7e6996be0cea4d872be637c4fc100971d926e
kotlin
Decompiler: Introduce DeserializerForDecompiler--Component which can "resolve" descriptors without project-It builds dummy descriptors for dependencies which are enough to build decompiled text-
a
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend/serialization/src/org/jetbrains/jet/descriptors/serialization/ClassId.java b/compiler/frontend/serialization/src/org/jetbrains/jet/descriptors/serialization/ClassId.java index 88eac1346c08f..c0efac5d2cd3d 100644 --- a/compiler/frontend/serialization/src/org/jetbrains/jet/descriptors/serialization/ClassId.java +++ b/compiler/frontend/serialization/src/org/jetbrains/jet/descriptors/serialization/ClassId.java @@ -22,6 +22,12 @@ import org.jetbrains.jet.lang.resolve.name.Name; public final class ClassId { + + @NotNull + public static ClassId topLevel(@NotNull FqName topLevelFqName) { + return new ClassId(topLevelFqName.parent(), FqNameUnsafe.topLevel(topLevelFqName.shortName())); + } + private final FqName packageFqName; private final FqNameUnsafe relativeClassName; @@ -55,6 +61,7 @@ public boolean isTopLevelClass() { return relativeClassName.parent().isRoot(); } + @NotNull public FqNameUnsafe asSingleFqName() { if (packageFqName.isRoot()) return relativeClassName; return new FqNameUnsafe(packageFqName.asString() + "." 
+ relativeClassName.asString()); diff --git a/compiler/frontend/serialization/src/org/jetbrains/jet/descriptors/serialization/DescriptorFinder.java b/compiler/frontend/serialization/src/org/jetbrains/jet/descriptors/serialization/DescriptorFinder.java index d56637c306e1c..6ed1cb6e5b09e 100644 --- a/compiler/frontend/serialization/src/org/jetbrains/jet/descriptors/serialization/DescriptorFinder.java +++ b/compiler/frontend/serialization/src/org/jetbrains/jet/descriptors/serialization/DescriptorFinder.java @@ -18,6 +18,7 @@ import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import org.jetbrains.annotations.ReadOnly; import org.jetbrains.jet.lang.descriptors.ClassDescriptor; import org.jetbrains.jet.lang.resolve.name.FqName; import org.jetbrains.jet.lang.resolve.name.Name; @@ -43,6 +44,7 @@ public Collection<Name> getClassNames(@NotNull FqName packageName) { @Nullable ClassDescriptor findClass(@NotNull ClassId classId); + @ReadOnly @NotNull Collection<Name> getClassNames(@NotNull FqName packageName); } diff --git a/idea/src/org/jetbrains/jet/plugin/libraries/DeserializerForDecompiler.kt b/idea/src/org/jetbrains/jet/plugin/libraries/DeserializerForDecompiler.kt new file mode 100644 index 0000000000000..44331f5b8c71c --- /dev/null +++ b/idea/src/org/jetbrains/jet/plugin/libraries/DeserializerForDecompiler.kt @@ -0,0 +1,188 @@ +/* + * Copyright 2010-2014 JetBrains s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.jetbrains.jet.plugin.libraries + +import org.jetbrains.jet.descriptors.serialization.ClassId +import org.jetbrains.jet.descriptors.serialization.DescriptorFinder +import org.jetbrains.jet.descriptors.serialization.JavaProtoBufUtil +import org.jetbrains.jet.descriptors.serialization.descriptors.DeserializedClassDescriptor +import org.jetbrains.jet.lang.descriptors.* +import org.jetbrains.jet.lang.descriptors.impl.MutablePackageFragmentDescriptor +import org.jetbrains.jet.lang.resolve.kotlin.KotlinJvmBinaryClass +import org.jetbrains.jet.lang.resolve.name.FqName +import org.jetbrains.jet.lang.resolve.name.Name +import org.jetbrains.jet.lang.types.ErrorUtils +import org.jetbrains.jet.storage.LockBasedStorageManager +import java.util.Collections +import org.jetbrains.jet.lang.resolve.java.resolver.DescriptorResolverUtils +import com.intellij.openapi.vfs.VirtualFile +import org.jetbrains.jet.lang.resolve.kotlin.KotlinBinaryClassCache +import org.jetbrains.jet.lang.resolve.kotlin.DeserializedResolverUtils +import org.jetbrains.jet.descriptors.serialization.descriptors.DeserializedPackageMemberScope +import org.jetbrains.jet.lang.resolve.kotlin.AnnotationDescriptorDeserializer +import org.jetbrains.jet.lang.resolve.java.resolver.ErrorReporter +import org.jetbrains.jet.lang.types.ErrorUtils.getErrorModule +import org.jetbrains.jet.lang.types.error.MissingDependencyErrorClassDescriptor +import org.jetbrains.jet.lang.resolve.java.PackageClassUtils +import com.intellij.openapi.diagnostic.Logger +import org.jetbrains.jet.lang.resolve.kotlin.KotlinClassFinder +import org.jetbrains.jet.lang.resolve.name.FqNameUnsafe + +public fun DeserializerForDecompiler(classFile: VirtualFile): DeserializerForDecompiler { + val kotlinClass = KotlinBinaryClassCache.getKotlinBinaryClass(classFile) + val classFqName = kotlinClass.getClassName().getFqNameForClassNameWithoutDollars() + val packageFqName = classFqName.parent() + return 
DeserializerForDecompiler(classFile.getParent()!!, packageFqName) +} + +public class DeserializerForDecompiler(val packageDirectory: VirtualFile, val directoryPackageFqName: FqName) : ResolverForDecompiler { + + override fun resolveClass(classFqName: FqName) = classes(classFqName.toClassId()) + + override fun resolveDeclarationsInPackage(packageFqName: FqName): Collection<DeclarationDescriptor> { + assert(packageFqName == directoryPackageFqName, "Was called for $packageFqName but only $directoryPackageFqName is expected.") + val packageClassFqName = PackageClassUtils.getPackageClassFqName(packageFqName) + val binaryClassForPackageClass = localClassFinder.findKotlinClass(packageClassFqName) + val annotationData = binaryClassForPackageClass?.getClassHeader()?.getAnnotationData() + if (annotationData == null) { + LOG.error("Could not read annotation data for $packageFqName from ${binaryClassForPackageClass?.getClassName()}") + return Collections.emptyList() + } + val membersScope = DeserializedPackageMemberScope( + storageManager, + createDummyPackageFragment(packageFqName), + annotationDeserializer, + descriptorFinder, + JavaProtoBufUtil.readPackageDataFrom(annotationData) + ) + return membersScope.getAllDescriptors() + } + + private val localClassFinder = object: KotlinClassFinder { + override fun findKotlinClass(fqName: FqName) = findKotlinClass(fqName.toClassId()) + + fun findKotlinClass(classId: ClassId): KotlinJvmBinaryClass? 
{ + if (classId.getPackageFqName() != directoryPackageFqName) { + return null + } + val segments = DeserializedResolverUtils.kotlinFqNameToJavaFqName(classId.getRelativeClassName()).pathSegments() + val targetName = segments.makeString("$", postfix = ".class") + val virtualFile = packageDirectory.findChild(targetName) + if (virtualFile != null && DecompiledUtils.isKotlinCompiledFile(virtualFile)) { + return KotlinBinaryClassCache.getKotlinBinaryClass(virtualFile) + } + return null + } + } + private val storageManager = LockBasedStorageManager.NO_LOCKS + private val classes = storageManager.createMemoizedFunctionWithNullableValues { + (classId: ClassId) -> + resolveClassByClassId(classId) + } + + private val annotationDeserializer = AnnotationDescriptorDeserializer(storageManager); + { + annotationDeserializer.setClassResolver { + fqName -> + classes(fqName.toClassId()) + } + annotationDeserializer.setKotlinClassFinder(localClassFinder) + annotationDeserializer.setErrorReporter(LOGGING_REPORTER) + } + + private val descriptorFinder = object : DescriptorFinder { + override fun findClass(classId: ClassId): ClassDescriptor? { + return classes(classId) + } + + override fun getClassNames(packageName: FqName): Collection<Name> { + return Collections.emptyList() + } + } + + private val packageFragmentProvider = object : PackageFragmentProvider { + override fun getPackageFragments(fqName: FqName): List<PackageFragmentDescriptor> { + return listOf(createDummyPackageFragment(fqName)) + } + + override fun getSubPackagesOf(fqName: FqName): Collection<FqName> { + throw UnsupportedOperationException("This method is not supposed to be called.") + } + } + + private fun createDummyPackageFragment(fqName: FqName): MutablePackageFragmentDescriptor { + return MutablePackageFragmentDescriptor(ErrorUtils.getErrorModule(), fqName) + } + + private fun resolveClassByClassId(classId: ClassId): ClassDescriptor? 
{ + val fullFqName = classId.asSingleFqName() + if (fullFqName.isSafe()) { + val fromBuiltIns = DescriptorResolverUtils.getKotlinBuiltinClassDescriptor(fullFqName.toSafe()) + if (fromBuiltIns != null) { + return fromBuiltIns + } + } + val binaryClass = localClassFinder.findKotlinClass(classId) + if (binaryClass != null) { + return deserializeBinaryClass(binaryClass) + } + assert(fullFqName.isSafe(), "Safe fq name expected here, got $fullFqName instead") + return MissingDependencyErrorClassDescriptor(fullFqName.toSafe()) + } + + private fun deserializeBinaryClass(kotlinClass: KotlinJvmBinaryClass): ClassDescriptor { + val data = kotlinClass.getClassHeader()?.getAnnotationData() + if (data == null) { + LOG.error("Annotation data missing for ${kotlinClass.getClassName()}") + } + val classData = JavaProtoBufUtil.readClassDataFrom(data!!) + return DeserializedClassDescriptor(storageManager, annotationDeserializer, descriptorFinder, packageFragmentProvider, + classData.getNameResolver(), classData.getClassProto()) + } + + // we need a "magic" way to obtain ClassId from FqName + // the idea behind this function is that we need accurate class ids only for "neighbouring" classes (inner classes, class object, etc) + // for all others we can build any ClassId since it will resolve to MissingDependencyErrorClassDescriptor which only stores fqName + private fun FqName.toClassId(): ClassId { + val segments = pathSegments() + val packageSegmentsCount = directoryPackageFqName.pathSegments().size + if (segments.size <= packageSegmentsCount) { + return ClassId.topLevel(this) + } + val packageFqName = FqName.fromSegments(segments.subList(0, packageSegmentsCount) map { it.asString() }) + if (packageFqName == directoryPackageFqName) { + return ClassId(packageFqName, FqNameUnsafe.fromSegments(segments.subList(packageSegmentsCount, segments.size))) + } + return ClassId.topLevel(this) + } + + class object { + private val LOG = Logger.getInstance(javaClass<DeserializerForDecompiler>()) + + 
private object LOGGING_REPORTER: ErrorReporter { + override fun reportAnnotationLoadingError(message: String, exception: Exception?) { + LOG.error(message, exception) + } + override fun reportCannotInferVisibility(descriptor: CallableMemberDescriptor) { + LOG.error("Could not infer visibility for $descriptor") + } + override fun reportIncompatibleAbiVersion(kotlinClass: KotlinJvmBinaryClass, actualVersion: Int) { + LOG.error("Incompatible ABI version for class ${kotlinClass.getClassName()}, actual version: $actualVersion") + } + } + } +}
5cc86d3ca2d04a81a234a75520546884eee29b27
duracloud$duracloud
Updates the hadoop file processor classes to be more easily extended, in order to build on this base for further processing capabilities. Pulls all of the file transfer code into the mapper class, to allow this functionality to be more easily reused or reimplemented. Adds tests. git-svn-id: https://svn.duraspace.org/duracloud/trunk@73 1005ed41-97cd-4a8f-848c-be5b5fe45bcb
p
https://github.com/duracloud/duracloud
diff --git a/services/hadoop-file-processor/pom.xml b/services/hadoop-file-processor/pom.xml index 1d2533180..7ab171b24 100644 --- a/services/hadoop-file-processor/pom.xml +++ b/services/hadoop-file-processor/pom.xml @@ -23,7 +23,7 @@ <configuration> <archive> <manifest> - <mainClass>org.duracloud.services.fileprocessor.FileProcessor</mainClass> + <mainClass>org.duracloud.services.fileprocessor.JobRunner</mainClass> </manifest> </archive> </configuration> diff --git a/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/JobBuilder.java b/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/JobBuilder.java index fdf8e6094..b8644b21c 100644 --- a/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/JobBuilder.java +++ b/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/JobBuilder.java @@ -17,7 +17,7 @@ import java.text.ParseException; /** - * This class constructs a job. + * This class constructs a hadoop job to process files. * * @author: Bill Branan * Date: Aug 5, 2010 @@ -31,9 +31,9 @@ public class JobBuilder { * Constructs a Job builder * * @param inputPathPrefix - * The S3 path from which the input files can be retrieved. + * The path from which the input files can be retrieved. * @param outputPath - * The S3 path to which output files will be written. + * The path to which output files will be written. 
*/ public JobBuilder(final String inputPathPrefix, final String outputPath) { this.inputPathPrefix = inputPathPrefix; @@ -49,15 +49,15 @@ public JobConf getJobConf() throws IOException, ParseException { " and store results in " + outputPath); JobConf conf = new JobConf(JobBuilder.class); - conf.setJobName("ProcessFiles"); + conf.setJobName(getJobName()); conf.setOutputKeyClass(Text.class); conf.setOutputValueClass(Text.class); // Configure mappper - conf.setMapperClass(ProcessFileMapper.class); + conf.setMapperClass(getMapper()); // Configure reducer - conf.setReducerClass(ResultsReducer.class); + conf.setReducerClass(getReducer()); conf.setNumReduceTasks(1); // Configure input path @@ -74,4 +74,36 @@ public JobConf getJobConf() throws IOException, ParseException { return conf; } + /** + * Retrieves the name of the hadoop job. + * + * This method can be overridden to provide an alternate job name. + */ + protected String getJobName() { + return "ProcessFiles"; + } + + /** + * Retrieves the mapper class which will be used for perform the hadoop + * mapping tasks. The default mapper performs a simple file processing task. + * + * This method can be overridden to provide an alternate mapper + * implementation class, possibly a subclass of the default mapper. + */ + protected Class getMapper() { + return ProcessFileMapper.class; + } + + /** + * Retrieves the reducer class which will be used to perform the hadoop + * reduction tasks. The default reducer simply collects all output name/value + * pairs and writes it to an output file. + * + * This method can be overridden to provide an alternate reducer + * implementation class. 
+ */ + protected Class getReducer() { + return ResultsReducer.class; + } + } diff --git a/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/FileProcessor.java b/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/JobRunner.java similarity index 74% rename from services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/FileProcessor.java rename to services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/JobRunner.java index c5e7b8542..97787a469 100644 --- a/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/FileProcessor.java +++ b/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/JobRunner.java @@ -19,43 +19,52 @@ import java.io.IOException; /** - * This is the main point of entry for the file processing hadoop application. + * This is the main point of entry for the hadoop file processing application. * * @author: Bill Branan * Date: Aug 5, 2010 */ -public class FileProcessor { - +public class JobRunner { + + private static String inputPath; + private static String outputPath; + /** * Main method that sets up file processing job. */ - public static void main(String[] args) throws IOException, - java.text.ParseException { + public static void main(String[] args) throws Exception { CommandLine cmd = processArgs(args); - String inputPath = cmd.getOptionValue("input"); + inputPath = cmd.getOptionValue("input"); inputPath = appendTrailingSlash(inputPath); - String outputPath = cmd.getOptionValue("output"); + outputPath = cmd.getOptionValue("output"); outputPath = appendTrailingSlash(outputPath); - runJob(inputPath, outputPath); + runJob(); } // Construct and run the job. 
- private static void runJob(String inputPath, String outputPath) + private static void runJob() throws IOException, java.text.ParseException { - JobBuilder jobBuilder = new JobBuilder(inputPath, outputPath); + JobBuilder jobBuilder = getJobBuilder(); JobConf jobConf = jobBuilder.getJobConf(); - - System.out.println("Running job to process files."); - JobClient.runJob(jobConf); } + /** + * Creates a job builder which is responsible for creating a hadoop job + * which can be run. + * + * This method can be overridden to provide an alternate job builder. + */ + protected static JobBuilder getJobBuilder() { + return new JobBuilder(inputPath, outputPath); + } + // Process the command line arguments - private static CommandLine processArgs(String[] args) { + protected static CommandLine processArgs(String[] args) { Options options = createOptions(); CommandLine cmd = null; try { @@ -84,7 +93,8 @@ private static void printHelpText(Options options) { "-input <path to input> " + "-output <path to output> ", options); - System.exit(1); + throw new RuntimeException("Program arguments must include " + + "both input and output values"); } private static Options createOptions() { diff --git a/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/ProcessFileMapper.java b/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/ProcessFileMapper.java index 3de2292b0..7359aec76 100644 --- a/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/ProcessFileMapper.java +++ b/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/ProcessFileMapper.java @@ -8,8 +8,6 @@ package org.duracloud.services.fileprocessor; import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; @@ -19,10 +17,8 @@ import 
org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; -import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; -import java.io.InputStream; /** * Mapper used to process files. @@ -33,65 +29,158 @@ public class ProcessFileMapper extends MapReduceBase implements Mapper<Text, Text, Text, Text> { + public static final String LOCAL_FS = "file://"; + + /** + * Performs the actual file processing. + */ @Override public void map(Text key, Text value, OutputCollector<Text, Text> output, Reporter reporter) throws IOException { - String localFilePath = key.toString(); + String filePath = key.toString(); String outputPath = value.toString(); - reporter.setStatus("Processing file: " + localFilePath); - System.out.println("Beginning map process, processing file: " + - localFilePath + ". Output path: " + outputPath); - - File localFile = new File(localFilePath); + try { + reporter.setStatus("Processing file: " + filePath); + System.out.println("Starting map processing for file: " + filePath); - if(localFile.exists()) { - String fileName = localFile.getName(); + // Copy the input file to local storage + File localFile = copyFileLocal(filePath); - InputStream resultStream = processFile(localFile); + // Process the local file + File resultFile = processFile(localFile); - System.out.println("File processing complete for file " + fileName + - ", moving result to output location"); + System.out.println("File processing complete, result file " + + "generated: " + resultFile.getName()); - copyToOutput(resultStream, fileName, outputPath); + // Move the result file to the output location + String finalResultFilePath = + moveToOutput(resultFile, resultFile.getName(), outputPath); + // Delete the local file FileUtils.deleteQuietly(localFile); - output.collect(new Text(fileName), new Text("success")); + String results = "input: " + filePath + + " output: " + finalResultFilePath; + output.collect(new Text("success:"), new 
Text(results)); - System.out.println("Map processing completed for: " + fileName); - } else { - output.collect(new Text(localFilePath), new Text("failure")); + System.out.println("Map processing completed successfully for: " + + filePath); + } catch(IOException e) { + String results = "input: " + filePath + + " error: " + e.getMessage(); + output.collect(new Text("failure:"), new Text(results)); - System.out.println("Map processing failed for " + localFilePath + - ". File not found"); + System.out.println("Map processing failed for: " + + filePath + " due to: " + e.getMessage()); + e.printStackTrace(System.err); } - reporter.setStatus("Processing complete for file: " + localFilePath); + reporter.setStatus("Processing complete for file: " + filePath); } - private InputStream processFile(File file) throws IOException { - // Test implementation to be replaced by real processing + /** + * Copies a file from a remote file system to local storage + * + * @param filePath path to remote file + * @return local file + */ + protected File copyFileLocal(String filePath) throws IOException { + Path remotePath = new Path(filePath); + String fileName = remotePath.getName(); + + FileSystem fs = remotePath.getFileSystem(new JobConf()); + + if(fs.isFile(remotePath)) { + File localFile = new File(getTempDir(), fileName); + Path localPath = new Path(LOCAL_FS + localFile.getAbsolutePath()); + + System.out.println("Copying file (" + filePath + + ") to local file system"); + + fs.copyToLocalFile(remotePath, localPath); + + if(localFile.exists()) { + System.out.println("File moved to local storage successfully."); + return localFile; + } else { + String error = "Failure attempting to move remote file (" + + filePath + ") to local filesystem, local file (" + + localFile.getAbsolutePath() + ") not found after transfer."; + System.out.println(error); + throw new IOException(error); + } + } else { + String error = "Failure attempting to access remote file (" + + filePath + "), the file could 
not be found"; + System.out.println(error); + throw new IOException(error); + } + } + + /** + * Processes a file and produces a result file. The result file should + * be named as intended for the final output file. + * + * A default implementation is provided, but this method should be + * overridden by subclasses. + * + * @param file the file to process + * @return the file resulting from the processing + */ + protected File processFile(File file) throws IOException { + String fileName = file.getName(); + if(!fileName.endsWith(".txt")) { + fileName += ".txt"; + } + + File resultFile = new File(getTempDir(), fileName); + String outputText = "Processed local file: " + file.getAbsolutePath() + " in ProcessFileMapper"; - return new ByteArrayInputStream(outputText.getBytes("UTF-8")); + FileUtils.writeStringToFile(resultFile, outputText, "UTF-8"); + return resultFile; } - private void copyToOutput(InputStream resultStream, - String fileName, - String outputPath) throws IOException { + /** + * Moves the result file to the output location with the given filename. 
+ * + * @param resultFile the file to move to output + * @param fileName the name to give the file in the output filesystem + * @param outputPath the path to where the file should be written + * @return the path of the new file in at the output location + */ + protected String moveToOutput(File resultFile, + String fileName, + String outputPath) throws IOException { if(outputPath != null) { - Path outputFile = new Path(outputPath, fileName); - FileSystem outputFS = outputFile.getFileSystem(new JobConf()); - FSDataOutputStream outputStream = outputFS.create(outputFile); + Path resultFilePath = + new Path(LOCAL_FS + resultFile.getAbsolutePath()); + Path outputFilePath = new Path(outputPath, fileName); + + System.out.println("Moving file: " + resultFilePath.toString() + + " to output " + outputFilePath.toString()); + + FileSystem outputFS = + outputFilePath.getFileSystem(new JobConf()); + outputFS.moveFromLocalFile(resultFilePath, outputFilePath); - IOUtils.copy(resultStream, outputStream); + return outputFilePath.toString(); } else { - System.out.println("Output path is null, not able to " + - "store result of processing local file"); + String error = "Output path is null, not able to " + + "store result of processing local file"; + System.out.println(error); + throw new IOException(error); } } + /** + * Retrieves a temporary directory on the local file system. 
+ */ + public File getTempDir() { + return new File(System.getProperty("java.io.tmpdir")); + } + } diff --git a/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/ResultsReducer.java b/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/ResultsReducer.java index 12b0058a5..fc7ea3c5d 100644 --- a/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/ResultsReducer.java +++ b/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/ResultsReducer.java @@ -28,8 +28,6 @@ public void reduce(Text key, Iterator<Text> values, OutputCollector<Text, Text> output, Reporter reporter) throws IOException { - System.out.println("Reducing on key: " + key.toString()); - while (values.hasNext()) { output.collect(key, values.next()); } diff --git a/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/SimpleFileRecordReader.java b/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/SimpleFileRecordReader.java index f268fa246..bb96ba5d8 100644 --- a/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/SimpleFileRecordReader.java +++ b/services/hadoop-file-processor/src/main/java/org/duracloud/services/fileprocessor/SimpleFileRecordReader.java @@ -7,8 +7,6 @@ */ package org.duracloud.services.fileprocessor; -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.FileOutputFormat; @@ -17,17 +15,14 @@ import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; -import java.io.File; -import java.io.FileOutputStream; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; /** - * Record reader used to provide a set of key/value pairs for each file - * in a file split. 
Assumes the file split is a single file and returns - * the path to the file as the key of a single key/value pair produced - * per file. + * Record reader, used to provide a set of key/value pairs for each file + * in a file split. This reader assumes the file split is a single file + * and creates one key/value pair per file where: + * key = the file path + * value = the output path (for results) * * @author: Bill Branan * Date: Aug 5, 2010 @@ -37,7 +32,6 @@ public class SimpleFileRecordReader implements RecordReader<Text, Text> { private FileSplit inputSplit; private JobConf jobConf; private Reporter reporter; - private String tempDir; private String filePath; public SimpleFileRecordReader(FileSplit inputSplit, @@ -46,9 +40,6 @@ public SimpleFileRecordReader(FileSplit inputSplit, this.inputSplit = inputSplit; this.jobConf = jobConf; this.reporter = reporter; - - this.tempDir = - new File(System.getProperty("java.io.tmpdir")).getAbsolutePath(); } @Override @@ -60,9 +51,8 @@ public boolean next(Text key, Text value) throws IOException { System.out.println("Record reader handling file: " + filePath); - if(filePath != null) { - String localPath = moveLocal(); - key.set(localPath); + if(filePath != null && !filePath.endsWith("-space-metadata")) { + key.set(filePath); Path outputPath = FileOutputFormat.getOutputPath(jobConf); value.set(outputPath.toString()); @@ -83,36 +73,6 @@ public boolean next(Text key, Text value) throws IOException { return result; } - public String moveLocal() throws IOException { - Path path = new Path(filePath); - String fileName = path.getName(); - - reporter.setStatus("Moving file to local system for processing: " + - fileName); - - FileSystem fs = path.getFileSystem(jobConf); - - if(fs.isFile(path)) { - // Copy file from remote file system to local storage - InputStream inputStream = fs.open(path, 2048); - File localFile = new File(tempDir, fileName); - - System.out.println("Record reader about to read S3 file (" + - filePath + ") to local 
file system " + - localFile.getAbsolutePath()); - - OutputStream localFileStream = new FileOutputStream(localFile); - IOUtils.copy(inputStream, localFileStream); - - System.out.println("File moved to local storage successfully"); - return localFile.getAbsolutePath(); - } else { - System.out.println("Record reader could not retrieve file " + - "from S3: " + filePath); - throw new IOException("Could not retrieve file: " + filePath); - } - } - /** * Create an empty Text object in which the key can be stored */ diff --git a/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/JobBuilderTest.java b/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/JobBuilderTest.java new file mode 100644 index 000000000..972a44e86 --- /dev/null +++ b/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/JobBuilderTest.java @@ -0,0 +1,48 @@ +/* + * The contents of this file are subject to the license and copyright + * detailed in the LICENSE and NOTICE files at the root of the source + * tree and available online at + * + * http://duracloud.org/license/ + */ +package org.duracloud.services.fileprocessor; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.JobConf; +import org.junit.Test; + +import static junit.framework.Assert.assertEquals; + +/** + * @author: Bill Branan + * Date: Aug 11, 2010 + */ +public class JobBuilderTest { + + @Test + public void testJobBuilder() throws Exception { + String inputPath = "file://inputPath"; + String outputPath = "file://outputPath"; + + JobBuilder jobBuilder = new JobBuilder(inputPath, outputPath); + assertEquals("ProcessFiles", jobBuilder.getJobName()); + assertEquals(ProcessFileMapper.class, jobBuilder.getMapper()); + assertEquals(ResultsReducer.class, jobBuilder.getReducer()); + + // An unnecessary stack track is printed when creating a 
JobConf + // See org.apache.hadoop.conf.Configuration line 211 + System.out.println("--- BEGIN EXPECTED STACK TRACE ---"); + JobConf jobConf = jobBuilder.getJobConf(); + System.out.println("--- END EXPECTED STACK TRACE ---"); + + Path[] paths = FileInputFormat.getInputPaths(jobConf); + assertEquals(1, paths.length); + assertEquals(inputPath, paths[0].toString()); + + assertEquals(outputPath, + FileOutputFormat.getOutputPath(jobConf).toString()); + } + +} diff --git a/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/JobRunnerTest.java b/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/JobRunnerTest.java new file mode 100644 index 000000000..ba121e067 --- /dev/null +++ b/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/JobRunnerTest.java @@ -0,0 +1,38 @@ +/* + * The contents of this file are subject to the license and copyright + * detailed in the LICENSE and NOTICE files at the root of the source + * tree and available online at + * + * http://duracloud.org/license/ + */ +package org.duracloud.services.fileprocessor; + +import org.junit.Test; + +import static junit.framework.Assert.assertNotNull; +import static junit.framework.Assert.fail; + +/** + * @author: Bill Branan + * Date: Aug 11, 2010 + */ +public class JobRunnerTest { + + @Test + public void testProcessArgs() throws Exception { + JobRunner jobRunner = new JobRunner(); + try { + jobRunner.processArgs(null); + fail("Job Runner should fail when no arguments are provided"); + } catch(Exception expected) { + assertNotNull(expected); + } + + String[] args = {"-input", "inputFile", "-output", "outputFile"}; + jobRunner.processArgs(args); + + String[] argsShort = {"-i", "inputFile", "-o", "outputFile"}; + jobRunner.processArgs(argsShort); + } + +} diff --git a/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/ProcessFileMapperTest.java 
b/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/ProcessFileMapperTest.java new file mode 100644 index 000000000..e9d267291 --- /dev/null +++ b/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/ProcessFileMapperTest.java @@ -0,0 +1,131 @@ +/* + * The contents of this file are subject to the license and copyright + * detailed in the LICENSE and NOTICE files at the root of the source + * tree and available online at + * + * http://duracloud.org/license/ + */ +package org.duracloud.services.fileprocessor; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertNotNull; +import static junit.framework.Assert.assertTrue; + +/** + * Tests the ProcessFileMapper. + * + * Note that there are no tests for methods copyFileLocal() and moveToOutput() + * due to the fact that these methods make calls to hadoop functions which + * use linux-specific tools to perform file transfer activities. 
+ * + * @author: Bill Branan + * Date: Aug 11, 2010 + */ +public class ProcessFileMapperTest { + + private ArrayList<File> testFiles; + + @Before + public void setUp() throws Exception { + testFiles = new ArrayList<File>(); + } + + @After + public void tearDown() throws Exception { + for(File file : testFiles) { + FileUtils.deleteQuietly(file); + } + } + + @Test + public void testProcessFile() throws Exception { + ProcessFileMapper mapper = new ProcessFileMapper(); + + String fileContent = "This is test content"; + File fileToProcess = File.createTempFile("test", "file"); + testFiles.add(fileToProcess); + FileUtils.writeStringToFile(fileToProcess, fileContent); + + File resultFile = mapper.processFile(fileToProcess); + testFiles.add(resultFile); + + assertNotNull(resultFile); + assertTrue(resultFile.exists()); + assertTrue(resultFile.getName().endsWith(".txt")); + + String resultFileContent = FileUtils.readFileToString(resultFile); + assertTrue(resultFileContent.contains(fileToProcess.getAbsolutePath())); + } + + @Test + public void testMap() throws IOException { + Text key = new Text("/file/path"); + Text value = new Text("/output/path"); + + MockProcessFileMapper mapper = new MockProcessFileMapper(); + + SimpleOutputCollector<Text, Text> collector = + new SimpleOutputCollector<Text, Text>(); + mapper.map(key, value, collector, Reporter.NULL); + + HashMap<Text, Text> collection = collector.getCollection(); + assertNotNull(collection); + assertEquals(1, collection.size()); + + Text resultKey = collection.keySet().iterator().next(); + assertNotNull(resultKey); + assertTrue(resultKey.toString().contains("success")); + + Text resultValue = collection.get(resultKey); + assertNotNull(resultValue); + assertTrue(resultValue.toString().contains(key.toString())); + } + + private class SimpleOutputCollector<K, V> + implements OutputCollector<Text, Text> { + + HashMap<Text, Text> collection = new HashMap<Text, Text>(); + + @Override + public void collect(Text key, Text value) 
throws IOException { + collection.put(key, value); + } + + public HashMap<Text, Text> getCollection() { + return collection; + } + } + + private class MockProcessFileMapper extends ProcessFileMapper { + @Override + protected File copyFileLocal(String filePath) throws IOException { + return new File("/local/file"); + } + + @Override + protected File processFile(File file) throws IOException { + return new File("/processed/file"); + } + + @Override + protected String moveToOutput(File resultFile, + String fileName, + String outputPath) throws IOException { + return outputPath + "/" + fileName; + } + } + +} diff --git a/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/SimpleFileRecordReaderTest.java b/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/SimpleFileRecordReaderTest.java new file mode 100644 index 000000000..0fb455f19 --- /dev/null +++ b/services/hadoop-file-processor/src/test/java/org/duracloud/services/fileprocessor/SimpleFileRecordReaderTest.java @@ -0,0 +1,68 @@ +/* + * The contents of this file are subject to the license and copyright + * detailed in the LICENSE and NOTICE files at the root of the source + * tree and available online at + * + * http://duracloud.org/license/ + */ +package org.duracloud.services.fileprocessor; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.FileSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.TextOutputFormat; +import org.junit.Test; + +import java.io.IOException; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertNotNull; + +/** + * @author: Bill Branan + * Date: Aug 11, 2010 + */ +public class SimpleFileRecordReaderTest { + + @Test + public void testSimpleFileRecordReader() throws Exception { + String inputPath = 
"file://inputPath"; + String outputPath = "file://outputPath"; + + // An unnecessary stack track is printed when creating a JobConf + // See org.apache.hadoop.conf.Configuration line 211 + System.out.println("--- BEGIN EXPECTED STACK TRACE ---"); + JobConf conf = new JobConf(); + System.out.println("--- END EXPECTED STACK TRACE ---"); + + conf.setOutputKeyClass(Text.class); + conf.setOutputValueClass(Text.class); + FileOutputFormat.setOutputPath(conf, new Path(outputPath)); + conf.setOutputFormat(TextOutputFormat.class); + + FileSplit split = new FileSplit(new Path(inputPath), 0, 10, conf); + + SimpleFileRecordReader reader = + new SimpleFileRecordReader(split, conf, Reporter.NULL); + + Text key = reader.createKey(); + Text value = reader.createValue(); + + assertNotNull(key); + assertNotNull(value); + + assertEquals(0, reader.getPos()); + assertEquals(Float.valueOf(0), reader.getProgress()); + + reader.next(key, value); + + assertEquals(inputPath, key.toString()); + assertEquals(outputPath, value.toString()); + + assertEquals(1, reader.getPos()); + assertEquals(Float.valueOf(1), reader.getProgress()); + } +}
040fa2581a8a9b51fb154a5e5ae8aff6c8cd291d
elasticsearch
Added GeoDistance test which verifies the- difference in behaviour between ARC and PLANE
a
https://github.com/elastic/elasticsearch
diff --git a/src/test/java/org/elasticsearch/test/unit/index/search/geo/GeoDistanceTests.java b/src/test/java/org/elasticsearch/test/unit/index/search/geo/GeoDistanceTests.java index 3783357c3fe18..649769e26d5cf 100644 --- a/src/test/java/org/elasticsearch/test/unit/index/search/geo/GeoDistanceTests.java +++ b/src/test/java/org/elasticsearch/test/unit/index/search/geo/GeoDistanceTests.java @@ -21,10 +21,13 @@ import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.index.search.geo.GeoDistance; +import org.elasticsearch.index.search.geo.Point; import org.testng.annotations.Test; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; /** */ @@ -35,18 +38,33 @@ public class GeoDistanceTests { public void testDistanceCheck() { // Note, is within is an approximation, so, even though 0.52 is outside 50mi, we still get "true" GeoDistance.DistanceBoundingCheck check = GeoDistance.distanceBoundingCheck(0, 0, 50, DistanceUnit.MILES); - //System.out.println("Dist: " + GeoDistance.ARC.calculate(0, 0, 0.5, 0.5, DistanceUnit.MILES)); assertThat(check.isWithin(0.5, 0.5), equalTo(true)); - //System.out.println("Dist: " + GeoDistance.ARC.calculate(0, 0, 0.52, 0.52, DistanceUnit.MILES)); assertThat(check.isWithin(0.52, 0.52), equalTo(true)); - //System.out.println("Dist: " + GeoDistance.ARC.calculate(0, 0, 1, 1, DistanceUnit.MILES)); assertThat(check.isWithin(1, 1), equalTo(false)); - check = GeoDistance.distanceBoundingCheck(0, 179, 200, DistanceUnit.MILES); - //System.out.println("Dist: " + GeoDistance.ARC.calculate(0, 179, 0, -179, DistanceUnit.MILES)); assertThat(check.isWithin(0, -179), equalTo(true)); - //System.out.println("Dist: " + GeoDistance.ARC.calculate(0, 179, 0, -178, DistanceUnit.MILES)); assertThat(check.isWithin(0, -178), equalTo(false)); } + + @Test + public void testArcDistanceVsPlaneInEllipsis() { + Point 
centre = new Point(48.8534100, 2.3488000); + Point northernPoint = new Point(48.8801108681, 2.35152032666); + Point westernPoint = new Point(48.85265, 2.308896); + + // With GeoDistance.ARC both the northern and western points are within the 4km range + assertThat(GeoDistance.ARC.calculate(centre.lat, centre.lon, northernPoint.lat, + northernPoint.lon, DistanceUnit.KILOMETERS), lessThan(4D)); + assertThat(GeoDistance.ARC.calculate(centre.lat, centre.lon, westernPoint.lat, + westernPoint.lon, DistanceUnit.KILOMETERS), lessThan(4D)); + + // With GeoDistance.PLANE, only the northern point is within the 4km range, + // the western point is outside of the range due to the simple math it employs, + // meaning results will appear elliptical + assertThat(GeoDistance.PLANE.calculate(centre.lat, centre.lon, northernPoint.lat, + northernPoint.lon, DistanceUnit.KILOMETERS), lessThan(4D)); + assertThat(GeoDistance.PLANE.calculate(centre.lat, centre.lon, westernPoint.lat, + westernPoint.lon, DistanceUnit.KILOMETERS), greaterThan(4D)); + } }
08dbf2ed3696c16a8c2f67e436ace7f6a7622386
cloudname$cloudname
Brave new world Rewrite into a core library with simple methods and an abstraction layer on top of the backend system. The following backends are implemented * Memory (for testing; only works within a single JVM) * ZooKeeper (proof-of-concept implementation of the backend) The following libraries are created: * Service discovery library This brings the overall functionality on par with 2.x with a few exceptions: * Locking isn't implemented. That did not work for the old library so there's no real change in functionality * It isn't possible to query *everything* from a client. This will be addressed in another commit (or just ignored completely since the backends offers this in some shape or form) * It isn't possible to resolve coordinates partially, f.e. finding "logserver" when your own coordinate is "service.tag.region"; "logserver" should resolve to "logserver.tag.region". This will be solved in a later commit by making a separate resolver class that creates service coordinates based on existing coordinates.
p
https://github.com/cloudname/cloudname
diff --git a/README.md b/README.md index 4a1fbe9b..90ef64d5 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,31 @@ # Brave new world: Cloudname 3.0 -Forget everything we said earlier. This is going to be even greater. +## cn-core +The core Cloudname library for resource management + +## cn-service +Service discovery built on top of the core library. + +--- +# The yet-to-be-updated section + +## a3 -- Authentication, Authorization and Access library +Mostly the first, some of the second. Still unchanged from 2.x + +## Idgen - Generating IDs +Generate bucketloads of unique IDs spread across multiple hosts, services and +regions. + +## Flags - Command line Flags +Simple command line flag handling via annotations on properties and accessors. + +## Log - Core logging library +Core entities for logging. + +## Timber - A log server and client +A networked high-performance log server that uses protobuf entities. Server and +client code. + +## Testtools - Testing tools and utilities + +Mostly for internal use by the various modules. diff --git a/cn-core/README.md b/cn-core/README.md new file mode 100644 index 00000000..9df86e77 --- /dev/null +++ b/cn-core/README.md @@ -0,0 +1,30 @@ +# Cloudname Core + +The core libraries are mostly for internal use and are the basic building block for the other libraries. Clients won't use or access this library directly but through the libraries build on the core library. + +The core library supports various backends. The build-in backend is memory-based and is *not* something you want to use in a production service. Its sole purpose is to provide a fast single-JVM backend used when testing other modules built on top of the core library. + +## Key concepts +### Leases +The backends expose **leases** to clients. Each lease is represented by a **path**. Clients belong to a **region**. A region is typically a cluster of servers that are coordinate through a single backend. 
+ + + +#### Client leases +Ordinary leases exists only as long as the client is running and is connected to the backend. When the client terminates the connection the lease expires and anyone listening on changes will be notified. + +#### Permanent leases +Permanent leases persist between client connections. If a client connects to the backend, creates a permanent lease and then disconnects the lease will still be in place. The permanent leases does not expire and will only be removed if done so explicitly by the clients. + +### Paths +A **path** is nothing more than an ordered set of strings that represents a (real or virtual) tree structure. The backend itself does not need to use a hierarchical storage mechanism since the paths can be used directly as identifiers. + +Elements in the paths follows the DNS naming conventions in RFC 952 and RFC 1123: Strings between 1-63 characters long, a-z characters (case insensitive) and hyphens. A string cannot start or end with a hyphen. + + +## Backend requirements +* Paths are guaranteed unique for all clients in the same cluster. There is no guarantee that a lease will be unique for other regions. +* The backend ensures there are no duplicate leases for the current region. +* The backend will create notifications in the same order as they occur. +* Past leases given to disconnected clients are not guaranteed to be unique +* The backend is responsible for cleanups of leases; if all clients disconnect the only leases that should be left is the permanent leases. 
diff --git a/cn-core/pom.xml b/cn-core/pom.xml new file mode 100644 index 00000000..1c8fe5f9 --- /dev/null +++ b/cn-core/pom.xml @@ -0,0 +1,46 @@ +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + + <parent> + <groupId>org.cloudname</groupId> + <artifactId>cloudname-parent</artifactId> + <version>3.0-SNAPSHOT</version> + </parent> + + <artifactId>cn-core</artifactId> + <packaging>jar</packaging> + + <name>Cloudname Library</name> + <description>Managing distributed resources</description> + <url>https://github.com/Cloudname/cloudname</url> + + <dependencies> + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>org.hamcrest</groupId> + <artifactId>hamcrest-all</artifactId> + <version>1.3</version> + </dependency> + + </dependencies> + + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-plugin</artifactId> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-compiler-plugin</artifactId> + </plugin> + + </plugins> + </build> +</project> diff --git a/cn-core/src/main/java/org/cloudname/core/CloudnameBackend.java b/cn-core/src/main/java/org/cloudname/core/CloudnameBackend.java new file mode 100644 index 00000000..ed7f2270 --- /dev/null +++ b/cn-core/src/main/java/org/cloudname/core/CloudnameBackend.java @@ -0,0 +1,133 @@ +package org.cloudname.core; + +/** + * Backends implement this interface. Clients won't use this interface; the logic is handled by the + * libaries built on top of the backend. Each backend provides a few basic primitives that must be + * implemented. One caveat: The backend is responsible for cleaning up unused paths. The clients won't + * remote unused elements. 
+ * + * There are two kinds of leases - permanent and temporary. The permanent leases persist in the + * backend and aren't removed when clients disconnect, even if *all* clients disconnects. + * The temporary leases are removed by the backend when the client closes. Note that clients might + * not be well-behaved and may terminate without calling close(). The backend should remove + * these leases automatically. + * + * Clients listen on both kinds of leases and get notifications through listeners whenever something + * is changed. Notifications to the clients are sent in the same order they are received. + * + * Each lease have a data string attached to the lease and clients may update this freely. + * + * @author [email protected] + */ +public interface CloudnameBackend extends AutoCloseable { + /** + * Create a temporary lease. The temporary lease is limited by the client's connection and will + * be available for as long as the client is connected to the backend. Once the client + * disconnects (either through the LeaseHandle instance that is returned or just vanishing + * from the face of the earth) the lease is removed by the backend. The backend should support + * an unlimited number of leases (FSVO "unlimited") + * + * @param path Path to temporary lease. This value cannot be null. The path supplied by the + * client is just the stem of the full lease, i.e. if a client supplies foo:bar the backend + * will return an unique path to the client which represent the lease (for "foo:bar" the + * backend might return "foo:bar:uniqueid0", "foo:bar:uniqueid1"... to clients acquiring + * the lease. + * + * @param data Temporary lease data. This is an arbitrary string supplied by the client. It + * carries no particular semantics for the backend and the backend only have to return the + * same string to the client. This value cannot be null. + * + * @return A LeaseHandle instance that the client can use to manipulate its data or release + * the lease (ie closing it). 
+ */ + LeaseHandle createTemporaryLease(final CloudnamePath path, final String data); + + /** + * Update a client's lease. Normally this is something the client does itself but libraries + * built on top of the backends might use it to set additional properties. + * @param path Path to the temporary lease. + * @param data The updated lease data. + * @return True if successful, false otherwise + */ + boolean writeTemporaryLeaseData(final CloudnamePath path, final String data); + + /** + * Read temporary lease data. Clients won't use this in regular use but rather monitor changes + * through the listeners but libraries built on top of the backend might read the data. + * + * @param path Path to the client lease. + * @return The data stored in the client lease. + */ + String readTemporaryLeaseData(final CloudnamePath path); + + /** + * Add a listener to a set of temporary leases identified by a path. The temporary leases + * doesn't have to exist but as soon as someone creates a lease matching the given path a + * notification must be sent by the backend implementation. + * + * @param pathToObserve The path to observe for changes. + * @param listener Client's listener. Callbacks on this listener will be invoked by the backend. + */ + void addTemporaryLeaseListener(final CloudnamePath pathToObserve, final LeaseListener listener); + + /** + * Remove a previously attached listener. The backend will ignore leases that doesn't exist. + * + * @param listener The listener to remove + */ + void removeTemporaryLeaseListener(final LeaseListener listener); + + /** + * Create a permanent lease. A permanent lease persists even if the client that created it + * terminates or closes the connection. Other clients will still see the lease. Permanent leases + * must persist until they are explicitly removed. + * + * All permanent leases must be unique. Duplicate permanent leases yields an error. + * + * @param path Path to the permanent lease. 
+ * @param data Data to store in the permanent lease when it is created. + * @return true if successful + */ + boolean createPermanantLease(final CloudnamePath path, final String data); + + /** + * Remove a permanent lease. The lease will be removed and clients listening on the lease + * will be notified. + * + * @param path The path to the lease + * @return true if lease is removed. + */ + boolean removePermanentLease(final CloudnamePath path); + + /** + * Update data on permanent lease. + * + * @param path path to the permanent lease + * @param data data to write to the lease + * @return true if successful + */ + boolean writePermanentLeaseData(final CloudnamePath path, final String data); + + /** + * Read data from permanent lease. + * + * @param path path to permanent lease + * @return data stored in lease or null if the lease doesn't exist + */ + String readPermanentLeaseData(final CloudnamePath path); + + /** + * Add a listener to a permanent lease. The listener is attached to just one lease, as opposed + * to the termporary lease listener. + * + * @param pathToObserver Path to lease + * @param listener Listener. Callbacks on this listener is invoked by the backend. + */ + void addPermanentLeaseListener(final CloudnamePath pathToObserver, final LeaseListener listener); + + /** + * Remove listener on permanent lease. Unknown listeners are ignored by the backend. + * @param listener The listener to remove + */ + void removePermanentLeaseListener(final LeaseListener listener); +} diff --git a/cn-core/src/main/java/org/cloudname/core/CloudnamePath.java b/cn-core/src/main/java/org/cloudname/core/CloudnamePath.java new file mode 100644 index 00000000..269d57cd --- /dev/null +++ b/cn-core/src/main/java/org/cloudname/core/CloudnamePath.java @@ -0,0 +1,195 @@ +package org.cloudname.core; + +import java.util.Arrays; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * A generic representation of a path. 
A "path" might be a bit of a misnomer in the actual + * backend implementation but it can be represented as a uniquely identifying string for the + * leases handed out. A path can be split into elements which can be accessed individually. + * + * Paths are an ordered set of strings consisting of the characters according to RFC 952 and + * RFC 1123, ie [a-z,0-9,-]. The names cannot start or end with a hyphen and can be between + * 1 and 63 characters long. + * + * @author [email protected] + */ +public class CloudnamePath { + private final String[] pathElements; + private static final Pattern NAME_PATTERN = Pattern.compile("[a-z0-9-]*"); + + /** + * Check if path element is a valid name according to RFC 952/RFC 1123 + * + * @param name The element to check + * @return true if element is a valid string + */ + public static boolean isValidPathElementName(final String name) { + if (name == null || name.isEmpty()) { + return false; + } + + final Matcher matcher = NAME_PATTERN.matcher(name); + if (!matcher.matches()) { + return false; + } + if (name.length() > 64) { + return false; + } + if (name.charAt(0) == '-' || name.charAt(name.length() - 1) == '-') { + return false; + } + return true; + } + + /** + * @param pathElements the string array to create the path from. Order is preserved so + * pathElements[0] corresponds to the first element in the path. + * @throws IllegalArgumentException if the pathElements parameter is null. 
+ */ + public CloudnamePath(final String[] pathElements) { + if (pathElements == null) { + throw new IllegalArgumentException("Path elements can not be null"); + } + this.pathElements = new String[pathElements.length]; + for (int i = 0; i < pathElements.length; i++) { + if (pathElements[i] == null) { + throw new IllegalArgumentException("Path element at index " + i + " is null"); + } + final String element = pathElements[i].toLowerCase(); + if (!isValidPathElementName(element)) { + throw new IllegalArgumentException("Name element " + element + " isn't a valid name"); + } + this.pathElements[i] = element; + } + } + + /** + * Create a new path based on an existing one by appending a new element + * + * @param path The original CloudnamePath instance + * @param additionalElement Element to append to the end of the original path + * @throws IllegalArgumentException if one or more of the parameters are null + */ + public CloudnamePath(final CloudnamePath path, final String additionalElement) { + if (path == null) { + throw new IllegalArgumentException("Path can not be null"); + } + if (additionalElement == null) { + throw new IllegalArgumentException("additionalElement can not be null"); + } + + if (!isValidPathElementName(additionalElement)) { + throw new IllegalArgumentException(additionalElement + " isn't a valid path name"); + } + this.pathElements = Arrays.copyOf(path.pathElements, path.pathElements.length + 1); + this.pathElements[this.pathElements.length - 1] = additionalElement; + + } + + /** + * @return the number of elements in the path + */ + public int length() { + return pathElements.length; + } + + /** + * Join the path elements into a string, f.e. 
join "foo", "bar" into "foo:bar" + * + * @param separator separator character between elements + * @return joined elements + */ + public String join(final char separator) { + final StringBuilder sb = new StringBuilder(); + boolean first = true; + for (final String element : pathElements) { + if (!first) { + sb.append(separator); + } + sb.append(element); + first = false; + } + return sb.toString(); + } + + /** + * @param index index of element + * @return element at index + * @throws IndexOutOfBoundsException if the index is out of range + */ + public String get(final int index) { + return pathElements[index]; + } + + /** + * Check if this path is a subpath. A path is a subpath whenever it starts with the + * same elements as the other path ("foo/bar/baz" would be a subpath of "foo/bar/baz/baz" + * but not of "bar/foo") + * + * @param other Path to check + * @return true if this path is a subpath of the specified path + */ + public boolean isSubpathOf(final CloudnamePath other) { + if (other == null) { + return false; + } + if (this.pathElements.length > other.pathElements.length) { + return false; + } + + if (this.pathElements.length == 0) { + // This is an empty path. It is the subpath of any other path. + return true; + } + + for (int i = 0; i < this.pathElements.length; i++) { + if (!other.pathElements[i].equals(this.pathElements[i])) { + return false; + } + } + + return true; + } + + /** + * @return parent path of current. 
If this is the root path (ie it is empty), return the + * current path + */ + public CloudnamePath getParent() { + if (this.pathElements.length == 0) { + return this; + } + return new CloudnamePath(Arrays.copyOf(pathElements, this.pathElements.length - 1)); + } + + @Override + public boolean equals(final Object other) { + if (other == null || !(other instanceof CloudnamePath)) { + return false; + } + final CloudnamePath otherPath = (CloudnamePath) other; + if (otherPath.pathElements.length != pathElements.length) { + return false; + } + for (int i = 0; i < pathElements.length; i++) { + if (!pathElements[i].equals(otherPath.pathElements[i])) { + return false; + } + } + return true; + } + + @Override + public int hashCode() { + return Arrays.hashCode(pathElements); + } + + @Override + public String toString() { + return "[ CloudnamePath (" + Arrays.toString(pathElements) + ") ]"; + } + + +} diff --git a/cn-core/src/main/java/org/cloudname/core/LeaseHandle.java b/cn-core/src/main/java/org/cloudname/core/LeaseHandle.java new file mode 100644 index 00000000..45b79740 --- /dev/null +++ b/cn-core/src/main/java/org/cloudname/core/LeaseHandle.java @@ -0,0 +1,21 @@ +package org.cloudname.core; + +/** + * Handle returned by the backend when a temporary lease is created. + * + * @author [email protected] + */ +public interface LeaseHandle extends AutoCloseable { + /** + * Write data to the lease. + * + * @param data data to write. Cannot be null. 
+ * @return true if data is written + */ + boolean writeLeaseData(final String data); + + /** + * @return The full path of the lease + */ + CloudnamePath getLeasePath(); +} \ No newline at end of file diff --git a/cn-core/src/main/java/org/cloudname/core/LeaseListener.java b/cn-core/src/main/java/org/cloudname/core/LeaseListener.java new file mode 100644 index 00000000..1586b100 --- /dev/null +++ b/cn-core/src/main/java/org/cloudname/core/LeaseListener.java @@ -0,0 +1,31 @@ +package org.cloudname.core; + +/** + * Lease notifications to clients. + * + * @author [email protected] + */ +public interface LeaseListener { + /** + * A new lease is created. The lease is created at this point in time. + * + * @param path The full path of the lease + * @param data The data stored on the lease + */ + void leaseCreated(final CloudnamePath path, final String data); + + /** + * A lease is removed. The lease might not exist anymore at this point in time. + * + * @param path The path of the lease. + */ + void leaseRemoved(final CloudnamePath path); + + /** + * Lease data have changed in one of the leases the client is listening on. + * + * @param path Full path to the lease that have changed + * @param data The new data element stored in the lease + */ + void dataChanged(final CloudnamePath path, final String data); +} diff --git a/cn-core/src/test/java/org/cloudname/core/CloudnamePathTest.java b/cn-core/src/test/java/org/cloudname/core/CloudnamePathTest.java new file mode 100644 index 00000000..0e6bff9b --- /dev/null +++ b/cn-core/src/test/java/org/cloudname/core/CloudnamePathTest.java @@ -0,0 +1,204 @@ +package org.cloudname.core; + +import org.junit.Test; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Test the CloudnamePath class. 
+ */ +public class CloudnamePathTest { + private final String[] emptyElements = new String[] {}; + private final String[] oneElement = new String[] { "foo" }; + private final String[] twoElements = new String[] { "foo", "bar" }; + + @Test (expected = IllegalArgumentException.class) + public void elementsCantBeNull() { + new CloudnamePath(null); + fail("No exception, no pass for you!"); + } + + @Test (expected = IllegalArgumentException.class) + public void pathCantBeNull() { + new CloudnamePath(null, "foof"); + } + + @Test (expected = IllegalArgumentException.class) + public void additionalElementCantBeNull() { + new CloudnamePath(new CloudnamePath(new String[] { "foo" }), null); + } + + @Test + public void appendPath() { + final CloudnamePath singleElement = new CloudnamePath(new String[] { "one" }); + final CloudnamePath twoElements = new CloudnamePath(new String[] { "one", "two" }); + assertThat("Elements aren't equal", singleElement.equals(twoElements), is(false)); + final CloudnamePath appendedElement = new CloudnamePath(singleElement, "two"); + assertThat("Appended are equal", appendedElement.equals(twoElements), is(true)); + } + + @Test + public void elementAccess() { + final CloudnamePath path = new CloudnamePath(twoElements); + assertThat(path.get(0), is(twoElements[0])); + assertThat(path.get(1), is(twoElements[1])); + } + + @Test (expected = IndexOutOfBoundsException.class) + public void elementAccessMustBeWithinBounds() { + final CloudnamePath path = new CloudnamePath(twoElements); + path.get(2); + } + + @Test + public void joinPaths() { + final CloudnamePath empty = new CloudnamePath(emptyElements); + assertThat("The empty path is length = 0", empty.length(), is(0)); + assertThat("String representation of emmpty path is empty string", empty.join('.'), is("")); + + final CloudnamePath one = new CloudnamePath(oneElement); + assertThat("A single element path has length 1", one.length(), is(1)); + assertThat("String representation of a single element path 
is the element", + one.join('.'), is(oneElement[0])); + + final CloudnamePath two = new CloudnamePath(twoElements); + assertThat("Two element paths have length 2", two.length(), is(2)); + assertThat("String representation of two element paths includes both elements", + two.join('.'), is(twoElements[0] + '.' + twoElements[1])); + } + + @Test + public void equalsTest() { + final CloudnamePath twoA = new CloudnamePath(twoElements); + final CloudnamePath twoB = new CloudnamePath(twoElements); + final CloudnamePath none = new CloudnamePath(emptyElements); + final CloudnamePath entirelyDifferent = new CloudnamePath(new String[] { "foo", "2" }); + + assertThat("Identical paths are equal", twoA.equals(twoB), is(true)); + assertThat("Hash codes for equal objects are the same", + twoA.hashCode(), is(twoB.hashCode())); + assertThat("Identical paths are equal, ignore order", twoB.equals(twoA), is(true)); + assertThat("Paths aren't equal to strings", twoA.equals(""), is(false)); + assertThat("Empty path does not equal actual path", twoA.equals(none), is(false)); + assertThat("Null elements aren't equal", twoA.equals(null), is(false)); + assertThat("Differen is just different", twoA.equals(entirelyDifferent), is(false)); + } + + @Test + public void subpaths() { + final String[] e1 = new String[] { "1", "2", "3", "4" }; + final String[] e2 = new String[] { "1", "2" }; + + final CloudnamePath first = new CloudnamePath(e1); + final CloudnamePath second = new CloudnamePath(e2); + final CloudnamePath last = new CloudnamePath(twoElements); + + + assertThat("More specific paths can't be subpaths", first.isSubpathOf(second), is(false)); + assertThat("More generic paths are subpaths", second.isSubpathOf(first), is(true)); + assertThat("A path can be subpath of itself", first.isSubpathOf(first), is(true)); + + assertThat("Paths must match at root levels", last.isSubpathOf(second), is(false)); + + assertThat("Null paths are not subpaths of anything", first.isSubpathOf(null), is(false)); + 
+ final CloudnamePath empty = new CloudnamePath(emptyElements); + assertThat("An empty path is a subpath of everything", empty.isSubpathOf(first), is(true)); + assertThat("Empty paths can't have subpaths", first.isSubpathOf(empty), is(false)); + } + + @Test + public void parentPaths() { + final CloudnamePath originalPath = new CloudnamePath(new String[] { "foo", "bar", "baz" }); + + assertTrue(originalPath.getParent().isSubpathOf(originalPath)); + + assertThat(originalPath.getParent(), is(equalTo( + new CloudnamePath(new String[] { "foo", "bar" })))); + + assertThat(originalPath.getParent().getParent(), + is(equalTo(new CloudnamePath(new String[] { "foo" })))); + + final CloudnamePath emptyPath = new CloudnamePath(new String[] { }); + + assertThat(originalPath.getParent().getParent().getParent(), + is(equalTo(emptyPath))); + + assertThat(originalPath.getParent().getParent().getParent().getParent(), + is(equalTo(emptyPath))); + + assertThat(emptyPath.getParent(), is(equalTo(emptyPath))); + } + @Test + public void testToString() { + final CloudnamePath one = new CloudnamePath(oneElement); + final CloudnamePath two = new CloudnamePath(twoElements); + final CloudnamePath three = new CloudnamePath(emptyElements); + + assertThat(one.toString(), is(notNullValue())); + assertThat(two.toString(), is(notNullValue())); + assertThat(three.toString(), is(notNullValue())); + } + + @Test + public void invalidPathNameWithHyphenFirst() { + assertThat(CloudnamePath.isValidPathElementName("-invalid"), is(false)); + } + + @Test + public void invalidPathNameIsNull() { + assertThat(CloudnamePath.isValidPathElementName(null), is(false)); + } + @Test + public void invalidPathNameWithHyphenLast() { + assertThat(CloudnamePath.isValidPathElementName("invalid-"), is(false)); + } + + @Test + public void invalidPathNameWithEmptyString() { + assertThat(CloudnamePath.isValidPathElementName(""), is(false)); + } + + @Test + public void invalidPathNameWithIllegalChars() { + 
assertThat(CloudnamePath.isValidPathElementName("__"), is(false)); + } + + @Test + public void invalidPathNameWithTooLongLabel() { + assertThat(CloudnamePath.isValidPathElementName( + "rindfleischetikettierungsueberwachungsaufgabenuebertragungsgesetz"), is(false)); + } + + @Test + public void labelNamesAreCaseInsensitive() { + final CloudnamePath one = new CloudnamePath(new String[] { "FirstSecond" }); + final CloudnamePath two = new CloudnamePath(new String[] { "fIRSTsECOND" }); + assertTrue("Label names aren't case sensitive", one.equals(two)); + } + + @Test (expected = IllegalArgumentException.class) + public void pathCanNotBeNull() { + new CloudnamePath(null); + } + + @Test (expected = IllegalArgumentException.class) + public void pathElementsCanNotBeNull() { + new CloudnamePath(new String[] { null, null }); + } + + @Test (expected = IllegalArgumentException.class) + public void pathElementNamesCanNotBeInvalid() { + new CloudnamePath(new String[] { "__", "foo", "bar"}); + } + + @Test (expected = IllegalArgumentException.class) + public void additionalElementsMustBeValid() { + new CloudnamePath(new CloudnamePath(new String[] { "foo" }), "__"); + } +} diff --git a/cn-memory/README.md b/cn-memory/README.md new file mode 100644 index 00000000..bea9e848 --- /dev/null +++ b/cn-memory/README.md @@ -0,0 +1,4 @@ +# Memory-based backend + +This backend is only suitable for testing. It will only work in a single +VM. 
diff --git a/cn-memory/pom.xml b/cn-memory/pom.xml new file mode 100644 index 00000000..ea56d0c7 --- /dev/null +++ b/cn-memory/pom.xml @@ -0,0 +1,57 @@ +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + + <parent> + <groupId>org.cloudname</groupId> + <artifactId>cloudname-parent</artifactId> + <version>3.0-SNAPSHOT</version> + </parent> + + <artifactId>cn-memory</artifactId> + <packaging>jar</packaging> + + <name>Cloudname Memory backend</name> + <description>Memory backend for Cloudname</description> + <url>https://github.com/Cloudname/cloudname</url> + + <dependencies> + <dependency> + <groupId>org.cloudname</groupId> + <artifactId>cn-core</artifactId> + </dependency> + + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>org.hamcrest</groupId> + <artifactId>hamcrest-all</artifactId> + <version>1.3</version> + </dependency> + + <dependency> + <groupId>org.cloudname</groupId> + <artifactId>testtools</artifactId> + <scope>test</scope> + </dependency> + + </dependencies> + + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-plugin</artifactId> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-compiler-plugin</artifactId> + </plugin> + + </plugins> + </build> +</project> diff --git a/cn-memory/src/main/java/org/cloudname/backends/memory/MemoryBackend.java b/cn-memory/src/main/java/org/cloudname/backends/memory/MemoryBackend.java new file mode 100644 index 00000000..0a869de6 --- /dev/null +++ b/cn-memory/src/main/java/org/cloudname/backends/memory/MemoryBackend.java @@ -0,0 +1,270 @@ +package org.cloudname.backends.memory; + +import org.cloudname.core.CloudnameBackend; +import 
org.cloudname.core.CloudnamePath; +import org.cloudname.core.LeaseHandle; +import org.cloudname.core.LeaseListener; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Random; +import java.util.Set; + +/** + * Memory backend. This is the canonical implementation. The synchronization is probably not + * optimal but for testing this is OK. It defines the correct behaviour for backends, including + * calling listeners, return values and uniqueness. The actual timing of the various backends + * will of course vary. + * + * @author [email protected] + */ +public class MemoryBackend implements CloudnameBackend { + private enum LeaseEvent { + CREATED, + REMOVED, + DATA + } + + private final Map<CloudnamePath,String> temporaryLeases = new HashMap<>(); + private final Map<CloudnamePath,String> permanentLeases = new HashMap<>(); + private final Map<CloudnamePath, Set<LeaseListener>> observedTemporaryPaths = new HashMap<>(); + private final Map<CloudnamePath, Set<LeaseListener>> observedPermanentPaths = new HashMap<>(); + private final Object syncObject = new Object(); + + /* package-private */ void removeTemporaryLease(final CloudnamePath leasePath) { + synchronized (syncObject) { + if (temporaryLeases.containsKey(leasePath)) { + temporaryLeases.remove(leasePath); + notifyTemporaryObservers(leasePath, LeaseEvent.REMOVED, null); + } + } + } + private final Random random = new Random(); + + private String createRandomInstanceName() { + return Long.toHexString(random.nextLong()); + } + + /** + * @param path The path that has changed + * @param event The event + * @param data The data + */ + private void notifyTemporaryObservers( + final CloudnamePath path, final LeaseEvent event, final String data) { + for (final CloudnamePath observedPath : observedTemporaryPaths.keySet()) { + if (observedPath.isSubpathOf(path)) { + for (final LeaseListener listener : observedTemporaryPaths.get(observedPath)) { + switch (event) { + case CREATED: + 
listener.leaseCreated(path, data); + break; + case REMOVED: + listener.leaseRemoved(path); + break; + case DATA: + listener.dataChanged(path, data); + break; + default: + throw new RuntimeException("Don't know how to handle " + event); + } + } + } + } + } + + /** + * Notify observers of changes + */ + private void notifyPermanentObservers( + final CloudnamePath path, final LeaseEvent event, final String data) { + for (final CloudnamePath observedPath : observedPermanentPaths.keySet()) { + if (observedPath.isSubpathOf(path)) { + for (final LeaseListener listener : observedPermanentPaths.get(observedPath)) { + switch (event) { + case CREATED: + listener.leaseCreated(path, data); + break; + case REMOVED: + listener.leaseRemoved(path); + break; + case DATA: + listener.dataChanged(path, data); + break; + default: + throw new RuntimeException("Don't know how to handle " + event); + } + } + } + } + } + + @Override + public boolean createPermanantLease(final CloudnamePath path, final String data) { + assert path != null : "Path to lease must be set!"; + assert data != null : "Lease data is required"; + synchronized (syncObject) { + if (permanentLeases.containsKey(path)) { + return false; + } + permanentLeases.put(path, data); + notifyPermanentObservers(path, LeaseEvent.CREATED, data); + } + return true; + } + + @Override + public boolean removePermanentLease(final CloudnamePath path) { + synchronized (syncObject) { + if (!permanentLeases.containsKey(path)) { + return false; + } + permanentLeases.remove(path); + notifyPermanentObservers(path, LeaseEvent.REMOVED, null); + } + return true; + } + + @Override + public boolean writePermanentLeaseData(final CloudnamePath path, String data) { + synchronized (syncObject) { + if (!permanentLeases.containsKey(path)) { + return false; + } + permanentLeases.put(path, data); + notifyPermanentObservers(path, LeaseEvent.DATA, data); + } + return true; + } + + @Override + public String readPermanentLeaseData(final CloudnamePath path) { + 
synchronized (syncObject) { + if (!permanentLeases.containsKey(path)) { + return null; + } + return permanentLeases.get(path); + } + } + + @Override + public boolean writeTemporaryLeaseData(final CloudnamePath path, String data) { + synchronized (syncObject) { + if (!temporaryLeases.containsKey(path)) { + return false; + } + temporaryLeases.put(path, data); + notifyTemporaryObservers(path, LeaseEvent.DATA, data); + } + return true; + } + + @Override + public String readTemporaryLeaseData(final CloudnamePath path) { + synchronized (syncObject) { + if (!temporaryLeases.containsKey(path)) { + return null; + } + return temporaryLeases.get(path); + } + } + + @Override + public LeaseHandle createTemporaryLease(final CloudnamePath path, final String data) { + synchronized (syncObject) { + final String instanceName = createRandomInstanceName(); + CloudnamePath instancePath = new CloudnamePath(path, instanceName); + while (temporaryLeases.containsKey(instancePath)) { + instancePath = new CloudnamePath(path, instanceName); + } + temporaryLeases.put(instancePath, data); + notifyTemporaryObservers(instancePath, LeaseEvent.CREATED, data); + return new MemoryLeaseHandle(this, instancePath); + } + } + + /** + * Generate created events for temporary leases for newly attached listeners. + */ + private void regenerateEventsForTemporaryListener( + final CloudnamePath path, final LeaseListener listener) { + for (final CloudnamePath temporaryPath : temporaryLeases.keySet()) { + if (path.isSubpathOf(temporaryPath)) { + listener.leaseCreated(temporaryPath, temporaryLeases.get(temporaryPath)); + } + } + } + + /** + * Generate created events on permanent leases for newly attached listeners. 
+ */ + private void regenerateEventsForPermanentListener( + final CloudnamePath path, final LeaseListener listener) { + for (final CloudnamePath permanentPath : permanentLeases.keySet()) { + if (path.isSubpathOf(permanentPath)) { + listener.leaseCreated(permanentPath, permanentLeases.get(permanentPath)); + } + } + } + + @Override + public void addTemporaryLeaseListener( + final CloudnamePath pathToObserve, final LeaseListener listener) { + synchronized (syncObject) { + Set<LeaseListener> listeners = observedTemporaryPaths.get(pathToObserve); + if (listeners == null) { + listeners = new HashSet<>(); + } + listeners.add(listener); + observedTemporaryPaths.put(pathToObserve, listeners); + regenerateEventsForTemporaryListener(pathToObserve, listener); + } + } + + @Override + public void removeTemporaryLeaseListener(final LeaseListener listener) { + synchronized (syncObject) { + for (final Set<LeaseListener> listeners : observedTemporaryPaths.values()) { + if (listeners.contains(listener)) { + listeners.remove(listener); + return; + } + } + } + } + + @Override + public void addPermanentLeaseListener( + final CloudnamePath pathToObserve, final LeaseListener listener) { + synchronized (syncObject) { + Set<LeaseListener> listeners = observedPermanentPaths.get(pathToObserve); + if (listeners == null) { + listeners = new HashSet<>(); + } + listeners.add(listener); + observedPermanentPaths.put(pathToObserve, listeners); + regenerateEventsForPermanentListener(pathToObserve, listener); + } + } + + @Override + public void removePermanentLeaseListener(final LeaseListener listener) { + synchronized (syncObject) { + for (final Set<LeaseListener> listeners : observedPermanentPaths.values()) { + if (listeners.contains(listener)) { + listeners.remove(listener); + return; + } + } + } + } + + @Override + public void close() { + synchronized (syncObject) { + observedTemporaryPaths.clear(); + observedPermanentPaths.clear(); + } + } +} diff --git 
a/cn-memory/src/main/java/org/cloudname/backends/memory/MemoryLeaseHandle.java b/cn-memory/src/main/java/org/cloudname/backends/memory/MemoryLeaseHandle.java new file mode 100644 index 00000000..53b1e277 --- /dev/null +++ b/cn-memory/src/main/java/org/cloudname/backends/memory/MemoryLeaseHandle.java @@ -0,0 +1,47 @@ +package org.cloudname.backends.memory; + +import org.cloudname.core.CloudnamePath; +import org.cloudname.core.LeaseHandle; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A handle returned to clients acquiring temporary leases. + * + * @author [email protected] + */ +public class MemoryLeaseHandle implements LeaseHandle { + private final MemoryBackend backend; + private final CloudnamePath clientLeasePath; + private AtomicBoolean expired = new AtomicBoolean(false); + + /** + * @param backend The backend issuing the lease + * @param clientLeasePath The path to the lease + */ + public MemoryLeaseHandle(final MemoryBackend backend, final CloudnamePath clientLeasePath) { + this.backend = backend; + this.clientLeasePath = clientLeasePath; + expired.set(false); + } + + @Override + public boolean writeLeaseData(String data) { + return backend.writeTemporaryLeaseData(clientLeasePath, data); + } + + @Override + public CloudnamePath getLeasePath() { + if (expired.get()) { + return null; + } + return clientLeasePath; + } + + @Override + public void close() throws IOException { + backend.removeTemporaryLease(clientLeasePath); + expired.set(true); + } +} diff --git a/cn-memory/src/test/java/org/cloudname/backends/memory/MemoryBackendTest.java b/cn-memory/src/test/java/org/cloudname/backends/memory/MemoryBackendTest.java new file mode 100644 index 00000000..acc1ad8e --- /dev/null +++ b/cn-memory/src/test/java/org/cloudname/backends/memory/MemoryBackendTest.java @@ -0,0 +1,18 @@ +package org.cloudname.backends.memory; + +import org.cloudname.core.CloudnameBackend; +import org.cloudname.testtools.backend.CoreBackendTest; + 
+/** + * Test the memory backend. Since the memory backend is the reference implementation this test + * shouldn't fail. Ever. + */ +public class MemoryBackendTest extends CoreBackendTest { + private static final CloudnameBackend BACKEND = new MemoryBackend(); + + @Override + protected CloudnameBackend getBackend() { + return BACKEND; + } + +} diff --git a/cn-service/README.md b/cn-service/README.md new file mode 100644 index 00000000..8cf7df15 --- /dev/null +++ b/cn-service/README.md @@ -0,0 +1,107 @@ +# Cloudname service discovery + +## Coordinates +Each service that runs is represented by a **coordinate**. There are two kinds of coordinates: +* **Service coordinates** which are generic coordinates that points to one or more services +* **Instance coordinates** which points to a particular service + +Coordinates are specified through **regions** and **tags**. A **region** is a separate (logical) cluster of services. One region is usually not connected to another region. The simplest comparison is either a *data center* or an AWS *region* or *availability zone* (like eu-west-1, us-east-1 and so on). + +The **tag** is just that - a tag that you can assign to a cluster of different services. The tag doesn't contain any particular semantics. + +A **service coordinate** looks like `<service>.<tag>.<region>`, f.e. `geolocation.rel1501.dc1` or (if you are running in AWS and have decided that you'll assume regions are availability zones) `geolocation.rel1501.eu-west-1a`. + +Instance coordinates points to a particular service instance and looks like this: `<instance identifier>.<service name>.<tag>.<region>`. For the examples above the instance coordinates might look like `ff08f0ah.geolocation.rel1501.dc1` or `ab08bed5.geolocation.rel1501.eu-west-1a`. + +The instance identifier is an unique identifier for that instance. Note that the instance identifier isn't unique across all services, isn't sequential and does not carry any semantic information. 
+ +## Register a service +A service is registered through the `CloudnameService` class: +```java +// Create the service class. Note that getBackend() returns a Cloudname backend +// instance. There are multiple types available. +try (CloudnameService cloudnameService = new CloudnameService(getBackend())) { + // Create the coordinate and endpoint + ServiceCoordinate serviceCoordinate = ServiceCoordinate.parse("myservice.demo.local"); + Endpoint httpEndpoint = new Endpoint("http", "127.0.0.1", 80); + + ServiceData serviceData = new ServiceData(Arrays.asList(httpEndpoint)); + + // This will register the service. The returned handle will expose the registration + // to other clients until it is closed. + try (ServiceHandle handle = cloudnameService.registerService(serviceCoordinate, serviceData)) { + + // ...Run your service here + + } +} +``` + +## Looking up services +Services can be located without registering a service; supply a listener to the CloudnameService instance to get notified of new services: +```java +CloudnameService cloudnameService = new CloudnameService(getBackend()); +ServiceCoordinate serviceCoordinate = ServiceCoordinate.parse("myservice.demo.local"); +cloudnameService.addServiceListener(serviceCoordinate, new ServiceListener() { + @Override + public void onServiceCreated(final InstanceCoordinate coordinate, final ServiceData data) { + // A new instance is launched. Retrieve the endpoints via the data parameter. + // Note that this method is also called when the listener is set so you'll + // get notifications on already existing services as well. + } + + @Override + public void onServiceDataChanged(final InstanceCoordinate coordinate, final ServiceData data) { + // There's a change in endpoints for the given instance. The updated endpoints + // are supplied in the data parameter + } + + @Override + public void onServiceRemoved(final InstanceCoordinate coordinate) { + // One of the instances is stopped.
It might become unavailable shortly + // (or it might have terminated) + } +}); +``` + +## Permanent services +Some resources might not be suitable for service discovery, either because they are not under your control, they are pet services or not designed for cloud-like behavior (aka "pet servers"). You can still use those in service discovery; just add them as *permanent services*. Permanent services behave a bit differently from ordinary services; they stay alive for long periods of time and on some rare occasions they change their endpoint. Registering permanent services are similar to ordinary services. The following snippet registers a permanent service, then terminates. The service registration will still be available to other clients when this client has terminated: + +```java +try (CloudnameService cloudnameService = new CloudnameService(getBackend())) { + ServiceCoordinate coordinate = ServiceCoordinate.parse("mydb.demo.local"); + Endpoint endpoint = new Endpoint("db", "127.0.0.1", 5678); + + if (!cloudnameService.createPermanentService(coordinate, endpoint)) { + System.out.println("Couldn't register permanent service!"); + } +} +``` +Note that permanent services can not have more than one endpoint registered at any time. A permanent service registration applies only to *one* service at a time. + +Looking up permanent service registrations is similar to ordinary services: + +```java +try (CloudnameService cloudnameService = new CloudnameService(getBackend())) { + ServiceCoordinate coordinate = ServiceCoordinate.parse("mydb.demo.local"); + cloudnameService.addPermanentServiceListener(coordinate, + new PermanentServiceListener() { + @Override + public void onServiceCreated(Endpoint endpoint) { + // Service is created. Note that this is also called when the + // listener is set so you'll get notifications on already + // existing services as well. 
+ } + + @Override + public void onServiceChanged(Endpoint endpoint) { + // The endpoint is updated + } + + @Override + public void onServiceRemoved() { + // The service has been removed + } + }); +} +``` diff --git a/cn-service/pom.xml b/cn-service/pom.xml new file mode 100644 index 00000000..f3788ed2 --- /dev/null +++ b/cn-service/pom.xml @@ -0,0 +1,63 @@ +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + + <parent> + <groupId>org.cloudname</groupId> + <artifactId>cloudname-parent</artifactId> + <version>3.0-SNAPSHOT</version> + </parent> + + <artifactId>cn-service</artifactId> + <packaging>jar</packaging> + + <name>Cloudname Service Discovery</name> + <description>Simple library for service discovery (and notifications)</description> + <url>https://github.com/Cloudname/cloudname</url> + + <dependencies> + <dependency> + <groupId>org.cloudname</groupId> + <artifactId>cn-core</artifactId> + </dependency> + + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>org.hamcrest</groupId> + <artifactId>hamcrest-all</artifactId> + <version>1.3</version> + </dependency> + + <dependency> + <groupId>org.json</groupId> + <artifactId>json</artifactId> + <version>20140107</version> + </dependency> + + <dependency> + <groupId>org.cloudname</groupId> + <artifactId>cn-memory</artifactId> + <scope>test</scope> + + </dependency> + </dependencies> + + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-plugin</artifactId> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-compiler-plugin</artifactId> + </plugin> + + </plugins> + </build> +</project> diff --git 
a/cn-service/src/main/java/org/cloudname/service/CloudnameService.java b/cn-service/src/main/java/org/cloudname/service/CloudnameService.java new file mode 100644 index 00000000..9c4747f4 --- /dev/null +++ b/cn-service/src/main/java/org/cloudname/service/CloudnameService.java @@ -0,0 +1,237 @@ +package org.cloudname.service; +import org.cloudname.core.CloudnameBackend; +import org.cloudname.core.CloudnamePath; +import org.cloudname.core.LeaseHandle; +import org.cloudname.core.LeaseListener; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Service discovery implementation. Use registerService() and addServiceListener() to register + * and locate services. + * + * TODO: Enable lookups based on partial coordinates. Create builder for service coordinates, + * use own coordinate to resolve complete coordinate. + * + * @author [email protected] + */ +public class CloudnameService implements AutoCloseable { + private final Logger LOG = Logger.getLogger(CloudnameService.class.getName()); + + private final CloudnameBackend backend; + private final List<ServiceHandle> handles = new ArrayList<>(); + private final List<LeaseListener> temporaryListeners = new ArrayList<>(); + private final List<LeaseListener> permanentListeners = new ArrayList<>(); + private final Set<ServiceCoordinate> permanentUpdatesInProgress = new CopyOnWriteArraySet<>(); + private final Object syncObject = new Object(); + + /** + * @param backend backend implementation to use + * @throws IllegalArgumentException if parameter is invalid + */ + public CloudnameService(final CloudnameBackend backend) { + if (backend == null) { + throw new IllegalArgumentException("Backend can not be null"); + } + this.backend = backend; + } + + /** + * Register an instance with the given service coordinate.
The service will get its own + * instance coordinate under the given service coordinate. + * + * @param serviceCoordinate The service coordinate that the service (instance) will attach to + * @param serviceData Service data for the instance + * @return ServiceHandle a handle the client can use to manage the endpoints for the service. + * The most typical use case is to register all endpoints + * @throws IllegalArgumentException if the parameters are invalid + */ + public ServiceHandle registerService( + final ServiceCoordinate serviceCoordinate, final ServiceData serviceData) { + + if (serviceCoordinate == null) { + throw new IllegalArgumentException("Coordinate cannot be null"); + } + if (serviceData == null) { + throw new IllegalArgumentException("Service Data cannot be null"); + } + final LeaseHandle leaseHandle = backend.createTemporaryLease( + serviceCoordinate.toCloudnamePath(), serviceData.toJsonString()); + + final ServiceHandle serviceHandle = new ServiceHandle( + new InstanceCoordinate(leaseHandle.getLeasePath()), serviceData, leaseHandle); + + synchronized (syncObject) { + handles.add(serviceHandle); + } + return serviceHandle; + } + + /** + * Add listener for service events. This only applies to ordinary services. + * + * @param coordinate The coordinate to monitor. + * @param listener Listener getting notifications on changes. + * @throws IllegalArgumentException if parameters are invalid + */ + public void addServiceListener( + final ServiceCoordinate coordinate, final ServiceListener listener) { + if (coordinate == null) { + throw new IllegalArgumentException("Coordinate can not be null"); + } + if (listener == null) { + throw new IllegalArgumentException("Listener can not be null"); + } + // Just create the corresponding listener on the backend and translate the parameters + // from the listener. 
+ final LeaseListener leaseListener = new LeaseListener() { + @Override + public void leaseCreated(final CloudnamePath path, final String data) { + final InstanceCoordinate instanceCoordinate = new InstanceCoordinate(path); + final ServiceData serviceData = ServiceData.fromJsonString(data); + listener.onServiceCreated(instanceCoordinate, serviceData); + } + + @Override + public void leaseRemoved(final CloudnamePath path) { + final InstanceCoordinate instanceCoordinate = new InstanceCoordinate(path); + listener.onServiceRemoved(instanceCoordinate); + } + + @Override + public void dataChanged(final CloudnamePath path, final String data) { + final InstanceCoordinate instanceCoordinate = new InstanceCoordinate(path); + final ServiceData serviceData = ServiceData.fromJsonString(data); + listener.onServiceDataChanged(instanceCoordinate, serviceData); + } + }; + synchronized (syncObject) { + temporaryListeners.add(leaseListener); + } + backend.addTemporaryLeaseListener(coordinate.toCloudnamePath(), leaseListener); + } + + /** + * Create a permanent service. The service registration will be kept when the client exits. The + * service will have a single endpoint. + */ + public boolean createPermanentService( + final ServiceCoordinate coordinate, final Endpoint endpoint) { + if (coordinate == null) { + throw new IllegalArgumentException("Service coordinate can't be null"); + } + if (endpoint == null) { + throw new IllegalArgumentException("Endpoint can't be null"); + } + + return backend.createPermanantLease(coordinate.toCloudnamePath(), endpoint.toJsonString()); + } + + /** + * Update permanent service coordinate. Note that this is a non-atomic operation with multiple + * trips to the backend system. The update is done in two operations; one delete and one + * create. If the delete operation fail and the create operation succeeds it might end up + * removing the permanent service coordinate. Clients will not be notified of the removal. 
+ */ + public boolean updatePermanentService( + final ServiceCoordinate coordinate, final Endpoint endpoint) { + if (coordinate == null) { + throw new IllegalArgumentException("Coordinate can't be null"); + } + if (endpoint == null) { + throw new IllegalArgumentException("Endpoint can't be null"); + } + + if (permanentUpdatesInProgress.contains(coordinate)) { + LOG.log(Level.WARNING, "Attempt to update a permanent service which is already" + " updating. (coordinate: " + coordinate + ", endpoint: " + endpoint); + return false; + } + // Check if the endpoint name still matches. + final String data = backend.readPermanentLeaseData(coordinate.toCloudnamePath()); + if (data == null) { + return false; + } + final Endpoint oldEndpoint = Endpoint.fromJson(data); + if (!oldEndpoint.getName().equals(endpoint.getName())) { + LOG.log(Level.INFO, "Rejecting attempt to update permanent service with a new endpoint" + " that has a different name. Old name: " + oldEndpoint + " new: " + endpoint); + return false; + } + permanentUpdatesInProgress.add(coordinate); + try { + return backend.writePermanentLeaseData( + coordinate.toCloudnamePath(), endpoint.toJsonString()); + } catch (final RuntimeException ex) { + LOG.log(Level.WARNING, "Got exception updating permanent lease. The system might be in" + " an indeterminate state", ex); + return false; + } finally { + permanentUpdatesInProgress.remove(coordinate); + } + } + + /** + * Remove a previously registered permanent service. Needless to say: Use with caution. + */ + public boolean removePermanentService(final ServiceCoordinate coordinate) { + if (coordinate == null) { + throw new IllegalArgumentException("Coordinate can not be null"); + } + return backend.removePermanentLease(coordinate.toCloudnamePath()); + } + + /** + * Listen for changes in permanent services. The changes are usually of the earth-shattering + * variety so as a client you'd be interested in knowing about these as soon as possible.
+ */ + public void addPermanentServiceListener( + final ServiceCoordinate coordinate, final PermanentServiceListener listener) { + if (coordinate == null) { + throw new IllegalArgumentException("Coordinate can not be null"); + } + if (listener == null) { + throw new IllegalArgumentException("Listener can not be null"); + } + final LeaseListener leaseListener = new LeaseListener() { + @Override + public void leaseCreated(CloudnamePath path, String data) { + listener.onServiceCreated(Endpoint.fromJson(data)); + } + + @Override + public void leaseRemoved(CloudnamePath path) { + listener.onServiceRemoved(); + } + + @Override + public void dataChanged(CloudnamePath path, String data) { + listener.onServiceChanged(Endpoint.fromJson(data)); + } + }; + synchronized (syncObject) { + permanentListeners.add(leaseListener); + } + backend.addPermanentLeaseListener(coordinate.toCloudnamePath(), leaseListener); + } + + @Override + public void close() { + synchronized (syncObject) { + for (final ServiceHandle handle : handles) { + handle.close(); + } + for (final LeaseListener listener : temporaryListeners) { + backend.removeTemporaryLeaseListener(listener); + } + for (final LeaseListener listener : permanentListeners) { + backend.removePermanentLeaseListener(listener); + } + } + } +} diff --git a/cn-service/src/main/java/org/cloudname/service/Endpoint.java b/cn-service/src/main/java/org/cloudname/service/Endpoint.java new file mode 100644 index 00000000..d20371fd --- /dev/null +++ b/cn-service/src/main/java/org/cloudname/service/Endpoint.java @@ -0,0 +1,114 @@ +package org.cloudname.service; + +import org.cloudname.core.CloudnamePath; +import org.json.JSONObject; + +/** + * Endpoints exposed by services. Endpoints contains host address and port number. + * + * @author [email protected] + */ +public class Endpoint { + private final String name; + private final String host; + private final int port; + + /** + * @param name Name of endpoint. 
Must conform to RFC 952 and RFC 1123, + * ie [a-z,0-9,-] + * @param host Host name or IP address + * @param port Port number (1- max port number) + * @throws IllegalArgumentException if one of the parameters is null (name/host) or zero (port) + */ + public Endpoint(final String name, final String host, final int port) { + if (name == null || name.isEmpty()) { + throw new IllegalArgumentException("Name can not be null or empty"); + } + if (host == null || host.isEmpty()) { + throw new IllegalArgumentException("Host can not be null or empty"); + } + if (port < 1) { + throw new IllegalArgumentException("Port can not be < 1"); + } + if (!CloudnamePath.isValidPathElementName(name)) { + throw new IllegalArgumentException("Name is not a valid identifier"); + } + + this.name = name; + this.host = host; + this.port = port; + } + + /** + * @return The endpoint's name + */ + public String getName() { + return name; + } + + /** + * @return The endpoint's host name or IP address + */ + public String getHost() { + return host; + } + + /** + * @return The endpoint's port number + */ + public int getPort() { + return port; + } + + /** + * @return JSON representation of instance + */ + /* package-private */ String toJsonString() { + return new JSONObject() + .put("name", name) + .put("host", host) + .put("port", port) + .toString(); + } + + /** + * @param jsonString String with JSON representation of instance + * @return Endpoint instance + * @throws org.json.JSONException if the string is malformed.
+ */ + /* package-private */ static Endpoint fromJson(final String jsonString) { + final JSONObject json = new JSONObject(jsonString); + return new Endpoint( + json.getString("name"), + json.getString("host"), + json.getInt("port")); + } + + @Override + public boolean equals(final Object o) { + if (o == null || !(o instanceof Endpoint)) { + return false; + } + final Endpoint other = (Endpoint) o; + + if (!this.name.equals(other.name) + || !this.host.equals(other.host) + || this.port != other.port) { + return false; + } + return true; + } + + @Override + public int hashCode() { + return this.toString().hashCode(); + } + + @Override + public String toString() { + return "[ name = " + name + + ", host = " + host + + ", port = " + port + + "]"; + } +} diff --git a/cn-service/src/main/java/org/cloudname/service/InstanceCoordinate.java b/cn-service/src/main/java/org/cloudname/service/InstanceCoordinate.java new file mode 100644 index 00000000..7d219357 --- /dev/null +++ b/cn-service/src/main/java/org/cloudname/service/InstanceCoordinate.java @@ -0,0 +1,146 @@ +package org.cloudname.service; + +import org.cloudname.core.CloudnamePath; +import org.json.JSONObject; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * A coordinate representing a running service. The coordinate consists of four parts; instance id, + * service name, tag and region. + * + * Note that the order of elements in the string representation is opposite of the CloudnamePath + * class; you can't create a canonical representation of the instance coordinate by calling join() + * on the CloudnamePath instance. 
+ * + * @author [email protected] + */ +public class InstanceCoordinate { + private static final Pattern COORDINATE_PATTERN = Pattern.compile("(.*)\\.(.*)\\.(.*)\\.(.*)"); + private static final String REGION_NAME = "region"; + private static final String TAG_NAME = "tag"; + private static final String SERVICE_NAME = "service"; + private static final String INSTANCE_NAME = "instance"; + + + private final String region; + private final String tag; + private final String service; + private final String instance; + + /** + * @param path CloudnamePath instance to use as source + * @throws IllegalArgumentException if parameters are invalid + */ + /* package-private */ InstanceCoordinate(final CloudnamePath path) { + if (path == null) { + throw new IllegalArgumentException("Path can not be null"); + } + if (path.length() != 4) { + throw new IllegalArgumentException("Path must contain 4 elements"); + } + this.region = path.get(0); + this.tag = path.get(1); + this.service = path.get(2); + this.instance = path.get(3); + } + + /** + * @return The region of the coordinate + */ + public String getRegion() { + return region; + } + + /** + * @return The tag of the coordinate + */ + public String getTag() { + return tag; + } + + /** + * @return The service name + */ + public String getService() { + return service; + } + + /** + * @return The instance identifier + */ + public String getInstance() { + return instance; + } + + /** + * @return A CloudnamePath instance representing this coordinate + */ + /* package-private */ CloudnamePath toCloudnamePath() { + return new CloudnamePath( + new String[] { this.region, this.tag, this.service, this.instance }); + } + + /** + * @return Canonical string representation of coordinate + */ + public String toCanonicalString() { + return new StringBuffer() + .append(instance).append(".") + .append(service).append(".") + .append(tag).append(".") + .append(region) + .toString(); + } + + /** + * @return Coordinate represented as a JSON-formatted 
string + */ + /* package-private */ String toJsonString() { + return new JSONObject() + .put(REGION_NAME, this.region) + .put(TAG_NAME, this.tag) + .put(SERVICE_NAME, this.service) + .put(INSTANCE_NAME, this.instance) + .toString(); + } + + /** + * @param jsonString A coordinate serialized as a JSON-formatted string + * @return InstanceCoordinate built from the string + */ + /* package-private */ static InstanceCoordinate fromJson(final String jsonString) { + final JSONObject object = new JSONObject(jsonString); + final String[] pathElements = new String[4]; + pathElements[0] = object.getString(REGION_NAME); + pathElements[1] = object.getString(TAG_NAME); + pathElements[2] = object.getString(SERVICE_NAME); + pathElements[3] = object.getString(INSTANCE_NAME); + + return new InstanceCoordinate(new CloudnamePath(pathElements)); + } + + /** + * @param string A canonical string representation of a coordinate + * @return InstanceCoordinate built from the string + */ + public static InstanceCoordinate parse(final String string) { + if (string == null) { + return null; + } + final Matcher matcher = COORDINATE_PATTERN.matcher(string); + if (!matcher.matches()) { + return null; + } + final String[] path = new String[] { + matcher.group(4), matcher.group(3), matcher.group(2), matcher.group(1) + }; + return new InstanceCoordinate(new CloudnamePath(path)); + } + + @Override + public String toString() { + return "[ Coordinate " + toCanonicalString() + "]"; + } +} diff --git a/cn-service/src/main/java/org/cloudname/service/PermanentServiceListener.java b/cn-service/src/main/java/org/cloudname/service/PermanentServiceListener.java new file mode 100644 index 00000000..5d644b89 --- /dev/null +++ b/cn-service/src/main/java/org/cloudname/service/PermanentServiceListener.java @@ -0,0 +1,25 @@ +package org.cloudname.service; + +/** + * Listener interface for permanent services. 
+ * + * @author [email protected] + */ +public interface PermanentServiceListener { + /** + * A service is created. This method will be called on start-up for all existing services. + * @param endpoint The endpoint of the service + */ + void onServiceCreated(final Endpoint endpoint); + + /** + * Service endpoint has changed. + * @param endpoint The new value of the service endpoint + */ + void onServiceChanged(final Endpoint endpoint); + + /** + * Service has been removed. + */ + void onServiceRemoved(); +} diff --git a/cn-service/src/main/java/org/cloudname/service/ServiceCoordinate.java b/cn-service/src/main/java/org/cloudname/service/ServiceCoordinate.java new file mode 100644 index 00000000..02a74637 --- /dev/null +++ b/cn-service/src/main/java/org/cloudname/service/ServiceCoordinate.java @@ -0,0 +1,107 @@ +package org.cloudname.service; + +import org.cloudname.core.CloudnamePath; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * A coordinate pointing to a set of services or a single permanent service. + * + * @author [email protected] + */ +public class ServiceCoordinate { + private final String region; + private final String tag; + private final String service; + + // Pattern for string parsing + private static final Pattern COORDINATE_PATTERN = Pattern.compile("(.*)\\.(.*)\\.(.*)"); + + /** + * @param path The CloudnamePath instance to use when building the coordinate. The coordinate + * must consist of three elements and can not be null. 
+ * @throws IllegalArgumentException if parameter is invalid + */ + /* package-private */ ServiceCoordinate(final CloudnamePath path) { + if (path == null) { + throw new IllegalArgumentException("Path can not be null"); + } + if (path.length() != 3) { + throw new IllegalArgumentException("Path must have three elements"); + } + region = path.get(0); + tag = path.get(1); + service = path.get(2); + } + + /** + * @return The coordinate's region + */ + public String getRegion() { + return region; + } + + /** + * @return The coordinate's tag + */ + public String getTag() { + return tag; + } + + /** + * @return The coordinate's service name + */ + public String getService() { + return service; + } + + /** + * @param serviceCoordinateString String representation of coordinate + * @return ServiceCoordinate instance built from the string. Null if the coordinate + * can't be parsed correctly. + */ + public static ServiceCoordinate parse(final String serviceCoordinateString) { + final Matcher matcher = COORDINATE_PATTERN.matcher(serviceCoordinateString); + if (!matcher.matches()) { + return null; + } + final String[] path = new String[] { matcher.group(3), matcher.group(2), matcher.group(1) }; + return new ServiceCoordinate(new CloudnamePath(path)); + } + + /** + * @return CloudnamePath representing this coordinate + */ + /* package-private */ CloudnamePath toCloudnamePath() { + return new CloudnamePath(new String[] { this.region, this.tag, this.service }); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final ServiceCoordinate other = (ServiceCoordinate) o; + + if (!this.region.equals(other.region) + || !this.tag.equals(other.tag) + || !this.service.equals(other.service)) { + return false; + } + return true; + } + + @Override + public int hashCode() { + int result = region.hashCode(); + result = 31 * result + tag.hashCode(); + result = 31 * result + 
service.hashCode(); + return result; + } + +} diff --git a/cn-service/src/main/java/org/cloudname/service/ServiceData.java b/cn-service/src/main/java/org/cloudname/service/ServiceData.java new file mode 100644 index 00000000..dec36ea1 --- /dev/null +++ b/cn-service/src/main/java/org/cloudname/service/ServiceData.java @@ -0,0 +1,123 @@ +package org.cloudname.service; + +import org.json.JSONArray; +import org.json.JSONObject; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Service data stored for each service. This data only contains endpoints at the moment. Endpoint + * names must be unique. + * + * @author [email protected] + */ +public class ServiceData { + private final Object syncObject = new Object(); + private final Map<String, Endpoint> endpoints = new HashMap<>(); + + /** + * Create empty service data object with no endpoints. + */ + public ServiceData() { + + } + + /** + * Create a new instance with the given list of endpoints. If there's duplicates in the list + * the duplicates will be discarded. + * + * @param endpointList List of endpoints to add + */ + /* package-private */ ServiceData(final List<Endpoint> endpointList) { + synchronized (syncObject) { + for (final Endpoint endpoint : endpointList) { + endpoints.put(endpoint.getName(), endpoint); + } + } + } + + /** + * @param name Name of endpoint + * @return The endpoint with the specified name. Null if the endpoint doesn't exist + */ + public Endpoint getEndpoint(final String name) { + synchronized (syncObject) { + for (final String epName : endpoints.keySet()) { + if (epName.equals(name)) { + return endpoints.get(name); + } + } + } + return null; + } + + /** + * @param endpoint Endpoint to add + * @return true if endpoint can be added. False if the endpoint already exists. 
+ * @throws IllegalArgumentException if endpoint is invalid + */ + public boolean addEndpoint(final Endpoint endpoint) { + if (endpoint == null) { + throw new IllegalArgumentException("Endpoint can not be null"); + } + synchronized (syncObject) { + if (endpoints.containsKey(endpoint.getName())) { + return false; + } + endpoints.put(endpoint.getName(), endpoint); + } + return true; + } + + /** + * @param endpoint endpoint to remove + * @return True if the endpoint has been removed, false if the endpoint can't be removed. Nulls are not accepted. + * @throws IllegalArgumentException if endpoint is invalid + */ + public boolean removeEndpoint(final Endpoint endpoint) { + if (endpoint == null) { + throw new IllegalArgumentException("Endpoint can't be null"); + } + synchronized (syncObject) { + if (!endpoints.containsKey(endpoint.getName())) { + return false; + } + endpoints.remove(endpoint.getName()); + } + return true; + } + + /** + * @return Service data serialized as a JSON string + */ + /* package-private */ String toJsonString() { + final JSONArray epList = new JSONArray(); + int i = 0; + for (Map.Entry<String, Endpoint> entry : endpoints.entrySet()) { + epList.put(i++, new JSONObject(entry.getValue().toJsonString())); + } + return new JSONObject().put("endpoints", epList).toString(); + } + + /** + * @param jsonString JSON string to create instance from + * @throws IllegalArgumentException if parameter is invalid + */ + /* package-private */ static ServiceData fromJsonString(final String jsonString) { + if (jsonString == null || jsonString.isEmpty()) { + throw new IllegalArgumentException("json string can not be null or empty"); + } + + final List<Endpoint> endpoints = new ArrayList<>(); + + final JSONObject json = new JSONObject(jsonString); + final JSONArray epList = json.getJSONArray("endpoints"); + for (int i = 0; i < epList.length(); i++) { + endpoints.add(Endpoint.fromJson(epList.getJSONObject(i).toString())); + } + return new ServiceData(endpoints); + } +} diff --git
a/cn-service/src/main/java/org/cloudname/service/ServiceHandle.java b/cn-service/src/main/java/org/cloudname/service/ServiceHandle.java new file mode 100644 index 00000000..a9305bb2 --- /dev/null +++ b/cn-service/src/main/java/org/cloudname/service/ServiceHandle.java @@ -0,0 +1,75 @@ +package org.cloudname.service; +import org.cloudname.core.LeaseHandle; + +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * A handle to a service registration. The handle is used to modify the registered endpoints. The + * state is kept in the ServiceData instance held by the handle. Note that endpoints in the + * ServiceData instance isn't registered automatically when the handle is created. + * + * @author [email protected] + */ +public class ServiceHandle implements AutoCloseable { + private static final Logger LOG = Logger.getLogger(ServiceHandle.class.getName()); + private final LeaseHandle leaseHandle; + private final InstanceCoordinate instanceCoordinate; + private final ServiceData serviceData; + + /** + * @param instanceCoordinate The instance coordinate this handle belongs to + * @param serviceData The service data object + * @param leaseHandle The Cloudname handle for the lease + * @throws IllegalArgumentException if parameters are invalid + */ + public ServiceHandle( + final InstanceCoordinate instanceCoordinate, + final ServiceData serviceData, + final LeaseHandle leaseHandle) { + if (instanceCoordinate == null) { + throw new IllegalArgumentException("Instance coordinate cannot be null"); + } + if (serviceData == null) { + throw new IllegalArgumentException("Service data must be set"); + } + if (leaseHandle == null) { + throw new IllegalArgumentException("Lease handle cannot be null"); + } + this.leaseHandle = leaseHandle; + this.instanceCoordinate = instanceCoordinate; + this.serviceData = serviceData; + } + + /** + * @param endpoint The endpoint to register + * @return true if endpoint is registered + */ + boolean registerEndpoint(final 
Endpoint endpoint) { + if (!serviceData.addEndpoint(endpoint)) { + return false; + } + return this.leaseHandle.writeLeaseData(serviceData.toJsonString()); + } + + /** + * @param endpoint The endpoint to remove + * @return true if endpoint is removed + */ + boolean removeEndpoint(final Endpoint endpoint) { + if (!serviceData.removeEndpoint(endpoint)) { + return false; + } + return this.leaseHandle.writeLeaseData(serviceData.toJsonString()); + } + + @Override + public void close() { + try { + leaseHandle.close(); + } catch (final Exception ex) { + LOG.log(Level.WARNING, "Got exception closing lease for instance " + + instanceCoordinate.toCanonicalString(), ex); + } + } +} diff --git a/cn-service/src/main/java/org/cloudname/service/ServiceListener.java b/cn-service/src/main/java/org/cloudname/service/ServiceListener.java new file mode 100644 index 00000000..ac36e6c8 --- /dev/null +++ b/cn-service/src/main/java/org/cloudname/service/ServiceListener.java @@ -0,0 +1,33 @@ +package org.cloudname.service; + +/** + * Listener interface for services. + * + * @author [email protected] + */ +public interface ServiceListener { + /** + * Service is created. Note that this method is called once for every service that already + * exists when the listener is attached. + * + * @param coordinate Coordinate of instance + * @param serviceData The instance's data, ie its endpoints + */ + void onServiceCreated(final InstanceCoordinate coordinate, final ServiceData serviceData); + + /** + * Service's data have changed. + * @param coordinate Coordinate of instance + * @param data The instance's data + */ + void onServiceDataChanged(final InstanceCoordinate coordinate, final ServiceData data); + + /** + * Instance is removed. This means that the service has either closed its connection to + * the Cloudname backend or it has become unavailable for some other reason (f.e. 
caused + * by a network partition) + * + * @param coordinate The instance's coordinate + */ + void onServiceRemoved(final InstanceCoordinate coordinate); +} diff --git a/cn-service/src/test/java/org/cloudname/service/CloudnameServicePermanentTest.java b/cn-service/src/test/java/org/cloudname/service/CloudnameServicePermanentTest.java new file mode 100644 index 00000000..d819931c --- /dev/null +++ b/cn-service/src/test/java/org/cloudname/service/CloudnameServicePermanentTest.java @@ -0,0 +1,261 @@ +package org.cloudname.service; + +import org.cloudname.backends.memory.MemoryBackend; +import org.cloudname.core.CloudnameBackend; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Test persistent services functions. 
+ */ +public class CloudnameServicePermanentTest { + private static final String SERVICE_COORDINATE = "myoldskoolserver.test.local"; + private static final CloudnameBackend memoryBackend = new MemoryBackend(); + private static final Endpoint DEFAULT_ENDPOINT = new Endpoint("serviceport", "localhost", 80); + private final ServiceCoordinate serviceCoordinate = ServiceCoordinate.parse(SERVICE_COORDINATE); + + @BeforeClass + public static void createServiceRegistration() { + try (final CloudnameService cloudnameService = new CloudnameService(memoryBackend)) { + assertThat( + cloudnameService.createPermanentService( + ServiceCoordinate.parse(SERVICE_COORDINATE), DEFAULT_ENDPOINT), + is(true)); + } + } + + @Test + public void testPersistentServiceChanges() throws InterruptedException { + try (final CloudnameService cloudnameService = new CloudnameService(memoryBackend)) { + + final CountDownLatch callCounter = new CountDownLatch(2); + final int secondsToWait = 1; + + // ...a listener on the service will trigger when there's a change plus the initial + // onCreate call. 
+ cloudnameService.addPermanentServiceListener(serviceCoordinate, + new PermanentServiceListener() { + private final AtomicInteger createCount = new AtomicInteger(0); + private final AtomicInteger changeCount = new AtomicInteger(0); + + @Override + public void onServiceCreated(Endpoint endpoint) { + // Expect this to be called once and only once, even on updates + assertThat(createCount.incrementAndGet(), is(1)); + callCounter.countDown(); + } + + @Override + public void onServiceChanged(Endpoint endpoint) { + // This will be called when the endpoint changes + assertThat(changeCount.incrementAndGet(), is(1)); + callCounter.countDown(); + } + + @Override + public void onServiceRemoved() { + // This won't be called + fail("Did not expect onServiceRemoved to be called"); + } + }); + + // Updating with invalid endpoint name fails + assertThat(cloudnameService.updatePermanentService(serviceCoordinate, + new Endpoint("wrongep", DEFAULT_ENDPOINT.getHost(), 81)), + is(false)); + + // Using the right one, however, does work + assertThat(cloudnameService.updatePermanentService(serviceCoordinate, + new Endpoint( + DEFAULT_ENDPOINT.getName(), DEFAULT_ENDPOINT.getHost(), 81)), + is(true)); + // Wait for notifications + callCounter.await(secondsToWait, TimeUnit.SECONDS); + + } + + // At this point the service created above is closed; changes to the service won't + // trigger errors in the listener declared. Just do one change to make sure. 
+ final CloudnameService cloudnameService = new CloudnameService(memoryBackend); + assertThat(cloudnameService.updatePermanentService( + ServiceCoordinate.parse(SERVICE_COORDINATE), DEFAULT_ENDPOINT), is(true)); + } + + @Test + public void testDuplicateRegistration() { + try (final CloudnameService cloudnameService = new CloudnameService(memoryBackend)) { + // Creating the same permanent service will fail + assertThat("Can't create two identical permanent services", + cloudnameService.createPermanentService(serviceCoordinate, DEFAULT_ENDPOINT), + is(false)); + } + } + + @Test (expected = IllegalArgumentException.class) + public void testNullCoordinateRegistration() { + try (final CloudnameService cloudnameService = new CloudnameService(memoryBackend)) { + cloudnameService.createPermanentService(null, DEFAULT_ENDPOINT); + } + } + + @Test (expected = IllegalArgumentException.class) + public void testInvalidEndpoint() { + try (final CloudnameService cloudnameService = new CloudnameService(memoryBackend)) { + cloudnameService.createPermanentService(serviceCoordinate, null); + } + } + + @Test + public void testListenerOnServiceThatDoesntExist() throws InterruptedException { + final String anotherServiceCoordinate = "someother.service.coordinate"; + + // It should be possible to listen for a permanent service that doesn't exist yet. Once the + // service is created it must trigger a callback to the clients listening. 
+ try (final CloudnameService cloudnameService = new CloudnameService(memoryBackend)) { + + final CountDownLatch createCalls = new CountDownLatch(1); + final CountDownLatch removeCalls = new CountDownLatch(1); + final CountDownLatch updateCalls = new CountDownLatch(1); + + cloudnameService.addPermanentServiceListener( + ServiceCoordinate.parse(anotherServiceCoordinate), + new PermanentServiceListener() { + final AtomicInteger order = new AtomicInteger(0); + @Override + public void onServiceCreated(Endpoint endpoint) { + createCalls.countDown(); + assertThat(order.incrementAndGet(), is(1)); + } + + @Override + public void onServiceChanged(Endpoint endpoint) { + updateCalls.countDown(); + assertThat(order.incrementAndGet(), is(2)); + } + + @Override + public void onServiceRemoved() { + removeCalls.countDown(); + assertThat(order.incrementAndGet(), is(3)); + } + }); + + // Create the new service registration, change the endpoint, then remove it. The + // count down latches should count down and the order should be create, change, remove + final ServiceCoordinate another = ServiceCoordinate.parse(anotherServiceCoordinate); + cloudnameService.createPermanentService(another, DEFAULT_ENDPOINT); + cloudnameService.updatePermanentService(another, + new Endpoint(DEFAULT_ENDPOINT.getName(), "otherhost", 4711)); + cloudnameService.removePermanentService(another); + + final int secondsToWait = 1; + assertTrue("Expected callback for create to trigger but it didn't", + createCalls.await(secondsToWait, TimeUnit.SECONDS)); + assertTrue("Expected callback for update to trigger but it didn't", + updateCalls.await(secondsToWait, TimeUnit.SECONDS)); + assertTrue("Expected callback for remove to trigger but it didn't", + removeCalls.await(secondsToWait, TimeUnit.SECONDS)); + } + } + + @Test + public void testLeaseUpdateOnLeaseThatDoesntExist() { + try (final CloudnameService cloudnameService = new CloudnameService(memoryBackend)) { + assertThat("Can't update a service that doesn't 
exist", + cloudnameService.updatePermanentService( + ServiceCoordinate.parse("foo.bar.baz"), DEFAULT_ENDPOINT), + is(false)); + } + } + + @Test + public void testRemoveServiceThatDoesntExist() { + try (final CloudnameService cloudnameService = new CloudnameService(memoryBackend)) { + assertThat("Can't remove a service that doesn't exist", + cloudnameService.removePermanentService(ServiceCoordinate.parse("foo.bar.baz")), + is(false)); + } + } + + @AfterClass + public static void removeServiceRegistration() throws InterruptedException { + try (final CloudnameService cloudnameService = new CloudnameService(memoryBackend)) { + final ServiceCoordinate serviceCoordinate = ServiceCoordinate.parse(SERVICE_COORDINATE); + final CountDownLatch callCounter = new CountDownLatch(2); + final int secondsToWait = 1; + cloudnameService.addPermanentServiceListener(serviceCoordinate, + new PermanentServiceListener() { + private final AtomicInteger createCount = new AtomicInteger(0); + private final AtomicInteger removeCount = new AtomicInteger(0); + + @Override + public void onServiceCreated(final Endpoint endpoint) { + // This will be called once and only once + assertThat("Did not onServiceCreated to be called multiple times", + createCount.incrementAndGet(), is(1)); + callCounter.countDown(); + } + + @Override + public void onServiceChanged(final Endpoint endpoint) { + fail("Did not expect any calls to onServiceChanged"); + } + + @Override + public void onServiceRemoved() { + assertThat("Did not expect onServiceRemoved to be called multiple" + + " times", removeCount.incrementAndGet(), is(1)); + callCounter.countDown(); + } + }); + + // Remove the service created in the setup. + assertThat(cloudnameService.removePermanentService(serviceCoordinate), is(true)); + + assertTrue("Did not receive the expected number of calls to listener. " + + callCounter.getCount() + " calls remaining.", + callCounter.await(secondsToWait, TimeUnit.SECONDS)); + + // Removing it twice will fail. 
+ assertThat(cloudnameService.removePermanentService(serviceCoordinate), is(false)); + } + } + + private final ServiceCoordinate coordinate = ServiceCoordinate.parse("service.tag.region"); + + @Test (expected = IllegalArgumentException.class) + public void coordinateCanNotBeNullWhenUpdatingService() { + new CloudnameService(memoryBackend).updatePermanentService(null, null); + } + + @Test (expected = IllegalArgumentException.class) + public void endpointCanNotBeNullWhenUpdatingService() { + new CloudnameService(memoryBackend).updatePermanentService(coordinate, null); + } + + @Test (expected = IllegalArgumentException.class) + public void coordinateCanNotBeNullWhenRemovingService() { + new CloudnameService(memoryBackend).removePermanentService(null); + } + + @Test (expected = IllegalArgumentException.class) + public void coordinateCanNotBeNullWhenAddingListener() { + new CloudnameService(memoryBackend).addPermanentServiceListener(null, null); + } + + @Test (expected = IllegalArgumentException.class) + public void listenerCanNotBeNullWhenAddingListener() { + new CloudnameService(memoryBackend).addPermanentServiceListener(coordinate, null); + } + +} diff --git a/cn-service/src/test/java/org/cloudname/service/CloudnameServiceTest.java b/cn-service/src/test/java/org/cloudname/service/CloudnameServiceTest.java new file mode 100644 index 00000000..6fe4a9a9 --- /dev/null +++ b/cn-service/src/test/java/org/cloudname/service/CloudnameServiceTest.java @@ -0,0 +1,316 @@ +package org.cloudname.service; + +import org.cloudname.backends.memory.MemoryBackend; +import org.cloudname.core.CloudnameBackend; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertTrue; 
+import static org.junit.Assert.fail; +import static org.junit.Assert.assertThat; +import static org.hamcrest.CoreMatchers.is; + +/** + * Test service registration with memory-based backend. + */ +public class CloudnameServiceTest { + private static final CloudnameBackend memoryBackend = new MemoryBackend(); + + private final ServiceCoordinate coordinate = ServiceCoordinate.parse("service.tag.region"); + + /** + * Max time to wait for changes to propagate to clients. In seconds. + */ + private static final int MAX_WAIT_S = 1; + + private final Random random = new Random(); + private int getRandomPort() { + return Math.max(1, Math.abs(random.nextInt(4096))); + } + + private ServiceHandle registerService(final CloudnameService cloudnameService, final String serviceCoordinateString) { + final ServiceCoordinate serviceCoordinate = ServiceCoordinate.parse(serviceCoordinateString); + + final Endpoint httpEndpoint = new Endpoint("http", "127.0.0.1", getRandomPort()); + final Endpoint webconsoleEndpoint = new Endpoint("webconsole", "127.0.0.2", getRandomPort()); + + final ServiceData serviceData = new ServiceData(Arrays.asList(httpEndpoint, webconsoleEndpoint)); + return cloudnameService.registerService(serviceCoordinate, serviceData); + } + + /** + * Create two sets of services, register both and check that notifications are sent to the + * subscribers. 
+ */ + @Test + public void testServiceNotifications() throws InterruptedException { + final String SOME_COORDINATE = "someservice.test.local"; + final String ANOTHER_COORDINATE = "anotherservice.test.local"; + + final CloudnameService mainCloudname = new CloudnameService(memoryBackend); + + final int numOtherServices = 10; + final List<ServiceHandle> handles = new ArrayList<>(); + for (int i = 0; i < numOtherServices; i++) { + handles.add(registerService(mainCloudname, ANOTHER_COORDINATE)); + } + + final Executor executor = Executors.newCachedThreadPool(); + final int numServices = 5; + final CountDownLatch registrationLatch = new CountDownLatch(numServices); + final CountDownLatch instanceLatch = new CountDownLatch(numServices * numOtherServices); + final CountDownLatch httpEndpointLatch = new CountDownLatch(numServices * numOtherServices); + final CountDownLatch webconsoleEndpointLatch = new CountDownLatch(numServices * numOtherServices); + final CountDownLatch removeLatch = new CountDownLatch(numServices * numOtherServices); + final Semaphore terminateSemaphore = new Semaphore(1); + final CountDownLatch completedLatch = new CountDownLatch(numServices); + + final Runnable service = new Runnable() { + @Override + public void run() { + try (final CloudnameService cloudnameService = new CloudnameService(memoryBackend)) { + try (final ServiceHandle handle = registerService(cloudnameService, SOME_COORDINATE)) { + registrationLatch.countDown(); + + final ServiceCoordinate otherServiceCoordinate = ServiceCoordinate.parse(ANOTHER_COORDINATE); + + // Do a service lookup on the other service. This will yield N elements. 
+ cloudnameService.addServiceListener(otherServiceCoordinate, new ServiceListener() { + @Override + public void onServiceCreated(final InstanceCoordinate coordinate, final ServiceData data) { + instanceLatch.countDown(); + if (data.getEndpoint("http") != null) { + httpEndpointLatch.countDown(); + } + if (data.getEndpoint("webconsole") != null) { + webconsoleEndpointLatch.countDown(); + } + } + + @Override + public void onServiceDataChanged(final InstanceCoordinate coordinate, final ServiceData data) { + if (data.getEndpoint("http") != null) { + httpEndpointLatch.countDown(); + } + if (data.getEndpoint("webconsole") != null) { + webconsoleEndpointLatch.countDown(); + } + } + + @Override + public void onServiceRemoved(final InstanceCoordinate coordinate) { + removeLatch.countDown(); + } + }); + + // Wait for the go ahead before terminating + try { + terminateSemaphore.acquire(); + terminateSemaphore.release(); + } catch (final InterruptedException ie) { + throw new RuntimeException(ie); + } + } + // The service handle will close and the instance will be removed at this point. + } + completedLatch.countDown(); + } + }; + + // Grab the semaphore. This wil stop the services from terminating + terminateSemaphore.acquire(); + + // Start two threads which will register a service and look up a set of another. 
+ for (int i = 0; i < numServices; i++) { + executor.execute(service); + } + + // Wait for the registrations and endpoints to propagate + assertTrue("Expected registrations to complete", + registrationLatch.await(MAX_WAIT_S, TimeUnit.SECONDS)); + + assertTrue("Expected http endpoints to be registered but missing " + + httpEndpointLatch.getCount(), + httpEndpointLatch.await(MAX_WAIT_S, TimeUnit.SECONDS)); + + assertTrue("Expected webconsole endpoints to be registered but missing " + + webconsoleEndpointLatch.getCount(), + webconsoleEndpointLatch.await(MAX_WAIT_S, TimeUnit.SECONDS)); + + // Registrations are now completed; remove the existing services + for (final ServiceHandle handle : handles) { + handle.close(); + } + + // This will trigger remove events in the threads. + assertTrue("Expected services to be removed but " + removeLatch.getCount() + + " still remains", removeLatch.await(MAX_WAIT_S, TimeUnit.SECONDS)); + + // Let the threads terminate. This will remove the registrations + terminateSemaphore.release(); + + assertTrue("Expected services to complete but " + completedLatch.getCount() + + " still remains", completedLatch.await(MAX_WAIT_S, TimeUnit.SECONDS)); + + // Success! There shouldn't be any more services registered at this point. 
Check to make sure + mainCloudname.addServiceListener(ServiceCoordinate.parse(SOME_COORDINATE), new ServiceListener() { + @Override + public void onServiceCreated(final InstanceCoordinate coordinate, final ServiceData data) { + fail("Should not have any services but " + coordinate + " is still there"); + } + + @Override + public void onServiceDataChanged(final InstanceCoordinate coordinate, final ServiceData data) { + fail("Should not have any services but " + coordinate + " reports data"); + } + + @Override + public void onServiceRemoved(final InstanceCoordinate coordinate) { + + } + }); + mainCloudname.addServiceListener(ServiceCoordinate.parse(ANOTHER_COORDINATE), new ServiceListener() { + @Override + public void onServiceCreated(final InstanceCoordinate coordinate, final ServiceData data) { + fail("Should not have any services but " + coordinate + " is still there"); + } + + @Override + public void onServiceDataChanged(final InstanceCoordinate coordinate, final ServiceData data) { + fail("Should not have any services but " + coordinate + " is still there"); + } + + @Override + public void onServiceRemoved(InstanceCoordinate coordinate) { + + } + }); + } + + /** + * Ensure data notifications works as expecte. Update a lot of endpoints on a single + * service and check that the subscribers get notified of all changes in the correct order. 
+ */ + @Test + public void testDataNotifications() throws InterruptedException { + final CloudnameService cs = new CloudnameService(memoryBackend); + + final String serviceCoordinate = "some.service.name"; + final ServiceHandle serviceHandle = cs.registerService( + ServiceCoordinate.parse(serviceCoordinate), + new ServiceData(new ArrayList<Endpoint>())); + + final int numClients = 10; + final int numDataChanges = 50; + final int maxSecondsForNotifications = 1; + final CountDownLatch dataChangeLatch = new CountDownLatch(numClients * numDataChanges); + final CountDownLatch readyLatch = new CountDownLatch(numClients); + final String EP_NAME = "endpoint"; + final Semaphore terminateSemaphore = new Semaphore(1); + + // Grab the semaphore, prevent threads from completing + terminateSemaphore.acquire(); + + final Runnable clientServices = new Runnable() { + @Override + public void run() { + try (final CloudnameService cn = new CloudnameService(memoryBackend)) { + cn.addServiceListener(ServiceCoordinate.parse(serviceCoordinate), new ServiceListener() { + int portNum = 0; + @Override + public void onServiceCreated(InstanceCoordinate coordinate, ServiceData serviceData) { + // ignore this + } + + @Override + public void onServiceDataChanged(InstanceCoordinate coordinate, ServiceData data) { + final Endpoint ep = data.getEndpoint(EP_NAME); + if (ep != null) { + dataChangeLatch.countDown(); + assertThat(ep.getPort(), is(portNum + 1)); + portNum = portNum + 1; + } + } + + @Override + public void onServiceRemoved(InstanceCoordinate coordinate) { + // ignore this + } + }); + readyLatch.countDown(); + + // Wait for the test to finish before closing. The endpoints will be + // processed once every thread is ready. 
+ try { + terminateSemaphore.acquire(); + terminateSemaphore.release(); + } catch (final InterruptedException ie) { + throw new RuntimeException(ie); + } + } + } + }; + + final Executor executor = Executors.newCachedThreadPool(); + for (int i = 0; i < numClients; i++) { + executor.execute(clientServices); + } + + // Wait for the threads to be ready + readyLatch.await(); + + // Publish changes to the same endpoint; the endpoint is updated with a new port + // number for each update. + Endpoint oldEndpoint = null; + for (int portNum = 1; portNum < numDataChanges + 1; portNum++) { + if (oldEndpoint != null) { + serviceHandle.removeEndpoint(oldEndpoint); + } + final Endpoint newEndpoint = new Endpoint(EP_NAME, "localhost", portNum); + serviceHandle.registerEndpoint(newEndpoint); + oldEndpoint = newEndpoint; + } + + // Check if the threads have been notified of all the changes + assertTrue("Expected " + (numDataChanges * numClients) + " changes but " + + dataChangeLatch.getCount() + " remains", + dataChangeLatch.await(maxSecondsForNotifications, TimeUnit.SECONDS)); + + // Let threads terminate + terminateSemaphore.release(); + } + + @Test(expected = IllegalArgumentException.class) + public void coordinateCanNotBeNullWhenAddingListener() { + new CloudnameService(memoryBackend).addServiceListener(null, null); + } + + @Test(expected = IllegalArgumentException.class) + public void listenerCanNotBeNullWhenAddingListener() { + new CloudnameService(memoryBackend).addServiceListener(coordinate, null); + } + + @Test(expected = IllegalArgumentException.class) + public void serviceCannotBeNullWhenRegister() { + new CloudnameService(memoryBackend).registerService(null, null); + } + + @Test(expected = IllegalArgumentException.class) + public void serviceDataCannotBeNullWhenRegister() { + new CloudnameService(memoryBackend).registerService(coordinate, null); + } + + @Test(expected = IllegalArgumentException.class) + public void backendMustBeValid() { + new CloudnameService(null); + } 
+} diff --git a/cn-service/src/test/java/org/cloudname/service/EndpointTest.java b/cn-service/src/test/java/org/cloudname/service/EndpointTest.java new file mode 100644 index 00000000..8d1b368e --- /dev/null +++ b/cn-service/src/test/java/org/cloudname/service/EndpointTest.java @@ -0,0 +1,97 @@ +package org.cloudname.service; +import org.junit.Test; + +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; +import static org.hamcrest.CoreMatchers.is; + +/** + * Test the Endpoint class. Relatively straightforward; test creation and that + * fields are set correctly, test conversion to and from JSON, test the equals() + * implementation and test assertions in constructor. + */ +public class EndpointTest { + @Test + public void testCreation() { + final Endpoint endpoint = new Endpoint("foo", "localhost", 80); + assertThat(endpoint.getName(), is("foo")); + assertThat(endpoint.getHost(), is("localhost")); + assertThat(endpoint.getPort(), is(80)); + } + + @Test + public void testJsonConversion() { + final Endpoint endpoint = new Endpoint("bar", "baz", 8888); + final String jsonString = endpoint.toJsonString(); + + final Endpoint endpointCopy = Endpoint.fromJson(jsonString); + + assertThat(endpointCopy.getName(), is(endpoint.getName())); + assertThat(endpointCopy.getHost(), is(endpoint.getHost())); + assertThat(endpointCopy.getPort(), is(endpoint.getPort())); + } + + @Test + public void testEquals() { + final Endpoint a = new Endpoint("foo", "bar", 1); + final Endpoint b = new Endpoint("foo", "bar", 1); + assertThat(a.equals(b), is(true)); + assertThat(b.equals(a), is(true)); + assertThat(b.hashCode(), is(a.hashCode())); + + final Endpoint c = new Endpoint("bar", "foo", 1); + assertThat(a.equals(c), is(false)); + assertThat(b.equals(c), is(false)); + + final Endpoint d = new Endpoint("foo", "bar", 2); + assertThat(a.equals(d), is(false)); + + final Endpoint e = new Endpoint("foo", "baz", 1); + assertThat(a.equals(e), is(false)); + + 
assertThat(a.equals(null), is(false)); + assertThat(a.equals("some string"), is(false)); + } + + @Test (expected = IllegalArgumentException.class) + public void testNullName() { + new Endpoint(null, "foo", 0); + fail("Constructor should have thrown exception for null name"); + } + + @Test (expected = IllegalArgumentException.class) + public void testEmptyName() { + new Endpoint("", "foo", 0); + fail("Constructor should have thrown exception for null name"); + } + + @Test (expected = IllegalArgumentException.class) + public void testNullHost() { + new Endpoint("foo", null, 0); + fail("Constructor should have thrown exception for null host"); + } + + @Test (expected = IllegalArgumentException.class) + public void testEmptyHost() { + new Endpoint("foo", "", 0); + fail("Constructor should have thrown exception for null host"); + } + + @Test (expected = IllegalArgumentException.class) + public void testZeroPort() { + new Endpoint("foo", "bar", 0); + fail("Constructor should have thrown exception for 0 port"); + } + + @Test (expected = IllegalArgumentException.class) + public void testNegativePort() { + new Endpoint("foo", "bar", -1); + fail("Constructor should have thrown exception for 0 port"); + } + + @Test (expected = IllegalArgumentException.class) + public void testInvalidName() { + new Endpoint("æøå", "bar", 80); + fail("Constructor should have thrown exception for 0 port"); + } +} diff --git a/cn-service/src/test/java/org/cloudname/service/InstanceCoordinateTest.java b/cn-service/src/test/java/org/cloudname/service/InstanceCoordinateTest.java new file mode 100644 index 00000000..c7009f0b --- /dev/null +++ b/cn-service/src/test/java/org/cloudname/service/InstanceCoordinateTest.java @@ -0,0 +1,92 @@ +package org.cloudname.service; + +import org.cloudname.core.CloudnamePath; +import org.junit.Test; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static 
org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assert.assertThat; + +public class InstanceCoordinateTest { + @Test + public void testCreation() { + final String[] path = new String[] { "region", "tag", "service", "instance" }; + final InstanceCoordinate coordinate = new InstanceCoordinate(new CloudnamePath(path)); + + final String canonicalString = coordinate.toCanonicalString(); + assertThat(canonicalString, is("instance.service.tag.region")); + + final InstanceCoordinate fromCanonical = InstanceCoordinate.parse(canonicalString); + assertThat(fromCanonical.toCanonicalString(), is(canonicalString)); + assertThat(fromCanonical.getRegion(), is(coordinate.getRegion())); + assertThat(fromCanonical.getTag(), is(coordinate.getTag())); + assertThat(fromCanonical.getService(), is(coordinate.getService())); + assertThat(fromCanonical.getInstance(), is(coordinate.getInstance())); + + final String jsonString = coordinate.toJsonString(); + final InstanceCoordinate fromJson = InstanceCoordinate.fromJson(jsonString); + assertThat(fromJson.getRegion(), is(coordinate.getRegion())); + assertThat(fromJson.getTag(), is(coordinate.getTag())); + assertThat(fromJson.getService(), is(coordinate.getService())); + assertThat(fromJson.getInstance(), is(coordinate.getInstance())); + assertThat(fromJson.toCanonicalString(), is(coordinate.toCanonicalString())); + } + + @Test + public void testPathConversion() { + final CloudnamePath path = new CloudnamePath( + new String[] {"test", "local", "service", "instance" }); + + final InstanceCoordinate coordinate = new InstanceCoordinate(path); + + final CloudnamePath cnPath = coordinate.toCloudnamePath(); + assertThat(cnPath.length(), is(path.length())); + assertThat(cnPath, is(equalTo(path))); + } + + /** + * Ensure toString() has a sensible representation ('ish) + */ + @Test + public void toStringMethod() { + final CloudnamePath pathA = new CloudnamePath( + new String[] {"test", "local", "service", "instance" }); + final 
CloudnamePath pathB = new CloudnamePath( + new String[] {"test", "local", "service", "instance" }); + final CloudnamePath pathC = new CloudnamePath( + new String[] {"test", "local", "service", "x" }); + + final InstanceCoordinate a = new InstanceCoordinate(pathA); + final InstanceCoordinate b = new InstanceCoordinate(pathB); + final InstanceCoordinate c = new InstanceCoordinate(pathC); + assertThat(a.toString(), is(a.toString())); + assertThat(a.toString(), is(not(c.toString()))); + + assertThat(a.toCanonicalString(), is(b.toCanonicalString())); + } + + @Test + public void invalidStringConversion() { + assertThat(InstanceCoordinate.parse("foo:bar.baz"), is(nullValue())); + assertThat(InstanceCoordinate.parse(null), is(nullValue())); + assertThat(InstanceCoordinate.parse("foo.bar.baz"), is(nullValue())); + assertThat(InstanceCoordinate.parse(""), is(nullValue())); + } + + @Test (expected = IllegalArgumentException.class) + public void invalidNames2() { + assertThat(InstanceCoordinate.parse("æ.ø.å.a"), is(nullValue())); + } + + @Test (expected = IllegalArgumentException.class) + public void nullPathInConstructor() { + new InstanceCoordinate(null); + } + + @Test (expected = IllegalArgumentException.class) + public void invalidPathInConstructor() { + new InstanceCoordinate(new CloudnamePath(new String[] { "foo" })); + } +} diff --git a/cn-service/src/test/java/org/cloudname/service/ServiceCoordinateTest.java b/cn-service/src/test/java/org/cloudname/service/ServiceCoordinateTest.java new file mode 100644 index 00000000..c49126e0 --- /dev/null +++ b/cn-service/src/test/java/org/cloudname/service/ServiceCoordinateTest.java @@ -0,0 +1,87 @@ +package org.cloudname.service; + +import org.cloudname.core.CloudnamePath; +import org.junit.Test; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assert.assertThat; 
+ +public class ServiceCoordinateTest { + private final CloudnamePath cnPath = new CloudnamePath( + new String[] { "local", "test", "service" }); + + + @Test + public void testCreation() { + final ServiceCoordinate coordinate = new ServiceCoordinate(cnPath); + assertThat(coordinate.getRegion(), is(cnPath.get(0))); + assertThat(coordinate.getTag(), is(cnPath.get(1))); + assertThat(coordinate.getService(), is(cnPath.get(2))); + } + + @Test + public void testParse() { + final ServiceCoordinate coord = ServiceCoordinate.parse("service.tag.region"); + assertThat(coord.getRegion(), is("region")); + assertThat(coord.getTag(), is("tag")); + assertThat(coord.getService(), is("service")); + } + + @Test + public void testEquals() { + final ServiceCoordinate coordA = ServiceCoordinate.parse("a.b.c"); + final ServiceCoordinate coordB = ServiceCoordinate.parse("a.b.c"); + final ServiceCoordinate coordC = ServiceCoordinate.parse("a.b.d"); + final ServiceCoordinate coordD = ServiceCoordinate.parse("a.a.c"); + final ServiceCoordinate coordE = ServiceCoordinate.parse("a.a.a"); + final ServiceCoordinate coordF = ServiceCoordinate.parse("c.b.c"); + + assertThat(coordA, is(equalTo(coordB))); + assertThat(coordB, is(equalTo(coordA))); + + assertThat(coordA, is(not(equalTo(coordC)))); + assertThat(coordA, is(not(equalTo(coordD)))); + assertThat(coordA, is(not(equalTo(coordE)))); + assertThat(coordA, is(not(equalTo(coordF)))); + + assertThat(coordA.equals(null), is(false)); + assertThat(coordA.equals(new Object()), is(false)); + } + + @Test + public void testHashCode() { + final ServiceCoordinate coordA = ServiceCoordinate.parse("a.b.c"); + final ServiceCoordinate coordB = ServiceCoordinate.parse("a.b.c"); + final ServiceCoordinate coordC = ServiceCoordinate.parse("x.x.x"); + assertThat(coordA.hashCode(), is(coordB.hashCode())); + assertThat(coordC.hashCode(), is(not(coordA.hashCode()))); + } + @Test + public void testInvalidCoordinateString0() { + assertThat(ServiceCoordinate.parse("foo 
bar baz"), is(nullValue())); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidCoordinateString1() { + ServiceCoordinate.parse(".."); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidCoordinateString2() { + ServiceCoordinate.parse("_._._"); + } + + @Test(expected = IllegalArgumentException.class) + public void nullPathParameter() { + new ServiceCoordinate(null); + } + + @Test(expected = IllegalArgumentException.class) + public void illegalPathParameter() { + new ServiceCoordinate(new CloudnamePath(new String[] { "foo" })); + } + +} diff --git a/cn-service/src/test/java/org/cloudname/service/ServiceDataTest.java b/cn-service/src/test/java/org/cloudname/service/ServiceDataTest.java new file mode 100644 index 00000000..0154a78a --- /dev/null +++ b/cn-service/src/test/java/org/cloudname/service/ServiceDataTest.java @@ -0,0 +1,124 @@ +package org.cloudname.service; + +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assert.assertThat; + +public class ServiceDataTest { + @Test + public void testCreation() { + final Endpoint ep1 = new Endpoint("foo", "bar", 1); + final Endpoint ep2 = new Endpoint("bar", "baz", 1); + + final ServiceData data = new ServiceData(Arrays.asList(ep1, ep2)); + assertThat(data.getEndpoint("foo"), is(equalTo(ep1))); + assertThat(data.getEndpoint("bar"), is(equalTo(ep2))); + assertThat(data.getEndpoint("baz"), is(nullValue())); + } + + @Test + public void testAddRemoveEndpoint() { + final ServiceData data = new ServiceData(new ArrayList<Endpoint>()); + assertThat(data.getEndpoint("a"), is(nullValue())); + assertThat(data.getEndpoint("b"), is(nullValue())); + + final Endpoint ep1 = new Endpoint("a", "localhost", 80); + final Endpoint ep1a = new Endpoint("a", "localhost", 80); + // 
Endpoint can only be added once + assertThat(data.addEndpoint(ep1), is(true)); + assertThat(data.addEndpoint(ep1), is(false)); + // Endpoints must be unique + assertThat(data.addEndpoint(ep1a), is(false)); + + // Another endpoint can be added + final Endpoint ep2 = new Endpoint("b", "localhost", 80); + final Endpoint ep2a = new Endpoint("b", "localhost", 80); + assertThat(data.addEndpoint(ep2), is(true)); + // But the same rules applies + assertThat(data.addEndpoint(ep2), is(false)); + assertThat(data.addEndpoint(ep2a), is(false)); + + // Data now contains both endpoints + assertThat(data.getEndpoint("a"), is(equalTo(ep1))); + assertThat(data.getEndpoint("b"), is(equalTo(ep2))); + + assertThat(data.removeEndpoint(ep1), is(true)); + assertThat(data.removeEndpoint(ep1a), is(false)); + + // ...ditto for next endpoint + assertThat(data.removeEndpoint(ep2), is(true)); + assertThat(data.removeEndpoint(ep2), is(false)); + + // The endpoints with identical names can be added + assertThat(data.addEndpoint(ep1a), is(true)); + assertThat(data.addEndpoint(ep2a), is(true)); + } + + @Test + public void testConversionToFromJson() { + final Endpoint endpointA = new Endpoint("foo", "bar", 80); + final Endpoint endpointB = new Endpoint("baz", "bar", 81); + final ServiceData dataA = new ServiceData( + Arrays.asList(endpointA, endpointB)); + + final String jsonString = dataA.toJsonString(); + + final ServiceData dataB = ServiceData.fromJsonString(jsonString); + + assertThat(dataB.getEndpoint("foo"), is(endpointA)); + assertThat(dataB.getEndpoint("baz"), is(endpointB)); + } + + @Test + public void uniqueNamesAreRequired() { + final Endpoint endpointA = new Endpoint("foo", "bar", 80); + final Endpoint endpointB = new Endpoint("foo", "baz", 82); + final Endpoint endpointC = new Endpoint("foo", "localhost", 80); + final Endpoint endpointD = new Endpoint("foobar", "localhost", 80); + + final ServiceData serviceData = new ServiceData(new ArrayList<Endpoint>()); + 
assertThat(serviceData.addEndpoint(endpointA), is(true)); + assertThat(serviceData.addEndpoint(endpointB), is(false)); + assertThat(serviceData.addEndpoint(endpointC), is(false)); + assertThat(serviceData.addEndpoint(endpointD), is(true)); + } + + @Test (expected = IllegalArgumentException.class) + public void testInvalidJson1() { + final String nullStr = null; + ServiceData.fromJsonString(nullStr); + } + + @Test (expected = IllegalArgumentException.class) + public void testInvalidJson2() { + ServiceData.fromJsonString(""); + } + + @Test (expected = org.json.JSONException.class) + public void testInvalidJson3() { + ServiceData.fromJsonString("}{"); + } + + @Test (expected = org.json.JSONException.class) + public void testInvalidJson4() { + ServiceData.fromJsonString("{ \"foo\": 12 }"); + } + + @Test (expected = IllegalArgumentException.class) + public void addNullEndpoint() { + final ServiceData data = new ServiceData(); + data.addEndpoint(null); + } + + @Test (expected = IllegalArgumentException.class) + public void removeNullEndpoint() { + final ServiceData data = new ServiceData(); + data.removeEndpoint(null); + } +} diff --git a/cn-service/src/test/java/org/cloudname/service/ServiceHandleTest.java b/cn-service/src/test/java/org/cloudname/service/ServiceHandleTest.java new file mode 100644 index 00000000..5b43e37c --- /dev/null +++ b/cn-service/src/test/java/org/cloudname/service/ServiceHandleTest.java @@ -0,0 +1,104 @@ +package org.cloudname.service; + +import org.cloudname.core.CloudnamePath; +import org.cloudname.core.LeaseHandle; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; + +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertThat; + +public class ServiceHandleTest { + + @Test + public void testCreation() { + final InstanceCoordinate instanceCoordinate + = InstanceCoordinate.parse("instance.service.tag.region"); + final ServiceData serviceData = new 
ServiceData(new ArrayList<Endpoint>()); + final LeaseHandle handle = new LeaseHandle() { + @Override + public boolean writeLeaseData(String data) { + return true; + } + + @Override + public CloudnamePath getLeasePath() { + return instanceCoordinate.toCloudnamePath(); + } + + @Override + public void close() throws IOException { + // nothing + } + }; + + final ServiceHandle serviceHandle + = new ServiceHandle(instanceCoordinate, serviceData, handle); + + final Endpoint ep1 = new Endpoint("foo", "bar", 80); + assertThat(serviceHandle.registerEndpoint(ep1), is(true)); + assertThat(serviceHandle.registerEndpoint(ep1), is(false)); + + assertThat(serviceHandle.removeEndpoint(ep1), is(true)); + assertThat(serviceHandle.removeEndpoint(ep1), is(false)); + + serviceHandle.close(); + } + + @Test + public void testFailingHandle() { + final InstanceCoordinate instanceCoordinate + = InstanceCoordinate.parse("instance.service.tag.region"); + final Endpoint ep1 = new Endpoint("foo", "bar", 80); + + final ServiceData serviceData = new ServiceData(Arrays.asList(ep1)); + final LeaseHandle handle = new LeaseHandle() { + @Override + public boolean writeLeaseData(String data) { + return false; + } + + @Override + public CloudnamePath getLeasePath() { + return instanceCoordinate.toCloudnamePath(); + } + + @Override + public void close() throws IOException { + throw new IOException("I broke"); + } + }; + + final ServiceHandle serviceHandle + = new ServiceHandle(instanceCoordinate, serviceData, handle); + + final Endpoint ep2 = new Endpoint("bar", "baz", 81); + assertThat(serviceHandle.registerEndpoint(ep2), is(false)); + + assertThat(serviceHandle.removeEndpoint(ep1), is(false)); + assertThat(serviceHandle.removeEndpoint(ep2), is(false)); + + serviceHandle.close(); + } + + @Test (expected = IllegalArgumentException.class) + public void testWithNullParameters1() { + new ServiceHandle(null, null, null); + } + + @Test (expected = IllegalArgumentException.class) + public void 
testWithNullParameters2() { + new ServiceHandle(InstanceCoordinate.parse("a.b.c.d"), null, null); + } + + @Test (expected = IllegalArgumentException.class) + public void testWithNullParameters3() { + new ServiceHandle( + InstanceCoordinate.parse("a.b.c.d"), + new ServiceData(new ArrayList<Endpoint>()), + null); + } +} diff --git a/cn-zookeeper/README.md b/cn-zookeeper/README.md new file mode 100644 index 00000000..3ce1e123 --- /dev/null +++ b/cn-zookeeper/README.md @@ -0,0 +1,4 @@ +# ZooKeeper backend + +# Node structure +The root path is set to `/cn` and the leases are stored in `/cn/temporary` and `/cn/permanent`. Temporary leases use ephemeral nodes with a randomly assigned 4-byte long ID. Permanent leases are named by the client. The Curator library is used for the majority of ZooKeeper access. The containing nodes have the `CONTAINER` bit set, i.e. they will be cleaned up by ZooKeeper when there's no more child nodes inside each of the containers. Note that this feature is slated for ZooKeeper 3.5 which is currently in Alpha (as of November 2015). Until then the Curator library uses regular nodes so if it is deployed on a ZooKeeper 3.4 or lower manual cleanups of nodes is necessary. 
diff --git a/cn-zookeeper/pom.xml b/cn-zookeeper/pom.xml new file mode 100644 index 00000000..4d33d081 --- /dev/null +++ b/cn-zookeeper/pom.xml @@ -0,0 +1,81 @@ +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + + <parent> + <groupId>org.cloudname</groupId> + <artifactId>cloudname-parent</artifactId> + <version>3.0-SNAPSHOT</version> + </parent> + + <artifactId>cn-zookeeper</artifactId> + <packaging>jar</packaging> + + <name>Cloudname ZooKeeper backend</name> + <description>ZooKeeper backend for cloudname</description> + <url>https://github.com/Cloudname/cloudname</url> + + <dependencies> + <dependency> + <groupId>org.cloudname</groupId> + <artifactId>cn-core</artifactId> + </dependency> + + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>org.hamcrest</groupId> + <artifactId>hamcrest-all</artifactId> + <version>1.3</version> + </dependency> + + <dependency> + <groupId>org.apache.curator</groupId> + <artifactId>curator-framework</artifactId> + <version>2.9.0</version> + </dependency> + + <dependency> + <groupId>org.apache.curator</groupId> + <artifactId>curator-recipes</artifactId> + <version>2.9.0</version> + </dependency> + + <dependency> + <groupId>org.apache.curator</groupId> + <artifactId>curator-test</artifactId> + <version>2.9.0</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-nop</artifactId> + <version>1.7.6</version> + </dependency> + <dependency> + <groupId>org.cloudname</groupId> + <artifactId>testtools</artifactId> + <scope>test</scope> + </dependency> + + </dependencies> + + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-plugin</artifactId> + 
</plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-compiler-plugin</artifactId> + </plugin> + + </plugins> + </build> +</project> diff --git a/cn-zookeeper/src/main/java/org/cloudname/backends/zookeeper/NodeCollectionWatcher.java b/cn-zookeeper/src/main/java/org/cloudname/backends/zookeeper/NodeCollectionWatcher.java new file mode 100644 index 00000000..e8ccf998 --- /dev/null +++ b/cn-zookeeper/src/main/java/org/cloudname/backends/zookeeper/NodeCollectionWatcher.java @@ -0,0 +1,257 @@ +package org.cloudname.backends.zookeeper; + +import com.google.common.base.Charsets; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.Stat; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Monitor a set of child nodes for changes. Needs to do this with the ZooKeeper API since + * Curator doesn't provide the necessary interface and the PathChildrenCache is best effort + * (and not even a very good effort) + * + * Watches are kept as usual and the mzxid for each node is kept. If that changes between + * watches it mens we've missed an event and the appropriate event is generated to the + * listener. + * + * Note that this class only watches for changes one level down. Changes in children aren't + * monitored. The path must exist beforehand. 
+ * + * @author [email protected] + */ +public class NodeCollectionWatcher { + private static final Logger LOG = Logger.getLogger(NodeCollectionWatcher.class.getName()); + + private final Map<String, Long> childMzxid = new HashMap<>(); + private final Object syncObject = new Object(); + + private final ZooKeeper zk; + private final String pathToWatch; + private final AtomicBoolean shuttingDown = new AtomicBoolean(false); + private final NodeWatcherListener listener; + + + /** + * @param zk ZooKeeper instance to use + * @param pathToWatch Path to observe + * @param listener Listener for callbacks + */ + public NodeCollectionWatcher( + final ZooKeeper zk, final String pathToWatch, final NodeWatcherListener listener) { + this.pathToWatch = pathToWatch; + this.zk = zk; + this.listener = listener; + readChildNodes(); + } + + /** + * Shut down watchers. The listener won't get notified of changes after it has been shut down. + */ + public void shutdown() { + shuttingDown.set(true); + } + + /** + * Watcher for node collections. Set by getChildren() + */ + private final Watcher nodeCollectionWatcher = new Watcher() { + @Override + public void process(WatchedEvent watchedEvent) { + switch (watchedEvent.getType()) { + case NodeChildrenChanged: + // Child values have changed, read children, generate events + readChildNodes(); + break; + case None: + // Some zookeeper event. Watches might not apply anymore. Reapply. + switch (watchedEvent.getState()) { + case ConnectedReadOnly: + LOG.severe("Connected to readonly cluster"); + // Connected to a cluster without quorum. Nodes might not be + // correct but re-read the nodes. + readChildNodes(); + break; + case SyncConnected: + LOG.info("Connected to cluster"); + // (re-)Connected to the cluster. Nodes must be re-read. Discard + // those that aren't found, keep unchanged ones. + readChildNodes(); + break; + case Disconnected: + // Disconnected from the cluster. 
The nodes might not be + // up to date (but a reconnect might solve the issue) + LOG.log(Level.WARNING, "Disconnected from zk cluster"); + break; + case Expired: + // Session has expired. Nodes are no longer available + removeAllChildNodes(); + break; + default: + break; + } + } + + } + }; + + /** + * A watcher for the child nodes (set via getData() + */ + private final Watcher changeWatcher = new Watcher() { + @Override + public void process(WatchedEvent watchedEvent) { + if (shuttingDown.get()) { + return; + } + switch (watchedEvent.getType()) { + case NodeDeleted: + removeChildNode(watchedEvent.getPath()); + break; + case NodeDataChanged: + processNode(watchedEvent.getPath()); + break; + + } + } + }; + + /** + * Remove all nodes. + */ + private void removeAllChildNodes() { + System.out.println("Remove all child nodes"); + final Set<String> nodesToRemove = new HashSet<>(); + synchronized (syncObject) { + nodesToRemove.addAll(childMzxid.keySet()); + } + for (final String node : nodesToRemove) { + removeChildNode(node); + } + } + + /** + * Read nodes from ZooKeeper, generating events as necessary. If a node is missing from the + * result it will generate a remove notification, ditto with new nodes and changes in nodes. + */ + private void readChildNodes() { + try { + final List<String> childNodes = zk.getChildren(pathToWatch, nodeCollectionWatcher); + final Set<String> childrenToDelete = new HashSet<>(); + synchronized (syncObject) { + childrenToDelete.addAll(childMzxid.keySet()); + } + for (final String nodeName : childNodes) { + processNode(pathToWatch + "/" + nodeName); + childrenToDelete.remove(pathToWatch + "/" + nodeName); + } + for (final String nodePath : childrenToDelete) { + removeChildNode(nodePath); + } + } catch (final KeeperException.ConnectionLossException e) { + // We've been disconnected. 
Let the watcher deal with it + if (!shuttingDown.get()) { + LOG.info("Lost connection to ZooKeeper while reading child nodes."); + } + } catch (final KeeperException.NoNodeException e) { + // Node has been removed. Ignore the error? + removeChildNode(e.getPath()); + } catch (final KeeperException|InterruptedException e) { + LOG.log(Level.WARNING, "Got exception reading child nodes", e); + } + } + + /** + * Add a node, generate create or data change notification if needed. + */ + private void processNode(final String nodePath) { + if (shuttingDown.get()) { + return; + } + try { + final Stat stat = new Stat(); + final byte[] nodeData = zk.getData(nodePath, changeWatcher, stat); + final String data = new String(nodeData, Charsets.UTF_8); + synchronized (syncObject) { + if (!childMzxid.containsKey(nodePath)) { + childMzxid.put(nodePath, stat.getMzxid()); + generateCreateEvent(nodePath, data); + return; + } + final Long zxid = childMzxid.get(nodePath); + if (zxid != stat.getMzxid()) { + // the data have changed. Generate event + childMzxid.put(nodePath, stat.getMzxid()); + generateDataChangeEvent(nodePath, data); + } + } + } catch (final KeeperException.ConnectionLossException e) { + // We've been disconnected. Let the watcher deal with it + if (!shuttingDown.get()) { + LOG.info("Lost connection to ZooKeeper while reading child nodes."); + } + } catch (final KeeperException.NoNodeException e) { + removeChildNode(e.getPath()); + // Node has been removed before we got to do anything. Ignore error? + } catch (final KeeperException|InterruptedException e) { + LOG.log(Level.WARNING, "Got exception adding child node with path " + nodePath, e); + } catch (Exception ex) { + LOG.log(Level.SEVERE, "Pooop!", ex); + } + } + + /** + * Remove node. Generate remove event if needed. 
+ */ + private void removeChildNode(final String nodePath) { + synchronized (syncObject) { + if (childMzxid.containsKey(nodePath)) { + childMzxid.remove(nodePath); + generateRemoveEvent(nodePath); + } + } + } + + /** + * Invoke nodeCreated on listener + */ + private void generateCreateEvent(final String nodePath, final String data) { + try { + listener.nodeCreated(nodePath, data); + } catch (final Exception exception) { + LOG.log(Level.WARNING, "Got exception calling listener.nodeCreated", exception); + } + } + + /** + * Invoke dataChanged on listener + */ + private void generateDataChangeEvent(final String nodePath, final String data) { + try { + listener.dataChanged(nodePath, data); + } catch (final Exception exception) { + LOG.log(Level.WARNING, "Got exception calling listener.dataChanged", exception); + } + } + + /** + * Invoke nodeRemoved on listener + */ + private void generateRemoveEvent(final String nodePath) { + try { + listener.nodeRemoved(nodePath); + } catch (final Exception exception) { + LOG.log(Level.WARNING, "Got exception calling listener.nodeRemoved", exception); + } + } +} diff --git a/cn-zookeeper/src/main/java/org/cloudname/backends/zookeeper/NodeWatcherListener.java b/cn-zookeeper/src/main/java/org/cloudname/backends/zookeeper/NodeWatcherListener.java new file mode 100644 index 00000000..91e6a77c --- /dev/null +++ b/cn-zookeeper/src/main/java/org/cloudname/backends/zookeeper/NodeWatcherListener.java @@ -0,0 +1,38 @@ +package org.cloudname.backends.zookeeper; + +/** + * Listener interface for node change events + * + * @author [email protected] + */ +public interface NodeWatcherListener { + /** + * A node is created. Note that rapid changes with create, data update (and even + * create + delete + create + data change might yield just one create notification. + * + * @param zkPath path to node + * @param data data of node + */ + void nodeCreated(final String zkPath, final String data); + + /** + * Data on a node is changed. 
Note that you might not get data change notifications + * for nodes that are created and updated within a short time span, only a create + * notification. + * Nodes that are created, deleted, then recreated will also generate this event, even if + * the data is unchanged. + * + * @param zkPath path of node + * @param data data of node + */ + void dataChanged(final String zkPath, final String data); + + /** + * Node is removed. + * + * @param zkPath Path of the node that is removed. + */ + void nodeRemoved(final String zkPath); +} + + diff --git a/cn-zookeeper/src/main/java/org/cloudname/backends/zookeeper/ZooKeeperBackend.java b/cn-zookeeper/src/main/java/org/cloudname/backends/zookeeper/ZooKeeperBackend.java new file mode 100644 index 00000000..d51632d8 --- /dev/null +++ b/cn-zookeeper/src/main/java/org/cloudname/backends/zookeeper/ZooKeeperBackend.java @@ -0,0 +1,342 @@ +package org.cloudname.backends.zookeeper; +import com.google.common.base.Charsets; +import org.apache.curator.RetryPolicy; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.retry.ExponentialBackoffRetry; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.data.Stat; +import org.cloudname.core.CloudnameBackend; +import org.cloudname.core.CloudnamePath; +import org.cloudname.core.LeaseHandle; +import org.cloudname.core.LeaseListener; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * A ZooKeeper backend for Cloudname. Leases are represented as nodes; client leases are ephemeral + * nodes inside container nodes and permanent leases are container nodes. 
+ * + * @author [email protected] + */ +public class ZooKeeperBackend implements CloudnameBackend { + private static final Logger LOG = Logger.getLogger(ZooKeeperBackend.class.getName()); + private static final String TEMPORARY_ROOT = "/cn/temporary/"; + private static final String PERMANENT_ROOT = "/cn/permanent/"; + private static final int CONNECTION_TIMEOUT_SECONDS = 30; + + // PRNG for instance names. These will be "random enough" for instance identifiers + private final Random random = new Random(); + private final CuratorFramework curator; + private final Map<LeaseListener, NodeCollectionWatcher> clientListeners = new HashMap<>(); + private final Map<LeaseListener, NodeCollectionWatcher> permanentListeners = new HashMap<>(); + private final Object syncObject = new Object(); + /** + * @param connectionString ZooKeeper connection string + * @throws IllegalStateException if the cluster isn't available. + */ + public ZooKeeperBackend(final String connectionString) { + final RetryPolicy retryPolicy = new ExponentialBackoffRetry(200, 10); + curator = CuratorFrameworkFactory.newClient(connectionString, retryPolicy); + curator.start(); + + try { + curator.blockUntilConnected(CONNECTION_TIMEOUT_SECONDS, TimeUnit.SECONDS); + LOG.info("Connected to zk cluster @ " + connectionString); + } catch (final InterruptedException ie) { + throw new IllegalStateException("Could not connect to ZooKeeper", ie); + } + } + + @Override + public LeaseHandle createTemporaryLease(final CloudnamePath path, final String data) { + boolean created = false; + CloudnamePath tempInstancePath = null; + String tempZkPath = null; + while (!created) { + final long instanceId = random.nextLong(); + tempInstancePath = new CloudnamePath(path, Long.toHexString(instanceId)); + tempZkPath = TEMPORARY_ROOT + tempInstancePath.join('/'); + try { + + curator.create() + .creatingParentContainersIfNeeded() + .withMode(CreateMode.EPHEMERAL) + .forPath(tempZkPath, data.getBytes(Charsets.UTF_8)); + created = 
true; + } catch (final Exception ex) { + LOG.log(Level.WARNING, "Could not create client node at " + tempInstancePath, ex); + } + } + final CloudnamePath instancePath = tempInstancePath; + final String zkInstancePath = tempZkPath; + return new LeaseHandle() { + private AtomicBoolean closed = new AtomicBoolean(false); + + @Override + public boolean writeLeaseData(final String data) { + if (closed.get()) { + LOG.info("Attempt to write data to closed leased handle " + data); + return false; + } + return writeTemporaryLeaseData(instancePath, data); + } + + @Override + public CloudnamePath getLeasePath() { + if (closed.get()) { + return null; + } + return instancePath; + } + + @Override + public void close() throws IOException { + if (closed.get()) { + return; + } + try { + curator.delete().forPath(zkInstancePath); + closed.set(true); + } catch (final Exception ex) { + throw new IOException(ex); + } + } + }; + } + + @Override + public boolean writeTemporaryLeaseData(final CloudnamePath path, final String data) { + final String zkPath = TEMPORARY_ROOT + path.join('/'); + try { + final Stat nodeStat = curator.checkExists().forPath(zkPath); + if (nodeStat == null) { + LOG.log(Level.WARNING, "Could not write client lease data for " + path + + " with data since the path does not exist. 
Data = " + data); + } + curator.setData().forPath(zkPath, data.getBytes(Charsets.UTF_8)); + return true; + } catch (final Exception ex) { + LOG.log(Level.WARNING, "Got exception writing lease data to " + path + + " with data " + data); + return false; + } + } + + @Override + public String readTemporaryLeaseData(final CloudnamePath path) { + if (path == null) { + return null; + } + final String zkPath = TEMPORARY_ROOT + path.join('/'); + try { + curator.sync().forPath(zkPath); + final byte[] bytes = curator.getData().forPath(zkPath); + return new String(bytes, Charsets.UTF_8); + } catch (final Exception ex) { + LOG.log(Level.WARNING, "Got exception reading client lease data at " + path, ex); + } + return null; + } + + private CloudnamePath toCloudnamePath(final String zkPath, final String pathPrefix) { + final String clientPath = zkPath.substring(pathPrefix.length()); + final String[] elements = clientPath.split("/"); + return new CloudnamePath(elements); + } + + @Override + public void addTemporaryLeaseListener( + final CloudnamePath pathToObserve, final LeaseListener listener) { + // Ideally the PathChildrenCache class in Curator would be used here to keep track of the + // changes but it is ever so slightly broken and misses most of the watches that ZooKeeper + // triggers, ignores the mzxid on the nodes and generally makes a mess of things. Enter + // custom code. 
+ final String zkPath = TEMPORARY_ROOT + pathToObserve.join('/'); + try { + curator.createContainers(zkPath); + final NodeCollectionWatcher watcher = new NodeCollectionWatcher(curator.getZookeeperClient().getZooKeeper(), + zkPath, + new NodeWatcherListener() { + @Override + public void nodeCreated(final String path, final String data) { + listener.leaseCreated(toCloudnamePath(path, TEMPORARY_ROOT), data); + } + @Override + public void dataChanged(final String path, final String data) { + listener.dataChanged(toCloudnamePath(path, TEMPORARY_ROOT), data); + } + @Override + public void nodeRemoved(final String path) { + listener.leaseRemoved(toCloudnamePath(path, TEMPORARY_ROOT)); + } + }); + + synchronized (syncObject) { + clientListeners.put(listener, watcher); + } + } catch (final Exception exception) { + LOG.log(Level.WARNING, "Got exception when creating node watcher", exception); + } + } + + @Override + public void removeTemporaryLeaseListener(final LeaseListener listener) { + synchronized (syncObject) { + final NodeCollectionWatcher watcher = clientListeners.get(listener); + if (watcher != null) { + clientListeners.remove(listener); + watcher.shutdown(); + } + } + } + + @Override + public boolean createPermanantLease(final CloudnamePath path, final String data) { + final String zkPath = PERMANENT_ROOT + path.join('/'); + try { + curator.sync().forPath(zkPath); + final Stat nodeStat = curator.checkExists().forPath(zkPath); + if (nodeStat == null) { + curator.create() + .creatingParentContainersIfNeeded() + .forPath(zkPath, data.getBytes(Charsets.UTF_8)); + return true; + } + LOG.log(Level.INFO, "Attempt to create permanent node at " + path + + " with data " + data + " but it already exists"); + } catch (final Exception ex) { + LOG.log(Level.WARNING, "Got exception creating parent container for permanent lease" + + " for lease " + path + " with data " + data, ex); + } + return false; + } + + @Override + public boolean removePermanentLease(final CloudnamePath 
path) { + final String zkPath = PERMANENT_ROOT + path.join('/'); + try { + final Stat nodeStat = curator.checkExists().forPath(zkPath); + if (nodeStat != null) { + curator.delete() + .withVersion(nodeStat.getVersion()) + .forPath(zkPath); + return true; + } + return false; + } catch (final Exception ex) { + LOG.log(Level.WARNING, "Got error removing permanent lease for lease " + path, ex); + return false; + } + } + + @Override + public boolean writePermanentLeaseData(final CloudnamePath path, final String data) { + final String zkPath = PERMANENT_ROOT + path.join('/'); + try { + curator.sync().forPath(zkPath); + final Stat nodeStat = curator.checkExists().forPath(zkPath); + if (nodeStat == null) { + LOG.log(Level.WARNING, "Can't write permanent lease data for lease " + path + + " with data " + data + " since the lease doesn't exist"); + return false; + } + curator.setData() + .withVersion(nodeStat.getVersion()) + .forPath(zkPath, data.getBytes(Charsets.UTF_8)); + } catch (final Exception ex) { + LOG.log(Level.WARNING, "Got exception writing permanent lease data for " + path + + " with data " + data, ex); + return false; + } + return true; + } + + @Override + public String readPermanentLeaseData(final CloudnamePath path) { + final String zkPath = PERMANENT_ROOT + path.join('/'); + try { + curator.sync().forPath(zkPath); + final byte[] bytes = curator.getData().forPath(zkPath); + return new String(bytes, Charsets.UTF_8); + } catch (final Exception ex) { + if (ex instanceof KeeperException.NoNodeException) { + // OK - nothing to worry about + return null; + } + LOG.log(Level.WARNING, "Got exception reading permanent lease data for " + path, ex); + return null; + } + } + + @Override + public void addPermanentLeaseListener(final CloudnamePath pathToObserve, final LeaseListener listener) { + try { + + final String parentPath = PERMANENT_ROOT + pathToObserve.getParent().join('/'); + final String fullPath = PERMANENT_ROOT + pathToObserve.join('/'); + 
curator.createContainers(parentPath); + final NodeCollectionWatcher watcher = new NodeCollectionWatcher(curator.getZookeeperClient().getZooKeeper(), + parentPath, + new NodeWatcherListener() { + @Override + public void nodeCreated(final String path, final String data) { + if (path.equals(fullPath)) { + listener.leaseCreated(toCloudnamePath(path, PERMANENT_ROOT), data); + } + } + @Override + public void dataChanged(final String path, final String data) { + if (path.equals(fullPath)) { + listener.dataChanged(toCloudnamePath(path, PERMANENT_ROOT), data); + } + } + @Override + public void nodeRemoved(final String path) { + if (path.equals(fullPath)) { + listener.leaseRemoved(toCloudnamePath(path, PERMANENT_ROOT)); + } + } + }); + + synchronized (syncObject) { + permanentListeners.put(listener, watcher); + } + } catch (final Exception exception) { + LOG.log(Level.WARNING, "Got exception when creating node watcher", exception); + } + } + + @Override + public void removePermanentLeaseListener(final LeaseListener listener) { + synchronized (syncObject) { + final NodeCollectionWatcher watcher = permanentListeners.get(listener); + if (watcher != null) { + permanentListeners.remove(listener); + watcher.shutdown(); + } + } + } + + @Override + public void close() { + synchronized (syncObject) { + for (final NodeCollectionWatcher watcher : clientListeners.values()) { + watcher.shutdown(); + } + clientListeners.clear(); + for (final NodeCollectionWatcher watcher : permanentListeners.values()) { + watcher.shutdown(); + } + permanentListeners.clear(); + } + } +} diff --git a/cn-zookeeper/src/test/java/org/cloudname/backends/zookeeper/NodeCollectionWatcherTest.java b/cn-zookeeper/src/test/java/org/cloudname/backends/zookeeper/NodeCollectionWatcherTest.java new file mode 100644 index 00000000..adb41310 --- /dev/null +++ b/cn-zookeeper/src/test/java/org/cloudname/backends/zookeeper/NodeCollectionWatcherTest.java @@ -0,0 +1,367 @@ +package org.cloudname.backends.zookeeper; + +import 
org.apache.curator.CuratorConnectionLossException; +import org.apache.curator.RetryPolicy; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.retry.RetryUntilElapsed; +import org.apache.curator.test.InstanceSpec; +import org.apache.curator.test.TestingCluster; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.Stat; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.nio.charset.Charset; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeThat; + +/** + * Test the node watching mechanism. + */ +public class NodeCollectionWatcherTest { + private static TestingCluster zkServer; + private static CuratorFramework curator; + private static ZooKeeper zooKeeper; + + @BeforeClass + public static void setUp() throws Exception { + zkServer = new TestingCluster(3); + zkServer.start(); + final RetryPolicy retryPolicy = new RetryUntilElapsed(60000, 100); + curator = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), retryPolicy); + curator.start(); + curator.blockUntilConnected(10, TimeUnit.SECONDS); + zooKeeper = curator.getZookeeperClient().getZooKeeper(); + } + + @AfterClass + public static void tearDown() throws Exception { + zkServer.close(); + } + + private final AtomicInteger counter = new AtomicInteger(0); + + private byte[] getData() { + return ("" + counter.incrementAndGet()).getBytes(Charset.defaultCharset()); + } + + /** + * A custom listener that counts and counts down notifications. 
+ */ + private class ListenerCounter implements NodeWatcherListener { + // Then a few counters to check the number of events + public AtomicInteger createCount = new AtomicInteger(0); + public AtomicInteger dataCount = new AtomicInteger(0); + public AtomicInteger removeCount = new AtomicInteger(0); + public CountDownLatch createLatch; + public CountDownLatch dataLatch; + public CountDownLatch removeLatch; + + public ListenerCounter(final int createLatchCount, final int dataLatchCount, final int removeLatchCount) { + createLatch = new CountDownLatch(createLatchCount); + dataLatch = new CountDownLatch(dataLatchCount); + removeLatch = new CountDownLatch(removeLatchCount); + } + + @Override + public void nodeCreated(String zkPath, String data) { + createCount.incrementAndGet(); + createLatch.countDown(); + } + + @Override + public void dataChanged(String zkPath, String data) { + dataCount.incrementAndGet(); + dataLatch.countDown(); + } + + @Override + public void nodeRemoved(String zkPath) { + removeCount.incrementAndGet(); + removeLatch.countDown(); + } + } + + @Test + public void sequentialNotifications() throws Exception { + final int maxPropagationTime = 4; + + final String pathPrefix = "/foo/slow"; + curator.create().creatingParentsIfNeeded().forPath(pathPrefix); + + final ListenerCounter listener = new ListenerCounter(1, 1, 1); + + final NodeCollectionWatcher nodeCollectionWatcher = new NodeCollectionWatcher(zooKeeper, pathPrefix, listener); + + // Create should trigger create notification (and no other notification) + curator.create().forPath(pathPrefix + "/node1", getData()); + assertTrue(listener.createLatch.await(maxPropagationTime, TimeUnit.MILLISECONDS)); + assertThat(listener.createCount.get(), is(1)); + assertThat(listener.dataCount.get(), is(0)); + assertThat(listener.removeCount.get(), is(0)); + + // Data change should trigger the data notification (and no other notification) + curator.setData().forPath(pathPrefix + "/node1", getData()); + 
assertTrue(listener.dataLatch.await(maxPropagationTime, TimeUnit.MILLISECONDS)); + assertThat(listener.createCount.get(), is(1)); + assertThat(listener.dataCount.get(), is(1)); + assertThat(listener.removeCount.get(), is(0)); + + // Delete should trigger the remove notification (and no other notification) + curator.delete().forPath(pathPrefix + "/node1"); + assertTrue(listener.removeLatch.await(maxPropagationTime, TimeUnit.MILLISECONDS)); + assertThat(listener.createCount.get(), is(1)); + assertThat(listener.dataCount.get(), is(1)); + assertThat(listener.removeCount.get(), is(1)); + + nodeCollectionWatcher.shutdown(); + + // Ensure that there are no notifications when the watcher shuts down + curator.create().forPath(pathPrefix + "node_9", getData()); + Thread.sleep(maxPropagationTime); + assertThat(listener.createCount.get(), is(1)); + assertThat(listener.dataCount.get(), is(1)); + assertThat(listener.removeCount.get(), is(1)); + + curator.setData().forPath(pathPrefix + "node_9", getData()); + Thread.sleep(maxPropagationTime); + assertThat(listener.createCount.get(), is(1)); + assertThat(listener.dataCount.get(), is(1)); + assertThat(listener.removeCount.get(), is(1)); + + curator.delete().forPath(pathPrefix + "node_9"); + Thread.sleep(maxPropagationTime); + assertThat(listener.createCount.get(), is(1)); + assertThat(listener.dataCount.get(), is(1)); + assertThat(listener.removeCount.get(), is(1)); + } + + /** + * Make rapid changes to ZooKeeper. The changes (most likely) won't be caught by the + * watcher events but must be generated by the class itself. Ensure the correct number + * of notifications is generated. 
+ */ + @Test + public void rapidChanges() throws Exception { + final int maxPropagationTime = 100; + + final String pathPrefix = "/foo/rapido"; + + curator.create().creatingParentsIfNeeded().forPath(pathPrefix); + + final int numNodes = 50; + final ListenerCounter listener = new ListenerCounter(numNodes, 0, numNodes); + + final NodeCollectionWatcher nodeCollectionWatcher = new NodeCollectionWatcher(zooKeeper, pathPrefix, listener); + // Create all of the nodes at once + for (int i = 0; i < numNodes; i++) { + curator.create().forPath(pathPrefix + "/node" + i, getData()); + } + assertTrue(listener.createLatch.await(maxPropagationTime, TimeUnit.MILLISECONDS)); + assertThat(listener.createCount.get(), is(numNodes)); + assertThat(listener.dataCount.get(), is(0)); + assertThat(listener.removeCount.get(), is(0)); + + // Repeat data test multiple times to ensure data changes are detected + // repeatedly on the same nodes + int total = 0; + for (int j = 0; j < 5; j++) { + listener.dataLatch = new CountDownLatch(numNodes); + // Since there's a watch for every node all of the data changes should be detected + for (int i = 0; i < numNodes; i++) { + curator.setData().forPath(pathPrefix + "/node" + i, getData()); + } + total += numNodes; + assertTrue(listener.dataLatch.await(maxPropagationTime, TimeUnit.MILLISECONDS)); + assertThat(listener.createCount.get(), is(numNodes)); + assertThat(listener.dataCount.get(), is(total)); + assertThat(listener.removeCount.get(), is(0)); + } + + // Finally, remove everything in rapid succession + // Create all of the nodes at once + for (int i = 0; i < numNodes; i++) { + curator.delete().forPath(pathPrefix + "/node" + i); + } + + assertTrue(listener.removeLatch.await(maxPropagationTime, TimeUnit.MILLISECONDS)); + assertThat(listener.createCount.get(), is(numNodes)); + assertThat(listener.dataCount.get(), is(total)); + assertThat(listener.removeCount.get(), is(numNodes)); + + nodeCollectionWatcher.shutdown(); + } + + /** + * Emulate a network 
partition by killing off two out of three ZooKeeper instances + * and check the output. Set the system property NodeWatcher.SlowTests to "ok" to enable + * it. The test itself can be quite slow depending on what Curator is connected to. If + * Curator uses one of the servers that are killed it will try a reconnect and the whole + * test might take up to 120-180 seconds to complete. + */ + @Test + public void networkPartitionTest() throws Exception { + assumeThat(System.getProperty("NodeCollectionWatcher.SlowTests"), is("ok")); + + final int maxPropagationTime = 10; + + final String pathPrefix = "/foo/partition"; + curator.create().creatingParentsIfNeeded().forPath(pathPrefix); + + final int nodeCount = 10; + + final ListenerCounter listener = new ListenerCounter(nodeCount, nodeCount, nodeCount); + + final NodeCollectionWatcher nodeCollectionWatcher = new NodeCollectionWatcher(zooKeeper, pathPrefix, listener); + + // Create a few nodes to set the initial state + for (int i = 0; i < nodeCount; i++) { + curator.create().forPath(pathPrefix + "/node" + i, getData()); + } + assertTrue(listener.createLatch.await(maxPropagationTime, TimeUnit.MILLISECONDS)); + assertThat(listener.createCount.get(), is(nodeCount)); + assertThat(listener.removeCount.get(), is(0)); + assertThat(listener.dataCount.get(), is(0)); + + final InstanceSpec firstInstance = zkServer.findConnectionInstance(zooKeeper); + zkServer.killServer(firstInstance); + + listener.createLatch = new CountDownLatch(1); + // Client should reconnect to one of the two remaining + curator.create().forPath(pathPrefix + "/stillalive", getData()); + // Wait for the notification to go through. 
This could take some time since there's + // reconnects and all sorts of magic happening under the hood + assertTrue(listener.createLatch.await(10, TimeUnit.SECONDS)); + assertThat(listener.createCount.get(), is(nodeCount + 1)); + assertThat(listener.removeCount.get(), is(0)); + assertThat(listener.dataCount.get(), is(0)); + + // Kill the 2nd server. The cluster won't have a quorum now + final InstanceSpec secondInstance = zkServer.findConnectionInstance(zooKeeper); + assertThat(firstInstance, is(not(secondInstance))); + zkServer.killServer(secondInstance); + + boolean retry; + do { + System.out.println("Checking node with Curator... This might take a while..."); + try { + final Stat stat = curator.checkExists().forPath(pathPrefix); + retry = false; + assertThat(stat, is(notNullValue())); + } catch (CuratorConnectionLossException ex) { + System.out.println("Missing connection. Retrying"); + retry = true; + } + } while (retry); + + zkServer.restartServer(firstInstance); + zkServer.restartServer(secondInstance); + listener.createLatch = new CountDownLatch(1); + + System.out.println("Creating node via Curator... 
This might take a while..."); + curator.create().forPath(pathPrefix + "/imback", getData()); + + assertTrue(listener.createLatch.await(maxPropagationTime, TimeUnit.MILLISECONDS)); + assertThat(listener.createCount.get(), is(nodeCount + 2)); + assertThat(listener.removeCount.get(), is(0)); + assertThat(listener.dataCount.get(), is(0)); + + // Ensure data notifications are propagated after a failure + for (int i = 0; i < nodeCount; i++) { + final Stat stat = curator.setData().forPath(pathPrefix + "/node" + i, getData()); + assertThat(stat, is(notNullValue())); + } + assertTrue(listener.dataLatch.await(maxPropagationTime, TimeUnit.MILLISECONDS)); + assertThat(listener.createCount.get(), is(nodeCount + 2)); + assertThat(listener.removeCount.get(), is(0)); + assertThat(listener.dataCount.get(), is(nodeCount)); + + // ..and remove notifications are sent + for (int i = 0; i < nodeCount; i++) { + curator.delete().forPath(pathPrefix + "/node" + i); + } + assertTrue(listener.removeLatch.await(maxPropagationTime, TimeUnit.MILLISECONDS)); + assertThat(listener.createCount.get(), is(nodeCount + 2)); + assertThat(listener.removeCount.get(), is(nodeCount)); + assertThat(listener.dataCount.get(), is(nodeCount)); + + nodeCollectionWatcher.shutdown(); + + } + + /** + * Be a misbehaving client and throw exceptions in the listners. Ensure the watcher still works + * afterwards. 
+ */ + @Test + public void misbehavingClient() throws Exception { + final int propagationTime = 5; + + final AtomicBoolean triggerExceptions = new AtomicBoolean(false); + final CountDownLatch createLatch = new CountDownLatch(1); + final CountDownLatch dataLatch = new CountDownLatch(1); + final CountDownLatch removeLatch = new CountDownLatch(1); + + final NodeWatcherListener listener = new NodeWatcherListener() { + @Override + public void nodeCreated(String zkPath, String data) { + if (triggerExceptions.get()) { + throw new RuntimeException("boo!"); + } + createLatch.countDown(); + } + + @Override + public void dataChanged(String zkPath, String data) { + if (triggerExceptions.get()) { + throw new RuntimeException("boo!"); + } + dataLatch.countDown(); + } + + @Override + public void nodeRemoved(String zkPath) { + if (triggerExceptions.get()) { + throw new RuntimeException("boo!"); + } + removeLatch.countDown(); + } + }; + + final String pathPrefix = "/foo/misbehaving"; + + curator.create().creatingParentsIfNeeded().forPath(pathPrefix); + + final NodeCollectionWatcher nodeCollectionWatcher = new NodeCollectionWatcher(zooKeeper, pathPrefix, listener); + + triggerExceptions.set(true); + curator.create().forPath(pathPrefix + "/first", getData()); + Thread.sleep(propagationTime); + curator.setData().forPath(pathPrefix + "/first", getData()); + Thread.sleep(propagationTime); + curator.delete().forPath(pathPrefix + "/first"); + Thread.sleep(propagationTime); + + // Now create a node but without setting the data field. 
+ triggerExceptions.set(false); + curator.create().forPath(pathPrefix + "/second"); + assertTrue(createLatch.await(propagationTime, TimeUnit.MILLISECONDS)); + curator.setData().forPath(pathPrefix + "/second", getData()); + assertTrue(dataLatch.await(propagationTime, TimeUnit.MILLISECONDS)); + curator.delete().forPath(pathPrefix + "/second"); + assertTrue(removeLatch.await(propagationTime, TimeUnit.MILLISECONDS)); + + nodeCollectionWatcher.shutdown(); + } +} diff --git a/cn-zookeeper/src/test/java/org/cloudname/backends/zookeeper/ZooKeeperBackendTest.java b/cn-zookeeper/src/test/java/org/cloudname/backends/zookeeper/ZooKeeperBackendTest.java new file mode 100644 index 00000000..393b78b4 --- /dev/null +++ b/cn-zookeeper/src/test/java/org/cloudname/backends/zookeeper/ZooKeeperBackendTest.java @@ -0,0 +1,36 @@ +package org.cloudname.backends.zookeeper; + +import org.apache.curator.test.TestingCluster; +import org.cloudname.core.CloudnameBackend; +import org.cloudname.testtools.backend.CoreBackendTest; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.util.concurrent.atomic.AtomicReference; + +/** + * Test the ZooKeeper backend. 
+ */ +public class ZooKeeperBackendTest extends CoreBackendTest { + private static TestingCluster testCluster; + private AtomicReference<ZooKeeperBackend> backend = new AtomicReference<>(null); + + @BeforeClass + public static void setUp() throws Exception { + testCluster = new TestingCluster(3); + testCluster.start(); + } + + @AfterClass + public static void tearDown() throws Exception { + testCluster.stop(); + } + + protected CloudnameBackend getBackend() { + if (backend.get() == null) { + backend.compareAndSet(null, new ZooKeeperBackend(testCluster.getConnectString())); + } + return backend.get(); + + } +} diff --git a/cn/pom.xml b/cn/pom.xml deleted file mode 100644 index f46d83ff..00000000 --- a/cn/pom.xml +++ /dev/null @@ -1,93 +0,0 @@ -<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - <modelVersion>4.0.0</modelVersion> - - <parent> - <groupId>org.cloudname</groupId> - <artifactId>cloudname-parent</artifactId> - <version>3.0-SNAPSHOT</version> - </parent> - - <artifactId>cn</artifactId> - <packaging>jar</packaging> - - <name>Cloudname Library</name> - <description>Simple library for managing resources using ZooKeeper.</description> - <url>https://github.com/Cloudname/cloudname</url> - - <dependencies> - <dependency> - <groupId>org.cloudname</groupId> - <artifactId>testtools</artifactId> - </dependency> - - <dependency> - <groupId>org.apache.zookeeper</groupId> - <artifactId>zookeeper</artifactId> - </dependency> - - <dependency> - <groupId>com.fasterxml.jackson.core</groupId> - <artifactId>jackson-databind</artifactId> - </dependency> - - <dependency> - <groupId>org.cloudname</groupId> - <artifactId>flags</artifactId> - </dependency> - - <dependency> - <groupId>junit</groupId> - <artifactId>junit-dep</artifactId> - <scope>test</scope> - </dependency> - - <dependency> - <groupId>org.hamcrest</groupId> - 
<artifactId>hamcrest-all</artifactId> - <version>1.3</version> - </dependency> - </dependencies> - - <build> - <plugins> - <plugin> - <groupId>org.dstovall</groupId> - <artifactId>onejar-maven-plugin</artifactId> - <version>1.4.4</version> - <executions> - <execution> - <configuration> - <!-- Optional, default is false --> - <attachToBuild>true</attachToBuild> - <mainClass>org.cloudname.zk.ZkTool</mainClass> - <filename>ZkTool.jar</filename> - </configuration> - <goals> - <goal>one-jar</goal> - </goals> - </execution> - </executions> - </plugin> - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-surefire-plugin</artifactId> - </plugin> - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-antrun-plugin</artifactId> - </plugin> - <plugin> - <groupId>org.codehaus.mojo</groupId> - <artifactId>build-helper-maven-plugin</artifactId> - </plugin> - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-compiler-plugin</artifactId> - </plugin> - <plugin> - <artifactId>maven-failsafe-plugin</artifactId> - </plugin> - </plugins> - </build> -</project> diff --git a/cn/src/integrationtest/java/org/cloudname/zk/ZkCloudnameIntegrationTest.java b/cn/src/integrationtest/java/org/cloudname/zk/ZkCloudnameIntegrationTest.java deleted file mode 100644 index b5d7daaa..00000000 --- a/cn/src/integrationtest/java/org/cloudname/zk/ZkCloudnameIntegrationTest.java +++ /dev/null @@ -1,480 +0,0 @@ -package org.cloudname.zk; - -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.ZooKeeper; -import org.cloudname.Cloudname; -import org.cloudname.CloudnameException; -import org.cloudname.Coordinate; -import org.cloudname.CoordinateException; -import org.cloudname.CoordinateExistsException; -import org.cloudname.CoordinateListener; -import org.cloudname.ServiceHandle; -import org.cloudname.ServiceState; -import 
org.cloudname.ServiceStatus; -import org.cloudname.testtools.Net; -import org.cloudname.testtools.network.PortForwarder; -import org.cloudname.testtools.zookeeper.EmbeddedZooKeeper; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.logging.Logger; - -import static org.junit.Assert.*; - -/** - * Integration tests for testing ZkCloudname. - * Contains mostly heavy tests containing sleep calls not fit as a unit test. - */ -public class ZkCloudnameIntegrationTest { - private static final Logger LOG = Logger.getLogger(ZkCloudnameIntegrationTest.class.getName()); - - private EmbeddedZooKeeper ezk; - private ZooKeeper zk; - private int zkport; - private PortForwarder forwarder = null; - private int forwarderPort; - private ZkCloudname cn = null; - - @Rule - public TemporaryFolder temp = new TemporaryFolder(); - - /** - * Set up an embedded ZooKeeper instance backed by a temporary - * directory. The setup procedure also allocates a port that is - * free for the ZooKeeper server so that you should be able to run - * multiple instances of this test. 
- */ - @Before - public void setup() throws Exception { - File rootDir = temp.newFolder("zk-test"); - zkport = Net.getFreePort(); - - LOG.info("EmbeddedZooKeeper rootDir=" + rootDir.getCanonicalPath() + ", port=" + zkport); - - // Set up and initialize the embedded ZooKeeper - ezk = new EmbeddedZooKeeper(rootDir, zkport); - ezk.init(); - - // Set up a zookeeper client that we can use for inspection - final CountDownLatch connectedLatch = new CountDownLatch(1); - - zk = new ZooKeeper("localhost:" + zkport, 1000, new Watcher() { - @Override - public void process(WatchedEvent event) { - if (event.getState() == Event.KeeperState.SyncConnected) { - connectedLatch.countDown(); - } - } - }); - connectedLatch.await(); - - LOG.info("ZooKeeper port is " + zkport); - } - - @After - public void tearDown() throws Exception { - zk.close(); - if (forwarder != null) { - forwarder.close(); - } - ezk.shutdown(); - } - - /** - * A coordinate listener that stores events and calls a latch. - */ - class TestCoordinateListener implements CoordinateListener { - private final List<Event> events = new CopyOnWriteArrayList<Event>(); - - private final Set<CountDownLatch> listenerLatches; - - private final List<Event> waitForEvent = new ArrayList<Event>(); - private final Object eventMonitor = new Object(); - private final List<CountDownLatch> waitForLatch = new ArrayList<CountDownLatch>(); - - public boolean failOnWrongEvent = false; - private CountDownLatch latestLatch = null; - - void waitForExpected() throws InterruptedException { - final CountDownLatch latch; - synchronized (eventMonitor) { - if (waitForEvent.size() > 0) { - LOG.info("Waiting for event " + waitForEvent.get(waitForEvent.size() - 1)); - latch = latestLatch; - } else { - return; - } - } - assert(latch.await(25, TimeUnit.SECONDS)); - LOG.info("Event happened."); - } - - public TestCoordinateListener(final Set<CountDownLatch> listenerLatches) { - this.listenerLatches = listenerLatches; - } - - public void expectEvent(final 
Event event) { - LOG.info("Expecting event " + event.name()); - synchronized (eventMonitor) { - waitForEvent.add(event); - latestLatch = new CountDownLatch(1); - waitForLatch.add(latestLatch); - } - } - - @Override - public void onCoordinateEvent(Event event, String message) { - LOG.info("I got event ..." + event.name() + " " + message); - synchronized (eventMonitor) { - if (waitForEvent.size() > 0) { - LOG.info("Waiting for event " + waitForEvent.get(0)); - } else { - LOG.info("not expecting any specific events"); - } - events.add(event); - for (CountDownLatch countDownLatch :listenerLatches) { - countDownLatch.countDown(); - } - if (waitForEvent.size() > 0 && waitForEvent.get(0) == event) { - waitForLatch.remove(0).countDown(); - waitForEvent.remove(0); - } else { - assertFalse(failOnWrongEvent); - } - } - } - } - - private TestCoordinateListener setUpListenerEnvironment( - final CountDownLatch latch) throws Exception { - Set<CountDownLatch> latches = new HashSet<CountDownLatch>(); - latches.add(latch); - return setUpListenerEnvironment(latches); - } - - private TestCoordinateListener setUpListenerEnvironment( - final Set<CountDownLatch> listenerLatches) throws Exception { - forwarderPort = Net.getFreePort(); - forwarder = new PortForwarder(forwarderPort, "127.0.0.1", zkport); - final Coordinate c = Coordinate.parse("1.service.user.cell"); - - cn = makeLocalZkCloudname(forwarderPort); - try { - cn.createCoordinate(c); - } catch (CoordinateException e) { - fail(e.toString()); - } - final TestCoordinateListener listener = new TestCoordinateListener(listenerLatches); - ServiceHandle serviceHandle = cn.claim(c); - assert(serviceHandle.waitForCoordinateOkSeconds(3 /* secs */)); - serviceHandle.registerCoordinateListener(listener); - - return listener; - } - - @Test - public void testCoordinateListenerInitialEvent() throws Exception { - final CountDownLatch connectedLatch1 = new CountDownLatch(1); - final TestCoordinateListener listener = 
setUpListenerEnvironment(connectedLatch1); - assertTrue(connectedLatch1.await(15, TimeUnit.SECONDS)); - assertEquals(1, listener.events.size()); - assertEquals(CoordinateListener.Event.COORDINATE_OK, listener.events.get(0)); - } - - @Test - public void testCoordinateListenerConnectionDies() throws Exception { - final CountDownLatch connectedLatch1 = new CountDownLatch(1); - final TestCoordinateListener listener = setUpListenerEnvironment(connectedLatch1); - assertTrue(connectedLatch1.await(20, TimeUnit.SECONDS)); - - listener.expectEvent(CoordinateListener.Event.NO_CONNECTION_TO_STORAGE); - forwarder.close(); - forwarder = null; - listener.waitForExpected(); - } - - @Test - public void testCoordinateListenerCoordinateCorrupted() throws Exception { - final CountDownLatch connectedLatch1 = new CountDownLatch(1); - final TestCoordinateListener listener = setUpListenerEnvironment(connectedLatch1); - assertTrue(connectedLatch1.await(20, TimeUnit.SECONDS)); - - listener.expectEvent(CoordinateListener.Event.NOT_OWNER); - - byte[] garbageBytes = "sdfgsdfgsfgdsdfgsdfgsdfg".getBytes("UTF-16LE"); - - zk.setData("/cn/cell/user/service/1/status", garbageBytes, -1); - listener.waitForExpected(); - } - - @Test - public void testCoordinateListenerCoordinateOutOfSync() throws Exception { - final CountDownLatch connectedLatch1 = new CountDownLatch(1); - final TestCoordinateListener listener = setUpListenerEnvironment(connectedLatch1); - assertTrue(connectedLatch1.await(20, TimeUnit.SECONDS)); - - listener.expectEvent(CoordinateListener.Event.NOT_OWNER); - - String source = "\"{\\\"state\\\":\\\"STARTING\\\",\\\"message\\\":\\\"Lost hamster.\\\"}\" {}"; - byte[] byteArray = source.getBytes(Util.CHARSET_NAME); - - zk.setData("/cn/cell/user/service/1/status", byteArray, -1); - - listener.waitForExpected(); - } - - @Test - public void testCoordinateListenerCoordinateLost() throws Exception { - final CountDownLatch connectedLatch1 = new CountDownLatch(1); - final TestCoordinateListener 
listener = setUpListenerEnvironment(connectedLatch1); - listener.expectEvent(CoordinateListener.Event.NO_CONNECTION_TO_STORAGE); - listener.expectEvent(CoordinateListener.Event.NOT_OWNER); - - assertTrue(connectedLatch1.await(20, TimeUnit.SECONDS)); - LOG.info("Deleting coordinate"); - forwarder.pause(); - zk.delete("/cn/cell/user/service/1/status", -1); - zk.delete("/cn/cell/user/service/1/config", -1); - zk.delete("/cn/cell/user/service/1", -1); - forwarder.unpause(); - - listener.waitForExpected(); - - } - - @Test - public void testCoordinateListenerStolenCoordinate() throws Exception { - - final CountDownLatch connectedLatch1 = new CountDownLatch(1); - final TestCoordinateListener listener = setUpListenerEnvironment(connectedLatch1); - assertTrue(connectedLatch1.await(20, TimeUnit.SECONDS)); - LOG.info("Killing zookeeper"); - assertTrue(zk.getState() == ZooKeeper.States.CONNECTED); - - LOG.info("Killing connection"); - forwarder.pause(); - - zk.delete("/cn/cell/user/service/1/status", -1); - Util.mkdir(zk, "/cn/cell/user/service/1/status" , ZooDefs.Ids.OPEN_ACL_UNSAFE); - - forwarder.unpause(); - - listener.expectEvent(CoordinateListener.Event.NOT_OWNER); - listener.waitForExpected(); - } - - - @Test - public void testCoordinateListenerConnectionDiesReconnect() throws Exception { - final CountDownLatch connectedLatch1 = new CountDownLatch(1); - final TestCoordinateListener listener = setUpListenerEnvironment(connectedLatch1); - assertTrue(connectedLatch1.await(20, TimeUnit.SECONDS)); - - listener.expectEvent(CoordinateListener.Event.NO_CONNECTION_TO_STORAGE); - - forwarder.pause(); - listener.waitForExpected(); - - listener.expectEvent(CoordinateListener.Event.COORDINATE_OK); - forwarder.unpause(); - listener.waitForExpected(); - } - - /** - * In this test the ZK server thinks the client is connected, but the client wants to reconnect - * due to a disconnect. To trig this condition the connection needs to be down for - * a specific time. 
This test does not fail even if it does not manage to create this - * state. It will write the result to the log. The test is useful for development and - * should not fail. - */ - @Test - public void testCoordinateListenerConnectionDiesReconnectAfterTimeoutClient() - throws Exception { - final CountDownLatch connectedLatch1 = new CountDownLatch(1); - final TestCoordinateListener listener = setUpListenerEnvironment(connectedLatch1); - assertTrue(connectedLatch1.await(20, TimeUnit.SECONDS)); - assertEquals(CoordinateListener.Event.COORDINATE_OK, - listener.events.get(listener.events.size() -1 )); - - listener.expectEvent(CoordinateListener.Event.NO_CONNECTION_TO_STORAGE); - LOG.info("Killing connection"); - forwarder.pause(); - - LOG.info("Connection down."); - listener.waitForExpected(); - - // Client sees problem, server not. - listener.expectEvent(CoordinateListener.Event.COORDINATE_OK); - - // 3400 is a magic number for getting zookeeper and local client in a specific state. - Thread.sleep(2400); - LOG.info("Recreating connection soon" + forwarderPort + "->" + zkport); - - - forwarder.unpause(); - listener.waitForExpected(); // COORDINATE_OK - - // If the previous event is NOT_OWNER, the wanted situation was created by the test. - if (listener.events.get(listener.events.size() - 2) == - CoordinateListener.Event.NOT_OWNER) { - LOG.info("Manage to trig event inn ZooKeeper, true positive."); - } else { - LOG.info("Did NOT manage to trig event in ZooKeeper. 
This depends on timing, so " + - "ignoring this problem"); - } - } - - @Test - public void testCoordinateListenerConnectionDiesReconnectAfterTimeout() throws Exception { - final CountDownLatch connectedLatch1 = new CountDownLatch(1); - final TestCoordinateListener listener = setUpListenerEnvironment(connectedLatch1); - assertTrue(connectedLatch1.await(20, TimeUnit.SECONDS)); - assertEquals(CoordinateListener.Event.COORDINATE_OK, - listener.events.get(listener.events.size() -1 )); - - listener.expectEvent(CoordinateListener.Event.NO_CONNECTION_TO_STORAGE); - - forwarder.close(); - forwarder = null; - listener.waitForExpected(); - // We do not want NOT OWNER event from ZooKeeper. Therefore this long time out. - LOG.info("Going into sleep, waiting for zookeeper to loose node"); - Thread.sleep(10000); - - listener.expectEvent(CoordinateListener.Event.COORDINATE_OK); - forwarder = new PortForwarder(forwarderPort, "127.0.0.1", zkport); - - // We need to re-instantiate the forwarder, or zookeeper thinks - // the connection is good and will not kill the ephemeral node. - // This is probably because we keep the server socket against zookeeper open - // in pause mode. - - listener.waitForExpected(); - } - - - /** - * Tests the behavior of Zookeeper upon a restart. ZK should clean up old coordinates. 
- * @throws Exception - */ - @Test - public void testZookeeperRestarts() throws Exception { - final CountDownLatch connectedLatch1 = new CountDownLatch(1); - final TestCoordinateListener listener = setUpListenerEnvironment(connectedLatch1); - assertTrue(connectedLatch1.await(20, TimeUnit.SECONDS)); - - - listener.expectEvent(CoordinateListener.Event.NO_CONNECTION_TO_STORAGE); - forwarder.pause(); - listener.waitForExpected(); - - ezk.shutdown(); - ezk.del(); - ezk.init(); - - listener.expectEvent(CoordinateListener.Event.NOT_OWNER); - - forwarder.unpause(); - listener.waitForExpected(); - - createCoordinateWithRetries(); - - listener.expectEvent(CoordinateListener.Event.COORDINATE_OK); - listener.waitForExpected(); - } - - private void createCoordinateWithRetries() throws CoordinateExistsException, - InterruptedException, CloudnameException { - Coordinate c = Coordinate.parse("1.service.user.cell"); - int retries = 10; - for (;;) { - try { - cn.createCoordinate(c); - break; - } catch (CloudnameException e) { - /* - * CloudnameException indicates that the connection with - * ZooKeeper isn't back up yet. Retry a few times. - */ - if (retries-- > 0) { - LOG.info("Failed to create coordinate: " + e - + ", retrying in 1 second"); - Thread.sleep(1000); - } else { - throw e; - } - } - } - } - - /** - * Tests that one process claims a coordinate, then another process tries to claim the same coordinate. - * The first coordinate looses connection to ZooKeeper and the other process gets the coordinate. 
- * @throws Exception - */ - @Test - public void testFastHardRestart() throws Exception { - final Coordinate c = Coordinate.parse("1.service.user.cell"); - final CountDownLatch claimLatch1 = new CountDownLatch(1); - forwarderPort = Net.getFreePort(); - forwarder = new PortForwarder(forwarderPort, "127.0.0.1", zkport); - final Cloudname cn1 = new ZkCloudname.Builder().setConnectString( - "localhost:" + forwarderPort).build().connect(); - cn1.createCoordinate(c); - - ServiceHandle handle1 = cn1.claim(c); - handle1.registerCoordinateListener(new CoordinateListener() { - - @Override - public void onCoordinateEvent(Event event, String message) { - if (event == Event.COORDINATE_OK) { - claimLatch1.countDown(); - } - } - }); - assertTrue(claimLatch1.await(5, TimeUnit.SECONDS)); - - final Cloudname cn2 = new ZkCloudname.Builder().setConnectString( - "localhost:" + zkport).build().connect(); - - ServiceHandle handle2 = cn2.claim(c); - - forwarder.close(); - forwarder = null; - - assertTrue(handle2.waitForCoordinateOkSeconds(20)); - - ServiceStatus status = new ServiceStatus(ServiceState.RUNNING, "updated status"); - handle2.setStatus(status); - - final Cloudname cn3 = new ZkCloudname.Builder().setConnectString("localhost:" + zkport) - .build().connect(); - ServiceStatus statusRetrieved = cn3.getStatus(c); - assertEquals("updated status", statusRetrieved.getMessage()); - - cn1.close(); - cn2.close(); - cn3.close(); - } - - /** - * Makes a local ZkCloudname instance with the port given by zkPort. - * Then it connects to ZK. 
- */ - private ZkCloudname makeLocalZkCloudname(int port) throws CloudnameException { - return new ZkCloudname.Builder().setConnectString("localhost:" + port).build().connect(); - } -} diff --git a/cn/src/integrationtest/java/org/cloudname/zk/ZkResolverIntegrationTest.java b/cn/src/integrationtest/java/org/cloudname/zk/ZkResolverIntegrationTest.java deleted file mode 100644 index b0a01517..00000000 --- a/cn/src/integrationtest/java/org/cloudname/zk/ZkResolverIntegrationTest.java +++ /dev/null @@ -1,420 +0,0 @@ -package org.cloudname.zk; - -import org.apache.zookeeper.*; -import org.cloudname.*; -import org.cloudname.testtools.Net; -import org.cloudname.testtools.zookeeper.EmbeddedZooKeeper; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -import static org.junit.Assert.*; -import static org.junit.Assert.assertEquals; - -/** - * Integration tests for the ZkResolver class. - * This test class contains tests dependent on timing or - * tests depending on other modules, or both. - */ -public class ZkResolverIntegrationTest { - private ZooKeeper zk; - private Cloudname cn; - private Coordinate coordinateRunning; - private Coordinate coordinateDraining; - @Rule - public TemporaryFolder temp = new TemporaryFolder(); - private ServiceHandle handleDraining; - - - /** - * Set up an embedded ZooKeeper instance backed by a temporary - * directory. The setup procedure also allocates a port that is - * free for the ZooKeeper server so that you should be able to run - * multiple instances of this test. - */ - @Before - public void setup() throws Exception { - - // Speed up tests waiting for this event to happen. 
- DynamicExpression.TIME_BETWEEN_NODE_SCANNING_MS = 200; - - File rootDir = temp.newFolder("zk-test"); - final int zkport = Net.getFreePort(); - - // Set up and initialize the embedded ZooKeeper - final EmbeddedZooKeeper ezk = new EmbeddedZooKeeper(rootDir, zkport); - ezk.init(); - - // Set up a zookeeper client that we can use for inspection - final CountDownLatch connectedLatch = new CountDownLatch(1); - zk = new ZooKeeper("localhost:" + zkport, 1000, new Watcher() { - public void process(WatchedEvent event) { - if (event.getState() == Event.KeeperState.SyncConnected) { - connectedLatch.countDown(); - } - } - }); - connectedLatch.await(); - coordinateRunning = Coordinate.parse("1.service.user.cell"); - cn = new ZkCloudname.Builder().setConnectString("localhost:" + zkport).build().connect(); - cn.createCoordinate(coordinateRunning); - ServiceHandle handleRunning = cn.claim(coordinateRunning); - assertTrue(handleRunning.waitForCoordinateOkSeconds(30)); - - handleRunning.putEndpoint(new Endpoint( - coordinateRunning, "foo", "localhost", 1234, "http", "data")); - handleRunning.putEndpoint(new Endpoint( - coordinateRunning, "bar", "localhost", 1235, "http", null)); - ServiceStatus statusRunning = new ServiceStatus(ServiceState.RUNNING, "Running message"); - handleRunning.setStatus(statusRunning); - - coordinateDraining = Coordinate.parse("0.service.user.cell"); - cn.createCoordinate(coordinateDraining); - handleDraining = cn.claim(coordinateDraining); - assertTrue(handleDraining.waitForCoordinateOkSeconds(10)); - handleDraining.putEndpoint(new Endpoint( - coordinateDraining, "foo", "localhost", 5555, "http", "data")); - handleDraining.putEndpoint(new Endpoint( - coordinateDraining, "bar", "localhost", 5556, "http", null)); - - ServiceStatus statusDraining = new ServiceStatus(ServiceState.DRAINING, "Draining message"); - handleDraining.setStatus(statusDraining); - } - - @After - public void tearDown() throws Exception { - zk.close(); - } - - - public void undrain() 
throws CoordinateMissingException, CloudnameException { - ServiceStatus statusDraining = new ServiceStatus(ServiceState.RUNNING, "alive"); - handleDraining.setStatus(statusDraining); - } - - public void drain() throws CoordinateMissingException, CloudnameException { - ServiceStatus statusDraining = new ServiceStatus(ServiceState.DRAINING, "dead"); - handleDraining.setStatus(statusDraining); - } - - public void changeEndpointData() throws CoordinateMissingException, CloudnameException { - handleDraining.putEndpoint(new Endpoint( - coordinateDraining, "foo", "localhost", 5555, "http", "dataChanged")); - } - - public void changeEndpointPort() throws CoordinateMissingException, CloudnameException { - handleDraining.putEndpoint(new Endpoint( - coordinateDraining, "foo", "localhost", 5551, "http", "dataChanged")); - } - - @Test - public void testStatus() throws Exception { - ServiceStatus status = cn.getStatus(coordinateRunning); - assertEquals(ServiceState.RUNNING, status.getState()); - assertEquals("Running message", status.getMessage()); - } - - @Test - public void testBasicSyncResolving() throws Exception { - List<Endpoint> endpoints = cn.getResolver().resolve("foo.1.service.user.cell"); - assertEquals(1, endpoints.size()); - assertEquals("foo", endpoints.get(0).getName()); - assertEquals("localhost", endpoints.get(0).getHost()); - assertEquals("1.service.user.cell", endpoints.get(0).getCoordinate().toString()); - assertEquals("data", endpoints.get(0).getEndpointData()); - assertEquals("http", endpoints.get(0).getProtocol()); - } - - - @Test - public void testAnyResolving() throws Exception { - List<Endpoint> endpoints = cn.getResolver().resolve("foo.any.service.user.cell"); - assertEquals(1, endpoints.size()); - assertEquals("foo", endpoints.get(0).getName()); - assertEquals("localhost", endpoints.get(0).getHost()); - assertEquals("1.service.user.cell", endpoints.get(0).getCoordinate().toString()); - } - - @Test - public void testAllResolving() throws Exception { - 
List<Endpoint> endpoints = cn.getResolver().resolve("all.service.user.cell"); - assertEquals(2, endpoints.size()); - assertEquals("foo", endpoints.get(0).getName()); - assertEquals("bar", endpoints.get(1).getName()); - } - - /** - * Tests that all registered endpoints are returned. - */ - @Test - public void testGetCoordinateDataAll() throws Exception { - Resolver.CoordinateDataFilter filter = new Resolver.CoordinateDataFilter(); - Set<Endpoint> endpoints = cn.getResolver().getEndpoints(filter); - assertEquals(4, endpoints.size()); - } - - /** - * Tests that all methods of the filters are called and some basic filtering are functional. - */ - @Test - public void testGetCoordinateDataFilterOptions() throws Exception { - final StringBuilder filterCalls = new StringBuilder(); - - Resolver.CoordinateDataFilter filter = new Resolver.CoordinateDataFilter() { - @Override - public boolean includeCell(final String datacenter) { - filterCalls.append(datacenter).append(":"); - return true; - } - @Override - public boolean includeUser(final String user) { - filterCalls.append(user).append(":"); - return true; - } - @Override - public boolean includeService(final String service) { - filterCalls.append(service).append(":"); - return true; - } - @Override - public boolean includeEndpointname(final String endpointName) { - return endpointName.equals("foo"); - } - @Override - public boolean includeServiceState(final ServiceState state) { - return state == ServiceState.RUNNING; - } - }; - Set<Endpoint> endpoints = cn.getResolver().getEndpoints(filter); - assertEquals(1, endpoints.size()); - Endpoint selectedEndpoint = endpoints.iterator().next(); - - assertEquals("foo", selectedEndpoint.getName()); - assertEquals("cell:user:service:", filterCalls.toString()); - } - - - /** - * Test an unclaimed coordinate and a path that is not complete. - * Number of endpoints should not increase when inputting bad data. 
- * @throws Exception - */ - @Test - public void testGetCoordinateDataAllNoClaimedCoordinate() throws Exception { - // Create unclaimned coordinate. - Coordinate coordinateNoStatus = Coordinate.parse("4.service.user.cell"); - cn.createCoordinate(coordinateNoStatus); - - // Throw in a incomplete path. - zk.create("/cn/foo", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - - Resolver resolver = cn.getResolver(); - - Resolver.CoordinateDataFilter filter = new Resolver.CoordinateDataFilter(); - Set<Endpoint> endpoints = resolver.getEndpoints(filter); - assertEquals(4, endpoints.size()); - } - - @Test - public void testBasicAsyncResolving() throws Exception { - Resolver resolver = cn.getResolver(); - - final List<Endpoint> endpointListNew = new ArrayList<Endpoint>(); - final List<Endpoint> endpointListRemoved = new ArrayList<Endpoint>(); - final List<Endpoint> endpointListModified = new ArrayList<Endpoint>(); - - // This class is needed since the abstract resolver listener class can only access final variables. 
- class LatchWrapper { - public CountDownLatch latch; - } - final LatchWrapper latchWrapper = new LatchWrapper(); - - latchWrapper.latch = new CountDownLatch(1); - - resolver.addResolverListener( - "foo.all.service.user.cell", new Resolver.ResolverListener() { - - @Override - public void endpointEvent(Event event, Endpoint endpoint) { - switch (event) { - case NEW_ENDPOINT: - endpointListNew.add(endpoint); - latchWrapper.latch.countDown(); - break; - case REMOVED_ENDPOINT: - endpointListRemoved.add(endpoint); - latchWrapper.latch.countDown(); - break; - case MODIFIED_ENDPOINT_DATA: - endpointListModified.add(endpoint); - latchWrapper.latch.countDown(); - break; - } - } - }); - assertTrue(latchWrapper.latch.await(24000, TimeUnit.MILLISECONDS)); - assertEquals(1, endpointListNew.size()); - assertEquals("foo", endpointListNew.get(0).getName()); - assertEquals("1.service.user.cell", endpointListNew.get(0).getCoordinate().toString()); - endpointListNew.clear(); - latchWrapper.latch = new CountDownLatch(1); - - undrain(); - - - assertTrue(latchWrapper.latch.await(125000, TimeUnit.MILLISECONDS)); - - assertEquals(1, endpointListNew.size()); - - assertEquals("foo", endpointListNew.get(0).getName()); - assertEquals("0.service.user.cell", endpointListNew.get(0).getCoordinate().toString()); - - latchWrapper.latch = new CountDownLatch(1); - endpointListNew.clear(); - - changeEndpointData(); - - assertTrue(latchWrapper.latch.await(26000, TimeUnit.MILLISECONDS)); - - assertEquals(1, endpointListModified.size()); - - assertEquals("0.service.user.cell", endpointListModified.get(0).getCoordinate().toString()); - assertEquals("foo", endpointListModified.get(0).getName()); - assertEquals("dataChanged", endpointListModified.get(0).getEndpointData()); - - endpointListModified.clear(); - - latchWrapper.latch = new CountDownLatch(2); - - changeEndpointPort(); - - assertTrue(latchWrapper.latch.await(27000, TimeUnit.MILLISECONDS)); - - assertEquals(1, endpointListNew.size()); - 
assertEquals(1, endpointListRemoved.size()); - - endpointListNew.clear(); - endpointListRemoved.clear(); - - - - latchWrapper.latch = new CountDownLatch(1); - - drain(); - - assertTrue(latchWrapper.latch.await(27000, TimeUnit.MILLISECONDS)); - - assertEquals(1, endpointListRemoved.size()); - - assertEquals("0.service.user.cell", endpointListRemoved.get(0).getCoordinate().toString()); - assertEquals("foo", endpointListRemoved.get(0).getName()); - } - - @Test - public void testBasicAsyncResolvingAnyStrategy() throws Exception { - Resolver resolver = cn.getResolver(); - - final List<Endpoint> endpointListNew = new ArrayList<Endpoint>(); - - // This class is needed since the abstract resolver listener class can only access - // final variables. - class LatchWrapper { - public CountDownLatch latch; - } - final LatchWrapper latchWrapper = new LatchWrapper(); - - latchWrapper.latch = new CountDownLatch(1); - - resolver.addResolverListener( - "foo.any.service.user.cell", new Resolver.ResolverListener() { - - @Override - public void endpointEvent(Event event, Endpoint endpoint) { - switch (event) { - case NEW_ENDPOINT: - endpointListNew.add(endpoint); - latchWrapper.latch.countDown(); - break; - case REMOVED_ENDPOINT: - latchWrapper.latch.countDown(); - break; - } - } - }); - assertTrue(latchWrapper.latch.await(5000, TimeUnit.MILLISECONDS)); - assertEquals(1, endpointListNew.size()); - assertEquals("foo", endpointListNew.get(0).getName()); - assertEquals("1.service.user.cell", endpointListNew.get(0).getCoordinate().toString()); - endpointListNew.clear(); - - latchWrapper.latch = new CountDownLatch(1); - - undrain(); - - assertFalse(latchWrapper.latch.await(3000, TimeUnit.MILLISECONDS)); - } - - @Test - public void testStopAsyncResolving() throws Exception { - Resolver resolver = cn.getResolver(); - - final List<Endpoint> endpointListNew = new ArrayList<Endpoint>(); - - // This class is needed since the abstract resolver listener class can only access - // final variables. 
- class LatchWrapper { - public CountDownLatch latch; - } - final LatchWrapper latchWrapper = new LatchWrapper(); - - latchWrapper.latch = new CountDownLatch(1); - - - Resolver.ResolverListener resolverListener = new Resolver.ResolverListener() { - @Override - public void endpointEvent(Event event, Endpoint endpoint) { - switch (event) { - - case NEW_ENDPOINT: - endpointListNew.add(endpoint); - latchWrapper.latch.countDown(); - break; - case REMOVED_ENDPOINT: - latchWrapper.latch.countDown(); - break; - } - } - }; - resolver.addResolverListener("foo.all.service.user.cell", resolverListener); - assertTrue(latchWrapper.latch.await(5000, TimeUnit.MILLISECONDS)); - assertEquals(1, endpointListNew.size()); - assertEquals("foo", endpointListNew.get(0).getName()); - assertEquals("1.service.user.cell", endpointListNew.get(0).getCoordinate().toString()); - endpointListNew.clear(); - - latchWrapper.latch = new CountDownLatch(1); - - resolver.removeResolverListener(resolverListener); - - undrain(); - - assertFalse(latchWrapper.latch.await(100, TimeUnit.MILLISECONDS)); - - try { - resolver.removeResolverListener(resolverListener); - } catch (IllegalArgumentException e) { - // This is expected. - return; - } - fail("Did not throw an exception on deleting a non existing listener."); - } -} diff --git a/cn/src/main/java/org/cloudname/Cloudname.java b/cn/src/main/java/org/cloudname/Cloudname.java deleted file mode 100644 index d1fbb729..00000000 --- a/cn/src/main/java/org/cloudname/Cloudname.java +++ /dev/null @@ -1,84 +0,0 @@ -package org.cloudname; - -/** - * The main interface for interacting with Cloudname. - * - * @author borud - * @author dybdahl - */ -public interface Cloudname { - /** - * Claim a coordinate returning a {@link ServiceHandle} through - * which the service can interact with the system. This is an asynchronous operation, to check result - * use the returned Servicehandle. E.g. for waiting up to ten seconds for a claim to happen: - * - * Cloudname cn = ... 
- * Coordinate coordinate = ... - * ServiceHandle serviceHandle = cn.claim(coordinate); - * boolean claimSuccess = serviceHandle.waitForCoordinateOkSeconds(10); - * - * @param coordinate of the service we wish to claim. - * @return a ServiceHandle that can wait for the claim to be successful and listen to the state of the claim. - */ - ServiceHandle claim(Coordinate coordinate); - - /** - * Get a resolver instance. - */ - Resolver getResolver(); - - /** - * Create a coordinate in the persistent service store. Must - * throw an exception if the coordinate has already been defined. - * - * - * @param coordinate the coordinate we wish to create - * @throws CoordinateExistsException if coordinate already exists. - * @throws CloudnameException if problems with talking with storage. - */ - void createCoordinate(Coordinate coordinate) - throws CloudnameException, CoordinateExistsException; - - /** - * Deletes a coordinate in the persistent service store. It will throw an exception if the coordinate is claimed. - * @param coordinate the coordinate we wish to destroy. - * @throws CoordinateMissingException if coordinate does not exist. - * @throws CloudnameException if problems talking with storage. - * @throws CoordinateDeletionException if problems occurred during deletion. - */ - void destroyCoordinate(Coordinate coordinate) - throws CoordinateDeletionException, CoordinateMissingException, CloudnameException; - - /** - * Get the ServiceStatus for a given Coordinate. - * - * @param coordinate the coordinate we want to get the status of - * @return a ServiceStatus instance. - * @throws CloudnameException if problems with talking with storage. - */ - ServiceStatus getStatus(Coordinate coordinate) - throws CloudnameException; - - /** - * Updates the config for a coordinate. If the oldConfig is set (not null) it will require that the old config - * matches otherwise it will throw an exception - * @throws CoordinateMissingException if coordinate does not exist. 
- * @throws CloudnameException if problems including oldConfig does not match old config. - */ - void setConfig(final Coordinate coordinate, final String newConfig, final String oldConfig) - throws CoordinateMissingException, CloudnameException; - - /** - * Get config for a coordinate. - * @return the new config. - * @throws CoordinateMissingException if coordinate does not exist. - * @throws CloudnameException - */ - String getConfig(final Coordinate coordinate) - throws CoordinateMissingException, CloudnameException; - - /** - * Close down connection to storage. - */ - void close(); -} diff --git a/cn/src/main/java/org/cloudname/CloudnameException.java b/cn/src/main/java/org/cloudname/CloudnameException.java deleted file mode 100644 index 66da94f7..00000000 --- a/cn/src/main/java/org/cloudname/CloudnameException.java +++ /dev/null @@ -1,21 +0,0 @@ -package org.cloudname; - -/** - * Exceptions for Cloudname caused by problems talking to storage. - * - * @author borud - */ -public class CloudnameException extends Exception { - - public CloudnameException(Throwable t) { - super(t); - } - - public CloudnameException(String message) { - super(message); - } - - public CloudnameException(String message, Throwable t) { - super(message, t); - } -} diff --git a/cn/src/main/java/org/cloudname/CloudnameTestBootstrapper.java b/cn/src/main/java/org/cloudname/CloudnameTestBootstrapper.java deleted file mode 100644 index 5b29aae6..00000000 --- a/cn/src/main/java/org/cloudname/CloudnameTestBootstrapper.java +++ /dev/null @@ -1,63 +0,0 @@ -package org.cloudname; - -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; - -import java.io.File; -import java.util.concurrent.CountDownLatch; -import java.util.logging.Logger; - -import org.apache.zookeeper.ZooKeeper; -import org.cloudname.testtools.Net; -import org.cloudname.testtools.zookeeper.EmbeddedZooKeeper; -import org.cloudname.zk.ZkCloudname; - - -/** - * Helper class for bootstrapping cloudname for 
unit-tests. It also exposes the ZooKeeper instance. - * @author @author dybdahl - */ -public class CloudnameTestBootstrapper { - - private static final Logger LOGGER = Logger.getLogger(CloudnameTestBootstrapper.class.getName()); - private EmbeddedZooKeeper embeddedZooKeeper; - private ZooKeeper zooKeeper; - private Cloudname cloudname; - private File rootDir; - - public CloudnameTestBootstrapper(File rootDir) { - this.rootDir = rootDir; - } - - public void init() throws Exception, CloudnameException { - int zookeeperPort = Net.getFreePort(); - - LOGGER.info("EmbeddedZooKeeper rootDir=" + rootDir.getCanonicalPath() - + ", port=" + zookeeperPort - ); - - // Set up and initialize the embedded ZooKeeper - embeddedZooKeeper = new EmbeddedZooKeeper(rootDir, zookeeperPort); - embeddedZooKeeper.init(); - - // Set up a zookeeper client that we can use for inspection - final CountDownLatch connectedLatch = new CountDownLatch(1); - zooKeeper = new ZooKeeper("localhost:" + zookeeperPort, 1000, new Watcher() { - public void process(WatchedEvent event) { - if (event.getState() == Watcher.Event.KeeperState.SyncConnected) { - connectedLatch.countDown(); - } - } - }); - connectedLatch.await(); - cloudname = new ZkCloudname.Builder().setConnectString("localhost:" + zookeeperPort).build().connect(); - } - - public ZooKeeper getZooKeeper() { - return zooKeeper; - } - - public Cloudname getCloudname() { - return cloudname; - } -} diff --git a/cn/src/main/java/org/cloudname/ConfigListener.java b/cn/src/main/java/org/cloudname/ConfigListener.java deleted file mode 100644 index 04f3c5d8..00000000 --- a/cn/src/main/java/org/cloudname/ConfigListener.java +++ /dev/null @@ -1,25 +0,0 @@ -package org.cloudname; - -/** - * This interface defines the callback interface used to notify of - * config node changes. 
- * - * @author borud - */ - -public interface ConfigListener { - public enum Event { - CREATE, - UPDATED, - DELETED, - } - - /** - * This method is called whenever the application needs to be - * notified of events related to configuration. - * - * @param event the type of event observed on the config node. - * @param data the contents of the config node - */ - public void onConfigEvent(Event event, String data); -} \ No newline at end of file diff --git a/cn/src/main/java/org/cloudname/Coordinate.java b/cn/src/main/java/org/cloudname/Coordinate.java deleted file mode 100644 index e35d8b8c..00000000 --- a/cn/src/main/java/org/cloudname/Coordinate.java +++ /dev/null @@ -1,184 +0,0 @@ -package org.cloudname; - -import java.util.regex.Pattern; -import java.util.regex.Matcher; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; - -import java.io.IOException; - -/** - * This class represents a service coordinate. A coordinate is given - * by four pieces of data. - * - * <dl> - * <dt> Cell - * <dd> A cell is roughly equivalent to "data center". The strict definition - * is that a cell represents a ZooKeeper installation. You can have - * multiple cells in a physical datacenter, but it is not advisable to - * have ZooKeeper installations span physical data centers. - * - * <dt> User - * <dd> The user owning the service. May or may not have any relation to - * the operating system user. - * - * <dt> Service - * <dd> The name of the service. - * - * <dt> Instance - * </dd> An integer [0, Integer.MAX_VALUE) indicating the instance number - * of the service. - * - * The canonical form of a coordinate is {@code 0.service.user.dc}. - * - * This class is immutable. 
- * - * @author borud - */ -public class Coordinate { - private final String cell; - private final String user; - private final String service; - private final int instance; - - // TODO(borud): allow for numbers in service, user and cell. Just - // not the first character. - public static final Pattern coordinatePattern - = Pattern.compile("^(\\d+)\\." // instance - + "([a-z][a-z0-9-_]*)\\." // service - + "([a-z][a-z0-9-_]*)\\." // user - + "([a-z][a-z0-9-_]*)\\z"); // cell - - /** - * Create a new coordinate instance. - * - * @param instance the instance number - * @param service the service name - * @param user the user name - * @param cell the cell name - * @throws IllegalArgumentException if the coordinate is invalid. - */ - @JsonCreator - public Coordinate (@JsonProperty("instance") int instance, - @JsonProperty("service") String service, - @JsonProperty("user") String user, - @JsonProperty("cell") String cell) { - // Enables validation of coordinate. - this(instance, service, user, cell, true); - } - - /** - * Internal version of constructor. Makes validation optional. - */ - public Coordinate (int instance, String service, String user, String cell, boolean validate) { - this.instance = instance; - this.service = service; - this.user = user; - this.cell = cell; - - if (instance < 0) { - throw new IllegalArgumentException("Invalid instance number: " + instance); - } - - // If the coordinate was created by the parse() method the - // coordinate has already been parsed using the - // coordinatePattern so no validation is required. If the - // coordinate was defined using the regular constructor we - // need to validate the parts. And we do this by re-using the - // coordinatePattern. - if (validate) { - if (! coordinatePattern.matcher(asString()).matches()) { - throw new IllegalArgumentException("Invalid coordinate: '" + asString() + "'"); - } - } - } - - /** - * Parse coordinate and create a {@code Coordinate} instance from - * a {@code String}. 
- * - * @param s Coordinate we wish to parse as a string. - * @return a Coordinate instance equivalent to {@code s} - * @throws IllegalArgumentException if the coordinate string {@s} - * is not a valid coordinate. - */ - public static Coordinate parse(String s) { - Matcher m = coordinatePattern.matcher(s); - if (! m.matches()) { - throw new IllegalArgumentException("Malformed coordinate: " + s); - } - - int instance = Integer.parseInt(m.group(1)); - String service = m.group(2); - String user = m.group(3); - String cell = m.group(4); - - return new Coordinate(instance, service, user, cell, false); - } - - public String getCell() { - return cell; - } - - public String getUser() { - return user; - } - - public String getService() { - return service; - } - - public int getInstance() { - return instance; - } - - public String asString() { - return instance + "." + service + "." + user + "." + cell; - } - - @Override - public String toString() { - return asString(); - } - - @Override - public boolean equals(Object o) { - if (null == o) { - return false; - } - - if (this == o) { - return true; - } - - if (getClass() != o.getClass()) { - return false; - } - - Coordinate c = (Coordinate) o; - return ((instance == c.instance) - && service.equals(c.service) - && user.equals(c.user) - && cell.equals(c.cell)); - } - - @Override - public int hashCode() { - return asString().hashCode(); - } - - public String toJson() { - try { - return new ObjectMapper().writeValueAsString(this); - } catch (IOException e) { - return null; - } - } - - public static Coordinate fromJson(String json) throws IOException { - return new ObjectMapper().readValue(json, Coordinate.class); - } - -} diff --git a/cn/src/main/java/org/cloudname/CoordinateDeletionException.java b/cn/src/main/java/org/cloudname/CoordinateDeletionException.java deleted file mode 100644 index c1ac89c7..00000000 --- a/cn/src/main/java/org/cloudname/CoordinateDeletionException.java +++ /dev/null @@ -1,11 +0,0 @@ -package 
org.cloudname; - -/** - * Thrown when there are problems deleting a coordinate. - * @auther dybdahl - */ -public class CoordinateDeletionException extends CoordinateException { - public CoordinateDeletionException(String reason) { - super(reason); - } -} diff --git a/cn/src/main/java/org/cloudname/CoordinateException.java b/cn/src/main/java/org/cloudname/CoordinateException.java deleted file mode 100644 index fadb25b7..00000000 --- a/cn/src/main/java/org/cloudname/CoordinateException.java +++ /dev/null @@ -1,12 +0,0 @@ -package org.cloudname; - -/** - * Base class for exception related to a specific coordinate. - * @auther dybdahl - */ -public abstract class CoordinateException extends Exception { - - public CoordinateException(String reason) { - super(reason); - } -} diff --git a/cn/src/main/java/org/cloudname/CoordinateExistsException.java b/cn/src/main/java/org/cloudname/CoordinateExistsException.java deleted file mode 100644 index 7ba067a0..00000000 --- a/cn/src/main/java/org/cloudname/CoordinateExistsException.java +++ /dev/null @@ -1,11 +0,0 @@ -package org.cloudname; - -/** - * It was assumed that the coordinate did not exist, but it did. - * @auther dybdahl - */ -public class CoordinateExistsException extends CoordinateException { - public CoordinateExistsException(String reason) { - super(reason); - } -} diff --git a/cn/src/main/java/org/cloudname/CoordinateListener.java b/cn/src/main/java/org/cloudname/CoordinateListener.java deleted file mode 100644 index 9a057ae7..00000000 --- a/cn/src/main/java/org/cloudname/CoordinateListener.java +++ /dev/null @@ -1,48 +0,0 @@ -package org.cloudname; - -/** - * Interface for listening to status on a claimed coordinate. - * @author dybdahl - */ -public interface CoordinateListener { - /** - * Events that can be triggered when monitoring a coordinate. - */ - public enum Event { - - /** - * Everything is fine. - */ - COORDINATE_OK, - - /** - * Connection lost to storage, no more events will occur. 
- */ - NO_CONNECTION_TO_STORAGE, - - /** - * Problems with parsing the data in storage for this coordinate. - */ - COORDINATE_CORRUPTED, - - /** - * The data in the storage and memory is out of sync. - */ - COORDINATE_OUT_OF_SYNC, - - /** - * No longer the owner of the coordinate. - */ - NOT_OWNER, - } - - - - /** - * Implement this function to receive the events. - * Return false if no more events are wanted, will stop eventually. - * @param event the event that happened. - * @param message some message associated with the event. - */ - public void onCoordinateEvent(Event event, String message); -} diff --git a/cn/src/main/java/org/cloudname/CoordinateMissingException.java b/cn/src/main/java/org/cloudname/CoordinateMissingException.java deleted file mode 100644 index 6fcdb56a..00000000 --- a/cn/src/main/java/org/cloudname/CoordinateMissingException.java +++ /dev/null @@ -1,11 +0,0 @@ -package org.cloudname; - -/** - * Exception related to a coordinate that is missing. - * @auther dybdahl - */ -public class CoordinateMissingException extends CoordinateException { - public CoordinateMissingException(String reason) { - super(reason); - } -} diff --git a/cn/src/main/java/org/cloudname/Endpoint.java b/cn/src/main/java/org/cloudname/Endpoint.java deleted file mode 100644 index b892cfaf..00000000 --- a/cn/src/main/java/org/cloudname/Endpoint.java +++ /dev/null @@ -1,102 +0,0 @@ -package org.cloudname; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; - -import java.io.IOException; - -/** - * Representation of an endpoint. This class is used to describe a - * wide range of endpoints, but it is, initially geared mainly towards - * services for which we need to know a hostname, port and protocol. - * As a stop-gap measure we provide an {@code endpointData} field - * which can be used in a pinch to communicate extra information about - * the endpoint. 
- * - * Instances of this class are immutable. - * - * TODO(borud): decide if coordinate and name should be part of this - * class. - * - * @author borud - */ -public class Endpoint { - // This gets saved into ZooKeeper as well and is redundant info, - // but it makes sense to have this information in the Endpoint - // instances to make it possible for clients to get a list of - // endpoints and be able to figure out what coordinates they come - // from if they were gathered from multiple services. - private final Coordinate coordinate; - // Ditto for name. - private final String name; - private final String host; - private final int port; - private final String protocol; - private final String endpointData; - - @JsonCreator - public Endpoint(@JsonProperty("coordinate") Coordinate coordinate, - @JsonProperty("name") String name, - @JsonProperty("host") String host, - @JsonProperty("port") int port, - @JsonProperty("protocol") String protocol, - @JsonProperty("endpointData") String endpointData) - { - this.coordinate = coordinate; - this.name = name; - this.host = host; - this.port = port; - this.protocol = protocol; - this.endpointData = endpointData; - } - - public Coordinate getCoordinate() { - return coordinate; - } - - public String getName() { - return name; - } - - public String getHost() { - return host; - } - - public int getPort() { - return port; - } - - public String getProtocol() { - return protocol; - } - - public String getEndpointData() { - return endpointData; - } - - @Override - public boolean equals(Object endpoint) { - return endpoint instanceof Endpoint && ((Endpoint) endpoint).toJson().equals(toJson()); - } - - public int hashCode() { - return toJson().hashCode(); - } - - public static Endpoint fromJson(String json) throws IOException { - return new ObjectMapper().readValue(json, Endpoint.class); - } - - public String toJson() { - try { - return new ObjectMapper().writeValueAsString(this); - } catch (IOException e) { - return null; - } - } - - 
public String toString() { - return toJson(); - } -} diff --git a/cn/src/main/java/org/cloudname/Resolver.java b/cn/src/main/java/org/cloudname/Resolver.java deleted file mode 100644 index e6c08728..00000000 --- a/cn/src/main/java/org/cloudname/Resolver.java +++ /dev/null @@ -1,115 +0,0 @@ -package org.cloudname; - -import java.util.List; -import java.util.Set; - -/** - * This interface defines how we resolve endpoints in Cloudname. The client has to keep a reference to this Resolver - * object otherwise it will stop resolving. - * - * @author borud - */ -public interface Resolver { - - /** - * Resolve an expression to a list of endpoints. The order of the - * endpoints may be subject to ranking criteria. - * - * @param expression The expression to resolve, e.g. for ZooKeeper implementation there are various formats like - * endpoint.instance.service.user.cell (see ZkResolver for details). - * @throws CloudnameException if problems talking with storage. - */ - List<Endpoint> resolve(String expression) throws CloudnameException; - - - /** - * Implement this interface to get dynamic information about what endpoints that are available. - * If you want to register more than 1000 listeners in the same resolver, you might consider overriding - * equals() and hashCode(), but the default implementation should work in normal cases. - */ - interface ResolverListener { - enum Event { - /** - * New endpoint was added. - */ - NEW_ENDPOINT, - /** - * Endpoint removed. This include when the coordinate goes to draining. - */ - REMOVED_ENDPOINT, - /** - * Endpoint data has been modified. - */ - MODIFIED_ENDPOINT_DATA, - /** - * Lost connection to storage. The list of endpoints will get stale. The system will reconnect - * automatically. - */ - LOST_CONNECTION, - /** - * Connection to storage is good, list of endpoints will be updated. - */ - CONNECTION_OK - } - - /** - * An Event happened related to the expression, see enum Event above. 
- * @param endpoint is only populated for the Event NEW_ENDPOINT and REMOVED_ENDPOINT. - */ - void endpointEvent(Event event, final Endpoint endpoint); - } - - /** - * Registers a ResolverListener to get dynamic information about an expression. The expression is set in the - * ResolverListener. You will only get updates as long as you keep a reference to Resolver. - * If you don't have a reference, it is up to the garbage collector to decide how long you will receive callbacks. - * One listener can only be registered once. - * - * @param expression The expression to resolve, e.g. for ZooKeeper implementation there are various formats like - * endpoint.instance.service.user.cell (see ZkResolver for details). This should be static data, i.e. - * the function might be called only once. - */ - void addResolverListener(String expression, ResolverListener listener) throws CloudnameException; - - /** - * Calling this function unregisters the listener, i.e. stopping future callbacks. - * The listener must be registered. For identification of listener, see comment on ResolverListener. - * The default is to use object id. - */ - void removeResolverListener(ResolverListener listener); - - /** - * This class is used as a parameter to {@link #getEndpoints(CoordinateDataFilter)}. Override methods to filter - * the endpoints to be - * returned. - */ - class CoordinateDataFilter { - /** - * Override these methods to filter on cell, user, service, endpointName, and/or service state. - */ - - public boolean includeCell(final String cell) { - return true; - } - public boolean includeUser(final String user) { - return true; - } - public boolean includeService(final String service) { - return true; - } - public boolean includeEndpointname(final String endpointName) { - return true; - } - public boolean includeServiceState(final ServiceState state) { - return true; - } - } - - /** - * This method reads out all the nodes from the storage. 
IT CAN BE VERY EXPENSIVE AND SHOULD BE USED ONLY - * WHEN NO OTHER METHODS ARE FEASIBLE. Do not call it frequently! - * @param filter class for filtering out endpoints - * @return list of endpoints. - */ - Set<Endpoint> getEndpoints(CoordinateDataFilter filter) throws CloudnameException, InterruptedException; -} diff --git a/cn/src/main/java/org/cloudname/ResolverStrategy.java b/cn/src/main/java/org/cloudname/ResolverStrategy.java deleted file mode 100644 index b28ebfb2..00000000 --- a/cn/src/main/java/org/cloudname/ResolverStrategy.java +++ /dev/null @@ -1,29 +0,0 @@ -package org.cloudname; - -import java.util.List; - -/** - * The ResolverStrategy is an interface for implementing a strategy when resolving endpoints. - * - * @auther dybdahl - */ - -public interface ResolverStrategy { - - /** - * Given a list of endpoints, return only those endpoints that are desired for this strategy. - */ - public List<Endpoint> filter(List<Endpoint> endpoints); - - /** - * Returns the endpoints ordered according to strategy specific scheme. - */ - public List<Endpoint> order(List<Endpoint> endpoints); - - /** - * Returns the name of this strategy. This is the same name that is used in the resolver - * (e.g. "all", "any" etc). - * @return name of strategy. - */ - public String getName(); -} diff --git a/cn/src/main/java/org/cloudname/ServiceHandle.java b/cn/src/main/java/org/cloudname/ServiceHandle.java deleted file mode 100644 index e0b9d81e..00000000 --- a/cn/src/main/java/org/cloudname/ServiceHandle.java +++ /dev/null @@ -1,105 +0,0 @@ -package org.cloudname; - -import java.util.List; - -/** - * The service handle -- the interface through which services - * communicate their state to the outside world and where services can - * register listeners to handle configuration updates. - * - * @author borud - */ -public interface ServiceHandle { - - /** - * This is a convenient function for waiting for the connection to storage to be ok. 
It is the same as - * registering a CoordinateListener and waiting for event coordinate ok. - */ - boolean waitForCoordinateOkSeconds(int seconds) throws InterruptedException; - - /** - * Set the status of this service. - * - * @param status the new status. - * @throws CoordinateMissingException if coordinate does not exist. - * @throws CloudnameException if coordinate is not claimed, connection to storage is down, or problems - * with ZooKeeper. - */ - void setStatus(ServiceStatus status) throws CoordinateMissingException, CloudnameException; - - /** - * Publish a named endpoint. It is legal to push an endpoint with updated data. - * - * @param endpoint the endpoint data. - * @throws CoordinateMissingException if coordinate does not exist. - * @throws CloudnameException if coordinate is not claimed, connection to storage is down, or problems - * with ZooKeeper. - */ - void putEndpoint(Endpoint endpoint) throws CoordinateMissingException, CloudnameException; - - /** - * Same as putEndpoints, but takes a list. - * - * @param endpoints the endpoints data. - * @throws CloudnameException if coordinate is not claimed, connection to storage is down, or problems - * with ZooKeeper. - * @throws CoordinateMissingException if coordinate does not exist. - */ - void putEndpoints(List<Endpoint> endpoints) throws CoordinateMissingException, CloudnameException; - - /** - * Remove a published endpoint. - * - * @param name the name of the endpoint we wish to remove. - * @throws CoordinateMissingException if coordinate does not exist. - * @throws CloudnameException if coordinate is not claimed, connection to storage is down, or problems - * with ZooKeeper. - */ - void removeEndpoint(String name) throws CoordinateMissingException, CloudnameException; - - /** - * Same as removeEndpoint() but takes a list of names. - * - * @throws CloudnameException if coordinate is not claimed, connection to storage is down, or problems - * with ZooKeeper. 
- * @throws CoordinateMissingException if coordinate does not exist. - * @throws CoordinateMissingException if coordinate does not exist. - */ - void removeEndpoints(List<String> names) throws CoordinateMissingException, CloudnameException; - - - /** - * Register a ConfigListener which will be called whenever there - * is a configuration change. - * - * There may have been configuration pushed to the backing storage - * already by the time a ConfigListener is registered. In that - * case the ConfigListener will see these configuration items as - * being created. - */ - // TODO(dybdahl): This logic lacks tests. Before used in any production code, tests have to be added. - void registerConfigListener(ConfigListener listener); - - /** - * After registering a new listener, a new event is triggered which include current state, even without change - * of state. - * Don't call the cloudname library, do any heavy lifting, or do any IO operation from this callback thread. - * That might deadlock as there is no guarantee what kind of thread that runs the callback. - * - * @throws CloudnameException if problems talking with storage. - */ - void registerCoordinateListener(CoordinateListener listener) - throws CloudnameException; - - /** - * Close the service handle and free up the coordinate so it can - * be claimed by others. After close() has been called all - * operations on this instance of the service handle will result - * in an exception being thrown. All endpoints are deleted. - * @throws CloudnameException if problems removing the claim. - */ - void close() - throws CloudnameException; - -} - diff --git a/cn/src/main/java/org/cloudname/ServiceState.java b/cn/src/main/java/org/cloudname/ServiceState.java deleted file mode 100644 index 6af89561..00000000 --- a/cn/src/main/java/org/cloudname/ServiceState.java +++ /dev/null @@ -1,29 +0,0 @@ -package org.cloudname; - -/** - * The defined states of a service. 
- * - * @author borud - */ -public enum ServiceState { - // This means that no service has claimed the coordinate, or in - // more practical terms: there is no ephemeral node called - // "status" in the service root path in ZooKeeper. - UNASSIGNED, - - // A running process has claimed the coordinate and is in the - // process of starting up. - STARTING, - - // A running process has claimed the coordinate and is running - // normally. - RUNNING, - - // A running process has claimed the coordinate and is running, - // but it is in the process of shutting down and will not accept - // new work. - DRAINING, - - // An error condition has occurred. - ERROR -} diff --git a/cn/src/main/java/org/cloudname/ServiceStatus.java b/cn/src/main/java/org/cloudname/ServiceStatus.java deleted file mode 100644 index eba04e20..00000000 --- a/cn/src/main/java/org/cloudname/ServiceStatus.java +++ /dev/null @@ -1,51 +0,0 @@ -package org.cloudname; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; - -import java.io.IOException; - -/** - * A representation of the basic runtime status of a service. - * - * Instances of ServiceStatus are immutable. 
- * - * @author borud - */ -public class ServiceStatus { - private final ServiceState state; - private final String message; - - /** - * @param state the state of the service - * @param message a human readable message - */ - @JsonCreator - public ServiceStatus(@JsonProperty("state") ServiceState state, - @JsonProperty("message") String message) - { - this.state = state; - this.message = message; - } - - public ServiceState getState() { - return state; - } - - public String getMessage() { - return message; - } - - public static ServiceStatus fromJson(String json) throws IOException { - return new ObjectMapper().readValue(json, ServiceStatus.class); - } - - public String toJson() { - try { - return new ObjectMapper().writeValueAsString(this); - } catch (IOException e) { - return null; - } - } -} \ No newline at end of file diff --git a/cn/src/main/java/org/cloudname/StrategyAll.java b/cn/src/main/java/org/cloudname/StrategyAll.java deleted file mode 100644 index 2bf6dcbb..00000000 --- a/cn/src/main/java/org/cloudname/StrategyAll.java +++ /dev/null @@ -1,35 +0,0 @@ -package org.cloudname; - -import java.util.List; - -/** - * A strategy that implements "all" and returns everything and does not change order. - * @author : dybdahl - */ -public class StrategyAll implements ResolverStrategy { - - /** - * Returns all the endpoints. - */ - @Override - public List<Endpoint> filter(List<Endpoint> endpoints) { - return endpoints; - } - - /** - * Doesn't change ordering of endpoints. - */ - @Override - public List<Endpoint> order(List<Endpoint> endpoints) { - return endpoints; - } - - /** - * The name of the strategy is "all". 
- */ - @Override - public String getName() { - return "all"; - } - -} diff --git a/cn/src/main/java/org/cloudname/StrategyAny.java b/cn/src/main/java/org/cloudname/StrategyAny.java deleted file mode 100644 index 76679a55..00000000 --- a/cn/src/main/java/org/cloudname/StrategyAny.java +++ /dev/null @@ -1,60 +0,0 @@ -package org.cloudname; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.SortedSet; - -/** - * A strategy that returns the first element of the sorted coordinates (by instance value) hashed with - * the time of this object creation. This is useful for returning the same endpoint in most cases even - * if an endpoint is removed or added. - * @author : dybdahl - */ -public class StrategyAny implements ResolverStrategy { - - // Some systems might not have nano seconds accuracy and we do not want zeros in the least significant - // numbers. - private int sortSeed = (int) System.nanoTime() / 1000; - - /** - * Returns a list of the first endpoint if any, else returns the empty list. - */ - @Override - public List<Endpoint> filter(List<Endpoint> endpoints) { - if (endpoints.size() > 0) { - List<Endpoint> retVal = new ArrayList<Endpoint>(); - retVal.add(endpoints.get(0)); - return retVal; - } - // Empty list. - return endpoints; - } - - /** - * We return a list that is sorted differently for different clients. In this way only a few - * clients are touched when an endpoint is added/removed. - */ - @Override - public List<Endpoint> order(List<Endpoint> endpoints) { - Collections.sort(endpoints, new Comparator<Endpoint>() { - @Override - public int compare(Endpoint endpointA, Endpoint endpointB) { - int instanceA = endpointA.getCoordinate().getInstance() ^ sortSeed; - int instanceB = endpointB.getCoordinate().getInstance() ^ sortSeed; - return (instanceA > instanceB ? -1 : (instanceA == instanceB ? 
0 : 1)); - } - }); - return endpoints; - } - - /** - * The name of the strategy is "any" - */ - @Override - public String getName() { - return "any"; - } -} diff --git a/cn/src/main/java/org/cloudname/zk/ClaimedCoordinate.java b/cn/src/main/java/org/cloudname/zk/ClaimedCoordinate.java deleted file mode 100644 index f4d8e133..00000000 --- a/cn/src/main/java/org/cloudname/zk/ClaimedCoordinate.java +++ /dev/null @@ -1,535 +0,0 @@ -package org.cloudname.zk; - -import org.apache.zookeeper.*; -import org.apache.zookeeper.data.Stat; -import org.cloudname.*; - -import java.io.IOException; - -import java.io.UnsupportedEncodingException; -import java.util.*; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Logger; - -/** - * This class keeps track of coordinate data and endpoints for a coordinate. It is notified about - * the state of ZooKeeper connection by implementing the ZkObjectHandler.ConnectionStateChanged. - * It implements the Watcher interface to track the specific path of the coordinate. - * This is useful for being notified if something happens to the coordinate (if it - * is overwritten etc). - * - * @author dybdahl - */ -public class ClaimedCoordinate implements Watcher, ZkObjectHandler.ConnectionStateChanged { - - /** - * True if we know that our state is in sync with zookeeper. - */ - private final AtomicBoolean isSynchronizedWithZooKeeper = new AtomicBoolean(false); - - /** - * The client of the class has to call start. This will flip this bit. - */ - private final AtomicBoolean started = new AtomicBoolean(false); - - /** - * The connection from client to ZooKeeper might go down. If it comes up again within some time - * window the server might think an ephemeral node should be alive. The client lib might think - * otherwise. If this flag is set, the class will eventually check version. 
- */ - private final AtomicBoolean checkVersion = new AtomicBoolean(false); - - /** - * We keep track of the last version so we know if we are in sync. We set a large value to make - * sure we do not accidentally overwrite an existing not owned coordinate. - */ - private int lastStatusVersion = Integer.MAX_VALUE; - - private final Object lastStatusVersionMonitor = new Object(); - - private static final Logger LOG = Logger.getLogger(ClaimedCoordinate.class.getName()); - - private final ZkObjectHandler.Client zkClient; - - /** - * The claimed coordinate. - */ - private final Coordinate coordinate; - - /** - * Status path of the coordinate. - */ - private final String path; - - /** - * This is needed to make sure that the first message about state is sent while - * other update messages are queued. - */ - private final Object callbacksMonitor = new Object(); - - /** - * The endpoints and the status of the coordinate is stored here. - */ - private final ZkCoordinateData zkCoordinateData = new ZkCoordinateData(); - - /** - * For running internal thread. - */ - private final ScheduledExecutorService scheduler = - Executors.newSingleThreadScheduledExecutor(); - - /** - * A list of the coordinate listeners that are registered for this coordinate. - */ - private final List<CoordinateListener> coordinateListenerList = - Collections.synchronizedList(new ArrayList<CoordinateListener>()); - - /** - * A list of tracked configs for this coordinate. - */ - private final List<TrackedConfig> trackedConfigList = - Collections.synchronizedList(new ArrayList<TrackedConfig>()); - - - /** - * This class implements the logic for handling callbacks from ZooKeeper on claim. - * In general we could just ignore errors since we have a time based retry mechanism. However, - * we want to notify clients, and we need to update the consistencyState. 
- */ - class ClaimCallback implements AsyncCallback.StringCallback { - - @Override - public void processResult( - int rawReturnCode, String notUsed, Object parent, String notUsed2) { - - KeeperException.Code returnCode = KeeperException.Code.get(rawReturnCode); - ClaimedCoordinate claimedCoordinate = (ClaimedCoordinate) parent; - LOG.fine("Claim callback with " + returnCode.name() + " " + claimedCoordinate.path - + " synched: " + isSynchronizedWithZooKeeper.get() + " thread: " + this); - switch (returnCode) { - // The claim was successful. This means that the node was created. We need to - // populate the status and endpoints. - case OK: - - // We should be the first one to write to the new node, or fail. - // This requires that the first version is 0, have not seen this documented - // but it should be a fair assumption and is verified by unit tests. - synchronized (lastStatusVersionMonitor) { - lastStatusVersion = 0; - } - - // We need to set this to synced or updateCoordinateData will complain. - // updateCoordinateData will set it to out-of-sync in case of problems. - isSynchronizedWithZooKeeper.set(true); - - - try { - registerWatcher(); - } catch (CloudnameException e) { - LOG.fine("Failed register watcher after claim. Going to state out of sync: " - + e.getMessage()); - - isSynchronizedWithZooKeeper.set(false); - return; - - } catch (InterruptedException e) { - - LOG.fine("Interrupted while setting up new watcher. Going to state out of sync."); - isSynchronizedWithZooKeeper.set(false); - return; - - } - // No exceptions, let's celebrate with a log message. - LOG.info("Claim processed ok, path: " + path); - claimedCoordinate.sendEventToCoordinateListener( - CoordinateListener.Event.COORDINATE_OK, "claimed"); - return; - - case NODEEXISTS: - // Someone has already claimed the coordinate. It might have been us in a - // different thread. If we already have claimed the coordinate then don't care. - // Else notify the client. 
If everything is fine, this is not a true negative, - // so ignore it. It might happen if two attempts to tryClaim the coordinate run - // in parallel. - if (isSynchronizedWithZooKeeper.get() && started.get()) { - LOG.fine("Everything is fine, ignoring NODEEXISTS message, path: " + path); - return; - } - - LOG.info("Claimed fail, node already exists, will retry: " + path); - claimedCoordinate.sendEventToCoordinateListener( - CoordinateListener.Event.NOT_OWNER, "Node already exists."); - LOG.info("isSynchronizedWithZooKeeper: " + isSynchronizedWithZooKeeper.get()); - checkVersion.set(true); - return; - case NONODE: - LOG.info("Could not claim due to missing coordinate, path: " + path); - claimedCoordinate.sendEventToCoordinateListener( - CoordinateListener.Event.NOT_OWNER, - "No node on claiming coordinate: " + returnCode.name()); - return; - - default: - // Random problem, report the problem to the client. - claimedCoordinate.sendEventToCoordinateListener( - CoordinateListener.Event.NO_CONNECTION_TO_STORAGE, - "Could not reclaim coordinate. Return code: " + returnCode.name()); - return; - } - } - } - - - private class ResolveProblems implements Runnable { - @Override - public void run() { - if (isSynchronizedWithZooKeeper.get() || ! zkClient.isConnected() || - ! started.get()) { - - return; - } - if (checkVersion.getAndSet(false)) { - try { - synchronized (lastStatusVersionMonitor) { - final Stat stat = zkClient.getZookeeper().exists(path, null); - if (stat != null && zkClient.getZookeeper().getSessionId() == - stat.getEphemeralOwner()) { - zkClient.getZookeeper().delete(path, lastStatusVersion); - } - } - } catch (InterruptedException e) { - LOG.info("Interrupted"); - checkVersion.set(true); - } catch (KeeperException e) { - LOG.info("exception "+ e.getMessage()); - checkVersion.set(true); - } - - } - LOG.info("We are out-of-sync, have a zookeeper connection, and are started, trying reclaim: " - + path + this); - tryClaim(); - } - } - - /** - * Constructor. 
- * @param coordinate The coordinate that is claimed. - * @param zkClient for getting access to ZooKeeper. - */ - public ClaimedCoordinate(final Coordinate coordinate, final ZkObjectHandler.Client zkClient) { - this.coordinate = coordinate; - path = ZkCoordinatePath.getStatusPath(coordinate); - this.zkClient = zkClient; - } - - /** - * Claims a coordinate. To know if it was successful or not you need to register a listener. - * @return this. - */ - public ClaimedCoordinate start() { - zkClient.registerListener(this); - started.set(true); - final long periodicDelayMs = 2000; - scheduler.scheduleWithFixedDelay(new ResolveProblems(), 1 /* initial delay ms */, - periodicDelayMs, TimeUnit.MILLISECONDS); - return this; - } - - /** - * Callbacks from zkClient - */ - @Override - public void connectionUp() { - isSynchronizedWithZooKeeper.set(false); - } - - /** - * Callbacks from zkClient - */ - @Override - public void connectionDown() { - List<CoordinateListener> coordinateListenerListCopy = new ArrayList<CoordinateListener>(); - synchronized (coordinateListenerList) { - coordinateListenerListCopy.addAll(coordinateListenerList); - } - for (CoordinateListener coordinateListener : coordinateListenerListCopy) { - coordinateListener.onCoordinateEvent( - CoordinateListener.Event.NO_CONNECTION_TO_STORAGE, "down"); - } - isSynchronizedWithZooKeeper.set(false); - } - - @Override - public void shutDown() { - scheduler.shutdown(); - } - - /** - * Updates the ServiceStatus and persists it. Only allowed if we claimed the coordinate. - * @param status The new value for serviceStatus. - */ - public void updateStatus(final ServiceStatus status) - throws CloudnameException, CoordinateMissingException { - zkCoordinateData.setStatus(status); - updateCoordinateData(); - } - - /** - * Adds new endpoints and persist them. Requires that this instance owns the tryClaim to the - * coordinate. - * @param newEndpoints endpoints to be added. 
- */ - public void putEndpoints(final List<Endpoint> newEndpoints) - throws CloudnameException, CoordinateMissingException { - zkCoordinateData.putEndpoints(newEndpoints); - updateCoordinateData(); - } - - /** - * Remove endpoints and persist it. Requires that this instance owns the tryClaim to the - * coordinate. - * @param names names of endpoints to be removed. - */ - public void removeEndpoints(final List<String> names) - throws CloudnameException, CoordinateMissingException { - zkCoordinateData.removeEndpoints(names); - updateCoordinateData(); - } - - /** - * Release the tryClaim of the coordinate. It means that nobody owns the coordinate anymore. - * Requires that that this instance owns the tryClaim to the coordinate. - */ - public void releaseClaim() throws CloudnameException { - scheduler.shutdown(); - zkClient.deregisterListener(this); - - while (true) { - final TrackedConfig config; - synchronized (trackedConfigList) { - if (trackedConfigList.isEmpty()) { - break; - } - config = trackedConfigList.remove(0); - } - config.stop(); - } - - sendEventToCoordinateListener( - CoordinateListener.Event.NOT_OWNER, "Released claim of coordinate"); - - synchronized (lastStatusVersionMonitor) { - try { - zkClient.getZookeeper().delete(path, lastStatusVersion); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } catch (KeeperException e) { - throw new CloudnameException(e); - } - } - } - - - /** - * Creates a string for debugging etc - * @return serialized version of the instance data. - */ - public synchronized String toString() { - return zkCoordinateData.snapshot().toString(); - } - - /** - * Registers a coordinatelistener that will receive events when there are changes to the status - * node. Don't do any heavy lifting in the callback and don't call cloudname from the callback - * as this might create a deadlock. 
- * @param coordinateListener - */ - public void registerCoordinateListener(final CoordinateListener coordinateListener) { - - String message = "New listener added, resending current state."; - synchronized (callbacksMonitor) { - coordinateListenerList.add(coordinateListener); - if (isSynchronizedWithZooKeeper.get()) { - coordinateListener.onCoordinateEvent( - CoordinateListener.Event.COORDINATE_OK, message); - } - } - } - - - public void deregisterCoordinateListener(final CoordinateListener coordinateListener) { - coordinateListenerList.remove(coordinateListener); - } - - /** - * Registers a configlistener that will receive events when there are changes to the config node. - * Don't do any heavy lifting in the callback and don't call cloudname from the callback as - * this might create a deadlock. - * @param trackedConfig - */ - public void registerTrackedConfig(final TrackedConfig trackedConfig) { - trackedConfigList.add(trackedConfig); - } - - /** - * Handles event from ZooKeeper for this coordinate. - * @param event - */ - @Override public void process(WatchedEvent event) { - LOG.info("Got an event from ZooKeeper " + event.toString()); - synchronized (lastStatusVersionMonitor) { - switch (event.getType()) { - - case None: - switch (event.getState()) { - case SyncConnected: - break; - case Disconnected: - case AuthFailed: - case Expired: - default: - // If we lost connection, we don't attempt to register another watcher as - // this might be blocking forever. Parent will try to reconnect (reclaim) - // later. - isSynchronizedWithZooKeeper.set(false); - sendEventToCoordinateListener( - CoordinateListener.Event.NO_CONNECTION_TO_STORAGE, - event.toString()); - - return; - } - return; - - case NodeDeleted: - // If node is deleted, we have no node to place a new watcher so we stop watching. 
- isSynchronizedWithZooKeeper.set(false); - sendEventToCoordinateListener(CoordinateListener.Event.NOT_OWNER, event.toString()); - return; - - case NodeDataChanged: - LOG.fine("Node data changed, check versions."); - boolean verifiedSynchronized = false; - try { - final Stat stat = zkClient.getZookeeper().exists(path, this); - if (stat == null) { - LOG.info("Could not stat path, setting out of synch, will retry claim"); - } else { - LOG.fine("Previous version is " + lastStatusVersion + " now is " - + stat.getVersion()); - if (stat.getVersion() != lastStatusVersion) { - LOG.fine("Version mismatch, sending out of sync."); - } else { - verifiedSynchronized = true; - } - } - } catch (KeeperException e) { - LOG.fine("Problems with zookeeper, sending consistencyState out of sync: " - + e.getMessage()); - } catch (InterruptedException e) { - LOG.fine("Got interrupted: " + e.getMessage()); - return; - } finally { - isSynchronizedWithZooKeeper.set(verifiedSynchronized); - } - - if (verifiedSynchronized) { - sendEventToCoordinateListener( - CoordinateListener.Event.COORDINATE_OUT_OF_SYNC, event.toString()); - } - return; - - case NodeChildrenChanged: - case NodeCreated: - // This should not happen.. - isSynchronizedWithZooKeeper.set(false); - sendEventToCoordinateListener( - CoordinateListener.Event.COORDINATE_OUT_OF_SYNC, event.toString()); - return; - } - } - } - - private void tryClaim() { - try { - zkClient.getZookeeper().create( - path, zkCoordinateData.snapshot().serialize().getBytes(Util.CHARSET_NAME), - ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, new ClaimCallback(), this); - } catch (IOException e) { - LOG.info("Got IO exception on claim with new ZooKeeper instance " + e.getMessage()); - } - } - - - /** - * Sends an event too all coordinate listeners. Note that the event is sent from this thread so - * if the callback code does the wrong calls, deadlocks might occur. 
- * @param event - * @param message - */ - private void sendEventToCoordinateListener( - final CoordinateListener.Event event, final String message) { - synchronized (callbacksMonitor) { - LOG.fine("Event " + event.name() + " " + message); - List<CoordinateListener> coordinateListenerListCopy = - new ArrayList<CoordinateListener>(); - synchronized (coordinateListenerList) { - coordinateListenerListCopy.addAll(coordinateListenerList); - } - for (CoordinateListener listener : coordinateListenerListCopy) { - listener.onCoordinateEvent(event, message); - } - } - } - - /** - * Register a watcher for the coordinate. - */ - private void registerWatcher() throws CloudnameException, InterruptedException { - LOG.fine("Register watcher for ZooKeeper.."); - try { - zkClient.getZookeeper().exists(path, this); - } catch (KeeperException e) { - throw new CloudnameException(e); - } - } - - /** - * Creates the serialized value of the object and stores this in ZooKeeper under the path. - * It updates the lastStatusVersion. It does not set a watcher for the path. - */ - private void updateCoordinateData() throws CoordinateMissingException, CloudnameException { - if (! started.get()) { - throw new IllegalStateException("Not started."); - } - - if (! 
zkClient.isConnected()) { - throw new CloudnameException("No proper connection with zookeeper."); - } - - synchronized (lastStatusVersionMonitor) { - try { - Stat stat = zkClient.getZookeeper().setData(path, - zkCoordinateData.snapshot().serialize().getBytes(Util.CHARSET_NAME), - lastStatusVersion); - LOG.fine("Updated coordinate, latest version is " + stat.getVersion()); - lastStatusVersion = stat.getVersion(); - } catch (KeeperException.NoNodeException e) { - throw new CoordinateMissingException("Coordinate does not exist " + path); - } catch (KeeperException e) { - throw new CloudnameException("ZooKeeper errror in updateCoordinateData: " - + e.getMessage(), e); - } catch (UnsupportedEncodingException e) { - throw new CloudnameException(e); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } catch (IOException e) { - throw new CloudnameException(e); - } - } - - } -} diff --git a/cn/src/main/java/org/cloudname/zk/DynamicExpression.java b/cn/src/main/java/org/cloudname/zk/DynamicExpression.java deleted file mode 100644 index 3811e640..00000000 --- a/cn/src/main/java/org/cloudname/zk/DynamicExpression.java +++ /dev/null @@ -1,379 +0,0 @@ -package org.cloudname.zk; - -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.cloudname.CloudnameException; -import org.cloudname.Endpoint; -import org.cloudname.Resolver; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.Executors; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * Class that is capable of tracking an expression. An expression can include many nodes. 
- * The number of nodes is dynamic and can change over time. - * For now, the implementation is rather simple. For single endpoints it does use feedback from - * ZooKeeper watcher events. For keeping track of new nodes, it does a scan on regular intervals. - * @author dybdahl - */ -class DynamicExpression implements Watcher, TrackedCoordinate.ExpressionResolverNotify, - ZkObjectHandler.ConnectionStateChanged { - - /** - * Keeps track of what picture (what an expression has resolved to) is sent to the user so that - * we know when to send new events. - */ - private final Map<String, Endpoint> clientPicture = new HashMap<String, Endpoint>(); - - /** - * Where to notify changes. - */ - private final Resolver.ResolverListener clientCallback; - - /** - * This is the expression to dynamically resolved represented as ZkResolver.Parameters. - */ - private final ZkResolver.Parameters parameters; - - /** - * When ZooKeeper reports an error about an path, when to try to read it again. - */ - private final long RETRY_INTERVAL_ZOOKEEPER_ERROR_MS = 30000; // 30 seconds - - /** - * We wait a bit after a node has changed because in many cases there might be several updates, - * e.g. an application registers several endpoints, each causing an update. - */ - private final long REFRESH_NODE_AFTER_CHANGE_MS = 2000; // two seconds - - /** - * Does a full scan with this interval. Non-final so unit test can run faster. - */ - protected static long TIME_BETWEEN_NODE_SCANNING_MS = 1 * 60 * 1000; // one minute - - /** - * A Map with all the coordinate we care about for now. - */ - final private Map<String, TrackedCoordinate> coordinateByPath = - new HashMap<String, TrackedCoordinate>(); - - /** - * We always add some random noise to when to do things so not all servers fire at the same time - * against - * ZooKeeper. 
- */ - private final Random random = new Random(); - - private static final Logger log = Logger.getLogger(DynamicExpression.class.getName()); - - private boolean stopped = false; - - private final ZkResolver zkResolver; - - private final ZkObjectHandler.Client zkClient; - - private final ScheduledExecutorService scheduler = - Executors.newSingleThreadScheduledExecutor(); - - private final Object instanceLock = new Object(); - - /** - * Start getting notified about changes to expression. - * @param expression Coordinate expression. - * @param clientCallback called on changes and initially. - */ - public DynamicExpression( - final String expression, - final Resolver.ResolverListener clientCallback, - final ZkResolver zkResolver, - final ZkObjectHandler.Client zkClient) { - this.clientCallback = clientCallback; - this.parameters = new ZkResolver.Parameters(expression); - this.zkResolver = zkResolver; - this.zkClient = zkClient; - } - - public void start() { - zkClient.registerListener(this); - scheduler.scheduleWithFixedDelay(new NodeScanner(""), 1 /* initial delay ms */, - TIME_BETWEEN_NODE_SCANNING_MS, TimeUnit.MILLISECONDS); - } - - /** - * Stop receiving callbacks about coordinate. 
- */ - public void stop() { - scheduler.shutdown(); - - synchronized (instanceLock) { - stopped = true; - for (TrackedCoordinate trackedCoordinate : coordinateByPath.values()) { - trackedCoordinate.stop(); - } - coordinateByPath.clear(); - } - } - - private void scheduleRefresh(String path, long delayMs) { - try { - scheduler.schedule(new NodeScanner(path), delayMs, TimeUnit.MILLISECONDS); - } catch (RejectedExecutionException e) { - if (scheduler.isShutdown()) { - return; - } - log.log(Level.SEVERE, "Got exception while scheduling new refresh", e); - } - } - - @Override - public void connectionUp() { - } - - @Override - public void connectionDown() { - } - - @Override - public void shutDown() { - scheduler.shutdown(); - } - - /** - * The method will try to resolve the expression and find new nodes. - */ - private class NodeScanner implements Runnable { - final String path; - - public NodeScanner(final String path) { - this.path = path; - } - - @Override - public void run() { - if (path.isEmpty()) { - resolve(); - } else { - refreshPathWithWatcher(path); - } - notifyClient(); - } - } - - /** - * Callback from zookeeper watcher. - */ - @Override - public void process(WatchedEvent watchedEvent) { - synchronized (instanceLock) { - if (stopped) { - return; - } - } - String path = watchedEvent.getPath(); - Event.KeeperState state = watchedEvent.getState(); - Event.EventType type = watchedEvent.getType(); - - switch (state) { - case Expired: - case AuthFailed: - case Disconnected: - // Something bad happened to the path, try again later. 
- scheduleRefresh(path, RETRY_INTERVAL_ZOOKEEPER_ERROR_MS); - break; - } - switch (type) { - case NodeChildrenChanged: - case None: - case NodeCreated: - scheduleRefresh(path, REFRESH_NODE_AFTER_CHANGE_MS); - break; - case NodeDeleted: - synchronized (instanceLock) { - coordinateByPath.remove(path); - notifyClient(); - return; - } - case NodeDataChanged: - refreshPathWithWatcher(path); - break; - } - - } - - /** - * Implements interface TrackedCoordinate.ExpressionResolverNotify - */ - @Override - public void nodeDead(final String path) { - synchronized (instanceLock) { - TrackedCoordinate trackedCoordinate = coordinateByPath.remove(path); - if (trackedCoordinate == null) { - return; - } - trackedCoordinate.stop(); - // Triggers a new scan, and potential client updates. - scheduleRefresh("" /** scan for all nodes */, 50 /* ms*/); - } - } - - /** - * Implements interface TrackedCoordinate.ExpressionResolverNotify - */ - @Override - public void stateChanged(final String path) { - // Something happened to a path, schedule a refetch. - scheduleRefresh(path, 50); - } - - private void resolve() { - final List<Endpoint> endpoints; - try { - endpoints = zkResolver.resolve(parameters.getExpression()); - } catch (CloudnameException e) { - log.warning("Exception from cloudname: " + e.toString()); - return; - } - - final Set<String> validEndpointsPaths = new HashSet<String>(); - - for (Endpoint endpoint : endpoints) { - - final String statusPath = ZkCoordinatePath.getStatusPath(endpoint.getCoordinate()); - validEndpointsPaths.add(statusPath); - - final TrackedCoordinate trackedCoordinate; - - synchronized (instanceLock) { - - // If already discovered, do nothing. - if (coordinateByPath.containsKey(statusPath)) { - continue; - } - trackedCoordinate = new TrackedCoordinate(this, statusPath, zkClient); - coordinateByPath.put(statusPath, trackedCoordinate); - } - // Tracked coordinate has to be in coordinateByPath before start is called, or events - // gets lost. 
- trackedCoordinate.start(); - try { - trackedCoordinate.waitForFirstData(); - } catch (InterruptedException e) { - log.log(Level.SEVERE, "Got interrupt while waiting for data.", e); - return; - } - } - - // Remove tracked coordinates that does not resolve. - synchronized (instanceLock) { - for (Iterator<Map.Entry<String, TrackedCoordinate> > it = - coordinateByPath.entrySet().iterator(); - it.hasNext(); /* nop */) { - Map.Entry<String, TrackedCoordinate> entry = it.next(); - - if (! validEndpointsPaths.contains(entry.getKey())) { - log.info("Killing endpoint " + entry.getKey() + ": No longer resolved."); - entry.getValue().stop(); - it.remove(); - } - } - } - } - - private String getEndpointKey(final Endpoint endpoint) { - return endpoint.getCoordinate().asString() + "@" + endpoint.getName(); - } - - - private List<Endpoint> getNewEndpoints() { - final List<Endpoint> newEndpoints = new ArrayList<Endpoint>(); - for (TrackedCoordinate trackedCoordinate : coordinateByPath.values()) { - if (trackedCoordinate.getCoordinatedata() != null) { - ZkResolver.addEndpoints( - trackedCoordinate.getCoordinatedata(), - newEndpoints, parameters.getEndpointName()); - } - } - return newEndpoints; - } - - private void notifyClient() { - synchronized (instanceLock) { - if (stopped) { - return; - } - } - // First generate a fresh list of endpoints. - final List<Endpoint> newEndpoints = getNewEndpoints(); - - synchronized (instanceLock) { - final Map<String, Endpoint> newEndpointsByName = new HashMap<String, Endpoint>(); - for (final Endpoint endpoint : newEndpoints) { - newEndpointsByName.put(getEndpointKey(endpoint), endpoint); - } - final Iterator<Map.Entry<String, Endpoint>> it = clientPicture.entrySet().iterator(); - while (it.hasNext()) { - - final Map.Entry<String, Endpoint> endpointEntry = it.next(); - - final String key = endpointEntry.getKey(); - if (! 
newEndpointsByName.containsKey(key)) { - it.remove(); - clientCallback.endpointEvent( - Resolver.ResolverListener.Event.REMOVED_ENDPOINT, - endpointEntry.getValue()); - } - } - for (final Endpoint endpoint : newEndpoints) { - final String key = getEndpointKey(endpoint); - - if (! clientPicture.containsKey(key)) { - clientCallback.endpointEvent( - Resolver.ResolverListener.Event.NEW_ENDPOINT, endpoint); - clientPicture.put(key, endpoint); - continue; - } - final Endpoint clientEndpoint = clientPicture.get(key); - if (endpoint.equals(clientEndpoint)) { continue; } - if (endpoint.getHost().equals(clientEndpoint.getHost()) && - endpoint.getName().equals(clientEndpoint.getName()) && - endpoint.getPort() == clientEndpoint.getPort() && - endpoint.getProtocol().equals(clientEndpoint.getProtocol())) { - clientCallback.endpointEvent( - Resolver.ResolverListener.Event.MODIFIED_ENDPOINT_DATA, endpoint); - clientPicture.put(key, endpoint); - continue; - } - clientCallback.endpointEvent( - Resolver.ResolverListener.Event.REMOVED_ENDPOINT, - clientPicture.get(key)); - clientCallback.endpointEvent( - Resolver.ResolverListener.Event.NEW_ENDPOINT, endpoint); - clientPicture.put(key, endpoint); - } - } - } - - private void refreshPathWithWatcher(String path) { - synchronized (instanceLock) { - TrackedCoordinate trackedCoordinate = coordinateByPath.get(path); - if (trackedCoordinate == null) { - // Endpoint has been removed while waiting for refresh. 
- return; - } - trackedCoordinate.refreshAsync(); - } - } - -} \ No newline at end of file diff --git a/cn/src/main/java/org/cloudname/zk/TrackedConfig.java b/cn/src/main/java/org/cloudname/zk/TrackedConfig.java deleted file mode 100644 index aacd1908..00000000 --- a/cn/src/main/java/org/cloudname/zk/TrackedConfig.java +++ /dev/null @@ -1,220 +0,0 @@ -package org.cloudname.zk; - -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.data.Stat; -import org.cloudname.CloudnameException; -import org.cloudname.ConfigListener; - -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Logger; - - -/** - * This class keeps track of config for a coordinate. - * - * @author dybdahl - */ -public class TrackedConfig implements Watcher, ZkObjectHandler.ConnectionStateChanged { - - private String configData = null; - private final Object configDataMonitor = new Object(); - private final ConfigListener configListener; - - private static final Logger log = Logger.getLogger(TrackedConfig.class.getName()); - - private final String path; - - private final AtomicBoolean isSynchronizedWithZookeeper = new AtomicBoolean(false); - - private final ZkObjectHandler.Client zkClient; - private final ScheduledExecutorService scheduler = - Executors.newSingleThreadScheduledExecutor(); - /** - * Constructor, the ZooKeeper instances is retrieved from ZkObjectHandler.Client, - * so we won't get it until the client reports we have a Zk Instance in the handler. - * @param path is the path of the configuration of the coordinate. 
- */ - public TrackedConfig( - String path, ConfigListener configListener, ZkObjectHandler.Client zkClient) { - this.path = path; - this.configListener = configListener; - this.zkClient = zkClient; - } - - - @Override - public void connectionUp() { - } - - @Override - public void connectionDown() { - isSynchronizedWithZookeeper.set(false); - } - - @Override - public void shutDown() { - scheduler.shutdown(); - } - - /** - * Starts tracking the config. - */ - public void start() { - zkClient.registerListener(this); - final long periodicDelayMs = 2000; - scheduler.scheduleWithFixedDelay(new ReloadConfigOnErrors(), 1 /* initial delay ms */, - periodicDelayMs, TimeUnit.MILLISECONDS); - } - - /** - * Stops the tracker. - */ - public void stop() { - scheduler.shutdown(); - zkClient.deregisterListener(this); - } - - /** - * If connection to zookeeper is away, we need to reload because messages might have been - * lost. This class has a method for checking this. - */ - private class ReloadConfigOnErrors implements Runnable { - @Override - public void run() { - - if (isSynchronizedWithZookeeper.get()) - return; - - try { - if (refreshConfigData()) { - configListener.onConfigEvent(ConfigListener.Event.UPDATED, getConfigData()); - } - } catch (CloudnameException e) { - // No worries, we try again later - } - } - } - - /** - * Returns current config. - * @return config - */ - public String getConfigData() { - synchronized (configDataMonitor) { - return configData; - } - } - - /** - * Creates a string for debugging etc - * @return serialized version of the instance data. - */ - public String toString() { - return "Config: " + getConfigData(); - } - - - /** - * Handles event from ZooKeeper for this coordinate. 
- * @param event Event to handle - */ - @Override public void process(WatchedEvent event) { - log.severe("Got an event from ZooKeeper " + event.toString() + " path: " + path); - - switch (event.getType()) { - case None: - switch (event.getState()) { - case SyncConnected: - break; - case Disconnected: - case AuthFailed: - case Expired: - default: - isSynchronizedWithZookeeper.set(false); - // If we lost connection, we don't attempt to register another watcher as - // this might be blocking forever. Parent might try to reconnect. - return; - } - break; - case NodeDeleted: - synchronized (configDataMonitor) { - isSynchronizedWithZookeeper.set(false); - configData = null; - } - configListener.onConfigEvent(ConfigListener.Event.DELETED, ""); - return; - case NodeDataChanged: - isSynchronizedWithZookeeper.set(false); - return; - case NodeChildrenChanged: - case NodeCreated: - break; - } - // We are only interested in registering a watcher in a few cases. E.g. if the event is lost - // connection, registerWatcher does not make sense as it is blocking. In NodeDataChanged - // above, a watcher is registerred in refreshConfigData(). - try { - registerWatcher(); - } catch (CloudnameException e) { - log.info("Got cloudname exception: " + e.getMessage()); - return; - } catch (InterruptedException e) { - log.info("Got interrupted exception: " + e.getMessage()); - return; - } - } - - - /** - * Loads the config from ZooKeeper. In case of failure, we keep the old data. - * - * @return Returns true if data has changed. - */ - private boolean refreshConfigData() throws CloudnameException { - if (! 
zkClient.isConnected()) { - throw new CloudnameException("No connection to storage."); - } - synchronized (configDataMonitor) { - - String oldConfig = configData; - Stat stat = new Stat(); - try { - byte[] data; - - data = zkClient.getZookeeper().getData(path, this, stat); - if (data == null) { - configData = ""; - } else { - configData = new String(data, Util.CHARSET_NAME); - } - isSynchronizedWithZookeeper.set(true); - return oldConfig == null || ! oldConfig.equals(configData); - } catch (KeeperException e) { - throw new CloudnameException(e); - } catch (UnsupportedEncodingException e) { - throw new CloudnameException(e); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } catch (IOException e) { - throw new CloudnameException(e); - } - } - } - - private void registerWatcher() throws CloudnameException, InterruptedException { - try { - zkClient.getZookeeper().exists(path, this); - - } catch (KeeperException e) { - throw new CloudnameException(e); - } - } - -} \ No newline at end of file diff --git a/cn/src/main/java/org/cloudname/zk/TrackedCoordinate.java b/cn/src/main/java/org/cloudname/zk/TrackedCoordinate.java deleted file mode 100644 index 37b314c0..00000000 --- a/cn/src/main/java/org/cloudname/zk/TrackedCoordinate.java +++ /dev/null @@ -1,220 +0,0 @@ -package org.cloudname.zk; - -import org.apache.zookeeper.*; -import org.cloudname.*; - -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * This class keeps track of serviceStatus and endpoints for a coordinate. - * - * @author dybdahl - */ -public class TrackedCoordinate implements Watcher, ZkObjectHandler.ConnectionStateChanged { - - - /** - * The client can implement this to get notified on changes. 
- */ - public interface ExpressionResolverNotify { - /** - * This is called when the state has changed, it might have become unavailable. - * @param statusPath path of the coordinate in zookeeper. - */ - void stateChanged(final String statusPath); - - /** - * This is called when node is deleted, or it can not be read. - * @param statusPath path of the coordinate in zookeeper. - */ - void nodeDead(final String statusPath); - } - - private ZkCoordinateData.Snapshot coordinateData = null; - private final Object coordinateDataMonitor = new Object(); - - private static final Logger LOG = Logger.getLogger(TrackedCoordinate.class.getName()); - private final String path; - private final ExpressionResolverNotify client; - private final AtomicBoolean isSynchronizedWithZookeeper = new AtomicBoolean(false); - private final ZkObjectHandler.Client zkClient; - - private final ScheduledExecutorService scheduler = - Executors.newSingleThreadScheduledExecutor(); - - private final CountDownLatch firstRound = new CountDownLatch(1); - /** - * Constructor, the ZooKeeper instances is retrieved from implementing the ZkUserInterface so - * the object is not ready to be used before the ZooKeeper instance is received. - * @param path is the path of the status of the coordinate. - */ - public TrackedCoordinate( - final ExpressionResolverNotify client, final String path, - final ZkObjectHandler.Client zkClient) { - LOG.finest("Tracking coordinate with path " + path); - this.path = path; - this.client = client; - this.zkClient = zkClient; - } - - // Implementation of ZkObjectHandler.ConnectionStateChanged. - @Override - public void connectionUp() { - } - - // Implementation of ZkObjectHandler.ConnectionStateChanged. - @Override - public void connectionDown() { - isSynchronizedWithZookeeper.set(false); - } - - @Override - public void shutDown() { - scheduler.shutdown(); - } - - /** - * Signalize that the class should reload its data. 
- */ - public void refreshAsync() { - isSynchronizedWithZookeeper.set(false); - } - - public void start() { - zkClient.registerListener(this); - final long periodicDelayMs = 2000; - scheduler.scheduleWithFixedDelay(new ReloadCoordinateData(), 1 /* initial delay ms */, - periodicDelayMs, TimeUnit.MILLISECONDS); - } - - public void stop() { - scheduler.shutdown(); - zkClient.deregisterListener(this); - } - - public void waitForFirstData() throws InterruptedException { - firstRound.await(); - } - - - - /** - * This class handles reloading new data from zookeeper if we are out of synch. - */ - class ReloadCoordinateData implements Runnable { - @Override - public void run() { - if (! isSynchronizedWithZookeeper.getAndSet(true)) { return; } - try { - refreshCoordinateData(); - } catch (CloudnameException e) { - LOG.log(Level.INFO, "exception on reloading coordinate data.", e); - isSynchronizedWithZookeeper.set(false); - } - firstRound.countDown(); - } - } - - - public ZkCoordinateData.Snapshot getCoordinatedata() { - synchronized (coordinateDataMonitor) { - return coordinateData; - } - } - - - /** - * Creates a string for debugging etc - * @return serialized version of the instance data. - */ - public String toString() { - synchronized (coordinateDataMonitor) { - return coordinateData.toString(); - } - } - - - /** - * Handles event from ZooKeeper for this coordinate. - * @param event Event to handle - */ - @Override public void process(WatchedEvent event) { - LOG.fine("Got an event from ZooKeeper " + event.toString() + " path: " + path); - - switch (event.getType()) { - case None: - switch (event.getState()) { - case SyncConnected: - break; - case Disconnected: - case AuthFailed: - case Expired: - default: - // If we lost connection, we don't attempt to register another watcher as - // this might be blocking forever. Parent might try to reconnect. 
- return; - } - break; - case NodeDeleted: - synchronized (coordinateDataMonitor) { - coordinateData = new ZkCoordinateData().snapshot(); - } - client.nodeDead(path); - return; - case NodeDataChanged: - isSynchronizedWithZookeeper.set(false); - return; - case NodeChildrenChanged: - case NodeCreated: - break; - } - try { - registerWatcher(); - } catch (CloudnameException e) { - LOG.log(Level.INFO, "Got cloudname exception.", e); - } catch (InterruptedException e) { - LOG.log(Level.INFO, "Got interrupted exception.", e); - } - } - - - /** - * Loads the coordinate from ZooKeeper. In case of failure, we keep the old data. - * Notifies the client if state changes. - */ - private void refreshCoordinateData() throws CloudnameException { - - if (! zkClient.isConnected()) { - throw new CloudnameException("No connection to storage."); - } - synchronized (coordinateDataMonitor) { - String oldDataSerialized = (null == coordinateData) ? "" : coordinateData.serialize(); - try { - coordinateData = ZkCoordinateData.loadCoordinateData( - path, zkClient.getZookeeper(), this).snapshot(); - } catch (CloudnameException e) { - client.nodeDead(path); - LOG.log(Level.FINER, "Exception while reading path " + path, e); - return; - } - isSynchronizedWithZookeeper.set(true); - if (! 
oldDataSerialized.equals(coordinateData.toString())) { - client.stateChanged(path); - } - } - } - - private void registerWatcher() throws CloudnameException, InterruptedException { - try { - zkClient.getZookeeper().exists(path, this); - } catch (KeeperException e) { - throw new CloudnameException(e); - } - } -} \ No newline at end of file diff --git a/cn/src/main/java/org/cloudname/zk/Util.java b/cn/src/main/java/org/cloudname/zk/Util.java deleted file mode 100644 index 6bcd21e0..00000000 --- a/cn/src/main/java/org/cloudname/zk/Util.java +++ /dev/null @@ -1,177 +0,0 @@ -package org.cloudname.zk; - -import org.cloudname.CloudnameException; - -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.data.ACL; -import org.apache.zookeeper.KeeperException; -import org.cloudname.CoordinateMissingException; - -import java.util.ArrayList; -import java.util.List; - -/** - * Various ZooKeeper utilities. - * - * @author borud - */ -public final class Util { - // Constants - public static final String CHARSET_NAME = "UTF-8"; - - /** - * Create a path in ZooKeeper. We just start at the top and work - * our way down. Nodes that exist will throw an exception but we - * will just ignore those. The result should be a path consisting - * of ZooKeeper nodes with the names specified by the path and - * with their data element set to null. - * @throws CloudnameException if problems talking with ZooKeeper. - */ - public static void mkdir(final ZooKeeper zk, String path, final List<ACL> acl) - throws CloudnameException, InterruptedException { - if (path.startsWith("/")) { - path = path.substring(1); - } - - String[] parts = path.split("/"); - - String createPath = ""; - for (String p : parts) { - // Sonar will complain about this. Usually it would be - // right but in this case it isn't. 
- createPath += "/" + p; - try { - zk.create(createPath, null, acl, CreateMode.PERSISTENT); - } catch (KeeperException.NodeExistsException e) { - // This is okay. Ignore. - } catch (KeeperException e) { - throw new CloudnameException(e); - } - } - } - - /** - * Lists sub nodes of a path in a ZooKeeper instance. - * @param path starts from this path - * @param nodeList put sub-nodes in this list - */ - public static void listRecursively( - final ZooKeeper zk, final String path, final List<String> nodeList) - throws CloudnameException, InterruptedException { - - List<String> children = null; - try { - children = zk.getChildren(path, false); - } catch (KeeperException e) { - throw new CloudnameException(e); - } - if (children.size() == 0) { - nodeList.add(path); - return; - } - for (String childPath : children) { - listRecursively(zk, path + "/" +childPath, nodeList); - } - } - - /** - * Figures out if there are sub-nodes under the path in a ZooKeeper instance. - * @return true if the node exists and has children. - * @throws CoordinateMissingException if the path does not exist in ZooKeeper. - */ - public static boolean hasChildren(final ZooKeeper zk, final String path) - throws CloudnameException, CoordinateMissingException, InterruptedException { - if (! exist(zk, path)) { - throw new CoordinateMissingException("Could not get children due to non-existing path " - + path); - } - List<String> children = null; - try { - children = zk.getChildren(path, false); - } catch (KeeperException e) { - throw new CloudnameException(e); - } - return ((children != null) && (children.size() > 0)); - } - - /** - * Figures out if a path exists in a ZooKeeper instance. - * @throws CloudnameException if there are problems taking to the ZooKeeper instance. - * @return true if the path exists. 
- */ - public static boolean exist(final ZooKeeper zk, final String path) - throws CloudnameException, InterruptedException { - try { - return zk.exists(path, false) != null; - } catch (KeeperException e) { - throw new CloudnameException(e); - } - } - - /** - * Returns the version of the path. - * @param zk - * @param path - * @return version number - */ - public static int getVersionForDeletion(final ZooKeeper zk, final String path) - throws CloudnameException, InterruptedException { - - try { - int version = zk.exists(path, false).getVersion(); - if (version < 0) { - throw new CloudnameException( - new RuntimeException("Got negative version for path " + path)); - } - return version; - } catch (KeeperException e) { - throw new CloudnameException(e); - } - } - - /** - * Deletes nodes from a path from the right to the left. - * @param zk - * @param path to be deleted - * @param keepMinLevels is the minimum number of levels (depths) to keep in the path. - * @return the number of deleted levels. - */ - public static int deletePathKeepRootLevels( - final ZooKeeper zk, String path, int keepMinLevels) - throws CloudnameException, CoordinateMissingException, InterruptedException { - if (path.startsWith("/")) { - path = path.substring(1); - } - - String[] parts = path.split("/"); - - // We are happy if only the first two deletions went through. The other deletions are just - // cleaning up if there are no more coordinates on the same rootPath. 
- int deletedNodes = 0; - List<String> paths = new ArrayList<String>(); - String incrementalPath = ""; - for (String p : parts) { - incrementalPath += "/" + p; - paths.add(incrementalPath); - } - - for (int counter = paths.size() - 1; counter >= keepMinLevels; counter--) { - String deletePath = paths.get(counter); - int version = getVersionForDeletion(zk, deletePath); - if (hasChildren(zk, deletePath)) { - return deletedNodes; - } - try { - zk.delete(deletePath, version); - } catch (KeeperException e) { - throw new CloudnameException(e); - } - deletedNodes++; - } - return deletedNodes; - } - - // Should not be instantiated. - private Util() {} -} diff --git a/cn/src/main/java/org/cloudname/zk/ZkCloudname.java b/cn/src/main/java/org/cloudname/zk/ZkCloudname.java deleted file mode 100644 index c14e5e7b..00000000 --- a/cn/src/main/java/org/cloudname/zk/ZkCloudname.java +++ /dev/null @@ -1,419 +0,0 @@ -package org.cloudname.zk; - -import org.apache.zookeeper.data.Stat; -import org.cloudname.*; - -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.ZooDefs.Ids; -import org.apache.zookeeper.KeeperException; - -import java.io.UnsupportedEncodingException; -import java.util.List; - -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import java.util.logging.Level; -import java.util.logging.Logger; - -import java.util.concurrent.CountDownLatch; - -import java.io.IOException; - - -/** - * An implementation of Cloudname using ZooKeeper. - * - * This implementation assumes that the path prefix defined by - * CN_PATH_PREFIX is only used by Cloudname. The structure and - * semantics of things under this prefix are defined by this library - * and will be subject to change. 
- * - * - * @author borud - * @author dybdahl - * @author storsveen - */ -public final class ZkCloudname implements Cloudname, Watcher, Runnable { - - private static final int SESSION_TIMEOUT = 5000; - - private static final Logger log = Logger.getLogger(ZkCloudname.class.getName()); - - private ZkObjectHandler zkObjectHandler = null; - - private final String connectString; - - // Latches that count down when ZooKeeper is connected - private final CountDownLatch connectedSignal = new CountDownLatch(1); - - private ZkResolver resolver = null; - - private int connectingCounter = 0; - - private final ScheduledExecutorService scheduler = - Executors.newSingleThreadScheduledExecutor(); - - private ZkCloudname(final Builder builder) { - connectString = builder.getConnectString(); - - } - - /** - * Checks state of zookeeper connection and try to keep it running. - */ - @Override - public void run() { - final ZooKeeper.States state = zkObjectHandler.getClient().getZookeeper().getState(); - - if (state == ZooKeeper.States.CONNECTED) { - connectingCounter = 0; - return; - } - - if (state == ZooKeeper.States.CONNECTING) { - connectingCounter++; - if (connectingCounter > 10) { - log.fine("Long time in connecting, forcing a close of zookeeper client."); - zkObjectHandler.close(); - connectingCounter = 0; - } - return; - } - - if (state == ZooKeeper.States.CLOSED) { - log.fine("Retrying connection to ZooKeeper."); - try { - zkObjectHandler.setZooKeeper( - new ZooKeeper(connectString, SESSION_TIMEOUT, this)); - } catch (IOException e) { - log.log(Level.SEVERE, "RetryConnection failed for some reason:" - + e.getMessage(), e); - } - return; - } - - log.severe("Unknown state " + state + " closing...."); - zkObjectHandler.close(); - } - - - /** - * Connect to ZooKeeper instance with time-out value. - * @param waitTime time-out value for establishing connection. - * @param waitUnit time unit for time-out when establishing connection. 
- * @throws CloudnameException if connection can not be established - * @return - */ - public ZkCloudname connectWithTimeout(long waitTime, TimeUnit waitUnit) - throws CloudnameException { - boolean connected = false; - try { - - zkObjectHandler = new ZkObjectHandler( - new ZooKeeper(connectString, SESSION_TIMEOUT, this)); - - if (! connectedSignal.await(waitTime, waitUnit)) { - throw new CloudnameException("Connecting to ZooKeeper timed out."); - } - log.fine("Connected to ZooKeeper " + connectString); - connected = true; - } catch (IOException e) { - throw new CloudnameException(e); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } finally { - if (!connected && zkObjectHandler != null) { - zkObjectHandler.close(); - } - } - resolver = new ZkResolver.Builder().addStrategy(new StrategyAll()) - .addStrategy(new StrategyAny()).build(zkObjectHandler.getClient()); - scheduler.scheduleWithFixedDelay(this, 1 /* initial delay ms */, - 1000 /* check state every second */, TimeUnit.MILLISECONDS); - return this; - } - - /** - * Connect to ZooKeeper instance with long time-out, however, it might fail fast. - * @return connected ZkCloudname object - * @throws CloudnameException if connection can not be established. - */ - public ZkCloudname connect() throws CloudnameException { - // We wait up to 100 years. - return connectWithTimeout(365 * 100, TimeUnit.DAYS); - } - - - - @Override - public void process(WatchedEvent event) { - log.fine("Got event in ZkCloudname: " + event.toString()); - if (event.getState() == Event.KeeperState.Disconnected - || event.getState() == Event.KeeperState.Expired) { - zkObjectHandler.connectionDown(); - } - - // Initial connection to ZooKeeper is completed. - if (event.getState() == Event.KeeperState.SyncConnected) { - zkObjectHandler.connectionUp(); - // The first connection set up is blocking, this will unblock the connection. 
- connectedSignal.countDown(); - } - } - - /** - * Create a given coordinate in the ZooKeeper node tree. - * - * Just blindly creates the entire path. Elements of the path may - * exist already, but it seems wasteful to - * @throws CoordinateExistsException if coordinate already exists- - * @throws CloudnameException if problems with zookeeper connection. - */ - @Override - public void createCoordinate(final Coordinate coordinate) - throws CloudnameException, CoordinateExistsException { - // Create the root path for the coordinate. We do this - // blindly, meaning that if the path already exists, then - // that's ok -- so a more correct name for this method would - // be ensureCoordinate(), but that might confuse developers. - String root = ZkCoordinatePath.getCoordinateRoot(coordinate); - final ZooKeeper zk = zkObjectHandler.getClient().getZookeeper(); - try { - if (Util.exist(zk, root)) { - throw new CoordinateExistsException("Coordinate already created:" +root); - } - } catch (InterruptedException e) { - throw new CloudnameException(e); - } - - try { - Util.mkdir(zk, root, Ids.OPEN_ACL_UNSAFE); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } - - // Create the nodes that represent subdirectories. - String configPath = ZkCoordinatePath.getConfigPath(coordinate, null); - try { - log.fine("Creating config node " + configPath); - zk.create(configPath, null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - } catch (KeeperException e) { - throw new CloudnameException(e); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } - } - - /** - * Deletes a coordinate in the persistent service store. This includes deletion - * of config. It will fail if the coordinate is claimed. - * @param coordinate the coordinate we wish to destroy. 
- */ - @Override - public void destroyCoordinate(final Coordinate coordinate) - throws CoordinateDeletionException, CoordinateMissingException, CloudnameException { - String statusPath = ZkCoordinatePath.getStatusPath(coordinate); - String configPath = ZkCoordinatePath.getConfigPath(coordinate, null); - String rootPath = ZkCoordinatePath.getCoordinateRoot(coordinate); - final ZooKeeper zk = zkObjectHandler.getClient().getZookeeper(); - try { - if (! Util.exist(zk, rootPath)) { - throw new CoordinateMissingException("Coordinate not found: " + rootPath); - } - } catch (InterruptedException e) { - throw new CloudnameException(e); - } - - - // Do this early to raise the error before anything is deleted. However, there might be a - // race condition if someone claims while we delete configPath and instance (root) node. - try { - if (Util.exist(zk, configPath) && Util.hasChildren(zk, configPath)) { - throw new CoordinateDeletionException("Coordinate has config node."); - } - } catch (InterruptedException e) { - throw new CloudnameException(e); - } - - try { - if (Util.exist(zk, statusPath)) { - throw new CoordinateDeletionException("Coordinate is claimed."); - } - } catch (InterruptedException e) { - throw new CloudnameException(e); - } - - // Delete config, the instance node, and continue with as much as possible. - // We might have a raise condition if someone is creating a coordinate with a shared path - // in parallel. We want to keep 3 levels of nodes (/cn/%CELL%/%USER%). - int deletedNodes = 0; - try { - deletedNodes = Util.deletePathKeepRootLevels(zk, configPath, 3); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } - if (deletedNodes == 0) { - throw new CoordinateDeletionException("Failed deleting config node, nothing deleted.."); - } - if (deletedNodes == 1) { - throw new CoordinateDeletionException("Failed deleting instance node."); - } - } - - /** - * Claim a coordinate. 
- * - * In this implementation a coordinate is claimed by creating an - * ephemeral with the name defined in CN_STATUS_NAME. If the node - * already exists the coordinate has already been claimed. - */ - @Override - public ServiceHandle claim(final Coordinate coordinate) { - String statusPath = ZkCoordinatePath.getStatusPath(coordinate); - log.fine("Claiming " + coordinate.asString() + " (" + statusPath + ")"); - - ClaimedCoordinate statusAndEndpoints = new ClaimedCoordinate( - coordinate, zkObjectHandler.getClient()); - - // If we have come thus far we have succeeded in creating the - // CN_STATUS_NAME node within the service coordinate directory - // in ZooKeeper and we can give the client a ServiceHandle. - ZkServiceHandle handle = new ZkServiceHandle( - statusAndEndpoints, coordinate, zkObjectHandler.getClient()); - statusAndEndpoints.start(); - return handle; - } - - @Override - public Resolver getResolver() { - - return resolver; - } - - @Override - public ServiceStatus getStatus(Coordinate coordinate) throws CloudnameException { - String statusPath = ZkCoordinatePath.getStatusPath(coordinate); - ZkCoordinateData zkCoordinateData = ZkCoordinateData.loadCoordinateData( - statusPath, zkObjectHandler.getClient().getZookeeper(), null); - return zkCoordinateData.snapshot().getServiceStatus(); - } - - @Override - public void setConfig( - final Coordinate coordinate, final String newConfig, final String oldConfig) - throws CoordinateMissingException, CloudnameException { - String configPath = ZkCoordinatePath.getConfigPath(coordinate, null); - int version = -1; - final ZooKeeper zk = zkObjectHandler.getClient().getZookeeper(); - if (oldConfig != null) { - Stat stat = new Stat(); - byte [] data = null; - try { - data = zk.getData(configPath, false, stat); - } catch (KeeperException e) { - throw new CloudnameException(e); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } - try { - String stringData = new String(data, Util.CHARSET_NAME); - if 
(! stringData.equals(oldConfig)) { - throw new CloudnameException("Data did not match old config. Actual old " - + stringData + " specified old " + oldConfig); - } - } catch (UnsupportedEncodingException e) { - throw new CloudnameException(e); - } - version = stat.getVersion(); - } - try { - zk.setData(configPath, newConfig.getBytes(Util.CHARSET_NAME), version); - } catch (KeeperException e) { - throw new CloudnameException(e); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } catch (UnsupportedEncodingException e) { - throw new CloudnameException(e); - } - } - - - @Override - public String getConfig(final Coordinate coordinate) - throws CoordinateMissingException, CloudnameException { - String configPath = ZkCoordinatePath.getConfigPath(coordinate, null); - Stat stat = new Stat(); - try { - byte[] data = zkObjectHandler.getClient().getZookeeper().getData( - configPath, false, stat); - if (data == null) { - return null; - } - return new String(data, Util.CHARSET_NAME); - } catch (KeeperException e) { - throw new CloudnameException(e); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } catch (UnsupportedEncodingException e) { - throw new CloudnameException(e); - } - } - - /** - * Close the connection to ZooKeeper. - */ - @Override - public void close() { - zkObjectHandler.shutdown(); - log.fine("ZooKeeper session closed for " + connectString); - scheduler.shutdown(); - } - - /** - * List the sub-nodes in ZooKeeper owned by Cloudname. - * @param nodeList - */ - public void listRecursively(List<String> nodeList) - throws CloudnameException, InterruptedException { - Util.listRecursively(zkObjectHandler.getClient().getZookeeper(), - ZkCoordinatePath.getCloudnameRoot(), nodeList); - } - - /** - * This class builds parameters for ZkCloudname. 
- */ - public static class Builder { - private String connectString; - - public Builder setConnectString(String connectString) { - this.connectString = connectString; - return this; - } - - // TODO(borud, dybdahl): Make this smarter, some ideas: - // Connect to one node and read from a magic path - // how many zookeepers that are running and build - // the path based on this information. - public Builder setDefaultConnectString() { - this.connectString = "z1:2181,z2:2181,z3:2181"; - return this; - } - - public String getConnectString() { - return connectString; - } - - public ZkCloudname build() { - if (connectString.isEmpty()) { - throw new RuntimeException( - "You need to specify connection string before you can build."); - } - return new ZkCloudname(this); - } - } -} diff --git a/cn/src/main/java/org/cloudname/zk/ZkCoordinateData.java b/cn/src/main/java/org/cloudname/zk/ZkCoordinateData.java deleted file mode 100644 index 622b6482..00000000 --- a/cn/src/main/java/org/cloudname/zk/ZkCoordinateData.java +++ /dev/null @@ -1,228 +0,0 @@ -package org.cloudname.zk; - -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.data.Stat; -import org.cloudname.CloudnameException; -import org.cloudname.Endpoint; -import org.cloudname.ServiceState; -import org.cloudname.ServiceStatus; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; - -import java.io.IOException; -import java.io.StringWriter; -import java.io.UnsupportedEncodingException; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * ZkCoordinateData represent the data regarding a coordinate. 
It can return an immutable snapshot. - * The class has support for deserializing and serializing the data and methods for accessing - * endpoints. The class is fully thread-safe. - * - * @auther dybdahl - */ -public final class ZkCoordinateData { - /** - * The status of the coordinate, is it running etc. - */ - private ServiceStatus serviceStatus = new ServiceStatus(ServiceState.UNASSIGNED, - "No service state has been assigned"); - - /** - * The endpoints registered at the coordinate mapped by endpoint name. - */ - private final Map<String, Endpoint> endpointsByName = new HashMap<String, Endpoint>(); - - // Used for deserializing. - private final ObjectMapper objectMapper = new ObjectMapper(); - - private final Object localVariablesMonitor = new Object(); - - /** - * Create a new immutable snapshot object. - */ - public Snapshot snapshot() { - synchronized (localVariablesMonitor) { - return new Snapshot(serviceStatus, endpointsByName); - } - } - - /** - * Sets status, overwrite any existing status information. - */ - public ZkCoordinateData setStatus(ServiceStatus status) { - synchronized (localVariablesMonitor) { - this.serviceStatus = status; - return this; - } - } - - /** - * Adds new endpoints to the builder. It is legal to add a new endpoint with an endpoint - * that already exists. - */ - public ZkCoordinateData putEndpoints(final List<Endpoint> newEndpoints) { - synchronized (localVariablesMonitor) { - for (Endpoint endpoint : newEndpoints) { - endpointsByName.put(endpoint.getName(), endpoint); - } - } - return this; - } - - /** - * Remove endpoints from the Dynamic object. - */ - public ZkCoordinateData removeEndpoints(final List<String> names) { - synchronized (localVariablesMonitor) { - for (String name : names) { - if (! 
endpointsByName.containsKey(name)) { - throw new IllegalArgumentException("endpoint does not exist: " + name); - } - if (null == endpointsByName.remove(name)) { - throw new IllegalArgumentException( - "Endpoint does not exists, null in internal structure." + name); - } - } - } - return this; - } - - /** - * Sets the state of the Dynamic object based on a serialized byte string. - * Any old data is overwritten. - * @throws IOException if something went wrong, should not happen on valid data. - */ - public ZkCoordinateData deserialize(byte[] data) throws IOException { - synchronized (localVariablesMonitor) { - final String stringData = new String(data, Util.CHARSET_NAME); - final JsonFactory jsonFactory = new JsonFactory(); - final JsonParser jp = jsonFactory.createJsonParser(stringData); - final String statusString = objectMapper.readValue(jp, new TypeReference<String>() {}); - serviceStatus = ServiceStatus.fromJson(statusString); - endpointsByName.clear(); - endpointsByName.putAll((Map<String, Endpoint>)objectMapper.readValue(jp, - new TypeReference <Map<String, Endpoint>>() {})); - } - return this; - } - - /** - * An immutable representation of the coordinate data. - */ - public static class Snapshot { - /** - * The status of the coordinate, is it running etc. - */ - private final ServiceStatus serviceStatus; - - /** - * The endpoints registered at the coordinate mapped by endpoint name. - */ - private final Map<String, Endpoint> endpointsByName; - - /** - * Getter for status of coordinate. - * @return the service status of the coordinate. - */ - public ServiceStatus getServiceStatus() { - return serviceStatus; - } - - /** - * Getter for endpoint of the coordinate given the endpoint name. - * @param name of the endpoint. - * @return the endpoint or null if non-existing. - */ - public Endpoint getEndpoint(final String name) { - return endpointsByName.get(name); - } - - /** - * Returns all the endpoints. - * @return set of endpoints. 
- */ - public Set<Endpoint> getEndpoints() { - Set<Endpoint> endpoints = new HashSet<Endpoint>(); - endpoints.addAll(endpointsByName.values()); - return endpoints; - } - - /** - * A method for getting all endpoints. - * @param endpoints The endpoints are put in this list. - */ - public void appendAllEndpoints(final Collection<Endpoint> endpoints) { - endpoints.addAll(endpointsByName.values()); - } - - /** - * Return a serialized string representing the status and endpoint. It can be de-serialize - * by the inner class. - * @return The serialized string. - * @throws IOException if something goes wrong, should not be a common problem though. - */ - public String serialize() { - final StringWriter stringWriter = new StringWriter(); - final JsonGenerator generator; - - try { - generator = new JsonFactory(new ObjectMapper()).createJsonGenerator(stringWriter); - generator.writeString(serviceStatus.toJson()); - generator.writeObject(endpointsByName); - - generator.flush(); - } catch (IOException e) { - throw new RuntimeException( - "Got IOException while serializing coordinate data." , e); - } - return new String(stringWriter.getBuffer()); - } - - /** - * Private constructor, only ZkCoordinateData can build this. - */ - private Snapshot(ServiceStatus serviceStatus, Map<String, Endpoint> endpointsByName) { - this.serviceStatus = serviceStatus; - this.endpointsByName = endpointsByName; - } - } - - /** - * Utility function to create and load a ZkCoordinateData from ZooKeeper. - * @param watcher for callbacks from ZooKeeper. It is ok to pass null. - * @throws CloudnameException when problems loading data. 
- */ - static public ZkCoordinateData loadCoordinateData( - final String statusPath, final ZooKeeper zk, final Watcher watcher) - throws CloudnameException { - Stat stat = new Stat(); - try { - byte[] data; - if (watcher == null) { - data = zk.getData(statusPath, false, stat); - } else { - data = zk.getData(statusPath, watcher, stat); - } - return new ZkCoordinateData().deserialize(data); - } catch (KeeperException e) { - throw new CloudnameException(e); - } catch (UnsupportedEncodingException e) { - throw new CloudnameException(e); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } catch (IOException e) { - throw new CloudnameException(e); - } - } -} diff --git a/cn/src/main/java/org/cloudname/zk/ZkCoordinatePath.java b/cn/src/main/java/org/cloudname/zk/ZkCoordinatePath.java deleted file mode 100644 index 6e4eb847..00000000 --- a/cn/src/main/java/org/cloudname/zk/ZkCoordinatePath.java +++ /dev/null @@ -1,87 +0,0 @@ -package org.cloudname.zk; - -import org.cloudname.Coordinate; - - -/** - * A class for creating paths for ZooKeeper. - * The semantic of a path is string of the form /cn/%cell%/%user%/%service%/%instance%/[status]|[config/%name%] - - * The prefix /cn indicates that the content is owned by the CloudName library. - * Anything that lives under this prefix can only be touched by the Cloudname library. - * If clients begin to fiddle with nodes under this prefix directly, all deals are off. - * @author: dybdahl - */ -public final class ZkCoordinatePath { - private static final String CN_PATH_PREFIX = "/cn"; - private static final String CN_STATUS_NAME = "status"; - private static final String CN_CONFIG_NAME = "config"; - - public static String getCloudnameRoot() { - return CN_PATH_PREFIX; - } - /** - * Builds the root path of a coordinate. - * @param coordinate - * @return the path of the coordinate in ZooKeeper (/cn/%cell%/%user%/%service%/%instance%). 
- */ - public static String getCoordinateRoot(final Coordinate coordinate) { - return coordinateAsPath(coordinate.getCell(), coordinate.getUser(), coordinate.getService(), - coordinate.getInstance()); - } - - /** - * Builds the status path of a coordinate. - * @param coordinate - * @return full status path (/cn/%cell%/%user%/%service%/%instance%/status) - */ - public static String getStatusPath(final Coordinate coordinate) { - return getCoordinateRoot(coordinate) + "/" + CN_STATUS_NAME; - } - - /** - * Builds the config path of a coordinate. - * @param coordinate - * @param name if null, the last path of the path (/%name%) is not included. - * @return config path /cn/%cell%/%user%/%service%/%instance%/config or - * /cn/%cell%/%user%/%service%/%instance%/config/%name% - */ - public static String getConfigPath(final Coordinate coordinate, final String name) { - if (name == null) { - return getCoordinateRoot(coordinate) + "/" + CN_CONFIG_NAME; - } - return getCoordinateRoot(coordinate) + "/" + CN_CONFIG_NAME + "/" + name; - } - - /** - * Builds first part of a ZooKeeper path. - * @param cell - * @param user - * @param service - * @return path (/cn/%cell%/%user%/%service%) - */ - public static String coordinateWithoutInstanceAsPath( - final String cell, final String user, final String service) { - return CN_PATH_PREFIX + "/" + cell + "/" + user + "/" + service; - } - - public static String getStatusPath(String cell, String user, String service, Integer instance) { - return coordinateAsPath(cell, user, service, instance) + "/" + CN_STATUS_NAME; - } - - /** - * Builds first part of a ZooKeeper path. 
- * @param cell - * @param user - * @param service - * @param instance - * @return path (/cn/%cell%/%user%/%service%/%instance%) - */ - private static String coordinateAsPath( - final String cell, final String user, final String service, Integer instance) { - return coordinateWithoutInstanceAsPath(cell, user, service) + "/" + instance.toString(); - } - - // Should not be instantiated. - private ZkCoordinatePath() {} -} diff --git a/cn/src/main/java/org/cloudname/zk/ZkObjectHandler.java b/cn/src/main/java/org/cloudname/zk/ZkObjectHandler.java deleted file mode 100644 index e17e0009..00000000 --- a/cn/src/main/java/org/cloudname/zk/ZkObjectHandler.java +++ /dev/null @@ -1,157 +0,0 @@ -package org.cloudname.zk; - -import org.apache.zookeeper.ZooKeeper; - -import java.util.HashSet; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Class that keeps an instance of zookeeper. It has a sub-class with read access and - * a listener service. - * @author dybdahl - */ -public class ZkObjectHandler { - private ZooKeeper zooKeeper = null; - private final Object zooKeeperMonitor = new Object(); - - private final Set<ConnectionStateChanged> registeredCallbacks = - new HashSet<ConnectionStateChanged>(); - private final Object callbacksMonitor = new Object(); - - private final AtomicBoolean isConnected = new AtomicBoolean(true); - - /** - * Constructor - * @param zooKeeper first zooKeeper to use, should not be null. - */ - public ZkObjectHandler(final ZooKeeper zooKeeper) { - this.zooKeeper = zooKeeper; - } - - /** - * Interface for notification of connection state changes. - */ - public interface ConnectionStateChanged { - void connectionUp(); - void connectionDown(); - void shutDown(); - } - - /** - * Indicate that zookeeper connection is working by calling this method. 
- */ - public void connectionUp() { - boolean previous = isConnected.getAndSet(true); - if (previous == true) { return; } - synchronized (callbacksMonitor) { - for (ConnectionStateChanged connectionStateChanged : registeredCallbacks) { - connectionStateChanged.connectionUp(); - } - } - } - - /** - * Indicate that zookeeper connection is broken by calling this method. - */ - public void connectionDown() { - boolean previous = isConnected.getAndSet(false); - if (previous == false) { return; } - synchronized (callbacksMonitor) { - for (ConnectionStateChanged connectionStateChanged : registeredCallbacks) { - connectionStateChanged.connectionDown(); - } - } - } - - /** - * Every class using Zookeeper has an instance of this Client class - * to check the connection and fetch the instance. - */ - public class Client { - public ZooKeeper getZookeeper() { - synchronized (zooKeeperMonitor) { - return zooKeeper; - } - } - - /** - * Check if we are connected to Zookeeper - * @return True if zkCloudname confirmed connection <1000ms ago. - */ - public boolean isConnected() { - return isConnected.get(); - } - - /** - * Register a callback. - * @param connectionStateChanged Callback to register - * @return true if this is a new callback. - */ - public boolean registerListener(ConnectionStateChanged connectionStateChanged) { - synchronized (callbacksMonitor) { - return registeredCallbacks.add(connectionStateChanged); - } - } - - /** - * Deregister a callback. - * @param connectionStateChanged Callback to deregister. - * @return true if the callback was registered. - */ - public boolean deregisterListener(ConnectionStateChanged connectionStateChanged) { - synchronized (callbacksMonitor) { - return registeredCallbacks.remove(connectionStateChanged); - } - } - } - - /** - * Returns client - * @return client object. - */ - public Client getClient() { - return new Client(); - } - - /** - * Update zooKeeper instance. 
- * @param zooKeeper - */ - public void setZooKeeper(final ZooKeeper zooKeeper) { - synchronized (zooKeeperMonitor) { - this.zooKeeper = zooKeeper; - } - } - - /** - * Closes zooKeeper object. - */ - public void close() { - synchronized (zooKeeperMonitor) { - if (zooKeeper == null) { return; } - - try { - zooKeeper.close(); - } catch (InterruptedException e) { - // ignore - } - } - } - - /** - * Shut down all listeners. - */ - public void shutdown() { - synchronized (callbacksMonitor) { - for (ConnectionStateChanged connectionStateChanged : registeredCallbacks) { - connectionStateChanged.shutDown(); - } - } - try { - zooKeeper.close(); - } catch (InterruptedException e) { - // ignore - } - } -} \ No newline at end of file diff --git a/cn/src/main/java/org/cloudname/zk/ZkResolver.java b/cn/src/main/java/org/cloudname/zk/ZkResolver.java deleted file mode 100644 index 80b49197..00000000 --- a/cn/src/main/java/org/cloudname/zk/ZkResolver.java +++ /dev/null @@ -1,460 +0,0 @@ -package org.cloudname.zk; - -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.ZooKeeper; -import org.cloudname.*; - -import java.util.*; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.regex.Pattern; -import java.util.regex.Matcher; - - -/** - * This class is used to resolve Cloudname coordinates into endpoints. 
- * - * @author borud - */ -public final class ZkResolver implements Resolver, ZkObjectHandler.ConnectionStateChanged { - - private static final Logger log = Logger.getLogger(ZkResolver.class.getName()); - - private Map<String, ResolverStrategy> strategies; - - private final ZkObjectHandler.Client zkGetter; - - private final Object dynamicAddressMonitor = new Object(); - - private Map<ResolverListener, DynamicExpression> dynamicAddressesByListener = new HashMap<ResolverListener, DynamicExpression>(); - - @Override - public void connectionUp() { - synchronized (dynamicAddressMonitor) { - for (ResolverListener listener : dynamicAddressesByListener.keySet()) { - listener.endpointEvent(ResolverListener.Event.CONNECTION_OK, null); - } - } - } - - @Override - public void connectionDown() { - synchronized (dynamicAddressMonitor) { - for (ResolverListener listener : dynamicAddressesByListener.keySet()) { - listener.endpointEvent(ResolverListener.Event.LOST_CONNECTION, null); - } - } - } - - @Override - public void shutDown() { - // Nothing to shut down here. - } - - public static class Builder { - - final private Map<String, ResolverStrategy> strategies = new HashMap<String, ResolverStrategy>(); - - public Builder addStrategy(ResolverStrategy strategy) { - strategies.put(strategy.getName(), strategy); - return this; - } - - public Map<String, ResolverStrategy> getStrategies() { - return strategies; - } - - public ZkResolver build(ZkObjectHandler.Client zkGetter) { - return new ZkResolver(this, zkGetter); - } - - } - - - // Matches coordinate with endpoint of the form: - // endpoint.instance.service.user.cell - public static final Pattern endpointPattern - = Pattern.compile( "^([a-z][a-z0-9-_]*)\\." // endpoint - + "(\\d+)\\." // instance - + "([a-z][a-z0-9-_]*)\\." // service - + "([a-z][a-z0-9-_]*)\\." // user - + "([a-z][a-z-_]*)\\z"); // cell - - // Parses abstract coordinate of the form: - // strategy.service.user.cell. 
This pattern is useful for - // resolving hosts, but not endpoints. - public static final Pattern strategyPattern - = Pattern.compile( "^([a-z][a-z0-9-_]*)\\." // strategy - + "([a-z][a-z0-9-_]*)\\." // service - + "([a-z][a-z0-9-_]*)\\." // user - + "([a-z][a-z0-9-_]*)\\z"); // cell - - // Parses abstract coordinate of the form: - // strategy.service.user.cell. This pattern is useful for - // resolving hosts, but not endpoints. - public static final Pattern instancePattern - = Pattern.compile( "^([a-z0-9-_]*)\\." // strategy - + "([a-z][a-z0-9-_]*)\\." // service - + "([a-z][a-z0-9-_]*)\\." // user - + "([a-z][a-z0-9-_]*)\\z"); // cell - - // Parses abstract coordinate of the form: - // endpoint.strategy.service.user.cell. - public static final Pattern endpointStrategyPattern - = Pattern.compile( "^([a-z][a-z0-9-_]*)\\." // endpoint - + "([a-z][a-z0-9-_]*)\\." // strategy - + "([a-z][a-z0-9-_]*)\\." // service - + "([a-z][a-z0-9-_]*)\\." // user - + "([a-z][a-z0-9-_]*)\\z"); // cell - - - /** - * Inner class to keep track of parameters parsed from addressExpression. - */ - static class Parameters { - private String endpointName = null; - private Integer instance = null; - private String service = null; - private String user = null; - private String cell = null; - private String strategy = null; - private String expression = null; - - /** - * Constructor that takes an addressExperssion and sets the inner variables. - * @param addressExpression - */ - public Parameters(String addressExpression) { - this.expression = addressExpression; - if (! (trySetEndPointPattern(addressExpression) || - trySetStrategyPattern(addressExpression) || - trySetInstancePattern(addressExpression) || - trySetEndpointStrategyPattern(addressExpression))) { - throw new IllegalStateException( - "Could not parse addressExpression:" + addressExpression); - } - - } - - /** - * Returns the original expression set in the constructor of Parameters. - * @return expression to be resolved. 
- */ - public String getExpression() { - return expression; - } - - /** - * Returns strategy. - * @return the string (e.g. "all" or "any", or "" if there is no strategy - * (but instance is specified). - */ - public String getStrategy() { - return strategy; - } - - /** - * Returns endpoint name if set or "" if not set. - * @return endpointname. - */ - public String getEndpointName() { - return endpointName; - } - - /** - * Returns instance if set or negative number if not set. - * @return instance number. - */ - public Integer getInstance() { - return instance; - } - - /** - * Returns service - * @return service name. - */ - public String getService() { - return service; - } - - /** - * Returns user - * @return user. - */ - public String getUser() { - return user; - } - - /** - * Returns cell. - * @return cell. - */ - public String getCell() { - return cell; - } - - private boolean trySetEndPointPattern(String addressExperssion) { - Matcher m = endpointPattern.matcher(addressExperssion); - if (! m.matches()) { - return false; - } - endpointName = m.group(1); - instance = Integer.parseInt(m.group(2)); - strategy = ""; - service = m.group(3); - user = m.group(4); - cell = m.group(5); - return true; - - } - - private boolean trySetStrategyPattern(String addressExpression) { - Matcher m = strategyPattern.matcher(addressExpression); - if (! m.matches()) { - return false; - } - endpointName = ""; - strategy = m.group(1); - service = m.group(2); - user = m.group(3); - cell = m.group(4); - instance = -1; - return true; - } - - private boolean trySetInstancePattern(String addressExpression) { - Matcher m = instancePattern.matcher(addressExpression); - if (! 
m.matches()) { - return false; - } - endpointName = ""; - instance = Integer.parseInt(m.group(1)); - service = m.group(2); - user = m.group(3); - cell = m.group(4); - strategy = ""; - return true; - } - - private boolean trySetEndpointStrategyPattern(String addressExperssion) { - Matcher m = endpointStrategyPattern.matcher(addressExperssion); - if (! m.matches()) { - return false; - } - endpointName = m.group(1); - strategy = m.group(2); - service = m.group(3); - user = m.group(4); - cell = m.group(5); - instance = -1; - return true; - } - - } - - /** - * Constructor, to be called from the inner Dynamic class. - * @param builder - */ - private ZkResolver(Builder builder, ZkObjectHandler.Client zkGetter) { - this.strategies = builder.getStrategies(); - this.zkGetter = zkGetter; - zkGetter.registerListener(this); - } - - - @Override - public List<Endpoint> resolve(String addressExpression) throws CloudnameException { - Parameters parameters = new Parameters(addressExpression); - // TODO(borud): add some comments on the decision logic. I'm - // not sure I am too fond of the check for negative values to - // have some particular semantics. That smells like a problem - // waiting to happen. - - ZooKeeper localZkPointer = zkGetter.getZookeeper(); - if (localZkPointer == null) { - throw new CloudnameException("No connection to ZooKeeper."); - } - List<Integer> instances = resolveInstances(parameters, localZkPointer); - - List<Endpoint> endpoints = new ArrayList<Endpoint>(); - for (Integer instance : instances) { - String statusPath = ZkCoordinatePath.getStatusPath( - parameters.getCell(), parameters.getUser(), - parameters.getService(), instance); - - try { - if (! 
Util.exist(localZkPointer, statusPath)) { - continue; - } - } catch (InterruptedException e) { - throw new CloudnameException(e); - - } - final ZkCoordinateData zkCoordinateData = ZkCoordinateData.loadCoordinateData( - statusPath, localZkPointer, null); - addEndpoints(zkCoordinateData.snapshot(), endpoints, parameters.getEndpointName()); - - } - if (parameters.getStrategy().equals("")) { - return endpoints; - } - ResolverStrategy strategy = strategies.get(parameters.getStrategy()); - return strategy.order(strategy.filter(endpoints)); - } - - @Override - public void removeResolverListener(final ResolverListener listener) { - synchronized (dynamicAddressMonitor) { - DynamicExpression expression = dynamicAddressesByListener.remove(listener); - if (expression == null) { - throw new IllegalArgumentException("Do not have the listener in my list."); - } - expression.stop(); - } - log.fine("Removed listener."); - } - - /** - * The implementation does filter while listing out nodes. In this way paths that are not of - * interest are not traversed. - * @param filter class for filtering out endpoints - * @return the endpoints that passes the filter - */ - @Override - public Set<Endpoint> getEndpoints(final Resolver.CoordinateDataFilter filter) - throws CloudnameException, InterruptedException { - - final Set<Endpoint> endpointsIncluded = new HashSet<Endpoint>(); - final String cellPath = ZkCoordinatePath.getCloudnameRoot(); - final ZooKeeper zk = zkGetter.getZookeeper(); - try { - final List<String> cells = zk.getChildren(cellPath, false); - for (final String cell : cells) { - if (! filter.includeCell(cell)) { - continue; - } - final String userPath = cellPath + "/" + cell; - final List<String> users = zk.getChildren(userPath, false); - - for (final String user : users) { - if (! 
filter.includeUser(user)) { - continue; - } - final String servicePath = userPath + "/" + user; - final List<String> services = zk.getChildren(servicePath, false); - - for (final String service : services) { - if (! filter.includeService(service)) { - continue; - } - final String instancePath = servicePath + "/" + service; - final List<String> instances = zk.getChildren(instancePath, false); - - for (final String instance : instances) { - final String statusPath; - try { - statusPath = ZkCoordinatePath.getStatusPath( - cell, user, service, Integer.parseInt(instance)); - } catch (NumberFormatException e) { - log.log( - Level.WARNING, - "Got non-number as instance in cn path: " + instancePath + "/" - + instance + " skipping.", - e); - continue; - } - - ZkCoordinateData zkCoordinateData = null; - try { - zkCoordinateData = ZkCoordinateData.loadCoordinateData( - statusPath, zk, null); - } catch (CloudnameException e) { - // This is ok, an unclaimed node will not have status data, we - // ignore it even though there might also be other exception - // (this should be rare). The advantage is that we don't need to - // check if the node exists and hence reduce the load on zookeeper. 
- continue; - } - final Set<Endpoint> endpoints = zkCoordinateData.snapshot().getEndpoints(); - for (final Endpoint endpoint : endpoints) { - if (filter.includeEndpointname(endpoint.getName())) { - if (filter.includeServiceState( - zkCoordinateData.snapshot().getServiceStatus().getState())) { - endpointsIncluded.add(endpoint); - } - } - } - } - } - } - } - } catch (KeeperException e) { - throw new CloudnameException(e); - } - return endpointsIncluded; - } - - @Override - public void addResolverListener(String expression, ResolverListener listener) - throws CloudnameException { - final DynamicExpression dynamicExpression = - new DynamicExpression(expression, listener, this, zkGetter); - - synchronized (dynamicAddressMonitor) { - DynamicExpression previousExpression = dynamicAddressesByListener.put( - listener, dynamicExpression); - if (previousExpression != null) { - throw new IllegalArgumentException("It is not legal to register a listener twice."); - } - } - dynamicExpression.start(); - } - - public static void addEndpoints( - ZkCoordinateData.Snapshot statusAndEndpoints, List<Endpoint> endpoints, - String endpointname) { - if (statusAndEndpoints.getServiceStatus().getState() != ServiceState.RUNNING) { - return; - } - if (endpointname.equals("")) { - statusAndEndpoints.appendAllEndpoints(endpoints); - } else { - Endpoint e = statusAndEndpoints.getEndpoint(endpointname); - if (e != null) { - endpoints.add(e); - } - } - } - - private List<Integer> resolveInstances(Parameters parameters, ZooKeeper zk) - throws CloudnameException { - List<Integer> instances = new ArrayList<Integer>(); - if (parameters.getInstance() > -1) { - instances.add(parameters.getInstance()); - } else { - try { - instances = getInstances(zk, - ZkCoordinatePath.coordinateWithoutInstanceAsPath(parameters.getCell(), - parameters.getUser(), parameters.getService())); - } catch (InterruptedException e) { - throw new CloudnameException(e); - } - } - return instances; - } - - private List<Integer> 
getInstances(ZooKeeper zk, String path) - throws CloudnameException, InterruptedException { - List<Integer> paths = new ArrayList<Integer>(); - try { - List<String> children = zk.getChildren(path, false /* watcher */); - for (String child : children) { - paths.add(Integer.parseInt(child)); - } - } catch (KeeperException e) { - throw new CloudnameException(e); - } - return paths; - } -} diff --git a/cn/src/main/java/org/cloudname/zk/ZkServiceHandle.java b/cn/src/main/java/org/cloudname/zk/ZkServiceHandle.java deleted file mode 100644 index eb8fb837..00000000 --- a/cn/src/main/java/org/cloudname/zk/ZkServiceHandle.java +++ /dev/null @@ -1,116 +0,0 @@ -package org.cloudname.zk; - -import org.cloudname.*; - -import java.util.ArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.logging.Logger; - -import java.util.List; - -/** - * A service handle implementation. It does not have a lot of logic, it wraps ClaimedCoordinate, and - * handles some config logic. - * - * @author borud - */ -public class ZkServiceHandle implements ServiceHandle { - private final ClaimedCoordinate claimedCoordinate; - private static final Logger LOG = Logger.getLogger(ZkServiceHandle.class.getName()); - - private final ZkObjectHandler.Client zkClient; - - private final Coordinate coordinate; - - /** - * Create a ZkServiceHandle for a given coordinate. - * - * @param claimedCoordinate the claimed coordinate for this service handle. 
- */ - public ZkServiceHandle( - ClaimedCoordinate claimedCoordinate, Coordinate coordinate, - ZkObjectHandler.Client zkClient) { - this.claimedCoordinate = claimedCoordinate; - this.coordinate = coordinate; - this.zkClient = zkClient; - } - - - @Override - public boolean waitForCoordinateOkSeconds(int seconds) throws InterruptedException { - final CountDownLatch latch = new CountDownLatch(1); - - CoordinateListener listner = new CoordinateListener() { - - @Override - public void onCoordinateEvent(Event event, String message) { - if (event == Event.COORDINATE_OK) { - latch.countDown(); - } - } - }; - registerCoordinateListener(listner); - boolean result = latch.await(seconds, TimeUnit.SECONDS); - claimedCoordinate.deregisterCoordinateListener(listner); - return result; - } - - - @Override - public void setStatus(ServiceStatus status) - throws CoordinateMissingException, CloudnameException { - claimedCoordinate.updateStatus(status); - } - - @Override - public void putEndpoints(List<Endpoint> endpoints) - throws CoordinateMissingException, CloudnameException { - claimedCoordinate.putEndpoints(endpoints); - } - - @Override - public void putEndpoint(Endpoint endpoint) - throws CoordinateMissingException, CloudnameException { - List<Endpoint> endpoints = new ArrayList<Endpoint>(); - endpoints.add(endpoint); - putEndpoints(endpoints); - } - - @Override - public void removeEndpoints(List<String> names) - throws CoordinateMissingException, CloudnameException { - claimedCoordinate.removeEndpoints(names); - } - - @Override - public void removeEndpoint(String name) - throws CoordinateMissingException, CloudnameException { - List<String> names = new ArrayList<String>(); - names.add(name); - removeEndpoints(names); - } - - @Override - public void registerConfigListener(ConfigListener listener) { - TrackedConfig trackedConfig = new TrackedConfig( - ZkCoordinatePath.getConfigPath(coordinate, null), listener, zkClient); - claimedCoordinate.registerTrackedConfig(trackedConfig); - 
trackedConfig.start(); - } - - @Override - public void registerCoordinateListener(CoordinateListener listener) { - claimedCoordinate.registerCoordinateListener(listener); - } - - @Override - public void close() throws CloudnameException { - claimedCoordinate.releaseClaim(); - } - - @Override - public String toString() { - return "Claimed coordinate instance: "+ claimedCoordinate.toString(); - } -} diff --git a/cn/src/main/java/org/cloudname/zk/ZkTool.java b/cn/src/main/java/org/cloudname/zk/ZkTool.java deleted file mode 100644 index 7be0f8cf..00000000 --- a/cn/src/main/java/org/cloudname/zk/ZkTool.java +++ /dev/null @@ -1,359 +0,0 @@ -package org.cloudname.zk; - -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.PatternLayout; -import org.cloudname.*; -import org.cloudname.Resolver.ResolverListener; -import org.cloudname.flags.Flag; -import org.cloudname.flags.Flags; -import java.io.BufferedReader; -import java.io.FileNotFoundException; -import java.io.FileReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - - -/** - * Command line tool for using the Cloudname library. Run with - * <code>--help</code> option to see available flags. 
- * - * @author dybdahl - */ -public final class ZkTool { - @Flag(name="zookeeper", description="A list of host:port for connecting to ZooKeeper.") - private static String zooKeeperFlag = null; - - @Flag(name="coordinate", description="The coordinate to work on.") - private static String coordinateFlag = null; - - @Flag(name="operation", options = Operation.class, - description = "The operation to do on coordinate.") - private static Operation operationFlag = Operation.STATUS; - - @Flag(name = "setup-file", - description = "Path to file containing a list of coordinates to create (1 coordinate per line).") - private static String filePath = null; - - @Flag(name = "config", - description = "New config if setting new config.") - private static String configFlag = ""; - - @Flag(name = "resolver-expression", - description = "The resolver expression to listen to events for.") - private static String resolverExpression = null; - - @Flag(name = "list", - description = "Print the coordinates in ZooKeeper.") - private static Boolean listFlag = null; - - /** - * List of flag names for flags that select which action the tool should - * perform. These flags are mutually exclusive. - */ - private static String actionSelectingFlagNames = - "--setup-file, --resolver, --coordinate, --list"; - - /** - * The possible operations to do on a coordinate. - */ - public enum Operation { - /** - * Create a new coordinate. - */ - CREATE, - /** - * Delete a coordinate. - */ - DELETE, - /** - * Print out some status about a coordinate. - */ - STATUS, - /** - * Print the host of a coordinate. - */ - HOST, - /** - * Set config - */ - SET_CONFIG, - /** - * Read config - */ - READ_CONFIG; - } - - /** - * Matches coordinate of type: cell.user.service.instance.config. 
- */ - public static final Pattern instanceConfigPattern - = Pattern.compile("\\/cn\\/([a-z][a-z-_]*)\\/" // cell - + "([a-z][a-z0-9-_]*)\\/" // user - + "([a-z][a-z0-9-_]*)\\/" // service - + "(\\d+)\\/config\\z"); // instance - - private static ZkCloudname cloudname = null; - - public static void main(final String[] args) { - - // Disable log system, we want full control over what is sent to console. - final ConsoleAppender consoleAppender = new ConsoleAppender(); - consoleAppender.activateOptions(); - consoleAppender.setLayout(new PatternLayout("%p %t %C:%M %m%n")); - consoleAppender.setThreshold(Level.OFF); - BasicConfigurator.configure(consoleAppender); - - // Parse the flags. - Flags flags = new Flags() - .loadOpts(ZkTool.class) - .parse(args); - - // Check if we wish to print out help text - if (flags.helpFlagged()) { - flags.printHelp(System.out); - System.out.println("Must specify one of the following options:"); - System.out.println(actionSelectingFlagNames); - return; - } - - checkArgumentCombinationValid(flags); - - ZkCloudname.Builder builder = new ZkCloudname.Builder(); - if (zooKeeperFlag == null) { - builder.setDefaultConnectString(); - } else { - builder.setConnectString(zooKeeperFlag); - } - try { - cloudname = builder.build().connect(); - } catch (CloudnameException e) { - System.err.println("Could not connect to zookeeper " + e.getMessage()); - return; - } - - try { - if (filePath != null) { - handleFilepath(); - } else if (coordinateFlag != null) { - handleCoordinateOperation(); - } else if (resolverExpression != null) { - handleResolverExpression(); - } else if (listFlag) { - listCoordinates(); - } else { - System.err.println("No action specified"); - } - } catch (Exception e) { - System.err.println("An error occurred: " + e.getMessage()); - e.printStackTrace(); - } finally { - cloudname.close(); - } - } - - private static void checkArgumentCombinationValid(final Flags flags) { - int actionSelectedCount = 0; - final Object[] 
actionSelectingFlags = { - filePath, coordinateFlag, resolverExpression, listFlag - }; - for (Object flag: actionSelectingFlags) { - if (flag != null) { - actionSelectedCount++; - } - } - if (actionSelectedCount != 1) { - System.err.println("Must specify exactly one of the following options:"); - System.err.println(actionSelectingFlagNames); - flags.printHelp(System.err); - System.exit(1); - } - } - - private static void handleResolverExpression() { - final Resolver resolver = cloudname.getResolver(); - try { - System.out.println("Added a resolver listener for expression: " + resolverExpression + ". Will print out all events for the given expression."); - resolver.addResolverListener(resolverExpression, new ResolverListener() { - @Override - public void endpointEvent(Event event, Endpoint endpoint) { - System.out.println("Received event: " + event + " for endpoint: " + endpoint); - } - }); - } catch (CloudnameException e) { - System.err.println("Problem with cloudname: " + e.getMessage()); - } - final BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); - while(true) { - System.out.println("Press enter to exit"); - String s = null; - try { - s = br.readLine(); - } catch (IOException e) { - e.printStackTrace(); - } - if (s.length() == 0) { - System.out.println("Exiting"); - System.exit(0); - } - } - } - - private static void handleCoordinateOperation() { - final Resolver resolver = cloudname.getResolver(); - final Coordinate coordinate = Coordinate.parse(coordinateFlag); - switch (operationFlag) { - case CREATE: - try { - cloudname.createCoordinate(coordinate); - } catch (CloudnameException e) { - System.err.println("Got error: " + e.getMessage()); - break; - } catch (CoordinateExistsException e) { - e.printStackTrace(); - break; - } - System.err.println("Created coordinate."); - break; - case DELETE: - try { - cloudname.destroyCoordinate(coordinate); - } catch (CoordinateDeletionException e) { - System.err.println("Got error: " + 
e.getMessage()); - return; - } catch (CoordinateMissingException e) { - System.err.println("Got error: " + e.getMessage()); - break; - } catch (CloudnameException e) { - System.err.println("Got error: " + e.getMessage()); - break; - } - System.err.println("Deleted coordinate."); - break; - case STATUS: { - ServiceStatus status; - try { - status = cloudname.getStatus(coordinate); - } catch (CloudnameException e) { - System.err.println("Problems loading status, is service running? Error:\n" + e.getMessage()); - break; - } - System.err.println("Status:\n" + status.getState().toString() + " " + status.getMessage()); - List<Endpoint> endpoints = null; - try { - endpoints = resolver.resolve("all." + coordinate.getService() - + "." + coordinate.getUser() + "." + coordinate.getCell()); - } catch (CloudnameException e) { - System.err.println("Got error: " + e.getMessage()); - break; - } - System.err.println("Endpoints:"); - for (Endpoint endpoint : endpoints) { - if (endpoint.getCoordinate().getInstance() == coordinate.getInstance()) { - System.err.println(endpoint.getName() + "-->" + endpoint.getHost() + ":" + endpoint.getPort() - + " protocol:" + endpoint.getProtocol()); - System.err.println("Endpoint data:\n" + endpoint.getEndpointData()); - } - } - break; - } - case HOST: { - List<Endpoint> endpoints = null; - try { - endpoints = resolver.resolve(coordinate.asString()); - } catch (CloudnameException e) { - System.err.println("Could not resolve " + coordinate.asString() + " Error:\n" + e.getMessage()); - break; - } - for (Endpoint endpoint : endpoints) { - System.out.println("Host: " + endpoint.getHost()); - } - } - break; - case SET_CONFIG: - try { - cloudname.setConfig(coordinate, configFlag, null); - } catch (CloudnameException e) { - System.err.println("Got error: " + e.getMessage()); - break; - - } catch (CoordinateMissingException e) { - System.err.println("Non-existing coordinate."); - } - System.err.println("Config updated."); - break; - - case READ_CONFIG: - try 
{ - System.out.println("Config is:" + cloudname.getConfig(coordinate)); - } catch (CoordinateMissingException e) { - System.err.println("Non-existing coordinate."); - } catch (CloudnameException e) { - System.err.println("Problem with cloudname: " + e.getMessage()); - } - break; - default: - System.out.println("Unknown command " + operationFlag); - } - } - - private static void listCoordinates() { - try { - final List<String> nodeList = new ArrayList<String>(); - cloudname.listRecursively(nodeList); - for (final String node : nodeList) { - final Matcher m = instanceConfigPattern.matcher(node); - - /* - * We only parse config paths, and we convert these to - * Cloudname coordinates to not confuse the user. - */ - if (m.matches()) { - System.out.printf("%s.%s.%s.%s\n", - m.group(4), m.group(3), m.group(2), m.group(1)); - } - } - } catch (final CloudnameException e) { - System.err.println("Got error: " + e.getMessage()); - } catch (final InterruptedException e) { - System.err.println("Got error: " + e.getMessage()); - } - } - - private static void handleFilepath() { - final BufferedReader br; - try { - br = new BufferedReader(new FileReader(filePath)); - } catch (FileNotFoundException e) { - throw new RuntimeException("File not found: " + filePath, e); - } - String line; - try { - while ((line = br.readLine()) != null) { - try { - cloudname.createCoordinate(Coordinate.parse(line)); - System.out.println("Created " + line); - } catch (Exception e) { - System.err.println("Could not create: " + line + "Got error: " + e.getMessage()); - } - } - } catch (IOException e) { - throw new RuntimeException("Failed to read coordinate from file. " + e.getMessage(), e); - } finally { - cloudname.close(); - try { - br.close(); - } catch (IOException e) { - System.err.println("Failed while trying to close file reader. " + e.getMessage()); - } - } - } - - // Should not be instantiated. 
- private ZkTool() {} -} diff --git a/cn/src/test/java/org/cloudname/CoordinateTest.java b/cn/src/test/java/org/cloudname/CoordinateTest.java deleted file mode 100644 index 42ee09cc..00000000 --- a/cn/src/test/java/org/cloudname/CoordinateTest.java +++ /dev/null @@ -1,74 +0,0 @@ -package org.cloudname; - -import org.junit.*; -import static org.junit.Assert.*; - -/** - * Unit tests for Coordinate. - * - * @author borud - */ -public class CoordinateTest { - @Test - public void testSimple() throws Exception { - Coordinate c = Coordinate.parse("1.service.user.cell"); - assertNotNull(c); - assertEquals(1, c.getInstance()); - assertEquals("service", c.getService()); - assertEquals("user", c.getUser()); - assertEquals("cell", c.getCell()); - } - - @Test (expected = IllegalArgumentException.class) - public void testInvalidInstanceNumber() throws Exception { - new Coordinate(-1, "service", "user", "cell"); - } - - @Test - public void testEquals() throws Exception { - assertEquals( - new Coordinate(1,"foo", "bar", "baz"), - new Coordinate(1, "foo", "bar", "baz") - ); - } - - @Test - public void testSymmetry() throws Exception { - String s = "0.fooservice.baruser.bazcell"; - assertEquals(s, Coordinate.parse(s).asString()); - assertEquals(s, new Coordinate(0, - "fooservice", - "baruser", - "bazcell").asString()); - - System.out.println(Coordinate.parse(s)); - } - - @Test (expected = IllegalArgumentException.class) - public void testInvalidInstance() throws Exception { - Coordinate.parse("invalid.service.user.cell"); - } - - @Test (expected = IllegalArgumentException.class) - public void testInvalidCharacters() throws Exception { - Coordinate.parse("0.ser!vice.user.cell"); - } - - @Test - public void testLegalCharacters() throws Exception { - Coordinate.parse("0.service-test.user.cell"); - Coordinate.parse("0.service_test.user.cell"); - Coordinate.parse("0.service.user-foo.cell"); - Coordinate.parse("0.service.user_foo.ce_ll"); - } - - @Test (expected = 
IllegalArgumentException.class) - public void testRequireStartsWithLetter() throws Exception { - Coordinate.parse("0._aaa._bbb._ccc"); - } - - @Test (expected = IllegalArgumentException.class) - public void testIllegalArgumentsConstructor() throws Exception { - new Coordinate(1, "service", "_user", "cell"); - } -} \ No newline at end of file diff --git a/cn/src/test/java/org/cloudname/EndpointTest.java b/cn/src/test/java/org/cloudname/EndpointTest.java deleted file mode 100644 index 0769ac51..00000000 --- a/cn/src/test/java/org/cloudname/EndpointTest.java +++ /dev/null @@ -1,31 +0,0 @@ -package org.cloudname; - -import org.junit.*; -import static org.junit.Assert.*; - -/** - * Unit tests for Endpoint. - * - * @author borud - */ -public class EndpointTest { - @Test - public void testSimple() throws Exception { - Endpoint endpoint = new Endpoint(Coordinate.parse("1.foo.bar.zot"), - "rest-api", - "somehost", - 4711, - "http", - null); - String json = endpoint.toJson(); - Endpoint endpoint2 = Endpoint.fromJson(json); - - assertEquals(endpoint.getCoordinate(), endpoint2.getCoordinate()); - assertEquals(endpoint.getName(), endpoint2.getName()); - assertEquals(endpoint.getHost(), endpoint2.getHost()); - assertEquals(endpoint.getPort(), endpoint2.getPort()); - assertEquals(endpoint.getEndpointData(), endpoint2.getEndpointData()); - - System.out.println(json); - } -} \ No newline at end of file diff --git a/cn/src/test/java/org/cloudname/ServiceStatusTest.java b/cn/src/test/java/org/cloudname/ServiceStatusTest.java deleted file mode 100644 index ec7fe5b3..00000000 --- a/cn/src/test/java/org/cloudname/ServiceStatusTest.java +++ /dev/null @@ -1,24 +0,0 @@ -package org.cloudname; - -import org.junit.*; -import static org.junit.Assert.*; - -/** - * Unit tests for ServiceStatus. 
- * - * @author borud - */ -public class ServiceStatusTest { - @Test - public void testSimple() throws Exception { - ServiceStatus status = new ServiceStatus(ServiceState.STARTING, - "Loading hamster into wheel"); - String json = status.toJson(); - assertNotNull(json); - - ServiceStatus status2 = ServiceStatus.fromJson(json); - - assertEquals(status.getMessage(), status2.getMessage()); - assertSame(status.getState(), status2.getState()); - } -} \ No newline at end of file diff --git a/cn/src/test/java/org/cloudname/StrategyAnyTest.java b/cn/src/test/java/org/cloudname/StrategyAnyTest.java deleted file mode 100644 index d6f56f54..00000000 --- a/cn/src/test/java/org/cloudname/StrategyAnyTest.java +++ /dev/null @@ -1,88 +0,0 @@ -package org.cloudname; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; - - -/** - * Unit tests for StrategyAny. - * @author dybdahl - */ -public class StrategyAnyTest { - private List<Endpoint> endpoints; - - /** - * Adds a list endpoints with even instance number to the endpoints list. - */ - @Before - public void setup() { - endpoints = new ArrayList<Endpoint>(); - // Only even instance numbers. - for (int i = 0; i < 100; i+= 2) { - endpoints.add(new Endpoint(Coordinate.parse(String.valueOf(i) + ".foo.bar.zot"), - "rest-api", - "somehost", - 4711, - "http", - null)); - } - } - - /** - * Different clients should have different lists. - */ - @Test - public void testDifferentLists() { - StrategyAny strategyAny = new StrategyAny(); - - List<Endpoint> sortedResult = strategyAny.order(new ArrayList<Endpoint>(endpoints)); - - // Try with up tp 150 clients, if they all have the same first element, something is wrong. - // In each iteration there is 1/50 probability for this. 
For 150 runs, the probability for - // false negative is 1,42724769 × 10^-255 (e.g. zero). - for (int z = 0; z < 150; z++) { - StrategyAny strategyAny2 = new StrategyAny(); - List<Endpoint> sortedResult2 = strategyAny2.order(new ArrayList<Endpoint>(endpoints)); - if (sortedResult.get(0).getCoordinate().getInstance() != - sortedResult2.get(0).getCoordinate().getInstance()) { - return; - } - } - assertTrue(false); - } - - /** - * Test that insertion does only create a new first element now and then. - */ - @Test - public void testInsertions() { - StrategyAny strategyAny = new StrategyAny(); - - List<Endpoint> sortedResult = strategyAny.order(new ArrayList<Endpoint>(endpoints)); - int newFrontEndpoint = 0; - for (int c = 1; c < 30; c +=2) { - int headInstance = sortedResult.get(0).getCoordinate().getInstance(); - sortedResult.add(new Endpoint(Coordinate.parse(String.valueOf(c) + ".foo.bar.zot"), - "rest-api", - "somehost", - 4711, - "http", - null)); - sortedResult = strategyAny.order(sortedResult); - if (headInstance != sortedResult.get(0).getCoordinate().getInstance()) { - ++newFrontEndpoint; - } - } - // For each insertion it a probability of less than 1/50 that front element is changed. The probability - // that more than 10 front elements are changed should be close to zero. 
- assertThat(newFrontEndpoint, is(lessThan(10))); - } -} diff --git a/cn/src/test/java/org/cloudname/zk/ZkCloudnameTest.java b/cn/src/test/java/org/cloudname/zk/ZkCloudnameTest.java deleted file mode 100644 index 5bccc3f6..00000000 --- a/cn/src/test/java/org/cloudname/zk/ZkCloudnameTest.java +++ /dev/null @@ -1,324 +0,0 @@ -package org.cloudname.zk; - -import org.cloudname.*; - -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooKeeper; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.*; - -import org.junit.*; -import org.junit.rules.TemporaryFolder; -import static org.junit.Assert.*; -import static org.junit.Assert.assertTrue; - -import org.cloudname.testtools.Net; -import org.cloudname.testtools.zookeeper.EmbeddedZooKeeper; - -import java.io.File; -import java.util.logging.Logger; - -/** - * Unit test for the ZkCloudname class. - * - * @author borud, dybdahl - */ -public class ZkCloudnameTest { - private static final Logger LOG = Logger.getLogger(ZkCloudnameTest.class.getName()); - - private ZooKeeper zk; - private int zkport; - - @Rule public TemporaryFolder temp = new TemporaryFolder(); - - /** - * Set up an embedded ZooKeeper instance backed by a temporary - * directory. The setup procedure also allocates a port that is - * free for the ZooKeeper server so that you should be able to run - * multiple instances of this test. 
- */ - @Before - public void setup() throws Exception { - File rootDir = temp.newFolder("zk-test"); - zkport = Net.getFreePort(); - - LOG.info("EmbeddedZooKeeper rootDir=" + rootDir.getCanonicalPath() + ", port=" + zkport - ); - - // Set up and initialize the embedded ZooKeeper - final EmbeddedZooKeeper ezk = new EmbeddedZooKeeper(rootDir, zkport); - ezk.init(); - - // Set up a zookeeper client that we can use for inspection - final CountDownLatch connectedLatch = new CountDownLatch(1); - - zk = new ZooKeeper("localhost:" + zkport, 1000, new Watcher() { - public void process(WatchedEvent event) { - if (event.getState() == Event.KeeperState.SyncConnected) { - connectedLatch.countDown(); - } - } - }); - connectedLatch.await(); - - LOG.info("ZooKeeper port is " + zkport); - } - - @After - public void tearDown() throws Exception { - zk.close(); - } - - /** - * Tests that the time-out mechanism on connecting to ZooKeeper works. - */ - @Test - public void testTimeout() throws IOException, InterruptedException { - int deadPort = Net.getFreePort(); - try { - new ZkCloudname.Builder().setConnectString("localhost:" + deadPort).build() - .connectWithTimeout(1000, TimeUnit.NANOSECONDS); - fail("Expected time-out exception."); - } catch (CloudnameException e) { - // Expected. - } - } - - /** - * A relatively simple voyage through a typical lifecycle. 
- */ - @Test - public void testSimple() throws Exception { - final Coordinate c = Coordinate.parse("1.service.user.cell"); - final ZkCloudname cn = makeLocalZkCloudname(); - - assertFalse(pathExists("/cn/cell/user/service/1")); - cn.createCoordinate(c); - - // Coordinate should exist, but no status node - assertTrue(pathExists("/cn/cell/user/service/1")); - assertTrue(pathExists("/cn/cell/user/service/1/config")); - assertFalse(pathExists("/cn/cell/user/service/1/status")); - - // Claiming the coordinate creates the status node - final ServiceHandle handle = cn.claim(c); - assertTrue(handle.waitForCoordinateOkSeconds(3)); - assertNotNull(handle); - final CountDownLatch latch = new CountDownLatch(1); - handle.registerCoordinateListener(new CoordinateListener() { - - @Override - public void onCoordinateEvent(Event event, String message) { - if (event == Event.COORDINATE_OK) { - latch.countDown(); - } - } - }); - assertTrue(latch.await(2, TimeUnit.SECONDS)); - - final CountDownLatch configLatch1 = new CountDownLatch(1); - final CountDownLatch configLatch2 = new CountDownLatch(2); - final StringBuilder buffer = new StringBuilder(); - handle.registerConfigListener(new ConfigListener() { - @Override - public void onConfigEvent(Event event, String data) { - buffer.append(data); - configLatch1.countDown(); - configLatch2.countDown(); - } - }); - assertTrue(configLatch1.await(5, TimeUnit.SECONDS)); - assertEquals(buffer.toString(), ""); - zk.setData("/cn/cell/user/service/1/config", "hello".getBytes(), -1); - assertTrue(configLatch2.await(5, TimeUnit.SECONDS)); - assertEquals(buffer.toString(), "hello"); - - assertTrue(pathExists("/cn/cell/user/service/1/status")); - - List<String> nodes = new ArrayList<String>(); - cn.listRecursively(nodes); - assertEquals(2, nodes.size()); - assertEquals(nodes.get(0), "/cn/cell/user/service/1/config"); - assertEquals(nodes.get(1), "/cn/cell/user/service/1/status"); - - // Try to set the status to something else - String msg = "Hamster 
getting quite eager now"; - handle.setStatus(new ServiceStatus(ServiceState.STARTING,msg)); - ServiceStatus status = cn.getStatus(c); - assertEquals(msg, status.getMessage()); - assertSame(ServiceState.STARTING, status.getState()); - - // Publish two endpoints - handle.putEndpoint(new Endpoint(c, "foo", "localhost", 1234, "http", null)); - handle.putEndpoint(new Endpoint(c, "bar", "localhost", 1235, "http", null)); - - handle.setStatus(new ServiceStatus(ServiceState.RUNNING, msg)); - - // Remove one of them - handle.removeEndpoint("bar"); - - List<Endpoint> endpointList = cn.getResolver().resolve("bar.1.service.user.cell"); - assertEquals(0, endpointList.size()); - - endpointList = cn.getResolver().resolve("foo.1.service.user.cell"); - assertEquals(1, endpointList.size()); - Endpoint endpointFoo = endpointList.get(0); - - String fooData = endpointFoo.getName(); - assertEquals("foo", fooData); - assertEquals("foo", endpointFoo.getName()); - assertEquals("localhost", endpointFoo.getHost()); - assertEquals(1234, endpointFoo.getPort()); - assertEquals("http", endpointFoo.getProtocol()); - assertNull(endpointFoo.getEndpointData()); - - // Close handle just invalidates handle - handle.close(); - - // These nodes are ephemeral and will be cleaned out when we - // call cn.releaseClaim(), but calling handle.releaseClaim() explicitly - // cleans out the ephemeral nodes. - assertFalse(pathExists("/cn/cell/user/service/1/status")); - - // Closing Cloudname instance disconnects the zk client - // connection and thus should kill all ephemeral nodes. 
- cn.close(); - - // But the coordinate and its persistent subnodes should - assertTrue(pathExists("/cn/cell/user/service/1")); - assertFalse(pathExists("/cn/cell/user/service/1/endpoints")); - assertTrue(pathExists("/cn/cell/user/service/1/config")); - } - - /** - * Claim non-existing coordinate - */ - @Test - public void testCoordinateNotFound() throws CloudnameException, InterruptedException { - final Coordinate c = Coordinate.parse("3.service.user.cell"); - final Cloudname cn = makeLocalZkCloudname(); - - final ExecutorService executor = Executors.newCachedThreadPool(); - final Callable<Object> task = new Callable<Object>() { - public Object call() throws InterruptedException { - return cn.claim(c); - } - }; - final Future<Object> future = executor.submit(task); - try { - future.get(300, TimeUnit.MILLISECONDS); - } catch (TimeoutException ex) { - // handle the timeout - LOG.info("Got time out, nice!"); - } catch (InterruptedException e) { - fail("Interrupted"); - } catch (ExecutionException e) { - fail("Some error " + e.getMessage()); - // handle other exceptions - } finally { - future.cancel(true); - } - } - - /** - * Try to claim coordinate twice - */ - @Test - public void testDoubleClaim() throws CloudnameException, InterruptedException { - final Coordinate c = Coordinate.parse("2.service.user.cell"); - final CountDownLatch okCounter = new CountDownLatch(1); - final CountDownLatch failCounter = new CountDownLatch(1); - - final CoordinateListener listener = new CoordinateListener() { - @Override - public void onCoordinateEvent(Event event, String message) { - switch (event) { - case COORDINATE_OK: - okCounter.countDown(); - break; - case NOT_OWNER: - failCounter.countDown(); - default: //Any other Event is unexpected. 
- assert(false); - break; - } - } - }; - final Cloudname cn; - try { - cn = makeLocalZkCloudname(); - } catch (CloudnameException e) { - fail("connecting to localhost failed."); - return; - } - - try { - cn.createCoordinate(c); - } catch (CoordinateExistsException e) { - fail("should not happen."); - } - final ServiceHandle handle1 = cn.claim(c); - assert(handle1.waitForCoordinateOkSeconds(4)); - handle1.registerCoordinateListener(listener); - ServiceHandle handle2 = cn.claim(c); - assertFalse(handle2.waitForCoordinateOkSeconds(1)); - handle2.registerCoordinateListener(listener); - assert(okCounter.await(4, TimeUnit.SECONDS)); - assert(failCounter.await(2, TimeUnit.SECONDS)); - } - - - @Test - public void testDestroyBasic() throws Exception { - final Coordinate c = Coordinate.parse("1.service.user.cell"); - final Cloudname cn = makeLocalZkCloudname(); - cn.createCoordinate(c); - assertTrue(pathExists("/cn/cell/user/service/1/config")); - cn.destroyCoordinate(c); - assertFalse(pathExists("/cn/cell/user/service")); - assertTrue(pathExists("/cn/cell/user")); - } - - @Test - public void testDestroyTwoInstances() throws Exception { - final Coordinate c1 = Coordinate.parse("1.service.user.cell"); - final Coordinate c2 = Coordinate.parse("2.service.user.cell"); - final Cloudname cn = makeLocalZkCloudname(); - cn.createCoordinate(c1); - cn.createCoordinate(c2); - assertTrue(pathExists("/cn/cell/user/service/1/config")); - assertTrue(pathExists("/cn/cell/user/service/2/config")); - cn.destroyCoordinate(c1); - assertFalse(pathExists("/cn/cell/user/service/1")); - assertTrue(pathExists("/cn/cell/user/service/2/config")); - } - - @Test - public void testDestroyClaimed() throws Exception { - final Coordinate c = Coordinate.parse("1.service.user.cell"); - final Cloudname cn = makeLocalZkCloudname(); - cn.createCoordinate(c); - ServiceHandle handle = cn.claim(c); - handle.waitForCoordinateOkSeconds(1); - try { - cn.destroyCoordinate(c); - fail("Expected exception to happen"); - } 
catch (CoordinateException e) { - } - } - - private boolean pathExists(String path) throws Exception { - return (null != zk.exists(path, false)); - } - - /** - * Makes a local ZkCloudname instance with the port given by zkPort. - */ - private ZkCloudname makeLocalZkCloudname() throws CloudnameException { - return new ZkCloudname.Builder().setConnectString("localhost:" + zkport).build().connect(); - } -} diff --git a/cn/src/test/java/org/cloudname/zk/ZkCoordinatePathTest.java b/cn/src/test/java/org/cloudname/zk/ZkCoordinatePathTest.java deleted file mode 100644 index 9e9f6dcf..00000000 --- a/cn/src/test/java/org/cloudname/zk/ZkCoordinatePathTest.java +++ /dev/null @@ -1,31 +0,0 @@ -package org.cloudname.zk; - -import org.cloudname.Coordinate; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -/** - * Unit tests for class ZkCoordinatePathTest. - * @author dybdahl - */ -public class ZkCoordinatePathTest { - @Test - public void testSimple() throws Exception { - final Coordinate coordinate = new Coordinate( - 42 /*instance*/, "service", "user", "cell", false /*validate*/); - assertEquals("/cn/cell/user/service/42/config", - ZkCoordinatePath.getConfigPath(coordinate, null)); - assertEquals("/cn/cell/user/service/42/config/name", - ZkCoordinatePath.getConfigPath(coordinate, "name")); - assertEquals("/cn/cell/user/service/42", - ZkCoordinatePath.getCoordinateRoot(coordinate)); - assertEquals("/cn/cell/user/service/42/status", - ZkCoordinatePath.getStatusPath(coordinate)); - assertEquals("/cn/cell/user/service", - ZkCoordinatePath.coordinateWithoutInstanceAsPath( - "cell", "user", "service")); - assertEquals("/cn/cell/user/service/42/status", - ZkCoordinatePath.getStatusPath("cell", "user", "service", 42)); - } -} diff --git a/cn/src/test/java/org/cloudname/zk/ZkResolverTest.java b/cn/src/test/java/org/cloudname/zk/ZkResolverTest.java deleted file mode 100644 index 3dd868f6..00000000 --- a/cn/src/test/java/org/cloudname/zk/ZkResolverTest.java +++ 
/dev/null @@ -1,134 +0,0 @@ -package org.cloudname.zk; - -import org.cloudname.*; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import static org.junit.Assert.*; - - -/** - * This class contains the unit tests for the ZkResolver class. - * - * TODO(borud): add tests for when the input is a coordinate. - * - * @author borud - */ -public class ZkResolverTest { - private Resolver resolver; - - @Rule - public TemporaryFolder temp = new TemporaryFolder(); - - /** - * Set up an embedded ZooKeeper instance backed by a temporary - * directory. The setup procedure also allocates a port that is - * free for the ZooKeeper server so that you should be able to run - * multiple instances of this test. - */ - @Before - public void setup() throws Exception { - resolver = new ZkResolver.Builder() - .addStrategy(new StrategyAll()) - .addStrategy(new StrategyAny()) - .build(new ZkObjectHandler(null).getClient()); - } - - // Valid endpoints. - public static final String[] validEndpointPatterns = new String[] { - "http.1.service.user.cell", - "foo-bar.3245.service.user.cell", - "foo_bar.3245.service.user.cell", - }; - - // Valid strategy. - public static final String[] validStrategyPatterns = new String[] { - "any.service.user.cell", - "all.service.user.cell", - "somestrategy.service.user.cell", - }; - - // Valid endpoint strategy. 
- public static final String[] validEndpointStrategyPatterns = new String[] { - "http.any.service.user.cell", - "thrift.all.service.user.cell", - "some-endpoint.somestrategy.service.user.cell", - }; - - @Test(expected=IllegalArgumentException.class) - public void testRegisterSameListenerTwice() throws Exception { - Resolver.ResolverListener resolverListener = new Resolver.ResolverListener() { - @Override - public void endpointEvent(Event event, Endpoint endpoint) { - - } - }; - resolver.addResolverListener("foo.all.service.user.cell", resolverListener); - resolver.addResolverListener("bar.all.service.user.cell", resolverListener); - } - - @Test - public void testEndpointPatterns() throws Exception { - // Test input that should match - for (String s : validEndpointPatterns) { - assertTrue("Didn't match '" + s + "'", - ZkResolver.endpointPattern.matcher(s).matches()); - } - - // Test input that should not match - for (String s : validStrategyPatterns) { - assertFalse("Matched '" + s + "'", - ZkResolver.endpointPattern.matcher(s).matches()); - } - - // Test input that should not match - for (String s : validEndpointStrategyPatterns) { - assertFalse("Matched '" + s + "'", - ZkResolver.endpointPattern.matcher(s).matches()); - } - } - - @Test - public void testStrategyPatterns() throws Exception { - // Test input that should match - for (String s : validStrategyPatterns) { - assertTrue("Didn't match '" + s + "'", - ZkResolver.strategyPattern.matcher(s).matches()); - } - - // Test input that should not match - for (String s : validEndpointPatterns) { - assertFalse("Matched '" + s + "'", - ZkResolver.strategyPattern.matcher(s).matches()); - } - // Test input that should not match - for (String s : validEndpointStrategyPatterns) { - assertFalse("Matched '" + s + "'", - ZkResolver.endpointPattern.matcher(s).matches()); - } - } - - @Test - public void testEndpointStrategyPatterns() throws Exception { - // Test input that should match - for (String s : 
validEndpointStrategyPatterns) { - assertTrue("Didn't match '" + s + "'", - ZkResolver.endpointStrategyPattern.matcher(s).matches()); - } - - // Test input that should not match - for (String s : validStrategyPatterns) { - assertFalse("Matched '" + s + "'", - ZkResolver.endpointStrategyPattern.matcher(s).matches()); - } - - - // Test input that should not match - for (String s : validEndpointPatterns) { - assertFalse("Matched '" + s + "'", - ZkResolver.endpointStrategyPattern.matcher(s).matches()); - } - } -} \ No newline at end of file diff --git a/flags/src/test/java/org/cloudname/flags/FlagsTest.java b/flags/src/test/java/org/cloudname/flags/FlagsTest.java index 7db55632..4cc3aa75 100644 --- a/flags/src/test/java/org/cloudname/flags/FlagsTest.java +++ b/flags/src/test/java/org/cloudname/flags/FlagsTest.java @@ -4,7 +4,7 @@ import java.io.File; import java.io.FileOutputStream; import javax.annotation.PostConstruct; -import junit.framework.Assert; +import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; diff --git a/pom.xml b/pom.xml index bde06f53..19b35348 100644 --- a/pom.xml +++ b/pom.xml @@ -39,7 +39,7 @@ <cn.curator.version>2.9.0</cn.curator.version> <cn.jackson.version>2.1.1</cn.jackson.version> <cn.junit.version>4.11</cn.junit.version> - <cn.netty.version>4.0.32.Final</cn.netty.version> + <cn.netty.version>3.7.0.Final</cn.netty.version> <cn.protobuf.version>2.6.1</cn.protobuf.version> <cn.jline.version>0.9.94</cn.jline.version> <cn.jmxri.version>1.2.1</cn.jmxri.version> @@ -47,13 +47,14 @@ <cn.jbcrypt.version>0.3m</cn.jbcrypt.version> <cn.joda-time.version>2.1</cn.joda-time.version> <cn.a3.jersey.version>1.16</cn.a3.jersey.version> - <integrationSourceDirectory>src/integrationtest</integrationSourceDirectory> - <integrationOutputDirectory>target/integrationtest-classes</integrationOutputDirectory> </properties> <modules> <module>a3</module> - <module>cn</module> + <module>cn-core</module> + 
<module>cn-service</module> + <module>cn-memory</module> + <module>cn-zookeeper</module> <module>testtools</module> <module>log</module> <module>timber</module> @@ -82,15 +83,6 @@ <useFile>false</useFile> </configuration> </plugin> - <plugin> - <groupId>org.codehaus.mojo</groupId> - <artifactId>cobertura-maven-plugin</artifactId> - <version>2.5.2</version> - <configuration> - <format>xml</format> - <aggregate>true</aggregate> - </configuration> - </plugin> </plugins> </build> @@ -108,7 +100,13 @@ <!-- Internal dependencies --> <dependency> <groupId>org.cloudname</groupId> - <artifactId>cn</artifactId> + <artifactId>cn-core</artifactId> + <version>${project.version}</version> + </dependency> + + <dependency> + <groupId>org.cloudname</groupId> + <artifactId>cn-memory</artifactId> <version>${project.version}</version> </dependency> @@ -139,7 +137,7 @@ <!-- Netty --> <dependency> <groupId>io.netty</groupId> - <artifactId>netty-all</artifactId> + <artifactId>netty</artifactId> <version>${cn.netty.version}</version> </dependency> @@ -157,34 +155,6 @@ <version>${cn.protobuf.version}</version> </dependency> - <!-- ZooKeeper --> - <dependency> - <groupId>org.apache.zookeeper</groupId> - <artifactId>zookeeper</artifactId> - <version>${cn.zookeeper.version}</version> - <exclusions> - <exclusion> - <groupId>com.sun.jmx</groupId> - <artifactId>jmxri</artifactId> - </exclusion> - <exclusion> - <groupId>com.sun.jdmk</groupId> - <artifactId>jmxtools</artifactId> - </exclusion> - <exclusion> - <groupId>javax.jms</groupId> - <artifactId>jms</artifactId> - </exclusion> - </exclusions> - </dependency> - - <!-- Apache Curator --> - <dependency> - <groupId>org.apache.curator</groupId> - <artifactId>curator-framework</artifactId> - <version>${cn.curator.version}</version> - </dependency> - <!-- Jackson JSON library --> <dependency> <groupId>com.fasterxml.jackson.core</groupId> @@ -254,135 +224,4 @@ </dependencies> </dependencyManagement> - <profiles> - <profile> - <activation> - 
<file><exists>src/integrationtest</exists></file> - </activation> - <id>it</id> - <build> - <pluginManagement> - <plugins> - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-surefire-plugin</artifactId> - <version>2.10</version> - <configuration> - <test>**/*.java</test> - </configuration> - </plugin> - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-antrun-plugin</artifactId> - <executions> - <execution> - <id>create-directory</id> - <phase>pre-integration-test</phase> - <goals> - <goal>run</goal> - </goals> - <configuration> - <tasks> - <echo message="Creating Directory ${integrationOutputDirectory}" /> - <mkdir dir="${integrationOutputDirectory}" /> - </tasks> - </configuration> - </execution> - </executions> - </plugin> - <plugin> - <groupId>org.codehaus.mojo</groupId> - <artifactId>build-helper-maven-plugin</artifactId> - <version>1.5</version> - <executions> - <execution> - <id>add-test-sources</id> - <phase>pre-integration-test</phase> - <goals> - <goal>add-test-source</goal> - </goals> - <configuration> - <sources> - <source>${integrationSourceDirectory}/java</source> - </sources> - </configuration> - </execution> - <execution> - <id>add-test-resources</id> - <phase>pre-integration-test</phase> - <goals> - <goal>add-test-resource</goal> - </goals> - <configuration> - <resources> - <resource> - <directory>${integrationSourceDirectory}/java</directory> - <targetPath>${integrationOutputDirectory}</targetPath> - </resource> - </resources> - </configuration> - </execution> - <execution> - <id>add-empty-directory</id> - <phase>pre-integration-test</phase> - <goals> - <goal>add-test-resource</goal> - </goals> - <configuration> - <resources> - <resource> - <directory>${integrationSourceDirectory}/java</directory> - <targetPath>${integrationOutputDirectory}</targetPath> - <excludes> - <exclude>**/*</exclude> - </excludes> - </resource> - </resources> - </configuration> - </execution> - </executions> - </plugin> - 
<plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-compiler-plugin</artifactId> - <version>2.3.2</version> - <executions> - <execution> - <phase>pre-integration-test</phase> - <goals> - <goal>testCompile</goal> - </goals> - <configuration> - <compilerArguments> - <d>${basedir}/${integrationOutputDirectory}</d> - </compilerArguments> - </configuration> - </execution> - </executions> - </plugin> - <plugin> - <artifactId>maven-failsafe-plugin</artifactId> - <version>2.8</version> - <configuration> - <testClassesDirectory>${integrationOutputDirectory}</testClassesDirectory> - <reportsDirectory>${integrationOutputDirectory}/failsafe-reports</reportsDirectory> - <test>**/*.java</test> - <additionalClasspathElements> - <additionalClasspathElement>${integrationSourceDirectory}/resources</additionalClasspathElement> - </additionalClasspathElements> - </configuration> - <executions> - <execution> - <goals> - <goal>integration-test</goal> - <goal>verify</goal> - </goals> - </execution> - </executions> - </plugin> - </plugins> - </pluginManagement> - </build> - </profile> - </profiles> </project> diff --git a/testtools/pom.xml b/testtools/pom.xml index c50037e1..817a9a87 100644 --- a/testtools/pom.xml +++ b/testtools/pom.xml @@ -15,17 +15,16 @@ <url>https://github.com/Cloudname/cloudname</url> <dependencies> - <dependency> - <groupId>org.apache.curator</groupId> - <artifactId>curator-test</artifactId> - <version>${cn.curator.version}</version> - </dependency> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> - <scope>test</scope> + <scope>compile</scope> </dependency> + <dependency> + <groupId>org.cloudname</groupId> + <artifactId>cn-core</artifactId> + </dependency> </dependencies> </project> diff --git a/testtools/src/main/java/org/cloudname/testtools/backend/CoreBackendTest.java b/testtools/src/main/java/org/cloudname/testtools/backend/CoreBackendTest.java new file mode 100644 index 00000000..3525b4d6 --- /dev/null +++ 
b/testtools/src/main/java/org/cloudname/testtools/backend/CoreBackendTest.java @@ -0,0 +1,635 @@ +package org.cloudname.testtools.backend; + +import org.cloudname.core.CloudnameBackend; +import org.cloudname.core.CloudnamePath; +import org.cloudname.core.LeaseHandle; +import org.cloudname.core.LeaseListener; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Core backend tests. This ensures the backend implementation works as expected on the most + * basic level. Override this class in your backend implementation to test it. + * + * @author [email protected] + */ +public abstract class CoreBackendTest { + private final CloudnamePath serviceA = new CloudnamePath( + new String[] { "local", "test", "service-a" }); + private final CloudnamePath serviceB = new CloudnamePath( + new String[] { "local", "test", "service-b" }); + + private final Random random = new Random(); + + /** + * Max data propagation time (in ms) for notifications from the backend. Override if your + * backend implementation is slow. 5 ms is a lot of time though so do it carefully. + */ + protected int getBackendPropagationTime() { + return 5; + } + /** + * Ensure multiple clients can connect and that leases get an unique path for each client. 
+ */ + @Test + public void temporaryLeaseCreation() throws Exception { + try (final CloudnameBackend backend = getBackend()) { + final String data = Long.toHexString(random.nextLong()); + final LeaseHandle lease = backend.createTemporaryLease(serviceA, data); + assertThat("Expected lease to be not null", lease, is(notNullValue())); + + assertTrue("Expected lease path to be a subpath of the supplied lease (" + serviceA + + ") but it is " + lease.getLeasePath(), + serviceA.isSubpathOf(lease.getLeasePath())); + + assertThat("The temporary lease data can be read", + backend.readTemporaryLeaseData(lease.getLeasePath()), is(data)); + + final String newData = Long.toHexString(random.nextLong()); + assertThat("Expected to be able to write lease data but didn't", + lease.writeLeaseData(newData), is(true)); + + assertThat("Expected to be able to read data back but didn't", + backend.readTemporaryLeaseData(lease.getLeasePath()), is(newData)); + lease.close(); + + assertThat("Expect the lease path to be null", lease.getLeasePath(), is(nullValue())); + + assertFalse("Did not expect to be able to write lease data for a closed lease", + lease.writeLeaseData(Long.toHexString(random.nextLong()))); + assertThat("The temporary lease data can not be read", + backend.readTemporaryLeaseData(lease.getLeasePath()), is(nullValue())); + + + final int numberOfLeases = 50; + + final Set<String> leasePaths = new HashSet<>(); + for (int i = 0; i < numberOfLeases; i++) { + final String randomData = Long.toHexString(random.nextLong()); + final LeaseHandle handle = backend.createTemporaryLease(serviceB, randomData); + leasePaths.add(handle.getLeasePath().join(':')); + handle.close(); + } + + assertThat("Expected " + numberOfLeases + " unique paths but it was " + leasePaths.size(), + leasePaths.size(), is(numberOfLeases)); + } + } + + /** + * A very simple single-threaded notification. Make sure this works before implementing + * the multiple notifications elsewhere in this test. 
+ */ + @Test + public void simpleTemporaryNotification() throws Exception { + + try (final CloudnameBackend backend = getBackend()) { + + final CloudnamePath rootPath = new CloudnamePath(new String[]{"simple"}); + final CountDownLatch createCounter = new CountDownLatch(1); + final CountDownLatch removeCounter = new CountDownLatch(1); + final CountDownLatch dataCounter = new CountDownLatch(1); + + final String firstData = "first data"; + final String lastData = "last data"; + final LeaseListener listener = new LeaseListener() { + @Override + public void leaseCreated(CloudnamePath path, String data) { + createCounter.countDown(); + if (data.equals(lastData)) { + dataCounter.countDown(); + } + } + + @Override + public void leaseRemoved(CloudnamePath path) { + removeCounter.countDown(); + } + + @Override + public void dataChanged(CloudnamePath path, String data) { + dataCounter.countDown(); + } + }; + backend.addTemporaryLeaseListener(rootPath, listener); + final LeaseHandle handle = backend.createTemporaryLease(rootPath, firstData); + assertThat(handle, is(notNullValue())); + Thread.sleep(getBackendPropagationTime()); + + handle.writeLeaseData(lastData); + Thread.sleep(getBackendPropagationTime()); + + handle.close(); + + assertTrue("Expected create notification but didn't get one", + createCounter.await(getBackendPropagationTime(), TimeUnit.MILLISECONDS)); + assertTrue("Expected remove notification but didn't get one", + removeCounter.await(getBackendPropagationTime(), TimeUnit.MILLISECONDS)); + assertTrue("Expected data notification but didn't get one", + dataCounter.await(getBackendPropagationTime(), TimeUnit.MILLISECONDS)); + + backend.removeTemporaryLeaseListener(listener); + } + } + + /** + * Ensure permanent leases can be created and that they can't be overwritten by clients using + * the library. 
+ */ + @Test + public void permanentLeaseCreation() throws Exception { + final CloudnamePath leasePath = new CloudnamePath(new String[]{"some", "path"}); + final String dataString = "some data string"; + final String newDataString = "new data string"; + + + try (final CloudnameBackend backend = getBackend()) { + backend.removePermanentLease(leasePath); + + assertThat("Permanent lease can be created", + backend.createPermanantLease(leasePath, dataString), is(true)); + + assertThat("Permanent lease data can be read", + backend.readPermanentLeaseData(leasePath), is(dataString)); + + assertThat("Permanent lease can't be created twice", + backend.createPermanantLease(leasePath, dataString), is(false)); + + assertThat("Permanent lease can be updated", + backend.writePermanentLeaseData(leasePath, newDataString), is(true)); + + assertThat("Permanent lease data can be read after update", + backend.readPermanentLeaseData(leasePath), is(newDataString)); + } + + try (final CloudnameBackend backend = getBackend()) { + assertThat("Permanent lease data can be read from another backend", + backend.readPermanentLeaseData(leasePath), is(newDataString)); + assertThat("Permanent lease can be removed", + backend.removePermanentLease(leasePath), is(true)); + assertThat("Lease can't be removed twice", + backend.removePermanentLease(leasePath), is(false)); + assertThat("Lease data can't be read from deleted lease", + backend.readPermanentLeaseData(leasePath), is(nullValue())); + } + } + + /** + * Ensure clients are notified of changes + */ + @Test + public void multipleTemporaryNotifications() throws Exception { + try (final CloudnameBackend backend = getBackend()) { + final CloudnamePath rootPath = new CloudnamePath(new String[]{"root", "lease"}); + final String clientData = "client data here"; + + final LeaseHandle lease = backend.createTemporaryLease(rootPath, clientData); + assertThat("Handle to lease is returned", lease, is(notNullValue())); + assertThat("Lease is a child of the root 
lease", + rootPath.isSubpathOf(lease.getLeasePath()), is(true)); + + int numListeners = 10; + final int numUpdates = 10; + + // Add some listeners to the temporary lease. Each should be notified once on + // creation, once on removal and once every time the data is updated + final CountDownLatch createNotifications = new CountDownLatch(numListeners); + final CountDownLatch dataNotifications = new CountDownLatch(numListeners * numUpdates); + final CountDownLatch removeNotifications = new CountDownLatch(numListeners); + + final List<LeaseListener> listeners = new ArrayList<>(); + for (int i = 0; i < numListeners; i++) { + final LeaseListener listener = new LeaseListener() { + private AtomicInteger lastData = new AtomicInteger(-1); + + @Override + public void leaseCreated(final CloudnamePath path, final String data) { + createNotifications.countDown(); + } + + @Override + public void leaseRemoved(final CloudnamePath path) { + removeNotifications.countDown(); + } + + @Override + public void dataChanged(final CloudnamePath path, final String data) { + assertThat(lastData.incrementAndGet(), is(Integer.parseInt(data))); + dataNotifications.countDown(); + } + }; + listeners.add(listener); + backend.addTemporaryLeaseListener(rootPath, listener); + } + + // Change the data a few times. Every change should be propagated to the listeners + // in the same order they have changed + for (int i = 0; i < numUpdates; i++) { + lease.writeLeaseData(Integer.toString(i)); + Thread.sleep(getBackendPropagationTime()); + } + + // Remove the lease. 
Removal notifications will be sent to the clients + + assertThat("All create notifications are received but " + createNotifications.getCount() + + " remains out of " + numListeners, + createNotifications.await(getBackendPropagationTime(), TimeUnit.MICROSECONDS), is(true)); + + assertThat("All data notifications are received but " + dataNotifications.getCount() + + " remains out of " + (numListeners * numUpdates), + dataNotifications.await(getBackendPropagationTime(), TimeUnit.MILLISECONDS), is(true)); + + lease.close(); + assertThat("All remove notifications are received but " + removeNotifications.getCount() + + " remains out of " + numListeners, + removeNotifications.await(getBackendPropagationTime(), TimeUnit.MILLISECONDS), is(true)); + + // Remove the listeners + for (final LeaseListener listener : listeners) { + lease.close(); + backend.removeTemporaryLeaseListener(listener); + } + } + } + + /** + * Test a simple peer to peer scheme; all clients grabbing a lease and listening on other + * clients. + */ + @Test + public void multipleServicesWithMultipleClients() throws Exception { + try (final CloudnameBackend backend = getBackend()) { + + final CloudnamePath rootLease = new CloudnamePath(new String[]{"multi", "multi"}); + final int numberOfClients = 5; + + // All clients will be notified of all other clients (including themselves) + final CountDownLatch createNotifications + = new CountDownLatch(numberOfClients * numberOfClients); + // All clients will write one change each + final CountDownLatch dataNotifications = new CountDownLatch(numberOfClients); + // There will be 99 + 98 + 97 + 96 ... 
1 notifications, in all n (n + 1) / 2 + // remove notifications + final int n = numberOfClients - 1; + final CountDownLatch removeNotifications = new CountDownLatch(n * (n + 1) / 2); + + final Runnable clientProcess = new Runnable() { + @Override + public void run() { + final String myData = Long.toHexString(random.nextLong()); + final LeaseHandle handle = backend.createTemporaryLease(rootLease, myData); + assertThat("Got a valid handle back", handle, is(notNullValue())); + backend.addTemporaryLeaseListener(rootLease, new LeaseListener() { + @Override + public void leaseCreated(final CloudnamePath path, final String data) { + assertThat("Notification belongs to root path", + rootLease.isSubpathOf(path), is(true)); + createNotifications.countDown(); + } + + @Override + public void leaseRemoved(final CloudnamePath path) { + removeNotifications.countDown(); + } + + @Override + public void dataChanged(final CloudnamePath path, final String data) { + dataNotifications.countDown(); + } + }); + + try { + assertThat(createNotifications.await( + getBackendPropagationTime(), TimeUnit.MILLISECONDS), + is(true)); + } catch (InterruptedException ie) { + throw new RuntimeException(ie); + } + + // Change the data for my own lease, wait for it to propagate + assertThat(handle.writeLeaseData(Long.toHexString(random.nextLong())), + is(true)); + try { + Thread.sleep(getBackendPropagationTime()); + } catch (final InterruptedException ie) { + throw new RuntimeException(ie); + } + + try { + assertThat(dataNotifications.await( + getBackendPropagationTime(), TimeUnit.MILLISECONDS), + is(true)); + } catch (InterruptedException ie) { + throw new RuntimeException(ie); + } + + // ..and close my lease + try { + handle.close(); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } + }; + + final Executor executor = Executors.newCachedThreadPool(); + for (int i = 0; i < numberOfClients; i++) { + executor.execute(clientProcess); + } + + 
removeNotifications.await(getBackendPropagationTime(), TimeUnit.SECONDS); + } + } + + + /** + * Just make sure unknown listeners doesn't throw exceptions + */ + @Test + public void removeInvalidListener() throws Exception { + try (final CloudnameBackend backend = getBackend()) { + final LeaseListener unknownnListener = new LeaseListener() { + @Override + public void leaseCreated(final CloudnamePath path, final String data) { + } + + @Override + public void leaseRemoved(final CloudnamePath path) { + } + + @Override + public void dataChanged(final CloudnamePath path, final String data) { + } + }; + backend.removeTemporaryLeaseListener(unknownnListener); + } + } + + + /** + * Create a whole set of different listener pairs that runs in parallel. They won't + * receive notifications from any other lease - listener pairs. + */ + @Test + public void multipleIndependentListeners() throws Exception { + try (final CloudnameBackend backend = getBackend()) { + final int leasePairs = 10; + + class LeaseWorker { + private final String id; + private final CloudnamePath rootPath; + private final LeaseListener listener; + private final AtomicInteger createNotifications = new AtomicInteger(0); + private final AtomicInteger dataNotifications = new AtomicInteger(0); + private LeaseHandle handle; + + public LeaseWorker(final String id) { + this.id = id; + rootPath = new CloudnamePath(new String[]{"pair", id}); + listener = new LeaseListener() { + + @Override + public void leaseCreated(final CloudnamePath path, final String data) { + createNotifications.incrementAndGet(); + } + + @Override + public void leaseRemoved(final CloudnamePath path) { + } + + @Override + public void dataChanged(final CloudnamePath path, final String data) { + dataNotifications.incrementAndGet(); + } + }; + } + + public void createLease() { + backend.addTemporaryLeaseListener(rootPath, listener); + try { + Thread.sleep(getBackendPropagationTime()); + } catch (final InterruptedException ie) { + throw new 
RuntimeException(ie); + } + handle = backend.createTemporaryLease(rootPath, id); + } + + public void writeData() { + handle.writeLeaseData(id); + } + + public void checkNumberOfNotifications() { + // There will be two notifications; one for this lease, one for the other + assertThat("Expected 2 create notifications", createNotifications.get(), is(2)); + // There will be two notifications; one for this lease, one for the other + assertThat("Expected 2 data notifications", dataNotifications.get(), is(2)); + } + + public void closeLease() { + try { + handle.close(); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } + } + + final List<LeaseWorker> workers = new ArrayList<>(); + + for (int i = 0; i < leasePairs; i++) { + final String id = Long.toHexString(random.nextLong()); + final LeaseWorker leaseWorker1 = new LeaseWorker(id); + leaseWorker1.createLease(); + workers.add(leaseWorker1); + final LeaseWorker leaseWorker2 = new LeaseWorker(id); + leaseWorker2.createLease(); + workers.add(leaseWorker2); + } + + for (final LeaseWorker worker : workers) { + worker.writeData(); + } + Thread.sleep(getBackendPropagationTime()); + for (final LeaseWorker worker : workers) { + worker.checkNumberOfNotifications(); + } + for (final LeaseWorker worker : workers) { + worker.closeLease(); + } + } + } + + /** + * Ensure permanent leases distribute notifications as well + */ + @Test + public void permanentLeaseNotifications() throws Exception { + final CloudnamePath rootLease = new CloudnamePath(new String[] { "permanent", "vacation" }); + final String leaseData = "the aero smiths"; + final String newLeaseData = "popcultural reference"; + + try (final CloudnameBackend backend = getBackend()) { + backend.removePermanentLease(rootLease); + assertThat("Can create permanent node", + backend.createPermanantLease(rootLease, leaseData), is(true)); + } + + final AtomicInteger numberOfNotifications = new AtomicInteger(0); + final CountDownLatch createLatch = new 
CountDownLatch(1); + final CountDownLatch removeLatch = new CountDownLatch(1); + final CountDownLatch dataLatch = new CountDownLatch(1); + + final LeaseListener listener = new LeaseListener() { + @Override + public void leaseCreated(final CloudnamePath path, final String data) { + assertThat(path, is(equalTo(rootLease))); + assertThat(data, is(equalTo(leaseData))); + numberOfNotifications.incrementAndGet(); + createLatch.countDown(); + } + + @Override + public void leaseRemoved(final CloudnamePath path) { + assertThat(path, is(equalTo(rootLease))); + numberOfNotifications.incrementAndGet(); + removeLatch.countDown(); + } + + @Override + public void dataChanged(final CloudnamePath path, final String data) { + assertThat(path, is(equalTo(rootLease))); + assertThat(data, is(equalTo(newLeaseData))); + numberOfNotifications.incrementAndGet(); + dataLatch.countDown(); + } + }; + + try (final CloudnameBackend backend = getBackend()) { + + assertThat("Lease still exists", + backend.readPermanentLeaseData(rootLease), is(leaseData)); + + // Add the lease back + backend.addPermanentLeaseListener(rootLease, listener); + + assertThat("New data can be written", + backend.writePermanentLeaseData(rootLease, newLeaseData), is(true)); + + // Write new data + assertThat("Lease can be removed", backend.removePermanentLease(rootLease), is(true)); + + assertTrue(createLatch.await(getBackendPropagationTime(), TimeUnit.MILLISECONDS)); + assertTrue(dataLatch.await(getBackendPropagationTime(), TimeUnit.MILLISECONDS)); + assertTrue(removeLatch.await(getBackendPropagationTime(), TimeUnit.MILLISECONDS)); + // This includes one created, one data, one close + assertThat("One notifications is expected but only got " + + numberOfNotifications.get(), numberOfNotifications.get(), is(3)); + + backend.removePermanentLeaseListener(listener); + // just to be sure - this won't upset anything + backend.removePermanentLeaseListener(listener); + } + } + + + /** + * Set up two listeners listening to 
different permanent leases. There should be no crosstalk + * between the listeners. + */ + @Test + public void multiplePermanentListeners() throws Exception { + final CloudnamePath permanentA = new CloudnamePath(new String[] { "primary" }); + final CloudnamePath permanentB = new CloudnamePath(new String[] { "secondary" }); + final CloudnamePath permanentC = new CloudnamePath( + new String[] { "tertiary", "permanent", "lease" }); + + try (final CloudnameBackend backend = getBackend()) { + backend.addPermanentLeaseListener(permanentA, new LeaseListener() { + @Override + public void leaseCreated(final CloudnamePath path, final String data) { + assertThat(path, is(equalTo(permanentA))); + } + + @Override + public void leaseRemoved(final CloudnamePath path) { + assertThat(path, is(equalTo(permanentA))); + } + + @Override + public void dataChanged(final CloudnamePath path, final String data) { + assertThat(path, is(equalTo(permanentA))); + } + }); + + backend.addPermanentLeaseListener(permanentB, new LeaseListener() { + @Override + public void leaseCreated(final CloudnamePath path, final String data) { + assertThat(path, is(equalTo(permanentB))); + } + + @Override + public void leaseRemoved(final CloudnamePath path) { + assertThat(path, is(equalTo(permanentB))); + } + + @Override + public void dataChanged(final CloudnamePath path, final String data) { + assertThat(path, is(equalTo(permanentB))); + } + }); + + backend.addPermanentLeaseListener(permanentC, new LeaseListener() { + @Override + public void leaseCreated(final CloudnamePath path, final String data) { + fail("Did not expect any leases to be created at " + permanentC); + } + + @Override + public void leaseRemoved(final CloudnamePath path) { + fail("Did not expect any leases to be created at " + permanentC); + } + + @Override + public void dataChanged(final CloudnamePath path, final String data) { + fail("Did not expect any leases to be created at " + permanentC); + } + }); + + 
backend.createPermanantLease(permanentA, "Some data that belongs to A"); + backend.createPermanantLease(permanentB, "Some data that belongs to B"); + + // Some might say this is a dirty trick but permanent and temporary leases should not + // interfere with eachother. + final LeaseHandle handle = backend.createTemporaryLease( + permanentC, "Some data that belongs to C"); + assertThat(handle, is(notNullValue())); + handle.writeLeaseData("Some other data that belongs to C"); + try { + handle.close(); + } catch (Exception ex) { + fail(ex.getMessage()); + } + } + } + + protected abstract CloudnameBackend getBackend(); +} diff --git a/testtools/src/main/java/org/cloudname/testtools/network/ClientThread.java b/testtools/src/main/java/org/cloudname/testtools/network/ClientThread.java deleted file mode 100644 index cadbb470..00000000 --- a/testtools/src/main/java/org/cloudname/testtools/network/ClientThread.java +++ /dev/null @@ -1,121 +0,0 @@ -package org.cloudname.testtools.network; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.Socket; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * ClientThread forwards communication for one pair of sockets. 
- * TODO(borud): this class lacks unit tests - * - * @author dybdahl - */ -class ClientThread { - private final static Logger log = Logger.getLogger(ClientThread.class.getName()); - - private Socket serverSocket = null; - private Socket clientSocket = null; - private Object threadMonitor = new Object(); - - - /** - * Constructor - * @param clientSocket socket crated for incomming call - * @param hostName destination host name - * @param hostPort destination host port - */ - public ClientThread(final Socket clientSocket, final String hostName, final int hostPort) { - this.clientSocket = clientSocket; - Runnable myRunnable = new Runnable() { - @Override - public void run() { - final InputStream clientIn, serverIn; - final OutputStream clientOut, serverOut; - - try { - synchronized (threadMonitor) { - serverSocket = new Socket(hostName, hostPort); - } - clientIn = clientSocket.getInputStream(); - clientOut = clientSocket.getOutputStream(); - serverIn = serverSocket.getInputStream(); - serverOut = serverSocket.getOutputStream(); - } catch (IOException ioe) { - log.severe("Portforwarder: Can not connect to " + hostName + ":" + hostPort); - try { - if (serverSocket != null) { - serverSocket.close(); - } - } catch (IOException e) { - log.severe("Could not close server socket"); - } - return; - } - synchronized (threadMonitor) { - startForwarderThread(clientIn, serverOut); - startForwarderThread(serverIn, clientOut); - } - } - }; - Thread fireAndForget = new Thread(myRunnable); - fireAndForget.start(); - } - - /** - * Closes sockets, which again closes the running threads. 
- */ - public void close() { - synchronized (threadMonitor) { - try { - if (serverSocket != null) { - serverSocket.close(); - serverSocket = null; - } - } catch (Exception e) { - log.log(Level.SEVERE, "Error while closing server socket", e); - } - try { - if (clientSocket != null) { - clientSocket.close(); - clientSocket = null; - } - } catch (Exception e) { - log.log(Level.SEVERE, "Error while closing client socket", e); - } - } - } - - private Thread startForwarderThread( - final InputStream inputStream, final OutputStream outputStream) { - final int BUFFER_SIZE = 4096; - Runnable myRunnable = new Runnable() { - @Override - public void run() { - byte[] buffer = new byte[BUFFER_SIZE]; - try { - while (true) { - int bytesRead = inputStream.read(buffer); - - if (bytesRead == -1) - // End of stream is reached --> exit - break; - - outputStream.write(buffer, 0, bytesRead); - outputStream.flush(); - } - } catch (IOException e) { - // Read/write failed --> connection is broken - log.log(Level.SEVERE, "Forwarding in loop died."); - } - // Notify parent thread that the connection is broken - close(); - } - }; - Thread forwarder = new Thread(myRunnable); - forwarder.start(); - return forwarder; - } -} diff --git a/testtools/src/main/java/org/cloudname/testtools/network/PortForwarder.java b/testtools/src/main/java/org/cloudname/testtools/network/PortForwarder.java deleted file mode 100644 index 35c2ec5e..00000000 --- a/testtools/src/main/java/org/cloudname/testtools/network/PortForwarder.java +++ /dev/null @@ -1,145 +0,0 @@ -package org.cloudname.testtools.network; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.ServerSocket; -import java.net.Socket; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * Simple class for setting up port forwarding in unit tests. This - * enables killing the connection. 
- * - * TODO(stalehd): Remove? Replace with TestCluster class. Makes for better integration tests. - * TODO(borud): this class lacks unit tests - * - * @author dybdahl - */ -public class PortForwarder { - private final static Logger log = Logger.getLogger(PortForwarder.class.getName()); - - private final int myPort; - private final AtomicBoolean isAlive = new AtomicBoolean(true); - private ServerSocket serverSocket = null; - - private Thread portThread; - private final Object threadMonitor = new Object(); - - private final List<ClientThread> clientThreadList = new ArrayList<ClientThread>(); - private final AtomicBoolean pause = new AtomicBoolean(false); - - private final String hostName; - private final int hostPort; - - /** - * Constructor for port-forwarder. Does stat the forwarder. - * @param myPort client port - * @param hostName name of host to forward to. - * @param hostPort port of host to forward to. - * @throws IOException if unable to open server socket - */ - public PortForwarder(final int myPort, final String hostName, final int hostPort) throws IOException { - this.myPort = myPort; - this.hostName = hostName; - this.hostPort = hostPort; - log.info("Starting port forwarder " + myPort + " -> " + hostPort); - startServerSocketThread(); - } - - private void startServerSocketThread() - throws IOException { - openServerSocket(); - Runnable myRunnable = new Runnable() { - @Override - public void run() { - log.info("Forwarder running"); - while (isAlive.get() && !pause.get()) { - try { - final Socket clientSocket = serverSocket.accept(); - synchronized (threadMonitor) { - if (isAlive.get() && !pause.get()) { - clientThreadList.add(new ClientThread(clientSocket, hostName, hostPort)); - } else { - clientSocket.close(); - } - } - } catch (IOException e) { - log.log(Level.SEVERE, "Got exception in forwarder", e); - // Keep going, maybe later connections will succeed. 
- } - } - log.info("Forwarder stopped"); - } - }; - portThread = new Thread(myRunnable); - // Make this a daemon thread, so it won't keep the VM running at shutdown. - portThread.setDaemon(true); - portThread.start(); - } - - private void openServerSocket() throws IOException { - serverSocket = new ServerSocket(); - serverSocket.setReuseAddress(true); - serverSocket.bind(new InetSocketAddress("localhost", myPort)); - } - - /** - * Forces client to loose connection and refuses to create new (closing attempts to connect). - * @throws IOException - * @throws InterruptedException - */ - public void pause() throws IOException, InterruptedException { - final Thread currentServerThread; - synchronized (threadMonitor) { - if (!pause.compareAndSet(false, true)) { - return; - } - for (ClientThread clientThread: clientThreadList) { - clientThread.close(); - - } - clientThreadList.clear(); - serverSocket.close(); - /* - * Make a copy of the server socket thread, so we can wait for it - * to complete outside any monitor. - */ - currentServerThread = portThread; - } - currentServerThread.join(); - } - - /** - * Lets client start connecting again. - * @throws IOException - */ - public void unpause() throws IOException { - synchronized (threadMonitor) { - if (pause.compareAndSet(true, false)) { - startServerSocketThread(); - } - } - } - - /** - * Shuts down the forwarder. - */ - public void close() { - isAlive.set(false); - try { - pause(); - } catch (final IOException e) { - // Ignore this - log.severe("Could not close server socket."); - } catch (InterruptedException e) { - log.severe("Interrupted while waiting for server thread to finish."); - // Reassert interrupt. 
- Thread.currentThread().interrupt(); - } - } -} - diff --git a/testtools/src/main/java/org/cloudname/testtools/zookeeper/EmbeddedZooKeeper.java b/testtools/src/main/java/org/cloudname/testtools/zookeeper/EmbeddedZooKeeper.java deleted file mode 100644 index c081434f..00000000 --- a/testtools/src/main/java/org/cloudname/testtools/zookeeper/EmbeddedZooKeeper.java +++ /dev/null @@ -1,88 +0,0 @@ -package org.cloudname.testtools.zookeeper; - -import org.apache.curator.test.TestingServer; - -import java.io.File; -import java.io.IOException; - -/** - * Utility class to fire up an embedded ZooKeeper server in the - * current JVM for testing purposes. - * - * @author borud - * @author stalehd - */ -public final class EmbeddedZooKeeper { - private final File rootDir; - private final int port; - private TestingServer server; - - /** - * @param rootDir the root directory of where the ZooKeeper - * instance will keep its files. If null, a temporary directory is created - * @param port the port where ZooKeeper will listen for client - * connections. - */ - public EmbeddedZooKeeper(File rootDir, int port) { - this.rootDir = rootDir; - this.port = port; - } - - private void delDir(File path) throws IOException { - for(File f : path.listFiles()) - { - if(f.isDirectory()) { - delDir(f); - } else { - if (!f.delete() && f.exists()) { - throw new IOException("Failed to delete file " + f); - } - } - } - if (!path.delete() && path.exists()) { - throw new IOException("Failed to delete directory " + path); - } - - } - - /** - * Delete all data owned by the ZooKeeper instance. - * @throws IOException if some file could not be deleted - */ - public void del() throws IOException { - File path = new File(rootDir, "data"); - delDir(path); - } - - - /** - * Set up the ZooKeeper instance. 
- */ - public void init() throws Exception { - this.server = new TestingServer(this.port, this.rootDir); - // Create the data directory - File dataDir = new File(rootDir, "data"); - dataDir.mkdir(); - - this.server.start(); - } - - /** - * Shut the ZooKeeper instance down. - * @throws IOException if shutdown encountered I/O errors - */ - public void shutdown() throws IOException { - this.server.stop(); - del(); - } - - /** - * Get the client connection string for the ZooKeeper instance. - * - * @return a String containing a comma-separated list of host:port - * entries for use as a parameter to the ZooKeeper client class. - */ - public String getClientConnectionString() { - return "127.0.0.1:" + port; - } -} diff --git a/timber/pom.xml b/timber/pom.xml index 3ed79030..6dca5385 100644 --- a/timber/pom.xml +++ b/timber/pom.xml @@ -62,7 +62,7 @@ <dependency> <groupId>io.netty</groupId> - <artifactId>netty-all</artifactId> + <artifactId>netty</artifactId> </dependency> <dependency> @@ -75,11 +75,13 @@ <artifactId>junit</artifactId> <scope>test</scope> </dependency> + <dependency> <groupId>org.cloudname</groupId> <artifactId>idgen</artifactId> <scope>test</scope> </dependency> + <dependency> <groupId>joda-time</groupId> <artifactId>joda-time</artifactId>
c873ad72e7d2c6e2f5340deea5ec7b7eefa92839
edwardkort$wwidesigner
Move functionality into StudyModel, to facilitate standalone testing of that class. StudyModel now encapsulates uses of the bind factories; they are no longer needed in StudyView.
p
https://github.com/edwardkort/wwidesigner
diff --git a/WWIDesigner/src/main/com/wwidesigner/gui/StudyModel.java b/WWIDesigner/src/main/com/wwidesigner/gui/StudyModel.java index b84a272..8fe8476 100644 --- a/WWIDesigner/src/main/com/wwidesigner/gui/StudyModel.java +++ b/WWIDesigner/src/main/com/wwidesigner/gui/StudyModel.java @@ -1,8 +1,6 @@ -/** - * - */ package com.wwidesigner.gui; +import java.io.File; import java.io.StringWriter; import java.util.ArrayList; import java.util.List; @@ -24,6 +22,8 @@ import com.wwidesigner.util.PhysicalParameters; /** + * Abstract class to encapsulate processes for analyzing and optimizing + * instrument models. * @author kort * */ @@ -39,6 +39,11 @@ public abstract class StudyModel // Preferences. protected BaseObjectiveFunction.OptimizerType preferredOptimizerType; + // Statistics saved from the most recent call to optimizeInstrument + + protected double initialNorm; // Initial value of objective function. + protected double finalNorm; // Final value of objective function. + /** * Tree of selectable categories that the study model supports. */ @@ -94,6 +99,15 @@ public void setCategorySelection(Category category, String subCategoryName) } } + /** + * Class to encapsulate a main branch of the study model selection tree. + * The derived study model defines a set of main branches, typically + * a static set. + * At present, this class is public, to allow StudyView to display + * and select items in the category tree. Should be changed to + * expose only category names, not the Category type. + * + */ public static class Category { private String name; @@ -130,7 +144,11 @@ public void removeSub(String name) public Map<String, Object> getSubs() { - return subs == null ? 
new TreeMap<String, Object>() : subs; + if (subs == null) + { + subs = new TreeMap<String, Object>(); + } + return subs; } public void setSelectedSub(String key) @@ -145,6 +163,10 @@ public String getSelectedSub() public Object getSelectedSubValue() { + if (subs == null) + { + return null; + } return subs.get(selectedSub); } @@ -153,6 +175,10 @@ public void replaceSub(String newName, FileDataModel source) // Find sub by matching dataModel reference String oldName = null; boolean isSelected = false; + if (subs == null) + { + subs = new TreeMap<String, Object>(); + } for (Map.Entry<String, Object> entry : subs.entrySet()) { FileDataModel model = (FileDataModel) entry.getValue(); @@ -178,6 +204,113 @@ public void replaceSub(String newName, FileDataModel source) } } + protected static String getCategoryName(String xmlString) + { + // Check for an Instrument + BindFactory bindFactory = GeometryBindFactory.getInstance(); + if (bindFactory.isValidXml(xmlString, "Instrument", true)) // TODO Make + // constants + // in + // binding + // framework + { + return INSTRUMENT_CATEGORY_ID; + } + + // Check for a Tuning + bindFactory = NoteBindFactory.getInstance(); + if (bindFactory.isValidXml(xmlString, "Tuning", true)) // TODO Make + // constants in + // binding + // framework + { + return TUNING_CATEGORY_ID; + } + + return null; + } + + /** + * Add an Instrument or Tuning to the category tree, from a JIDE FileDataModel. + * Post: If dataModel is valid XML, it is added to INSTRUMENT_CATEGORY_ID, + * or TUNING_CATEGORY_ID, as appropriate, and addDataModel returns true. + * @param dataModel - FileDataModel containing instrument or tuning XML. + * @return true if the dataModel contained valid instrument or tuning XML. 
+ */ + public boolean addDataModel(FileDataModel dataModel) + { + String data = (String) dataModel.getData(); + String categoryName = getCategoryName(data); + if (categoryName == null) + { + return false; + } + Category category = getCategory(categoryName); + category.addSub(dataModel.getName(), dataModel); + category.setSelectedSub(dataModel.getName()); + return true; + } + + /** + * Remove an Instrument or Tuning from the category tree, + * given a JIDE FileDataModel. + * Pre: Assumes that the type of XML, Instrument or Tuning, + * has not changed since the call to addDataModel. + * Post: The specified dataModel is no longer in INSTRUMENT_CATEGORY_ID, + * or TUNING_CATEGORY_ID, as appropriate. + * @param dataModel - FileDataModel containing instrument or tuning XML. + * @return true. + */ + public boolean removeDataModel(FileDataModel dataModel) + { + String data = (String) dataModel.getData(); + String categoryName = getCategoryName(data); + Category category; + if (categoryName == null) + { + // Invalid XML. Remove from both categories. + category = getCategory(INSTRUMENT_CATEGORY_ID); + category.removeSub(dataModel.getName()); + category = getCategory(TUNING_CATEGORY_ID); + category.removeSub(dataModel.getName()); + return true; + } + category = getCategory(categoryName); + category.removeSub(dataModel.getName()); + return true; + } + + /** + * Add an Instrument or Tuning to the category tree, from a JIDE FileDataModel, + * replacing any existing instance. + * Pre: Assumes that the type of XML, Instrument or Tuning, + * has not changed since the call to addDataModel (if any). + * Post: The prior instance of dataModel is removed from INSTRUMENT_CATEGORY_ID, + * or TUNING_CATEGORY_ID, as appropriate + * If dataModel is valid XML, it is added to INSTRUMENT_CATEGORY_ID, + * or TUNING_CATEGORY_ID, as appropriate, and addDataModel returns true. + * @param dataModel - FileDataModel containing instrument or tuning XML. 
+ * @return true if the dataModel contained valid instrument or tuning XML. + */ + public boolean replaceDataModel(FileDataModel dataModel) + { + String data = (String) dataModel.getData(); + String categoryName = getCategoryName(data); + if (categoryName == null) + { + removeDataModel(dataModel); + return false; + } + Category category = getCategory(categoryName); + category.replaceSub(dataModel.getName(), dataModel); + category.setSelectedSub(dataModel.getName()); + return true; + } + + /** + * @return true if category selections are sufficient for calls to + * calculateTuning() and graphTuning(). + */ public boolean canTune() { Category tuningCategory = getCategory(TUNING_CATEGORY_ID); @@ -189,6 +322,10 @@ public boolean canTune() return tuningSelected != null && instrumentSelected != null; } + /** + * @return true if category selections are sufficient for calls to + * optimizeInstrument(). + */ public boolean canOptimize() { if ( ! canTune() ) @@ -207,15 +344,11 @@ public void calculateTuning(String title) throws Exception Category category = this.getCategory(INSTRUMENT_CATEGORY_ID); String instrumentName = category.getSelectedSub(); - FileDataModel model = (FileDataModel) category.getSelectedSubValue(); - model.getApplication().getDataView(model).updateModel(model); - tuner.setInstrument((String) model.getData()); + tuner.setInstrument(getSelectedXmlString(INSTRUMENT_CATEGORY_ID)); category = getCategory(TUNING_CATEGORY_ID); String tuningName = category.getSelectedSub(); - model = (FileDataModel) category.getSelectedSubValue(); - model.getApplication().getDataView(model).updateModel(model); - tuner.setTuning((String) model.getData()); + tuner.setTuning(getSelectedXmlString(TUNING_CATEGORY_ID)); tuner.setCalculator(getCalculator()); @@ -229,15 +362,11 @@ public void graphTuning(String title) throws Exception Category category = this.getCategory(INSTRUMENT_CATEGORY_ID); String instrumentName = category.getSelectedSub(); - FileDataModel model = (FileDataModel) 
category.getSelectedSubValue(); - model.getApplication().getDataView(model).updateModel(model); - tuner.setInstrument((String) model.getData()); + tuner.setInstrument(getSelectedXmlString(INSTRUMENT_CATEGORY_ID)); category = getCategory(TUNING_CATEGORY_ID); String tuningName = category.getSelectedSub(); - model = (FileDataModel) category.getSelectedSubValue(); - model.getApplication().getDataView(model).updateModel(model); - tuner.setTuning((String) model.getData()); + tuner.setTuning(getSelectedXmlString(TUNING_CATEGORY_ID)); tuner.setCalculator(getCalculator()); @@ -260,12 +389,16 @@ public String optimizeInstrument() throws Exception optimizerType = preferredOptimizerType; } + initialNorm = 1.0; + finalNorm = 1.0; if ( ObjectiveFunctionOptimizer.optimizeObjectiveFunction(objective, optimizerType) ) { Instrument instrument = objective.getInstrument(); // Convert back to the input unit-of-measure values instrument.convertToLengthType(); String xmlString = marshal(instrument); + initialNorm = ObjectiveFunctionOptimizer.getInitialNorm(); + finalNorm = ObjectiveFunctionOptimizer.getFinalNorm(); return xmlString; } return null; @@ -289,8 +422,8 @@ public void compareInstrument(String newName, Instrument newInstrument) throws E table.buildTable(oldName, oldInstrument, newName, newInstrument); table.showTable(false); } - - protected String marshal(Instrument instrument) throws Exception + + public static String marshal(Instrument instrument) throws Exception { BindFactory binder = GeometryBindFactory.getInstance(); StringWriter writer = new StringWriter(); @@ -299,13 +432,27 @@ protected String marshal(Instrument instrument) throws Exception return writer.toString(); } + public static String marshal(Tuning tuning) throws Exception + { + BindFactory binder = NoteBindFactory.getInstance(); + StringWriter writer = new StringWriter(); + binder.marshalToXml(tuning, writer); + + return writer.toString(); + } + protected String getSelectedXmlString(String categoryName) throws 
Exception { String xmlString = null; Category category = getCategory(categoryName); FileDataModel model = (FileDataModel) category.getSelectedSubValue(); - model.getApplication().getDataView(model).updateModel(model); + if (model.getApplication() != null) + { + // If the file is a data view in an active application, + // update the data in model with the latest from the application's data view. + model.getApplication().getDataView(model).updateModel(model); + } xmlString = (String) model.getData(); return xmlString; @@ -320,7 +467,7 @@ protected Instrument getInstrument() throws Exception return instrument; } - protected Instrument getInstrument(String xmlString) + public static Instrument getInstrument(String xmlString) { try { @@ -344,6 +491,28 @@ protected Tuning getTuning() throws Exception return tuning; } + public static Instrument getInstrumentFromFile(String fileName) throws Exception + { + BindFactory geometryBindFactory = GeometryBindFactory.getInstance(); + String inputPath = BindFactory.getPathFromName(fileName); + File inputFile = new File(inputPath); + Instrument instrument = (Instrument) geometryBindFactory.unmarshalXml( + inputFile, true); + instrument.updateComponents(); + + return instrument; + } + + public static Tuning getTuningFromFile(String fileName) throws Exception + { + BindFactory noteBindFactory = NoteBindFactory.getInstance(); + String inputPath = BindFactory.getPathFromName(fileName); + File inputFile = new File(inputPath); + Tuning tuning = (Tuning) noteBindFactory.unmarshalXml(inputFile, true); + + return tuning; + } + public PhysicalParameters getParams() { return params; @@ -400,6 +569,23 @@ else if ( optimizerPreference.contentEquals(OptimizationPreferences.OPT_POWELL_N preferredOptimizerType = null; } } + + // Methods to return statistics from an optimization. 
+ + public double getInitialNorm() + { + return initialNorm; + } + + public double getFinalNorm() + { + return finalNorm; + } + + public double getResidualErrorRatio() + { + return finalNorm/initialNorm; + } // Methods to create objects that will perform this study, // according to components that the user has selected. diff --git a/WWIDesigner/src/main/com/wwidesigner/gui/StudyView.java b/WWIDesigner/src/main/com/wwidesigner/gui/StudyView.java index a120f20..99cbe92 100644 --- a/WWIDesigner/src/main/com/wwidesigner/gui/StudyView.java +++ b/WWIDesigner/src/main/com/wwidesigner/gui/StudyView.java @@ -19,7 +19,6 @@ import javax.swing.tree.TreePath; import javax.swing.tree.TreeSelectionModel; -import com.jidesoft.app.framework.BasicDataModel; import com.jidesoft.app.framework.DataModel; import com.jidesoft.app.framework.event.EventSubscriber; import com.jidesoft.app.framework.event.SubscriberEvent; @@ -28,10 +27,7 @@ import com.jidesoft.app.framework.gui.filebased.FileBasedApplication; import com.jidesoft.tree.TreeUtils; import com.wwidesigner.geometry.Instrument; -import com.wwidesigner.geometry.bind.GeometryBindFactory; import com.wwidesigner.gui.StudyModel.Category; -import com.wwidesigner.note.bind.NoteBindFactory; -import com.wwidesigner.util.BindFactory; /** * @author kort @@ -149,60 +145,39 @@ protected void updateActions() public void doEvent(SubscriberEvent event) { String eventId = event.getEvent(); - DataModel source = (DataModel) event.getSource(); - if (source instanceof FileDataModel) + Object eventSource = event.getSource(); + if (eventSource instanceof FileDataModel) { - String data = (String) ((FileDataModel) source).getData(); - String categoryName = getCategoryName(data); - if (categoryName != null) + FileDataModel source = (FileDataModel) eventSource; + switch (eventId) { - Category category = study.getCategory(categoryName); - String subName = source.getName(); - switch (eventId) - { - case NafOptimizationRunner.FILE_OPENED_EVENT_ID: - 
category.addSub(subName, source); - category.setSelectedSub(subName); - break; - case NafOptimizationRunner.FILE_CLOSED_EVENT_ID: - category.removeSub(subName); - break; - case NafOptimizationRunner.FILE_SAVED_EVENT_ID: - case NafOptimizationRunner.WINDOW_RENAMED_EVENT_ID: - category.replaceSub(subName, (FileDataModel) source); - break; - } - updateView(); + case NafOptimizationRunner.FILE_OPENED_EVENT_ID: + if (! study.addDataModel(source)) + { + System.out.print("\nError: Data in editor tab, "); + System.out.print(source.getName()); + System.out.println(", is not valid Instrument or Tuning XML."); + System.out.println("Fix and close the file, then re-open it."); + } + break; + case NafOptimizationRunner.FILE_CLOSED_EVENT_ID: + study.removeDataModel(source); + break; + case NafOptimizationRunner.FILE_SAVED_EVENT_ID: + case NafOptimizationRunner.WINDOW_RENAMED_EVENT_ID: + if (! study.replaceDataModel(source)) + { + System.out.print("\nError: Data in editor tab, "); + System.out.print(source.getName()); + System.out.println(", is not valid Instrument or Tuning XML."); + System.out.println("Fix and close the file, then re-open it."); + } + break; } + updateView(); } } - protected String getCategoryName(String xmlString) - { - // Check Instrument - BindFactory bindFactory = GeometryBindFactory.getInstance(); - if (bindFactory.isValidXml(xmlString, "Instrument", true)) // TODO Make - // constants - // in - // binding - // framework - { - return StudyModel.INSTRUMENT_CATEGORY_ID; - } - - // Check Tuning - bindFactory = NoteBindFactory.getInstance(); - if (bindFactory.isValidXml(xmlString, "Tuning", true)) // TODO Make - // constants in - // binding - // framework - { - return StudyModel.TUNING_CATEGORY_ID; - } - - return null; - } - public void getTuning() { try @@ -237,9 +212,10 @@ public void optimizeInstrument() if (xmlInstrument != null && ! 
xmlInstrument.isEmpty()) { FileBasedApplication app = (FileBasedApplication) getApplication(); - DataModel data = app.newData("xml"); - ((BasicDataModel)data).setData(xmlInstrument); - addDataModelToStudy(data); + FileDataModel data = (FileDataModel) app.newData("xml"); + data.setData(xmlInstrument); + study.addDataModel(data); + updateView(); } } catch (Exception e) @@ -261,7 +237,7 @@ public void compareInstrument() if (view != null) { String xmlInstrument2 = view.getText(); - Instrument instrument2 = study.getInstrument(xmlInstrument2); + Instrument instrument2 = StudyModel.getInstrument(xmlInstrument2); if (instrument2 == null) { System.out.print("\nError: Current editor tab, "); @@ -281,21 +257,6 @@ public void compareInstrument() } } - private void addDataModelToStudy(DataModel dataModel) - { - if (dataModel instanceof FileDataModel) - { - String data = (String) ((FileDataModel) dataModel).getData(); - String categoryName = getCategoryName(data); - if (categoryName != null) - { - Category category = study.getCategory(categoryName); - category.addSub(dataModel.getName(), dataModel); - updateView(); - } - } - } - public StudyModel getStudyModel() { return study; @@ -306,13 +267,22 @@ public StudyModel getStudyModel() */ public void setStudyModel(StudyModel study) { - DataModel[] models = getApplication().getDataModels(); this.study = study; - updateView(); + + DataModel[] models = getApplication().getDataModels(); for ( DataModel model : models ) { - addDataModelToStudy(model); + if (model instanceof FileDataModel) + { + if (! study.addDataModel((FileDataModel) model)) + { + System.out.print("\nError: Data in editor tab, "); + System.out.print(model.getName()); + System.out.println(", is not valid Instrument or Tuning XML."); + } + } } + updateView(); } /**
e25dcdd6b12a965b6eca0c69c847fd0316b1c455
camel
CAMEL-1255: Fixed missing classes in .jar - osgi- export stuff--git-svn-id: https://svn.apache.org/repos/asf/activemq/camel/trunk@734408 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/camel
diff --git a/components/camel-jpa/pom.xml b/components/camel-jpa/pom.xml index 4f1fdbc49b782..a2a513db7aa56 100644 --- a/components/camel-jpa/pom.xml +++ b/components/camel-jpa/pom.xml @@ -35,8 +35,9 @@ <properties> <camel.osgi.export.pkg>org.apache.camel.component.jpa.*, - org.apache.camel.processor.idempotent.jpa.* - org.apache.camel.processor.interceptor.*</camel.osgi.export.pkg> + org.apache.camel.processor.idempotent.jpa.*, + org.apache.camel.processor.interceptor.* + </camel.osgi.export.pkg> </properties> <dependencies> diff --git a/components/camel-jpa/src/main/java/org/apache/camel/processor/interceptor/JpaTraceEventMessage.java b/components/camel-jpa/src/main/java/org/apache/camel/processor/interceptor/JpaTraceEventMessage.java index 504079977c49b..bf6b0b1058f9e 100644 --- a/components/camel-jpa/src/main/java/org/apache/camel/processor/interceptor/JpaTraceEventMessage.java +++ b/components/camel-jpa/src/main/java/org/apache/camel/processor/interceptor/JpaTraceEventMessage.java @@ -175,7 +175,7 @@ public void setCausedByException(String causedByException) { @Override public String toString() { - return "TraceEventMessage[" + exchangeId + "] on node: " + toNode; + return "TraceEventMessage[" + getExchangeId() + "] on node: " + getToNode(); } } diff --git a/examples/camel-example-bam/README.txt b/examples/camel-example-bam/README.txt index e0af2baf529a7..cb7f6ceee0fe6 100644 --- a/examples/camel-example-bam/README.txt +++ b/examples/camel-example-bam/README.txt @@ -40,11 +40,10 @@ You can see the BAM activies defined in src/main/java/org/apache/camel/example/bam/MyActivites.java In the HSQL Database Explorer type - select * from activitystate + select * from camel_activitystate to see the states of the activities. Notice that one activity never receives its expected message and when it's overdue Camel reports this as an error. - To stop the example hit ctrl + c To use log4j as the logging framework add this to the pom.xml:
01ff1c8a77ed63405af637be35318ee693dcd1c5
restlet-framework-java
- Deprecated ServletConverter and added an- equivalent ServletAdapter class to prevent confusion with the - ConverterService reintroduced in Restlet 1.2. - Added root Helper class.--
p
https://github.com/restlet/restlet-framework-java
diff --git a/build/tmpl/text/changes.txt b/build/tmpl/text/changes.txt index 1a4dc4b5ff..3bdf989f0d 100644 --- a/build/tmpl/text/changes.txt +++ b/build/tmpl/text/changes.txt @@ -5,7 +5,9 @@ Changes log - @version-full@ (@release-date@) - Breaking changes - - + - Deprecated ServletConverter and added an equivalent + ServletAdapter class to prevent confusion with the + ConverterService reintroduced in Restlet 1.2. - Bugs fixed - Fixed bug with a ServerResource when an annotated method does not return a value. diff --git a/modules/org.restlet.ext.jaxrs/src/org/restlet/ext/jaxrs/internal/util/Util.java b/modules/org.restlet.ext.jaxrs/src/org/restlet/ext/jaxrs/internal/util/Util.java index 4a32217c8d..6accbacefd 100644 --- a/modules/org.restlet.ext.jaxrs/src/org/restlet/ext/jaxrs/internal/util/Util.java +++ b/modules/org.restlet.ext.jaxrs/src/org/restlet/ext/jaxrs/internal/util/Util.java @@ -78,8 +78,8 @@ import org.restlet.data.Response; import org.restlet.engine.http.ContentType; import org.restlet.engine.http.HttpClientCall; -import org.restlet.engine.http.HttpClientConverter; -import org.restlet.engine.http.HttpServerConverter; +import org.restlet.engine.http.HttpClientAdapter; +import org.restlet.engine.http.HttpServerAdapter; import org.restlet.engine.http.HttpUtils; import org.restlet.engine.util.DateUtils; import org.restlet.ext.jaxrs.internal.core.UnmodifiableMultivaluedMap; @@ -306,7 +306,7 @@ public static void copyResponseHeaders( restletResponse.setEntity(new EmptyRepresentation()); } - HttpClientConverter.copyResponseTransportHeaders(headers, + HttpClientAdapter.copyResponseTransportHeaders(headers, restletResponse); HttpClientCall.copyResponseEntityHeaders(headers, restletResponse .getEntity()); @@ -324,8 +324,8 @@ public static void copyResponseHeaders( */ public static Series<Parameter> copyResponseHeaders(Response restletResponse) { final Series<Parameter> headers = new Form(); - HttpServerConverter.addResponseHeaders(restletResponse, headers); - 
HttpServerConverter.addEntityHeaders(restletResponse.getEntity(), + HttpServerAdapter.addResponseHeaders(restletResponse, headers); + HttpServerAdapter.addEntityHeaders(restletResponse.getEntity(), headers); return headers; } diff --git a/modules/org.restlet.ext.servlet/src/org/restlet/ext/servlet/ServletAdapter.java b/modules/org.restlet.ext.servlet/src/org/restlet/ext/servlet/ServletAdapter.java new file mode 100644 index 0000000000..48ac006096 --- /dev/null +++ b/modules/org.restlet.ext.servlet/src/org/restlet/ext/servlet/ServletAdapter.java @@ -0,0 +1,243 @@ +/** + * Copyright 2005-2009 Noelios Technologies. + * + * The contents of this file are subject to the terms of one of the following + * open source licenses: LGPL 3.0 or LGPL 2.1 or CDDL 1.0 or EPL 1.0 (the + * "Licenses"). You can select the license that you prefer but you may not use + * this file except in compliance with one of these Licenses. + * + * You can obtain a copy of the LGPL 3.0 license at + * http://www.opensource.org/licenses/lgpl-3.0.html + * + * You can obtain a copy of the LGPL 2.1 license at + * http://www.opensource.org/licenses/lgpl-2.1.php + * + * You can obtain a copy of the CDDL 1.0 license at + * http://www.opensource.org/licenses/cddl1.php + * + * You can obtain a copy of the EPL 1.0 license at + * http://www.opensource.org/licenses/eclipse-1.0.php + * + * See the Licenses for the specific language governing permissions and + * limitations under the Licenses. + * + * Alternatively, you can obtain a royalty free commercial license with less + * limitations, transferable or non-transferable, directly at + * http://www.noelios.com/products/restlet-engine + * + * Restlet is a registered trademark of Noelios Technologies. 
+ */ + +package org.restlet.ext.servlet; + +import java.io.IOException; +import java.util.Enumeration; + +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.restlet.Context; +import org.restlet.Restlet; +import org.restlet.data.Reference; +import org.restlet.engine.http.HttpRequest; +import org.restlet.engine.http.HttpResponse; +import org.restlet.engine.http.HttpServerAdapter; +import org.restlet.ext.servlet.internal.ServletCall; +import org.restlet.ext.servlet.internal.ServletLogger; + +/** + * HTTP adapter from Servlet calls to Restlet calls. This class can be used in + * any Servlet, just create a new instance and override the service() method in + * your Servlet to delegate all those calls to this class's service() method. + * Remember to set the target Restlet, for example using a Restlet Router + * instance. You can get the Restlet context directly on instances of this + * class, it will be based on the parent Servlet's context for logging purpose.<br> + * <br> + * This class is especially useful when directly integrating Restlets with + * Spring managed Web applications. 
Here is a simple usage example: + * + * <pre> + * public class TestServlet extends HttpServlet { + * private ServletAdapter adapter; + * + * public void init() throws ServletException { + * super.init(); + * this.adapter = new ServletAdapter(getServletContext()); + * + * Restlet trace = new Restlet(this.adapter.getContext()) { + * public void handle(Request req, Response res) { + * getLogger().info(&quot;Hello World&quot;); + * res.setEntity(&quot;Hello World!&quot;, MediaType.TEXT_PLAIN); + * } + * }; + * + * this.adapter.setTarget(trace); + * } + * + * protected void service(HttpServletRequest req, HttpServletResponse res) + * throws ServletException, IOException { + * this.adapter.service(req, res); + * } + * } + * </pre> + * + * @author Jerome Louvel + */ +public class ServletAdapter extends HttpServerAdapter { + /** The target Restlet. */ + private volatile Restlet target; + + /** + * Constructor. Remember to manually set the "target" property before + * invoking the service() method. + * + * @param context + * The Servlet context. + */ + public ServletAdapter(ServletContext context) { + this(context, null); + } + + /** + * Constructor. + * + * @param context + * The Servlet context. + * @param target + * The target Restlet. + */ + public ServletAdapter(ServletContext context, Restlet target) { + super(new Context(new ServletLogger(context))); + this.target = target; + } + + /** + * Returns the base reference of new Restlet requests. + * + * @param request + * The Servlet request. + * @return The base reference of new Restlet requests. 
+ */ + public Reference getBaseRef(HttpServletRequest request) { + Reference result = null; + final String basePath = request.getContextPath() + + request.getServletPath(); + final String baseUri = request.getRequestURL().toString(); + // Path starts at first slash after scheme:// + final int pathStart = baseUri.indexOf("/", + request.getScheme().length() + 3); + if (basePath.length() == 0) { + // basePath is empty in case the webapp is mounted on root context + if (pathStart != -1) { + result = new Reference(baseUri.substring(0, pathStart)); + } else { + result = new Reference(baseUri); + } + } else { + if (pathStart != -1) { + final int baseIndex = baseUri.indexOf(basePath, pathStart); + if (baseIndex != -1) { + result = new Reference(baseUri.substring(0, baseIndex + + basePath.length())); + } + } + } + + return result; + } + + /** + * Returns the root reference of new Restlet requests. By default it returns + * the result of getBaseRef(). + * + * @param request + * The Servlet request. + * @return The root reference of new Restlet requests. + */ + public Reference getRootRef(HttpServletRequest request) { + return getBaseRef(request); + } + + /** + * Returns the target Restlet. + * + * @return The target Restlet. + */ + public Restlet getTarget() { + return this.target; + } + + /** + * Services a HTTP Servlet request as a Restlet request handled by the + * "target" Restlet. + * + * @param request + * The HTTP Servlet request. + * @param response + * The HTTP Servlet response. 
+ */ + public void service(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + if (getTarget() != null) { + // Set the current context + Context.setCurrent(getContext()); + + // Convert the Servlet call to a Restlet call + final ServletCall servletCall = new ServletCall(request + .getLocalAddr(), request.getLocalPort(), request, response); + final HttpRequest httpRequest = toRequest(servletCall); + final HttpResponse httpResponse = new HttpResponse(servletCall, + httpRequest); + + // Adjust the relative reference + httpRequest.getResourceRef().setBaseRef(getBaseRef(request)); + + // Adjust the root reference + httpRequest.setRootRef(getRootRef(request)); + + // Handle the request and commit the response + getTarget().handle(httpRequest, httpResponse); + commit(httpResponse); + } else { + getLogger().warning("Unable to find the Restlet target"); + } + } + + /** + * Sets the target Restlet. + * + * @param target + * The target Restlet. + */ + public void setTarget(Restlet target) { + this.target = target; + } + + /** + * Converts a low-level Servlet call into a high-level Restlet request. In + * addition to the parent {@link HttpServerAdapter}, it also copies the + * Servlet's request attributes into the Restlet's request attributes map. + * + * @param servletCall + * The low-level Servlet call. + * @return A new high-level uniform request. 
+ */ + @SuppressWarnings("unchecked") + public HttpRequest toRequest(ServletCall servletCall) { + final HttpRequest result = super.toRequest(servletCall); + + // Copy all Servlet's request attributes + String attributeName; + for (final Enumeration<String> namesEnum = servletCall.getRequest() + .getAttributeNames(); namesEnum.hasMoreElements();) { + attributeName = namesEnum.nextElement(); + result.getAttributes().put(attributeName, + servletCall.getRequest().getAttribute(attributeName)); + } + + return result; + } + +} diff --git a/modules/org.restlet.ext.servlet/src/org/restlet/ext/servlet/ServletConverter.java b/modules/org.restlet.ext.servlet/src/org/restlet/ext/servlet/ServletConverter.java index 407e52a214..cf5c0a7918 100644 --- a/modules/org.restlet.ext.servlet/src/org/restlet/ext/servlet/ServletConverter.java +++ b/modules/org.restlet.ext.servlet/src/org/restlet/ext/servlet/ServletConverter.java @@ -43,7 +43,7 @@ import org.restlet.data.Reference; import org.restlet.engine.http.HttpRequest; import org.restlet.engine.http.HttpResponse; -import org.restlet.engine.http.HttpServerConverter; +import org.restlet.engine.http.HttpServerAdapter; import org.restlet.ext.servlet.internal.ServletCall; import org.restlet.ext.servlet.internal.ServletLogger; @@ -60,184 +60,186 @@ * * <pre> * public class TestServlet extends HttpServlet { - * private ServletConverter converter; + * private ServletConverter converter; * - * public void init() throws ServletException { - * super.init(); - * this.converter = new ServletConverter(getServletContext()); + * public void init() throws ServletException { + * super.init(); + * this.converter = new ServletConverter(getServletContext()); * - * Restlet trace = new Restlet(this.converter.getContext()) { - * public void handle(Request req, Response res) { - * getLogger().info(&quot;Hello World&quot;); - * res.setEntity(&quot;Hello World!&quot;, MediaType.TEXT_PLAIN); - * } - * }; + * Restlet trace = new 
Restlet(this.converter.getContext()) { + * public void handle(Request req, Response res) { + * getLogger().info(&quot;Hello World&quot;); + * res.setEntity(&quot;Hello World!&quot;, MediaType.TEXT_PLAIN); + * } + * }; * - * this.converter.setTarget(trace); - * } + * this.converter.setTarget(trace); + * } * - * protected void service(HttpServletRequest req, HttpServletResponse res) - * throws ServletException, IOException { - * this.converter.service(req, res); - * } + * protected void service(HttpServletRequest req, HttpServletResponse res) + * throws ServletException, IOException { + * this.converter.service(req, res); + * } * } * </pre> * * @author Jerome Louvel + * @deprecated Use {@link ServletAdapter} instead. */ -public class ServletConverter extends HttpServerConverter { - /** The target Restlet. */ - private volatile Restlet target; - - /** - * Constructor. Remember to manually set the "target" property before - * invoking the service() method. - * - * @param context - * The Servlet context. - */ - public ServletConverter(ServletContext context) { - this(context, null); - } - - /** - * Constructor. - * - * @param context - * The Servlet context. - * @param target - * The target Restlet. - */ - public ServletConverter(ServletContext context, Restlet target) { - super(new Context(new ServletLogger(context))); - this.target = target; - } - - /** - * Returns the base reference of new Restlet requests. - * - * @param request - * The Servlet request. - * @return The base reference of new Restlet requests. 
- */ - public Reference getBaseRef(HttpServletRequest request) { - Reference result = null; - final String basePath = request.getContextPath() - + request.getServletPath(); - final String baseUri = request.getRequestURL().toString(); - // Path starts at first slash after scheme:// - final int pathStart = baseUri.indexOf("/", - request.getScheme().length() + 3); - if (basePath.length() == 0) { - // basePath is empty in case the webapp is mounted on root context - if (pathStart != -1) { - result = new Reference(baseUri.substring(0, pathStart)); - } else { - result = new Reference(baseUri); - } - } else { - if (pathStart != -1) { - final int baseIndex = baseUri.indexOf(basePath, pathStart); - if (baseIndex != -1) { - result = new Reference(baseUri.substring(0, baseIndex - + basePath.length())); - } - } - } - - return result; - } - - /** - * Returns the root reference of new Restlet requests. By default it returns - * the result of getBaseRef(). - * - * @param request - * The Servlet request. - * @return The root reference of new Restlet requests. - */ - public Reference getRootRef(HttpServletRequest request) { - return getBaseRef(request); - } - - /** - * Returns the target Restlet. - * - * @return The target Restlet. - */ - public Restlet getTarget() { - return this.target; - } - - /** - * Services a HTTP Servlet request as a Restlet request handled by the - * "target" Restlet. - * - * @param request - * The HTTP Servlet request. - * @param response - * The HTTP Servlet response. 
- */ - public void service(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - if (getTarget() != null) { - // Set the current context - Context.setCurrent(getContext()); - - // Convert the Servlet call to a Restlet call - final ServletCall servletCall = new ServletCall(request - .getLocalAddr(), request.getLocalPort(), request, response); - final HttpRequest httpRequest = toRequest(servletCall); - final HttpResponse httpResponse = new HttpResponse(servletCall, - httpRequest); - - // Adjust the relative reference - httpRequest.getResourceRef().setBaseRef(getBaseRef(request)); - - // Adjust the root reference - httpRequest.setRootRef(getRootRef(request)); - - // Handle the request and commit the response - getTarget().handle(httpRequest, httpResponse); - commit(httpResponse); - } else { - getLogger().warning("Unable to find the Restlet target"); - } - } - - /** - * Sets the target Restlet. - * - * @param target - * The target Restlet. - */ - public void setTarget(Restlet target) { - this.target = target; - } - - /** - * Converts a low-level Servlet call into a high-level Restlet request. In - * addition to the parent HttpServerConverter class, it also copies the - * Servlet's request attributes into the Restlet's request attributes map. - * - * @param servletCall - * The low-level Servlet call. - * @return A new high-level uniform request. 
- */ - @SuppressWarnings("unchecked") - public HttpRequest toRequest(ServletCall servletCall) { - final HttpRequest result = super.toRequest(servletCall); - - // Copy all Servlet's request attributes - String attributeName; - for (final Enumeration<String> namesEnum = servletCall.getRequest() - .getAttributeNames(); namesEnum.hasMoreElements();) { - attributeName = namesEnum.nextElement(); - result.getAttributes().put(attributeName, - servletCall.getRequest().getAttribute(attributeName)); - } - - return result; - } +@Deprecated +public class ServletConverter extends HttpServerAdapter { + /** The target Restlet. */ + private volatile Restlet target; + + /** + * Constructor. Remember to manually set the "target" property before + * invoking the service() method. + * + * @param context + * The Servlet context. + */ + public ServletConverter(ServletContext context) { + this(context, null); + } + + /** + * Constructor. + * + * @param context + * The Servlet context. + * @param target + * The target Restlet. + */ + public ServletConverter(ServletContext context, Restlet target) { + super(new Context(new ServletLogger(context))); + this.target = target; + } + + /** + * Returns the base reference of new Restlet requests. + * + * @param request + * The Servlet request. + * @return The base reference of new Restlet requests. 
+ */ + public Reference getBaseRef(HttpServletRequest request) { + Reference result = null; + final String basePath = request.getContextPath() + + request.getServletPath(); + final String baseUri = request.getRequestURL().toString(); + // Path starts at first slash after scheme:// + final int pathStart = baseUri.indexOf("/", + request.getScheme().length() + 3); + if (basePath.length() == 0) { + // basePath is empty in case the webapp is mounted on root context + if (pathStart != -1) { + result = new Reference(baseUri.substring(0, pathStart)); + } else { + result = new Reference(baseUri); + } + } else { + if (pathStart != -1) { + final int baseIndex = baseUri.indexOf(basePath, pathStart); + if (baseIndex != -1) { + result = new Reference(baseUri.substring(0, baseIndex + + basePath.length())); + } + } + } + + return result; + } + + /** + * Returns the root reference of new Restlet requests. By default it returns + * the result of getBaseRef(). + * + * @param request + * The Servlet request. + * @return The root reference of new Restlet requests. + */ + public Reference getRootRef(HttpServletRequest request) { + return getBaseRef(request); + } + + /** + * Returns the target Restlet. + * + * @return The target Restlet. + */ + public Restlet getTarget() { + return this.target; + } + + /** + * Services a HTTP Servlet request as a Restlet request handled by the + * "target" Restlet. + * + * @param request + * The HTTP Servlet request. + * @param response + * The HTTP Servlet response. 
+ */ + public void service(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + if (getTarget() != null) { + // Set the current context + Context.setCurrent(getContext()); + + // Convert the Servlet call to a Restlet call + final ServletCall servletCall = new ServletCall(request + .getLocalAddr(), request.getLocalPort(), request, response); + final HttpRequest httpRequest = toRequest(servletCall); + final HttpResponse httpResponse = new HttpResponse(servletCall, + httpRequest); + + // Adjust the relative reference + httpRequest.getResourceRef().setBaseRef(getBaseRef(request)); + + // Adjust the root reference + httpRequest.setRootRef(getRootRef(request)); + + // Handle the request and commit the response + getTarget().handle(httpRequest, httpResponse); + commit(httpResponse); + } else { + getLogger().warning("Unable to find the Restlet target"); + } + } + + /** + * Sets the target Restlet. + * + * @param target + * The target Restlet. + */ + public void setTarget(Restlet target) { + this.target = target; + } + + /** + * Converts a low-level Servlet call into a high-level Restlet request. In + * addition to the parent HttpServerConverter class, it also copies the + * Servlet's request attributes into the Restlet's request attributes map. + * + * @param servletCall + * The low-level Servlet call. + * @return A new high-level uniform request. 
+ */ + @SuppressWarnings("unchecked") + public HttpRequest toRequest(ServletCall servletCall) { + final HttpRequest result = super.toRequest(servletCall); + + // Copy all Servlet's request attributes + String attributeName; + for (final Enumeration<String> namesEnum = servletCall.getRequest() + .getAttributeNames(); namesEnum.hasMoreElements();) { + attributeName = namesEnum.nextElement(); + result.getAttributes().put(attributeName, + servletCall.getRequest().getAttribute(attributeName)); + } + + return result; + } } diff --git a/modules/org.restlet.ext.spring/src/org/restlet/ext/spring/RestletFrameworkServlet.java b/modules/org.restlet.ext.spring/src/org/restlet/ext/spring/RestletFrameworkServlet.java index c5647a2625..12824a3b0d 100644 --- a/modules/org.restlet.ext.spring/src/org/restlet/ext/spring/RestletFrameworkServlet.java +++ b/modules/org.restlet.ext.spring/src/org/restlet/ext/spring/RestletFrameworkServlet.java @@ -39,11 +39,10 @@ import org.restlet.Application; import org.restlet.Context; import org.restlet.Restlet; -import org.restlet.ext.servlet.ServletConverter; +import org.restlet.ext.servlet.ServletAdapter; import org.springframework.beans.BeansException; import org.springframework.web.servlet.FrameworkServlet; - /** * A Servlet which provides an automatic Restlet integration with an existing * {@link org.springframework.web.context.WebApplicationContext}. The usage is @@ -97,16 +96,53 @@ public class RestletFrameworkServlet extends FrameworkServlet { private static final long serialVersionUID = 1L; - /** The converter of Servlet calls into Restlet equivalents. */ - private volatile ServletConverter converter; + /** The adapter of Servlet calls into Restlet equivalents. */ + private volatile ServletAdapter adapter; /** The bean name of the target Restlet. 
*/ private volatile String targetRestletBeanName; + /** + * Creates the Restlet {@link Context} to use if the target application does + * not already have a context associated, or if the target restlet is not an + * {@link Application} at all. + * <p> + * Uses a simple {@link Context} by default. + * + * @return A new instance of {@link Context} + */ + protected Context createContext() { + return new Context(); + } + @Override protected void doService(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - getConverter().service(request, response); + getAdapter().service(request, response); + } + + /** + * Provides access to the {@link ServletAdapter} used to handle requests. + * Exposed so that subclasses may do additional configuration, if necessary, + * by overriding {@link #initFrameworkServlet()}. + * + * @return The adapter of Servlet calls into Restlet equivalents. + */ + protected ServletAdapter getAdapter() { + return this.adapter; + } + + /** + * Provides access to the {@link ServletConverter} used to handle requests. + * Exposed so that subclasses may do additional configuration, if necessary, + * by overriding {@link #initFrameworkServlet()}. + * + * @return The converter of Servlet calls into Restlet equivalents. + * @deprecated Use {@link #getAdapter()} instead. + */ + @Deprecated + protected ServletAdapter getConverter() { + return this.adapter; } /** @@ -129,22 +165,11 @@ public String getTargetRestletBeanName() { : this.targetRestletBeanName; } - /** - * Provides access to the {@link ServletConverter} used to handle requests. - * Exposed so that subclasses may do additional configuration, if necessary, - * by overriding {@link #initFrameworkServlet()}. - * - * @return The converter of Servlet calls into Restlet equivalents. 
- */ - protected ServletConverter getConverter() { - return this.converter; - } - @Override protected void initFrameworkServlet() throws ServletException, BeansException { super.initFrameworkServlet(); - this.converter = new ServletConverter(getServletContext()); + this.adapter = new ServletAdapter(getServletContext()); org.restlet.Application application; if (getTargetRestlet() instanceof Application) { @@ -156,20 +181,7 @@ protected void initFrameworkServlet() throws ServletException, if (application.getContext() == null) { application.setContext(createContext()); } - this.converter.setTarget(application); - } - - /** - * Creates the Restlet {@link Context} to use if the target application does - * not already have a context associated, or if the target restlet is not an - * {@link Application} at all. - * <p> - * Uses a simple {@link Context} by default. - * - * @return A new instance of {@link Context} - */ - protected Context createContext() { - return new Context(); + this.adapter.setTarget(application); } /** diff --git a/modules/org.restlet.ext.xdb/src/org/restlet/ext/xdb/XdbServletConverter.java b/modules/org.restlet.ext.xdb/src/org/restlet/ext/xdb/XdbServletConverter.java index 66dd3da4c5..3c1f5cb406 100644 --- a/modules/org.restlet.ext.xdb/src/org/restlet/ext/xdb/XdbServletConverter.java +++ b/modules/org.restlet.ext.xdb/src/org/restlet/ext/xdb/XdbServletConverter.java @@ -47,7 +47,7 @@ import org.restlet.data.Reference; import org.restlet.engine.http.HttpRequest; import org.restlet.engine.http.HttpResponse; -import org.restlet.engine.http.HttpServerConverter; +import org.restlet.engine.http.HttpServerAdapter; import org.restlet.ext.servlet.internal.ServletLogger; @@ -89,7 +89,7 @@ * * @author Marcelo F. Ochoa ([email protected]) */ -public class XdbServletConverter extends HttpServerConverter { +public class XdbServletConverter extends HttpServerAdapter { /** The target Restlet. 
*/ private volatile Restlet target; diff --git a/modules/org.restlet/src/org/restlet/engine/converter/RepresentationConverter.java b/modules/org.restlet/src/org/restlet/engine/Helper.java similarity index 56% rename from modules/org.restlet/src/org/restlet/engine/converter/RepresentationConverter.java rename to modules/org.restlet/src/org/restlet/engine/Helper.java index ae2463059e..493f28593c 100644 --- a/modules/org.restlet/src/org/restlet/engine/converter/RepresentationConverter.java +++ b/modules/org.restlet/src/org/restlet/engine/Helper.java @@ -28,41 +28,13 @@ * Restlet is a registered trademark of Noelios Technologies. */ -package org.restlet.engine.converter; - -import java.util.List; - -import org.restlet.representation.Representation; -import org.restlet.representation.Variant; -import org.restlet.resource.UniformResource; +package org.restlet.engine; /** - * Converter between the DOM API and Representation classes. + * Abstract marker class parent of all engine helpers. * * @author Jerome Louvel */ -public class RepresentationConverter extends ConverterHelper { - - @Override - public List<Class<?>> getObjectClasses(Variant variant) { - return null; - } - - @Override - public List<Variant> getVariants(Class<?> objectClass) { - return null; - } - - @Override - public <T> T toObject(Representation representation, Class<T> targetClass, - UniformResource resource) { - return null; - } - - @Override - public Representation toRepresentation(Object object, - Variant targetVariant, UniformResource resource) { - return null; - } +public abstract class Helper { } diff --git a/modules/org.restlet/src/org/restlet/engine/RestletHelper.java b/modules/org.restlet/src/org/restlet/engine/RestletHelper.java index 687905d4c6..b7946d1497 100644 --- a/modules/org.restlet/src/org/restlet/engine/RestletHelper.java +++ b/modules/org.restlet/src/org/restlet/engine/RestletHelper.java @@ -48,7 +48,7 @@ * * @author Jerome Louvel */ -public abstract class RestletHelper<T extends 
Restlet> { +public abstract class RestletHelper<T extends Restlet> extends Helper { /** * The map of attributes exchanged between the API and the Engine via this diff --git a/modules/org.restlet/src/org/restlet/engine/converter/ConverterHelper.java b/modules/org.restlet/src/org/restlet/engine/converter/ConverterHelper.java index 9713868eb3..425a80ebcb 100644 --- a/modules/org.restlet/src/org/restlet/engine/converter/ConverterHelper.java +++ b/modules/org.restlet/src/org/restlet/engine/converter/ConverterHelper.java @@ -34,6 +34,7 @@ import java.util.ArrayList; import java.util.List; +import org.restlet.engine.Helper; import org.restlet.representation.Representation; import org.restlet.representation.Variant; import org.restlet.resource.UniformResource; @@ -43,7 +44,7 @@ * * @author Jerome Louvel */ -public abstract class ConverterHelper { +public abstract class ConverterHelper extends Helper { /** * Adds an object class to the given list. Creates a new list if necessary. diff --git a/modules/org.restlet/src/org/restlet/engine/http/HttpConverter.java b/modules/org.restlet/src/org/restlet/engine/http/HttpAdapter.java similarity index 99% rename from modules/org.restlet/src/org/restlet/engine/http/HttpConverter.java rename to modules/org.restlet/src/org/restlet/engine/http/HttpAdapter.java index c9829e1a4e..77358036f4 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/HttpConverter.java +++ b/modules/org.restlet/src/org/restlet/engine/http/HttpAdapter.java @@ -41,7 +41,7 @@ * * @author Jerome Louvel */ -public class HttpConverter { +public class HttpAdapter { /** The context. */ private volatile Context context; @@ -51,7 +51,7 @@ public class HttpConverter { * @param context * The context to use. 
*/ - public HttpConverter(Context context) { + public HttpAdapter(Context context) { this.context = context; } diff --git a/modules/org.restlet/src/org/restlet/engine/http/HttpClientConverter.java b/modules/org.restlet/src/org/restlet/engine/http/HttpClientAdapter.java similarity index 99% rename from modules/org.restlet/src/org/restlet/engine/http/HttpClientConverter.java rename to modules/org.restlet/src/org/restlet/engine/http/HttpClientAdapter.java index 734bbe09a8..9931682e53 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/HttpClientConverter.java +++ b/modules/org.restlet/src/org/restlet/engine/http/HttpClientAdapter.java @@ -60,7 +60,7 @@ * * @author Jerome Louvel */ -public class HttpClientConverter extends HttpConverter { +public class HttpClientAdapter extends HttpAdapter { /** * Copies headers into a response. * @@ -150,7 +150,7 @@ public static void copyResponseTransportHeaders( * @param context * The context to use. */ - public HttpClientConverter(Context context) { + public HttpClientAdapter(Context context) { super(context); } diff --git a/modules/org.restlet/src/org/restlet/engine/http/HttpClientCall.java b/modules/org.restlet/src/org/restlet/engine/http/HttpClientCall.java index 06cf1278b3..7bf3df5ebd 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/HttpClientCall.java +++ b/modules/org.restlet/src/org/restlet/engine/http/HttpClientCall.java @@ -76,7 +76,7 @@ public abstract class HttpClientCall extends HttpCall { * if no representation has been provided and the response has not * sent any entity header. 
* @throws NumberFormatException - * @see HttpClientConverter#copyResponseTransportHeaders(Iterable, Response) + * @see HttpClientAdapter#copyResponseTransportHeaders(Iterable, Response) */ public static Representation copyResponseEntityHeaders( Iterable<Parameter> responseHeaders, Representation representation) diff --git a/modules/org.restlet/src/org/restlet/engine/http/HttpClientHelper.java b/modules/org.restlet/src/org/restlet/engine/http/HttpClientHelper.java index 1e8d886ce5..24a6864a32 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/HttpClientHelper.java +++ b/modules/org.restlet/src/org/restlet/engine/http/HttpClientHelper.java @@ -52,7 +52,7 @@ * <tr> * <td>converter</td> * <td>String</td> - * <td>org.restlet.engine.http.HttpClientConverter</td> + * <td>org.restlet.engine.http.HttpClientAdapter</td> * <td>Class name of the converter of low-level HTTP calls into high level * requests and responses.</td> * </tr> @@ -62,7 +62,7 @@ */ public abstract class HttpClientHelper extends ClientHelper { /** The converter from uniform calls to HTTP calls. */ - private volatile HttpClientConverter converter; + private volatile HttpClientAdapter converter; /** * Constructor. @@ -89,13 +89,12 @@ public HttpClientHelper(Client client) { * * @return the converter from uniform calls to HTTP calls. 
*/ - public HttpClientConverter getConverter() throws Exception { + public HttpClientAdapter getConverter() throws Exception { if (this.converter == null) { final String converterClass = getHelpedParameters().getFirstValue( - "converter", "org.restlet.engine.http.HttpClientConverter"); - this.converter = (HttpClientConverter) Class - .forName(converterClass).getConstructor(Context.class) - .newInstance(getContext()); + "converter", "org.restlet.engine.http.HttpClientAdapter"); + this.converter = (HttpClientAdapter) Class.forName(converterClass) + .getConstructor(Context.class).newInstance(getContext()); } return this.converter; @@ -120,7 +119,7 @@ public void handle(Request request, Response response) { * @param converter * The converter to set. */ - public void setConverter(HttpClientConverter converter) { + public void setConverter(HttpClientAdapter converter) { this.converter = converter; } } diff --git a/modules/org.restlet/src/org/restlet/engine/http/HttpServerConverter.java b/modules/org.restlet/src/org/restlet/engine/http/HttpServerAdapter.java similarity index 99% rename from modules/org.restlet/src/org/restlet/engine/http/HttpServerConverter.java rename to modules/org.restlet/src/org/restlet/engine/http/HttpServerAdapter.java index 279f45bb48..7807170363 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/HttpServerConverter.java +++ b/modules/org.restlet/src/org/restlet/engine/http/HttpServerAdapter.java @@ -59,7 +59,7 @@ * * @author Jerome Louvel */ -public class HttpServerConverter extends HttpConverter { +public class HttpServerAdapter extends HttpAdapter { /** * Copies the entity headers from the {@link Representation} to the * {@link Series}. @@ -267,7 +267,7 @@ public static void addResponseHeaders(Response response, * @param context * The client context. 
*/ - public HttpServerConverter(Context context) { + public HttpServerAdapter(Context context) { super(context); } diff --git a/modules/org.restlet/src/org/restlet/engine/http/HttpServerHelper.java b/modules/org.restlet/src/org/restlet/engine/http/HttpServerHelper.java index dbc15c966a..ad23a92851 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/HttpServerHelper.java +++ b/modules/org.restlet/src/org/restlet/engine/http/HttpServerHelper.java @@ -61,7 +61,7 @@ * <tr> * <td>converter</td> * <td>String</td> - * <td>org.restlet.engine.http.HttpServerConverter</td> + * <td>org.restlet.engine.http.HttpServerAdapter</td> * <td>Class name of the converter of low-level HTTP calls into high level * requests and responses.</td> * </tr> @@ -71,7 +71,7 @@ */ public class HttpServerHelper extends ServerHelper { /** The converter from HTTP calls to uniform calls. */ - private volatile HttpServerConverter converter; + private volatile HttpServerAdapter converter; /** * Default constructor. Note that many methods assume that a non-null server @@ -98,13 +98,13 @@ public HttpServerHelper(Server server) { * * @return the converter from HTTP calls to uniform calls. */ - public HttpServerConverter getConverter() { + public HttpServerAdapter getConverter() { if (this.converter == null) { try { final String converterClass = getHelpedParameters() .getFirstValue("converter", - "org.restlet.engine.http.HttpServerConverter"); - this.converter = (HttpServerConverter) Engine.loadClass( + "org.restlet.engine.http.HttpServerAdapter"); + this.converter = (HttpServerAdapter) Engine.loadClass( converterClass).getConstructor(Context.class) .newInstance(getContext()); } catch (IllegalArgumentException e) { @@ -163,7 +163,7 @@ public void handle(HttpServerCall httpCall) { * @param converter * The converter to set. 
*/ - public void setConverter(HttpServerConverter converter) { + public void setConverter(HttpServerAdapter converter) { this.converter = converter; } } diff --git a/modules/org.restlet/src/org/restlet/engine/security/AuthenticatorHelper.java b/modules/org.restlet/src/org/restlet/engine/security/AuthenticatorHelper.java index 49cb48fddc..d0eb2c05df 100644 --- a/modules/org.restlet/src/org/restlet/engine/security/AuthenticatorHelper.java +++ b/modules/org.restlet/src/org/restlet/engine/security/AuthenticatorHelper.java @@ -40,6 +40,7 @@ import org.restlet.data.Request; import org.restlet.data.Response; import org.restlet.data.Status; +import org.restlet.engine.Helper; import org.restlet.security.Guard; import org.restlet.util.Series; @@ -48,243 +49,243 @@ * * @author Jerome Louvel */ -public abstract class AuthenticatorHelper { +public abstract class AuthenticatorHelper extends Helper { - /** The supported challenge scheme. */ - private volatile ChallengeScheme challengeScheme; + /** The supported challenge scheme. */ + private volatile ChallengeScheme challengeScheme; - /** Indicates if client side authentication is supported. */ - private volatile boolean clientSide; + /** Indicates if client side authentication is supported. */ + private volatile boolean clientSide; - /** Indicates if server side authentication is supported. */ - private volatile boolean serverSide; + /** Indicates if server side authentication is supported. */ + private volatile boolean serverSide; - /** - * Constructor. - * - * @param challengeScheme - * The supported challenge scheme. - * @param clientSide - * Indicates if client side authentication is supported. - * @param serverSide - * Indicates if server side authentication is supported. - */ - public AuthenticatorHelper(ChallengeScheme challengeScheme, - boolean clientSide, boolean serverSide) { - this.challengeScheme = challengeScheme; - this.clientSide = clientSide; - this.serverSide = serverSide; - } + /** + * Constructor. 
+ * + * @param challengeScheme + * The supported challenge scheme. + * @param clientSide + * Indicates if client side authentication is supported. + * @param serverSide + * Indicates if server side authentication is supported. + */ + public AuthenticatorHelper(ChallengeScheme challengeScheme, + boolean clientSide, boolean serverSide) { + this.challengeScheme = challengeScheme; + this.clientSide = clientSide; + this.serverSide = serverSide; + } - /** - * Indicates if the call is properly authenticated. You are guaranteed that - * the request has a challenge response with a scheme matching the one - * supported by the plugin. - * - * @param cr - * The challenge response in the request. - * @param request - * The request to authenticate. - * @param guard - * The associated guard to callback. - * @return -1 if the given credentials were invalid, 0 if no credentials - * were found and 1 otherwise. - * @see Guard#checkSecret(Request, String, char[]) - * @deprecated See new org.restlet.security package. - */ - @Deprecated - public int authenticate(ChallengeResponse cr, Request request, Guard guard) { - int result = Guard.AUTHENTICATION_MISSING; + /** + * Indicates if the call is properly authenticated. You are guaranteed that + * the request has a challenge response with a scheme matching the one + * supported by the plugin. + * + * @param cr + * The challenge response in the request. + * @param request + * The request to authenticate. + * @param guard + * The associated guard to callback. + * @return -1 if the given credentials were invalid, 0 if no credentials + * were found and 1 otherwise. + * @see Guard#checkSecret(Request, String, char[]) + * @deprecated See new org.restlet.security package. 
+ */ + @Deprecated + public int authenticate(ChallengeResponse cr, Request request, Guard guard) { + int result = Guard.AUTHENTICATION_MISSING; - // The challenge schemes are compatible - final String identifier = cr.getIdentifier(); - final char[] secret = cr.getSecret(); + // The challenge schemes are compatible + final String identifier = cr.getIdentifier(); + final char[] secret = cr.getSecret(); - // Check the credentials - if ((identifier != null) && (secret != null)) { - result = guard.checkSecret(request, identifier, secret) ? Guard.AUTHENTICATION_VALID - : Guard.AUTHENTICATION_INVALID; - } + // Check the credentials + if ((identifier != null) && (secret != null)) { + result = guard.checkSecret(request, identifier, secret) ? Guard.AUTHENTICATION_VALID + : Guard.AUTHENTICATION_INVALID; + } - return result; - } + return result; + } - /** - * Challenges the client by adding a challenge request to the response and - * by setting the status to CLIENT_ERROR_UNAUTHORIZED. - * - * @param response - * The response to update. - * @param stale - * Indicates if the new challenge is due to a stale response. - * @param guard - * The associated guard to callback. - * @deprecated See new org.restlet.security package. - */ - @Deprecated - public void challenge(Response response, boolean stale, Guard guard) { - response.setStatus(Status.CLIENT_ERROR_UNAUTHORIZED); - response.setChallengeRequest(new ChallengeRequest(guard.getScheme(), - guard.getRealm())); - } + /** + * Challenges the client by adding a challenge request to the response and + * by setting the status to CLIENT_ERROR_UNAUTHORIZED. + * + * @param response + * The response to update. + * @param stale + * Indicates if the new challenge is due to a stale response. + * @param guard + * The associated guard to callback. + * @deprecated See new org.restlet.security package. 
+ */ + @Deprecated + public void challenge(Response response, boolean stale, Guard guard) { + response.setStatus(Status.CLIENT_ERROR_UNAUTHORIZED); + response.setChallengeRequest(new ChallengeRequest(guard.getScheme(), + guard.getRealm())); + } - /** - * Formats a challenge request as a HTTP header value. - * - * @param request - * The challenge request to format. - * @return The authenticate header value. - */ - public String format(ChallengeRequest request) { - final StringBuilder sb = new StringBuilder(); - sb.append(request.getScheme().getTechnicalName()); + /** + * Formats a challenge request as a HTTP header value. + * + * @param request + * The challenge request to format. + * @return The authenticate header value. + */ + public String format(ChallengeRequest request) { + final StringBuilder sb = new StringBuilder(); + sb.append(request.getScheme().getTechnicalName()); - if (request.getRealm() != null) { - sb.append(" realm=\"").append(request.getRealm()).append('"'); - } + if (request.getRealm() != null) { + sb.append(" realm=\"").append(request.getRealm()).append('"'); + } - formatParameters(sb, request.getParameters(), request); - return sb.toString(); - } + formatParameters(sb, request.getParameters(), request); + return sb.toString(); + } - /** - * Formats a challenge response as raw credentials. - * - * @param challenge - * The challenge response to format. - * @param request - * The parent request. - * @param httpHeaders - * The current request HTTP headers. - * @return The authorization header value. - */ - public String format(ChallengeResponse challenge, Request request, - Series<Parameter> httpHeaders) { - final StringBuilder sb = new StringBuilder(); - sb.append(challenge.getScheme().getTechnicalName()).append(' '); + /** + * Formats a challenge response as raw credentials. + * + * @param challenge + * The challenge response to format. + * @param request + * The parent request. + * @param httpHeaders + * The current request HTTP headers. 
+ * @return The authorization header value. + */ + public String format(ChallengeResponse challenge, Request request, + Series<Parameter> httpHeaders) { + final StringBuilder sb = new StringBuilder(); + sb.append(challenge.getScheme().getTechnicalName()).append(' '); - if (challenge.getCredentials() != null) { - sb.append(challenge.getCredentials()); - } else { - formatCredentials(sb, challenge, request, httpHeaders); - } + if (challenge.getCredentials() != null) { + sb.append(challenge.getCredentials()); + } else { + formatCredentials(sb, challenge, request, httpHeaders); + } - return sb.toString(); - } + return sb.toString(); + } - /** - * Formats a challenge response as raw credentials. - * - * @param sb - * The String builder to update. - * @param challenge - * The challenge response to format. - * @param request - * The parent request. - * @param httpHeaders - * The current request HTTP headers. - */ - public abstract void formatCredentials(StringBuilder sb, - ChallengeResponse challenge, Request request, - Series<Parameter> httpHeaders); + /** + * Formats a challenge response as raw credentials. + * + * @param sb + * The String builder to update. + * @param challenge + * The challenge response to format. + * @param request + * The parent request. + * @param httpHeaders + * The current request HTTP headers. + */ + public abstract void formatCredentials(StringBuilder sb, + ChallengeResponse challenge, Request request, + Series<Parameter> httpHeaders); - /** - * Formats the parameters of a challenge request, to be appended to the - * scheme technical name and realm. - * - * @param sb - * The string builder to update. - * @param parameters - * The parameters to format. - * @param request - * The challenger request. - */ - public void formatParameters(StringBuilder sb, - Series<Parameter> parameters, ChallengeRequest request) { - } + /** + * Formats the parameters of a challenge request, to be appended to the + * scheme technical name and realm. 
+ * + * @param sb + * The string builder to update. + * @param parameters + * The parameters to format. + * @param request + * The challenger request. + */ + public void formatParameters(StringBuilder sb, + Series<Parameter> parameters, ChallengeRequest request) { + } - /** - * Returns the supported challenge scheme. - * - * @return The supported challenge scheme. - */ - public ChallengeScheme getChallengeScheme() { - return this.challengeScheme; - } + /** + * Returns the supported challenge scheme. + * + * @return The supported challenge scheme. + */ + public ChallengeScheme getChallengeScheme() { + return this.challengeScheme; + } - /** - * Returns the context's logger. - * - * @return The context's logger. - */ - public Logger getLogger() { - return Context.getCurrentLogger(); - } + /** + * Returns the context's logger. + * + * @return The context's logger. + */ + public Logger getLogger() { + return Context.getCurrentLogger(); + } - /** - * Indicates if client side authentication is supported. - * - * @return True if client side authentication is supported. - */ - public boolean isClientSide() { - return this.clientSide; - } + /** + * Indicates if client side authentication is supported. + * + * @return True if client side authentication is supported. + */ + public boolean isClientSide() { + return this.clientSide; + } - /** - * Indicates if server side authentication is supported. - * - * @return True if server side authentication is supported. - */ - public boolean isServerSide() { - return this.serverSide; - } + /** + * Indicates if server side authentication is supported. + * + * @return True if server side authentication is supported. + */ + public boolean isServerSide() { + return this.serverSide; + } - /** - * Parses an authenticate header into a challenge request. - * - * @param header - * The HTTP header value to parse. 
- */ - public void parseRequest(ChallengeRequest cr, String header) { - } + /** + * Parses an authenticate header into a challenge request. + * + * @param header + * The HTTP header value to parse. + */ + public void parseRequest(ChallengeRequest cr, String header) { + } - /** - * Parses an authorization header into a challenge response. - * - * @param request - * The request. - */ - public void parseResponse(ChallengeResponse cr, Request request) { - } + /** + * Parses an authorization header into a challenge response. + * + * @param request + * The request. + */ + public void parseResponse(ChallengeResponse cr, Request request) { + } - /** - * Sets the supported challenge scheme. - * - * @param challengeScheme - * The supported challenge scheme. - */ - public void setChallengeScheme(ChallengeScheme challengeScheme) { - this.challengeScheme = challengeScheme; - } + /** + * Sets the supported challenge scheme. + * + * @param challengeScheme + * The supported challenge scheme. + */ + public void setChallengeScheme(ChallengeScheme challengeScheme) { + this.challengeScheme = challengeScheme; + } - /** - * Indicates if client side authentication is supported. - * - * @param clientSide - * True if client side authentication is supported. - */ - public void setClientSide(boolean clientSide) { - this.clientSide = clientSide; - } + /** + * Indicates if client side authentication is supported. + * + * @param clientSide + * True if client side authentication is supported. + */ + public void setClientSide(boolean clientSide) { + this.clientSide = clientSide; + } - /** - * Indicates if server side authentication is supported. - * - * @param serverSide - * True if server side authentication is supported. - */ - public void setServerSide(boolean serverSide) { - this.serverSide = serverSide; - } + /** + * Indicates if server side authentication is supported. + * + * @param serverSide + * True if server side authentication is supported. 
+ */ + public void setServerSide(boolean serverSide) { + this.serverSide = serverSide; + } }
245f782801254eefc4442c0850acf3eeee468697
restlet-framework-java
- Bumped versions to 1.0 RC3 - Updated docs -- Removed deprecated methods--
p
https://github.com/restlet/restlet-framework-java
diff --git a/build/tmpl/changes.txt b/build/tmpl/changes.txt index 5b0e2cb01b..91e6e98dc0 100644 --- a/build/tmpl/changes.txt +++ b/build/tmpl/changes.txt @@ -4,6 +4,11 @@ Changes log =========== @version-full@ (@release-date@) +[Bugs fixed] +[API changes] +[Enhancements] + +1.0 RC2 (2007-01-09) [Bugs fixed] - Fixed minor bug with the URI parsing (query sign "?" inside fragment). Contributed by Thierry Boileau. diff --git a/build/www/roadmap.html b/build/www/roadmap.html index 2dffd1f13e..c9fe8f466c 100644 --- a/build/www/roadmap.html +++ b/build/www/roadmap.html @@ -10,7 +10,7 @@ <body> <a target="_top" href="http://www.restlet.org"><img - src="http://www.restlet.org/images/logo200" /></a> + src="http://www.restlet.org/images/logo200" /></a> <br /> <br /> @@ -28,62 +28,62 @@ <h3><a name="timeline">Timeline</a></h3> <p> <ul> - <li>2007 Q1 - <ul> - <li><u>[Optional]</u> 1.0 RC2 release</li> - <li><b>1.0 release.</b> <a - href="http://restlet.tigris.org/issues/buglist.cgi?Submit+query=Submit+query&issue_status=NEW&issue_status=STARTED&issue_status=REOPENED&target_milestone=1.0+release">List - of issues</a> must be resolved (documentation, etc.).</li> - </ul> - </li> - <li>2007 Q2 - <ul> - <li>1.1 beta 1.<a - href="http://restlet.tigris.org/issues/buglist.cgi?Submit+query=Submit+query&issue_status=NEW&issue_status=STARTED&issue_status=REOPENED&target_milestone=1.1+beta">Open - issues</a> to resolve.</li> - </ul> - <li>2007 Q3 - <ul> - <li><b>1.1 release.</b> <a - href="http://restlet.tigris.org/issues/buglist.cgi?Submit+query=Submit+query&issue_status=NEW&issue_status=STARTED&issue_status=REOPENED&target_milestone=1.1+release">Remaining - issues</a> must be resolved.</li> - </ul> - </li> - <li>2008 Q1 - <ul> - <li><u>[JSR]</u> Submission of a proposal to standardize a - Restlet API 2.0 to the <a href="http://www.jcp.org">JCP</a> - <li><u>[JSR]</u> If submission approved, formation of expert - group</li> - <li><u>[JSR]</u> Write first draft</li> - </ul> - </li> - 
<li>2008 Q2 - <ul> - <li><u>[JSR]</u> Updates to the draft (optional)</li> - <li><u>[JSR]</u> Public Review</li> - </ul> - </li> - <li>2008 Q3 - <ul> - <li><u>[JSR]</u> Drafts updates (optional)</li> - <li><u>[JSR]</u> Draft Specification approval ballot by EC</li> - </ul> - </li> - <li>2008 Q4 - <ul> - <li><u>[JSR]</u> Proposed final draft</li> - <li><u>[JSR]</u> Completion of RI and TCK</li> - </ul> - </li> - <li>2009 Q1 - <ul> - <li><u>[JSR]</u> Final draft Submission</li> - <li><u>[JSR]</u> Final approval ballot by EC</li> - <li><u>[JSR]</u> Final release of Restlet API 2.0</li> - <li><b>2.0 release</b></li> - </ul> - </li> + <li>2007 Q1 + <ul> + <li><u>[Optional]</u> 1.0 RC3 release</li> + <li><b>1.0 release.</b> <a + href="http://restlet.tigris.org/issues/buglist.cgi?Submit+query=Submit+query&issue_status=NEW&issue_status=STARTED&issue_status=REOPENED&target_milestone=1.0+release">List + of issues</a> must be resolved (documentation, etc.).</li> + </ul> + </li> + <li>2007 Q2 + <ul> + <li>1.1 beta 1.<a + href="http://restlet.tigris.org/issues/buglist.cgi?Submit+query=Submit+query&issue_status=NEW&issue_status=STARTED&issue_status=REOPENED&target_milestone=1.1+beta">Open + issues</a> to resolve.</li> + </ul> + <li>2007 Q3 + <ul> + <li><b>1.1 release.</b> <a + href="http://restlet.tigris.org/issues/buglist.cgi?Submit+query=Submit+query&issue_status=NEW&issue_status=STARTED&issue_status=REOPENED&target_milestone=1.1+release">Remaining + issues</a> must be resolved.</li> + </ul> + </li> + <li>2008 Q1 + <ul> + <li><u>[JSR]</u> Submission of a proposal to standardize a + Restlet API 2.0 to the <a href="http://www.jcp.org">JCP</a> + <li><u>[JSR]</u> If submission approved, formation of expert + group</li> + <li><u>[JSR]</u> Write first draft</li> + </ul> + </li> + <li>2008 Q2 + <ul> + <li><u>[JSR]</u> Updates to the draft (optional)</li> + <li><u>[JSR]</u> Public Review</li> + </ul> + </li> + <li>2008 Q3 + <ul> + <li><u>[JSR]</u> Drafts updates (optional)</li> + 
<li><u>[JSR]</u> Draft Specification approval ballot by EC</li> + </ul> + </li> + <li>2008 Q4 + <ul> + <li><u>[JSR]</u> Proposed final draft</li> + <li><u>[JSR]</u> Completion of RI and TCK</li> + </ul> + </li> + <li>2009 Q1 + <ul> + <li><u>[JSR]</u> Final draft Submission</li> + <li><u>[JSR]</u> Final approval ballot by EC</li> + <li><u>[JSR]</u> Final release of Restlet API 2.0</li> + <li><b>2.0 release</b></li> + </ul> + </li> </ul> </p> @@ -92,15 +92,15 @@ <h3><a name="conclusion">Conclusion</a></h3> <p>Due to the open source nature of the Restlet project, the above timeline is subject to change. However, we will do our best to respect it and update it as frequently as possible. Please, feel free to <a - href="mailto:[email protected]">contact us</a> for questions and + href="mailto:[email protected]">contact us</a> for questions and comments.</p> <br /> -<small> Last modified on 2006-12-26. Copyright &copy; 2005-2007 +<small> Last modified on 2007-01-09. Copyright &copy; 2005-2007 <a href="mailto:[email protected]">J&eacute;r&ocirc;me Louvel</a>. Restlet is a registered trademark of <a target="_top" - href="http://www.noelios.com">Noelios Consulting</a>. </small> + href="http://www.noelios.com">Noelios Consulting</a>. </small> </body> </html> diff --git a/build/www/tutorial.html b/build/www/tutorial.html index 2a7b31c6cc..2e64110fbd 100644 --- a/build/www/tutorial.html +++ b/build/www/tutorial.html @@ -538,7 +538,7 @@ <h4>Notes</h4> <br /> -<small> Last modified on 2006-12-26. Copyright &copy; 2005-2007 +<small> Last modified on 2007-01-09. Copyright &copy; 2005-2007 <a href="mailto:[email protected]">J&eacute;r&ocirc;me Louvel</a>. Restlet is a registered trademark of <a target="_top" href="http://www.noelios.com">Noelios Consulting</a>. 
</small> diff --git a/build/www/tutorial.pdf b/build/www/tutorial.pdf index 7b6e58a3e1..f788e61873 100644 Binary files a/build/www/tutorial.pdf and b/build/www/tutorial.pdf differ diff --git a/module/com.noelios.restlet/src/com/noelios/restlet/Factory.java b/module/com.noelios.restlet/src/com/noelios/restlet/Factory.java index 77b74d0e5d..0a33f8169b 100644 --- a/module/com.noelios.restlet/src/com/noelios/restlet/Factory.java +++ b/module/com.noelios.restlet/src/com/noelios/restlet/Factory.java @@ -651,23 +651,6 @@ public void parse(Logger logger, Form form, Representation webForm) { } } - /** - * Parses an URL encoded query string into a given form. - * - * @param logger - * The logger to use. - * @param form - * The target form. - * @param queryString - * Query string. - * @deprecated Use the parse(Logger,String,CharacterSet) method to specify - * the encoding. This method uses the UTF-8 character set. - */ - @Deprecated - public void parse(Logger logger, Form form, String queryString) { - parse(logger, form, queryString, CharacterSet.UTF_8); - } - /** * Parses an URL encoded query string into a given form. * diff --git a/module/com.noelios.restlet/src/com/noelios/restlet/util/FormReader.java b/module/com.noelios.restlet/src/com/noelios/restlet/util/FormReader.java index c69385d022..caf2f93070 100644 --- a/module/com.noelios.restlet/src/com/noelios/restlet/util/FormReader.java +++ b/module/com.noelios.restlet/src/com/noelios/restlet/util/FormReader.java @@ -69,24 +69,6 @@ public FormReader(Logger logger, Representation representation) } } - /** - * Constructor. - * - * @param logger - * The logger. - * @param query - * The query string. - * @deprecated Use the FormReader(Logger,String,CharacterSet) constructor to - * specify the encoding. 
- */ - @Deprecated - public FormReader(Logger logger, String query) throws IOException { - this.logger = logger; - this.stream = new ByteArrayInputStream(query.getBytes()); - this.characterSet = CharacterSet.UTF_8; - - } - /** * Constructor. * diff --git a/module/com.noelios.restlet/src/com/noelios/restlet/util/FormUtils.java b/module/com.noelios.restlet/src/com/noelios/restlet/util/FormUtils.java index 78d4d3df88..4cf06b5261 100644 --- a/module/com.noelios.restlet/src/com/noelios/restlet/util/FormUtils.java +++ b/module/com.noelios.restlet/src/com/noelios/restlet/util/FormUtils.java @@ -35,24 +35,6 @@ * @author Jerome Louvel ([email protected]) */ public class FormUtils { - /** - * Parses a query into a given form. - * - * @param logger - * The logger. - * @param form - * The target form. - * @param query - * Query string. - * @deprecated Use the parseQuery(Logger,Form, String,CharacterSet) method - * to specify the encoding. This method uses the UTF-8 character - * set. - */ - @Deprecated - public static void parseQuery(Logger logger, Form form, String query) { - parseQuery(logger, form, query, CharacterSet.UTF_8); - } - /** * Parses a query into a given form. * @@ -112,27 +94,6 @@ public static void parsePost(Logger logger, Form form, Representation post) { } } - /** - * Reads the parameters whose name is a key in the given map.<br/> If a - * matching parameter is found, its value is put in the map.<br/> If - * multiple values are found, a list is created and set in the map. - * - * @param logger - * The logger. - * @param query - * The query string. - * @param parameters - * The parameters map controlling the reading. - * @deprecated Use the getParameters(Logger,String,Map<String, - * Object>,CharacterSet) method to specify the encoding. This - * method uses the UTF-8 character set. 
- */ - @Deprecated - public static void getParameters(Logger logger, String query, - Map<String, Object> parameters) throws IOException { - getParameters(logger, query, parameters, CharacterSet.UTF_8); - } - /** * Reads the parameters whose name is a key in the given map.<br/> If a * matching parameter is found, its value is put in the map.<br/> If @@ -170,27 +131,6 @@ public static void getParameters(Logger logger, Representation post, new FormReader(logger, post).readParameters(parameters); } - /** - * Reads the first parameter with the given name. - * - * @param logger - * The logger. - * @param query - * The query string. - * @param name - * The parameter name to match. - * @return The parameter. - * @throws IOException - * @deprecated Use the getFirstParameter(Logger,String,String,CharacterSet) - * method to specify the encoding. This method uses the UTF-8 - * character set. - */ - @Deprecated - public static Parameter getFirstParameter(Logger logger, String query, - String name) throws IOException { - return getFirstParameter(logger, query, name, CharacterSet.UTF_8); - } - /** * Reads the first parameter with the given name. * @@ -228,27 +168,6 @@ public static Parameter getFirstParameter(Logger logger, return new FormReader(logger, post).readFirstParameter(name); } - /** - * Reads the parameters with the given name.<br/> If multiple values are - * found, a list is returned created. - * - * @param logger - * The logger. - * @param query - * The query string. - * @param name - * The parameter name to match. - * @return The parameter value or list of values. - * @deprecated Use the getParameter(Logger,String,String,CharacterSet) - * method to specify the encoding. This method uses the UTF-8 - * character set. 
- */ - @Deprecated - public static Object getParameter(Logger logger, String query, String name) - throws IOException { - return getParameter(logger, query, name, CharacterSet.UTF_8); - } - /** * Reads the parameters with the given name.<br/> If multiple values are * found, a list is returned created. @@ -314,23 +233,4 @@ public static Parameter create(CharSequence name, CharSequence value, return result; } - - /** - * Creates a parameter. - * - * @param name - * The parameter name buffer. - * @param value - * The parameter value buffer (can be null). - * @return The created parameter. - * @throws IOException - * @deprecated Use the create(CharSequence,CharSequence,CharacterSet) method - * instead. This method uses the UTF-8 character set. - */ - @Deprecated - public static Parameter create(CharSequence name, CharSequence value) - throws IOException { - return create(name, value, CharacterSet.UTF_8); - } - } diff --git a/module/com.noelios.restlet/src/com/noelios/restlet/util/HeaderUtils.java b/module/com.noelios.restlet/src/com/noelios/restlet/util/HeaderUtils.java index 60a75b3718..8cda5c5875 100644 --- a/module/com.noelios.restlet/src/com/noelios/restlet/util/HeaderUtils.java +++ b/module/com.noelios.restlet/src/com/noelios/restlet/util/HeaderUtils.java @@ -111,25 +111,6 @@ public static Appendable appendQuote(CharSequence source, return destination; } - /** - * Appends a source string as an URI encoded string. - * - * @param source - * The source string to format. - * @param destination - * The appendable destination. - * @throws IOException - * @deprecated Use the - * appendUriEncoded(CharSequence,Appendable,CharacterSet) method - * to specify the encoding. This method uses the UTF-8 character - * set. - */ - @Deprecated - public static Appendable appendUriEncoded(CharSequence source, - Appendable destination) throws IOException { - return appendUriEncoded(source, destination, CharacterSet.UTF_8); - } - /** * Appends a source string as an URI encoded string. 
* diff --git a/module/org.restlet/src/org/restlet/data/Form.java b/module/org.restlet/src/org/restlet/data/Form.java index e16175cb07..306efd5bc3 100644 --- a/module/org.restlet/src/org/restlet/data/Form.java +++ b/module/org.restlet/src/org/restlet/data/Form.java @@ -73,23 +73,6 @@ public Form(Logger logger, Representation representation) { Factory.getInstance().parse(logger, this, representation); } - /** - * Constructor. - * - * @param logger - * The logger to use. - * @param queryString - * The Web form parameters as a string. - * @throws IOException - * @deprecated Use the Form(Logger,String,CharacterSet) constructor to - * specify the encoding. This method uses the UTF-8 character - * set. - */ - @Deprecated - public Form(Logger logger, String queryString) { - Factory.getInstance().parse(logger, this, queryString); - } - /** * Constructor. * @@ -125,7 +108,8 @@ public Form(Representation webForm) { * @throws IOException */ public Form(String queryString) { - this(Logger.getLogger(Form.class.getCanonicalName()), queryString); + this(Logger.getLogger(Form.class.getCanonicalName()), queryString, + CharacterSet.UTF_8); } /** @@ -204,19 +188,6 @@ public Representation getWebRepresentation(CharacterSet characterSet) { MediaType.APPLICATION_WWW_FORM, null, characterSet); } - /** - * URL encodes the form. - * - * @return The encoded form. - * @throws IOException - * @deprecated Use the urlEncode(CharacterSet) method to specify the - * encoding. This method uses the UTF-8 character set. - */ - @Deprecated - public String urlEncode() throws IOException { - return encode(CharacterSet.UTF_8); - } - /** * Encodes the form using the standard URI encoding mechanism and the UTF-8 * character set. 
diff --git a/module/org.restlet/src/org/restlet/data/Parameter.java b/module/org.restlet/src/org/restlet/data/Parameter.java index dd812ff6df..ebdbd48d98 100644 --- a/module/org.restlet/src/org/restlet/data/Parameter.java +++ b/module/org.restlet/src/org/restlet/data/Parameter.java @@ -145,33 +145,6 @@ public String toString() { return getName() + ": " + getValue(); } - /** - * Encodes the parameter. - * - * @return The encoded string. - * @throws IOException - * @deprecated Use the urlEncode(CharacterSet) method to specify the - * encoding. This method uses the UTF-8 character set. - */ - @Deprecated - public String urlEncode() throws IOException { - return encode(CharacterSet.UTF_8); - } - - /** - * Encodes the parameter and append the result to the given buffer. - * - * @param buffer - * The buffer to append. - * @throws IOException - * @deprecated Use the urlEncode(Appendable,CharacterSet) method to specify - * the encoding. This method uses the UTF-8 character set. - */ - @Deprecated - public void urlEncode(Appendable buffer) throws IOException { - encode(buffer, CharacterSet.UTF_8); - } - /** * Encodes the parameter using the standard URI encoding mechanism. * diff --git a/module/org.restlet/src/org/restlet/resource/DomRepresentation.java b/module/org.restlet/src/org/restlet/resource/DomRepresentation.java index ba340584a8..d6fa094950 100644 --- a/module/org.restlet/src/org/restlet/resource/DomRepresentation.java +++ b/module/org.restlet/src/org/restlet/resource/DomRepresentation.java @@ -94,22 +94,6 @@ public DomRepresentation(MediaType mediaType, Document xmlDocument) { this.dom = xmlDocument; } - /** - * Constructor. - * - * @param mediaType - * The representation's media type. - * @param xmlRepresentation - * A source XML representation to parse. - * @deprecated Use the other constructor instead. 
- */ - @Deprecated - public DomRepresentation(MediaType mediaType, - Representation xmlRepresentation) { - super(mediaType); - this.xmlRepresentation = xmlRepresentation; - } - /** * Constructor. * diff --git a/module/org.restlet/src/org/restlet/resource/Representation.java b/module/org.restlet/src/org/restlet/resource/Representation.java index a3fb1de0b3..f96dfb24be 100644 --- a/module/org.restlet/src/org/restlet/resource/Representation.java +++ b/module/org.restlet/src/org/restlet/resource/Representation.java @@ -120,19 +120,6 @@ public String getText() throws IOException { return result; } - /** - * Converts the representation to a string value. Be careful when using this - * method as the conversion of large content to a string fully stored in - * memory can result in OutOfMemoryErrors being thrown. - * - * @return The representation as a string value. - * @deprecated Use getText instead. - */ - @Deprecated - public String getValue() throws IOException { - return getText(); - } - /** * Indicates if some fresh content is available, without having to actually * call one of the content manipulation method like getStream() that would diff --git a/module/org.restlet/src/org/restlet/resource/Resource.java b/module/org.restlet/src/org/restlet/resource/Resource.java index 0dd7b041dc..268c5d5b6a 100644 --- a/module/org.restlet/src/org/restlet/resource/Resource.java +++ b/module/org.restlet/src/org/restlet/resource/Resource.java @@ -23,7 +23,6 @@ import java.util.logging.Logger; import org.restlet.data.Reference; -import org.restlet.data.ReferenceList; import org.restlet.data.Status; /** @@ -58,9 +57,6 @@ public class Resource { /** The identifier. */ private Reference identifier; - /** The modifiable list of identifiers. */ - private ReferenceList identifiers; - /** The modifiable list of variants. 
*/ private List<Variant> variants; @@ -80,7 +76,6 @@ public Resource() { public Resource(Logger logger) { this.logger = logger; this.identifier = null; - this.identifiers = null; this.variants = null; } @@ -142,21 +137,6 @@ public Reference getIdentifier() { return this.identifier; } - /** - * Returns the list of all the identifiers for the resource. The list is - * composed of the official identifier followed by all the alias - * identifiers. - * - * @return The list of all the identifiers for the resource. - * @deprecated No obvious usage. More the role of Handler to map URIs. - */ - @Deprecated - public ReferenceList getIdentifiers() { - if (this.identifiers == null) - this.identifiers = new ReferenceList(); - return this.identifiers; - } - /** * Returns the logger to use. * @@ -252,18 +232,6 @@ public void setIdentifier(String identifierUri) { setIdentifier(new Reference(identifierUri)); } - /** - * Sets a new list of all the identifiers for the resource. - * - * @param identifiers - * The new list of identifiers. - * @deprecated No obvious usage. More the role of Handler to map URIs. - */ - @Deprecated - public void setIdentifiers(ReferenceList identifiers) { - this.identifiers = identifiers; - } - /** * Sets the logger to use. * diff --git a/module/org.restlet/src/org/restlet/resource/StringRepresentation.java b/module/org.restlet/src/org/restlet/resource/StringRepresentation.java index 1d55c4e36d..d92e48f2e9 100644 --- a/module/org.restlet/src/org/restlet/resource/StringRepresentation.java +++ b/module/org.restlet/src/org/restlet/resource/StringRepresentation.java @@ -146,18 +146,6 @@ public String getText() { return (this.text == null) ? null : this.text.toString(); } - /** - * Sets the string value. - * - * @param text - * The string value. - * @deprecated Use setText instead. - */ - @Deprecated - public void setValue(String text) { - setText(text); - } - /** * Sets the string value. 
* diff --git a/module/org.restlet/src/org/restlet/util/Factory.java b/module/org.restlet/src/org/restlet/util/Factory.java index b416d27068..ce4b23ce9a 100644 --- a/module/org.restlet/src/org/restlet/util/Factory.java +++ b/module/org.restlet/src/org/restlet/util/Factory.java @@ -51,7 +51,7 @@ public abstract class Factory { .getCanonicalName()); /** Common version info. */ - public static final String MINOR_NUMBER = "2"; + public static final String MINOR_NUMBER = "3"; public static final String VERSION_LONG = "1.0 RC" + MINOR_NUMBER; @@ -272,21 +272,6 @@ public abstract Variant getPreferredVariant(ClientInfo client, public abstract void parse(Logger logger, Form form, Representation representation); - /** - * Parses an URL encoded query string into a given form. - * - * @param logger - * The logger to use. - * @param form - * The target form. - * @param queryString - * Query string. - * @deprecated Use the parse(Logger,String,CharacterSet) method to - * specify the encoding. - */ - @Deprecated - public abstract void parse(Logger logger, Form form, String queryString); - /** * Parses an URL encoded query string into a given form. * diff --git a/module/org.restlet/src/org/restlet/util/WrapperRepresentation.java b/module/org.restlet/src/org/restlet/util/WrapperRepresentation.java index 2e5ac4e842..78b6219ece 100644 --- a/module/org.restlet/src/org/restlet/util/WrapperRepresentation.java +++ b/module/org.restlet/src/org/restlet/util/WrapperRepresentation.java @@ -32,7 +32,6 @@ import org.restlet.data.Language; import org.restlet.data.MediaType; import org.restlet.data.Reference; -import org.restlet.data.ReferenceList; import org.restlet.data.Tag; import org.restlet.resource.Representation; import org.restlet.resource.Result; @@ -163,19 +162,6 @@ public Reference getIdentifier() { return getWrappedRepresentation().getIdentifier(); } - /** - * Returns the list of all the identifiers for the resource. 
The list is - * composed of the official identifier followed by all the alias - * identifiers. - * - * @return The list of all the identifiers for the resource. - * @deprecated No obvious usage. More the role of Handler to map URIs. - */ - @Deprecated - public ReferenceList getIdentifiers() { - return getWrappedRepresentation().getIdentifiers(); - } - /** * Returns the language or null if not applicable. * @@ -258,19 +244,6 @@ public String getText() throws IOException { return getWrappedRepresentation().getText(); } - /** - * Converts the representation to a string value. Be careful when using this - * method as the conversion of large content to a string fully stored in - * memory can result in OutOfMemoryErrors being thrown. - * - * @return The representation as a string value. - * @deprecated Use getText instead - */ - @Deprecated - public String getValue() throws IOException { - return getWrappedRepresentation().getValue(); - } - /** * Returns a full representation for a given variant previously returned via * the getVariants() method. The default implementation directly returns the @@ -424,18 +397,6 @@ public void setIdentifier(String identifierUri) { getWrappedRepresentation().setIdentifier(identifierUri); } - /** - * Sets a new list of all the identifiers for the resource. - * - * @param identifiers - * The new list of identifiers. - * @deprecated No obvious usage. More the role of Handler to map URIs. - */ - @Deprecated - public void setIdentifiers(ReferenceList identifiers) { - getWrappedRepresentation().setIdentifiers(identifiers); - } - /** * Sets the language or null if not applicable. 
* diff --git a/module/org.restlet/src/org/restlet/util/WrapperResource.java b/module/org.restlet/src/org/restlet/util/WrapperResource.java index a94ef728d7..fd277788c3 100644 --- a/module/org.restlet/src/org/restlet/util/WrapperResource.java +++ b/module/org.restlet/src/org/restlet/util/WrapperResource.java @@ -22,7 +22,6 @@ import java.util.logging.Logger; import org.restlet.data.Reference; -import org.restlet.data.ReferenceList; import org.restlet.resource.Representation; import org.restlet.resource.Resource; import org.restlet.resource.Result; @@ -108,20 +107,6 @@ public Reference getIdentifier() { return getWrappedResource().getIdentifier(); } - /** - * Returns the list of all the identifiers for the resource. The list is - * composed of the official identifier followed by all the alias - * identifiers. - * - * @return The list of all the identifiers for the resource. - * @deprecated The URIs should only be managed by the application routers - * and handlers. - */ - @Deprecated - public ReferenceList getIdentifiers() { - return getWrappedResource().getIdentifiers(); - } - /** * Returns the logger to use. * @@ -217,19 +202,6 @@ public void setIdentifier(String identifierUri) { getWrappedResource().setIdentifier(identifierUri); } - /** - * Sets a new list of all the identifiers for the resource. - * - * @param identifiers - * The new list of identifiers. - * @deprecated The URIs should only be managed by the application routers - * and handlers. - */ - @Deprecated - public void setIdentifiers(ReferenceList identifiers) { - getWrappedResource().setIdentifiers(identifiers); - } - /** * Sets the logger to use. *
041af28166c270b29b51f5f42fb3269c2dbe1159
kotlin
Deprecate and don't write KotlinClass$Kind
c
https://github.com/JetBrains/kotlin
diff --git a/compiler/backend/src/org/jetbrains/kotlin/codegen/ImplementationBodyCodegen.java b/compiler/backend/src/org/jetbrains/kotlin/codegen/ImplementationBodyCodegen.java index 8106ba4385088..89662512950c8 100644 --- a/compiler/backend/src/org/jetbrains/kotlin/codegen/ImplementationBodyCodegen.java +++ b/compiler/backend/src/org/jetbrains/kotlin/codegen/ImplementationBodyCodegen.java @@ -43,7 +43,6 @@ import org.jetbrains.kotlin.lexer.JetTokens; import org.jetbrains.kotlin.load.java.JvmAbi; import org.jetbrains.kotlin.load.java.JvmAnnotationNames; -import org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinClass; import org.jetbrains.kotlin.load.java.descriptors.JavaCallableMemberDescriptor; import org.jetbrains.kotlin.name.FqName; import org.jetbrains.kotlin.name.FqNameUnsafe; @@ -256,17 +255,12 @@ protected void generateBody() { protected void generateKotlinAnnotation() { if (state.getClassBuilderMode() != ClassBuilderMode.FULL) return; - KotlinClass.Kind kind; - if (isAnonymousObject(descriptor)) { - kind = KotlinClass.Kind.ANONYMOUS_OBJECT; - } - else if (isTopLevelOrInnerClass(descriptor)) { - // Default value is Kind.CLASS - kind = null; - } - else { - // LOCAL_CLASS is also written to inner classes of local classes - kind = KotlinClass.Kind.LOCAL_CLASS; + if (!isTopLevelOrInnerClass(descriptor)) { + AnnotationVisitor av = v.getVisitor().visitAnnotation( + asmDescByFqNameWithoutInnerClasses(JvmAnnotationNames.KOTLIN_LOCAL_CLASS), true + ); + av.visit(JvmAnnotationNames.VERSION_FIELD_NAME, JvmAbi.VERSION.toArray()); + av.visitEnd(); } DescriptorSerializer serializer = @@ -276,13 +270,6 @@ else if (isTopLevelOrInnerClass(descriptor)) { AnnotationVisitor av = v.getVisitor().visitAnnotation(asmDescByFqNameWithoutInnerClasses(JvmAnnotationNames.KOTLIN_CLASS), true); writeAnnotationData(av, serializer, classProto); - if (kind != null) { - av.visitEnum( - JvmAnnotationNames.KIND_FIELD_NAME, - 
Type.getObjectType(KotlinClass.KIND_INTERNAL_NAME).getDescriptor(), - kind.toString() - ); - } av.visitEnd(); } diff --git a/compiler/frontend.java/src/org/jetbrains/kotlin/load/kotlin/FileBasedKotlinClass.java b/compiler/frontend.java/src/org/jetbrains/kotlin/load/kotlin/FileBasedKotlinClass.java index 67a9529da12fd..f59d77e8bf604 100644 --- a/compiler/frontend.java/src/org/jetbrains/kotlin/load/kotlin/FileBasedKotlinClass.java +++ b/compiler/frontend.java/src/org/jetbrains/kotlin/load/kotlin/FileBasedKotlinClass.java @@ -270,9 +270,6 @@ private static ClassId resolveNameByInternalName(@NotNull String name, @NotNull if (name.equals(JvmAnnotationNames.KotlinSyntheticClass.KIND_INTERNAL_NAME)) { return JvmAnnotationNames.KotlinSyntheticClass.KIND_CLASS_ID; } - else if (name.equals(JvmAnnotationNames.KotlinClass.KIND_INTERNAL_NAME)) { - return JvmAnnotationNames.KotlinClass.KIND_CLASS_ID; - } List<String> classes = new ArrayList<String>(1); boolean local = false; diff --git a/compiler/testData/codegen/bytecodeListing/annotations/literals.txt b/compiler/testData/codegen/bytecodeListing/annotations/literals.txt index 7ceb93edb710b..6666962805c18 100644 --- a/compiler/testData/codegen/bytecodeListing/annotations/literals.txt +++ b/compiler/testData/codegen/bytecodeListing/annotations/literals.txt @@ -20,7 +20,7 @@ method <init>(p0: int): void } [email protected] LiteralsKt$foo$3 { [email protected] @kotlin.jvm.internal.KotlinClass LiteralsKt$foo$3 { inner class LiteralsKt$foo$3 field $kotlinClass: kotlin.reflect.KClass method <clinit>(): void diff --git a/compiler/tests/org/jetbrains/kotlin/codegen/InlineTestUtil.kt b/compiler/tests/org/jetbrains/kotlin/codegen/InlineTestUtil.kt index 3c09b0e4d375d..ccb23e253a03a 100644 --- a/compiler/tests/org/jetbrains/kotlin/codegen/InlineTestUtil.kt +++ b/compiler/tests/org/jetbrains/kotlin/codegen/InlineTestUtil.kt @@ -185,7 +185,7 @@ public object InlineTestUtil { } private fun isClassOrPackagePartKind(header: KotlinClassHeader): 
Boolean { - return header.classKind == JvmAnnotationNames.KotlinClass.Kind.CLASS || header.isInterfaceDefaultImpls + return (header.kind == KotlinClassHeader.Kind.CLASS && !header.isLocalClass) || header.isInterfaceDefaultImpls } private fun getClassHeader(file: OutputFile): KotlinClassHeader { diff --git a/compiler/tests/org/jetbrains/kotlin/codegen/KotlinSyntheticClassAnnotationTest.java b/compiler/tests/org/jetbrains/kotlin/codegen/KotlinSyntheticClassAnnotationTest.java index c02e8d2ff2afd..9ec16af4125cf 100644 --- a/compiler/tests/org/jetbrains/kotlin/codegen/KotlinSyntheticClassAnnotationTest.java +++ b/compiler/tests/org/jetbrains/kotlin/codegen/KotlinSyntheticClassAnnotationTest.java @@ -19,14 +19,12 @@ import com.google.common.base.Predicate; import com.google.common.collect.Collections2; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; import org.jetbrains.kotlin.backend.common.output.OutputFile; import org.jetbrains.kotlin.load.java.AbiVersionUtil; import org.jetbrains.kotlin.load.java.JvmAbi; -import org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinClass; +import org.jetbrains.kotlin.load.java.JvmAnnotationNames; import org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinSyntheticClass; import org.jetbrains.kotlin.name.FqName; -import org.jetbrains.kotlin.resolve.jvm.JvmClassName; import org.jetbrains.kotlin.serialization.deserialization.BinaryVersion; import org.jetbrains.kotlin.test.ConfigurationKind; @@ -34,9 +32,6 @@ import java.util.Collection; import java.util.List; -import static org.jetbrains.kotlin.load.java.JvmAnnotationNames.KIND_FIELD_NAME; -import static org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinClass.Kind.ANONYMOUS_OBJECT; -import static org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinClass.Kind.LOCAL_CLASS; import static org.jetbrains.kotlin.load.java.JvmAnnotationNames.VERSION_FIELD_NAME; public class KotlinSyntheticClassAnnotationTest extends CodegenTestCase { @@ 
-93,8 +88,7 @@ public void testAnonymousFunction() { public void testLocalClass() { doTestKotlinClass( "fun foo() { class Local }", - "Local", - LOCAL_CLASS + "Local" ); } @@ -108,24 +102,21 @@ public void testLocalTraitImpl() { public void testLocalTraitInterface() { doTestKotlinClass( "fun foo() { interface Local { fun bar() = 42 } }", - "Local.class", - LOCAL_CLASS + "Local.class" ); } public void testInnerClassOfLocalClass() { doTestKotlinClass( "fun foo() { class Local { inner class Inner } }", - "Inner", - LOCAL_CLASS + "Inner" ); } public void testAnonymousObject() { doTestKotlinClass( "val o = object {}", - "$1", - ANONYMOUS_OBJECT + "$1" ); } @@ -138,22 +129,17 @@ public void testWhenMappings() { } private void doTestKotlinSyntheticClass(@NotNull String code, @NotNull String classFilePart) { - doTest(code, classFilePart, KotlinSyntheticClass.CLASS_NAME, null); + doTest(code, classFilePart, KotlinSyntheticClass.CLASS_NAME.getFqNameForClassNameWithoutDollars()); } - private void doTestKotlinClass( - @NotNull String code, - @NotNull String classFilePart, - @NotNull KotlinClass.Kind expectedKind - ) { - doTest(code, classFilePart, KotlinClass.CLASS_NAME, expectedKind.toString()); + private void doTestKotlinClass(@NotNull String code, @NotNull String classFilePart) { + doTest(code, classFilePart, JvmAnnotationNames.KOTLIN_CLASS, JvmAnnotationNames.KOTLIN_LOCAL_CLASS); } private void doTest( @NotNull String code, @NotNull final String classFilePart, - @NotNull JvmClassName annotationName, - @Nullable String expectedKind + @NotNull FqName... 
annotationFqNames ) { loadText("package " + PACKAGE_NAME + "\n\n" + code); List<OutputFile> output = generateClassesInFile().asList(); @@ -169,14 +155,12 @@ public boolean apply(OutputFile file) { String path = files.iterator().next().getRelativePath(); String fqName = path.substring(0, path.length() - ".class".length()).replace('/', '.'); Class<?> aClass = generateClass(fqName); - assertAnnotatedWithKind(aClass, annotationName.getFqNameForClassNameWithoutDollars().asString(), expectedKind); + for (FqName annotationFqName : annotationFqNames) { + assertAnnotatedWith(aClass, annotationFqName.asString()); + } } - private void assertAnnotatedWithKind( - @NotNull Class<?> aClass, - @NotNull String annotationFqName, - @Nullable String expectedKind - ) { + private void assertAnnotatedWith(@NotNull Class<?> aClass, @NotNull String annotationFqName) { Class<? extends Annotation> annotationClass = loadAnnotationClassQuietly(annotationFqName); assertTrue("No annotation " + annotationFqName + " found in " + aClass, aClass.isAnnotationPresent(annotationClass)); @@ -186,9 +170,5 @@ private void assertAnnotatedWithKind( assertNotNull(version); assertTrue("Annotation " + annotationFqName + " is written with an unsupported format", AbiVersionUtil.isAbiVersionCompatible(BinaryVersion.create(version))); - - Object actualKind = CodegenTestUtil.getAnnotationAttribute(annotation, KIND_FIELD_NAME); - assertNotNull(actualKind); - assertEquals("Annotation " + annotationFqName + " has the wrong kind", expectedKind, actualKind.toString()); } } diff --git a/compiler/tests/org/jetbrains/kotlin/jvm/runtime/AbstractJvmRuntimeDescriptorLoaderTest.kt b/compiler/tests/org/jetbrains/kotlin/jvm/runtime/AbstractJvmRuntimeDescriptorLoaderTest.kt index 44836798c9779..21dc941dda869 100644 --- a/compiler/tests/org/jetbrains/kotlin/jvm/runtime/AbstractJvmRuntimeDescriptorLoaderTest.kt +++ b/compiler/tests/org/jetbrains/kotlin/jvm/runtime/AbstractJvmRuntimeDescriptorLoaderTest.kt @@ -150,11 +150,10 @@ 
public abstract class AbstractJvmRuntimeDescriptorLoaderTest : TestCaseWithTmpdi val packageView = module.getPackage(LoadDescriptorUtil.TEST_PACKAGE_FQNAME) packageScopes.add(packageView.memberScope) } - else if (header == null || - (header.kind == KotlinClassHeader.Kind.CLASS && header.classKind == JvmAnnotationNames.KotlinClass.Kind.CLASS)) { + else if (header == null || (header.kind == KotlinClassHeader.Kind.CLASS && !header.isLocalClass)) { // Either a normal Kotlin class or a Java class val classId = klass.classId - if (!classId.isLocal()) { + if (!classId.isLocal) { val classDescriptor = module.findClassAcrossModuleDependencies(classId).sure { "Couldn't resolve class $className" } if (DescriptorUtils.isTopLevelDeclaration(classDescriptor)) { classes.add(classDescriptor) diff --git a/compiler/tests/org/jetbrains/kotlin/serialization/AbstractLocalClassProtoTest.kt b/compiler/tests/org/jetbrains/kotlin/serialization/AbstractLocalClassProtoTest.kt index fbd1246cba2b0..f1899b6a4fc20 100644 --- a/compiler/tests/org/jetbrains/kotlin/serialization/AbstractLocalClassProtoTest.kt +++ b/compiler/tests/org/jetbrains/kotlin/serialization/AbstractLocalClassProtoTest.kt @@ -76,17 +76,13 @@ public abstract class AbstractLocalClassProtoTest : TestCaseWithTmpdir() { ) } + @Suppress("UNCHECKED_CAST") private fun assertHasAnnotationData(clazz: Class<*>) { - @Suppress("UNCHECKED_CAST") - val annotation = clazz.getAnnotation( + checkNotNull(clazz.getAnnotation( clazz.classLoader.loadClass(JvmAnnotationNames.KOTLIN_CLASS.asString()) as Class<Annotation> - ) - assert(annotation != null) { "KotlinClass annotation is not found for class $clazz" } - - val kindMethod = annotation.annotationType().getDeclaredMethod("kind") - val kind = kindMethod(annotation) - assert(kind.toString() != JvmAnnotationNames.KotlinClass.Kind.CLASS.toString()) { - "'kind' should not be CLASS: $clazz (was $kind)" - } + )) { "KotlinClass annotation is not found for class $clazz" } + 
checkNotNull(clazz.getAnnotation( + clazz.classLoader.loadClass(JvmAnnotationNames.KOTLIN_LOCAL_CLASS.asString()) as Class<Annotation> + )) { "KotlinLocalClass annotation is not found for class $clazz" } } } diff --git a/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/java/JvmAnnotationNames.java b/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/java/JvmAnnotationNames.java index c690ccb0d1496..4c28d1f533236 100644 --- a/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/java/JvmAnnotationNames.java +++ b/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/java/JvmAnnotationNames.java @@ -28,13 +28,14 @@ import java.util.Set; public final class JvmAnnotationNames { - public static final FqName KOTLIN_CLASS = KotlinClass.CLASS_NAME.getFqNameForClassNameWithoutDollars(); + public static final FqName KOTLIN_CLASS = new FqName("kotlin.jvm.internal.KotlinClass"); public static final FqName KOTLIN_PACKAGE = new FqName("kotlin.jvm.internal.KotlinPackage"); public static final FqName KOTLIN_FILE_FACADE = new FqName("kotlin.jvm.internal.KotlinFileFacade"); public static final FqName KOTLIN_MULTIFILE_CLASS = new FqName("kotlin.jvm.internal.KotlinMultifileClass"); public static final FqName KOTLIN_MULTIFILE_CLASS_PART = new FqName("kotlin.jvm.internal.KotlinMultifileClassPart"); public static final FqName KOTLIN_CALLABLE = new FqName("kotlin.jvm.internal.KotlinCallable"); public static final FqName KOTLIN_INTERFACE_DEFAULT_IMPLS = new FqName("kotlin.jvm.internal.KotlinInterfaceDefaultImpls"); + public static final FqName KOTLIN_LOCAL_CLASS = new FqName("kotlin.jvm.internal.KotlinLocalClass"); public static final FqName JAVA_LANG_DEPRECATED = new FqName("java.lang.Deprecated"); @@ -68,23 +69,6 @@ public final class JvmAnnotationNames { public static final FqName ENHANCED_NULLABILITY_ANNOTATION = new FqName("kotlin.jvm.internal.EnhancedNullability"); public static final FqName ENHANCED_MUTABILITY_ANNOTATION = new 
FqName("kotlin.jvm.internal.EnhancedMutability"); - public static class KotlinClass { - public static final JvmClassName CLASS_NAME = JvmClassName.byInternalName("kotlin/jvm/internal/KotlinClass"); - public static final ClassId KIND_CLASS_ID = - ClassId.topLevel(CLASS_NAME.getFqNameForClassNameWithoutDollars()).createNestedClassId(Name.identifier("Kind")); - public static final String KIND_INTERNAL_NAME = JvmClassName.byClassId(KIND_CLASS_ID).getInternalName(); - - /** - * This enum duplicates {@link kotlin.jvm.internal.KotlinClass.Kind}. Both places should be updated simultaneously. - */ - public enum Kind { - CLASS, - LOCAL_CLASS, - ANONYMOUS_OBJECT, - ; - } - } - public static class KotlinSyntheticClass { public static final JvmClassName CLASS_NAME = JvmClassName.byInternalName("kotlin/jvm/internal/KotlinSyntheticClass"); public static final ClassId KIND_CLASS_ID = @@ -125,6 +109,7 @@ public static class KotlinSyntheticClass { } SPECIAL_ANNOTATIONS.add(KotlinSyntheticClass.CLASS_NAME); SPECIAL_ANNOTATIONS.add(JvmClassName.byFqNameWithoutInnerClasses(KOTLIN_INTERFACE_DEFAULT_IMPLS)); + SPECIAL_ANNOTATIONS.add(JvmClassName.byFqNameWithoutInnerClasses(KOTLIN_LOCAL_CLASS)); for (FqName fqName : Arrays.asList(JETBRAINS_NOT_NULL_ANNOTATION, JETBRAINS_NULLABLE_ANNOTATION)) { NULLABILITY_ANNOTATIONS.add(JvmClassName.byFqNameWithoutInnerClasses(fqName)); diff --git a/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/kotlin/header/KotlinClassHeader.kt b/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/kotlin/header/KotlinClassHeader.kt index f12bc55a7a6fc..dadad589ede50 100644 --- a/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/kotlin/header/KotlinClassHeader.kt +++ b/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/kotlin/header/KotlinClassHeader.kt @@ -17,24 +17,22 @@ package org.jetbrains.kotlin.load.kotlin.header import org.jetbrains.kotlin.load.java.AbiVersionUtil -import 
org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinClass -import org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinSyntheticClass import org.jetbrains.kotlin.serialization.deserialization.BinaryVersion -public class KotlinClassHeader( - public val kind: KotlinClassHeader.Kind, - public val version: BinaryVersion, - public val annotationData: Array<String>?, - public val strings: Array<String>?, - public val classKind: KotlinClass.Kind?, - public val syntheticClassKind: String?, - public val filePartClassNames: Array<String>?, - public val multifileClassName: String?, - public val isInterfaceDefaultImpls: Boolean +class KotlinClassHeader( + val kind: KotlinClassHeader.Kind, + val version: BinaryVersion, + val annotationData: Array<String>?, + val strings: Array<String>?, + val syntheticClassKind: String?, + val filePartClassNames: Array<String>?, + val multifileClassName: String?, + val isInterfaceDefaultImpls: Boolean, + val isLocalClass: Boolean ) { - public val isCompatibleAbiVersion: Boolean get() = AbiVersionUtil.isAbiVersionCompatible(version) + val isCompatibleAbiVersion: Boolean get() = AbiVersionUtil.isAbiVersionCompatible(version) - public enum class Kind { + enum class Kind { CLASS, PACKAGE_FACADE, FILE_FACADE, @@ -45,13 +43,13 @@ public class KotlinClassHeader( override fun toString() = "$kind " + - (if (classKind != null) "$classKind " else "") + + (if (isLocalClass) "(local) " else "") + (if (syntheticClassKind != null) "$syntheticClassKind " else "") + "version=$version" } -public fun KotlinClassHeader.isCompatibleClassKind(): Boolean = isCompatibleAbiVersion && kind == KotlinClassHeader.Kind.CLASS -public fun KotlinClassHeader.isCompatiblePackageFacadeKind(): Boolean = isCompatibleAbiVersion && kind == KotlinClassHeader.Kind.PACKAGE_FACADE -public fun KotlinClassHeader.isCompatibleFileFacadeKind(): Boolean = isCompatibleAbiVersion && kind == KotlinClassHeader.Kind.FILE_FACADE -public fun KotlinClassHeader.isCompatibleMultifileClassKind(): 
Boolean = isCompatibleAbiVersion && kind == KotlinClassHeader.Kind.MULTIFILE_CLASS -public fun KotlinClassHeader.isCompatibleMultifileClassPartKind(): Boolean = isCompatibleAbiVersion && kind == KotlinClassHeader.Kind.MULTIFILE_CLASS_PART +fun KotlinClassHeader.isCompatibleClassKind(): Boolean = isCompatibleAbiVersion && kind == KotlinClassHeader.Kind.CLASS +fun KotlinClassHeader.isCompatiblePackageFacadeKind(): Boolean = isCompatibleAbiVersion && kind == KotlinClassHeader.Kind.PACKAGE_FACADE +fun KotlinClassHeader.isCompatibleFileFacadeKind(): Boolean = isCompatibleAbiVersion && kind == KotlinClassHeader.Kind.FILE_FACADE +fun KotlinClassHeader.isCompatibleMultifileClassKind(): Boolean = isCompatibleAbiVersion && kind == KotlinClassHeader.Kind.MULTIFILE_CLASS +fun KotlinClassHeader.isCompatibleMultifileClassPartKind(): Boolean = isCompatibleAbiVersion && kind == KotlinClassHeader.Kind.MULTIFILE_CLASS_PART diff --git a/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/kotlin/header/ReadKotlinClassHeaderAnnotationVisitor.java b/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/kotlin/header/ReadKotlinClassHeaderAnnotationVisitor.java index ecbb2d71effe9..384d96eafee46 100644 --- a/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/kotlin/header/ReadKotlinClassHeaderAnnotationVisitor.java +++ b/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/kotlin/header/ReadKotlinClassHeaderAnnotationVisitor.java @@ -21,6 +21,7 @@ import org.jetbrains.kotlin.descriptors.SourceElement; import org.jetbrains.kotlin.load.java.AbiVersionUtil; import org.jetbrains.kotlin.name.ClassId; +import org.jetbrains.kotlin.name.FqName; import org.jetbrains.kotlin.name.Name; import org.jetbrains.kotlin.resolve.jvm.JvmClassName; import org.jetbrains.kotlin.serialization.deserialization.BinaryVersion; @@ -39,7 +40,7 @@ public class ReadKotlinClassHeaderAnnotationVisitor implements AnnotationVisitor private static final Map<JvmClassName, KotlinClassHeader.Kind> 
OLD_DEPRECATED_ANNOTATIONS_KINDS = new HashMap<JvmClassName, KotlinClassHeader.Kind>(); static { - HEADER_KINDS.put(KotlinClass.CLASS_NAME, CLASS); + HEADER_KINDS.put(JvmClassName.byFqNameWithoutInnerClasses(KOTLIN_CLASS), CLASS); HEADER_KINDS.put(JvmClassName.byFqNameWithoutInnerClasses(KOTLIN_PACKAGE), PACKAGE_FACADE); HEADER_KINDS.put(JvmClassName.byFqNameWithoutInnerClasses(KOTLIN_FILE_FACADE), FILE_FACADE); HEADER_KINDS.put(JvmClassName.byFqNameWithoutInnerClasses(KOTLIN_MULTIFILE_CLASS), MULTIFILE_CLASS); @@ -66,9 +67,9 @@ private static void initOldAnnotations() { private String[] annotationData = null; private String[] strings = null; private KotlinClassHeader.Kind headerKind = null; - private KotlinClass.Kind classKind = null; private String syntheticClassKind = null; private boolean isInterfaceDefaultImpls = false; + private boolean isLocalClass = false; @Nullable public KotlinClassHeader createHeader() { @@ -76,11 +77,6 @@ public KotlinClassHeader createHeader() { return null; } - if (headerKind == CLASS && classKind == null) { - // Default class kind is Kind.CLASS - classKind = KotlinClass.Kind.CLASS; - } - if (!AbiVersionUtil.isAbiVersionCompatible(version)) { annotationData = null; } @@ -91,8 +87,8 @@ else if (shouldHaveData() && annotationData == null) { } return new KotlinClassHeader( - headerKind, version, annotationData, strings, classKind, syntheticClassKind, filePartClassNames, multifileClassName, - isInterfaceDefaultImpls + headerKind, version, annotationData, strings, syntheticClassKind, filePartClassNames, multifileClassName, + isInterfaceDefaultImpls, isLocalClass ); } @@ -106,10 +102,15 @@ private boolean shouldHaveData() { @Nullable @Override public AnnotationArgumentVisitor visitAnnotation(@NotNull ClassId classId, @NotNull SourceElement source) { - if (KOTLIN_INTERFACE_DEFAULT_IMPLS.equals(classId.asSingleFqName())) { + FqName fqName = classId.asSingleFqName(); + if (KOTLIN_INTERFACE_DEFAULT_IMPLS.equals(fqName)) { 
isInterfaceDefaultImpls = true; return null; } + else if (KOTLIN_LOCAL_CLASS.equals(fqName)) { + isLocalClass = true; + return null; + } if (headerKind != null) { // Ignore all Kotlin annotations except the first found @@ -269,14 +270,7 @@ public void visitEnd() { private class ClassHeaderReader extends HeaderAnnotationArgumentVisitor { public ClassHeaderReader() { - super(KotlinClass.CLASS_NAME); - } - - @Override - public void visitEnum(@NotNull Name name, @NotNull ClassId enumClassId, @NotNull Name enumEntryName) { - if (KotlinClass.KIND_CLASS_ID.equals(enumClassId) && KIND_FIELD_NAME.equals(name.asString())) { - classKind = valueOfOrNull(KotlinClass.Kind.class, enumEntryName.asString()); - } + super(JvmClassName.byFqNameWithoutInnerClasses(KOTLIN_CLASS)); } } @@ -316,16 +310,4 @@ public void visitEnum(@NotNull Name name, @NotNull ClassId enumClassId, @NotNull } } } - - // This function is needed here because Enum.valueOf() throws exception if there's no such value, - // but we don't want to fail if we're loading the header with an _incompatible_ ABI version - @Nullable - private static <E extends Enum<E>> E valueOfOrNull(@NotNull Class<E> enumClass, @NotNull String entry) { - try { - return Enum.valueOf(enumClass, entry); - } - catch (IllegalArgumentException e) { - return null; - } - } } diff --git a/core/runtime.jvm/src/kotlin/jvm/internal/KotlinClass.java b/core/runtime.jvm/src/kotlin/jvm/internal/KotlinClass.java index 77270d3e02bad..b54eaa1db1586 100644 --- a/core/runtime.jvm/src/kotlin/jvm/internal/KotlinClass.java +++ b/core/runtime.jvm/src/kotlin/jvm/internal/KotlinClass.java @@ -29,12 +29,14 @@ int[] version() default {}; - Kind kind() default Kind.CLASS; - String[] data(); String[] strings(); + @Deprecated + Kind kind() default Kind.CLASS; + + @Deprecated enum Kind { CLASS, diff --git a/core/runtime.jvm/src/kotlin/jvm/internal/KotlinLocalClass.java b/core/runtime.jvm/src/kotlin/jvm/internal/KotlinLocalClass.java new file mode 100644 index 
0000000000000..8086c610a9fa3 --- /dev/null +++ b/core/runtime.jvm/src/kotlin/jvm/internal/KotlinLocalClass.java @@ -0,0 +1,28 @@ +/* + * Copyright 2010-2015 JetBrains s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kotlin.jvm.internal; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface KotlinLocalClass { + int[] version() default {}; +} diff --git a/idea/idea-analysis/src/org/jetbrains/kotlin/idea/decompiler/DecompiledUtils.kt b/idea/idea-analysis/src/org/jetbrains/kotlin/idea/decompiler/DecompiledUtils.kt index 091bf3d523067..ef6644f7dae19 100644 --- a/idea/idea-analysis/src/org/jetbrains/kotlin/idea/decompiler/DecompiledUtils.kt +++ b/idea/idea-analysis/src/org/jetbrains/kotlin/idea/decompiler/DecompiledUtils.kt @@ -21,7 +21,6 @@ import com.intellij.openapi.vfs.VirtualFile import com.intellij.psi.ClassFileViewProvider import org.jetbrains.kotlin.idea.caches.JarUserDataManager import org.jetbrains.kotlin.idea.decompiler.textBuilder.DirectoryBasedClassFinder -import org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinClass import org.jetbrains.kotlin.load.kotlin.KotlinBinaryClassCache import org.jetbrains.kotlin.load.kotlin.KotlinJvmBinaryClass import org.jetbrains.kotlin.load.kotlin.header.KotlinClassHeader @@ -71,9 +70,8 @@ public 
fun isKotlinInternalCompiledFile(file: VirtualFile): Boolean { val header = KotlinBinaryClassCache.getKotlinBinaryClass(file)?.classHeader ?: return false return header.kind == KotlinClassHeader.Kind.SYNTHETIC_CLASS || - (header.kind == KotlinClassHeader.Kind.CLASS && header.classKind != null && header.classKind != KotlinClass.Kind.CLASS) || header.kind == KotlinClassHeader.Kind.MULTIFILE_CLASS_PART || - header.syntheticClassKind == "PACKAGE_PART" + header.isLocalClass || header.syntheticClassKind == "PACKAGE_PART" } public fun isKotlinJavaScriptInternalCompiledFile(file: VirtualFile): Boolean = diff --git a/idea/idea-analysis/src/org/jetbrains/kotlin/idea/decompiler/stubBuilder/KotlinClsStubBuilder.kt b/idea/idea-analysis/src/org/jetbrains/kotlin/idea/decompiler/stubBuilder/KotlinClsStubBuilder.kt index 997e8179139b3..fc5cb8d7fd70a 100644 --- a/idea/idea-analysis/src/org/jetbrains/kotlin/idea/decompiler/stubBuilder/KotlinClsStubBuilder.kt +++ b/idea/idea-analysis/src/org/jetbrains/kotlin/idea/decompiler/stubBuilder/KotlinClsStubBuilder.kt @@ -27,7 +27,6 @@ import org.jetbrains.kotlin.idea.decompiler.isKotlinInternalCompiledFile import org.jetbrains.kotlin.idea.decompiler.textBuilder.DirectoryBasedClassFinder import org.jetbrains.kotlin.idea.decompiler.textBuilder.DirectoryBasedDataFinder import org.jetbrains.kotlin.idea.decompiler.textBuilder.LoggingErrorReporter -import org.jetbrains.kotlin.load.java.JvmAnnotationNames import org.jetbrains.kotlin.load.kotlin.KotlinBinaryClassCache import org.jetbrains.kotlin.load.kotlin.header.isCompatibleClassKind import org.jetbrains.kotlin.load.kotlin.header.isCompatibleFileFacadeKind @@ -82,7 +81,7 @@ public open class KotlinClsStubBuilder : ClsStubBuilder() { createPackageFacadeStub(packageProto, packageFqName, context) } header.isCompatibleClassKind() -> { - if (header.classKind != JvmAnnotationNames.KotlinClass.Kind.CLASS) return null + if (header.isLocalClass) return null val (nameResolver, classProto) = 
JvmProtoBufUtil.readClassDataFrom(annotationData, strings) val context = components.createContext(nameResolver, packageFqName) createTopLevelClassStub(classId, classProto, context) diff --git a/idea/tests/org/jetbrains/kotlin/idea/decompiler/AbstractInternalCompiledClassesTest.kt b/idea/tests/org/jetbrains/kotlin/idea/decompiler/AbstractInternalCompiledClassesTest.kt index 974a0a30eaa60..ffae3ec46d1fe 100644 --- a/idea/tests/org/jetbrains/kotlin/idea/decompiler/AbstractInternalCompiledClassesTest.kt +++ b/idea/tests/org/jetbrains/kotlin/idea/decompiler/AbstractInternalCompiledClassesTest.kt @@ -20,7 +20,6 @@ import com.intellij.openapi.vfs.VirtualFile import com.intellij.psi.PsiManager import org.jetbrains.kotlin.idea.decompiler.navigation.NavigateToDecompiledLibraryTest import org.jetbrains.kotlin.idea.test.JetLightCodeInsightFixtureTestCase -import org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinClass import org.jetbrains.kotlin.load.kotlin.KotlinBinaryClassCache import org.jetbrains.kotlin.load.kotlin.header.KotlinClassHeader import org.junit.Assert @@ -34,11 +33,8 @@ public abstract class AbstractInternalCompiledClassesTest : JetLightCodeInsightF protected fun isSyntheticClass(): VirtualFile.() -> Boolean = isFileWithHeader { it.kind == KotlinClassHeader.Kind.SYNTHETIC_CLASS } - private fun isClassOfKind(kind: KotlinClass.Kind): VirtualFile.() -> Boolean = - isFileWithHeader { it.classKind == kind } - - protected fun doTestNoPsiFilesAreBuiltForLocalClass(kind: KotlinClass.Kind): Unit = - doTestNoPsiFilesAreBuiltFor(kind.name(), isClassOfKind(kind)) + protected fun doTestNoPsiFilesAreBuiltForLocalClass(): Unit = + doTestNoPsiFilesAreBuiltFor("local", isFileWithHeader { it.isLocalClass }) protected fun doTestNoPsiFilesAreBuiltForSyntheticClasses(): Unit = doTestNoPsiFilesAreBuiltFor("synthetic", isSyntheticClass()) diff --git a/idea/tests/org/jetbrains/kotlin/idea/decompiler/InternalCompiledClassesTest.kt 
b/idea/tests/org/jetbrains/kotlin/idea/decompiler/InternalCompiledClassesTest.kt index 37b930949c5cd..5c372773f053c 100644 --- a/idea/tests/org/jetbrains/kotlin/idea/decompiler/InternalCompiledClassesTest.kt +++ b/idea/tests/org/jetbrains/kotlin/idea/decompiler/InternalCompiledClassesTest.kt @@ -20,17 +20,13 @@ import com.intellij.psi.ClassFileViewProvider import com.intellij.testFramework.LightProjectDescriptor import org.jetbrains.kotlin.idea.test.JdkAndMockLibraryProjectDescriptor import org.jetbrains.kotlin.idea.test.PluginTestCaseBase -import org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinClass.Kind.ANONYMOUS_OBJECT -import org.jetbrains.kotlin.load.java.JvmAnnotationNames.KotlinClass.Kind.LOCAL_CLASS public class InternalCompiledClassesTest : AbstractInternalCompiledClassesTest() { private val TEST_DATA_PATH = PluginTestCaseBase.getTestDataPathBase() + "/decompiler/internalClasses" fun testSyntheticClassesAreInvisible() = doTestNoPsiFilesAreBuiltForSyntheticClasses() - fun testLocalClassIsInvisible() = doTestNoPsiFilesAreBuiltForLocalClass(LOCAL_CLASS) - - fun testAnonymousObjectIsInvisible() = doTestNoPsiFilesAreBuiltForLocalClass(ANONYMOUS_OBJECT) + fun testLocalClassesAreInvisible() = doTestNoPsiFilesAreBuiltForLocalClass() fun testInnerClassIsInvisible() = doTestNoPsiFilesAreBuiltFor("inner or nested class") { ClassFileViewProvider.isInnerClass(this) diff --git a/jps-plugin/src/org/jetbrains/kotlin/jps/incremental/IncrementalCacheImpl.kt b/jps-plugin/src/org/jetbrains/kotlin/jps/incremental/IncrementalCacheImpl.kt index 6a1573694f045..093349ccec181 100644 --- a/jps-plugin/src/org/jetbrains/kotlin/jps/incremental/IncrementalCacheImpl.kt +++ b/jps-plugin/src/org/jetbrains/kotlin/jps/incremental/IncrementalCacheImpl.kt @@ -31,7 +31,6 @@ import org.jetbrains.kotlin.jps.build.GeneratedJvmClass import org.jetbrains.kotlin.jps.build.KotlinBuilder import org.jetbrains.kotlin.jps.incremental.storage.BasicMap import 
org.jetbrains.kotlin.jps.incremental.storage.BasicStringMap -import org.jetbrains.kotlin.load.java.JvmAnnotationNames import org.jetbrains.kotlin.load.kotlin.KotlinJvmBinaryClass import org.jetbrains.kotlin.load.kotlin.ModuleMapping import org.jetbrains.kotlin.load.kotlin.PackageClassUtils @@ -216,10 +215,11 @@ public class IncrementalCacheImpl( constantsMap.process(kotlinClass) + inlineFunctionsMap.process(kotlinClass) } - header.isCompatibleClassKind() && JvmAnnotationNames.KotlinClass.Kind.CLASS == header.classKind -> + header.isCompatibleClassKind() && !header.isLocalClass -> { protoMap.process(kotlinClass, isPackage = false) + constantsMap.process(kotlinClass) + inlineFunctionsMap.process(kotlinClass) + } else -> ChangesInfo.NO_CHANGES }
b079f9911a118b01bbef4515b2cea7fa09fdd5de
drools
[DROOLS-128] fix retriving of global entry set from- a Stateless session--
c
https://github.com/kiegroup/drools
diff --git a/drools-compiler/src/test/java/org/drools/compiler/integrationtests/GlobalsTest.java b/drools-compiler/src/test/java/org/drools/compiler/integrationtests/GlobalsTest.java new file mode 100644 index 00000000000..3866be067be --- /dev/null +++ b/drools-compiler/src/test/java/org/drools/compiler/integrationtests/GlobalsTest.java @@ -0,0 +1,112 @@ +/* + * Copyright 2013 JBoss Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.drools.compiler.integrationtests; + +import org.drools.core.impl.StatelessKnowledgeSessionImpl; +import org.kie.api.io.ResourceType; +import org.drools.core.base.MapGlobalResolver; +import org.junit.Test; +import org.kie.internal.KnowledgeBase; +import org.kie.internal.KnowledgeBaseFactory; +import org.kie.internal.builder.KnowledgeBuilder; +import org.kie.internal.builder.KnowledgeBuilderFactory; +import org.kie.internal.io.ResourceFactory; +import org.kie.internal.runtime.StatefulKnowledgeSession; +import org.kie.internal.runtime.StatelessKnowledgeSession; + +import java.util.Map; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertTrue; + + +public class GlobalsTest { + + @Test + public void testGlobalAccess() { + + String drl = "import org.drools.core.base.MapGlobalResolver;\n" + + "global java.lang.String myGlobal;\n" + + "global String unused; \n" ; + + KnowledgeBuilder kbuilder = KnowledgeBuilderFactory.newKnowledgeBuilder(); + 
kbuilder.add(ResourceFactory.newByteArrayResource(drl.getBytes()), ResourceType.DRL); + KnowledgeBase kbase = KnowledgeBaseFactory.newKnowledgeBase(); + kbase.addKnowledgePackages(kbuilder.getKnowledgePackages()); + StatefulKnowledgeSession session1 = kbase.newStatefulKnowledgeSession(); + + String sample = "default string"; + + // Testing 1. + System.out.println("Start testing 1."); + session1.setGlobal("myGlobal", "Testing 1"); + session1.insert(sample); + session1.fireAllRules(); + Map.Entry[] entries1 = ((MapGlobalResolver) session1.getGlobals()).getGlobals(); + assertEquals( 1, entries1.length ); + assertEquals( entries1[0].getValue(), "Testing 1" ); + assertEquals( 1, session1.getGlobals().getGlobalKeys().size() ); + assertTrue( session1.getGlobals().getGlobalKeys().contains("myGlobal") ); + session1.dispose(); + + // Testing 2. + System.out.println("Start testing 2."); + StatelessKnowledgeSession session2 = session1.getKieBase().newStatelessKnowledgeSession(); + session2.setGlobal("myGlobal", "Testing 2"); + session2.execute(sample); + Map.Entry[] entries2 = ((MapGlobalResolver) session2.getGlobals()).getGlobals(); + assertEquals(1, entries2.length); + assertEquals( entries2[0].getValue(), "Testing 2" ); + assertEquals( 1, session2.getGlobals().getGlobalKeys().size() ); + assertTrue( session2.getGlobals().getGlobalKeys().contains("myGlobal") ); + + // Testing 3. 
+ System.out.println("Start testing 3."); + StatefulKnowledgeSession session3 = ((StatelessKnowledgeSessionImpl) session2).newWorkingMemory(); + session3.insert(sample); + session3.fireAllRules(); + Map.Entry[] entries3 = ((MapGlobalResolver) session3.getGlobals()).getGlobals(); + assertEquals( 1, entries3.length ); + assertEquals( entries3[0].getValue(), "Testing 2" ); + assertEquals( 1, session3.getGlobals().getGlobalKeys().size() ); + assertTrue( session3.getGlobals().getGlobalKeys().contains("myGlobal") ); + + + session3.setGlobal("myGlobal", "Testing 3 Over"); + entries3 = ((MapGlobalResolver) session3.getGlobals()).getGlobals(); + assertEquals(1, entries3.length); + assertEquals( entries3[0].getValue(), "Testing 3 Over" ); + assertEquals( 1, session3.getGlobals().getGlobalKeys().size() ); + assertTrue( session3.getGlobals().getGlobalKeys().contains("myGlobal") ); + + session3.dispose(); + + // Testing 4. + System.out.println("Start testing 4."); + StatefulKnowledgeSession session4 = ((StatelessKnowledgeSessionImpl) session2).newWorkingMemory(); + session4.setGlobal("myGlobal", "Testing 4"); + session4.insert(sample); + session4.fireAllRules(); + Map.Entry[] entries4 = ((MapGlobalResolver) session4.getGlobals()).getGlobals(); + assertEquals(1, entries4.length); + assertEquals( entries4[0].getValue(), "Testing 4" ); + assertEquals( 1, session4.getGlobals().getGlobalKeys().size() ); + assertTrue( session4.getGlobals().getGlobalKeys().contains("myGlobal") ); + + session4.dispose(); + } +} diff --git a/drools-core/src/main/java/org/drools/core/base/MapGlobalResolver.java b/drools-core/src/main/java/org/drools/core/base/MapGlobalResolver.java index 505bb1fede8..a0cf91e9034 100644 --- a/drools-core/src/main/java/org/drools/core/base/MapGlobalResolver.java +++ b/drools-core/src/main/java/org/drools/core/base/MapGlobalResolver.java @@ -20,7 +20,10 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +import java.util.Collection; 
+import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Map.Entry; @@ -34,7 +37,7 @@ public class MapGlobalResolver private static final long serialVersionUID = 510l; - private Map map; + private Map<String,Object> map; private Globals delegate; @@ -59,7 +62,19 @@ public void writeExternal(ObjectOutput out) throws IOException { public void setDelegate(Globals delegate) { this.delegate = delegate; } - + + public Collection<String> getGlobalKeys() { + if ( delegate == null ) { + return Collections.unmodifiableCollection(map.keySet()); + } else if ( delegate != null && map.size() == 0 ) { + return Collections.unmodifiableCollection( ((MapGlobalResolver) delegate).map.keySet() ); + } else { + Collection<String> combined = new HashSet<String>( map.keySet() ); + combined.addAll( ((MapGlobalResolver) delegate).map.keySet() ); + return Collections.unmodifiableCollection( combined ); + } + } + public Object get(String identifier) { return resolveGlobal( identifier ); } @@ -81,14 +96,23 @@ public void setGlobal(String identifier, Object value) { value ); } - public Entry[] getGlobals() { - return (Entry[]) this.map.entrySet().toArray(new Entry[this.map.size()]); + public Entry<String,Object>[] getGlobals() { + if ( delegate == null ) { + return (Entry[]) this.map.entrySet().toArray(new Entry[this.map.size()]); + } else if ( delegate != null && map.size() == 0 ) { + Map<String,Object> delegateMap = ((MapGlobalResolver) delegate).map; + return (Entry[]) delegateMap.entrySet().toArray(new Entry[delegateMap.size()]); + } else { + Map<String,Object> combined = new HashMap<String,Object>( ((MapGlobalResolver) delegate).map ); + combined.putAll( map ); + return (Entry[]) combined.entrySet().toArray(new Entry[combined.size()]); + } } public GlobalResolver clone() { - Map clone = new HashMap(); + Map<String,Object> clone = new HashMap<String,Object>(); - for ( Entry entry : getGlobals() ) { + for ( 
Entry<String,Object> entry : getGlobals() ) { clone.put( entry.getKey(), entry.getValue() ); } return new MapGlobalResolver( clone );
384542ccb54c28d73d9f368f2375ef60d99127ac
restlet-framework-java
- Fixed error in Conditions.getStatus()- sometimes returning 304 for methods other than HEAD and GET.- Contributed by Stephan Koops.--
c
https://github.com/restlet/restlet-framework-java
diff --git a/build/tmpl/text/changes.txt b/build/tmpl/text/changes.txt index e78b997580..783366ca6f 100644 --- a/build/tmpl/text/changes.txt +++ b/build/tmpl/text/changes.txt @@ -33,6 +33,9 @@ Changes log the parent resource has no 'id' attribute. Also supports root resources with no leading slash. Patches contributed by Vincent Ricard. + - Fixed error in Conditions.getStatus() sometimes returning + 304 for methods other than HEAD and GET. Contributed by + Stephan Koops. - Misc - Updated db4o to version 7.4.58. - Updated FreeMarker to version 2.3.14. diff --git a/modules/org.restlet/src/org/restlet/data/Conditions.java b/modules/org.restlet/src/org/restlet/data/Conditions.java index 0c717a46ff..d862e06a5e 100644 --- a/modules/org.restlet/src/org/restlet/data/Conditions.java +++ b/modules/org.restlet/src/org/restlet/data/Conditions.java @@ -233,7 +233,11 @@ public Status getStatus(Method method, Representation representation) { .getModificationDate())); if (!isModifiedSince) { - result = Status.REDIRECTION_NOT_MODIFIED; + if (Method.GET.equals(method) || Method.HEAD.equals(method)) { + result = Status.REDIRECTION_NOT_MODIFIED; + } else { + result = Status.CLIENT_ERROR_PRECONDITION_FAILED; + } } } } diff --git a/modules/org.restlet/src/org/restlet/data/Request.java b/modules/org.restlet/src/org/restlet/data/Request.java index cc2e0ed6ad..9c81cbd5d5 100644 --- a/modules/org.restlet/src/org/restlet/data/Request.java +++ b/modules/org.restlet/src/org/restlet/data/Request.java @@ -509,9 +509,11 @@ public void setOriginalRef(Reference originalRef) { /** * Sets the ranges to return from the target resource's representation. * + * @param ranges + * The ranges. */ - public void setRanges(List<Range> range) { - this.ranges = range; + public void setRanges(List<Range> ranges) { + this.ranges = ranges; } /**
90d8dade254b4f34fd28d498ce529efd2177c606
spring-framework
fixed bug related to array autogrow--
c
https://github.com/spring-projects/spring-framework
diff --git a/org.springframework.beans/src/main/java/org/springframework/beans/BeanWrapperImpl.java b/org.springframework.beans/src/main/java/org/springframework/beans/BeanWrapperImpl.java index 46424861a78a..8a4cf937be05 100644 --- a/org.springframework.beans/src/main/java/org/springframework/beans/BeanWrapperImpl.java +++ b/org.springframework.beans/src/main/java/org/springframework/beans/BeanWrapperImpl.java @@ -848,8 +848,9 @@ private Object growArrayIfNecessary(Object array, int index, String name) { for (int i = length; i < Array.getLength(newArray); i++) { Array.set(newArray, i, newValue(componentType, name)); } + // TODO this is not efficient because conversion may create a copy ... set directly because we know its assignable. setPropertyValue(name, newArray); - return newArray; + return getPropertyValue(name); } else { return array; @@ -953,10 +954,6 @@ else if (propValue.getClass().isArray()) { // TODO reduce this grow algorithm along side the null gap algorithm for setting lists below ... 
the two are inconsistent propValue = growArrayIfNecessary(propValue, arrayIndex, actualName); Array.set(propValue, arrayIndex, convertedValue); - PropertyValue newValue = new PropertyValue(actualName, propValue); - newValue.resolvedDescriptor = pd; - newValue.conversionNecessary = false; - setPropertyValue(newValue); } catch (IndexOutOfBoundsException ex) { throw new InvalidPropertyException(getRootClass(), this.nestedPath + propertyName, diff --git a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/mvc/annotation/Spr7839Tests.java b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/mvc/annotation/Spr7839Tests.java index 7f04d7b5a6b4..299c99194ad0 100644 --- a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/mvc/annotation/Spr7839Tests.java +++ b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/mvc/annotation/Spr7839Tests.java @@ -8,7 +8,6 @@ import java.util.Map; import org.junit.Before; -import org.junit.Ignore; import org.junit.Test; import org.springframework.core.convert.converter.Converter; import org.springframework.core.convert.support.ConversionServiceFactory; @@ -92,7 +91,6 @@ public void listOfListsElementAutogrowObject() throws Exception { } @Test - @Ignore public void arrayOfLists() throws Exception { // TODO TypeDescriptor not capable of accessing nested element type for arrays request.setRequestURI("/nested/arrayOfLists"); @@ -101,7 +99,6 @@ public void arrayOfLists() throws Exception { } @Test - @Ignore public void map() throws Exception { request.setRequestURI("/nested/map"); request.addParameter("nested.map['apple'].foo", "bar");
2eeb4ebd8c3af6f5838efe510da4c92ceb0ceb35
elasticsearch
improve memcached test--
p
https://github.com/elastic/elasticsearch
diff --git a/plugins/transport/memcached/src/test/java/org/elasticsearch/memcached/test/AbstractMemcachedActionsTests.java b/plugins/transport/memcached/src/test/java/org/elasticsearch/memcached/test/AbstractMemcachedActionsTests.java index 71c01e745a039..2a5bcbd2c7c72 100644 --- a/plugins/transport/memcached/src/test/java/org/elasticsearch/memcached/test/AbstractMemcachedActionsTests.java +++ b/plugins/transport/memcached/src/test/java/org/elasticsearch/memcached/test/AbstractMemcachedActionsTests.java @@ -21,6 +21,7 @@ import net.spy.memcached.MemcachedClient; import org.elasticsearch.node.Node; +import org.hamcrest.Matchers; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -31,6 +32,8 @@ import static org.elasticsearch.common.xcontent.XContentFactory.*; import static org.elasticsearch.node.NodeBuilder.*; +import static org.hamcrest.MatcherAssert.*; +import static org.hamcrest.Matchers.*; /** * @author kimchy (shay.banon) @@ -56,20 +59,29 @@ public void tearDown() { } @Test public void testSimpleOperations() throws Exception { - Future setResult = memcachedClient.set("/test/person/1", 0, jsonBuilder().startObject().field("test", "value").endObject().copiedBytes()); - setResult.get(10, TimeUnit.SECONDS); + Future<Boolean> setResult = memcachedClient.set("/test/person/1", 0, jsonBuilder().startObject().field("test", "value").endObject().copiedBytes()); + assertThat(setResult.get(10, TimeUnit.SECONDS), equalTo(true)); String getResult = (String) memcachedClient.get("/_refresh"); System.out.println("REFRESH " + getResult); + assertThat(getResult, Matchers.containsString("\"total\":10")); + assertThat(getResult, Matchers.containsString("\"successful\":5")); + assertThat(getResult, Matchers.containsString("\"failed\":0")); getResult = (String) memcachedClient.get("/test/person/1"); System.out.println("GET " + getResult); + assertThat(getResult, Matchers.containsString("\"_index\":\"test\"")); 
+ assertThat(getResult, Matchers.containsString("\"_type\":\"person\"")); + assertThat(getResult, Matchers.containsString("\"_id\":\"1\"")); - Future deleteResult = memcachedClient.delete("/test/person/1"); - deleteResult.get(10, TimeUnit.SECONDS); + Future<Boolean> deleteResult = memcachedClient.delete("/test/person/1"); + assertThat(deleteResult.get(10, TimeUnit.SECONDS), equalTo(true)); getResult = (String) memcachedClient.get("/_refresh"); System.out.println("REFRESH " + getResult); + assertThat(getResult, Matchers.containsString("\"total\":10")); + assertThat(getResult, Matchers.containsString("\"successful\":5")); + assertThat(getResult, Matchers.containsString("\"failed\":0")); getResult = (String) memcachedClient.get("/test/person/1"); System.out.println("GET " + getResult);
495847ad95644bae1c1776b904297579d0ffe62d
fenix-framework$fenix-framework
[bplustree] New Feature: Special version of B+Trees to hold domain objects. * The serialization of the backing maps is done via JSON objects, avoiding serialization of byte[]. This shows greater performance as well as allowing the trees to be portable across backends and Framework versions * The new tree shares the code base with the regular tree, differing only in the serialization.
a
https://github.com/fenix-framework/fenix-framework
diff --git a/core/adt/bplustree/pom.xml b/core/adt/bplustree/pom.xml index fc9fffaf3..9dc8f6abf 100644 --- a/core/adt/bplustree/pom.xml +++ b/core/adt/bplustree/pom.xml @@ -54,5 +54,9 @@ <artifactId>fenix-framework-core-api</artifactId> <version>${project.version}</version> </dependency> + <dependency> + <groupId>com.google.code.gson</groupId> + <artifactId>gson</artifactId> + </dependency> </dependencies> </project> diff --git a/core/adt/bplustree/src/main/dml/fenix-framework-adt-bplustree.dml b/core/adt/bplustree/src/main/dml/fenix-framework-adt-bplustree.dml index c6a4f73d7..8a07443d8 100644 --- a/core/adt/bplustree/src/main/dml/fenix-framework-adt-bplustree.dml +++ b/core/adt/bplustree/src/main/dml/fenix-framework-adt-bplustree.dml @@ -9,14 +9,34 @@ valueType java.util.TreeMap as GenericTreeMap { internalizeWith pt.ist.fenixframework.adt.bplustree.AbstractNode.internalizeTreeMap(); } +valueType java.util.TreeMap as DomainObjectMap { + externalizeWith { + String pt.ist.fenixframework.adt.bplustree.DomainLeafNode.externalizeDomainObjectMap(); + } + internalizeWith pt.ist.fenixframework.adt.bplustree.DomainLeafNode.internalizeDomainObjectMap(); +} + +valueType java.util.TreeMap as OidIndexedMap { + externalizeWith { + String pt.ist.fenixframework.adt.bplustree.DomainInnerNode.externalizeOidIndexedMap(); + } + internalizeWith pt.ist.fenixframework.adt.bplustree.DomainInnerNode.internalizeOidIndexedMap(); +} + class BPlusTree {} +class DomainBPlusTree extends BPlusTree {} + class AbstractNode {} class LeafNode extends AbstractNode { // key: any Serializable and Comparable // value: any Serializable - GenericTreeMap<Comparable,java.io.Serializable> entries; + GenericTreeMap<Comparable, ? 
extends java.io.Serializable> entries; +} + +class DomainLeafNode extends LeafNode { + DomainObjectMap<Comparable, pt.ist.fenixframework.core.AbstractDomainObject> domainEntries; } class InnerNode extends AbstractNode { @@ -29,6 +49,10 @@ class InnerNode extends AbstractNode { GenericTreeMap<Comparable,AbstractNode> subNodes; } +class DomainInnerNode extends InnerNode { + OidIndexedMap<Comparable, AbstractNode> indexedSubNodes; +} + relation AdtBPlusTreeHasRootNode { BPlusTree playsRole; AbstractNode playsRole root; diff --git a/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/BPlusTree.java b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/BPlusTree.java index 9941e6e77..fed687d0c 100644 --- a/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/BPlusTree.java +++ b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/BPlusTree.java @@ -56,6 +56,11 @@ protected Object writeReplace() throws ObjectStreamException { static final Comparable LAST_KEY = new ComparableLastKey(); + /** + * Textual representation of the LastKey. 
+ */ + static final String LAST_KEY_REPRESENTATION = "_$LAST_KEY$_"; + /* Special comparator that takes into account LAST_KEY */ private static class ComparatorSupportingLastKey implements Comparator<Comparable>, Serializable { // only LAST_KEY knows how to compare itself with others, so we must check for it before @@ -99,7 +104,7 @@ public BPlusTree() { initRoot(); } - private void initRoot() { + protected void initRoot() { this.setRoot(new LeafNode()); } diff --git a/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/DomainBPlusTree.java b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/DomainBPlusTree.java new file mode 100644 index 000000000..8cab4231f --- /dev/null +++ b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/DomainBPlusTree.java @@ -0,0 +1,57 @@ +package pt.ist.fenixframework.adt.bplustree; + +import java.io.Serializable; + +import pt.ist.fenixframework.NoDomainMetaObjects; +import pt.ist.fenixframework.core.AbstractDomainObject; + +/** + * {@link BPlusTree} specialized for storing {@link AbstractDomainObject}. + * Uses the object's Oid as the Key, and the object as the Value. + * + * The serialization of a {@link DomainBPlusTree} is done using JSON objects, + * thus allowing for better performance and human-readable representation. + * + * @author João Carvalho ([email protected]) + * + */ +@NoDomainMetaObjects +public class DomainBPlusTree<T extends AbstractDomainObject> extends DomainBPlusTree_Base { + + public DomainBPlusTree() { + super(); + } + + @Override + protected void initRoot() { + this.setRoot(new DomainLeafNode()); + } + + /** + * Inserts the given {@link AbstractDomainObject} into the tree. 
+ * + * @param domainObject + * The object to be inserted + * @return Whether the object was inserted + */ + public boolean insert(AbstractDomainObject domainObject) { + return super.insert(domainObject.getOid(), domainObject); + } + + /** + * + * Inserting {@link Serializable} into a {@link DomainBPlusTree} is not valid. + * Throws {@link UnsupportedOperationException}. + * Use {@code insert(AbstractDomainObject)} instead. + */ + @Override + public boolean insert(Comparable key, Serializable value) { + if (value instanceof AbstractDomainObject) { + if (((AbstractDomainObject) value).getOid().equals(key)) { + return super.insert(key, value); + } + } + throw new UnsupportedOperationException("Cannot insert Serializable in DomainBPlusTree."); + } + +} diff --git a/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/DomainInnerNode.java b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/DomainInnerNode.java new file mode 100644 index 000000000..d173c33d9 --- /dev/null +++ b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/DomainInnerNode.java @@ -0,0 +1,149 @@ +package pt.ist.fenixframework.adt.bplustree; + +import java.util.Map.Entry; +import java.util.TreeMap; + +import pt.ist.fenixframework.FenixFramework; +import pt.ist.fenixframework.NoDomainMetaObjects; +import pt.ist.fenixframework.backend.BackEnd; +import pt.ist.fenixframework.core.AbstractDomainObject; + +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; +import com.google.gson.JsonPrimitive; + +/** + * {@link InnerNode} specialized to hold Oids as Keys. + * + * The serialization of {@link DomainInnerNode} is done using a JSON object, + * containing the External Id of the key objects, and the external id of + * the {@link AbstractNode}. 
+ * + * @author João Carvalho ([email protected]) + * + */ +@NoDomainMetaObjects +public class DomainInnerNode extends DomainInnerNode_Base { + + /* + * DomainInnerNode constructors. + * + * Due to the limitations of constructors in _Base classes, these have + * to be copied from {@link InnerNode} :( + */ + + DomainInnerNode(AbstractNode leftNode, AbstractNode rightNode, Comparable splitKey) { + TreeMap<Comparable, AbstractNode> newMap = + new TreeMap<Comparable, AbstractNode>(BPlusTree.COMPARATOR_SUPPORTING_LAST_KEY); + newMap.put(splitKey, leftNode); + newMap.put(BPlusTree.LAST_KEY, rightNode); + + setSubNodes(newMap); + leftNode.setParent(this); + rightNode.setParent(this); + } + + private DomainInnerNode(TreeMap<Comparable, AbstractNode> subNodes) { + setSubNodes(subNodes); + for (AbstractNode subNode : subNodes.values()) { // smf: either don't do this or don't setParent when making new + subNode.setParent(this); + } + } + + /* + * Overriden entries getter and setter. + * This allows the {@link InnerNode} to use the correct serialized + * form without changing the parent's serialization. + */ + + @Override + public TreeMap<Comparable, AbstractNode> getSubNodes() { + return getIndexedSubNodes(); + } + + @Override + public void setSubNodes(TreeMap<Comparable, AbstractNode> subNodes) { + setIndexedSubNodes(subNodes); + } + + /* + * Node instantiators. + * + * This allows for the algorithm to remain in {@link InnerNode} while + * still creating the correct subclasses. + */ + + @Override + protected InnerNode createNode(AbstractNode leftNode, AbstractNode rightNode, Comparable splitKey) { + return new DomainInnerNode(leftNode, rightNode, splitKey); + } + + @Override + protected InnerNode createNodeWithSubNodes(TreeMap<Comparable, AbstractNode> subNodes) { + return new DomainInnerNode(subNodes); + } + + /* + * Serialization code + */ + + /** + * The {@link JsonParser} to be used. Because its instances are + * stateless we can use only one parser. 
+ */ + private static final JsonParser parser = new JsonParser(); + + /** + * Serializes the given map to a JSON object containing a mapping between the + * External Ids of the Key and Value objects. + * + * Uses {@link BPlusTree.LAST_KEY_REPRESENTATION} as a well-known value to + * represent the Last Key. + * + * @param map + * Map to serialize. Must be in the form [Oid, AbstractNode] + * @return + * A JSON Object containing the mapping + */ + public static String externalizeOidIndexedMap(TreeMap map) { + BackEnd backend = FenixFramework.getConfig().getBackEnd(); + JsonObject jsonObject = new JsonObject(); + for (Object obj : map.entrySet()) { + Entry<Comparable, AbstractNode> entry = (Entry<Comparable, AbstractNode>) obj; + String key; + if (entry.getKey().equals(BPlusTree.LAST_KEY)) { + key = BPlusTree.LAST_KEY_REPRESENTATION; + } else { + key = backend.fromOid(entry.getKey()).getExternalId(); + } + jsonObject.add(key, new JsonPrimitive(entry.getValue().getExternalId())); + } + return jsonObject.toString(); + } + + /** + * Internalizes the given JSON object. 
+ * + * @param externalizedMap + * A JSON array returned by {@code externalizeOidIndexedMap} + * @return + * A TreeMap containing pairs [Oid, AbstractNode] + */ + public static TreeMap internalizeOidIndexedMap(String externalizedMap) { + TreeMap map = new TreeMap(BPlusTree.COMPARATOR_SUPPORTING_LAST_KEY); + JsonObject object = parser.parse(externalizedMap).getAsJsonObject(); + for (Entry<String, JsonElement> entry : object.entrySet()) { + Comparable key; + if (entry.getKey().equals(BPlusTree.LAST_KEY_REPRESENTATION)) { + key = BPlusTree.LAST_KEY; + } else { + key = FenixFramework.<AbstractDomainObject> getDomainObject(entry.getKey()).getOid(); + } + AbstractNode value = FenixFramework.getDomainObject(entry.getValue().getAsString()); + map.put(key, value); + } + return map; + } + +} diff --git a/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/DomainLeafNode.java b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/DomainLeafNode.java new file mode 100644 index 000000000..41d006fbe --- /dev/null +++ b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/DomainLeafNode.java @@ -0,0 +1,124 @@ +package pt.ist.fenixframework.adt.bplustree; + +import java.io.Serializable; +import java.util.TreeMap; + +import pt.ist.fenixframework.DomainObject; +import pt.ist.fenixframework.FenixFramework; +import pt.ist.fenixframework.NoDomainMetaObjects; +import pt.ist.fenixframework.core.AbstractDomainObject; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonParser; +import com.google.gson.JsonPrimitive; + +/** + * {@link LeafNode} specialized to hold a mapping of [Oid, DomainObject] + * + * The serialization of {@link DomainLeafNode} is done using a JSON array, + * containing the External Id of the stored objects. 
+ * + * @author João Carvalho ([email protected]) + * + */ +@NoDomainMetaObjects +public class DomainLeafNode extends DomainLeafNode_Base { + + /** + * Initialize a {@link DomainLeafNode} with no entries. + * + * @see LeafNode + */ + public DomainLeafNode() { + super(); + } + + /** + * Initialize a {@link DomainLeafNode} with the given entries. + * + * @see LeafNode + */ + private DomainLeafNode(TreeMap<Comparable, Serializable> entries) { + setEntries(entries); + } + + /* + * Overriden entries getter and setter. + * This allows the {@link LeafNode} to use the correct serialized + * form without changing the parent's serialization. + */ + + @Override + public TreeMap<Comparable, ? extends Serializable> getEntries() { + return getDomainEntries(); + } + + @Override + public void setEntries(TreeMap<Comparable, ? extends Serializable> entries) { + setDomainEntries((TreeMap<Comparable, AbstractDomainObject>) entries); + } + + /* + * Node instantiators. + * + * This allows for the algorithm to remain in {@link LeafNode} while + * still creating the correct subclasses. + */ + + @Override + protected LeafNode createNodeWithEntries(TreeMap<Comparable, Serializable> entries) { + return new DomainLeafNode(entries); + } + + @Override + protected InnerNode createInnerNode(AbstractNode leftNode, AbstractNode rightNode, Comparable splitKey) { + return new DomainInnerNode(leftNode, rightNode, splitKey); + } + + /* + * Serialization code + */ + + /** + * The {@link JsonParser} to be used. Because its instances are + * stateless we can use only one parser. + */ + private static final JsonParser parser = new JsonParser(); + + /** + * Serializes the given map to a JSON array containing the ExternalId of + * the values. + * + * @param map + * Map to serialize. 
Must be in the form [Oid, DomainObject] + * @return + * A JSON array containing the External Ids + */ + public static String externalizeDomainObjectMap(TreeMap map) { + JsonArray array = new JsonArray(); + for (Object obj : map.values()) { + DomainObject domainObject = (DomainObject) obj; + array.add(new JsonPrimitive(domainObject.getExternalId())); + } + return array.toString(); + } + + /** + * Internalizes the given JSON array. + * + * @param externalizedMap + * A JSON array returned by {@code externalizeDomainObjectMap} + * @return + * A TreeMap containing pairs [Oid, DomainObject] + */ + public static TreeMap internalizeDomainObjectMap(String externalizedMap) { + TreeMap map = new TreeMap(); + JsonArray array = parser.parse(externalizedMap).getAsJsonArray(); + for (JsonElement element : array) { + AbstractDomainObject ado = FenixFramework.getDomainObject(element.getAsString()); + map.put(ado.getOid(), ado); + } + return map; + } +} diff --git a/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/InnerNode.java b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/InnerNode.java index 230b225a6..eddd07d74 100644 --- a/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/InnerNode.java +++ b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/InnerNode.java @@ -19,7 +19,7 @@ @NoDomainMetaObjects public class InnerNode extends InnerNode_Base { - private InnerNode() { + protected InnerNode() { super(); } @@ -41,6 +41,14 @@ private InnerNode(TreeMap<Comparable, AbstractNode> subNodes) { } } + protected InnerNode createNode(AbstractNode leftNode, AbstractNode rightNode, Comparable splitKey) { + return new InnerNode(leftNode, rightNode, splitKey); + } + + protected InnerNode createNodeWithSubNodes(TreeMap<Comparable, AbstractNode> subNodes) { + return new InnerNode(subNodes); + } + private TreeMap<Comparable, AbstractNode> duplicateMap() { return new TreeMap<Comparable, AbstractNode>(getSubNodes()); } 
@@ -71,14 +79,14 @@ AbstractNode rebase(AbstractNode subLeftNode, AbstractNode subRightNode, Compara // this level. It will be moved up. TreeMap<Comparable, AbstractNode> leftSubNodes = new TreeMap<Comparable, AbstractNode>(newMap.headMap(keyToSplit)); leftSubNodes.put(BPlusTree.LAST_KEY, subNodeToMoveLeft); - InnerNode leftNode = new InnerNode(leftSubNodes); + InnerNode leftNode = createNodeWithSubNodes(leftSubNodes); subNodeToMoveLeft.setParent(leftNode); // smf: maybe it is not necessary because of the code in the constructor - InnerNode rightNode = new InnerNode(new TreeMap<Comparable, AbstractNode>(newMap.tailMap(nextKey))); + InnerNode rightNode = createNodeWithSubNodes(new TreeMap<Comparable, AbstractNode>(newMap.tailMap(nextKey))); // propagate split to parent if (this.getParent() == null) { - InnerNode newRoot = new InnerNode(leftNode, rightNode, keyToSplit); + InnerNode newRoot = createNode(leftNode, rightNode, keyToSplit); return newRoot; } else { return this.getParent().rebase(leftNode, rightNode, keyToSplit); diff --git a/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/LeafNode.java b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/LeafNode.java index 5a6461297..aba0145e7 100644 --- a/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/LeafNode.java +++ b/core/adt/bplustree/src/main/java/pt/ist/fenixframework/adt/bplustree/LeafNode.java @@ -38,13 +38,13 @@ public AbstractNode insert(Comparable key, Serializable value) { Comparable keyToSplit = findRightMiddlePosition(localMap.keySet()); // split node in two - LeafNode leftNode = new LeafNode(new TreeMap<Comparable, Serializable>(localMap.headMap(keyToSplit))); - LeafNode rightNode = new LeafNode(new TreeMap<Comparable, Serializable>(localMap.tailMap(keyToSplit))); + LeafNode leftNode = createNodeWithEntries(new TreeMap<Comparable, Serializable>(localMap.headMap(keyToSplit))); + LeafNode rightNode = createNodeWithEntries(new TreeMap<Comparable, 
Serializable>(localMap.tailMap(keyToSplit))); fixLeafNodesListAfterSplit(leftNode, rightNode); // propagate split to parent if (getParent() == null) { // make new root node - InnerNode newRoot = new InnerNode(leftNode, rightNode, keyToSplit); + InnerNode newRoot = createInnerNode(leftNode, rightNode, keyToSplit); return newRoot; } else { // leftNode.parent = getParent(); @@ -54,6 +54,14 @@ public AbstractNode insert(Comparable key, Serializable value) { } } + protected LeafNode createNodeWithEntries(TreeMap<Comparable, Serializable> entries) { + return new LeafNode(entries); + } + + protected InnerNode createInnerNode(AbstractNode leftNode, AbstractNode rightNode, Comparable splitKey) { + return new InnerNode(leftNode, rightNode, splitKey); + } + private <T extends Comparable> Comparable findRightMiddlePosition(Collection<T> keys) { Iterator<T> keysIterator = keys.iterator(); @@ -64,7 +72,7 @@ private <T extends Comparable> Comparable findRightMiddlePosition(Collection<T> } private TreeMap<Comparable, Serializable> justInsert(Comparable key, Serializable value) { - TreeMap<Comparable, Serializable> localEntries = this.getEntries(); + TreeMap<Comparable, ? extends Serializable> localEntries = this.getEntries(); // this test is performed because we need to return a new structure in // case an update occurs. Value types must be immutable. @@ -118,7 +126,7 @@ void delete() { } private TreeMap<Comparable, Serializable> justRemove(Comparable key) { - TreeMap<Comparable, Serializable> localEntries = this.getEntries(); + TreeMap<Comparable, ? extends Serializable> localEntries = this.getEntries(); // this test is performed because we need to return a new structure in // case an update occurs. Value types must be immutable. @@ -202,7 +210,7 @@ public Serializable getIndex(int index) { } if (index < shallowSize()) { // the required position is here - Iterator<Serializable> values = this.getEntries().values().iterator(); + Iterator<? 
extends Serializable> values = this.getEntries().values().iterator(); for (int i = 0; i < index; i++) { values.next(); } @@ -311,7 +319,7 @@ private class LeafNodeValuesIterator extends GenericLeafNodeIterator<Serializabl @Override protected Iterator<Serializable> getInternalIterator(LeafNode leafNode) { - return leafNode.getEntries().values().iterator(); + return (Iterator<Serializable>) leafNode.getEntries().values().iterator(); } } @@ -339,7 +347,7 @@ public String dump(int level, boolean dumpKeysOnly, boolean dumpNodeIds) { str.append("[: "); } - for (Map.Entry<Comparable, Serializable> entry : this.getEntries().entrySet()) { + for (Map.Entry<Comparable, ? extends Serializable> entry : this.getEntries().entrySet()) { Comparable key = entry.getKey(); Serializable value = entry.getValue(); str.append("(" + key); diff --git a/pom.xml b/pom.xml index 718f77e15..ba3a8d98c 100644 --- a/pom.xml +++ b/pom.xml @@ -34,6 +34,7 @@ <version.commons.codec>1.7</version.commons.codec> <version.commons.io>2.4</version.commons.io> <version.commons.lang>2.6</version.commons.lang> + <version.com.google.code.gson>2.2.3</version.com.google.code.gson> <version.dap-framework>2.0</version.dap-framework> <version.hazelcast.api>2.5.1</version.hazelcast.api> <version.hibernate.ogm.core>${version.hibernate.ogm}</version.hibernate.ogm.core> @@ -143,7 +144,7 @@ <configuration> <pushChanges>false</pushChanges> <tagNameFormat>v@{project.version}</tagNameFormat> - + <!-- Ensure that every module is versioned together --> <autoVersionSubmodules>true</autoVersionSubmodules> </configuration> @@ -313,6 +314,11 @@ <artifactId>hazelcast</artifactId> <version>${version.hazelcast.api}</version> </dependency> + <dependency> + <groupId>com.google.code.gson</groupId> + <artifactId>gson</artifactId> + <version>${version.com.google.code.gson}</version> + </dependency> </dependencies> </dependencyManagement>
c5f5ced6b4280c73da019703957c8859c14fbb43
buddycloud$buddycloud-server-java
Completed <recent-items> functionality
p
https://github.com/buddycloud/buddycloud-server-java
diff --git a/src/main/java/org/buddycloud/channelserver/packetprocessor/iq/namespace/pubsub/get/RecentItemsGet.java b/src/main/java/org/buddycloud/channelserver/packetprocessor/iq/namespace/pubsub/get/RecentItemsGet.java index 0e0c9947..7fc724c6 100644 --- a/src/main/java/org/buddycloud/channelserver/packetprocessor/iq/namespace/pubsub/get/RecentItemsGet.java +++ b/src/main/java/org/buddycloud/channelserver/packetprocessor/iq/namespace/pubsub/get/RecentItemsGet.java @@ -1,5 +1,6 @@ package org.buddycloud.channelserver.packetprocessor.iq.namespace.pubsub.get; +import java.io.StringReader; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; @@ -9,14 +10,19 @@ import org.apache.log4j.Logger; import org.buddycloud.channelserver.channel.ChannelManager; import org.buddycloud.channelserver.channel.Conf; +import org.buddycloud.channelserver.db.CloseableIterator; import org.buddycloud.channelserver.db.exception.NodeStoreException; import org.buddycloud.channelserver.packetprocessor.iq.namespace.pubsub.JabberPubsub; import org.buddycloud.channelserver.packetprocessor.iq.namespace.pubsub.PubSubElementProcessor; import org.buddycloud.channelserver.packetprocessor.iq.namespace.pubsub.PubSubElementProcessorAbstract; import org.buddycloud.channelserver.packetprocessor.iq.namespace.pubsub.PubSubGet; import org.buddycloud.channelserver.pubsub.model.NodeAffiliation; +import org.buddycloud.channelserver.pubsub.model.NodeItem; +import org.buddycloud.channelserver.pubsub.model.NodeSubscription; import org.buddycloud.channelserver.queue.FederatedQueueManager; +import org.dom4j.DocumentException; import org.dom4j.Element; +import org.dom4j.io.SAXReader; import org.xmpp.packet.IQ; import org.xmpp.packet.JID; import org.xmpp.packet.Packet; @@ -29,22 +35,35 @@ public class RecentItemsGet extends PubSubElementProcessorAbstract { private String firstItem; private String lastItem; private int totalEntriesCount; - + private SimpleDateFormat sdf; - + 
private Date maxAge; private Integer maxItems; - private static final Logger logger = Logger - .getLogger(RecentItemsGet.class); + private Element pubsub; + private SAXReader xmlReader; + private String nodeEnding = "/posts"; + + // RSM details + private String firstItemId = null; + private String lastItemId = null; + private String afterItemId = null; + private int maxResults = -1; + + private static final Logger logger = Logger.getLogger(RecentItemsGet.class); + + public static final String NS_RSM = "http://jabber.org/protocol/rsm"; public RecentItemsGet(BlockingQueue<Packet> outQueue, ChannelManager channelManager) { setChannelManager(channelManager); setOutQueue(outQueue); + xmlReader = new SAXReader(); + sdf = new SimpleDateFormat(Conf.DATE_FORMAT); - sdf.setTimeZone(TimeZone.getTimeZone("UTC")); + sdf.setTimeZone(TimeZone.getTimeZone("UTC")); } @Override @@ -55,36 +74,121 @@ public void process(Element elm, JID actorJID, IQ reqIQ, Element rsm) actor = actorJID; node = elm.attributeValue("node"); resultSetManagement = rsm; - - if (false == validStanza()) { + + if (null == actor) { + actor = request.getFrom(); + } + + if (false == isValidStanza()) { + outQueue.put(response); + return; + } + + if (false == channelManager.isLocalJID(request.getFrom())) { + response.getElement().addAttribute("remote-server-discover", + "false"); + } + pubsub = response.getElement().addElement("pubsub", + JabberPubsub.NAMESPACE_URI); + try { + parseRsmElement(); + addRecentItems(); + addRsmElement(); outQueue.put(response); + } catch (NodeStoreException e) { + logger.error(e); + response.getElement().remove(pubsub); + setErrorCondition(PacketError.Type.wait, + PacketError.Condition.internal_server_error); + } + outQueue.put(response); + + } + + private void parseRsmElement() { + Element rsmElement = pubsub.element("set"); + if (null == rsmElement) return; + Element max; + Element after; + if (null != (max = rsmElement.element("max"))) + maxResults = 
Integer.parseInt(max.getTextTrim()); + if (null != (after = rsmElement.element("after"))) + afterItemId = after.getTextTrim(); + } + + private void addRsmElement() throws NodeStoreException { + if (null == firstItemId) return; + Element rsm = pubsub.addElement("set"); + rsm.addNamespace("", NS_RSM); + rsm.addElement("first").setText(firstItemId); + rsm.addElement("last").setText(lastItemId); + rsm.addElement("count").setText( + String.valueOf(channelManager.getCountRecentItems(actor, + maxAge, maxItems, nodeEnding))); + } + + private void addRecentItems() throws NodeStoreException { + CloseableIterator<NodeItem> items = channelManager.getRecentItems( + actor, maxAge, maxItems, maxResults, afterItemId, nodeEnding); + String lastNode = ""; + NodeItem item; + Element itemsElement = null; + Element itemElement; + Element entry; + while (items.hasNext()) { + item = items.next(); + + if (false == item.getNodeId().equals(lastNode)) { + itemsElement = pubsub.addElement("items"); + itemsElement.addAttribute("node", item.getNodeId()); + lastNode = item.getNodeId(); + } + try { + entry = xmlReader.read(new StringReader(item.getPayload())) + .getRootElement(); + itemElement = itemsElement.addElement("item"); + itemElement.addAttribute("id", item.getId()); + if (null == firstItemId) + firstItemId = item.getId(); + lastItemId = item.getId(); + itemElement.add(entry); + } catch (DocumentException e) { + logger.error("Error parsing a node entry, ignoring. 
" + + item.getId()); + } } } - private boolean validStanza() { + private boolean isValidStanza() { Element recentItems = request.getChildElement().element("recent-items"); try { String max = recentItems.attributeValue("max"); if (null == max) { - createExtendedErrorReply(PacketError.Type.modify, PacketError.Condition.bad_request, "max-required"); + createExtendedErrorReply(PacketError.Type.modify, + PacketError.Condition.bad_request, "max-required"); return false; } maxItems = Integer.parseInt(max); - String since = recentItems.attributeValue("since"); - if (null == since) { - createExtendedErrorReply(PacketError.Type.modify, PacketError.Condition.bad_request, "since-required"); + String since = recentItems.attributeValue("since"); + if (null == since) { + createExtendedErrorReply(PacketError.Type.modify, + PacketError.Condition.bad_request, "since-required"); return false; - } - maxAge = sdf.parse(since); + } + maxAge = sdf.parse(since); } catch (NumberFormatException e) { logger.error(e); - createExtendedErrorReply(PacketError.Type.modify, PacketError.Condition.bad_request, "invalid-max-value-provided"); - return false; + createExtendedErrorReply(PacketError.Type.modify, + PacketError.Condition.bad_request, + "invalid-max-value-provided"); + return false; } catch (ParseException e) { - createExtendedErrorReply(PacketError.Type.modify, PacketError.Condition.bad_request, "invalid-since-value-provided"); - logger.error(e); + createExtendedErrorReply(PacketError.Type.modify, + PacketError.Condition.bad_request, + "invalid-since-value-provided"); + logger.error(e); return false; } return true; diff --git a/src/test/java/org/buddycloud/channelserver/db/jdbc/JDBCNodeStoreTest.java b/src/test/java/org/buddycloud/channelserver/db/jdbc/JDBCNodeStoreTest.java index 415a80d9..7ca4251e 100644 --- a/src/test/java/org/buddycloud/channelserver/db/jdbc/JDBCNodeStoreTest.java +++ b/src/test/java/org/buddycloud/channelserver/db/jdbc/JDBCNodeStoreTest.java @@ -1511,6 +1511,135 @@ 
public void testGetNodeItemById() throws Exception { assertTrue("Unexpected Node content returned", result.getPayload() .contains(TEST_SERVER1_NODE1_ITEM1_CONTENT)); } + + + @Test + public void testGetRecentItems() throws Exception { + + Date since = new Date(); + dbTester.loadData("node_1"); + store.addRemoteNode(TEST_SERVER1_NODE2_ID); + store.addUserSubscription(new NodeSubscriptionImpl( + TEST_SERVER1_NODE2_ID, TEST_SERVER1_USER1_JID, + Subscriptions.subscribed)); + + NodeItem nodeItem1 = new NodeItemImpl(TEST_SERVER1_NODE1_ID, "123", + new Date(), "payload"); + NodeItem nodeItem2 = new NodeItemImpl(TEST_SERVER1_NODE2_ID, "123", + new Date(), "payload2"); + store.addNodeItem(nodeItem1); + store.addNodeItem(nodeItem2); + + CloseableIterator<NodeItem> items = store.getRecentItems( + TEST_SERVER1_USER1_JID, since, -1, -1, null, null); + + // 2 -> 1 on purpose results are most recent first! + assertSameNodeItem(items.next(), nodeItem2); + assertSameNodeItem(items.next(), nodeItem1); + assertEquals(false, items.hasNext()); + } + + @Test + public void testGetRecentItemsWithNoResultsPerNodeRequestedReturnsExpectedCount() + throws Exception { + Date since = new Date(0); + dbTester.loadData("node_1"); + store.addRemoteNode(TEST_SERVER1_NODE2_ID); + store.addUserSubscription(new NodeSubscriptionImpl( + TEST_SERVER1_NODE2_ID, TEST_SERVER1_USER1_JID, + Subscriptions.subscribed)); + + NodeItem nodeItem1 = new NodeItemImpl(TEST_SERVER1_NODE1_ID, "1", + new Date(), "payload"); + NodeItem nodeItem2 = new NodeItemImpl(TEST_SERVER1_NODE2_ID, "2", + new Date(), "payload2"); + store.addNodeItem(nodeItem1); + store.addNodeItem(nodeItem2); + + CloseableIterator<NodeItem> items = store.getRecentItems( + TEST_SERVER1_USER1_JID, since, 0, -1, null, null); + + int count = 0; + while (items.hasNext()) { + items.next(); + ++count; + } + assertEquals(0, count); + } + + @Test + public void testCanPageGetRecentItemsUsingResultSetManagement() + throws Exception { + Date since = new Date(0); + 
+ dbTester.loadData("node_1"); + store.addRemoteNode(TEST_SERVER1_NODE2_ID); + store.addUserSubscription(new NodeSubscriptionImpl( + TEST_SERVER1_NODE2_ID, TEST_SERVER1_USER1_JID, + Subscriptions.subscribed)); + + for (int i = 1; i < 20; i++) { + Thread.sleep(1); + store.addNodeItem(new NodeItemImpl(TEST_SERVER1_NODE1_ID, String + .valueOf(i), new Date(), "payload" + String.valueOf(i))); + } + + CloseableIterator<NodeItem> items = store.getRecentItems( + TEST_SERVER1_USER1_JID, since, -1, -1, "5", null); + + int count = 0; + int i = 19; + + NodeItem item; + while (items.hasNext()) { + assertSameNodeItem(items.next(), new NodeItemImpl( + TEST_SERVER1_NODE1_ID, String.valueOf(i), new Date(), + "payload" + String.valueOf(i))); + --i; + ++count; + } + assertEquals(15, count); + } + + @Test + public void testGetRecentItemCountWithNoResultsPerNodeRequestedReturnsExpectedCount() + throws Exception { + Date since = new Date(); + dbTester.loadData("node_1"); + store.addRemoteNode(TEST_SERVER1_NODE2_ID); + store.addUserSubscription(new NodeSubscriptionImpl( + TEST_SERVER1_NODE2_ID, TEST_SERVER1_USER1_JID, + Subscriptions.subscribed)); + + store.addNodeItem(new NodeItemImpl(TEST_SERVER1_NODE1_ID, "123", + new Date(), "payload")); + store.addNodeItem(new NodeItemImpl(TEST_SERVER1_NODE2_ID, "123", + new Date(), "payload2")); + + int count = store.getCountRecentItems(TEST_SERVER1_USER1_JID, since, 0, + null); + assertEquals(0, count); + } + + @Test + public void testGetRecentItemCount() throws Exception { + + Date since = new Date(); + dbTester.loadData("node_1"); + store.addRemoteNode(TEST_SERVER1_NODE2_ID); + store.addUserSubscription(new NodeSubscriptionImpl( + TEST_SERVER1_NODE2_ID, TEST_SERVER1_USER1_JID, + Subscriptions.subscribed)); + Thread.sleep(1); + store.addNodeItem(new NodeItemImpl(TEST_SERVER1_NODE1_ID, "123", + new Date(), "payload")); + store.addNodeItem(new NodeItemImpl(TEST_SERVER1_NODE2_ID, "123", + new Date(), "payload2")); + + int count = 
store.getCountRecentItems(TEST_SERVER1_USER1_JID, since, + -1, null); + assertEquals(2, count); + } @Test public void testAddNodeItem() throws Exception { @@ -1829,83 +1958,4 @@ private void assertSameNodeItem(NodeItem actual, NodeItem expected) { assertEquals(actual.getNodeId(), expected.getNodeId()); assertEquals(actual.getPayload(), expected.getPayload()); } - - //1277 - @Test - public void testGetRecentItemCount() throws Exception { - - Date since = new Date(); - dbTester.loadData("node_1"); - store.addRemoteNode(TEST_SERVER1_NODE2_ID); - store.addUserSubscription(new NodeSubscriptionImpl(TEST_SERVER1_NODE2_ID, TEST_SERVER1_USER1_JID, Subscriptions.subscribed)); - - store.addNodeItem(new NodeItemImpl(TEST_SERVER1_NODE1_ID, "123", new Date(), "payload")); - store.addNodeItem(new NodeItemImpl(TEST_SERVER1_NODE2_ID, "123", new Date(), "payload2")); - - int count = store.getCountRecentItems(TEST_SERVER1_USER1_JID, since, -1, null); - assertEquals(2, count); - } - - @Test - public void testGetRecentItemCountWithNoResultsPerNodeRequestedReturnsExpectedCount() throws Exception { - Date since = new Date(); - dbTester.loadData("node_1"); - store.addRemoteNode(TEST_SERVER1_NODE2_ID); - store.addUserSubscription(new NodeSubscriptionImpl(TEST_SERVER1_NODE2_ID, TEST_SERVER1_USER1_JID, Subscriptions.subscribed)); - - store.addNodeItem(new NodeItemImpl(TEST_SERVER1_NODE1_ID, "123", new Date(), "payload")); - store.addNodeItem(new NodeItemImpl(TEST_SERVER1_NODE2_ID, "123", new Date(), "payload2")); - - int count = store.getCountRecentItems(TEST_SERVER1_USER1_JID, since, 0, null); - assertEquals(0, count); - } - - @Test - public void testGetRecentItems() throws Exception { - - Date since = new Date(); - dbTester.loadData("node_1"); - store.addRemoteNode(TEST_SERVER1_NODE2_ID); - store.addUserSubscription(new NodeSubscriptionImpl(TEST_SERVER1_NODE2_ID, TEST_SERVER1_USER1_JID, Subscriptions.subscribed)); - - NodeItem nodeItem1 = new NodeItemImpl(TEST_SERVER1_NODE1_ID, "123", new 
Date(), "payload"); - NodeItem nodeItem2 = new NodeItemImpl(TEST_SERVER1_NODE2_ID, "123", new Date(), "payload2"); - store.addNodeItem(nodeItem1); - store.addNodeItem(nodeItem2); - - CloseableIterator<NodeItem> items = store.getRecentItems(TEST_SERVER1_USER1_JID, since, -1, -1, null, null); - - // 2 -> 1 on purpose results are most recent first! - assertSameNodeItem(items.next(), nodeItem2); - assertSameNodeItem(items.next(), nodeItem1); - assertEquals(false, items.hasNext()); - } - - @Test - public void testGetRecentItemsWithNoResultsPerNodeRequestedReturnsExpectedCount() throws Exception { - Date since = new Date(); - dbTester.loadData("node_1"); - store.addRemoteNode(TEST_SERVER1_NODE2_ID); - store.addUserSubscription(new NodeSubscriptionImpl(TEST_SERVER1_NODE2_ID, TEST_SERVER1_USER1_JID, Subscriptions.subscribed)); - - NodeItem nodeItem1 = new NodeItemImpl(TEST_SERVER1_NODE1_ID, "123", new Date(), "payload"); - NodeItem nodeItem2 = new NodeItemImpl(TEST_SERVER1_NODE2_ID, "123", new Date(), "payload2"); - store.addNodeItem(nodeItem1); - store.addNodeItem(nodeItem2); - - CloseableIterator<NodeItem> items = store.getRecentItems(TEST_SERVER1_USER1_JID, since, 0, -1, null, null); - - int count = 0; - while (items.hasNext()) { - items.next(); - ++count; - } - assertEquals(0, count); - } - - @Test - public void testCanPageGetRecentItemsUsingResultSetManagement() throws Exception { - assertTrue(false); - } - } \ No newline at end of file diff --git a/src/test/java/org/buddycloud/channelserver/packetprocessor/iq/namespace/pubsub/get/RecentItemsGetTest.java b/src/test/java/org/buddycloud/channelserver/packetprocessor/iq/namespace/pubsub/get/RecentItemsGetTest.java index 9d0b486c..e376de69 100644 --- a/src/test/java/org/buddycloud/channelserver/packetprocessor/iq/namespace/pubsub/get/RecentItemsGetTest.java +++ b/src/test/java/org/buddycloud/channelserver/packetprocessor/iq/namespace/pubsub/get/RecentItemsGetTest.java @@ -2,6 +2,9 @@ import java.util.ArrayList; import 
java.util.Arrays; +import java.util.Collection; +import java.util.Date; +import java.util.List; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; @@ -19,6 +22,7 @@ import org.buddycloud.channelserver.pubsub.model.NodeItem; import org.buddycloud.channelserver.pubsub.model.NodeSubscription; import org.buddycloud.channelserver.pubsub.model.impl.NodeAffiliationImpl; +import org.buddycloud.channelserver.pubsub.model.impl.NodeItemImpl; import org.buddycloud.channelserver.pubsub.model.impl.NodeSubscriptionImpl; import org.buddycloud.channelserver.pubsub.subscription.Subscriptions; import org.buddycloud.channelserver.utils.node.NodeAclRefuseReason; @@ -47,14 +51,20 @@ public class RecentItemsGetTest extends IQTestHandler { private JID jid = new JID("[email protected]"); private ChannelManager channelManager; + private String TEST_NODE_1 = "node1"; + private String TEST_NODE_2 = "node2"; + + private JID TEST_JID_1 = new JID("user1@server1"); + private JID TEST_JID_2 = new JID("user2@server1"); + @Before public void setUp() throws Exception { queue = new LinkedBlockingQueue<Packet>(); channelManager = Mockito.mock(ChannelManager.class); - + recentItemsGet = new RecentItemsGet(queue, channelManager); - + request = readStanzaAsIq("/iq/pubsub/recent-items/request.stanza"); element = new BaseElement("recent-items"); } @@ -72,8 +82,9 @@ public void testPassingNotRecentItemsAsElementNameReturnsFalse() { @Test public void testMissingMaxAttributeReturnsErrorStanza() throws Exception { - - request.getChildElement().element("recent-items").addAttribute("max", null); + + request.getChildElement().element("recent-items") + .addAttribute("max", null); recentItemsGet.process(element, jid, request, null); Packet response = queue.poll(); @@ -81,14 +92,14 @@ public void testMissingMaxAttributeReturnsErrorStanza() throws Exception { PacketError error = response.getError(); Assert.assertNotNull(error); 
Assert.assertEquals(PacketError.Type.modify, error.getType()); - Assert.assertEquals("max-required", - error.getApplicationConditionName()); + Assert.assertEquals("max-required", error.getApplicationConditionName()); } - + @Test public void testInvalidMaxAttributesReturnsErrorStanza() throws Exception { - - request.getChildElement().element("recent-items").addAttribute("max", "three"); + + request.getChildElement().element("recent-items") + .addAttribute("max", "three"); recentItemsGet.process(element, jid, request, null); Packet response = queue.poll(); @@ -99,11 +110,12 @@ public void testInvalidMaxAttributesReturnsErrorStanza() throws Exception { Assert.assertEquals("invalid-max-value-provided", error.getApplicationConditionName()); } - + @Test public void testMissingSinceAttributeReturnsErrorStanza() throws Exception { - - request.getChildElement().element("recent-items").addAttribute("since", null); + + request.getChildElement().element("recent-items") + .addAttribute("since", null); recentItemsGet.process(element, jid, request, null); Packet response = queue.poll(); @@ -114,11 +126,12 @@ public void testMissingSinceAttributeReturnsErrorStanza() throws Exception { Assert.assertEquals("since-required", error.getApplicationConditionName()); } - + @Test public void testInvalidSinceAttributesReturnsErrorStanza() throws Exception { - - request.getChildElement().element("recent-items").addAttribute("since", "a week ago"); + + request.getChildElement().element("recent-items") + .addAttribute("since", "a week ago"); recentItemsGet.process(element, jid, request, null); Packet response = queue.poll(); @@ -128,6 +141,167 @@ public void testInvalidSinceAttributesReturnsErrorStanza() throws Exception { Assert.assertEquals(PacketError.Type.modify, error.getType()); Assert.assertEquals("invalid-since-value-provided", error.getApplicationConditionName()); - } + } + + @Test + public void testNodeStoreExceptionGeneratesAnErrorStanza() throws Exception { + + Mockito.when( + 
channelManager.getRecentItems(Mockito.any(JID.class), + Mockito.any(Date.class), Mockito.anyInt(), + Mockito.anyInt(), Mockito.anyString(), + Mockito.anyString())).thenThrow( + new NodeStoreException()); + + recentItemsGet.process(element, jid, request, null); + Packet response = queue.poll(); + + PacketError error = response.getError(); + Assert.assertNotNull(error); + Assert.assertEquals(PacketError.Type.wait, error.getType()); + Assert.assertEquals(PacketError.Condition.internal_server_error, + error.getCondition()); + } + + @Test + public void testNoRecentItemsReturnsEmptyStanza() throws Exception { + + Mockito.when( + channelManager.getRecentItems(Mockito.any(JID.class), + Mockito.any(Date.class), Mockito.anyInt(), + Mockito.anyInt(), Mockito.anyString(), + Mockito.anyString())).thenReturn( + new ClosableIteratorImpl<NodeItem>(new ArrayList<NodeItem>() + .iterator())); + + recentItemsGet.process(element, jid, request, null); + IQ response = (IQ) queue.poll(); + + Assert.assertEquals(IQ.Type.result, response.getType()); + Element pubsub = response.getChildElement(); + Assert.assertEquals("pubsub", pubsub.getName()); + Assert.assertEquals(JabberPubsub.NAMESPACE_URI, + pubsub.getNamespaceURI()); + } + + @Test + public void testOutgoingStanzaFormattedAsExpected() throws Exception { + + NodeItem item1 = new NodeItemImpl(TEST_NODE_1, "1", new Date(), + "<entry>item1</entry>"); + NodeItem item2 = new NodeItemImpl(TEST_NODE_2, "1", new Date(), + "<entry>item2</entry>"); + NodeItem item3 = new NodeItemImpl(TEST_NODE_1, "2", new Date(), + "<entry>item3</entry>"); + NodeItem item4 = new NodeItemImpl(TEST_NODE_1, "3", new Date(), + "<entry>item4</entry>"); + + ArrayList<NodeItem> results = new ArrayList<NodeItem>(); + results.add(item1); + results.add(item2); + results.add(item3); + results.add(item4); + + Mockito.when( + channelManager.getRecentItems(Mockito.any(JID.class), + Mockito.any(Date.class), Mockito.anyInt(), + Mockito.anyInt(), Mockito.anyString(), + 
Mockito.anyString())).thenReturn( + new ClosableIteratorImpl<NodeItem>(results.iterator())); + + recentItemsGet.process(element, jid, request, null); + IQ response = (IQ) queue.poll(); + + Assert.assertEquals(IQ.Type.result, response.getType()); + Element pubsub = response.getChildElement(); + Assert.assertEquals("pubsub", pubsub.getName()); + Assert.assertEquals(JabberPubsub.NAMESPACE_URI, + pubsub.getNamespaceURI()); + + List<Element> items = pubsub.elements("items"); + Assert.assertEquals(3, items.size()); + + Assert.assertEquals(TEST_NODE_1, items.get(0).attributeValue("node")); + Assert.assertEquals(TEST_NODE_2, items.get(1).attributeValue("node")); + Assert.assertEquals(TEST_NODE_1, items.get(2).attributeValue("node")); + + Assert.assertEquals(1, items.get(0).elements("item").size()); + Assert.assertEquals(2, items.get(2).elements("item").size()); + } + + @Test + public void testUnparsableItemEntriesAreSimplyIgnored() throws Exception { + + NodeItem item1 = new NodeItemImpl(TEST_NODE_1, "1", new Date(), + "<entry>item1</entry>"); + NodeItem item2 = new NodeItemImpl(TEST_NODE_1, "2", new Date(), + "<entry>item2"); + + ArrayList<NodeItem> results = new ArrayList<NodeItem>(); + results.add(item1); + results.add(item2); + + Mockito.when( + channelManager.getRecentItems(Mockito.any(JID.class), + Mockito.any(Date.class), Mockito.anyInt(), + Mockito.anyInt(), Mockito.anyString(), + Mockito.anyString())).thenReturn( + new ClosableIteratorImpl<NodeItem>(results.iterator())); + recentItemsGet.process(element, jid, request, null); + IQ response = (IQ) queue.poll(); + Assert.assertEquals(1, response.getChildElement().element("items") + .elements("item").size()); + } + + @Test + public void testCanControlGatheredEntriesUsingRsm() throws Exception { + + NodeItem item2 = new NodeItemImpl(TEST_NODE_2, "node2:1", new Date(), + "<entry>item2</entry>"); + NodeItem item3 = new NodeItemImpl(TEST_NODE_1, "node1:2", new Date(), + "<entry>item3</entry>"); + + ArrayList<NodeItem> 
results = new ArrayList<NodeItem>(); + results.add(item2); + results.add(item3); + + Mockito.when( + channelManager.getRecentItems(Mockito.any(JID.class), + Mockito.any(Date.class), Mockito.anyInt(), + Mockito.anyInt(), Mockito.anyString(), + Mockito.anyString())).thenReturn( + new ClosableIteratorImpl<NodeItem>(results.iterator())); + Mockito.when( + channelManager.getCountRecentItems(Mockito.any(JID.class), + Mockito.any(Date.class), Mockito.anyInt(), + Mockito.anyString())).thenReturn(2); + + Element rsm = request.getElement().addElement("rsm"); + rsm.addNamespace("", recentItemsGet.NS_RSM); + rsm.addElement("max").addText("2"); + rsm.addElement("after").addText("node1:1"); + + recentItemsGet.process(element, jid, request, null); + IQ response = (IQ) queue.poll(); + + Assert.assertEquals(IQ.Type.result, response.getType()); + Element pubsub = response.getChildElement(); + Assert.assertEquals("pubsub", pubsub.getName()); + Assert.assertEquals(JabberPubsub.NAMESPACE_URI, + pubsub.getNamespaceURI()); + + List<Element> items = pubsub.elements("items"); + Assert.assertEquals(2, items.size()); + + Assert.assertEquals(TEST_NODE_2, items.get(0).attributeValue("node")); + Assert.assertEquals(TEST_NODE_1, items.get(1).attributeValue("node")); + Assert.assertEquals(1, items.get(0).elements("item").size()); + Assert.assertEquals(1, items.get(1).elements("item").size()); + + Element rsmResult = pubsub.element("set"); + Assert.assertEquals("2", rsmResult.element("count").getText()); + Assert.assertEquals("node2:1", rsmResult.element("first").getText()); + Assert.assertEquals("node1:2", rsmResult.element("last").getText()); + } } \ No newline at end of file
188a11bdb925e3a4c1f38f8ff52f0039eb343eaf
spring-framework
Fixed setFavorPathExtension delegation code--
c
https://github.com/spring-projects/spring-framework
diff --git a/spring-webmvc/src/main/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolver.java b/spring-webmvc/src/main/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolver.java index 2d543941af6f..f7b59c3f6c24 100644 --- a/spring-webmvc/src/main/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolver.java +++ b/spring-webmvc/src/main/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolver.java @@ -1,5 +1,5 @@ /* - * Copyright 2002-2012 the original author or authors. + * Copyright 2002-2013 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,7 +25,6 @@ import java.util.Map; import java.util.Properties; import java.util.Set; - import javax.activation.FileTypeMap; import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; @@ -33,6 +32,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; + import org.springframework.beans.factory.BeanFactoryUtils; import org.springframework.beans.factory.InitializingBean; import org.springframework.core.OrderComparator; @@ -95,7 +95,7 @@ public class ContentNegotiatingViewResolver extends WebApplicationObjectSupport private ContentNegotiationManager contentNegotiationManager; - private ContentNegotiationManagerFactoryBean cnManagerFactoryBean = new ContentNegotiationManagerFactoryBean(); + private final ContentNegotiationManagerFactoryBean cnManagerFactoryBean = new ContentNegotiationManagerFactoryBean(); private boolean useNotAcceptableStatusCode = false; @@ -104,10 +104,6 @@ public class ContentNegotiatingViewResolver extends WebApplicationObjectSupport private List<ViewResolver> viewResolvers; - public ContentNegotiatingViewResolver() { - super(); - } - public void setOrder(int order) { this.order = order; } @@ -118,7 +114,9 @@ public int getOrder() { /** * Set the {@link 
ContentNegotiationManager} to use to determine requested media types. - * If not set, the default constructor is used. + * <p>If not set, ContentNegotiationManager's default constructor will be used, + * applying a {@link org.springframework.web.accept.HeaderContentNegotiationStrategy}. + * @see ContentNegotiationManager#ContentNegotiationManager() */ public void setContentNegotiationManager(ContentNegotiationManager contentNegotiationManager) { this.contentNegotiationManager = contentNegotiationManager; @@ -130,18 +128,16 @@ public void setContentNegotiationManager(ContentNegotiationManager contentNegoti * <p>For instance, when this flag is {@code true} (the default), a request for {@code /hotels.pdf} * will result in an {@code AbstractPdfView} being resolved, while the {@code Accept} header can be the * browser-defined {@code text/html,application/xhtml+xml}. - * * @deprecated use {@link #setContentNegotiationManager(ContentNegotiationManager)} */ @Deprecated public void setFavorPathExtension(boolean favorPathExtension) { - this.cnManagerFactoryBean.setFavorParameter(favorPathExtension); + this.cnManagerFactoryBean.setFavorPathExtension(favorPathExtension); } /** * Indicate whether to use the Java Activation Framework to map from file extensions to media types. * <p>Default is {@code true}, i.e. the Java Activation Framework is used (if available). - * * @deprecated use {@link #setContentNegotiationManager(ContentNegotiationManager)} */ @Deprecated @@ -155,7 +151,6 @@ public void setUseJaf(boolean useJaf) { * <p>For instance, when this flag is {@code true}, a request for {@code /hotels?format=pdf} will result * in an {@code AbstractPdfView} being resolved, while the {@code Accept} header can be the browser-defined * {@code text/html,application/xhtml+xml}. 
- * * @deprecated use {@link #setContentNegotiationManager(ContentNegotiationManager)} */ @Deprecated @@ -166,7 +161,6 @@ public void setFavorParameter(boolean favorParameter) { /** * Set the parameter name that can be used to determine the requested media type if the {@link * #setFavorParameter} property is {@code true}. The default parameter name is {@code format}. - * * @deprecated use {@link #setContentNegotiationManager(ContentNegotiationManager)} */ @Deprecated @@ -179,7 +173,6 @@ public void setParameterName(String parameterName) { * <p>If set to {@code true}, this view resolver will only refer to the file extension and/or * parameter, as indicated by the {@link #setFavorPathExtension favorPathExtension} and * {@link #setFavorParameter favorParameter} properties. - * * @deprecated use {@link #setContentNegotiationManager(ContentNegotiationManager)} */ @Deprecated @@ -191,7 +184,6 @@ public void setIgnoreAcceptHeader(boolean ignoreAcceptHeader) { * Set the mapping from file extensions to media types. * <p>When this mapping is not set or when an extension is not present, this view resolver * will fall back to using a {@link FileTypeMap} when the Java Action Framework is available. - * * @deprecated use {@link #setContentNegotiationManager(ContentNegotiationManager)} */ @Deprecated @@ -207,7 +199,6 @@ public void setMediaTypes(Map<String, String> mediaTypes) { * Set the default content type. * <p>This content type will be used when file extension, parameter, nor {@code Accept} * header define a content-type, either through being disabled or empty. 
- * * @deprecated use {@link #setContentNegotiationManager(ContentNegotiationManager)} */ @Deprecated @@ -275,7 +266,7 @@ protected void initServletContext(ServletContext servletContext) { this.cnManagerFactoryBean.setServletContext(servletContext); } - public void afterPropertiesSet() throws Exception { + public void afterPropertiesSet() { if (this.contentNegotiationManager == null) { this.cnManagerFactoryBean.afterPropertiesSet(); this.contentNegotiationManager = this.cnManagerFactoryBean.getObject();
ead10c3f3e22fe313a0280ff524e639587151d3c
gitools$gitools
General clustering functionality modifications (performance, visualisation,...) git-svn-id: https://bg.upf.edu/svn/gitools/trunk@865 1b512f91-3386-4a98-81e7-b8836ddf8916
p
https://github.com/gitools/gitools
diff --git a/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringAnalysis.java b/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringAnalysis.java deleted file mode 100644 index 0f12eaa7..00000000 --- a/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringAnalysis.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2010 xrafael. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * under the License. - */ - -package org.gitools.analysis.clustering; - -import java.util.Properties; -import org.gitools.matrix.model.IMatrixView; - - -public class ClusteringAnalysis { - - private String indexData; - - private boolean applyToRows; - - private boolean applyToColumns; - - private boolean applyToRowsColumns; - - private Properties params; - - private IMatrixView data; - - private ClusteringResult results; - - public ClusteringResult getResults() { - return results; - } - - public void setResults(ClusteringResult results) { - this.results = results; - } - - public boolean isApplyToRowsColumns() { - return applyToRowsColumns; - } - - public void setApplyToRowsColumns(boolean applyToRowsColumns) { - this.applyToRowsColumns = applyToRowsColumns; - } - - public IMatrixView getData() { - return data; - } - - public void setData(IMatrixView data) { - this.data = data; - } - - public boolean isApplyToColumns() { - return applyToColumns; - } - - public void setApplyToColumns(boolean applyToColumns) { - this.applyToColumns = 
applyToColumns; - } - - public boolean isApplyToRows() { - return applyToRows; - } - - public void setApplyToRows(boolean applyToRows) { - this.applyToRows = applyToRows; - } - - public String getIndexData() { - return indexData; - } - - public void setIndexData(String indexData) { - this.indexData = indexData; - } - - public Properties getParams() { - return params; - } - - public void setParams(Properties params) { - this.params = params; - } - -} diff --git a/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringProcessor.java b/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringProcessor.java deleted file mode 100644 index 7f37388c..00000000 --- a/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringProcessor.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2010 xrafael. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * under the License. 
- */ - -package org.gitools.analysis.clustering; - -import edu.upf.bg.progressmonitor.IProgressMonitor; -import org.gitools.analysis.AnalysisException; -import org.gitools.analysis.clustering.methods.ClusteringMethodFactory; - - -public class ClusteringProcessor { - - protected ClusteringAnalysis analysis; - - public ClusteringProcessor(ClusteringAnalysis analysis) { - this.analysis = analysis; - } - - public void run(IProgressMonitor monitor) throws AnalysisException, Exception { - - ClusteringMethod method = ClusteringMethodFactory.createMethod(analysis.getParams()); - - if (analysis.isApplyToRows()) - method.buildAndCluster(analysis.getData(), "rows", monitor); - - if (analysis.isApplyToColumns()) - method.buildAndCluster(analysis.getData(), "cols", monitor); - } - - -} diff --git a/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringResult.java b/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringResult.java deleted file mode 100644 index cdfd7b98..00000000 --- a/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringResult.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2010 xrafael. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * under the License. 
- */ - -package org.gitools.analysis.clustering; - -public class ClusteringResult { - - Integer[] resultList; - - public Integer[] getResultList() { - return resultList; - } - - public void setResultList(Integer[] resultList) { - this.resultList = resultList; - } - - public ClusteringResult(Integer[] resultList) { - this.resultList = resultList; - } - - -} diff --git a/gitools-core/src/main/java/org/gitools/analysis/clustering/methods/MatrixViewWekaLoader.java b/gitools-core/src/main/java/org/gitools/analysis/clustering/methods/MatrixViewWekaLoader.java deleted file mode 100644 index b48aaff8..00000000 --- a/gitools-core/src/main/java/org/gitools/analysis/clustering/methods/MatrixViewWekaLoader.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright 2010 xrafael. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * under the License. - */ - -package org.gitools.analysis.clustering.methods; - -import java.io.IOException; -import org.gitools.matrix.MatrixUtils; -import org.gitools.matrix.model.IMatrixView; -import weka.core.Attribute; -import weka.core.FastVector; -import weka.core.Instance; -import weka.core.Instances; -import weka.core.converters.AbstractLoader; - -// FIXME This code is very inefficient !!! 
check getNextInstance and getStructure - -public class MatrixViewWekaLoader extends AbstractLoader { - - private IMatrixView matrixView; - - private String indexValueMatrix; - - //FIXME very inefficient, use integer or enum instead, - // or better use inheritance properly - // or even better use MatrixViewTransposed - private String type; - - private int indexRows; - - private int indexCols; - - - public MatrixViewWekaLoader(IMatrixView matrixView, String index, String type) { - - this.matrixView = matrixView; - - this.indexValueMatrix = index; - - this.type = type; - - indexRows = indexCols = -1; - - } - - // FIXME structure should be constructed ONCE in the constructor - // and returned here whenever it is needed - @Override - public Instances getStructure() throws IOException { - - FastVector attribNames = new FastVector(); - - Integer capacity = 0; - - String name = "matrix"+type; - - if (type.equals("cols")) - { - //Adding attributes (rows name) - for (int rows = 0; rows < matrixView.getRowCount(); rows++) - - attribNames.addElement(new Attribute(matrixView.getRowLabel(rows))); - - } - else{ - //Adding attributes (columns name) - - for (int cols = 0; cols < matrixView.getColumnCount(); cols++) - - attribNames.addElement(new Attribute(matrixView.getColumnLabel(cols))); - - } - - return new Instances(name, attribNames, capacity); - - - } - - @Override - public Instances getDataSet() throws IOException { - - Instances dataSet = getStructure(); - - Instance current = null; - - Integer auxCols = indexCols, auxRows = indexRows; - - indexCols = -1; - indexRows = -1; - - while ((current = getNextInstance(dataSet)) != null){ - dataSet.add(current); - } - indexCols = auxCols; - indexRows = auxRows; - return dataSet; - - } - - - @Override - //Param ds it is not modified nor altered - public Instance getNextInstance(Instances ds) throws IOException - { - - Instance current = null; - - if (type.equals("cols")) - { - - current = new Instance(matrixView.getRowCount()); - - 
if ((matrixView.getVisibleColumns().length < 1) || (indexCols >= matrixView.getVisibleColumns().length-1)) return null; - - indexCols++; - - for (int row = 0;row < current.numAttributes();row++) - { - current.setValue(row, MatrixUtils.doubleValue( - matrixView.getCellValue(row,indexCols, indexValueMatrix))); - } - - } - else - { - current = new Instance(matrixView.getColumnCount()); - - if ((matrixView.getVisibleRows().length < 1) || (indexRows >= matrixView.getVisibleRows().length-1)) return null; - - indexRows++; - - for (int col = 0;col < current.numAttributes();col++) - { - current.setValue(col, MatrixUtils.doubleValue( - matrixView.getCellValue(indexRows, col, indexValueMatrix))); - } - } - - Instances dataset = getStructure(); - dataset.add(current); - current.setDataset(dataset); - - return current; - } - - @Override - public String getRevision() { - - throw new UnsupportedOperationException("Not supported yet."); - - } -} diff --git a/gitools-core/src/main/java/org/gitools/analysis/clustering/methods/WekaCobWebMethod.java b/gitools-core/src/main/java/org/gitools/analysis/clustering/methods/WekaCobWebMethod.java deleted file mode 100644 index aaeb6b32..00000000 --- a/gitools-core/src/main/java/org/gitools/analysis/clustering/methods/WekaCobWebMethod.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright 2010 xrafael. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * under the License. 
- */ - -package org.gitools.analysis.clustering.methods; - -import edu.upf.bg.progressmonitor.IProgressMonitor; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Properties; -import org.gitools.analysis.AbstractMethod; -import org.gitools.analysis.MethodException; -import org.gitools.analysis.clustering.ClusteringMethod; -import org.gitools.analysis.clustering.ClusteringResult; -import org.gitools.matrix.model.IMatrixView; -import weka.clusterers.Cobweb; -import weka.core.Instance; -import weka.core.Instances; - -public class WekaCobWebMethod extends AbstractMethod implements ClusteringMethod{ - - public static final String ID = "cobweb"; - - public WekaCobWebMethod(Properties properties) { - super(ID, - "CobWeb's clustering", - "CobWeb's Weka clustering", - ClusteringResult.class, properties); - } - - //FIXME type shouldn't be an string - @Override - public void buildAndCluster(IMatrixView matrixView, String type, IProgressMonitor monitor) throws Exception, IOException, NumberFormatException { - - // FIXME valueIndex should be an integer !!! 
- String valueIndex = properties.getProperty("index", "0"); - MatrixViewWekaLoader loader = - new MatrixViewWekaLoader(matrixView, valueIndex, type); - - Instances structure = loader.getStructure(); - - Cobweb clusterer = new Cobweb(); - - // FIXME consider empty values using getProperty(key, defaultValue) - - clusterer.setAcuity(Float.valueOf(properties.getProperty("acuity"))); - clusterer.setCutoff(Float.valueOf(properties.getProperty("cutoff"))); - clusterer.setSeed(Integer.valueOf(properties.getProperty("seed"))); - - clusterer.buildClusterer(structure); - - monitor.begin("Creating clustering model ...", structure.numInstances() + 1); - - Instance current; - - while ((current = loader.getNextInstance(structure)) != null - && !monitor.isCancelled()) { - - clusterer.updateClusterer(current); - monitor.worked(1); - } - - clusterer.updateFinished(); - - monitor.end(); - - // Identificar el cluster de cada instancia - Instances dataset = loader.getDataSet(); - - monitor.begin("Classifying instances ...", dataset.numInstances()); - - int cluster; - - //One cluster differnt instances - - HashMap<Integer, List<Integer>> clusterResults = new HashMap<Integer, List<Integer>>(); - - for (int i=0; i < dataset.numInstances() && !monitor.isCancelled(); i++) { - - cluster = clusterer.clusterInstance(dataset.instance(i)); - - List<Integer> instancesCluster = clusterResults.get(cluster); - if (instancesCluster == null) { - instancesCluster = new ArrayList<Integer>(); - clusterResults.put(cluster, instancesCluster); - } - - instancesCluster.add(i); - - monitor.worked(1); - } - - updateVisibility(type, matrixView, dataset.numInstances(), clusterResults); - - monitor.end(); - } - - private void updateVisibility(String type, IMatrixView matrixView, Integer numInstances, HashMap<Integer, List<Integer>> clusterResults) { - - int[] visibleData = null; - - if (type.equals("rows")) - visibleData = matrixView.getVisibleRows(); - else - visibleData = matrixView.getVisibleColumns(); - - 
final int[] sortedVisibleData = new int[numInstances]; - - int index = 0; - - //Integer[] clustersSorted = new Integer[clusterResults.keySet().size()]; - //clustersSorted = (Integer[]) clusterResults.keySet().toArray(); - Integer[] clustersSorted = (Integer[]) clusterResults.keySet().toArray( - new Integer[clusterResults.keySet().size()]); - - Arrays.sort(clustersSorted); - - for (Integer i : clustersSorted) - for( Integer val : clusterResults.get(i)) - sortedVisibleData[index++] = visibleData[val]; - - if (type.equals("rows")) - matrixView.setVisibleRows(sortedVisibleData); - else - matrixView.setVisibleColumns(sortedVisibleData); - - } - - @Override - public String getId() { - return ID; - } - - - @Override - public void build(IMatrixView matrixView, String type, IProgressMonitor monitor) throws MethodException { - throw new UnsupportedOperationException("Not supported yet."); - } - - @Override - public ClusteringResult cluster() throws MethodException { - throw new UnsupportedOperationException("Not supported yet."); - } - - - - -} diff --git a/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringMethod.java b/gitools-core/src/main/java/org/gitools/matrix/clustering/ClusteringMethod.java similarity index 69% rename from gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringMethod.java rename to gitools-core/src/main/java/org/gitools/matrix/clustering/ClusteringMethod.java index 0dfbec9f..95dce78c 100644 --- a/gitools-core/src/main/java/org/gitools/analysis/clustering/ClusteringMethod.java +++ b/gitools-core/src/main/java/org/gitools/matrix/clustering/ClusteringMethod.java @@ -15,20 +15,15 @@ * under the License. 
*/ -package org.gitools.analysis.clustering; +package org.gitools.matrix.clustering; import edu.upf.bg.progressmonitor.IProgressMonitor; import java.io.IOException; import org.gitools.analysis.Method; -import org.gitools.analysis.MethodException; import org.gitools.matrix.model.IMatrixView; public interface ClusteringMethod extends Method{ - void buildAndCluster(IMatrixView matrixView, String type, IProgressMonitor monitor) throws Exception, IOException, NumberFormatException; - - void build(IMatrixView matrixView, String type, IProgressMonitor monitor) throws MethodException; - - ClusteringResult cluster() throws MethodException; + void buildAndCluster(IMatrixView matrixView, IProgressMonitor monitor) throws Exception, IOException, NumberFormatException; } diff --git a/gitools-core/src/main/java/org/gitools/matrix/clustering/MatrixViewClusterer.java b/gitools-core/src/main/java/org/gitools/matrix/clustering/MatrixViewClusterer.java new file mode 100644 index 00000000..d82cf81c --- /dev/null +++ b/gitools-core/src/main/java/org/gitools/matrix/clustering/MatrixViewClusterer.java @@ -0,0 +1,42 @@ +/* + * Copyright 2010 xrafael. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * under the License. 
+ */ +package org.gitools.matrix.clustering; + +import edu.upf.bg.progressmonitor.IProgressMonitor; +import java.util.Properties; +import org.gitools.analysis.AnalysisException; +import org.gitools.matrix.clustering.methods.ClusteringMethodFactory; +import org.gitools.matrix.MatrixViewTransposition; +import org.gitools.matrix.model.IMatrixView; + +public class MatrixViewClusterer { + + public static void cluster (IMatrixView matrixView, Properties clusterParameters, IProgressMonitor monitor) throws AnalysisException, Exception { + + ClusteringMethod method = ClusteringMethodFactory.createMethod(clusterParameters); + + if (Boolean.valueOf(clusterParameters.getProperty("transpose", "false"))) { + + MatrixViewTransposition mt = new MatrixViewTransposition(); + mt.setMatrix(matrixView); + matrixView = mt; + } + + method.buildAndCluster(matrixView, monitor); + + } +} diff --git a/gitools-core/src/main/java/org/gitools/analysis/clustering/methods/ClusteringMethodFactory.java b/gitools-core/src/main/java/org/gitools/matrix/clustering/methods/ClusteringMethodFactory.java similarity index 85% rename from gitools-core/src/main/java/org/gitools/analysis/clustering/methods/ClusteringMethodFactory.java rename to gitools-core/src/main/java/org/gitools/matrix/clustering/methods/ClusteringMethodFactory.java index f34d8f79..228d8b33 100644 --- a/gitools-core/src/main/java/org/gitools/analysis/clustering/methods/ClusteringMethodFactory.java +++ b/gitools-core/src/main/java/org/gitools/matrix/clustering/methods/ClusteringMethodFactory.java @@ -15,10 +15,10 @@ * under the License. 
*/ -package org.gitools.analysis.clustering.methods; +package org.gitools.matrix.clustering.methods; -import java.util.Properties; -import org.gitools.analysis.clustering.ClusteringMethod; +import java.util.Properties; +import org.gitools.matrix.clustering.ClusteringMethod; import org.gitools.analysis.AnalysisException; @@ -26,7 +26,7 @@ public class ClusteringMethodFactory { public static ClusteringMethod createMethod(Properties properties) throws AnalysisException { String methodId =properties.getProperty("method"); - if (WekaCobWebMethod.ID.equalsIgnoreCase(methodId)) + if (methodId.toLowerCase().contains(WekaCobWebMethod.ID)) return new WekaCobWebMethod(properties); else if (WekaKmeansMethod.ID.equalsIgnoreCase(methodId)) return new WekaKmeansMethod(properties); diff --git a/gitools-core/src/main/java/org/gitools/matrix/clustering/methods/MatrixViewWekaLoader.java b/gitools-core/src/main/java/org/gitools/matrix/clustering/methods/MatrixViewWekaLoader.java new file mode 100644 index 00000000..d4d6d5d0 --- /dev/null +++ b/gitools-core/src/main/java/org/gitools/matrix/clustering/methods/MatrixViewWekaLoader.java @@ -0,0 +1,150 @@ +/* + * Copyright 2010 xrafael. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * under the License. 
+ */ +package org.gitools.matrix.clustering.methods; + +import cern.colt.matrix.DoubleMatrix2D; +import java.io.IOException; +import org.gitools.matrix.MatrixUtils; +import org.gitools.matrix.model.DoubleMatrix; +import org.gitools.matrix.model.IMatrixView; +import weka.core.Attribute; +import weka.core.FastVector; +import weka.core.Instance; +import weka.core.Instances; +import weka.core.converters.AbstractLoader; + +// FIXME This code is very inefficient !!! check getNextInstance and getStructure +public class MatrixViewWekaLoader extends AbstractLoader { + + private IMatrixView matrixView; + private Integer indexValueMatrix; + private int indexRows; + private int indexCols; + private Instances dataSet; + private FastVector attribNames; + + public MatrixViewWekaLoader(IMatrixView matrixView, Integer index) { + + this.matrixView = matrixView; + + this.indexValueMatrix = index; + + indexRows = indexCols = -1; + + attribNames = new FastVector(); + + //Adding attributes (rows name) + for (int rows = 0; rows < matrixView.getRowCount(); rows++) { + attribNames.addElement(new Attribute(matrixView.getRowLabel(rows))); + } + + dataSet = new Instances("matrixToCluster", attribNames, 0); + + } + + // FIXME structure should be constructed ONCE in the constructor + // and returned here whenever it is needed + @Override + public Instances getStructure() throws IOException { + + return dataSet; + + } + + @Override + public Instances getDataSet() throws IOException { + + Instance current = null; + + Integer auxCols = indexCols, auxRows = indexRows; + + indexCols = -1; + indexRows = -1; + + while ((current = getNextInstance(dataSet)) != null) { + dataSet.add(current); + } + + indexCols = auxCols; + indexRows = auxRows; + + return dataSet; + + } + + @Override + //Param ds it is not modified nor altered + public Instance getNextInstance(Instances ds) throws IOException { + + if (indexCols >= matrixView.getVisibleColumns().length - 1) { + return null; + } + + indexCols++; + + 
//MatrixViewInstance current = new MatrixViewInstance(matrixView,indexCols,indexValueMatrix); + + double[] values = new double[matrixView.getRowCount()]; + for (int row = 0; row < matrixView.getRowCount(); row++) { + values[row] = MatrixUtils.doubleValue( + matrixView.getCellValue(row, indexCols, indexValueMatrix)); + } + //Instance is created once data in array values. This improves time performance + Instance current = new Instance(1, values); + + Instances dataset = new Instances("matrixToCluster", attribNames, 0); + dataset.add(current); + current.setDataset(dataset); + + return current; + } + + @Override + public String getRevision() { + + throw new UnsupportedOperationException("Not supported yet."); + + } + + /** + * Given an index (col,row) from the matrix we retrieve the instance + * + */ + public Instance get(Integer index) { + + if (index >= matrixView.getVisibleColumns().length - 1) { + return null; + } + + double[] values = new double[matrixView.getRowCount()]; + + for (int row = 0; row < matrixView.getRowCount(); row++) { + values[row] = MatrixUtils.doubleValue( + matrixView.getCellValue(row, index, indexValueMatrix)); + } + + //Instance is created once data in array values. This improves time performance + Instance current = new Instance(1, values); + + //The dataset for the instance + Instances dataset = new Instances("matrixToCluster", attribNames, 0); + dataset.add(current); + current.setDataset(dataset); + + return current; + } +} diff --git a/gitools-core/src/main/java/org/gitools/matrix/clustering/methods/WekaCobWebMethod.java b/gitools-core/src/main/java/org/gitools/matrix/clustering/methods/WekaCobWebMethod.java new file mode 100644 index 00000000..e5f450f7 --- /dev/null +++ b/gitools-core/src/main/java/org/gitools/matrix/clustering/methods/WekaCobWebMethod.java @@ -0,0 +1,141 @@ +/* + * Copyright 2010 xrafael. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * under the License. + */ + +package org.gitools.matrix.clustering.methods; + +import edu.upf.bg.progressmonitor.IProgressMonitor; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Properties; +import org.gitools.analysis.AbstractMethod; +import org.gitools.matrix.clustering.ClusteringMethod; +import org.gitools.matrix.model.IMatrixView; +import weka.clusterers.Cobweb; +import weka.core.Instance; +import weka.core.Instances; + +public class WekaCobWebMethod extends AbstractMethod implements ClusteringMethod{ + + public static final String ID = "hierarchical"; + + public WekaCobWebMethod(Properties properties) { + super(ID, + "CobWeb's clustering", + "CobWeb's Weka clustering",null, properties); + } + + @Override + public void buildAndCluster(IMatrixView matrixView, IProgressMonitor monitor) throws Exception, IOException, NumberFormatException { + + Integer valueIndex = new Integer(properties.getProperty("index", "0")); + MatrixViewWekaLoader loader = + new MatrixViewWekaLoader(matrixView, valueIndex); + + Instances structure = loader.getStructure(); + + Cobweb clusterer = new Cobweb(); + + clusterer.setAcuity(Float.valueOf(properties.getProperty("acuity","1.0"))); + clusterer.setCutoff(Float.valueOf(properties.getProperty("cutoff","0.0028"))); + 
clusterer.setSeed(Integer.valueOf(properties.getProperty("seed","42"))); + + clusterer.buildClusterer(structure); + + monitor.begin("Creating clustering model ...", matrixView.getVisibleColumns().length + 1); + + Instance current = null; + + int j = 0; + while ((j < matrixView.getVisibleColumns().length ) && !monitor.isCancelled()) { + + if ((current = loader.get(j)) != null) + clusterer.updateClusterer(current); + + monitor.worked(1); + j++; + } + + clusterer.updateFinished(); + + monitor.end(); + + // Identificar el cluster de cada instancia + + monitor.begin("Clustering instances ...", matrixView.getVisibleColumns().length); + + int cluster; + + //One cluster different instances + HashMap<Integer, List<Integer>> clusterResults = new HashMap<Integer, List<Integer>>(); + + for (int i=0; i < matrixView.getVisibleColumns().length && !monitor.isCancelled(); i++) { + + if ((current = loader.get(i)) != null) { + + cluster = clusterer.clusterInstance(current); + + List<Integer> instancesCluster = clusterResults.get(cluster); + if (instancesCluster == null) { + instancesCluster = new ArrayList<Integer>(); + clusterResults.put(cluster, instancesCluster); + } + + instancesCluster.add(i); + } + monitor.worked(1); + } + + updateVisibility(matrixView, clusterResults); + + monitor.end(); + } + + private void updateVisibility(IMatrixView matrixView, HashMap<Integer, List<Integer>> clusterResults) { + + int[] visibleData = matrixView.getVisibleColumns(); + + final int[] sortedVisibleData = new int[matrixView.getVisibleColumns().length]; + + int index = 0; + + + Integer[] clustersSorted = (Integer[]) clusterResults.keySet().toArray( + new Integer[clusterResults.keySet().size()]); + + Arrays.sort(clustersSorted); + + for (Integer i : clustersSorted) + for( Integer val : clusterResults.get(i)) + sortedVisibleData[index++] = visibleData[val]; + + matrixView.setVisibleColumns(sortedVisibleData); + + } + + @Override + public String getId() { + return ID; + } + + + + + +} diff --git 
a/gitools-core/src/main/java/org/gitools/analysis/clustering/methods/WekaKmeansMethod.java b/gitools-core/src/main/java/org/gitools/matrix/clustering/methods/WekaKmeansMethod.java similarity index 59% rename from gitools-core/src/main/java/org/gitools/analysis/clustering/methods/WekaKmeansMethod.java rename to gitools-core/src/main/java/org/gitools/matrix/clustering/methods/WekaKmeansMethod.java index b2f5d934..15421bf5 100644 --- a/gitools-core/src/main/java/org/gitools/analysis/clustering/methods/WekaKmeansMethod.java +++ b/gitools-core/src/main/java/org/gitools/matrix/clustering/methods/WekaKmeansMethod.java @@ -15,7 +15,7 @@ * under the License. */ -package org.gitools.analysis.clustering.methods; +package org.gitools.matrix.clustering.methods; import edu.upf.bg.progressmonitor.IProgressMonitor; import java.io.IOException; @@ -24,9 +24,7 @@ import java.util.List; import java.util.Properties; import org.gitools.analysis.AbstractMethod; -import org.gitools.analysis.MethodException; -import org.gitools.analysis.clustering.ClusteringMethod; -import org.gitools.analysis.clustering.ClusteringResult; +import org.gitools.matrix.clustering.ClusteringMethod; import org.gitools.matrix.model.IMatrixView; import weka.clusterers.SimpleKMeans; import weka.core.EuclideanDistance; @@ -42,11 +40,12 @@ public WekaKmeansMethod(Properties properties) { super(ID, "K-Means clustering", "K-Means Weka clustering", - ClusteringResult.class, properties); + null, properties); } + @Override - public void buildAndCluster(IMatrixView matrixView, String type, IProgressMonitor monitor) throws Exception, IOException, NumberFormatException { + public void buildAndCluster(IMatrixView matrixView, IProgressMonitor monitor) throws Exception, IOException, NumberFormatException { Instance instancia; @@ -54,34 +53,30 @@ public void buildAndCluster(IMatrixView matrixView, String type, IProgressMonito if ((Integer.valueOf(properties.getProperty("k"))) < 2) return; - MatrixViewWekaLoader loader = new 
MatrixViewWekaLoader(matrixView, properties.getProperty("index"),type); - - Instances structure = loader.getStructure(); - - System.out.println("Loading clustering algorithm ..."); + Integer valueIndex = Integer.valueOf(properties.getProperty("index", "0")); + MatrixViewWekaLoader loader = + new MatrixViewWekaLoader(matrixView, valueIndex); SimpleKMeans clusterer = new SimpleKMeans(); - clusterer.setMaxIterations(Integer.valueOf(properties.getProperty("iterations"))); + clusterer.setMaxIterations(Integer.valueOf(properties.getProperty("iterations","500"))); + clusterer.setNumClusters(Integer.valueOf(properties.getProperty("k","2"))); + clusterer.setSeed(Integer.valueOf(properties.getProperty("seed","10"))); - clusterer.setNumClusters(Integer.valueOf(properties.getProperty("k"))); - - clusterer.setSeed(Integer.valueOf(properties.getProperty("seed"))); - - if (properties.getProperty("distance").toLowerCase().equals("euclidean")) + if (properties.getProperty("distance","euclidean").toLowerCase().equals("euclidean")) clusterer.setDistanceFunction(new EuclideanDistance()); else - if (properties.getProperty("distance").toLowerCase().equals("manhattan")) clusterer.setDistanceFunction(new ManhattanDistance()); + Instances dataset = loader.getDataSet(); - System.out.println("Training clustering model..."); + monitor.begin("Creating clustering model ...", 1); - clusterer.buildClusterer(loader.getDataSet()); + clusterer.buildClusterer(dataset); - System.out.println("Setting instances into clusters ..."); + monitor.end(); - Instances dataset = loader.getDataSet(); + monitor.begin("Clustering instances ...", dataset.numInstances()); //Cluster -> List instances HashMap<Integer,List<Integer>> clusterResults = new HashMap<Integer,List<Integer>>(); @@ -102,41 +97,32 @@ public void buildAndCluster(IMatrixView matrixView, String type, IProgressMonito clusterResults.put(cluster, instancesCluster); + monitor.worked(1); + } - rePaintHeatMap (type, matrixView, dataset.numInstances(), 
clusterResults); + updateVisibility (matrixView, dataset.numInstances(), clusterResults); + monitor.end(); } - private void rePaintHeatMap (String type, IMatrixView matrixView, Integer numInstances, HashMap<Integer,List<Integer>> clusterResults){ + private void updateVisibility (IMatrixView matrixView, Integer numInstances, HashMap<Integer,List<Integer>> clusterResults){ + int index = 0; int[] visibleData = null; - - if (type.equals("rows")) - visibleData = matrixView.getVisibleRows(); - else - visibleData = matrixView.getVisibleColumns(); - final int[] sortedVisibleData = new int[numInstances]; - int index = 0; - - for (Integer i : clusterResults.keySet()){ + visibleData = matrixView.getVisibleColumns(); + for (Integer i : clusterResults.keySet()) for( Integer val : clusterResults.get(i)){ sortedVisibleData[index] = visibleData[val]; - index++; } - } - - if (type.equals("rows")) - matrixView.setVisibleRows(sortedVisibleData); - else - matrixView.setVisibleColumns(sortedVisibleData); + matrixView.setVisibleColumns(sortedVisibleData); } @@ -146,16 +132,6 @@ public String getId() { } - @Override - public void build(IMatrixView matrixView, String type, IProgressMonitor monitor) throws MethodException { - throw new UnsupportedOperationException("Not supported yet."); - } - - @Override - public ClusteringResult cluster() throws MethodException { - throw new UnsupportedOperationException("Not supported yet."); - } - diff --git a/gitools-ui/src/main/java/org/gitools/ui/actions/AnalysisActions.java b/gitools-ui/src/main/java/org/gitools/ui/actions/AnalysisActions.java index f9803e15..87023177 100755 --- a/gitools-ui/src/main/java/org/gitools/ui/actions/AnalysisActions.java +++ b/gitools-ui/src/main/java/org/gitools/ui/actions/AnalysisActions.java @@ -9,7 +9,6 @@ import org.gitools.stats.mtc.Bonferroni; import org.gitools.ui.actions.analysis.CombinationsAction; import org.gitools.ui.actions.analysis.CorrelationsAction; -import 
org.gitools.ui.actions.analysis.ClusteringAction; import org.gitools.ui.platform.AppFrame; public class AnalysisActions { @@ -17,8 +16,6 @@ public class AnalysisActions { public static final BaseAction combinations = new CombinationsAction(); public static final BaseAction correlations = new CorrelationsAction(); - - public static final BaseAction clusteringAction = new ClusteringAction(); public static final BaseAction mtcBonferroniAction = new MtcAction(new Bonferroni()); diff --git a/gitools-ui/src/main/java/org/gitools/ui/actions/DataActions.java b/gitools-ui/src/main/java/org/gitools/ui/actions/DataActions.java index d7dc9706..9ec43118 100644 --- a/gitools-ui/src/main/java/org/gitools/ui/actions/DataActions.java +++ b/gitools-ui/src/main/java/org/gitools/ui/actions/DataActions.java @@ -1,5 +1,6 @@ package org.gitools.ui.actions; +import org.gitools.ui.actions.data.ClusteringAction; import org.gitools.ui.platform.actions.BaseAction; import org.gitools.ui.actions.data.FastSortRowsAction; import org.gitools.ui.actions.data.FilterByLabelAction; @@ -39,4 +40,7 @@ public final class DataActions { public static final BaseAction moveColsLeftAction = new MoveSelectionAction(MoveDirection.COL_LEFT); public static final BaseAction moveColsRightAction = new MoveSelectionAction(MoveDirection.COL_RIGHT); + + public static final BaseAction clusteringAction = new ClusteringAction(); + } diff --git a/gitools-ui/src/main/java/org/gitools/ui/actions/MenuActionSet.java b/gitools-ui/src/main/java/org/gitools/ui/actions/MenuActionSet.java index 8e78e77b..999b9a77 100644 --- a/gitools-ui/src/main/java/org/gitools/ui/actions/MenuActionSet.java +++ b/gitools-ui/src/main/java/org/gitools/ui/actions/MenuActionSet.java @@ -86,12 +86,12 @@ public MenuActionSet() { DataActions.hideSelectedRowsAction, DataActions.showAllColumnsAction, DataActions.hideSelectedColumnsAction - }) + }), + DataActions.clusteringAction }), new ActionSet("Analysis", new BaseAction[] { AnalysisActions.correlations, 
AnalysisActions.combinations, - AnalysisActions.clusteringAction, // new ActionSet("Clustering", new BaseAction[] { // AnalysisActions.clusteringAction // }), diff --git a/gitools-ui/src/main/java/org/gitools/ui/actions/analysis/ClusteringAction.java b/gitools-ui/src/main/java/org/gitools/ui/actions/data/ClusteringAction.java similarity index 78% rename from gitools-ui/src/main/java/org/gitools/ui/actions/analysis/ClusteringAction.java rename to gitools-ui/src/main/java/org/gitools/ui/actions/data/ClusteringAction.java index 5764a2ed..982390fb 100644 --- a/gitools-ui/src/main/java/org/gitools/ui/actions/analysis/ClusteringAction.java +++ b/gitools-ui/src/main/java/org/gitools/ui/actions/data/ClusteringAction.java @@ -15,19 +15,16 @@ * under the License. */ -package org.gitools.ui.actions.analysis; +package org.gitools.ui.actions.data; import edu.upf.bg.progressmonitor.IProgressMonitor; import java.awt.event.ActionEvent; -import java.util.List; -import org.gitools.analysis.clustering.ClusteringAnalysis; -import org.gitools.analysis.clustering.ClusteringProcessor; +import java.util.Properties; +import org.gitools.matrix.clustering.MatrixViewClusterer; import org.gitools.heatmap.model.Heatmap; import org.gitools.matrix.model.IMatrixView; -import org.gitools.matrix.model.MatrixView; -import org.gitools.matrix.model.element.IElementAttribute; import org.gitools.ui.actions.ActionUtils; -import org.gitools.ui.analysis.clustering.dialog.ClusteringDialog; +import org.gitools.ui.dialog.clustering.ClusteringDialog; import org.gitools.ui.platform.AppFrame; import org.gitools.ui.platform.actions.BaseAction; import org.gitools.ui.platform.progress.JobRunnable; @@ -48,26 +45,22 @@ public boolean isEnabledByModel(Object model) { @Override public void actionPerformed(ActionEvent e) { - final IMatrixView matrixView = ActionUtils.getMatrixView(); + final IMatrixView matrixView = ActionUtils.getMatrixView(); if (matrixView == null) return; - - ClusteringDialog dlg = new 
ClusteringDialog(AppFrame.instance()); dlg.setAttributes(matrixView.getContents().getCellAttributes()); dlg.setVisible(true); if (dlg.getReturnStatus() != ClusteringDialog.RET_OK) { - AppFrame.instance().setStatusText("Filter cancelled."); + AppFrame.instance().setStatusText("Clustering cancelled."); return; } - final ClusteringAnalysis analysis = dlg.getAnalysis(); - - analysis.setData(matrixView); + final Properties clusterParameters = dlg.getClusterParameters(); JobThread.execute(AppFrame.instance(), new JobRunnable() { @Override public void run(IProgressMonitor monitor) { @@ -75,10 +68,9 @@ public void actionPerformed(ActionEvent e) { try { monitor.begin("Clustering ...", 1); - - new ClusteringProcessor(analysis).run(monitor); - + MatrixViewClusterer.cluster(matrixView, clusterParameters, monitor); monitor.end(); + } catch (Throwable ex) { monitor.exception(ex); diff --git a/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/ClusteringDialog.form b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/ClusteringDialog.form new file mode 100644 index 00000000..ab6e60d9 --- /dev/null +++ b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/ClusteringDialog.form @@ -0,0 +1,175 @@ +<?xml version="1.0" encoding="UTF-8" ?> + +<Form version="1.3" maxVersion="1.7" type="org.netbeans.modules.form.forminfo.JDialogFormInfo"> + <NonVisualComponents> + <Component class="javax.swing.ButtonGroup" name="applayGroup"> + </Component> + </NonVisualComponents> + <Properties> + <Property name="title" type="java.lang.String" value="Clustering analysis"/> + <Property name="locationByPlatform" type="boolean" value="true"/> + <Property name="minimumSize" type="java.awt.Dimension" editor="org.netbeans.beaninfo.editors.DimensionEditor"> + <Dimension value="[397, 248]"/> + </Property> + </Properties> + <SyntheticProperties> + <SyntheticProperty name="formSizePolicy" type="int" value="2"/> + </SyntheticProperties> + <AuxValues> + <AuxValue 
name="FormSettings_autoResourcing" type="java.lang.Integer" value="0"/> + <AuxValue name="FormSettings_autoSetComponentName" type="java.lang.Boolean" value="false"/> + <AuxValue name="FormSettings_generateFQN" type="java.lang.Boolean" value="true"/> + <AuxValue name="FormSettings_generateMnemonicsCode" type="java.lang.Boolean" value="false"/> + <AuxValue name="FormSettings_i18nAutoMode" type="java.lang.Boolean" value="false"/> + <AuxValue name="FormSettings_layoutCodeTarget" type="java.lang.Integer" value="1"/> + <AuxValue name="FormSettings_listenerGenerationStyle" type="java.lang.Integer" value="0"/> + <AuxValue name="FormSettings_variablesLocal" type="java.lang.Boolean" value="false"/> + <AuxValue name="FormSettings_variablesModifier" type="java.lang.Integer" value="2"/> + </AuxValues> + + <Layout> + <DimensionLayout dim="0"> + <Group type="103" groupAlignment="0" attributes="0"> + <Group type="102" attributes="0"> + <EmptySpace max="-2" attributes="0"/> + <Group type="103" groupAlignment="0" attributes="0"> + <Component id="jSeparator1" alignment="0" pref="373" max="32767" attributes="0"/> + <Group type="102" alignment="1" attributes="0"> + <Component id="okButton" min="-2" pref="67" max="-2" attributes="0"/> + <EmptySpace max="-2" attributes="0"/> + <Component id="cancelButton" min="-2" max="-2" attributes="0"/> + </Group> + <Group type="102" alignment="0" attributes="0"> + <Component id="jLabel3" min="-2" max="-2" attributes="0"/> + <EmptySpace min="-2" pref="40" max="-2" attributes="0"/> + <Component id="algorithmTypeCombo" min="-2" pref="202" max="-2" attributes="2"/> + <EmptySpace type="separate" max="-2" attributes="0"/> + <Component id="jButton1" max="32767" attributes="0"/> + </Group> + <Group type="102" alignment="0" attributes="0"> + <Component id="jLabel6" min="-2" max="-2" attributes="0"/> + <EmptySpace min="-2" max="-2" attributes="0"/> + <Component id="dataClustCombo" pref="284" max="32767" attributes="3"/> + </Group> + <Component id="rowsRadio" 
alignment="0" min="-2" max="-2" attributes="0"/> + <Component id="columnsRadio" alignment="0" min="-2" max="-2" attributes="0"/> + <Component id="jLabel5" alignment="0" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace max="-2" attributes="0"/> + </Group> + </Group> + </DimensionLayout> + <DimensionLayout dim="1"> + <Group type="103" groupAlignment="0" attributes="0"> + <Group type="102" alignment="0" attributes="0"> + <EmptySpace max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="algorithmTypeCombo" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="jLabel3" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="jButton1" alignment="3" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace min="-2" pref="19" max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="jLabel6" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="dataClustCombo" alignment="3" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace type="separate" max="-2" attributes="0"/> + <Component id="jLabel5" min="-2" max="-2" attributes="0"/> + <EmptySpace min="-2" pref="14" max="-2" attributes="0"/> + <Component id="columnsRadio" min="-2" max="-2" attributes="0"/> + <EmptySpace min="-2" max="-2" attributes="0"/> + <Component id="rowsRadio" min="-2" max="-2" attributes="0"/> + <EmptySpace max="32767" attributes="0"/> + <Component id="jSeparator1" min="-2" pref="6" max="-2" attributes="0"/> + <EmptySpace max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="cancelButton" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="okButton" alignment="3" min="-2" pref="26" max="-2" attributes="0"/> + </Group> + <EmptySpace max="-2" attributes="0"/> + </Group> + </Group> + </DimensionLayout> + </Layout> + <SubComponents> + <Component class="javax.swing.JLabel" name="jLabel3"> + <Properties> + <Property 
name="text" type="java.lang.String" value="Method :"/> + </Properties> + </Component> + <Component class="javax.swing.JRadioButton" name="rowsRadio"> + <Properties> + <Property name="buttonGroup" type="javax.swing.ButtonGroup" editor="org.netbeans.modules.form.RADComponent$ButtonGroupPropertyEditor"> + <ComponentRef name="applayGroup"/> + </Property> + <Property name="text" type="java.lang.String" value="rows"/> + </Properties> + </Component> + <Component class="javax.swing.JRadioButton" name="columnsRadio"> + <Properties> + <Property name="buttonGroup" type="javax.swing.ButtonGroup" editor="org.netbeans.modules.form.RADComponent$ButtonGroupPropertyEditor"> + <ComponentRef name="applayGroup"/> + </Property> + <Property name="selected" type="boolean" value="true"/> + <Property name="text" type="java.lang.String" value="columns"/> + </Properties> + </Component> + <Component class="javax.swing.JComboBox" name="algorithmTypeCombo"> + <Properties> + <Property name="model" type="javax.swing.ComboBoxModel" editor="org.netbeans.modules.form.editors2.ComboBoxModelEditor"> + <StringArray count="2"> + <StringItem index="0" value="Hierarchical clustering"/> + <StringItem index="1" value="K-means"/> + </StringArray> + </Property> + </Properties> + <Events> + <EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="algorithmTypeComboActionPerformed"/> + </Events> + </Component> + <Component class="javax.swing.JLabel" name="jLabel5"> + <Properties> + <Property name="text" type="java.lang.String" value="Apply to :"/> + </Properties> + </Component> + <Component class="javax.swing.JLabel" name="jLabel6"> + <Properties> + <Property name="text" type="java.lang.String" value="Values from :"/> + </Properties> + </Component> + <Component class="javax.swing.JComboBox" name="dataClustCombo"> + <Properties> + <Property name="model" type="javax.swing.ComboBoxModel" 
editor="org.netbeans.modules.form.editors2.ComboBoxModelEditor"> + <StringArray count="0"/> + </Property> + </Properties> + </Component> + <Component class="javax.swing.JSeparator" name="jSeparator1"> + </Component> + <Component class="javax.swing.JButton" name="okButton"> + <Properties> + <Property name="text" type="java.lang.String" value="OK"/> + </Properties> + <Events> + <EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="okButtonActionPerformed"/> + </Events> + </Component> + <Component class="javax.swing.JButton" name="cancelButton"> + <Properties> + <Property name="text" type="java.lang.String" value="Cancel"/> + <Property name="defaultCapable" type="boolean" value="false"/> + </Properties> + <Events> + <EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="cancelButtonActionPerformed"/> + </Events> + </Component> + <Component class="javax.swing.JButton" name="jButton1"> + <Properties> + <Property name="text" type="java.lang.String" value="Config ..."/> + </Properties> + <Events> + <EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="jButton1ActionPerformed"/> + </Events> + </Component> + </SubComponents> +</Form> diff --git a/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/ClusteringDialog.java b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/ClusteringDialog.java new file mode 100644 index 00000000..16e88286 --- /dev/null +++ b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/ClusteringDialog.java @@ -0,0 +1,291 @@ +/* + * Copyright 2010 xrafael. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * under the License. + */ +package org.gitools.ui.dialog.clustering; + +import java.util.List; +import java.util.Properties; +import javax.swing.DefaultComboBoxModel; +import org.gitools.matrix.model.element.IElementAttribute; + +public class ClusteringDialog extends javax.swing.JDialog { + + // model wrappers + private static class MatrixAttributeWrapper { + + private IElementAttribute attribute; + + public MatrixAttributeWrapper(IElementAttribute a) { + this.attribute = a; + } + + public IElementAttribute getMatrixAttribute() { + return attribute; + } + + public void setMatrixAttribute(IElementAttribute a) { + this.attribute = a; + } + + @Override + public String toString() { + return attribute.getName(); + } + } + /** A return status code - returned if Cancel button has been pressed */ + public static final int RET_CANCEL = 0; + /** A return status code - returned if OK button has been pressed */ + public static final int RET_OK = 1; + private int returnStatus = RET_CANCEL; + + /** Creates new form clusteringPage */ + public ClusteringDialog(java.awt.Window parent) { + + super(parent); + setModal(true); + + initComponents(); + + validate(); + + } + + /** @return the return status of this dialog - one of RET_OK or RET_CANCEL */ + public int getReturnStatus() { + return returnStatus; + } + + + private void doClose(int retStatus) { + returnStatus = retStatus; + setVisible(false); + dispose(); + } + + /** This method is called from within the constructor to + * initialize the form. + * WARNING: Do NOT modify this code. 
The content of this method is + * always regenerated by the Form Editor. + */ + @SuppressWarnings("unchecked") + // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents + private void initComponents() { + + applayGroup = new javax.swing.ButtonGroup(); + jLabel3 = new javax.swing.JLabel(); + rowsRadio = new javax.swing.JRadioButton(); + columnsRadio = new javax.swing.JRadioButton(); + algorithmTypeCombo = new javax.swing.JComboBox(); + jLabel5 = new javax.swing.JLabel(); + jLabel6 = new javax.swing.JLabel(); + dataClustCombo = new javax.swing.JComboBox(); + jSeparator1 = new javax.swing.JSeparator(); + okButton = new javax.swing.JButton(); + cancelButton = new javax.swing.JButton(); + jButton1 = new javax.swing.JButton(); + + setTitle("Clustering analysis"); + setLocationByPlatform(true); + setMinimumSize(new java.awt.Dimension(397, 248)); + + jLabel3.setText("Method :"); + + applayGroup.add(rowsRadio); + rowsRadio.setText("rows"); + + applayGroup.add(columnsRadio); + columnsRadio.setSelected(true); + columnsRadio.setText("columns"); + + algorithmTypeCombo.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Hierarchical clustering", "K-means" })); + algorithmTypeCombo.addActionListener(new java.awt.event.ActionListener() { + public void actionPerformed(java.awt.event.ActionEvent evt) { + algorithmTypeComboActionPerformed(evt); + } + }); + + jLabel5.setText("Apply to :"); + + jLabel6.setText("Values from :"); + + okButton.setText("OK"); + okButton.addActionListener(new java.awt.event.ActionListener() { + public void actionPerformed(java.awt.event.ActionEvent evt) { + okButtonActionPerformed(evt); + } + }); + + cancelButton.setText("Cancel"); + cancelButton.setDefaultCapable(false); + cancelButton.addActionListener(new java.awt.event.ActionListener() { + public void actionPerformed(java.awt.event.ActionEvent evt) { + cancelButtonActionPerformed(evt); + } + }); + + jButton1.setText("Config ..."); + 
jButton1.addActionListener(new java.awt.event.ActionListener() { + public void actionPerformed(java.awt.event.ActionEvent evt) { + jButton1ActionPerformed(evt); + } + }); + + javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane()); + getContentPane().setLayout(layout); + layout.setHorizontalGroup( + layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addGroup(layout.createSequentialGroup() + .addContainerGap() + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addComponent(jSeparator1, javax.swing.GroupLayout.DEFAULT_SIZE, 373, Short.MAX_VALUE) + .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup() + .addComponent(okButton, javax.swing.GroupLayout.PREFERRED_SIZE, 67, javax.swing.GroupLayout.PREFERRED_SIZE) + .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) + .addComponent(cancelButton)) + .addGroup(layout.createSequentialGroup() + .addComponent(jLabel3) + .addGap(40, 40, 40) + .addComponent(algorithmTypeCombo, javax.swing.GroupLayout.PREFERRED_SIZE, 202, javax.swing.GroupLayout.PREFERRED_SIZE) + .addGap(18, 18, 18) + .addComponent(jButton1, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)) + .addGroup(layout.createSequentialGroup() + .addComponent(jLabel6) + .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) + .addComponent(dataClustCombo, 0, 284, Short.MAX_VALUE)) + .addComponent(rowsRadio) + .addComponent(columnsRadio) + .addComponent(jLabel5)) + .addContainerGap()) + ); + layout.setVerticalGroup( + layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addGroup(layout.createSequentialGroup() + .addContainerGap() + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(algorithmTypeCombo, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE) + 
.addComponent(jLabel3) + .addComponent(jButton1)) + .addGap(19, 19, 19) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(jLabel6) + .addComponent(dataClustCombo, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)) + .addGap(18, 18, 18) + .addComponent(jLabel5) + .addGap(14, 14, 14) + .addComponent(columnsRadio) + .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) + .addComponent(rowsRadio) + .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE) + .addComponent(jSeparator1, javax.swing.GroupLayout.PREFERRED_SIZE, 6, javax.swing.GroupLayout.PREFERRED_SIZE) + .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(cancelButton) + .addComponent(okButton, javax.swing.GroupLayout.PREFERRED_SIZE, 26, javax.swing.GroupLayout.PREFERRED_SIZE)) + .addContainerGap()) + ); + }// </editor-fold>//GEN-END:initComponents + + private void algorithmTypeComboActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_algorithmTypeComboActionPerformed + + + }//GEN-LAST:event_algorithmTypeComboActionPerformed + + private void okButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_okButtonActionPerformed + doClose(RET_OK); +}//GEN-LAST:event_okButtonActionPerformed + + private void cancelButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_cancelButtonActionPerformed + doClose(RET_CANCEL); +}//GEN-LAST:event_cancelButtonActionPerformed + + private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed + + if (algorithmTypeCombo.getSelectedItem().toString().toLowerCase().equals("k-means")){ + KmeansParamsDialog dlg = new KmeansParamsDialog(this); + dlg.setVisible(true); + }else{ + 
CobwebParamsDialog dlg = new CobwebParamsDialog(this); + dlg.setVisible(true); + } + + }//GEN-LAST:event_jButton1ActionPerformed + // Variables declaration - do not modify//GEN-BEGIN:variables + private javax.swing.JComboBox algorithmTypeCombo; + private javax.swing.ButtonGroup applayGroup; + private javax.swing.JButton cancelButton; + private javax.swing.JRadioButton columnsRadio; + private javax.swing.JComboBox dataClustCombo; + private javax.swing.JButton jButton1; + private javax.swing.JLabel jLabel3; + private javax.swing.JLabel jLabel5; + private javax.swing.JLabel jLabel6; + private javax.swing.JSeparator jSeparator1; + private javax.swing.JButton okButton; + private javax.swing.JRadioButton rowsRadio; + // End of variables declaration//GEN-END:variables + + private Properties params = new Properties(); + + public Properties getParams() { + return params; + } + + public void setParams(Properties params) { + this.params = params; + } + + public Properties getClusterParameters() { + + Properties clusterParams = new Properties(); + + clusterParams.put("method", algorithmTypeCombo.getSelectedItem().toString().toLowerCase()); + clusterParams.put("index", dataClustCombo.getSelectedItem()); + clusterParams.put("transpose", rowsRadio.isSelected()); + + if (algorithmTypeCombo.getSelectedItem().toString().toLowerCase().equals("k-means")) { + + clusterParams.put("iterations", params.getProperty("iterations","500")); + clusterParams.put("seed", params.getProperty("seed","10")); + clusterParams.put("k", params.getProperty("k","2")); + clusterParams.put("distance", params.getProperty("distance","euclidean")); + + }else{ + + clusterParams.put("cutoff", params.getProperty("cutoff","0.0028")); + clusterParams.put("seed", params.getProperty("seed","42")); + clusterParams.put("acuity", params.getProperty("acuity","1.0")); + } + + return clusterParams; + } + + public void setAttributes(List<IElementAttribute> cellAttributes) { + + DefaultComboBoxModel model = new 
DefaultComboBoxModel(); + MatrixAttributeWrapper attrWrapper = null; + for (IElementAttribute attr : cellAttributes) { + attrWrapper = new MatrixAttributeWrapper(attr); + model.addElement(attrWrapper); + } + + dataClustCombo.setModel(model); + + } + + public boolean isTransposeEnabled() { + return rowsRadio.isSelected(); + } +} diff --git a/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/CobwebParamsDialog.form b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/CobwebParamsDialog.form new file mode 100644 index 00000000..3f2050ac --- /dev/null +++ b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/CobwebParamsDialog.form @@ -0,0 +1,137 @@ +<?xml version="1.0" encoding="UTF-8" ?> + +<Form version="1.3" maxVersion="1.7" type="org.netbeans.modules.form.forminfo.JDialogFormInfo"> + <Properties> + <Property name="title" type="java.lang.String" value="Hierarchical parameters"/> + <Property name="locationByPlatform" type="boolean" value="true"/> + <Property name="minimumSize" type="java.awt.Dimension" editor="org.netbeans.beaninfo.editors.DimensionEditor"> + <Dimension value="[243, 200]"/> + </Property> + </Properties> + <SyntheticProperties> + <SyntheticProperty name="formSizePolicy" type="int" value="2"/> + </SyntheticProperties> + <AuxValues> + <AuxValue name="FormSettings_autoResourcing" type="java.lang.Integer" value="0"/> + <AuxValue name="FormSettings_autoSetComponentName" type="java.lang.Boolean" value="false"/> + <AuxValue name="FormSettings_generateFQN" type="java.lang.Boolean" value="true"/> + <AuxValue name="FormSettings_generateMnemonicsCode" type="java.lang.Boolean" value="false"/> + <AuxValue name="FormSettings_i18nAutoMode" type="java.lang.Boolean" value="false"/> + <AuxValue name="FormSettings_layoutCodeTarget" type="java.lang.Integer" value="1"/> + <AuxValue name="FormSettings_listenerGenerationStyle" type="java.lang.Integer" value="0"/> + <AuxValue name="FormSettings_variablesLocal" type="java.lang.Boolean" value="false"/> + 
<AuxValue name="FormSettings_variablesModifier" type="java.lang.Integer" value="2"/> + </AuxValues> + + <Layout> + <DimensionLayout dim="0"> + <Group type="103" groupAlignment="0" attributes="0"> + <Group type="102" attributes="0"> + <EmptySpace max="-2" attributes="0"/> + <Group type="103" groupAlignment="0" attributes="0"> + <Group type="102" alignment="0" attributes="0"> + <Group type="103" groupAlignment="0" attributes="0"> + <Component id="jLabel3" alignment="0" min="-2" max="-2" attributes="0"/> + <Component id="jLabel2" alignment="0" min="-2" max="-2" attributes="0"/> + <Component id="jLabel1" alignment="0" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace max="-2" attributes="0"/> + <Group type="103" groupAlignment="1" attributes="0"> + <Component id="seedField" pref="171" max="32767" attributes="1"/> + <Component id="cutOffField" alignment="1" pref="171" max="32767" attributes="1"/> + <Component id="acuityField" alignment="1" pref="171" max="32767" attributes="1"/> + </Group> + </Group> + <Component id="jSeparator1" alignment="0" pref="229" max="32767" attributes="0"/> + <Group type="102" alignment="1" attributes="0"> + <Component id="okButton" min="-2" pref="67" max="-2" attributes="0"/> + <EmptySpace max="-2" attributes="0"/> + <Component id="cancelButton" min="-2" max="-2" attributes="0"/> + </Group> + </Group> + <EmptySpace max="-2" attributes="0"/> + </Group> + </Group> + </DimensionLayout> + <DimensionLayout dim="1"> + <Group type="103" groupAlignment="0" attributes="0"> + <Group type="102" alignment="1" attributes="0"> + <EmptySpace min="-2" max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="acuityField" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="jLabel1" alignment="3" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace min="-2" pref="18" max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="cutOffField" alignment="3" 
min="-2" max="-2" attributes="0"/> + <Component id="jLabel2" alignment="3" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace type="separate" max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="seedField" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="jLabel3" alignment="3" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace type="separate" max="-2" attributes="0"/> + <Component id="jSeparator1" min="-2" pref="6" max="-2" attributes="0"/> + <EmptySpace max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="cancelButton" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="okButton" alignment="3" min="-2" pref="26" max="-2" attributes="0"/> + </Group> + <EmptySpace max="32767" attributes="0"/> + </Group> + </Group> + </DimensionLayout> + </Layout> + <SubComponents> + <Component class="javax.swing.JLabel" name="jLabel1"> + <Properties> + <Property name="text" type="java.lang.String" value="Acuity : "/> + </Properties> + </Component> + <Component class="javax.swing.JTextField" name="acuityField"> + <Properties> + <Property name="text" type="java.lang.String" value="1.0"/> + </Properties> + </Component> + <Component class="javax.swing.JLabel" name="jLabel2"> + <Properties> + <Property name="text" type="java.lang.String" value="Cutoff : "/> + </Properties> + </Component> + <Component class="javax.swing.JTextField" name="cutOffField"> + <Properties> + <Property name="text" type="java.lang.String" value="0.0028"/> + </Properties> + </Component> + <Component class="javax.swing.JLabel" name="jLabel3"> + <Properties> + <Property name="text" type="java.lang.String" value="Seed : "/> + </Properties> + </Component> + <Component class="javax.swing.JTextField" name="seedField"> + <Properties> + <Property name="text" type="java.lang.String" value="42"/> + </Properties> + </Component> + <Component class="javax.swing.JSeparator" 
name="jSeparator1"> + </Component> + <Component class="javax.swing.JButton" name="okButton"> + <Properties> + <Property name="text" type="java.lang.String" value="OK"/> + </Properties> + <Events> + <EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="okButtonActionPerformed"/> + </Events> + </Component> + <Component class="javax.swing.JButton" name="cancelButton"> + <Properties> + <Property name="text" type="java.lang.String" value="Cancel"/> + <Property name="defaultCapable" type="boolean" value="false"/> + </Properties> + <Events> + <EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="cancelButtonActionPerformed"/> + </Events> + </Component> + </SubComponents> +</Form> diff --git a/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/CobwebParamsDialog.java b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/CobwebParamsDialog.java new file mode 100644 index 00000000..2504cc50 --- /dev/null +++ b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/CobwebParamsDialog.java @@ -0,0 +1,178 @@ +/* + * Copyright 2010 xrafael. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * under the License. 
+ */ + + +package org.gitools.ui.dialog.clustering; + +import java.util.Properties; + +public class CobwebParamsDialog extends javax.swing.JDialog { + + /** A return status code - returned if Cancel button has been pressed */ + public static final int RET_CANCEL = 0; + /** A return status code - returned if OK button has been pressed */ + public static final int RET_OK = 1; + private int returnStatus = RET_CANCEL; + + private ClusteringDialog parent; + + + /** Creates new form cobwebParamsPanel */ + public CobwebParamsDialog(ClusteringDialog parent) { + + super(parent); + setModal(true); + initComponents(); + + this.parent = parent; + + } + + /** This method is called from within the constructor to + * initialize the form. + * WARNING: Do NOT modify this code. The content of this method is + * always regenerated by the Form Editor. + */ + @SuppressWarnings("unchecked") + // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents + private void initComponents() { + + jLabel1 = new javax.swing.JLabel(); + acuityField = new javax.swing.JTextField(); + jLabel2 = new javax.swing.JLabel(); + cutOffField = new javax.swing.JTextField(); + jLabel3 = new javax.swing.JLabel(); + seedField = new javax.swing.JTextField(); + jSeparator1 = new javax.swing.JSeparator(); + okButton = new javax.swing.JButton(); + cancelButton = new javax.swing.JButton(); + + setTitle("Hierarchical parameters"); + setLocationByPlatform(true); + setMinimumSize(new java.awt.Dimension(243, 200)); + + jLabel1.setText("Acuity : "); + + acuityField.setText("1.0"); + + jLabel2.setText("Cutoff : "); + + cutOffField.setText("0.0028"); + + jLabel3.setText("Seed : "); + + seedField.setText("42"); + + okButton.setText("OK"); + okButton.addActionListener(new java.awt.event.ActionListener() { + public void actionPerformed(java.awt.event.ActionEvent evt) { + okButtonActionPerformed(evt); + } + }); + + cancelButton.setText("Cancel"); + cancelButton.setDefaultCapable(false); + 
cancelButton.addActionListener(new java.awt.event.ActionListener() { + public void actionPerformed(java.awt.event.ActionEvent evt) { + cancelButtonActionPerformed(evt); + } + }); + + javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane()); + getContentPane().setLayout(layout); + layout.setHorizontalGroup( + layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addGroup(layout.createSequentialGroup() + .addContainerGap() + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addGroup(layout.createSequentialGroup() + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addComponent(jLabel3) + .addComponent(jLabel2) + .addComponent(jLabel1)) + .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING) + .addComponent(seedField, javax.swing.GroupLayout.DEFAULT_SIZE, 171, Short.MAX_VALUE) + .addComponent(cutOffField, javax.swing.GroupLayout.DEFAULT_SIZE, 171, Short.MAX_VALUE) + .addComponent(acuityField, javax.swing.GroupLayout.DEFAULT_SIZE, 171, Short.MAX_VALUE))) + .addComponent(jSeparator1, javax.swing.GroupLayout.DEFAULT_SIZE, 229, Short.MAX_VALUE) + .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup() + .addComponent(okButton, javax.swing.GroupLayout.PREFERRED_SIZE, 67, javax.swing.GroupLayout.PREFERRED_SIZE) + .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) + .addComponent(cancelButton))) + .addContainerGap()) + ); + layout.setVerticalGroup( + layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup() + .addContainerGap() + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(acuityField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, 
javax.swing.GroupLayout.PREFERRED_SIZE) + .addComponent(jLabel1)) + .addGap(18, 18, 18) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(cutOffField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE) + .addComponent(jLabel2)) + .addGap(18, 18, 18) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(seedField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE) + .addComponent(jLabel3)) + .addGap(18, 18, 18) + .addComponent(jSeparator1, javax.swing.GroupLayout.PREFERRED_SIZE, 6, javax.swing.GroupLayout.PREFERRED_SIZE) + .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(cancelButton) + .addComponent(okButton, javax.swing.GroupLayout.PREFERRED_SIZE, 26, javax.swing.GroupLayout.PREFERRED_SIZE)) + .addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)) + ); + }// </editor-fold>//GEN-END:initComponents + + private void okButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_okButtonActionPerformed + Properties params = new Properties(); + if (cutOffField.getText() != null && !cutOffField.getText().equals("")) params.put("cutoff",cutOffField.getText()); + if (seedField.getText() != null && !seedField.getText().equals("")) params.put("seed",seedField.getText()); + if (acuityField.getText() != null && !acuityField.getText().equals("")) params.put("acuity",acuityField.getText()); + + parent.setParams(params); + doClose(RET_OK); +}//GEN-LAST:event_okButtonActionPerformed + + private void cancelButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_cancelButtonActionPerformed + doClose(RET_CANCEL); +}//GEN-LAST:event_cancelButtonActionPerformed + + + // Variables declaration - do 
not modify//GEN-BEGIN:variables + private javax.swing.JTextField acuityField; + private javax.swing.JButton cancelButton; + private javax.swing.JTextField cutOffField; + private javax.swing.JLabel jLabel1; + private javax.swing.JLabel jLabel2; + private javax.swing.JLabel jLabel3; + private javax.swing.JSeparator jSeparator1; + private javax.swing.JButton okButton; + private javax.swing.JTextField seedField; + // End of variables declaration//GEN-END:variables + + + + private void doClose(int retStatus) { + returnStatus = retStatus; + setVisible(false); + dispose(); + } +} diff --git a/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/KmeansParamsDialog.form b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/KmeansParamsDialog.form new file mode 100644 index 00000000..2982e493 --- /dev/null +++ b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/KmeansParamsDialog.form @@ -0,0 +1,159 @@ +<?xml version="1.0" encoding="UTF-8" ?> + +<Form version="1.3" maxVersion="1.7" type="org.netbeans.modules.form.forminfo.JDialogFormInfo"> + <Properties> + <Property name="title" type="java.lang.String" value="K-means parameters"/> + <Property name="locationByPlatform" type="boolean" value="true"/> + <Property name="minimumSize" type="java.awt.Dimension" editor="org.netbeans.beaninfo.editors.DimensionEditor"> + <Dimension value="[297, 245]"/> + </Property> + </Properties> + <SyntheticProperties> + <SyntheticProperty name="formSizePolicy" type="int" value="2"/> + </SyntheticProperties> + <AuxValues> + <AuxValue name="FormSettings_autoResourcing" type="java.lang.Integer" value="0"/> + <AuxValue name="FormSettings_autoSetComponentName" type="java.lang.Boolean" value="false"/> + <AuxValue name="FormSettings_generateFQN" type="java.lang.Boolean" value="true"/> + <AuxValue name="FormSettings_generateMnemonicsCode" type="java.lang.Boolean" value="false"/> + <AuxValue name="FormSettings_i18nAutoMode" type="java.lang.Boolean" value="false"/> + <AuxValue 
name="FormSettings_layoutCodeTarget" type="java.lang.Integer" value="1"/> + <AuxValue name="FormSettings_listenerGenerationStyle" type="java.lang.Integer" value="0"/> + <AuxValue name="FormSettings_variablesLocal" type="java.lang.Boolean" value="false"/> + <AuxValue name="FormSettings_variablesModifier" type="java.lang.Integer" value="2"/> + </AuxValues> + + <Layout> + <DimensionLayout dim="0"> + <Group type="103" groupAlignment="0" attributes="0"> + <Group type="102" attributes="0"> + <EmptySpace max="-2" attributes="0"/> + <Group type="103" groupAlignment="0" attributes="0"> + <Group type="102" alignment="0" attributes="0"> + <Group type="103" groupAlignment="0" attributes="0"> + <Component id="jLabel7" alignment="0" min="-2" max="-2" attributes="0"/> + <Component id="jLabel1" alignment="0" min="-2" max="-2" attributes="0"/> + <Component id="jLabel2" alignment="0" min="-2" max="-2" attributes="0"/> + <Component id="jLabel3" alignment="0" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace min="-2" pref="12" max="-2" attributes="0"/> + <Group type="103" groupAlignment="0" attributes="0"> + <Component id="distAlgCombo" alignment="0" pref="142" max="32767" attributes="1"/> + <Component id="seedField" alignment="0" pref="142" max="32767" attributes="1"/> + <Component id="iterField" alignment="0" pref="142" max="32767" attributes="1"/> + <Component id="kField" alignment="1" pref="142" max="32767" attributes="1"/> + </Group> + </Group> + <Component id="jSeparator1" alignment="0" pref="273" max="32767" attributes="0"/> + <Group type="102" alignment="1" attributes="0"> + <Component id="okButton" min="-2" pref="67" max="-2" attributes="0"/> + <EmptySpace max="-2" attributes="0"/> + <Component id="cancelButton" min="-2" max="-2" attributes="0"/> + </Group> + </Group> + <EmptySpace max="-2" attributes="0"/> + </Group> + </Group> + </DimensionLayout> + <DimensionLayout dim="1"> + <Group type="103" groupAlignment="0" attributes="0"> + <Group type="102" alignment="0" 
attributes="0"> + <EmptySpace max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="jLabel7" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="distAlgCombo" alignment="3" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace type="separate" max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="jLabel1" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="kField" alignment="3" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace min="-2" pref="18" max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="iterField" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="jLabel2" alignment="3" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace type="separate" max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="seedField" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="jLabel3" alignment="3" min="-2" max="-2" attributes="0"/> + </Group> + <EmptySpace type="separate" max="-2" attributes="0"/> + <Component id="jSeparator1" min="-2" pref="6" max="-2" attributes="0"/> + <EmptySpace max="-2" attributes="0"/> + <Group type="103" groupAlignment="3" attributes="0"> + <Component id="cancelButton" alignment="3" min="-2" max="-2" attributes="0"/> + <Component id="okButton" alignment="3" min="-2" pref="26" max="-2" attributes="0"/> + </Group> + <EmptySpace max="32767" attributes="0"/> + </Group> + </Group> + </DimensionLayout> + </Layout> + <SubComponents> + <Component class="javax.swing.JLabel" name="jLabel1"> + <Properties> + <Property name="text" type="java.lang.String" value="Num. 
Clusters : "/> + </Properties> + </Component> + <Component class="javax.swing.JTextField" name="kField"> + <Properties> + <Property name="text" type="java.lang.String" value="2"/> + </Properties> + </Component> + <Component class="javax.swing.JLabel" name="jLabel2"> + <Properties> + <Property name="text" type="java.lang.String" value="Max Iterations : "/> + </Properties> + </Component> + <Component class="javax.swing.JTextField" name="iterField"> + <Properties> + <Property name="text" type="java.lang.String" value="500"/> + </Properties> + </Component> + <Component class="javax.swing.JLabel" name="jLabel3"> + <Properties> + <Property name="text" type="java.lang.String" value="Seed : "/> + </Properties> + </Component> + <Component class="javax.swing.JTextField" name="seedField"> + <Properties> + <Property name="text" type="java.lang.String" value="10"/> + </Properties> + </Component> + <Component class="javax.swing.JLabel" name="jLabel7"> + <Properties> + <Property name="text" type="java.lang.String" value="Distance algorithm : "/> + </Properties> + </Component> + <Component class="javax.swing.JComboBox" name="distAlgCombo"> + <Properties> + <Property name="model" type="javax.swing.ComboBoxModel" editor="org.netbeans.modules.form.editors2.ComboBoxModelEditor"> + <StringArray count="2"> + <StringItem index="0" value="Euclidean"/> + <StringItem index="1" value="Manhattan"/> + </StringArray> + </Property> + </Properties> + </Component> + <Component class="javax.swing.JButton" name="cancelButton"> + <Properties> + <Property name="text" type="java.lang.String" value="Cancel"/> + <Property name="defaultCapable" type="boolean" value="false"/> + </Properties> + <Events> + <EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="cancelButtonActionPerformed"/> + </Events> + </Component> + <Component class="javax.swing.JButton" name="okButton"> + <Properties> + <Property name="text" type="java.lang.String" 
value="OK"/> + </Properties> + <Events> + <EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="okButtonActionPerformed"/> + </Events> + </Component> + <Component class="javax.swing.JSeparator" name="jSeparator1"> + </Component> + </SubComponents> +</Form> diff --git a/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/KmeansParamsDialog.java b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/KmeansParamsDialog.java new file mode 100644 index 00000000..63a737bf --- /dev/null +++ b/gitools-ui/src/main/java/org/gitools/ui/dialog/clustering/KmeansParamsDialog.java @@ -0,0 +1,192 @@ +/* + * Copyright 2010 xrafael. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * under the License. 
+ */ +package org.gitools.ui.dialog.clustering; + +import java.util.Properties; + +public class KmeansParamsDialog extends javax.swing.JDialog { + + /** A return status code - returned if Cancel button has been pressed */ + public static final int RET_CANCEL = 0; + /** A return status code - returned if OK button has been pressed */ + public static final int RET_OK = 1; + + private int returnStatus = RET_CANCEL; + + private ClusteringDialog parent; + + /** Creates new form cobwebParamsPanel */ + public KmeansParamsDialog(ClusteringDialog parent) { + + super(parent); + setModal(true); + initComponents(); + + this.parent = parent; + } + + /** This method is called from within the constructor to + * initialize the form. + * WARNING: Do NOT modify this code. The content of this method is + * always regenerated by the Form Editor. + */ + @SuppressWarnings("unchecked") + // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents + private void initComponents() { + + jLabel1 = new javax.swing.JLabel(); + kField = new javax.swing.JTextField(); + jLabel2 = new javax.swing.JLabel(); + iterField = new javax.swing.JTextField(); + jLabel3 = new javax.swing.JLabel(); + seedField = new javax.swing.JTextField(); + jLabel7 = new javax.swing.JLabel(); + distAlgCombo = new javax.swing.JComboBox(); + cancelButton = new javax.swing.JButton(); + okButton = new javax.swing.JButton(); + jSeparator1 = new javax.swing.JSeparator(); + + setTitle("K-means parameters"); + setLocationByPlatform(true); + setMinimumSize(new java.awt.Dimension(297, 245)); + + jLabel1.setText("Num. 
Clusters : "); + + kField.setText("2"); + + jLabel2.setText("Max Iterations : "); + + iterField.setText("500"); + + jLabel3.setText("Seed : "); + + seedField.setText("10"); + + jLabel7.setText("Distance algorithm : "); + + distAlgCombo.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Euclidean", "Manhattan" })); + + cancelButton.setText("Cancel"); + cancelButton.setDefaultCapable(false); + cancelButton.addActionListener(new java.awt.event.ActionListener() { + public void actionPerformed(java.awt.event.ActionEvent evt) { + cancelButtonActionPerformed(evt); + } + }); + + okButton.setText("OK"); + okButton.addActionListener(new java.awt.event.ActionListener() { + public void actionPerformed(java.awt.event.ActionEvent evt) { + okButtonActionPerformed(evt); + } + }); + + javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane()); + getContentPane().setLayout(layout); + layout.setHorizontalGroup( + layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addGroup(layout.createSequentialGroup() + .addContainerGap() + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addGroup(layout.createSequentialGroup() + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addComponent(jLabel7) + .addComponent(jLabel1) + .addComponent(jLabel2) + .addComponent(jLabel3)) + .addGap(12, 12, 12) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addComponent(distAlgCombo, 0, 142, Short.MAX_VALUE) + .addComponent(seedField, javax.swing.GroupLayout.DEFAULT_SIZE, 142, Short.MAX_VALUE) + .addComponent(iterField, javax.swing.GroupLayout.DEFAULT_SIZE, 142, Short.MAX_VALUE) + .addComponent(kField, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.DEFAULT_SIZE, 142, Short.MAX_VALUE))) + .addComponent(jSeparator1, javax.swing.GroupLayout.DEFAULT_SIZE, 273, Short.MAX_VALUE) + .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, 
layout.createSequentialGroup() + .addComponent(okButton, javax.swing.GroupLayout.PREFERRED_SIZE, 67, javax.swing.GroupLayout.PREFERRED_SIZE) + .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) + .addComponent(cancelButton))) + .addContainerGap()) + ); + layout.setVerticalGroup( + layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) + .addGroup(layout.createSequentialGroup() + .addContainerGap() + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(jLabel7) + .addComponent(distAlgCombo, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)) + .addGap(18, 18, 18) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(jLabel1) + .addComponent(kField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)) + .addGap(18, 18, 18) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(iterField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE) + .addComponent(jLabel2)) + .addGap(18, 18, 18) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(seedField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE) + .addComponent(jLabel3)) + .addGap(18, 18, 18) + .addComponent(jSeparator1, javax.swing.GroupLayout.PREFERRED_SIZE, 6, javax.swing.GroupLayout.PREFERRED_SIZE) + .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) + .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) + .addComponent(cancelButton) + .addComponent(okButton, javax.swing.GroupLayout.PREFERRED_SIZE, 26, javax.swing.GroupLayout.PREFERRED_SIZE)) + 
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)) + ); + }// </editor-fold>//GEN-END:initComponents + +private void cancelButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_cancelButtonActionPerformed + doClose(RET_CANCEL); +}//GEN-LAST:event_cancelButtonActionPerformed + +private void okButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_okButtonActionPerformed + + Properties params = new Properties(); + if (iterField.getText() != null && !iterField.getText().equals("")) params.put("iterations", iterField.getText()); + if (seedField.getText() != null && !seedField.getText().equals("")) params.put("seed", seedField.getText()); + if (kField.getText() != null && !kField.getText().equals("")) params.put("k", kField.getText()); + if (distAlgCombo.getSelectedItem() != null && !distAlgCombo.getSelectedItem().toString().equals("")) params.put("distance", distAlgCombo.getSelectedItem().toString()); + parent.setParams(params); + + doClose(RET_OK); +}//GEN-LAST:event_okButtonActionPerformed + // Variables declaration - do not modify//GEN-BEGIN:variables + private javax.swing.JButton cancelButton; + private javax.swing.JComboBox distAlgCombo; + private javax.swing.JTextField iterField; + private javax.swing.JLabel jLabel1; + private javax.swing.JLabel jLabel2; + private javax.swing.JLabel jLabel3; + private javax.swing.JLabel jLabel7; + private javax.swing.JSeparator jSeparator1; + private javax.swing.JTextField kField; + private javax.swing.JButton okButton; + private javax.swing.JTextField seedField; + // End of variables declaration//GEN-END:variables + + + public String getDistanceMethod() { + return distAlgCombo.getSelectedItem().toString(); + } + + private void doClose(int retStatus) { + returnStatus = retStatus; + setVisible(false); + dispose(); + } +}
2f3d64b70fbd92d4f3fe132bf0dd46a822ee14d6
drools
JBRULES-976: fixing problems with collections- clonning and adding unit tests--git-svn-id: https://svn.jboss.org/repos/labs/labs/jbossrules/trunk@13162 c60d74c8-e8f6-0310-9e8f-d4a2fc68ab70-
p
https://github.com/kiegroup/drools
diff --git a/drools-core/src/main/java/org/drools/util/ShadowProxyUtils.java b/drools-core/src/main/java/org/drools/util/ShadowProxyUtils.java index c9645536720..6ba4bb284ec 100644 --- a/drools-core/src/main/java/org/drools/util/ShadowProxyUtils.java +++ b/drools-core/src/main/java/org/drools/util/ShadowProxyUtils.java @@ -20,6 +20,7 @@ import java.lang.reflect.Array; import java.lang.reflect.Method; import java.util.Collection; +import java.util.Collections; import java.util.Map; /** @@ -29,6 +30,12 @@ */ public class ShadowProxyUtils { + /* Collections.UnmodifiableCollection is a package + * private class, thus the confusing bit above + * to get a Class to compare to. */ + private static final Class UNMODIFIABLE_MAP = Collections.unmodifiableMap( Collections.EMPTY_MAP ).getClass(); + private static final Class UNMODIFIABLE_COLLECTION = Collections.unmodifiableCollection( Collections.EMPTY_LIST ).getClass(); + private ShadowProxyUtils() { } @@ -41,24 +48,37 @@ public static Object cloneObject(Object original) { clone = cloneMethod.invoke( original, new Object[0] ); } catch ( Exception e ) { - // Nothing to do + /* Failed to clone. Don't worry about it, and just return + * the original object. 
*/ } } if ( clone == null ) { try { - if ( original instanceof Map ) { + if ( original instanceof Map && + original != Collections.EMPTY_MAP && + !UNMODIFIABLE_MAP.isAssignableFrom( original.getClass() ) ) { + + /* empty and unmodifiable maps can't (and don't need to) be shadowed */ clone = original.getClass().newInstance(); ((Map) clone).putAll( (Map) original ); - } else if ( original instanceof Collection ) { + + } else if ( original instanceof Collection && + original != Collections.EMPTY_LIST && + original != Collections.EMPTY_SET && + !UNMODIFIABLE_COLLECTION.isAssignableFrom( original.getClass() ) ) { + + /* empty and unmodifiable collections can't (and don't need to) be shadowed */ clone = original.getClass().newInstance(); ((Collection) clone).addAll( (Collection) original ); + } else if ( original.getClass().isArray() ) { clone = cloneArray( original ); } + } catch ( Exception e ) { - e.printStackTrace(); - // nothing to do + /* Failed to clone. Don't worry about it, and just return + * the original object. 
*/ } } @@ -85,10 +105,13 @@ public static Object cloneArray(Object original) { // cannot be invoked reflectively, so need to do copy construction: result = Array.newInstance( componentType, arrayLength ); - - if( componentType.isArray() ) { - for( int i = 0; i < arrayLength; i++ ) { - Array.set( result, i, cloneArray( Array.get( original, i ) ) ); + + if ( componentType.isArray() ) { + for ( int i = 0; i < arrayLength; i++ ) { + Array.set( result, + i, + cloneArray( Array.get( original, + i ) ) ); } } else { System.arraycopy( original, diff --git a/drools-core/src/test/java/org/drools/util/ShadowProxyUtilsTest.java b/drools-core/src/test/java/org/drools/util/ShadowProxyUtilsTest.java new file mode 100644 index 00000000000..47de0c7ab0a --- /dev/null +++ b/drools-core/src/test/java/org/drools/util/ShadowProxyUtilsTest.java @@ -0,0 +1,134 @@ +package org.drools.util; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +import junit.framework.TestCase; + +public class ShadowProxyUtilsTest extends TestCase { + + protected void setUp() throws Exception { + super.setUp(); + } + + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testCloneList() { + List list = new ArrayList(); + list.add( "a" ); + list.add( "b" ); + + List clone = (List) ShadowProxyUtils.cloneObject( list ); + assertEquals( list, + clone ); + assertNotSame( list, + clone ); + } + + public void testCloneMap() { + Map map = new TreeMap(); + map.put( "a", + "a" ); + map.put( "b", + "b" ); + + Map clone = (Map) ShadowProxyUtils.cloneObject( map ); + assertEquals( map, + clone ); + assertNotSame( map, + clone ); + } + + public void testCloneArray() { + int[][] array = new int[][]{{0, 0}, {0, 1}, {1, 0}, {1, 1}}; + + int[][] clone = (int[][]) ShadowProxyUtils.cloneObject( array ); + assertTrue( Arrays.deepEquals( 
array, + clone ) ); + assertNotSame( array, + clone ); + } + + public void testCloneUnmodifiableSet() { + Set set = new HashSet(); + set.add( "a" ); + set.add( "b" ); + + Set unmod = Collections.unmodifiableSet( set ); + + Set clone = (Set) ShadowProxyUtils.cloneObject( unmod ); + assertEquals( unmod, + clone ); + assertSame( unmod, + clone ); + } + + public void testCloneUnmodifiableMap() { + Map map = new TreeMap(); + map.put( "a", + "a" ); + map.put( "b", + "b" ); + Map unmod = Collections.unmodifiableMap( map ); + + Map clone = (Map) ShadowProxyUtils.cloneObject( unmod ); + assertEquals( unmod, + clone ); + assertSame( unmod, + clone ); + } + + public void testCloneEmptyList() { + List list = Collections.EMPTY_LIST; + + List clone = (List) ShadowProxyUtils.cloneObject( list ); + assertEquals( list, + clone ); + assertSame( list, + clone ); + } + + public void testCloneEmptySet() { + Set set = Collections.EMPTY_SET; + + Set clone = (Set) ShadowProxyUtils.cloneObject( set ); + assertEquals( set, + clone ); + assertSame( set, + clone ); + } + + public void testCloneEmptyMap() { + Map map = Collections.EMPTY_MAP; + + Map clone = (Map) ShadowProxyUtils.cloneObject( map ); + assertEquals( map, + clone ); + assertSame( map, + clone ); + } + + public void testCloneRegularObject() { + // this is never supposed to happen, + // but we don't want the method to blow up if it happens + Object obj = new Object(); + + Object clone = (Object) ShadowProxyUtils.cloneObject( obj ); + assertEquals( obj, + clone ); + assertSame( obj, + clone ); + + } + + + +}
8b9e5a2edd935dd40e6dc30c14829e208d7945f2
drools
[BZ-1092084] raise a compilation error when the- same attribute is defined twice on a rule--
a
https://github.com/kiegroup/drools
diff --git a/drools-compiler/src/main/java/org/drools/compiler/lang/ParserHelper.java b/drools-compiler/src/main/java/org/drools/compiler/lang/ParserHelper.java index 69c80fa3646..a5c41bb374e 100644 --- a/drools-compiler/src/main/java/org/drools/compiler/lang/ParserHelper.java +++ b/drools-compiler/src/main/java/org/drools/compiler/lang/ParserHelper.java @@ -801,6 +801,12 @@ void setEnd( DescrBuilder< ? , ? > db ) { NamedConsequenceDescrBuilder.class.isAssignableFrom( clazz )) ) { popParaphrases(); } + + if (RuleDescrBuilder.class.isAssignableFrom(clazz)) { + RuleDescrBuilder ruleDescrBuilder = (RuleDescrBuilder)builder; + ruleDescrBuilder.end().getDescr().afterRuleAdded(ruleDescrBuilder.getDescr()); + } + setEnd( builder ); return (T) builder; } diff --git a/drools-compiler/src/main/java/org/drools/compiler/lang/descr/PackageDescr.java b/drools-compiler/src/main/java/org/drools/compiler/lang/descr/PackageDescr.java index 090cc979fe4..ed70694ad00 100644 --- a/drools-compiler/src/main/java/org/drools/compiler/lang/descr/PackageDescr.java +++ b/drools-compiler/src/main/java/org/drools/compiler/lang/descr/PackageDescr.java @@ -16,6 +16,10 @@ package org.drools.compiler.lang.descr; +import org.drools.core.rule.Namespaceable; +import org.kie.api.io.Resource; +import org.kie.internal.definition.KnowledgeDescr; + import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; @@ -27,10 +31,6 @@ import java.util.List; import java.util.Set; -import org.drools.core.rule.Namespaceable; -import org.kie.internal.definition.KnowledgeDescr; -import org.kie.api.io.Resource; - public class PackageDescr extends BaseDescr implements Namespaceable, @@ -199,6 +199,11 @@ public void addRule(final RuleDescr rule) { if (this.rules == Collections.EMPTY_LIST) { this.rules = new ArrayList<RuleDescr>(1); } + rule.setLoadOrder(rules.size()); + this.rules.add(rule); + } + + public void afterRuleAdded(RuleDescr rule) { for (final AttributeDescr at : attributes) { // check if 
rule overrides the attribute if (!rule.getAttributes().containsKey(at.getName())) { @@ -206,8 +211,6 @@ public void addRule(final RuleDescr rule) { rule.addAttribute(at); } } - rule.setLoadOrder(rules.size()); - this.rules.add(rule); } public List<RuleDescr> getRules() { diff --git a/drools-compiler/src/main/java/org/drools/compiler/lang/descr/RuleDescr.java b/drools-compiler/src/main/java/org/drools/compiler/lang/descr/RuleDescr.java index 9de6fce97f4..e9f0c901440 100644 --- a/drools-compiler/src/main/java/org/drools/compiler/lang/descr/RuleDescr.java +++ b/drools-compiler/src/main/java/org/drools/compiler/lang/descr/RuleDescr.java @@ -16,12 +16,16 @@ package org.drools.compiler.lang.descr; +import org.drools.core.rule.Dialectable; + import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; -import java.util.*; - -import org.drools.core.rule.Dialectable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; public class RuleDescr extends AnnotatedBaseDescr implements @@ -139,7 +143,11 @@ public Map<String, AttributeDescr> getAttributes() { public void addAttribute(final AttributeDescr attribute) { if ( attribute != null ) { - this.attributes.put( attribute.getName(), attribute ); + if (attributes.containsKey(attribute.getName())) { + addError("Duplicate attribute definition: " + attribute.getName()); + } else { + this.attributes.put( attribute.getName(), attribute ); + } } } @@ -165,15 +173,19 @@ public Map<String, Object> getNamedConsequences() { public void addNamedConsequences(String name, Object consequence) { if ( namedConsequence.containsKey(name) ) { - if (errors == null) { - errors = new ArrayList<String>(); - } - errors.add("Duplicate consequence name: " + name); + addError("Duplicate consequence name: " + name); } else { namedConsequence.put(name, consequence); } } + private void addError(String message) { + if (errors == null) { + errors = new 
ArrayList<String>(); + } + errors.add(message); + } + public void setConsequenceLocation(final int line, final int pattern) { this.consequenceLine = line; diff --git a/drools-compiler/src/test/java/org/drools/compiler/integrationtests/Misc2Test.java b/drools-compiler/src/test/java/org/drools/compiler/integrationtests/Misc2Test.java index 42a3209b3d0..0667ed50e98 100644 --- a/drools-compiler/src/test/java/org/drools/compiler/integrationtests/Misc2Test.java +++ b/drools-compiler/src/test/java/org/drools/compiler/integrationtests/Misc2Test.java @@ -5662,4 +5662,15 @@ public void testEvalInSubnetwork() { assertEquals(1, list.size()); assertEquals(0, (int)list.get(0)); } + + @Test + public void testRedeclaringRuleAttribute() { + // BZ-1092084 + String str = "rule R salience 10 salience 100 when then end\n"; + + KieServices ks = KieServices.Factory.get(); + KieFileSystem kfs = ks.newKieFileSystem().write( "src/main/resources/r1.drl", str ); + Results results = ks.newKieBuilder( kfs ).buildAll().getResults(); + assertEquals(1, results.getMessages().size()); + } } \ No newline at end of file diff --git a/drools-compiler/src/test/java/org/drools/compiler/lang/descr/PackageDescrTest.java b/drools-compiler/src/test/java/org/drools/compiler/lang/descr/PackageDescrTest.java index 5045640983e..9cd64b50001 100644 --- a/drools-compiler/src/test/java/org/drools/compiler/lang/descr/PackageDescrTest.java +++ b/drools-compiler/src/test/java/org/drools/compiler/lang/descr/PackageDescrTest.java @@ -1,5 +1,10 @@ package org.drools.compiler.lang.descr; +import org.drools.compiler.Person; +import org.drools.compiler.lang.api.DescrFactory; +import org.drools.compiler.lang.api.PackageDescrBuilder; +import org.junit.Test; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -10,13 +15,6 @@ import java.util.List; import java.util.Map; -import org.drools.compiler.Person; -import org.drools.compiler.lang.api.DescrFactory; -import 
org.drools.compiler.lang.api.PackageDescrBuilder; -import org.drools.compiler.lang.descr.AttributeDescr; -import org.drools.compiler.lang.descr.PackageDescr; -import org.drools.compiler.lang.descr.RuleDescr; -import org.junit.Test; import static org.junit.Assert.*; public class PackageDescrTest { @@ -39,7 +37,8 @@ public void testAttributeOverriding() { List pkgAts = desc.getAttributes(); assertEquals("bar", ((AttributeDescr)pkgAts.get( 0 )).getValue()); assertEquals("default", ((AttributeDescr)pkgAts.get( 1 )).getValue()); - + + desc.afterRuleAdded( rule ); Map<String, AttributeDescr> ruleAts = rule.getAttributes(); assertEquals("overridden", ((AttributeDescr)ruleAts.get( "foo" )).getValue());
b657bdfa1a9467848cc1844b5c732087e5eae1ca
elasticsearch
Optimize aliases processing--Closes- 2832-
p
https://github.com/elastic/elasticsearch
diff --git a/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java b/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java index 09d607bf9d63b..47b1de66fd6d5 100644 --- a/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java +++ b/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java @@ -39,6 +39,7 @@ import org.elasticsearch.indices.InvalidAliasNameException; import java.io.IOException; +import java.util.Map; import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; @@ -67,10 +68,20 @@ public IndexAlias alias(String alias) { return aliases.get(alias); } + public IndexAlias create(String alias, @Nullable CompressedString filter) { + return new IndexAlias(alias, filter, parse(alias, filter)); + } + public void add(String alias, @Nullable CompressedString filter) { add(new IndexAlias(alias, filter, parse(alias, filter))); } + public void addAll(Map<String, IndexAlias> aliases) { + synchronized (mutex) { + this.aliases = newMapBuilder(this.aliases).putAll(aliases).immutableMap(); + } + } + /** * Returns the filter associated with listed filtering aliases. 
* <p/> diff --git a/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index f7cbd4e12be14..d7ca96d866701 100644 --- a/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -62,10 +62,13 @@ import org.elasticsearch.indices.recovery.StartRecoveryRequest; import org.elasticsearch.threadpool.ThreadPool; +import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import static com.google.common.collect.Maps.newHashMap; import static com.google.common.collect.Sets.newHashSet; import static org.elasticsearch.ExceptionsHelper.detailedMessage; @@ -433,9 +436,7 @@ private void applyAliases(ClusterChangedEvent event) { String index = indexMetaData.index(); IndexService indexService = indicesService.indexService(index); IndexAliasesService indexAliasesService = indexService.aliasesService(); - for (AliasMetaData aliasesMd : indexMetaData.aliases().values()) { - processAlias(index, aliasesMd.alias(), aliasesMd.filter(), indexAliasesService); - } + processAliases(index, indexMetaData.aliases().values(), indexAliasesService); // go over and remove aliases for (IndexAlias indexAlias : indexAliasesService) { if (!indexMetaData.aliases().containsKey(indexAlias.alias())) { @@ -450,26 +451,31 @@ private void applyAliases(ClusterChangedEvent event) { } } - private void processAlias(String index, String alias, CompressedString filter, IndexAliasesService indexAliasesService) { - try { - if (!indexAliasesService.hasAlias(alias)) { - if (logger.isDebugEnabled()) { - logger.debug("[{}] adding alias [{}], filter [{}]", index, alias, filter); - } - indexAliasesService.add(alias, filter); - } else { - if ((filter == null && indexAliasesService.alias(alias).filter() != null) || - 
(filter != null && !filter.equals(indexAliasesService.alias(alias).filter()))) { + private void processAliases(String index, Collection<AliasMetaData> aliases, IndexAliasesService indexAliasesService) { + HashMap<String, IndexAlias> newAliases = newHashMap(); + for (AliasMetaData aliasMd : aliases) { + String alias = aliasMd.alias(); + CompressedString filter = aliasMd.filter(); + try { + if (!indexAliasesService.hasAlias(alias)) { if (logger.isDebugEnabled()) { - logger.debug("[{}] updating alias [{}], filter [{}]", index, alias, filter); + logger.debug("[{}] adding alias [{}], filter [{}]", index, alias, filter); + } + newAliases.put(alias, indexAliasesService.create(alias, filter)); + } else { + if ((filter == null && indexAliasesService.alias(alias).filter() != null) || + (filter != null && !filter.equals(indexAliasesService.alias(alias).filter()))) { + if (logger.isDebugEnabled()) { + logger.debug("[{}] updating alias [{}], filter [{}]", index, alias, filter); + } + newAliases.put(alias, indexAliasesService.create(alias, filter)); } - indexAliasesService.add(alias, filter); } + } catch (Exception e) { + logger.warn("[{}] failed to add alias [{}], filter [{}]", e, index, alias, filter); } - } catch (Exception e) { - logger.warn("[{}] failed to add alias [{}], filter [{}]", e, index, alias, filter); } - + indexAliasesService.addAll(newAliases); } private void applyNewOrUpdatedShards(final ClusterChangedEvent event) throws ElasticSearchException {