commit_id
stringlengths
40
40
project
stringclasses
11 values
commit_message
stringlengths
3
3.04k
type
stringclasses
3 values
url
stringclasses
11 values
git_diff
stringlengths
555
691k
d79c2322e801524146441b1f61739ea079a9a15a
restlet-framework-java
- Initial code for new default HTTP connector.--
a
https://github.com/restlet/restlet-framework-java
diff --git a/modules/org.restlet.ext.grizzly/src/org/restlet/ext/grizzly/HttpServerHelper.java b/modules/org.restlet.ext.grizzly/src/org/restlet/ext/grizzly/HttpServerHelper.java index 215e65cad1..91ab71ce9a 100644 --- a/modules/org.restlet.ext.grizzly/src/org/restlet/ext/grizzly/HttpServerHelper.java +++ b/modules/org.restlet.ext.grizzly/src/org/restlet/ext/grizzly/HttpServerHelper.java @@ -64,7 +64,8 @@ public HttpServerHelper(Server server) { @Override protected void configure(Controller controller) throws Exception { // Get the TCP select handler of the controller - final TCPSelectorHandler selectorHandler = getSelectorHandler(); + TCPSelectorHandler selectorHandler = getSelectorHandler(); + // Configure it selectorHandler.setPort(getHelped().getPort()); if (getHelped().getAddress() != null) { diff --git a/modules/org.restlet/src/org/restlet/engine/http/connector/Connection.java b/modules/org.restlet/src/org/restlet/engine/http/connector/Connection.java index ab8fa996c8..a53be36333 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/connector/Connection.java +++ b/modules/org.restlet/src/org/restlet/engine/http/connector/Connection.java @@ -37,8 +37,6 @@ import java.security.cert.Certificate; import java.util.Arrays; import java.util.List; -import java.util.Queue; -import java.util.concurrent.ConcurrentLinkedQueue; import java.util.logging.Level; import java.util.logging.Logger; @@ -47,8 +45,6 @@ import javax.net.ssl.SSLSocket; import org.restlet.Connector; -import org.restlet.Request; -import org.restlet.Response; import org.restlet.data.Parameter; import org.restlet.engine.ConnectorHelper; import org.restlet.engine.http.header.HeaderConstants; @@ -88,20 +84,12 @@ public static boolean isBroken(Exception exception) { return result; } - private volatile boolean persistent; - - private volatile boolean pipelining; - - private final Queue<Request> requests; - - private final Queue<Response> responses; - - private volatile ConnectionState state; - private 
volatile boolean inboundBusy; private volatile boolean outboundBusy; + private volatile ConnectionState state; + /** The connecting user */ private final Socket socket; @@ -117,10 +105,6 @@ public static boolean isBroken(Exception exception) { public Connection(ConnectorHelper<T> helper, Socket socket) throws IOException { this.helper = helper; - this.persistent = false; - this.pipelining = false; - this.requests = new ConcurrentLinkedQueue<Request>(); - this.responses = new ConcurrentLinkedQueue<Response>(); this.state = ConnectionState.CLOSED; this.socket = socket; this.inboundBusy = false; @@ -265,10 +249,10 @@ public void addAdditionalHeaders(Series<Parameter> existingHeaders, /** * Closes the connection. By default, set the state to - * {@link ConnectionState#CLOSING}. + * {@link ConnectionState#CLOSED}. */ public void close() { - setState(ConnectionState.CLOSING); + setState(ConnectionState.CLOSED); } public String getAddress() { @@ -322,14 +306,6 @@ protected Representation getRepresentation( null); } - public Queue<Request> getRequests() { - return requests; - } - - public Queue<Response> getResponses() { - return responses; - } - public Socket getSocket() { return socket; } @@ -395,20 +371,12 @@ public boolean isOutboundBusy() { return outboundBusy; } - public boolean isPersistent() { - return persistent; - } - - public boolean isPipelining() { - return pipelining; - } - /** * Opens the connection. By default, set the state to - * {@link ConnectionState#OPENING}. + * {@link ConnectionState#OPEN}. 
*/ public void open() { - setState(ConnectionState.OPENING); + setState(ConnectionState.OPEN); } public void setInboundBusy(boolean inboundBusy) { @@ -419,14 +387,6 @@ public void setOutboundBusy(boolean outboundBusy) { this.outboundBusy = outboundBusy; } - public void setPersistent(boolean persistent) { - this.persistent = persistent; - } - - public void setPipelining(boolean pipelining) { - this.pipelining = pipelining; - } - public void setState(ConnectionState state) { this.state = state; } diff --git a/modules/org.restlet/src/org/restlet/engine/http/connector/ConnectionState.java b/modules/org.restlet/src/org/restlet/engine/http/connector/ConnectionState.java index baef77acf0..a0388a3b8b 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/connector/ConnectionState.java +++ b/modules/org.restlet/src/org/restlet/engine/http/connector/ConnectionState.java @@ -41,7 +41,7 @@ public enum ConnectionState { OPENING, /** The network connection has been successfully opened. */ - OPENED, + OPEN, /** The network connection is being closed. */ CLOSING, diff --git a/modules/org.restlet/src/org/restlet/engine/http/connector/DefaultServerConnection.java b/modules/org.restlet/src/org/restlet/engine/http/connector/DefaultServerConnection.java index 44471cc41d..436a7a3579 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/connector/DefaultServerConnection.java +++ b/modules/org.restlet/src/org/restlet/engine/http/connector/DefaultServerConnection.java @@ -38,9 +38,14 @@ import java.net.Socket; import java.nio.channels.ReadableByteChannel; import java.nio.channels.WritableByteChannel; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutorService; import java.util.logging.Level; +import org.restlet.Request; +import org.restlet.Response; + /** * An internal HTTP server connection. * @@ -54,6 +59,14 @@ public class DefaultServerConnection extends ServerConnection { /** The outbound stream. 
*/ private final OutputStream outboundStream; + private volatile boolean persistent; + + private volatile boolean pipelining; + + private final Queue<Request> requests; + + private final Queue<Response> responses; + /** * Constructor. * @@ -66,23 +79,10 @@ public DefaultServerConnection(DefaultServerHelper helper, Socket socket) super(helper, socket); this.inboundStream = new BufferedInputStream(socket.getInputStream()); this.outboundStream = new BufferedOutputStream(socket.getOutputStream()); - } - - @Override - public void open() { - super.open(); - - if (!getHelper().getHandlerService().isShutdown()) { - try { - getHelper().handle(null, null); - } catch (Exception e) { - getLogger().log(Level.WARNING, - "Error while handling an HTTP server call: ", - e.getMessage()); - getLogger().log(Level.INFO, - "Error while handling an HTTP server call", e); - } - } + this.persistent = false; + this.pipelining = false; + this.requests = new ConcurrentLinkedQueue<Request>(); + this.responses = new ConcurrentLinkedQueue<Response>(); } @Override @@ -134,6 +134,10 @@ public InputStream getRequestHeadStream() { return getInboundStream(); } + public Queue<Request> getRequests() { + return requests; + } + @Override public WritableByteChannel getResponseEntityChannel() { return null; @@ -144,4 +148,42 @@ public OutputStream getResponseEntityStream() { return null; } + public Queue<Response> getResponses() { + return responses; + } + + public boolean isPersistent() { + return persistent; + } + + public boolean isPipelining() { + return pipelining; + } + + @Override + public void open() { + super.open(); + + if (!getHelper().getHandlerService().isShutdown()) { + try { + + getHelper().handle(null, null); + } catch (Exception e) { + getLogger().log(Level.WARNING, + "Error while handling an HTTP server call: ", + e.getMessage()); + getLogger().log(Level.INFO, + "Error while handling an HTTP server call", e); + } + } + } + + public void setPersistent(boolean persistent) { + 
this.persistent = persistent; + } + + public void setPipelining(boolean pipelining) { + this.pipelining = pipelining; + } + }
a527c071070d82d56150c7d040189629fbda54c8
drools
JBRULES-2817 Make the KnowledgeAgent Tests more- robust and faster - KAgent Event Listener test are enabled again! There- were modified to adopt the new KAgent test architecture--
p
https://github.com/kiegroup/drools
diff --git a/drools-compiler/src/test/java/org/drools/agent/BaseKnowledgeAgentTest.java b/drools-compiler/src/test/java/org/drools/agent/BaseKnowledgeAgentTest.java index ed9b1e159c6..33c42b6b5d0 100644 --- a/drools-compiler/src/test/java/org/drools/agent/BaseKnowledgeAgentTest.java +++ b/drools-compiler/src/test/java/org/drools/agent/BaseKnowledgeAgentTest.java @@ -135,7 +135,7 @@ public void afterChangeSetApplied(AfterChangeSetAppliedEvent event) { if (!resourceCompilationFailedEvents.isEmpty()){ //A compilation error occured - throw new RuntimeException("Unable to compile Knowledge"+ resourceCompilationFailedEvents.get(0) ); + throw new RuntimeException("Unable to compile Knowledge"+ resourceCompilationFailedEvents.get(0).getKnowledgeBuilder().getErrors() ); } } diff --git a/drools-compiler/src/test/java/org/drools/agent/KnowledgeAgentEventListenerTest.java b/drools-compiler/src/test/java/org/drools/agent/KnowledgeAgentEventListenerTest.java index 333279f5ff8..1b1d1693e3e 100644 --- a/drools-compiler/src/test/java/org/drools/agent/KnowledgeAgentEventListenerTest.java +++ b/drools-compiler/src/test/java/org/drools/agent/KnowledgeAgentEventListenerTest.java @@ -1,14 +1,9 @@ package org.drools.agent; -import java.io.BufferedWriter; import java.io.File; -import java.io.FileWriter; -import java.io.Writer; import java.util.ArrayList; import java.util.List; -import org.junit.After; -import org.junit.Before; import org.junit.Ignore; import org.junit.Test; import static org.junit.Assert.*; @@ -16,7 +11,6 @@ import org.drools.KnowledgeBase; import org.drools.KnowledgeBaseFactory; import org.drools.Person; -import org.drools.core.util.FileManager; import org.drools.event.knowledgeagent.AfterChangeSetAppliedEvent; import org.drools.event.knowledgeagent.AfterChangeSetProcessedEvent; import org.drools.event.knowledgeagent.AfterResourceProcessedEvent; @@ -26,13 +20,8 @@ import org.drools.event.knowledgeagent.KnowledgeAgentEventListener; import 
org.drools.event.knowledgeagent.KnowledgeBaseUpdatedEvent; import org.drools.event.knowledgeagent.ResourceCompilationFailedEvent; -import org.drools.io.ResourceChangeScannerConfiguration; import org.drools.io.ResourceFactory; -import org.drools.io.impl.ResourceChangeNotifierImpl; -import org.drools.io.impl.ResourceChangeScannerImpl; import org.drools.runtime.StatefulKnowledgeSession; -import org.mortbay.jetty.Server; -import org.mortbay.jetty.handler.ResourceHandler; public class KnowledgeAgentEventListenerTest extends BaseKnowledgeAgentTest { @@ -47,7 +36,7 @@ public class KnowledgeAgentEventListenerTest extends BaseKnowledgeAgentTest { private int beforeResourceProcessed; private int afterResourceProcessed; - @Test @Ignore + @Test public void testEventListenerWithIncrementalChangeSet() throws Exception { fileManager.write( "myExpander.dsl", this.createCommonDSL( null ) ); @@ -98,7 +87,7 @@ public void testEventListenerWithIncrementalChangeSet() throws Exception { ksession.setGlobal( "list", list ); - ksession.insert( new Person( "John" ) ); + ksession.insert( new Person( "John", 34 ) ); ksession.fireAllRules(); assertEquals( 1, @@ -196,7 +185,14 @@ public void testEventListenerWithIncrementalChangeSet() throws Exception { fileManager.write( "rules.drl", createCommonDSLRRule( "Rule1" ) ); - scan( kagent ); + try{ + scan( kagent ); + fail("An exception was expected"); + } catch (Exception e){ + //We have 2 listeners: one defined in this.createKAgent() and another + //that is dinamically set in scan() method. 
The later will throw + //this exception + } assertEquals( 2, this.beforeChangeSetApplied ); assertEquals( 2, @@ -217,7 +213,7 @@ public void testEventListenerWithIncrementalChangeSet() throws Exception { kagent.dispose(); } - @Test @Ignore + @Test public void testEventListenerWithoutIncrementalChangeSet() throws Exception { fileManager.write( "myExpander.dsl", this.createCommonDSL( null ) ); @@ -268,7 +264,7 @@ public void testEventListenerWithoutIncrementalChangeSet() throws Exception { StatefulKnowledgeSession ksession = kagent.getKnowledgeBase().newStatefulKnowledgeSession(); ksession.setGlobal( "list", list ); - ksession.insert( new Person( "John" ) ); + ksession.insert( new Person( "John", 34 ) ); ksession.fireAllRules(); ksession.dispose(); assertEquals( 1, @@ -287,7 +283,7 @@ public void testEventListenerWithoutIncrementalChangeSet() throws Exception { ksession = kagent.getKnowledgeBase().newStatefulKnowledgeSession(); ksession.setGlobal( "list", list ); - ksession.insert( new Person( "John" ) ); + ksession.insert( new Person( "John", 34 ) ); ksession.fireAllRules(); ksession.dispose(); assertEquals( 1, @@ -314,7 +310,7 @@ public void testEventListenerWithoutIncrementalChangeSet() throws Exception { ksession = kagent.getKnowledgeBase().newStatefulKnowledgeSession(); ksession.setGlobal( "list", list ); - ksession.insert( new Person( "John" ) ); + ksession.insert( new Person( "John", 34 ) ); ksession.fireAllRules(); ksession.dispose(); assertEquals( 1, @@ -346,7 +342,7 @@ public void testEventListenerWithoutIncrementalChangeSet() throws Exception { ksession = kagent.getKnowledgeBase().newStatefulKnowledgeSession(); ksession.setGlobal( "list", list ); - ksession.insert( new Person( "John" ) ); + ksession.insert( new Person( "John", 34 ) ); ksession.fireAllRules(); ksession.dispose(); assertEquals( 2, @@ -379,7 +375,7 @@ public void testEventListenerWithoutIncrementalChangeSet() throws Exception { ksession = 
kagent.getKnowledgeBase().newStatefulKnowledgeSession(); ksession.setGlobal( "list", list ); - ksession.insert( new Person( "John" ) ); + ksession.insert( new Person( "John", 34 ) ); ksession.fireAllRules(); ksession.dispose(); assertEquals( 1, @@ -393,7 +389,17 @@ public void testEventListenerWithoutIncrementalChangeSet() throws Exception { fileManager.write( "rules.drl", createCommonDSLRRule( "Rule1" ) ); - scan( kagent ); + + try{ + scan( kagent ); + fail("An exception was expected"); + } catch (Exception e){ + //We have 2 listeners: one defined in this.createKAgent() and another + //that is dinamically set in scan() method. The later will throw + //this exception + } + + assertEquals( 2, this.beforeChangeSetApplied ); assertEquals( 2, @@ -414,6 +420,7 @@ public void testEventListenerWithoutIncrementalChangeSet() throws Exception { kagent.dispose(); } + @Override public KnowledgeAgent createKAgent(KnowledgeBase kbase, boolean newInstance) { KnowledgeAgent kagent = super.createKAgent( kbase,
8456536126f631d665ffd715dca9ae981588632e
intellij-community
correctly-sized icon in completion extender--
c
https://github.com/JetBrains/intellij-community
diff --git a/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/CompletionExtender.java b/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/CompletionExtender.java index 9b27fb7ef9fa1..9fffaf934164d 100644 --- a/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/CompletionExtender.java +++ b/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/CompletionExtender.java @@ -64,8 +64,7 @@ public boolean sameAsFor(LookupElement item) { } private static JComponent createComponent(LookupElement element, LookupImpl lookup) { - final LookupCellRenderer renderer = new LookupCellRenderer(lookup); - renderer.setFullSize(true); + final LookupCellRenderer renderer = ((LookupCellRenderer)lookup.getList().getCellRenderer()).createExtenderRenderer(); final JComponent component = (JComponent)renderer.getListCellRendererComponent(lookup.getList(), element, lookup.getList().getSelectedIndex(), true, false); diff --git a/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/LookupCellRenderer.java b/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/LookupCellRenderer.java index c1381a5700485..0a5e258a0c146 100644 --- a/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/LookupCellRenderer.java +++ b/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/LookupCellRenderer.java @@ -62,8 +62,6 @@ public class LookupCellRenderer implements ListCellRenderer { private static final Color EMPTY_ITEM_FOREGROUND_COLOR = FOREGROUND_COLOR; - private static final int MAX_LENGTH = 70; - private final LookupImpl myLookup; private final SimpleColoredComponent myNameComponent; @@ -71,12 +69,13 @@ public class LookupCellRenderer implements ListCellRenderer { private final SimpleColoredComponent myTypeLabel; private final JPanel myPanel; - public static final Color PREFERRED_BACKGROUND_COLOR = new Color(220, 245, 220); private static final String ELLIPSIS = "\u2026"; - private boolean myFullSize; + private final boolean myFullSize; private 
int myMaxWidth = -1; - public LookupCellRenderer(LookupImpl lookup) { + public LookupCellRenderer(LookupImpl lookup, boolean fullSize) { + myFullSize = fullSize; + EditorColorsScheme scheme = lookup.getEditor().getColorsScheme(); myNormalFont = scheme.getFont(EditorFontType.PLAIN); myBoldFont = scheme.getFont(EditorFontType.BOLD); @@ -121,7 +120,7 @@ public Component getListCellRendererComponent( final LookupElement item = (LookupElement)value; final Color foreground = isSelected ? SELECTED_FOREGROUND_COLOR : FOREGROUND_COLOR; - final Color background = getItemBackground(list, index, isSelected); + final Color background = isSelected ? SELECTED_BACKGROUND_COLOR : BACKGROUND_COLOR; int allowedWidth = list.getWidth() - AFTER_TAIL - AFTER_TYPE - getIconIndent(); final LookupElementPresentation presentation = new RealLookupElementPresentation(myFullSize ? getMaxWidth() : allowedWidth, myNormalMetrics, myBoldMetrics, myLookup); @@ -160,10 +159,6 @@ private int getMaxWidth() { return myMaxWidth; } - private Color getItemBackground(JList list, int index, boolean isSelected) { - return isSelected ? 
SELECTED_BACKGROUND_COLOR : BACKGROUND_COLOR; - } - private void setTailTextLabel(boolean isSelected, LookupElementPresentation presentation, Color foreground, int allowedWidth) { final Color fg = getTailTextColor(isSelected, presentation, foreground); @@ -182,10 +177,6 @@ private void setTailTextLabel(boolean isSelected, LookupElementPresentation pres myTailComponent.append(trimLabelText(tailText, allowedWidth, myNormalMetrics), attributes); } - public void setFullSize(boolean fullSize) { - myFullSize = fullSize; - } - private static String trimLabelText(@Nullable String text, int maxWidth, FontMetrics metrics) { if (text == null || StringUtil.isEmpty(text)) { return ""; @@ -335,6 +326,12 @@ public int updateMaximumWidth(final LookupElementPresentation p) { return RealLookupElementPresentation.calculateWidth(p, myNormalMetrics, myBoldMetrics) + AFTER_TAIL + AFTER_TYPE; } + public LookupCellRenderer createExtenderRenderer() { + LookupCellRenderer renderer = new LookupCellRenderer(myLookup, true); + renderer.myEmptyIcon = myEmptyIcon; + return renderer; + } + public int getIconIndent() { return myNameComponent.getIconTextGap() + myEmptyIcon.getIconWidth(); } diff --git a/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/LookupImpl.java b/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/LookupImpl.java index 9534a81cf2b42..c4ce0492a960d 100644 --- a/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/LookupImpl.java +++ b/platform/lang-impl/src/com/intellij/codeInsight/lookup/impl/LookupImpl.java @@ -161,7 +161,7 @@ public LookupImpl(Project project, Editor editor, @NotNull LookupArranger arrang myPresentableArranger = arranger; myIconPanel.setVisible(false); - myCellRenderer = new LookupCellRenderer(this); + myCellRenderer = new LookupCellRenderer(this, false); myList.setCellRenderer(myCellRenderer); myList.setFocusable(false);
ab033086f98e633fa7314edf3adfab67fe9bd85b
spring-framework
SPR-8454 Introduce Registration style objects,- rename several Spring MVC *Configurer helpers to *Registry, add more tests--
a
https://github.com/spring-projects/spring-framework
diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/DefaultServletHandlerConfigurer.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/DefaultServletHandlerConfigurer.java index e9b713fc7ead..cb80e95c2f28 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/DefaultServletHandlerConfigurer.java +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/DefaultServletHandlerConfigurer.java @@ -21,75 +21,78 @@ import javax.servlet.ServletContext; +import org.springframework.util.Assert; import org.springframework.web.HttpRequestHandler; import org.springframework.web.servlet.DispatcherServlet; -import org.springframework.web.servlet.HandlerMapping; +import org.springframework.web.servlet.handler.AbstractHandlerMapping; import org.springframework.web.servlet.handler.SimpleUrlHandlerMapping; import org.springframework.web.servlet.resource.DefaultServletHttpRequestHandler; /** - * Helps with configuring a handler for serving static resources by forwarding to the Servlet container's default - * Servlet. This is commonly used when the {@link DispatcherServlet} is mapped to "/", which results in cleaner - * URLs (without a servlet prefix) but may need to still allow some requests (e.g. static resources) to be handled - * by the Servlet container's default servlet. - * - * <p>It is important the configured handler remains last in the order of all {@link HandlerMapping} instances in - * the Spring MVC web application context. That is is the case if relying on @{@link EnableWebMvc}. - * However, if you register your own HandlerMapping instance sure to set its "order" property to a value lower - * than that of the {@link DefaultServletHttpRequestHandler}, which is {@link Integer#MAX_VALUE}. 
+ * Configures a request handler for serving static resources by forwarding the request to the Servlet container's + * "default" Servlet. This is indended to be used when the Spring MVC {@link DispatcherServlet} is mapped to "/" + * thus overriding the Servlet container's default handling of static resources. Since this handler is configured + * at the lowest precedence, effectively it allows all other handler mappings to handle the request, and if none + * of them do, this handler can forward it to the "default" Servlet. * * @author Rossen Stoyanchev * @since 3.1 * - * @see ResourceConfigurer + * @see DefaultServletHttpRequestHandler */ public class DefaultServletHandlerConfigurer { - private DefaultServletHttpRequestHandler requestHandler; - private final ServletContext servletContext; + private DefaultServletHttpRequestHandler handler; + + /** + * Create a {@link DefaultServletHandlerConfigurer} instance. + * @param servletContext the ServletContext to use to configure the underlying DefaultServletHttpRequestHandler. + */ public DefaultServletHandlerConfigurer(ServletContext servletContext) { + Assert.notNull(servletContext, "A ServletContext is required to configure default servlet handling"); this.servletContext = servletContext; } /** - * Enable forwarding to the Servlet container default servlet. The {@link DefaultServletHttpRequestHandler} - * will try to auto-detect the default Servlet at startup using a list of known names. Alternatively, you can - * specify the name of the default Servlet, see {@link #enable(String)}. + * Enable forwarding to the "default" Servlet. When this method is used the {@link DefaultServletHttpRequestHandler} + * will try to auto-detect the "default" Servlet name. Alternatively, you can specify the name of the default + * Servlet via {@link #enable(String)}. 
+ * @see DefaultServletHttpRequestHandler */ public void enable() { enable(null); } /** - * Enable forwarding to the Servlet container default servlet specifying explicitly the name of the default - * Servlet to forward static resource requests to. This is useful when the default Servlet cannot be detected - * (e.g. when using an unknown container or when it has been manually configured). + * Enable forwarding to the "default" Servlet identified by the given name. + * This is useful when the default Servlet cannot be auto-detected, for example when it has been manually configured. + * @see DefaultServletHttpRequestHandler */ public void enable(String defaultServletName) { - requestHandler = new DefaultServletHttpRequestHandler(); - requestHandler.setDefaultServletName(defaultServletName); - requestHandler.setServletContext(servletContext); + handler = new DefaultServletHttpRequestHandler(); + handler.setDefaultServletName(defaultServletName); + handler.setServletContext(servletContext); } /** - * Return a {@link SimpleUrlHandlerMapping} instance ordered at {@link Integer#MAX_VALUE} containing a - * {@link DefaultServletHttpRequestHandler} mapped to {@code /**}. + * Return a handler mapping instance ordered at {@link Integer#MAX_VALUE} containing the + * {@link DefaultServletHttpRequestHandler} instance mapped to {@code "/**"}; or {@code null} if + * default servlet handling was not been enabled. 
*/ - protected SimpleUrlHandlerMapping getHandlerMapping() { + protected AbstractHandlerMapping getHandlerMapping() { + if (handler == null) { + return null; + } + + Map<String, HttpRequestHandler> urlMap = new HashMap<String, HttpRequestHandler>(); + urlMap.put("/**", handler); + SimpleUrlHandlerMapping handlerMapping = new SimpleUrlHandlerMapping(); handlerMapping.setOrder(Integer.MAX_VALUE); - handlerMapping.setUrlMap(getUrlMap()); + handlerMapping.setUrlMap(urlMap); return handlerMapping; } - private Map<String, HttpRequestHandler> getUrlMap() { - Map<String, HttpRequestHandler> urlMap = new HashMap<String, HttpRequestHandler>(); - if (requestHandler != null) { - urlMap.put("/**", requestHandler); - } - return urlMap ; - } - } \ No newline at end of file diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/DelegatingWebMvcConfiguration.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/DelegatingWebMvcConfiguration.java index 59bc12d4f7e7..a459a12700fe 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/DelegatingWebMvcConfiguration.java +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/DelegatingWebMvcConfiguration.java @@ -28,11 +28,13 @@ import org.springframework.web.servlet.HandlerExceptionResolver; /** - * A variant of {@link WebMvcConfigurationSupport} that delegates to one or more registered {@link WebMvcConfigurer} - * implementations allowing each of them to customize the default Spring MVC configuration. + * A variant of {@link WebMvcConfigurationSupport} that delegates to one or more registered + * {@link WebMvcConfigurer}s allowing each of them to customize the default Spring MVC + * code-based configuration. * - * <p>This class is automatically imported when @{@link EnableWebMvc} is used on an @{@link Configuration} class. 
- * In turn it detects all implementations of {@link WebMvcConfigurer} via autowiring and in turn delegates to them. + * <p>This class is automatically imported when @{@link EnableWebMvc} is used to annotate + * an @{@link Configuration} class. In turn it detects implementations of {@link WebMvcConfigurer} + * via autowiring and delegates to them. * * @see EnableWebMvc * @see WebMvcConfigurer @@ -54,18 +56,18 @@ public void setConfigurers(List<WebMvcConfigurer> configurers) { } @Override - protected final void configureInterceptors(InterceptorConfigurer configurer) { - configurers.configureInterceptors(configurer); + protected final void addInterceptors(InterceptorRegistry registry) { + configurers.addInterceptors(registry); } @Override - protected final void configureViewControllers(ViewControllerConfigurer configurer) { - configurers.configureViewControllers(configurer); + protected final void addViewControllers(ViewControllerRegistry registry) { + configurers.addViewControllers(registry); } @Override - protected final void configureResourceHandling(ResourceConfigurer configurer) { - configurers.configureResourceHandling(configurer); + protected final void addResourceHandlers(ResourceHandlerRegistry registry) { + configurers.addResourceHandlers(registry); } @Override diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/EnableWebMvc.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/EnableWebMvc.java index 92794b627c10..b6379fcb7170 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/EnableWebMvc.java +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/EnableWebMvc.java @@ -24,9 +24,8 @@ /** * Enables default Spring MVC configuration and registers Spring MVC infrastructure components expected by the - * {@link DispatcherServlet}. 
Add this annotation to an application @{@link Configuration} class. It will in - * turn import the @{@link Configuration} class {@link DelegatingWebMvcConfiguration}, which provides default Spring MVC - * configuration. + * {@link DispatcherServlet}. Use this annotation on an @{@link Configuration} class. In turn that will + * import {@link DelegatingWebMvcConfiguration}, which provides default Spring MVC configuration. * <pre class="code"> * &#064;Configuration * &#064;EnableWebMvc @@ -39,9 +38,9 @@ * } * </pre> * <p>To customize the imported configuration implement {@link WebMvcConfigurer}, or more conveniently extend - * {@link WebMvcConfigurerAdapter} overriding specific methods. Your @{@link Configuration} class and any other - * Spring bean that implements {@link WebMvcConfigurer} will be detected and given an opportunity to customize - * the default Spring MVC configuration through the callback methods on the {@link WebMvcConfigurer} interface. + * {@link WebMvcConfigurerAdapter} overriding specific methods only. Any @{@link Configuration} class that + * implements {@link WebMvcConfigurer} will be detected by {@link DelegatingWebMvcConfiguration} and given + * an opportunity to customize the default Spring MVC code-based configuration. * <pre class="code"> * &#064;Configuration * &#064;EnableWebMvc @@ -52,7 +51,7 @@ * public class MyConfiguration extends WebMvcConfigurerAdapter { * * &#064;Override - * public void registerFormatters(FormatterRegistry formatterRegistry) { + * public void addFormatters(FormatterRegistry formatterRegistry) { * formatterRegistry.addConverter(new MyConverter()); * } * @@ -61,7 +60,7 @@ * converters.add(new MyHttpMessageConverter()); * } * - * ... + * // &#064;Override methods ... 
* * } * </pre> diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/InterceptorConfigurer.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/InterceptorConfigurer.java deleted file mode 100644 index 3ce466121e14..000000000000 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/InterceptorConfigurer.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright 2002-2011 the original author or authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.springframework.web.servlet.config.annotation; - -import java.util.ArrayList; -import java.util.List; - -import org.springframework.util.Assert; -import org.springframework.web.context.request.WebRequestInterceptor; -import org.springframework.web.servlet.HandlerInterceptor; -import org.springframework.web.servlet.handler.MappedInterceptor; -import org.springframework.web.servlet.handler.WebRequestHandlerInterceptorAdapter; - -/** - * Helps with configuring an ordered set of Spring MVC interceptors of type {@link HandlerInterceptor} or - * {@link WebRequestInterceptor}. Interceptors can be registered with a set of path patterns. 
- * - * @author Rossen Stoyanchev - * @since 3.1 - */ -public class InterceptorConfigurer { - - private final List<Object> interceptors = new ArrayList<Object>(); - - /** - * Add a {@link HandlerInterceptor} that should apply to any request. - */ - public void addInterceptor(HandlerInterceptor interceptor) { - register(interceptor); - } - - /** - * Add a {@link WebRequestInterceptor} that should apply to any request. - */ - public void addInterceptor(WebRequestInterceptor interceptor) { - register(asHandlerInterceptorArray(interceptor)); - } - - /** - * Add {@link HandlerInterceptor}s that should apply to any request. - */ - public void addInterceptors(HandlerInterceptor... interceptors) { - register( interceptors); - } - - /** - * Add {@link WebRequestInterceptor}s that should apply to any request. - */ - public void addInterceptors(WebRequestInterceptor... interceptors) { - register(asHandlerInterceptorArray(interceptors)); - } - - /** - * Add a {@link HandlerInterceptor} with a set of URL path patterns it should apply to. - */ - public void mapInterceptor(String[] pathPatterns, HandlerInterceptor interceptor) { - registerMappedInterceptors(pathPatterns, interceptor); - } - - /** - * Add a {@link WebRequestInterceptor} with a set of URL path patterns it should apply to. - */ - public void mapInterceptor(String[] pathPatterns, WebRequestInterceptor interceptors) { - registerMappedInterceptors(pathPatterns, asHandlerInterceptorArray(interceptors)); - } - - /** - * Add {@link HandlerInterceptor}s with a set of URL path patterns they should apply to. - */ - public void mapInterceptors(String[] pathPatterns, HandlerInterceptor... interceptors) { - registerMappedInterceptors(pathPatterns, interceptors); - } - - /** - * Add {@link WebRequestInterceptor}s with a set of URL path patterns they should apply to. - */ - public void mapInterceptors(String[] pathPatterns, WebRequestInterceptor... 
interceptors) { - registerMappedInterceptors(pathPatterns, asHandlerInterceptorArray(interceptors)); - } - - private static HandlerInterceptor[] asHandlerInterceptorArray(WebRequestInterceptor...interceptors) { - HandlerInterceptor[] result = new HandlerInterceptor[interceptors.length]; - for (int i = 0; i < result.length; i++) { - result[i] = new WebRequestHandlerInterceptorAdapter(interceptors[i]); - } - return result; - } - - /** - * Stores the given set of {@link HandlerInterceptor}s internally. - * @param interceptors one or more interceptors to be stored - */ - protected void register(HandlerInterceptor...interceptors) { - Assert.notEmpty(interceptors, "At least one interceptor must be provided"); - for (HandlerInterceptor interceptor : interceptors) { - this.interceptors.add(interceptor); - } - } - - /** - * Stores the given set of {@link HandlerInterceptor}s and path patterns internally. - * @param pathPatterns path patterns or {@code null} - * @param interceptors one or more interceptors to be stored - */ - protected void registerMappedInterceptors(String[] pathPatterns, HandlerInterceptor...interceptors) { - Assert.notEmpty(interceptors, "At least one interceptor must be provided"); - Assert.notEmpty(pathPatterns, "Path patterns must be provided"); - for (HandlerInterceptor interceptor : interceptors) { - this.interceptors.add(new MappedInterceptor(pathPatterns, interceptor)); - } - } - - /** - * Returns all registered interceptors. 
- */ - protected List<Object> getInterceptors() { - return interceptors; - } - -} diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/InterceptorRegistration.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/InterceptorRegistration.java new file mode 100644 index 000000000000..6ef77d2ef97c --- /dev/null +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/InterceptorRegistration.java @@ -0,0 +1,68 @@ +/* + * Copyright 2002-2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.web.servlet.config.annotation; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.springframework.util.Assert; +import org.springframework.web.servlet.HandlerInterceptor; +import org.springframework.web.servlet.handler.MappedInterceptor; + +/** + * Encapsulates a {@link HandlerInterceptor} and an optional list of URL patterns. + * Results in the creation of a {@link MappedInterceptor} if URL patterns are provided. + * + * @author Rossen Stoyanchev + * @author Keith Donald + * @since 3.1 + */ +public class InterceptorRegistration { + + private final HandlerInterceptor interceptor; + + private final List<String> pathPatterns = new ArrayList<String>(); + + /** + * Creates an {@link InterceptorRegistration} instance. 
+ */ + public InterceptorRegistration(HandlerInterceptor interceptor) { + Assert.notNull(interceptor, "Interceptor is required"); + this.interceptor = interceptor; + } + + /** + * Adds one or more URL patterns to which the registered interceptor should apply to. + * If no URL patterns are provided, the interceptor applies to all paths. + */ + public void addPathPatterns(String... pathPatterns) { + this.pathPatterns.addAll(Arrays.asList(pathPatterns)); + } + + /** + * Returns the underlying interceptor. If URL patterns are provided the returned type is + * {@link MappedInterceptor}; otherwise {@link HandlerInterceptor}. + */ + protected Object getInterceptor() { + if (pathPatterns.isEmpty()) { + return interceptor; + } + return new MappedInterceptor(pathPatterns.toArray(new String[pathPatterns.size()]), interceptor); + } + +} diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/InterceptorRegistry.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/InterceptorRegistry.java new file mode 100644 index 000000000000..7988a67c702d --- /dev/null +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/InterceptorRegistry.java @@ -0,0 +1,75 @@ +/* + * Copyright 2002-2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.web.servlet.config.annotation; + +import java.util.ArrayList; +import java.util.List; + +import org.springframework.web.context.request.WebRequestInterceptor; +import org.springframework.web.servlet.HandlerInterceptor; +import org.springframework.web.servlet.handler.WebRequestHandlerInterceptorAdapter; + +/** + * Stores and provides access to a list of interceptors. For each interceptor you can optionally + * specify one or more URL patterns it applies to. + * + * @author Rossen Stoyanchev + * @author Keith Donald + * + * @since 3.1 + */ +public class InterceptorRegistry { + + private final List<InterceptorRegistration> registrations = new ArrayList<InterceptorRegistration>(); + + /** + * Adds the provided {@link HandlerInterceptor}. + * @param interceptor the interceptor to add + * @return An {@link InterceptorRegistration} that allows you optionally configure the + * registered interceptor further for example adding URL patterns it should apply to. + */ + public InterceptorRegistration addInterceptor(HandlerInterceptor interceptor) { + InterceptorRegistration registration = new InterceptorRegistration(interceptor); + registrations.add(registration); + return registration; + } + + /** + * Adds the provided {@link WebRequestInterceptor}. + * @param interceptor the interceptor to add + * @return An {@link InterceptorRegistration} that allows you optionally configure the + * registered interceptor further for example adding URL patterns it should apply to. + */ + public InterceptorRegistration addWebRequestInterceptor(WebRequestInterceptor interceptor) { + WebRequestHandlerInterceptorAdapter adapted = new WebRequestHandlerInterceptorAdapter(interceptor); + InterceptorRegistration registration = new InterceptorRegistration(adapted); + registrations.add(registration); + return registration; + } + + /** + * Returns all registered interceptors. 
+ */ + protected List<Object> getInterceptors() { + List<Object> interceptors = new ArrayList<Object>(); + for (InterceptorRegistration registration : registrations) { + interceptors.add(registration.getInterceptor()); + } + return interceptors ; + } + +} diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ResourceConfigurer.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ResourceConfigurer.java deleted file mode 100644 index 576f91f457b1..000000000000 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ResourceConfigurer.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright 2002-2011 the original author or authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.springframework.web.servlet.config.annotation; - -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -import javax.servlet.ServletContext; - -import org.springframework.context.ApplicationContext; -import org.springframework.core.io.Resource; -import org.springframework.util.Assert; -import org.springframework.util.CollectionUtils; -import org.springframework.web.HttpRequestHandler; -import org.springframework.web.servlet.HandlerMapping; -import org.springframework.web.servlet.handler.SimpleUrlHandlerMapping; -import org.springframework.web.servlet.resource.ResourceHttpRequestHandler; - -/** - * Helps with configuring a handler for serving static resources such as images, css files and others through - * Spring MVC including setting cache headers optimized for efficient loading in a web browser. Resources can - * be served out of locations under web application root, from the classpath, and others. - * - * <p>To configure resource handling, use {@link #addPathMappings(String...)} to add one or more URL path patterns - * within the current Servlet context, to use for serving resources from the handler, such as {@code "/resources/**"}. - * - * <p>Then use {@link #addResourceLocations(String...)} to add one or more locations from which to serve - * static content. For example, {{@code "/"}, {@code "classpath:/META-INF/public-web-resources/"}} allows resources - * to be served both from the web application root and from any JAR on the classpath that contains a - * {@code /META-INF/public-web-resources/} directory, with resources in the web application root taking precedence. - * - * <p>Optionally use {@link #setCachePeriod(Integer)} to specify the cache period for the resources served by the - * handler and {@link #setOrder(int)} to set the order in which to serve requests relative to other - * {@link HandlerMapping} instances in the Spring MVC web application context. 
- * - * @author Rossen Stoyanchev - * @since 3.1 - * - * @see DefaultServletHandlerConfigurer - */ -public class ResourceConfigurer { - - private final List<String> pathPatterns = new ArrayList<String>(); - - private final List<Resource> locations = new ArrayList<Resource>(); - - private Integer cachePeriod; - - private int order = Integer.MAX_VALUE -1; - - private final ServletContext servletContext; - - private final ApplicationContext applicationContext; - - public ResourceConfigurer(ApplicationContext applicationContext, ServletContext servletContext) { - Assert.notNull(applicationContext, "ApplicationContext is required"); - this.applicationContext = applicationContext; - this.servletContext = servletContext; - } - - /** - * Add a URL path pattern within the current Servlet context to use for serving static resources - * using the Spring MVC {@link ResourceHttpRequestHandler}, for example {@code "/resources/**"}. - * @return the same {@link ResourceConfigurer} instance for chained method invocation - */ - public ResourceConfigurer addPathMapping(String pathPattern) { - return addPathMappings(pathPattern); - } - - /** - * Add several URL path patterns within the current Servlet context to use for serving static resources - * using the Spring MVC {@link ResourceHttpRequestHandler}, for example {@code "/resources/**"}. - * @return the same {@link ResourceConfigurer} instance for chained method invocation - */ - public ResourceConfigurer addPathMappings(String...pathPatterns) { - for (String path : pathPatterns) { - this.pathPatterns.add(path); - } - return this; - } - - /** - * Add resource location from which to serve static content. The location must point to a valid - * directory. <p>For example, a value of {@code "/"} will allow resources to be served both from the web - * application root. Also see {@link #addResourceLocations(String...)} for mapping several resource locations. 
- * @return the same {@link ResourceConfigurer} instance for chained method invocation - */ - public ResourceConfigurer addResourceLocation(String resourceLocation) { - return addResourceLocations(resourceLocation); - } - - /** - * Add one or more resource locations from which to serve static content. Each location must point to a valid - * directory. Multiple locations may be specified as a comma-separated list, and the locations will be checked - * for a given resource in the order specified. - * <p>For example, {{@code "/"}, {@code "classpath:/META-INF/public-web-resources/"}} allows resources to - * be served both from the web application root and from any JAR on the classpath that contains a - * {@code /META-INF/public-web-resources/} directory, with resources in the web application root taking precedence. - * @return the same {@link ResourceConfigurer} instance for chained method invocation - */ - public ResourceConfigurer addResourceLocations(String...resourceLocations) { - for (String location : resourceLocations) { - this.locations.add(applicationContext.getResource(location)); - } - return this; - } - - /** - * Specify the cache period for the resources served by the resource handler, in seconds. The default is to not - * send any cache headers but to rely on last-modified timestamps only. Set to 0 in order to send cache headers - * that prevent caching, or to a positive number of seconds to send cache headers with the given max-age value. - * @param cachePeriod the time to cache resources in seconds - * @return the same {@link ResourceConfigurer} instance for chained method invocation - */ - public ResourceConfigurer setCachePeriod(Integer cachePeriod) { - this.cachePeriod = cachePeriod; - return this; - } - - /** - * Get the cache period for static resources served by the resource handler. 
- */ - public Integer getCachePeriod() { - return cachePeriod; - } - - /** - * Specify the order in which to serve static resources relative to other {@link HandlerMapping} instances in the - * Spring MVC web application context. The default value is {@code Integer.MAX_VALUE-1}. - */ - public ResourceConfigurer setOrder(int order) { - this.order = order; - return this; - } - - /** - * Get the order in which to serve static resources relative other {@link HandlerMapping} instances. - * @return the same {@link ResourceConfigurer} instance for chained method invocation - */ - public Integer getOrder() { - return order; - } - - /** - * Return a {@link SimpleUrlHandlerMapping} with a {@link ResourceHttpRequestHandler} mapped to one or more - * URL path patterns. If the no path patterns were specified, the HandlerMapping returned contains an empty map. - */ - protected SimpleUrlHandlerMapping getHandlerMapping() { - SimpleUrlHandlerMapping handlerMapping = new SimpleUrlHandlerMapping(); - handlerMapping.setOrder(order); - handlerMapping.setUrlMap(getUrlMap()); - return handlerMapping; - } - - private Map<String, HttpRequestHandler> getUrlMap() { - Map<String, HttpRequestHandler> urlMap = new LinkedHashMap<String, HttpRequestHandler>(); - if (!pathPatterns.isEmpty()) { - ResourceHttpRequestHandler requestHandler = createRequestHandler(); - for (String pathPattern : pathPatterns) { - urlMap.put(pathPattern, requestHandler); - } - } - return urlMap; - } - - /** - * Create a {@link ResourceHttpRequestHandler} instance. 
- */ - protected ResourceHttpRequestHandler createRequestHandler() { - Assert.isTrue(!CollectionUtils.isEmpty(locations), "Path patterns specified but not resource locations."); - ResourceHttpRequestHandler requestHandler = new ResourceHttpRequestHandler(); - requestHandler.setApplicationContext(applicationContext); - requestHandler.setServletContext(servletContext); - requestHandler.setLocations(locations); - if (cachePeriod != null) { - requestHandler.setCacheSeconds(cachePeriod); - } - return requestHandler; - } - -} \ No newline at end of file diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ResourceHandlerRegistration.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ResourceHandlerRegistration.java new file mode 100644 index 000000000000..cbad096879bf --- /dev/null +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ResourceHandlerRegistration.java @@ -0,0 +1,105 @@ +/* + * Copyright 2002-2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.web.servlet.config.annotation; + +import java.util.ArrayList; +import java.util.List; + +import org.springframework.core.io.Resource; +import org.springframework.core.io.ResourceLoader; +import org.springframework.util.Assert; +import org.springframework.util.CollectionUtils; +import org.springframework.web.servlet.resource.ResourceHttpRequestHandler; + +/** + * Encapsulates information required to create a resource handlers. + * + * @author Rossen Stoyanchev + * @author Keith Donald + * + * @since 3.1 + */ +public class ResourceHandlerRegistration { + + private final ResourceLoader resourceLoader; + + private final String[] pathPatterns; + + private final List<Resource> locations = new ArrayList<Resource>(); + + private Integer cachePeriod; + + /** + * Create a {@link ResourceHandlerRegistration} instance. + * @param resourceLoader a resource loader for turning a String location into a {@link Resource} + * @param pathPatterns one or more resource URL path patterns + */ + public ResourceHandlerRegistration(ResourceLoader resourceLoader, String... pathPatterns) { + Assert.notEmpty(pathPatterns, "At least one path pattern is required for resource handling."); + this.resourceLoader = resourceLoader; + this.pathPatterns = pathPatterns; + } + + /** + * Add one or more resource locations from which to serve static content. Each location must point to a valid + * directory. Multiple locations may be specified as a comma-separated list, and the locations will be checked + * for a given resource in the order specified. + * <p>For example, {{@code "/"}, {@code "classpath:/META-INF/public-web-resources/"}} allows resources to + * be served both from the web application root and from any JAR on the classpath that contains a + * {@code /META-INF/public-web-resources/} directory, with resources in the web application root taking precedence. 
+ * @return the same {@link ResourceHandlerRegistration} instance for chained method invocation + */ + public ResourceHandlerRegistration addResourceLocations(String...resourceLocations) { + for (String location : resourceLocations) { + this.locations.add(resourceLoader.getResource(location)); + } + return this; + } + + /** + * Specify the cache period for the resources served by the resource handler, in seconds. The default is to not + * send any cache headers but to rely on last-modified timestamps only. Set to 0 in order to send cache headers + * that prevent caching, or to a positive number of seconds to send cache headers with the given max-age value. + * @param cachePeriod the time to cache resources in seconds + * @return the same {@link ResourceHandlerRegistration} instance for chained method invocation + */ + public ResourceHandlerRegistration setCachePeriod(Integer cachePeriod) { + this.cachePeriod = cachePeriod; + return this; + } + + /** + * Returns the URL path patterns for the resource handler. + */ + protected String[] getPathPatterns() { + return pathPatterns; + } + + /** + * Returns a {@link ResourceHttpRequestHandler} instance. 
+ */ + protected ResourceHttpRequestHandler getRequestHandler() { + Assert.isTrue(!CollectionUtils.isEmpty(locations), "At least one location is required for resource handling."); + ResourceHttpRequestHandler requestHandler = new ResourceHttpRequestHandler(); + requestHandler.setLocations(locations); + if (cachePeriod != null) { + requestHandler.setCacheSeconds(cachePeriod); + } + return requestHandler; + } + +} diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ResourceHandlerRegistry.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ResourceHandlerRegistry.java new file mode 100644 index 000000000000..e95cc82aacac --- /dev/null +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ResourceHandlerRegistry.java @@ -0,0 +1,111 @@ +/* + * Copyright 2002-2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.web.servlet.config.annotation; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import javax.servlet.ServletContext; + +import org.springframework.context.ApplicationContext; +import org.springframework.util.Assert; +import org.springframework.web.HttpRequestHandler; +import org.springframework.web.servlet.HandlerMapping; +import org.springframework.web.servlet.handler.AbstractHandlerMapping; +import org.springframework.web.servlet.handler.SimpleUrlHandlerMapping; +import org.springframework.web.servlet.resource.ResourceHttpRequestHandler; + +/** + * Stores registrations of resource handlers for serving static resources such as images, css files and others + * through Spring MVC including setting cache headers optimized for efficient loading in a web browser. + * Resources can be served out of locations under web application root, from the classpath, and others. + * + * <p>To create a resource handler, use {@link #addResourceHandler(String...)} providing the URL path patterns + * for which the handler should be invoked to serve static resources (e.g. {@code "/resources/**"}). + * + * <p>Then use additional methods on the returned {@link ResourceHandlerRegistration} to add one or more + * locations from which to serve static content from (e.g. {{@code "/"}, + * {@code "classpath:/META-INF/public-web-resources/"}}) or to specify a cache period for served resources. 
+ * + * @author Rossen Stoyanchev + * @since 3.1 + * + * @see DefaultServletHandlerConfigurer + */ +public class ResourceHandlerRegistry { + + private final ServletContext servletContext; + + private final ApplicationContext applicationContext; + + private final List<ResourceHandlerRegistration> registrations = new ArrayList<ResourceHandlerRegistration>(); + + private int order = Integer.MAX_VALUE -1; + + public ResourceHandlerRegistry(ApplicationContext applicationContext, ServletContext servletContext) { + Assert.notNull(applicationContext, "ApplicationContext is required"); + this.applicationContext = applicationContext; + this.servletContext = servletContext; + } + + /** + * Add a resource handler for serving static resources based on the specified URL path patterns. + * The handler will be invoked for every incoming request that matches to one of the specified path patterns. + * @return A {@link ResourceHandlerRegistration} to use to further configure the registered resource handler. + */ + public ResourceHandlerRegistration addResourceHandler(String... pathPatterns) { + ResourceHandlerRegistration registration = new ResourceHandlerRegistration(applicationContext, pathPatterns); + registrations.add(registration); + return registration; + } + + /** + * Specify the order to use for resource handling relative to other {@link HandlerMapping}s configured in + * the Spring MVC application context. The default value used is {@code Integer.MAX_VALUE-1}. + */ + public ResourceHandlerRegistry setOrder(int order) { + this.order = order; + return this; + } + + /** + * Return a handler mapping with the mapped resource handlers; or {@code null} in case of no registrations. 
+ */ + protected AbstractHandlerMapping getHandlerMapping() { + if (registrations.isEmpty()) { + return null; + } + + Map<String, HttpRequestHandler> urlMap = new LinkedHashMap<String, HttpRequestHandler>(); + for (ResourceHandlerRegistration registration : registrations) { + for (String pathPattern : registration.getPathPatterns()) { + ResourceHttpRequestHandler requestHandler = registration.getRequestHandler(); + requestHandler.setServletContext(servletContext); + requestHandler.setApplicationContext(applicationContext); + urlMap.put(pathPattern, requestHandler); + } + } + + SimpleUrlHandlerMapping handlerMapping = new SimpleUrlHandlerMapping(); + handlerMapping.setOrder(order); + handlerMapping.setUrlMap(urlMap); + return handlerMapping; + } + +} \ No newline at end of file diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ViewControllerConfigurer.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ViewControllerConfigurer.java deleted file mode 100644 index a5946b63c26d..000000000000 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ViewControllerConfigurer.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2002-2011 the original author or authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.springframework.web.servlet.config.annotation; - -import java.util.LinkedHashMap; -import java.util.Map; - -import org.springframework.web.servlet.HandlerMapping; -import org.springframework.web.servlet.RequestToViewNameTranslator; -import org.springframework.web.servlet.handler.SimpleUrlHandlerMapping; -import org.springframework.web.servlet.mvc.Controller; -import org.springframework.web.servlet.mvc.ParameterizableViewController; - -/** - * Helps with view controllers. View controllers provide a direct mapping between a URL path and view name. This is - * useful when serving requests that don't require application-specific controller logic and can be forwarded - * directly to a view for rendering. - * - * @author Rossen Stoyanchev - * @since 3.1 - */ -public class ViewControllerConfigurer { - - private final Map<String, Controller> urlMap = new LinkedHashMap<String, Controller>(); - - private int order = 1; - - /** - * Map the URL path to a view name derived by convention through the DispatcherServlet's - * {@link RequestToViewNameTranslator}. - * @return the same {@link ViewControllerConfigurer} instance for convenient chained method invocation - */ - public ViewControllerConfigurer mapViewNameByConvention(String urlPath) { - return mapViewName(urlPath, null); - } - - /** - * Map the URL path to the specified view name. - * @return the same {@link ViewControllerConfigurer} instance for convenient chained method invocation - */ - public ViewControllerConfigurer mapViewName(String urlPath, String viewName) { - ParameterizableViewController controller = new ParameterizableViewController(); - controller.setViewName(viewName); - urlMap.put(urlPath, controller); - return this; - } - - /** - * Specify the order in which to check view controller path mappings relative to other {@link HandlerMapping} - * instances in the Spring MVC web application context. The default value is 1. 
- */ - public void setOrder(int order) { - this.order = order; - } - - /** - * Get the order in which to check view controller path mappings relative to other {@link HandlerMapping}s. - */ - public int getOrder() { - return order; - } - - /** - * Return a {@link SimpleUrlHandlerMapping} with URL path to view controllers mappings. - */ - protected SimpleUrlHandlerMapping getHandlerMapping() { - SimpleUrlHandlerMapping handlerMapping = new SimpleUrlHandlerMapping(); - handlerMapping.setOrder(order); - handlerMapping.setUrlMap(urlMap); - return handlerMapping; - } - -} \ No newline at end of file diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ViewControllerRegistration.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ViewControllerRegistration.java new file mode 100644 index 000000000000..48700f905d78 --- /dev/null +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ViewControllerRegistration.java @@ -0,0 +1,70 @@ +/* + * Copyright 2002-2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.web.servlet.config.annotation; + +import org.springframework.util.Assert; +import org.springframework.web.servlet.RequestToViewNameTranslator; +import org.springframework.web.servlet.mvc.ParameterizableViewController; + +/** + * Encapsulates information required to create a view controller. + * + * @author Rossen Stoyanchev + * @author Keith Donald + * @since 3.1 + */ +public class ViewControllerRegistration { + + private final String urlPath; + + private String viewName; + + /** + * Creates a {@link ViewControllerRegistration} with the given URL path. When a request matches + * to the given URL path this view controller will process it. + */ + public ViewControllerRegistration(String urlPath) { + Assert.notNull(urlPath, "A URL path is required to create a view controller."); + this.urlPath = urlPath; + } + + /** + * Sets the view name to use for this view controller. This field is optional. If not specified the + * view controller will return a {@code null} view name, which will be resolved through the configured + * {@link RequestToViewNameTranslator}. By default that means "/foo/bar" would resolve to "foo/bar". + */ + public void setViewName(String viewName) { + this.viewName = viewName; + } + + /** + * Returns the URL path for the view controller. + */ + protected String getUrlPath() { + return urlPath; + } + + /** + * Returns the view controllers. 
+ */ + protected Object getViewController() { + ParameterizableViewController controller = new ParameterizableViewController(); + controller.setViewName(viewName); + return controller; + } + +} diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ViewControllerRegistry.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ViewControllerRegistry.java new file mode 100644 index 000000000000..55d3d457d050 --- /dev/null +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/ViewControllerRegistry.java @@ -0,0 +1,77 @@ +/* + * Copyright 2002-2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.web.servlet.config.annotation; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import org.springframework.web.servlet.HandlerMapping; +import org.springframework.web.servlet.handler.AbstractHandlerMapping; +import org.springframework.web.servlet.handler.SimpleUrlHandlerMapping; + +/** + * Stores registrations of view controllers. A view controller does nothing more than return a specified + * view name. It saves you from having to write a controller when you want to forward the request straight + * through to a view such as a JSP. 
+ * + * @author Rossen Stoyanchev + * @author Keith Donald + * @since 3.1 + */ +public class ViewControllerRegistry { + + private final List<ViewControllerRegistration> registrations = new ArrayList<ViewControllerRegistration>(); + + private int order = 1; + + public ViewControllerRegistration addViewController(String urlPath) { + ViewControllerRegistration registration = new ViewControllerRegistration(urlPath); + registrations.add(registration); + return registration; + } + + /** + * Specify the order to use for ViewControllers mappings relative to other {@link HandlerMapping}s + * configured in the Spring MVC application context. The default value for view controllers is 1, + * which is 1 higher than the value used for annotated controllers. + */ + public void setOrder(int order) { + this.order = order; + } + + /** + * Returns a handler mapping with the mapped ViewControllers; or {@code null} in case of no registrations. + */ + protected AbstractHandlerMapping getHandlerMapping() { + if (registrations.isEmpty()) { + return null; + } + + Map<String, Object> urlMap = new LinkedHashMap<String, Object>(); + for (ViewControllerRegistration registration : registrations) { + urlMap.put(registration.getUrlPath(), registration.getViewController()); + } + + SimpleUrlHandlerMapping handlerMapping = new SimpleUrlHandlerMapping(); + handlerMapping.setOrder(order); + handlerMapping.setUrlMap(urlMap); + return handlerMapping; + } + +} \ No newline at end of file diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationSupport.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationSupport.java index 2fe02419360c..8da51d8aeb60 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationSupport.java +++ 
b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationSupport.java @@ -20,6 +20,7 @@ import java.util.List; import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; import javax.xml.transform.Source; import org.springframework.beans.BeanUtils; @@ -46,7 +47,6 @@ import org.springframework.http.converter.xml.XmlAwareFormHttpMessageConverter; import org.springframework.util.ClassUtils; import org.springframework.validation.Errors; -import org.springframework.validation.MessageCodesResolver; import org.springframework.validation.Validator; import org.springframework.web.HttpRequestHandler; import org.springframework.web.bind.annotation.ExceptionHandler; @@ -59,10 +59,10 @@ import org.springframework.web.servlet.HandlerAdapter; import org.springframework.web.servlet.HandlerExceptionResolver; import org.springframework.web.servlet.HandlerMapping; +import org.springframework.web.servlet.handler.AbstractHandlerMapping; import org.springframework.web.servlet.handler.BeanNameUrlHandlerMapping; import org.springframework.web.servlet.handler.ConversionServiceExposingInterceptor; import org.springframework.web.servlet.handler.HandlerExceptionResolverComposite; -import org.springframework.web.servlet.handler.SimpleUrlHandlerMapping; import org.springframework.web.servlet.mvc.Controller; import org.springframework.web.servlet.mvc.HttpRequestHandlerAdapter; import org.springframework.web.servlet.mvc.SimpleControllerHandlerAdapter; @@ -75,27 +75,25 @@ /** * A base class that provides default configuration for Spring MVC applications by registering Spring MVC * infrastructure components to be detected by the {@link DispatcherServlet}. Typically applications should not - * have to start by extending this class. A much easier place to start is to annotate your @{@link Configuration} - * class with @{@link EnableWebMvc}. See @{@link EnableWebMvc} and {@link WebMvcConfigurer}. 
+ * have to extend this class. A more likely place to start is to annotate an @{@link Configuration} + * class with @{@link EnableWebMvc} (see @{@link EnableWebMvc} and {@link WebMvcConfigurer} for details). * - * <p>If using @{@link EnableWebMvc} and extending from {@link WebMvcConfigurerAdapter} does not give you the level - * of flexibility you need, consider extending directly from this class instead. Remember to add @{@link Configuration} - * to your subclass and @{@link Bean} to any superclass @{@link Bean} methods you choose to override. A few example - * reasons for extending this class include providing a custom {@link MessageCodesResolver}, changing the order of - * {@link HandlerMapping} instances, plugging in a variant of any of the beans provided by this class, and so on. + * <p>If using @{@link EnableWebMvc} does not give you all you need, consider extending directly from this + * class. Remember to add @{@link Configuration} to your subclass and @{@link Bean} to any superclass + * @{@link Bean} methods you choose to override. * * <p>This class registers the following {@link HandlerMapping}s:</p> * <ul> * <li>{@link RequestMappingHandlerMapping} ordered at 0 for mapping requests to annotated controller methods. - * <li>{@link SimpleUrlHandlerMapping} ordered at 1 to map URL paths directly to view names. + * <li>{@link HandlerMapping} ordered at 1 to map URL paths directly to view names. * <li>{@link BeanNameUrlHandlerMapping} ordered at 2 to map URL paths to controller bean names. - * <li>{@link SimpleUrlHandlerMapping} ordered at {@code Integer.MAX_VALUE-1} to serve static resource requests. - * <li>{@link SimpleUrlHandlerMapping} ordered at {@code Integer.MAX_VALUE} to forward requests to the default servlet. + * <li>{@link HandlerMapping} ordered at {@code Integer.MAX_VALUE-1} to serve static resource requests. + * <li>{@link HandlerMapping} ordered at {@code Integer.MAX_VALUE} to forward requests to the default servlet. 
* </ul> * - * <p>Registers {@link HandlerAdapter}s: + * <p>Registers these {@link HandlerAdapter}s: * <ul> - * <li>{@link RequestMappingHandlerAdapter} for processing requests using annotated controller methods. + * <li>{@link RequestMappingHandlerAdapter} for processing requests with annotated controller methods. * <li>{@link HttpRequestHandlerAdapter} for processing requests with {@link HttpRequestHandler}s. * <li>{@link SimpleControllerHandlerAdapter} for processing requests with interface-based {@link Controller}s. * </ul> @@ -107,7 +105,7 @@ * <li>{@link DefaultHandlerExceptionResolver} for resolving known Spring exception types * </ul> * - * <p>Registers the following other instances: + * <p>Registers these other instances: * <ul> * <li>{@link FormattingConversionService} for use with annotated controller methods and the spring:eval JSP tag. * <li>{@link Validator} for validating model attributes on annotated controller methods. @@ -143,57 +141,53 @@ public void setApplicationContext(ApplicationContext applicationContext) throws */ @Bean public RequestMappingHandlerMapping requestMappingHandlerMapping() { - RequestMappingHandlerMapping mapping = new RequestMappingHandlerMapping(); - mapping.setInterceptors(getInterceptors()); - mapping.setOrder(0); - return mapping; + RequestMappingHandlerMapping handlerMapping = new RequestMappingHandlerMapping(); + handlerMapping.setOrder(0); + handlerMapping.setInterceptors(getInterceptors()); + return handlerMapping; } /** * Provides access to the shared handler interceptors used to configure {@link HandlerMapping} instances with. - * This method cannot be overridden, use {@link #configureInterceptors(InterceptorConfigurer)} instead. + * This method cannot be overridden, use {@link #addInterceptors(InterceptorRegistry)} instead. 
*/ protected final Object[] getInterceptors() { if (interceptors == null) { - InterceptorConfigurer configurer = new InterceptorConfigurer(); - configureInterceptors(configurer); - configurer.addInterceptor(new ConversionServiceExposingInterceptor(mvcConversionService())); - interceptors = configurer.getInterceptors(); + InterceptorRegistry registry = new InterceptorRegistry(); + addInterceptors(registry); + registry.addInterceptor(new ConversionServiceExposingInterceptor(mvcConversionService())); + interceptors = registry.getInterceptors(); } return interceptors.toArray(); } /** - * Override this method to configure the Spring MVC interceptors to use. Interceptors allow requests to - * be pre- and post-processed before and after controller invocation. They can be registered to apply - * to all requests or be limited to a set of path patterns. - * @see InterceptorConfigurer + * Override this method to add Spring MVC interceptors for pre/post-processing of controller invocation. + * @see InterceptorRegistry */ - protected void configureInterceptors(InterceptorConfigurer configurer) { + protected void addInterceptors(InterceptorRegistry registry) { } /** - * Returns a {@link SimpleUrlHandlerMapping} ordered at 1 to map URL paths directly to view names. - * To configure view controllers see {@link #configureViewControllers(ViewControllerConfigurer)}. + * Returns a handler mapping ordered at 1 to map URL paths directly to view names. + * To configure view controllers, override {@link #addViewControllers(ViewControllerRegistry)}. 
*/ @Bean - public SimpleUrlHandlerMapping viewControllerHandlerMapping() { - ViewControllerConfigurer configurer = new ViewControllerConfigurer(); - configurer.setOrder(1); - configureViewControllers(configurer); + public HandlerMapping viewControllerHandlerMapping() { + ViewControllerRegistry registry = new ViewControllerRegistry(); + addViewControllers(registry); - SimpleUrlHandlerMapping handlerMapping = configurer.getHandlerMapping(); + AbstractHandlerMapping handlerMapping = registry.getHandlerMapping(); + handlerMapping = handlerMapping != null ? handlerMapping : new EmptyHandlerMapping(); handlerMapping.setInterceptors(getInterceptors()); return handlerMapping; } /** - * Override this method to configure view controllers. View controllers provide a direct mapping between a - * URL path and view name. This is useful when serving requests that don't require application-specific - * controller logic and can be forwarded directly to a view for rendering. - * @see ViewControllerConfigurer + * Override this method to add view controllers. + * @see ViewControllerRegistry */ - protected void configureViewControllers(ViewControllerConfigurer configurer) { + protected void addViewControllers(ViewControllerRegistry registry) { } /** @@ -208,53 +202,50 @@ public BeanNameUrlHandlerMapping beanNameHandlerMapping() { } /** - * Returns a {@link SimpleUrlHandlerMapping} ordered at Integer.MAX_VALUE-1 to serve static resource requests. - * To configure resource handling, see {@link #configureResourceHandling(ResourceConfigurer)}. + * Returns a handler mapping ordered at Integer.MAX_VALUE-1 with mapped resource handlers. + * To configure resource handling, override {@link #addResourceHandlers(ResourceHandlerRegistry)}. 
*/ @Bean - public SimpleUrlHandlerMapping resourceHandlerMapping() { - ResourceConfigurer configurer = new ResourceConfigurer(applicationContext, servletContext); - configurer.setOrder(Integer.MAX_VALUE-1); - configureResourceHandling(configurer); - return configurer.getHandlerMapping(); + public HandlerMapping resourceHandlerMapping() { + ResourceHandlerRegistry registry = new ResourceHandlerRegistry(applicationContext, servletContext); + addResourceHandlers(registry); + AbstractHandlerMapping handlerMapping = registry.getHandlerMapping(); + handlerMapping = handlerMapping != null ? handlerMapping : new EmptyHandlerMapping(); + return handlerMapping; } /** - * Override this method to configure a handler for serving static resources such as images, js, and, css files - * through Spring MVC including setting cache headers optimized for efficient loading in a web browser. - * Resources can be served out of locations under web application root, from the classpath, and others. - * @see ResourceConfigurer + * Override this method to add resource handlers for serving static resources. + * @see ResourceHandlerRegistry */ - protected void configureResourceHandling(ResourceConfigurer configurer) { + protected void addResourceHandlers(ResourceHandlerRegistry registry) { } /** - * Returns a {@link SimpleUrlHandlerMapping} ordered at Integer.MAX_VALUE to serve static resources by - * forwarding to the Servlet container's default servlet. To configure default servlet handling see + * Returns a handler mapping ordered at Integer.MAX_VALUE with a mapped default servlet handler. + * To configure "default" Servlet handling, override * {@link #configureDefaultServletHandling(DefaultServletHandlerConfigurer)}. 
*/ @Bean - public SimpleUrlHandlerMapping defaultServletHandlerMapping() { + public HandlerMapping defaultServletHandlerMapping() { DefaultServletHandlerConfigurer configurer = new DefaultServletHandlerConfigurer(servletContext); configureDefaultServletHandling(configurer); - return configurer.getHandlerMapping(); + AbstractHandlerMapping handlerMapping = configurer.getHandlerMapping(); + handlerMapping = handlerMapping != null ? handlerMapping : new EmptyHandlerMapping(); + return handlerMapping; } /** - * Override this method to configure a handler for delegating unhandled requests by forwarding to the - * Servlet container's default servlet. This is commonly used when the {@link DispatcherServlet} is - * mapped to "/", which results in cleaner URLs (without a servlet prefix) but may need to still allow - * some requests (e.g. static resources) to be handled by the Servlet container's default servlet. + * Override this method to configure "default" Servlet handling. * @see DefaultServletHandlerConfigurer */ protected void configureDefaultServletHandling(DefaultServletHandlerConfigurer configurer) { } /** - * Returns a {@link RequestMappingHandlerAdapter} for processing requests using annotated controller methods. - * Also see the following other methods as an alternative to overriding this method: + * Returns a {@link RequestMappingHandlerAdapter} for processing requests through annotated controller methods. + * Consider overriding one of these other more fine-grained methods: * <ul> - * <li>{@link #initWebBindingInitializer()} for configuring data binding globally. * <li>{@link #addArgumentResolvers(List)} for adding custom argument resolvers. * <li>{@link #addReturnValueHandlers(List)} for adding custom return value handlers. * <li>{@link #configureMessageConverters(List)} for adding custom message converters. 
@@ -265,7 +256,6 @@ public RequestMappingHandlerAdapter requestMappingHandlerAdapter() { ConfigurableWebBindingInitializer webBindingInitializer = new ConfigurableWebBindingInitializer(); webBindingInitializer.setConversionService(mvcConversionService()); webBindingInitializer.setValidator(mvcValidator()); - configureWebBindingInitializer(webBindingInitializer); List<HandlerMethodArgumentResolver> argumentResolvers = new ArrayList<HandlerMethodArgumentResolver>(); addArgumentResolvers(argumentResolvers); @@ -282,28 +272,21 @@ public RequestMappingHandlerAdapter requestMappingHandlerAdapter() { } /** - * Override this method to customize the {@link ConfigurableWebBindingInitializer} the - * {@link RequestMappingHandlerAdapter} is configured with. - */ - protected void configureWebBindingInitializer(ConfigurableWebBindingInitializer webBindingInitializer) { - } - - /** - * Override this method to add custom argument resolvers to use in addition to the ones registered by default - * internally by the {@link RequestMappingHandlerAdapter}. - * <p>Generally custom argument resolvers are invoked first. However this excludes default argument resolvers that - * rely on the presence of annotations (e.g. {@code @RequestParameter}, {@code @PathVariable}, etc.). Those - * argument resolvers are not customizable without configuring RequestMappingHandlerAdapter directly. + * Add custom {@link HandlerMethodArgumentResolver}s to use in addition to the ones registered by default. + * <p>Custom argument resolvers are invoked before built-in resolvers except for those that rely on the presence + * of annotations (e.g. {@code @RequestParameter}, {@code @PathVariable}, etc.). The latter can be customized + * by configuring the {@link RequestMappingHandlerAdapter} directly. + * @param argumentResolvers the list of custom converters; initially an empty list. 
*/ protected void addArgumentResolvers(List<HandlerMethodArgumentResolver> argumentResolvers) { } /** - * Override this method to add custom return value handlers to use in addition to the ones registered by default - * internally by the {@link RequestMappingHandlerAdapter}. - * <p>Generally custom return value handlers are invoked first. However this excludes default return value handlers - * that rely on the presence of annotations (e.g. {@code @ResponseBody}, {@code @ModelAttribute}, etc.). Those - * handlers are not customizable without configuring RequestMappingHandlerAdapter directly. + * Add custom {@link HandlerMethodReturnValueHandler}s in addition to the ones registered by default. + * <p>Custom return value handlers are invoked before built-in ones except for those that rely on the presence + * of annotations (e.g. {@code @ResponseBody}, {@code @ModelAttribute}, etc.). The latter can be customized + * by configuring the {@link RequestMappingHandlerAdapter} directly. + * @param returnValueHandlers the list of custom handlers; initially an empty list. */ protected void addReturnValueHandlers(List<HandlerMethodReturnValueHandler> returnValueHandlers) { } @@ -311,8 +294,8 @@ protected void addReturnValueHandlers(List<HandlerMethodReturnValueHandler> retu /** * Provides access to the shared {@link HttpMessageConverter}s used by the * {@link RequestMappingHandlerAdapter} and the {@link ExceptionHandlerExceptionResolver}. - * This method cannot be extended directly, use {@link #configureMessageConverters(List)} add custom converters. - * For the list of message converters added by default see {@link #addDefaultHttpMessageConverters(List)}. + * This method cannot be overridden. Use {@link #configureMessageConverters(List)} instead. + * Also see {@link #addDefaultHttpMessageConverters(List)} that can be used to add default message converters. 
*/ protected final List<HttpMessageConverter<?>> getMessageConverters() { if (messageConverters == null) { @@ -328,16 +311,16 @@ protected final List<HttpMessageConverter<?>> getMessageConverters() { /** * Override this method to add custom {@link HttpMessageConverter}s to use with * the {@link RequestMappingHandlerAdapter} and the {@link ExceptionHandlerExceptionResolver}. - * If any converters are added through this method, default converters are added automatically. - * See {@link #addDefaultHttpMessageConverters(List)} for adding default converters to the list. - * @param messageConverters the list to add converters to + * Adding converters to the list turns off the default converters that would otherwise be registered by default. + * Also see {@link #addDefaultHttpMessageConverters(List)} that can be used to add default message converters. + * @param converters a list to add message converters to; initially an empty list. */ protected void configureMessageConverters(List<HttpMessageConverter<?>> converters) { } /** - * A method available to subclasses for adding default {@link HttpMessageConverter}s. - * @param messageConverters the list to add converters to + * A method available to subclasses to add default {@link HttpMessageConverter}s. + * @param messageConverters the list to add the default message converters to */ protected final void addDefaultHttpMessageConverters(List<HttpMessageConverter<?>> messageConverters) { StringHttpMessageConverter stringConverter = new StringHttpMessageConverter(); @@ -382,8 +365,7 @@ protected void addFormatters(FormatterRegistry registry) { /** * Returns {@link Validator} for validating {@code @ModelAttribute} and {@code @RequestBody} arguments of - * annotated controller methods. This method is closed for extension. Use {@link #getValidator()} to - * provide a custom validator. + * annotated controller methods. To configure a custom validation, override {@link #getValidator()}. 
*/ @Bean Validator mvcValidator() { @@ -415,10 +397,7 @@ public void validate(Object target, Errors errors) { } /** - * Override this method to provide a custom {@link Validator} type. If this method returns {@code null}, by - * a check is made for the presence of a JSR-303 implementation on the classpath - if available a - * {@link org.springframework.validation.beanvalidation.LocalValidatorFactoryBean} instance is created. - * Otherwise if no JSR-303 implementation is detected, a no-op {@link Validator} is returned instead. + * Override this method to provide a custom {@link Validator}. */ protected Validator getValidator() { return null; @@ -442,8 +421,7 @@ public SimpleControllerHandlerAdapter simpleControllerHandlerAdapter() { /** * Returns a {@link HandlerExceptionResolverComposite} that contains a list of exception resolvers. - * This method is closed for extension. Use {@link #configureHandlerExceptionResolvers(List) to - * customize the list of exception resolvers. + * To customize the list of exception resolvers, override {@link #configureHandlerExceptionResolvers(List)}. */ @Bean HandlerExceptionResolver handlerExceptionResolver() throws Exception { @@ -461,10 +439,10 @@ HandlerExceptionResolver handlerExceptionResolver() throws Exception { } /** - * Override this method to configure the list of {@link HandlerExceptionResolver}s to use for handling - * unresolved controller exceptions. If any exception resolvers are added through this method, default - * exception resolvers are not added automatically. For the list of exception resolvers added by - * default see {@link #addDefaultHandlerExceptionResolvers(List)}. + * Override this method to configure the list of {@link HandlerExceptionResolver}s to use. + * Adding resolvers to the list turns off the default resolvers that would otherwise be registered by default. + * Also see {@link #addDefaultHandlerExceptionResolvers(List)} that can be used to add the default exception resolvers. 
+ * @param exceptionResolvers a list to add exception resolvers to; initially an empty list. */ protected void configureHandlerExceptionResolvers(List<HandlerExceptionResolver> exceptionResolvers) { } @@ -487,5 +465,13 @@ protected final void addDefaultHandlerExceptionResolvers(List<HandlerExceptionRe exceptionResolvers.add(new ResponseStatusExceptionResolver()); exceptionResolvers.add(new DefaultHandlerExceptionResolver()); } + + private final static class EmptyHandlerMapping extends AbstractHandlerMapping { + + @Override + protected Object getHandlerInternal(HttpServletRequest request) throws Exception { + return null; + } + } } diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurer.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurer.java index 8e1e9b2b3bd3..665b9325df8a 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurer.java +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurer.java @@ -24,23 +24,21 @@ import org.springframework.http.converter.HttpMessageConverter; import org.springframework.validation.Validator; import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.context.request.WebRequestInterceptor; import org.springframework.web.method.support.HandlerMethodArgumentResolver; import org.springframework.web.method.support.HandlerMethodReturnValueHandler; import org.springframework.web.servlet.DispatcherServlet; import org.springframework.web.servlet.HandlerExceptionResolver; -import org.springframework.web.servlet.HandlerInterceptor; +import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter; import com.sun.corba.se.impl.presentation.rmi.ExceptionHandler; /** - * Defines configuration callback methods for 
customizing the default Spring MVC configuration enabled through the - * use of @{@link EnableWebMvc}. + * Defines configuration callback methods for customizing the default Spring MVC code-based configuration enabled + * through @{@link EnableWebMvc}. * * <p>Classes annotated with @{@link EnableWebMvc} can implement this interface in order to be called back and - * given a chance to customize the default configuration. The most convenient way to implement this interface is - * by extending from {@link WebMvcConfigurerAdapter}, which provides empty method implementations and allows - * overriding only the callback methods you're interested in. + * given a chance to customize the default configuration. The most convenient way to implement this interface + * is to extend {@link WebMvcConfigurerAdapter}, which provides empty method implementations. * * @author Rossen Stoyanchev * @author Keith Donald @@ -56,9 +54,9 @@ public interface WebMvcConfigurer { /** * Configure the list of {@link HttpMessageConverter}s to use when resolving method arguments or handling - * return values in @{@link RequestMapping} and @{@link ExceptionHandler} methods. - * Specifying custom converters overrides the converters registered by default. - * @param converters a list to add message converters to + * return values in @{@link RequestMapping} and @{@link ExceptionHandler} methods. + * Adding converters to the list turns off the default converters that would otherwise be registered by default. + * @param converters a list to add message converters to; initially an empty list. */ void configureMessageConverters(List<HttpMessageConverter<?>> converters); @@ -71,58 +69,56 @@ public interface WebMvcConfigurer { /** * Add custom {@link HandlerMethodArgumentResolver}s to use in addition to the ones registered by default. - * <p>Generally custom argument resolvers are invoked first. However this excludes default argument resolvers that - * rely on the presence of annotations (e.g. 
{@code @RequestParameter}, {@code @PathVariable}, etc.). Those - * argument resolvers are not customizable without configuring RequestMappingHandlerAdapter directly. - * @param argumentResolvers the list of custom converters, initially empty + * <p>Custom argument resolvers are invoked before built-in resolvers except for those that rely on the presence + * of annotations (e.g. {@code @RequestParameter}, {@code @PathVariable}, etc.). The latter can be customized + * by configuring the {@link RequestMappingHandlerAdapter} directly. + * @param argumentResolvers the list of custom converters; initially an empty list. */ void addArgumentResolvers(List<HandlerMethodArgumentResolver> argumentResolvers); /** - * Add custom {@link HandlerMethodReturnValueHandler}s to in addition to the ones registered by default. - * <p>Generally custom return value handlers are invoked first. However this excludes default return value handlers - * that rely on the presence of annotations (e.g. {@code @ResponseBody}, {@code @ModelAttribute}, etc.). Those - * handlers are not customizable without configuring RequestMappingHandlerAdapter directly. - * @param returnValueHandlers the list of custom handlers, initially empty + * Add custom {@link HandlerMethodReturnValueHandler}s in addition to the ones registered by default. + * <p>Custom return value handlers are invoked before built-in ones except for those that rely on the presence + * of annotations (e.g. {@code @ResponseBody}, {@code @ModelAttribute}, etc.). The latter can be customized + * by configuring the {@link RequestMappingHandlerAdapter} directly. + * @param returnValueHandlers the list of custom handlers; initially an empty list. */ void addReturnValueHandlers(List<HandlerMethodReturnValueHandler> returnValueHandlers); /** * Configure the list of {@link HandlerExceptionResolver}s to use for handling unresolved controller exceptions. - * Specifying exception resolvers overrides the ones registered by default. 
- * @param exceptionResolvers a list to add exception resolvers to + * Adding resolvers to the list turns off the default resolvers that would otherwise be registered by default. + * @param exceptionResolvers a list to add exception resolvers to; initially an empty list. */ void configureHandlerExceptionResolvers(List<HandlerExceptionResolver> exceptionResolvers); /** - * Configure the Spring MVC interceptors to use. Interceptors allow requests to be pre- and post-processed - * before and after controller invocation. They can be registered to apply to all requests or be limited - * to a set of path patterns. - * @see InterceptorConfigurer + * Add Spring MVC lifecycle interceptors for pre- and post-processing of controller method invocations. + * Interceptors can be registered to apply to all requests or to a set of URL path patterns. + * @see InterceptorRegistry */ - void configureInterceptors(InterceptorConfigurer configurer); + void addInterceptors(InterceptorRegistry registry); /** - * Configure view controllers. View controllers provide a direct mapping between a URL path and view name. - * This is useful when serving requests that don't require application-specific controller logic and can - * be forwarded directly to a view for rendering. - * @see ViewControllerConfigurer + * Add view controllers to create a direct mapping between a URL path and view name. This is useful when + * you just want to forward the request to a view such as a JSP without the need for controller logic. + * @see ViewControllerRegistry */ - void configureViewControllers(ViewControllerConfigurer configurer); + void addViewControllers(ViewControllerRegistry registry); /** - * Configure a handler for serving static resources such as images, js, and, css files through Spring MVC - * including setting cache headers optimized for efficient loading in a web browser. Resources can be served - * out of locations under web application root, from the classpath, and others. 
- * @see ResourceConfigurer + * Add resource handlers to serve static resources such as images, js, and css files through + * the Spring MVC {@link DispatcherServlet} including the setting of cache headers optimized for efficient + * loading in a web browser. Resources can be served out of locations under web application root, + * from the classpath, and others. + * @see ResourceHandlerRegistry */ - void configureResourceHandling(ResourceConfigurer configurer); + void addResourceHandlers(ResourceHandlerRegistry registry); /** - * Configure a handler for delegating unhandled requests by forwarding to the Servlet container's default - * servlet. This is commonly used when the {@link DispatcherServlet} is mapped to "/", which results in - * cleaner URLs (without a servlet prefix) but may need to still allow some requests (e.g. static resources) - * to be handled by the Servlet container's default servlet. + * Configure a handler for delegating unhandled requests by forwarding to the Servlet container's "default" + * servlet. The use case for this is when the {@link DispatcherServlet} is mapped to "/" thus overriding + * the Servlet container's default handling of static resources. 
* @see DefaultServletHandlerConfigurer */ void configureDefaultServletHandling(DefaultServletHandlerConfigurer configurer); diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurerAdapter.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurerAdapter.java index d3ef44d8c51b..6f941eca5af5 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurerAdapter.java +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurerAdapter.java @@ -26,8 +26,7 @@ import org.springframework.web.servlet.HandlerExceptionResolver; /** - * An abstract class with empty method implementations of {@link WebMvcConfigurer}. - * Subclasses can override only the methods they need. + * A convenient base class with empty method implementations of {@link WebMvcConfigurer}. * * @author Rossen Stoyanchev * @since 3.1 @@ -81,21 +80,21 @@ public void configureHandlerExceptionResolvers(List<HandlerExceptionResolver> ex * {@inheritDoc} * <p>This implementation is empty. */ - public void configureInterceptors(InterceptorConfigurer configurer) { + public void addInterceptors(InterceptorRegistry registry) { } /** * {@inheritDoc} * <p>This implementation is empty. */ - public void configureViewControllers(ViewControllerConfigurer configurer) { + public void addViewControllers(ViewControllerRegistry registry) { } /** * {@inheritDoc} * <p>This implementation is empty. 
*/ - public void configureResourceHandling(ResourceConfigurer configurer) { + public void addResourceHandlers(ResourceHandlerRegistry registry) { } /** diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurerComposite.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurerComposite.java index 5a6021742752..1c0e7494e9a5 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurerComposite.java +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurerComposite.java @@ -74,21 +74,21 @@ public void configureHandlerExceptionResolvers(List<HandlerExceptionResolver> ex } } - public void configureInterceptors(InterceptorConfigurer configurer) { + public void addInterceptors(InterceptorRegistry registry) { for (WebMvcConfigurer delegate : delegates) { - delegate.configureInterceptors(configurer); + delegate.addInterceptors(registry); } } - public void configureViewControllers(ViewControllerConfigurer configurer) { + public void addViewControllers(ViewControllerRegistry registry) { for (WebMvcConfigurer delegate : delegates) { - delegate.configureViewControllers(configurer); + delegate.addViewControllers(registry); } } - public void configureResourceHandling(ResourceConfigurer configurer) { + public void addResourceHandlers(ResourceHandlerRegistry registry) { for (WebMvcConfigurer delegate : delegates) { - delegate.configureResourceHandling(configurer); + delegate.addResourceHandlers(registry); } } diff --git a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/AnnotationDrivenBeanDefinitionParserTests.java b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/AnnotationDrivenBeanDefinitionParserTests.java index 72531cfa53ea..c9c586240f77 100644 --- 
a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/AnnotationDrivenBeanDefinitionParserTests.java +++ b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/AnnotationDrivenBeanDefinitionParserTests.java @@ -62,7 +62,7 @@ public void testMessageCodesResolver() { loadBeanDefinitions("mvc-config-message-codes-resolver.xml"); RequestMappingHandlerAdapter adapter = appContext.getBean(RequestMappingHandlerAdapter.class); assertNotNull(adapter); - Object initializer = new DirectFieldAccessor(adapter).getPropertyValue("webBindingInitializer"); + Object initializer = adapter.getWebBindingInitializer(); assertNotNull(initializer); MessageCodesResolver resolver = ((ConfigurableWebBindingInitializer) initializer).getMessageCodesResolver(); assertNotNull(resolver); diff --git a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/DefaultServletHandlerConfigurerTests.java b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/DefaultServletHandlerConfigurerTests.java index 874ef655d991..8d00d78dc902 100644 --- a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/DefaultServletHandlerConfigurerTests.java +++ b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/DefaultServletHandlerConfigurerTests.java @@ -18,7 +18,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNull; import javax.servlet.RequestDispatcher; @@ -53,13 +53,14 @@ public void setUp() { @Test public void notEnabled() { - assertTrue(configurer.getHandlerMapping().getUrlMap().isEmpty()); + assertNull(configurer.getHandlerMapping()); } @Test public void enable() throws Exception { configurer.enable(); - SimpleUrlHandlerMapping handlerMapping = 
configurer.getHandlerMapping(); + SimpleUrlHandlerMapping getHandlerMapping = getHandlerMapping(); + SimpleUrlHandlerMapping handlerMapping = getHandlerMapping; DefaultServletHttpRequestHandler handler = (DefaultServletHttpRequestHandler) handlerMapping.getUrlMap().get("/**"); assertNotNull(handler); @@ -75,7 +76,7 @@ public void enable() throws Exception { @Test public void enableWithServletName() throws Exception { configurer.enable("defaultServlet"); - SimpleUrlHandlerMapping handlerMapping = configurer.getHandlerMapping(); + SimpleUrlHandlerMapping handlerMapping = getHandlerMapping(); DefaultServletHttpRequestHandler handler = (DefaultServletHttpRequestHandler) handlerMapping.getUrlMap().get("/**"); assertNotNull(handler); @@ -99,4 +100,8 @@ public RequestDispatcher getNamedDispatcher(String url) { } } + private SimpleUrlHandlerMapping getHandlerMapping() { + return (SimpleUrlHandlerMapping) configurer.getHandlerMapping(); + } + } diff --git a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationTests.java b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/DelegatingWebMvcConfigurationTests.java similarity index 73% rename from org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationTests.java rename to org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/DelegatingWebMvcConfigurationTests.java index 130342b24f41..2ce7795f76da 100644 --- a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationTests.java +++ b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/DelegatingWebMvcConfigurationTests.java @@ -18,11 +18,9 @@ import static org.easymock.EasyMock.capture; import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.isA; 
import static org.easymock.EasyMock.replay; import static org.easymock.EasyMock.verify; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; @@ -30,8 +28,6 @@ import java.util.Arrays; import java.util.List; -import javax.servlet.http.HttpServletRequest; - import org.easymock.Capture; import org.easymock.EasyMock; import org.junit.Before; @@ -39,29 +35,23 @@ import org.springframework.format.support.FormattingConversionService; import org.springframework.http.converter.HttpMessageConverter; import org.springframework.http.converter.StringHttpMessageConverter; -import org.springframework.mock.web.MockHttpServletRequest; -import org.springframework.stereotype.Controller; import org.springframework.validation.beanvalidation.LocalValidatorFactoryBean; -import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.support.ConfigurableWebBindingInitializer; -import org.springframework.web.context.support.StaticWebApplicationContext; import org.springframework.web.method.support.HandlerMethodArgumentResolver; import org.springframework.web.method.support.HandlerMethodReturnValueHandler; import org.springframework.web.servlet.HandlerExceptionResolver; -import org.springframework.web.servlet.HandlerExecutionChain; import org.springframework.web.servlet.handler.HandlerExceptionResolverComposite; import org.springframework.web.servlet.mvc.annotation.ResponseStatusExceptionResolver; import org.springframework.web.servlet.mvc.method.annotation.ExceptionHandlerExceptionResolver; import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter; -import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerMapping; import org.springframework.web.servlet.mvc.support.DefaultHandlerExceptionResolver; /** - * A test fixture for WebMvcConfiguration tests. 
+ * A test fixture for {@link DelegatingWebMvcConfiguration} tests. * * @author Rossen Stoyanchev */ -public class WebMvcConfigurationTests { +public class DelegatingWebMvcConfigurationTests { private DelegatingWebMvcConfiguration mvcConfiguration; @@ -74,7 +64,7 @@ public void setUp() { } @Test - public void annotationHandlerAdapter() throws Exception { + public void requestMappingHandlerAdapter() throws Exception { Capture<List<HttpMessageConverter<?>>> converters = new Capture<List<HttpMessageConverter<?>>>(); Capture<FormattingConversionService> conversionService = new Capture<FormattingConversionService>(); Capture<List<HandlerMethodArgumentResolver>> resolvers = new Capture<List<HandlerMethodArgumentResolver>>(); @@ -96,7 +86,6 @@ public void annotationHandlerAdapter() throws Exception { assertEquals(0, resolvers.getValue().size()); assertEquals(0, handlers.getValue().size()); - assertTrue(converters.getValue().size() > 0); assertEquals(converters.getValue(), adapter.getMessageConverters()); verify(configurer); @@ -104,9 +93,6 @@ public void annotationHandlerAdapter() throws Exception { @Test public void configureMessageConverters() { - RequestMappingHandlerAdapter adapter = mvcConfiguration.requestMappingHandlerAdapter(); - assertTrue("There should be at least two default converters ", adapter.getMessageConverters().size() > 1); - List<WebMvcConfigurer> configurers = new ArrayList<WebMvcConfigurer>(); configurers.add(new WebMvcConfigurerAdapter() { @Override @@ -117,7 +103,7 @@ public void configureMessageConverters(List<HttpMessageConverter<?>> converters) mvcConfiguration = new DelegatingWebMvcConfiguration(); mvcConfiguration.setConfigurers(configurers); - adapter = mvcConfiguration.requestMappingHandlerAdapter(); + RequestMappingHandlerAdapter adapter = mvcConfiguration.requestMappingHandlerAdapter(); assertEquals("Only one custom converter should be registered", 1, adapter.getMessageConverters().size()); } @@ -132,17 +118,6 @@ public void 
getCustomValidator() { verify(configurer); } - @Test - public void configureValidator() { - expect(configurer.getValidator()).andReturn(null); - replay(configurer); - - mvcConfiguration.setConfigurers(Arrays.asList(configurer)); - mvcConfiguration.mvcValidator(); - - verify(configurer); - } - @Test public void handlerExceptionResolver() throws Exception { Capture<List<HttpMessageConverter<?>>> converters = new Capture<List<HttpMessageConverter<?>>>(); @@ -166,11 +141,6 @@ public void handlerExceptionResolver() throws Exception { @Test public void configureExceptionResolvers() throws Exception { - HandlerExceptionResolverComposite composite; - - composite = (HandlerExceptionResolverComposite) mvcConfiguration.handlerExceptionResolver(); - assertTrue("Expected more than one exception resolver by default", composite.getExceptionResolvers().size() > 1); - List<WebMvcConfigurer> configurers = new ArrayList<WebMvcConfigurer>(); configurers.add(new WebMvcConfigurerAdapter() { @Override @@ -180,32 +150,9 @@ public void configureHandlerExceptionResolvers(List<HandlerExceptionResolver> ex }); mvcConfiguration.setConfigurers(configurers); - composite = (HandlerExceptionResolverComposite) mvcConfiguration.handlerExceptionResolver(); + HandlerExceptionResolverComposite composite = + (HandlerExceptionResolverComposite) mvcConfiguration.handlerExceptionResolver(); assertEquals("Only one custom converter is expected", 1, composite.getExceptionResolvers().size()); } - @Test - public void configureInterceptors() throws Exception { - HttpServletRequest request = new MockHttpServletRequest("GET", "/"); - - StaticWebApplicationContext context = new StaticWebApplicationContext(); - context.registerSingleton("controller", TestHandler.class); - - RequestMappingHandlerMapping hm = mvcConfiguration.requestMappingHandlerMapping(); - hm.setApplicationContext(context); - HandlerExecutionChain chain = hm.getHandler(request); - assertNotNull("No chain returned", chain); - assertNotNull("Expected 
at least one default converter", chain.getInterceptors()); - } - - @Controller - private static class TestHandler { - - @SuppressWarnings("unused") - @RequestMapping("/") - public void handle() { - } - - } - } diff --git a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/InterceptorConfigurerTests.java b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/InterceptorRegistryTests.java similarity index 75% rename from org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/InterceptorConfigurerTests.java rename to org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/InterceptorRegistryTests.java index 1f1534f6ea1c..a0c8fa442fdf 100644 --- a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/InterceptorConfigurerTests.java +++ b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/InterceptorRegistryTests.java @@ -40,14 +40,14 @@ import org.springframework.web.servlet.theme.ThemeChangeInterceptor; /** - * Test fixture with a {@link InterceptorConfigurer}, two {@link HandlerInterceptor}s and two + * Test fixture with a {@link InterceptorRegistry}, two {@link HandlerInterceptor}s and two * {@link WebRequestInterceptor}s. 
* * @author Rossen Stoyanchev */ -public class InterceptorConfigurerTests { +public class InterceptorRegistryTests { - private InterceptorConfigurer configurer; + private InterceptorRegistry registry; private final HandlerInterceptor interceptor1 = new LocaleChangeInterceptor(); @@ -63,54 +63,50 @@ public class InterceptorConfigurerTests { @Before public void setUp() { - configurer = new InterceptorConfigurer(); + registry = new InterceptorRegistry(); webRequestInterceptor1 = new TestWebRequestInterceptor(); webRequestInterceptor2 = new TestWebRequestInterceptor(); } @Test public void addInterceptor() { - configurer.addInterceptor(interceptor1); + registry.addInterceptor(interceptor1); List<HandlerInterceptor> interceptors = getInterceptorsForPath(null); + assertEquals(Arrays.asList(interceptor1), interceptors); } @Test - public void addInterceptors() { - configurer.addInterceptors(interceptor1, interceptor2); + public void addTwoInterceptors() { + registry.addInterceptor(interceptor1); + registry.addInterceptor(interceptor2); List<HandlerInterceptor> interceptors = getInterceptorsForPath(null); + assertEquals(Arrays.asList(interceptor1, interceptor2), interceptors); } @Test - public void mapInterceptor() { - configurer.mapInterceptor(new String[] {"/path1"}, interceptor1); - configurer.mapInterceptor(new String[] {"/path2"}, interceptor2); - + public void addInterceptorsWithUrlPatterns() { + registry.addInterceptor(interceptor1).addPathPatterns("/path1"); + registry.addInterceptor(interceptor2).addPathPatterns("/path2"); + assertEquals(Arrays.asList(interceptor1), getInterceptorsForPath("/path1")); assertEquals(Arrays.asList(interceptor2), getInterceptorsForPath("/path2")); } - @Test - public void mapInterceptors() { - configurer.mapInterceptors(new String[] {"/path1"}, interceptor1, interceptor2); - - assertEquals(Arrays.asList(interceptor1, interceptor2), getInterceptorsForPath("/path1")); - assertEquals(Arrays.asList(), getInterceptorsForPath("/path2")); - } - 
@Test public void addWebRequestInterceptor() throws Exception { - configurer.addInterceptor(webRequestInterceptor1); + registry.addWebRequestInterceptor(webRequestInterceptor1); List<HandlerInterceptor> interceptors = getInterceptorsForPath(null); - + assertEquals(1, interceptors.size()); verifyAdaptedInterceptor(interceptors.get(0), webRequestInterceptor1); } @Test public void addWebRequestInterceptors() throws Exception { - configurer.addInterceptors(webRequestInterceptor1, webRequestInterceptor2); + registry.addWebRequestInterceptor(webRequestInterceptor1); + registry.addWebRequestInterceptor(webRequestInterceptor2); List<HandlerInterceptor> interceptors = getInterceptorsForPath(null); assertEquals(2, interceptors.size()); @@ -119,9 +115,9 @@ public void addWebRequestInterceptors() throws Exception { } @Test - public void mapWebRequestInterceptor() throws Exception { - configurer.mapInterceptor(new String[] {"/path1"}, webRequestInterceptor1); - configurer.mapInterceptor(new String[] {"/path2"}, webRequestInterceptor2); + public void addWebRequestInterceptorsWithUrlPatterns() throws Exception { + registry.addWebRequestInterceptor(webRequestInterceptor1).addPathPatterns("/path1"); + registry.addWebRequestInterceptor(webRequestInterceptor2).addPathPatterns("/path2"); List<HandlerInterceptor> interceptors = getInterceptorsForPath("/path1"); assertEquals(1, interceptors.size()); @@ -132,22 +128,10 @@ public void mapWebRequestInterceptor() throws Exception { verifyAdaptedInterceptor(interceptors.get(0), webRequestInterceptor2); } - @Test - public void mapWebRequestInterceptor2() throws Exception { - configurer.mapInterceptors(new String[] {"/path1"}, webRequestInterceptor1, webRequestInterceptor2); - - List<HandlerInterceptor> interceptors = getInterceptorsForPath("/path1"); - assertEquals(2, interceptors.size()); - verifyAdaptedInterceptor(interceptors.get(0), webRequestInterceptor1); - verifyAdaptedInterceptor(interceptors.get(1), webRequestInterceptor2); - - 
assertEquals(0, getInterceptorsForPath("/path2").size()); - } - private List<HandlerInterceptor> getInterceptorsForPath(String lookupPath) { PathMatcher pathMatcher = new AntPathMatcher(); List<HandlerInterceptor> result = new ArrayList<HandlerInterceptor>(); - for (Object i : configurer.getInterceptors()) { + for (Object i : registry.getInterceptors()) { if (i instanceof MappedInterceptor) { MappedInterceptor mappedInterceptor = (MappedInterceptor) i; if (mappedInterceptor.matches(lookupPath, pathMatcher)) { diff --git a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ResourceConfigurerTests.java b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ResourceHandlerRegistryTests.java similarity index 55% rename from org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ResourceConfigurerTests.java rename to org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ResourceHandlerRegistryTests.java index ba8d59488a7a..145e1b7a6c90 100644 --- a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ResourceConfigurerTests.java +++ b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ResourceHandlerRegistryTests.java @@ -17,7 +17,7 @@ package org.springframework.web.servlet.config.annotation; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNull; import org.junit.Before; import org.junit.Test; @@ -26,32 +26,34 @@ import org.springframework.mock.web.MockServletContext; import org.springframework.web.context.support.GenericWebApplicationContext; import org.springframework.web.servlet.HandlerMapping; +import org.springframework.web.servlet.handler.SimpleUrlHandlerMapping; import 
org.springframework.web.servlet.resource.ResourceHttpRequestHandler; /** - * Test fixture with a {@link ResourceConfigurer}. + * Test fixture with a {@link ResourceHandlerRegistry}. * * @author Rossen Stoyanchev */ -public class ResourceConfigurerTests { +public class ResourceHandlerRegistryTests { - private ResourceConfigurer configurer; + private ResourceHandlerRegistry registry; + + private ResourceHandlerRegistration registration; private MockHttpServletResponse response; @Before public void setUp() { - configurer = new ResourceConfigurer(new GenericWebApplicationContext(), new MockServletContext()); - configurer.addPathMapping("/resources/**"); - configurer.addResourceLocation("classpath:org/springframework/web/servlet/config/annotation/"); - + registry = new ResourceHandlerRegistry(new GenericWebApplicationContext(), new MockServletContext()); + registration = registry.addResourceHandler("/resources/**"); + registration.addResourceLocations("classpath:org/springframework/web/servlet/config/annotation/"); response = new MockHttpServletResponse(); } @Test - public void noMappings() throws Exception { - configurer = new ResourceConfigurer(new GenericWebApplicationContext(), new MockServletContext()); - assertTrue(configurer.getHandlerMapping().getUrlMap().isEmpty()); + public void noResourceHandlers() throws Exception { + registry = new ResourceHandlerRegistry(new GenericWebApplicationContext(), new MockServletContext()); + assertNull(registry.getHandlerMapping()); } @Test @@ -60,7 +62,7 @@ public void mapPathToLocation() throws Exception { request.setMethod("GET"); request.setAttribute(HandlerMapping.PATH_WITHIN_HANDLER_MAPPING_ATTRIBUTE, "/testStylesheet.css"); - ResourceHttpRequestHandler handler = getResourceHandler("/resources/**"); + ResourceHttpRequestHandler handler = getHandler("/resources/**"); handler.handleRequest(request, response); assertEquals("test stylesheet content", response.getContentAsString()); @@ -68,22 +70,23 @@ public void 
mapPathToLocation() throws Exception { @Test public void cachePeriod() { - assertEquals(-1, getResourceHandler("/resources/**").getCacheSeconds()); + assertEquals(-1, getHandler("/resources/**").getCacheSeconds()); - configurer.setCachePeriod(0); - assertEquals(0, getResourceHandler("/resources/**").getCacheSeconds()); + registration.setCachePeriod(0); + assertEquals(0, getHandler("/resources/**").getCacheSeconds()); } @Test public void order() { - assertEquals(Integer.MAX_VALUE -1, configurer.getHandlerMapping().getOrder()); + assertEquals(Integer.MAX_VALUE -1, registry.getHandlerMapping().getOrder()); - configurer.setOrder(0); - assertEquals(0, configurer.getHandlerMapping().getOrder()); + registry.setOrder(0); + assertEquals(0, registry.getHandlerMapping().getOrder()); } - private ResourceHttpRequestHandler getResourceHandler(String pathPattern) { - return (ResourceHttpRequestHandler) configurer.getHandlerMapping().getUrlMap().get(pathPattern); + private ResourceHttpRequestHandler getHandler(String pathPattern) { + SimpleUrlHandlerMapping handlerMapping = (SimpleUrlHandlerMapping) registry.getHandlerMapping(); + return (ResourceHttpRequestHandler) handlerMapping.getUrlMap().get(pathPattern); } } diff --git a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ViewControllerConfigurerTests.java b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ViewControllerRegistryTests.java similarity index 59% rename from org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ViewControllerConfigurerTests.java rename to org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ViewControllerRegistryTests.java index f0edd47decfe..b6e3ad28385d 100644 --- a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ViewControllerConfigurerTests.java +++ 
b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/ViewControllerRegistryTests.java @@ -16,7 +16,9 @@ package org.springframework.web.servlet.config.annotation; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import java.util.Map; @@ -26,50 +28,55 @@ import org.springframework.web.servlet.mvc.ParameterizableViewController; /** - * Test fixture with a {@link ViewControllerConfigurer}. + * Test fixture with a {@link ViewControllerRegistry}. * * @author Rossen Stoyanchev */ -public class ViewControllerConfigurerTests { +public class ViewControllerRegistryTests { - private ViewControllerConfigurer configurer; + private ViewControllerRegistry registry; @Before public void setUp() { - configurer = new ViewControllerConfigurer(); + registry = new ViewControllerRegistry(); } @Test - public void noMappings() throws Exception { - Map<String, ?> urlMap = configurer.getHandlerMapping().getUrlMap(); - assertTrue(urlMap.isEmpty()); + public void noViewControllers() throws Exception { + assertNull(registry.getHandlerMapping()); } @Test - public void mapViewName() { - configurer.mapViewName("/path", "viewName"); - Map<String, ?> urlMap = configurer.getHandlerMapping().getUrlMap(); + public void addViewController() { + registry.addViewController("/path"); + Map<String, ?> urlMap = getHandlerMapping().getUrlMap(); ParameterizableViewController controller = (ParameterizableViewController) urlMap.get("/path"); assertNotNull(controller); - assertEquals("viewName", controller.getViewName()); + assertNull(controller.getViewName()); } @Test - public void mapViewNameByConvention() { - configurer.mapViewNameByConvention("/path"); - Map<String, ?> urlMap = configurer.getHandlerMapping().getUrlMap(); + public void addViewControllerWithViewName() { + registry.addViewController("/path").setViewName("viewName"); + Map<String, 
?> urlMap = getHandlerMapping().getUrlMap(); ParameterizableViewController controller = (ParameterizableViewController) urlMap.get("/path"); assertNotNull(controller); - assertNull(controller.getViewName()); + assertEquals("viewName", controller.getViewName()); } - + @Test public void order() { - SimpleUrlHandlerMapping handlerMapping = configurer.getHandlerMapping(); + registry.addViewController("/path"); + SimpleUrlHandlerMapping handlerMapping = getHandlerMapping(); assertEquals(1, handlerMapping.getOrder()); - configurer.setOrder(2); - handlerMapping = configurer.getHandlerMapping(); + registry.setOrder(2); + handlerMapping = getHandlerMapping(); assertEquals(2, handlerMapping.getOrder()); } + + private SimpleUrlHandlerMapping getHandlerMapping() { + return (SimpleUrlHandlerMapping) registry.getHandlerMapping(); + } + } diff --git a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationSupportTests.java b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationSupportTests.java new file mode 100644 index 000000000000..08ae16db95d8 --- /dev/null +++ b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationSupportTests.java @@ -0,0 +1,315 @@ +/* + * Copyright 2002-2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.web.servlet.config.annotation; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.List; + +import javax.servlet.http.HttpServletRequest; + +import org.junit.Before; +import org.junit.Test; +import org.springframework.beans.DirectFieldAccessor; +import org.springframework.beans.TestBean; +import org.springframework.core.convert.ConversionService; +import org.springframework.core.convert.converter.Converter; +import org.springframework.core.io.FileSystemResourceLoader; +import org.springframework.format.FormatterRegistry; +import org.springframework.format.support.FormattingConversionService; +import org.springframework.http.converter.HttpMessageConverter; +import org.springframework.http.converter.json.MappingJacksonHttpMessageConverter; +import org.springframework.mock.web.MockHttpServletRequest; +import org.springframework.mock.web.MockServletContext; +import org.springframework.stereotype.Controller; +import org.springframework.validation.BeanPropertyBindingResult; +import org.springframework.validation.Errors; +import org.springframework.validation.Validator; +import org.springframework.validation.beanvalidation.LocalValidatorFactoryBean; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.support.ConfigurableWebBindingInitializer; +import org.springframework.web.context.support.StaticWebApplicationContext; +import org.springframework.web.method.annotation.support.ModelAttributeMethodProcessor; +import org.springframework.web.method.support.HandlerMethodArgumentResolver; +import org.springframework.web.method.support.HandlerMethodReturnValueHandler; +import org.springframework.web.servlet.HandlerExceptionResolver; +import org.springframework.web.servlet.HandlerExecutionChain; +import 
org.springframework.web.servlet.handler.AbstractHandlerMapping; +import org.springframework.web.servlet.handler.BeanNameUrlHandlerMapping; +import org.springframework.web.servlet.handler.ConversionServiceExposingInterceptor; +import org.springframework.web.servlet.handler.HandlerExceptionResolverComposite; +import org.springframework.web.servlet.handler.SimpleMappingExceptionResolver; +import org.springframework.web.servlet.i18n.LocaleChangeInterceptor; +import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter; +import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerMapping; +import org.springframework.web.servlet.mvc.method.annotation.support.DefaultMethodReturnValueHandler; + +/** + * A test fixture for {@link WebMvcConfigurationSupport}. + * + * @author Rossen Stoyanchev + */ +public class WebMvcConfigurationSupportTests { + + private TestWebMvcConfiguration mvcConfiguration; + + @Before + public void setUp() { + mvcConfiguration = new TestWebMvcConfiguration(); + } + + @Test + public void requestMappingHandlerMapping() throws Exception { + StaticWebApplicationContext cxt = new StaticWebApplicationContext(); + cxt.registerSingleton("controller", TestController.class); + + RequestMappingHandlerMapping handlerMapping = mvcConfiguration.requestMappingHandlerMapping(); + assertEquals(0, handlerMapping.getOrder()); + + handlerMapping.setApplicationContext(cxt); + HandlerExecutionChain chain = handlerMapping.getHandler(new MockHttpServletRequest("GET", "/")); + assertNotNull(chain.getInterceptors()); + assertEquals(ConversionServiceExposingInterceptor.class, chain.getInterceptors()[0].getClass()); + } + + @Test + public void emptyViewControllerHandlerMapping() { + AbstractHandlerMapping handlerMapping = (AbstractHandlerMapping) mvcConfiguration.viewControllerHandlerMapping(); + assertNotNull(handlerMapping); + assertEquals(Integer.MAX_VALUE, handlerMapping.getOrder()); + 
assertTrue(handlerMapping.getClass().getName().endsWith("EmptyHandlerMapping")); + } + + @Test + public void beanNameHandlerMapping() throws Exception { + StaticWebApplicationContext cxt = new StaticWebApplicationContext(); + cxt.registerSingleton("/controller", TestController.class); + + HttpServletRequest request = new MockHttpServletRequest("GET", "/controller"); + + BeanNameUrlHandlerMapping handlerMapping = mvcConfiguration.beanNameHandlerMapping(); + assertEquals(2, handlerMapping.getOrder()); + + handlerMapping.setApplicationContext(cxt); + HandlerExecutionChain chain = handlerMapping.getHandler(request); + assertNotNull(chain.getInterceptors()); + assertEquals(2, chain.getInterceptors().length); + assertEquals(ConversionServiceExposingInterceptor.class, chain.getInterceptors()[1].getClass()); + } + + @Test + public void emptyResourceHandlerMapping() { + mvcConfiguration.setApplicationContext(new StaticWebApplicationContext()); + AbstractHandlerMapping handlerMapping = (AbstractHandlerMapping) mvcConfiguration.resourceHandlerMapping(); + assertNotNull(handlerMapping); + assertEquals(Integer.MAX_VALUE, handlerMapping.getOrder()); + assertTrue(handlerMapping.getClass().getName().endsWith("EmptyHandlerMapping")); + } + + @Test + public void emptyDefaultServletHandlerMapping() { + mvcConfiguration.setServletContext(new MockServletContext()); + AbstractHandlerMapping handlerMapping = (AbstractHandlerMapping) mvcConfiguration.defaultServletHandlerMapping(); + assertNotNull(handlerMapping); + assertEquals(Integer.MAX_VALUE, handlerMapping.getOrder()); + assertTrue(handlerMapping.getClass().getName().endsWith("EmptyHandlerMapping")); + } + + @Test + public void requestMappingHandlerAdapter() throws Exception { + RequestMappingHandlerAdapter adapter = mvcConfiguration.requestMappingHandlerAdapter(); + + List<HttpMessageConverter<?>> expectedConverters = new ArrayList<HttpMessageConverter<?>>(); + mvcConfiguration.addDefaultHttpMessageConverters(expectedConverters); + 
assertEquals(expectedConverters.size(), adapter.getMessageConverters().size()); + + ConfigurableWebBindingInitializer initializer = (ConfigurableWebBindingInitializer) adapter.getWebBindingInitializer(); + assertNotNull(initializer); + + ConversionService conversionService = initializer.getConversionService(); + assertNotNull(conversionService); + assertTrue(conversionService instanceof FormattingConversionService); + + Validator validator = initializer.getValidator(); + assertNotNull(validator); + assertTrue(validator instanceof LocalValidatorFactoryBean); + } + + @Test + public void handlerExceptionResolver() throws Exception { + HandlerExceptionResolverComposite compositeResolver = + (HandlerExceptionResolverComposite) mvcConfiguration.handlerExceptionResolver(); + + assertEquals(0, compositeResolver.getOrder()); + + List<HandlerExceptionResolver> expectedResolvers = new ArrayList<HandlerExceptionResolver>(); + mvcConfiguration.addDefaultHandlerExceptionResolvers(expectedResolvers); + assertEquals(expectedResolvers.size(), compositeResolver.getExceptionResolvers().size()); + } + + @Test + public void webMvcConfigurerExtensionHooks() throws Exception { + + StaticWebApplicationContext appCxt = new StaticWebApplicationContext(); + appCxt.setServletContext(new MockServletContext(new FileSystemResourceLoader())); + appCxt.registerSingleton("controller", TestController.class); + + WebConfig webConfig = new WebConfig(); + webConfig.setApplicationContext(appCxt); + webConfig.setServletContext(appCxt.getServletContext()); + + String actual = webConfig.mvcConversionService().convert(new TestBean(), String.class); + assertEquals("converted", actual); + + RequestMappingHandlerAdapter adapter = webConfig.requestMappingHandlerAdapter(); + assertEquals(1, adapter.getMessageConverters().size()); + + ConfigurableWebBindingInitializer initializer = (ConfigurableWebBindingInitializer) adapter.getWebBindingInitializer(); + assertNotNull(initializer); + + BeanPropertyBindingResult 
bindingResult = new BeanPropertyBindingResult(null, ""); + initializer.getValidator().validate(null, bindingResult); + assertEquals("invalid", bindingResult.getAllErrors().get(0).getCode()); + + @SuppressWarnings("unchecked") + List<HandlerMethodArgumentResolver> argResolvers= (List<HandlerMethodArgumentResolver>) + new DirectFieldAccessor(adapter).getPropertyValue("customArgumentResolvers"); + assertEquals(1, argResolvers.size()); + + @SuppressWarnings("unchecked") + List<HandlerMethodReturnValueHandler> handlers = (List<HandlerMethodReturnValueHandler>) + new DirectFieldAccessor(adapter).getPropertyValue("customReturnValueHandlers"); + assertEquals(1, handlers.size()); + + HandlerExceptionResolverComposite composite = (HandlerExceptionResolverComposite) webConfig.handlerExceptionResolver(); + assertEquals(1, composite.getExceptionResolvers().size()); + + RequestMappingHandlerMapping rmHandlerMapping = webConfig.requestMappingHandlerMapping(); + rmHandlerMapping.setApplicationContext(appCxt); + HandlerExecutionChain chain = rmHandlerMapping.getHandler(new MockHttpServletRequest("GET", "/")); + assertNotNull(chain.getInterceptors()); + assertEquals(2, chain.getInterceptors().length); + assertEquals(LocaleChangeInterceptor.class, chain.getInterceptors()[0].getClass()); + assertEquals(ConversionServiceExposingInterceptor.class, chain.getInterceptors()[1].getClass()); + + AbstractHandlerMapping handlerMapping = (AbstractHandlerMapping) webConfig.viewControllerHandlerMapping(); + handlerMapping.setApplicationContext(appCxt); + assertNotNull(handlerMapping); + assertEquals(1, handlerMapping.getOrder()); + HandlerExecutionChain handler = handlerMapping.getHandler(new MockHttpServletRequest("GET", "/path")); + assertNotNull(handler.getHandler()); + + handlerMapping = (AbstractHandlerMapping) webConfig.resourceHandlerMapping(); + handlerMapping.setApplicationContext(appCxt); + assertNotNull(handlerMapping); + assertEquals(Integer.MAX_VALUE-1, handlerMapping.getOrder()); + 
handler = handlerMapping.getHandler(new MockHttpServletRequest("GET", "/resources/foo.gif")); + assertNotNull(handler.getHandler()); + + handlerMapping = (AbstractHandlerMapping) webConfig.defaultServletHandlerMapping(); + handlerMapping.setApplicationContext(appCxt); + assertNotNull(handlerMapping); + assertEquals(Integer.MAX_VALUE, handlerMapping.getOrder()); + handler = handlerMapping.getHandler(new MockHttpServletRequest("GET", "/anyPath")); + assertNotNull(handler.getHandler()); + } + + @Controller + private static class TestController { + + @SuppressWarnings("unused") + @RequestMapping("/") + public void handle() { + } + } + + private static class TestWebMvcConfiguration extends WebMvcConfigurationSupport { + + } + + /** + * The purpose of this class is to test that an implementation of a {@link WebMvcConfigurer} + * can also apply customizations by extension from {@link WebMvcConfigurationSupport}. + */ + private class WebConfig extends WebMvcConfigurationSupport implements WebMvcConfigurer { + + @Override + public void addFormatters(FormatterRegistry registry) { + registry.addConverter(new Converter<TestBean, String>() { + public String convert(TestBean source) { + return "converted"; + } + }); + } + + @Override + public void configureMessageConverters(List<HttpMessageConverter<?>> converters) { + converters.add(new MappingJacksonHttpMessageConverter()); + } + + @Override + public Validator getValidator() { + return new Validator() { + public void validate(Object target, Errors errors) { + errors.reject("invalid"); + } + public boolean supports(Class<?> clazz) { + return true; + } + }; + } + + @Override + public void addArgumentResolvers(List<HandlerMethodArgumentResolver> argumentResolvers) { + argumentResolvers.add(new ModelAttributeMethodProcessor(true)); + } + + @Override + public void addReturnValueHandlers(List<HandlerMethodReturnValueHandler> returnValueHandlers) { + returnValueHandlers.add(new DefaultMethodReturnValueHandler()); + } + + @Override + 
public void configureHandlerExceptionResolvers(List<HandlerExceptionResolver> exceptionResolvers) { + exceptionResolvers.add(new SimpleMappingExceptionResolver()); + } + + @Override + public void addInterceptors(InterceptorRegistry registry) { + registry.addInterceptor(new LocaleChangeInterceptor()); + } + + @Override + public void addViewControllers(ViewControllerRegistry registry) { + registry.addViewController("/path"); + } + + @Override + public void addResourceHandlers(ResourceHandlerRegistry registry) { + registry.addResourceHandler("/resources/**").addResourceLocations("src/test/java"); + } + + @Override + public void configureDefaultServletHandling(DefaultServletHandlerConfigurer configurer) { + configurer.enable("default"); + } + } + +}
b0d00c5b3003a233c793a0f092f15358530a0acb
orientdb
Implemented new commands: - truncate cluster -- truncate class--
a
https://github.com/orientechnologies/orientdb
diff --git a/tools/src/main/java/com/orientechnologies/orient/console/OConsoleDatabaseApp.java b/tools/src/main/java/com/orientechnologies/orient/console/OConsoleDatabaseApp.java index b64c9f90d29..bbd44e84cc6 100644 --- a/tools/src/main/java/com/orientechnologies/orient/console/OConsoleDatabaseApp.java +++ b/tools/src/main/java/com/orientechnologies/orient/console/OConsoleDatabaseApp.java @@ -177,10 +177,14 @@ public void createDatabase( @ConsoleCommand(description = "Create a new cluster in the current database. The cluster can be physical or logical.") public void createCluster( @ConsoleParameter(name = "cluster-name", description = "The name of the cluster to create") String iClusterName, - @ConsoleParameter(name = "cluster-type", description = "Cluster type: 'physical' or 'logical'") String iClusterType) { + @ConsoleParameter(name = "cluster-type", description = "Cluster type: 'physical' or 'logical'") String iClusterType, + @ConsoleParameter(name = "position", description = "cluster id to replace an empty position or 'append' to append at the end") String iPosition) { checkCurrentDatabase(); - out.println("Creating cluster [" + iClusterName + "] of type '" + iClusterType + "' in database " + currentDatabaseName + "..."); + final int position = iPosition.toUpperCase().equals("append") ? -1 : Integer.parseInt(iPosition); + + out.println("Creating cluster [" + iClusterName + "] of type '" + iClusterType + "' in database " + currentDatabaseName + + (position == -1 ? " as last one" : " in place of #" + position) + "..."); int clusterId = iClusterType.equalsIgnoreCase("physical") ? 
currentDatabase.addPhysicalCluster(iClusterName, iClusterName, -1) : currentDatabase.addLogicalCluster(iClusterName, currentDatabase.getClusterIdByName(OStorage.CLUSTER_INTERNAL_NAME)); @@ -226,7 +230,7 @@ public void truncateCluster( cluster.truncate(); - out.println("Truncated " + recs + "records from cluster [" + iClusterName + "] in database " + currentDatabaseName); + out.println("Truncated " + recs + " records from cluster [" + iClusterName + "] in database " + currentDatabaseName); } catch (Exception e) { out.println("ERROR: " + e.toString()); } @@ -245,7 +249,7 @@ public void truncateClass( cls.truncate(); - out.println("Truncated " + recs + "records from class [" + iClassName + "] in database " + currentDatabaseName); + out.println("Truncated " + recs + " records from class [" + iClassName + "] in database " + currentDatabaseName); } catch (Exception e) { out.println("ERROR: " + e.toString()); }
4a9c7d672ed83604123448501d73d18258243c6f
elasticsearch
clean code--
p
https://github.com/elastic/elasticsearch
diff --git a/modules/elasticsearch/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java b/modules/elasticsearch/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java index 4a1a1e73d1e04..4aceed5818014 100644 --- a/modules/elasticsearch/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java +++ b/modules/elasticsearch/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java @@ -78,7 +78,7 @@ public class HighlightPhase implements SearchPhase { if (mapper != null) { indexName = mapper.names().indexName(); } - String[] fragments = null; + String[] fragments; try { fragments = highlighter.getBestFragments(fieldQuery, context.searcher().getIndexReader(), docId, indexName, parsedHighlightField.fragmentCharSize(), parsedHighlightField.numberOfFragments()); } catch (IOException e) {
e7950dccc0c4df38e172acbd99071e50397fc492
orientdb
Cleanup inspection warnings--
p
https://github.com/orientechnologies/orientdb
diff --git a/src/main/java/com/orientechnologies/orient/etl/OAbstractETLComponent.java b/src/main/java/com/orientechnologies/orient/etl/OAbstractETLComponent.java index fee25e54ec0..9f7dee4e191 100644 --- a/src/main/java/com/orientechnologies/orient/etl/OAbstractETLComponent.java +++ b/src/main/java/com/orientechnologies/orient/etl/OAbstractETLComponent.java @@ -124,7 +124,7 @@ protected Object resolve(final Object iContent) { if (context == null || iContent == null) return iContent; - Object value = null; + Object value; if (iContent instanceof String) { if (((String) iContent).startsWith("$") && !((String) iContent).startsWith(OSystemVariableResolver.VAR_BEGIN)) value = context.getVariable(iContent.toString()); diff --git a/src/test/java/com/orientechnologies/orient/etl/transformer/OVertexTransformerNGTest.java b/src/test/java/com/orientechnologies/orient/etl/transformer/OVertexTransformerTest.java similarity index 97% rename from src/test/java/com/orientechnologies/orient/etl/transformer/OVertexTransformerNGTest.java rename to src/test/java/com/orientechnologies/orient/etl/transformer/OVertexTransformerTest.java index d5d1ceef5fd..1e475423d02 100644 --- a/src/test/java/com/orientechnologies/orient/etl/transformer/OVertexTransformerNGTest.java +++ b/src/test/java/com/orientechnologies/orient/etl/transformer/OVertexTransformerTest.java @@ -27,7 +27,7 @@ * * @author Gregor Frey */ -public class OVertexTransformerNGTest extends ETLBaseTest { +public class OVertexTransformerTest extends ETLBaseTest { @Override public void setUp() { super.setUp();
386e39f4a2b3112cbbc06bcde6483f1cebaa4734
kotlin
Fixed KT-1797 No completion for nested class name- in extension function definition-- -KT-1797 Fixed-
c
https://github.com/JetBrains/kotlin
diff --git a/idea/src/org/jetbrains/jet/plugin/completion/JetCompletionContributor.kt b/idea/src/org/jetbrains/jet/plugin/completion/JetCompletionContributor.kt index c878f009e48ec..a624ef398b15c 100644 --- a/idea/src/org/jetbrains/jet/plugin/completion/JetCompletionContributor.kt +++ b/idea/src/org/jetbrains/jet/plugin/completion/JetCompletionContributor.kt @@ -27,19 +27,20 @@ import org.jetbrains.jet.lang.psi.JetExpression import org.jetbrains.jet.lang.psi.JetFile import org.jetbrains.jet.lexer.JetTokens import org.jetbrains.jet.plugin.completion.smart.SmartCompletion -import org.jetbrains.jet.plugin.references.JetSimpleNameReference import com.intellij.patterns.PsiJavaPatterns.elementType import com.intellij.patterns.PsiJavaPatterns.psiElement -import com.intellij.patterns.PsiElementPattern import com.intellij.psi.PsiElement +import com.intellij.psi.PsiComment +import com.intellij.psi.PsiWhiteSpace +import org.jetbrains.jet.lang.psi.JetTypeReference public class JetCompletionContributor : CompletionContributor() { private val AFTER_NUMBER_LITERAL = psiElement().afterLeafSkipping(psiElement().withText(""), psiElement().withElementType(elementType().oneOf(JetTokens.FLOAT_LITERAL, JetTokens.INTEGER_LITERAL))) - private val EXTENSION_RECEIVER_TYPE_DUMMY_IDENTIFIER: String = "KotlinExtensionDummy.fake() {}" // A way to add reference into file at completion place - private val EXTENSION_RECEIVER_TYPE_ACTIVATION_PATTERN: PsiElementPattern.Capture<PsiElement> = PlatformPatterns.psiElement().afterLeaf(JetTokens.FUN_KEYWORD.toString(), JetTokens.VAL_KEYWORD.toString(), JetTokens.VAR_KEYWORD.toString()) + private val EXTENSION_RECEIVER_TYPE_DUMMY_IDENTIFIER = "KotlinExtensionDummy.fake() {}" // A way to add reference into file at completion place + private val EXTENSION_RECEIVER_TYPE_ACTIVATION_PATTERN = psiElement().afterLeaf(JetTokens.FUN_KEYWORD.toString(), JetTokens.VAL_KEYWORD.toString(), JetTokens.VAR_KEYWORD.toString()) ;{ val provider = object : 
CompletionProvider<CompletionParameters>() { @@ -57,6 +58,7 @@ public class JetCompletionContributor : CompletionContributor() { val offset = context.getStartOffset() val tokenBefore = psiFile.findElementAt(Math.max(0, offset - 1)) + val dummyIdentifier = when { context.getCompletionType() == CompletionType.SMART -> CompletionUtilCore.DUMMY_IDENTIFIER_TRIMMED + "$" // add '$' to ignore context after the caret @@ -64,6 +66,8 @@ public class JetCompletionContributor : CompletionContributor() { EXTENSION_RECEIVER_TYPE_ACTIVATION_PATTERN.accepts(tokenBefore) -> EXTENSION_RECEIVER_TYPE_DUMMY_IDENTIFIER + tokenBefore != null && isExtensionReceiverAfterDot(tokenBefore) -> CompletionUtilCore.DUMMY_IDENTIFIER_TRIMMED + "." + else -> CompletionUtilCore.DUMMY_IDENTIFIER_TRIMMED } context.setDummyIdentifier(dummyIdentifier) @@ -102,6 +106,25 @@ public class JetCompletionContributor : CompletionContributor() { } } + private val declarationKeywords = setOf(JetTokens.FUN_KEYWORD, JetTokens.VAL_KEYWORD, JetTokens.VAR_KEYWORD) + + private fun isExtensionReceiverAfterDot(tokenBefore: PsiElement): Boolean { + var prev = tokenBefore.getPrevSibling() + if (tokenBefore.getNode()!!.getElementType() != JetTokens.DOT) { + if (prev == null || prev!!.getNode()!!.getElementType() != JetTokens.DOT) return false + prev = prev!!.getPrevSibling() + } + + while (prev != null) { + if (prev!!.getNode()!!.getElementType() in declarationKeywords) { + return true + } + if (prev !is PsiComment && prev !is PsiWhiteSpace && prev !is JetTypeReference) return false + prev = prev!!.getPrevSibling() + } + return false + } + private fun performCompletion(parameters: CompletionParameters, result: CompletionResultSet) { val position = parameters.getPosition() if (position.getContainingFile() !is JetFile) return diff --git a/idea/testData/completion/basic/common/NestedClassNameForExtension.kt b/idea/testData/completion/basic/common/NestedClassNameForExtension.kt new file mode 100644 index 
0000000000000..929b6c57652d2 --- /dev/null +++ b/idea/testData/completion/basic/common/NestedClassNameForExtension.kt @@ -0,0 +1,8 @@ +class Test { + public class Nested +} + +fun Test.<caret> +} + +// EXIST: Nested diff --git a/idea/testData/completion/basic/common/NestedClassNameForExtension2.kt b/idea/testData/completion/basic/common/NestedClassNameForExtension2.kt new file mode 100644 index 0000000000000..f2a55c674b56c --- /dev/null +++ b/idea/testData/completion/basic/common/NestedClassNameForExtension2.kt @@ -0,0 +1,8 @@ +class Test { + public class Nested +} + +fun Test.N<caret> +} + +// EXIST: Nested diff --git a/idea/tests/org/jetbrains/jet/completion/JSBasicCompletionTestGenerated.java b/idea/tests/org/jetbrains/jet/completion/JSBasicCompletionTestGenerated.java index a46ed07b0c72a..14eacda04a7f0 100644 --- a/idea/tests/org/jetbrains/jet/completion/JSBasicCompletionTestGenerated.java +++ b/idea/tests/org/jetbrains/jet/completion/JSBasicCompletionTestGenerated.java @@ -334,6 +334,16 @@ public void testNamedObject() throws Exception { doTest("idea/testData/completion/basic/common/NamedObject.kt"); } + @TestMetadata("NestedClassNameForExtension.kt") + public void testNestedClassNameForExtension() throws Exception { + doTest("idea/testData/completion/basic/common/NestedClassNameForExtension.kt"); + } + + @TestMetadata("NestedClassNameForExtension2.kt") + public void testNestedClassNameForExtension2() throws Exception { + doTest("idea/testData/completion/basic/common/NestedClassNameForExtension2.kt"); + } + @TestMetadata("NoClassNameDuplication.kt") public void testNoClassNameDuplication() throws Exception { doTest("idea/testData/completion/basic/common/NoClassNameDuplication.kt"); diff --git a/idea/tests/org/jetbrains/jet/completion/JvmBasicCompletionTestGenerated.java b/idea/tests/org/jetbrains/jet/completion/JvmBasicCompletionTestGenerated.java index 828f934fdc4dd..ea4bcc1bf1012 100644 --- 
a/idea/tests/org/jetbrains/jet/completion/JvmBasicCompletionTestGenerated.java +++ b/idea/tests/org/jetbrains/jet/completion/JvmBasicCompletionTestGenerated.java @@ -334,6 +334,16 @@ public void testNamedObject() throws Exception { doTest("idea/testData/completion/basic/common/NamedObject.kt"); } + @TestMetadata("NestedClassNameForExtension.kt") + public void testNestedClassNameForExtension() throws Exception { + doTest("idea/testData/completion/basic/common/NestedClassNameForExtension.kt"); + } + + @TestMetadata("NestedClassNameForExtension2.kt") + public void testNestedClassNameForExtension2() throws Exception { + doTest("idea/testData/completion/basic/common/NestedClassNameForExtension2.kt"); + } + @TestMetadata("NoClassNameDuplication.kt") public void testNoClassNameDuplication() throws Exception { doTest("idea/testData/completion/basic/common/NoClassNameDuplication.kt");
f0e9bf9f4ccaaa8e0b41f28f97fb7b6d15a88363
intellij-community
Make is possible to enhance color schemes from- plugin in non-intellij environment (e.g. upsource)--
a
https://github.com/JetBrains/intellij-community
diff --git a/platform/editor-ui-ex/src/com/intellij/openapi/editor/colors/impl/AbstractColorsScheme.java b/platform/editor-ui-ex/src/com/intellij/openapi/editor/colors/impl/AbstractColorsScheme.java index e00655bef5cb5..93f9f3bf0b4c7 100644 --- a/platform/editor-ui-ex/src/com/intellij/openapi/editor/colors/impl/AbstractColorsScheme.java +++ b/platform/editor-ui-ex/src/com/intellij/openapi/editor/colors/impl/AbstractColorsScheme.java @@ -333,7 +333,7 @@ else if (ATTRIBUTES_ELEMENT.equals(childName)) { initFonts(); } - protected void readAttributes(@NotNull Element childNode) { + public void readAttributes(@NotNull Element childNode) { for (Element e : childNode.getChildren(OPTION_ELEMENT)) { TextAttributesKey name = TextAttributesKey.find(e.getAttributeValue(NAME_ATTR)); TextAttributes attr = new TextAttributes(e.getChild(VALUE_ELEMENT));
3e74d3b2fbea16c55805b9b73c182bdc02e70b83
spring-framework
Add putIfAbsent on Cache abstraction--This commit adds a putIfAbsent method to the Cache interface. This-method offers an atomic put if the key is not already associated in-the cache.--Issue: SPR-11400-
a
https://github.com/spring-projects/spring-framework
diff --git a/spring-context-support/src/main/java/org/springframework/cache/ehcache/EhCacheCache.java b/spring-context-support/src/main/java/org/springframework/cache/ehcache/EhCacheCache.java index c730c637e893..31bfd2f530cb 100644 --- a/spring-context-support/src/main/java/org/springframework/cache/ehcache/EhCacheCache.java +++ b/spring-context-support/src/main/java/org/springframework/cache/ehcache/EhCacheCache.java @@ -29,6 +29,7 @@ * * @author Costin Leau * @author Juergen Hoeller + * @author Stephane Nicoll * @since 3.1 */ public class EhCacheCache implements Cache { @@ -62,7 +63,7 @@ public final Ehcache getNativeCache() { @Override public ValueWrapper get(Object key) { Element element = this.cache.get(key); - return (element != null ? new SimpleValueWrapper(element.getObjectValue()) : null); + return toWrapper(element); } @Override @@ -81,6 +82,12 @@ public void put(Object key, Object value) { this.cache.put(new Element(key, value)); } + @Override + public ValueWrapper putIfAbsent(Object key, Object value) { + Element existingElement = this.cache.putIfAbsent(new Element(key, value)); + return toWrapper(existingElement); + } + @Override public void evict(Object key) { this.cache.remove(key); @@ -91,4 +98,8 @@ public void clear() { this.cache.removeAll(); } + private ValueWrapper toWrapper(Element element) { + return (element != null ? 
new SimpleValueWrapper(element.getObjectValue()) : null); + } + } diff --git a/spring-context-support/src/main/java/org/springframework/cache/guava/GuavaCache.java b/spring-context-support/src/main/java/org/springframework/cache/guava/GuavaCache.java index 6a9cf4cbe73f..6acc2aad0b6c 100644 --- a/spring-context-support/src/main/java/org/springframework/cache/guava/GuavaCache.java +++ b/spring-context-support/src/main/java/org/springframework/cache/guava/GuavaCache.java @@ -17,6 +17,8 @@ package org.springframework.cache.guava; import java.io.Serializable; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; import org.springframework.cache.Cache; import org.springframework.cache.support.SimpleValueWrapper; @@ -29,6 +31,7 @@ * <p>Requires Google Guava 12.0 or higher. * * @author Juergen Hoeller + * @author Stephane Nicoll * @since 4.0 */ public class GuavaCache implements Cache { @@ -83,7 +86,7 @@ public final boolean isAllowNullValues() { @Override public ValueWrapper get(Object key) { Object value = this.cache.getIfPresent(key); - return (value != null ? new SimpleValueWrapper(fromStoreValue(value)) : null); + return toWrapper(value); } @Override @@ -101,6 +104,17 @@ public void put(Object key, Object value) { this.cache.put(key, toStoreValue(value)); } + @Override + public ValueWrapper putIfAbsent(Object key, final Object value) { + try { + PutIfAbsentCallable callable = new PutIfAbsentCallable(value); + Object result = this.cache.get(key, callable); + return (callable.called ? null : toWrapper(result)); + } catch (ExecutionException e) { + throw new IllegalArgumentException(e); + } + } + @Override public void evict(Object key) { this.cache.invalidate(key); @@ -138,9 +152,29 @@ protected Object toStoreValue(Object userValue) { return userValue; } + private ValueWrapper toWrapper(Object value) { + return (value != null ? 
new SimpleValueWrapper(fromStoreValue(value)) : null); + } + @SuppressWarnings("serial") private static class NullHolder implements Serializable { } + private class PutIfAbsentCallable implements Callable<Object> { + private boolean called; + + private final Object value; + + private PutIfAbsentCallable(Object value) { + this.value = value; + } + + @Override + public Object call() throws Exception { + called = true; + return toStoreValue(value); + } + } + } diff --git a/spring-context-support/src/main/java/org/springframework/cache/jcache/JCacheCache.java b/spring-context-support/src/main/java/org/springframework/cache/jcache/JCacheCache.java index a0b34470f188..372e754c9fad 100644 --- a/spring-context-support/src/main/java/org/springframework/cache/jcache/JCacheCache.java +++ b/spring-context-support/src/main/java/org/springframework/cache/jcache/JCacheCache.java @@ -29,6 +29,7 @@ * <p>Note: This class has been updated for JCache 1.0, as of Spring 4.0. * * @author Juergen Hoeller + * @author Stephane Nicoll * @since 3.2 */ public class JCacheCache implements Cache { @@ -95,6 +96,12 @@ public void put(Object key, Object value) { this.cache.put(key, toStoreValue(value)); } + @Override + public ValueWrapper putIfAbsent(Object key, Object value) { + boolean set = this.cache.putIfAbsent(key, toStoreValue(value)); + return (set ? 
null : get(key)); + } + @Override public void evict(Object key) { this.cache.remove(key); diff --git a/spring-context-support/src/main/java/org/springframework/cache/transaction/TransactionAwareCacheDecorator.java b/spring-context-support/src/main/java/org/springframework/cache/transaction/TransactionAwareCacheDecorator.java index adc931abe33b..e239b96d1ee1 100644 --- a/spring-context-support/src/main/java/org/springframework/cache/transaction/TransactionAwareCacheDecorator.java +++ b/spring-context-support/src/main/java/org/springframework/cache/transaction/TransactionAwareCacheDecorator.java @@ -1,5 +1,5 @@ /* - * Copyright 2002-2013 the original author or authors. + * Copyright 2002-2014 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,7 +28,11 @@ * successful transaction. If no transaction is active, {@link #put} and {@link #evict} * operations will be performed immediately, as usual. * + * <p>Use of more aggressive operations such as {@link #putIfAbsent} cannot be deferred + * to the after-commit phase of a running transaction. Use these with care. 
+ * * @author Juergen Hoeller + * @author Stephane Nicoll * @since 3.2 * @see TransactionAwareCacheManagerProxy */ @@ -82,6 +86,11 @@ public void afterCommit() { } } + @Override + public ValueWrapper putIfAbsent(final Object key, final Object value) { + return this.targetCache.putIfAbsent(key, value); + } + @Override public void evict(final Object key) { if (TransactionSynchronizationManager.isSynchronizationActive()) { diff --git a/spring-context-support/src/test/java/org/springframework/cache/AbstractCacheTests.java b/spring-context-support/src/test/java/org/springframework/cache/AbstractCacheTests.java new file mode 100644 index 000000000000..ba41bfa1800f --- /dev/null +++ b/spring-context-support/src/test/java/org/springframework/cache/AbstractCacheTests.java @@ -0,0 +1,105 @@ +/* + * Copyright 2002-2014 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.cache; + +import static org.junit.Assert.*; + +import org.junit.Test; + +/** + * @author Stephane Nicoll + */ +public abstract class AbstractCacheTests<T extends Cache> { + + protected final static String CACHE_NAME = "testCache"; + + protected abstract T getCache(); + + protected abstract Object getNativeCache(); + + @Test + public void testCacheName() throws Exception { + assertEquals(CACHE_NAME, getCache().getName()); + } + + @Test + public void testNativeCache() throws Exception { + assertSame(getNativeCache(), getCache().getNativeCache()); + } + + @Test + public void testCachePut() throws Exception { + T cache = getCache(); + + Object key = "enescu"; + Object value = "george"; + + assertNull(cache.get(key)); + assertNull(cache.get(key, String.class)); + assertNull(cache.get(key, Object.class)); + + cache.put(key, value); + assertEquals(value, cache.get(key).get()); + assertEquals(value, cache.get(key, String.class)); + assertEquals(value, cache.get(key, Object.class)); + assertEquals(value, cache.get(key, null)); + + cache.put(key, null); + assertNotNull(cache.get(key)); + assertNull(cache.get(key).get()); + assertNull(cache.get(key, String.class)); + assertNull(cache.get(key, Object.class)); + } + + @Test + public void testCachePutIfAbsent() throws Exception { + T cache = getCache(); + + Object key = new Object(); + Object value = "initialValue"; + + assertNull(cache.get(key)); + assertNull(cache.putIfAbsent(key, value)); + assertEquals(value, cache.get(key).get()); + assertEquals("initialValue", cache.putIfAbsent(key, "anotherValue").get()); + assertEquals(value, cache.get(key).get()); // not changed + } + + @Test + public void testCacheRemove() throws Exception { + T cache = getCache(); + + Object key = "enescu"; + Object value = "george"; + + assertNull(cache.get(key)); + cache.put(key, value); + } + + @Test + public void testCacheClear() throws Exception { + T cache = getCache(); + + assertNull(cache.get("enescu")); + 
cache.put("enescu", "george"); + assertNull(cache.get("vlaicu")); + cache.put("vlaicu", "aurel"); + cache.clear(); + assertNull(cache.get("vlaicu")); + assertNull(cache.get("enescu")); + } +} diff --git a/spring-context-support/src/test/java/org/springframework/cache/ehcache/EhCacheCacheTests.java b/spring-context-support/src/test/java/org/springframework/cache/ehcache/EhCacheCacheTests.java index 587eb95a1f3c..b6d58d214d17 100644 --- a/spring-context-support/src/test/java/org/springframework/cache/ehcache/EhCacheCacheTests.java +++ b/spring-context-support/src/test/java/org/springframework/cache/ehcache/EhCacheCacheTests.java @@ -16,56 +16,55 @@ package org.springframework.cache.ehcache; +import static org.junit.Assert.*; + import net.sf.ehcache.CacheManager; import net.sf.ehcache.Ehcache; import net.sf.ehcache.Element; import net.sf.ehcache.config.CacheConfiguration; +import net.sf.ehcache.config.Configuration; +import org.junit.After; import org.junit.Before; import org.junit.Test; - -import org.springframework.cache.Cache; +import org.springframework.cache.AbstractCacheTests; import org.springframework.tests.Assume; import org.springframework.tests.TestGroup; -import static org.junit.Assert.*; - /** * @author Costin Leau + * @author Stephane Nicoll * @author Juergen Hoeller */ -public class EhCacheCacheTests { - - protected final static String CACHE_NAME = "testCache"; - - protected Ehcache nativeCache; - - protected Cache cache; +public class EhCacheCacheTests extends AbstractCacheTests<EhCacheCache> { + private CacheManager cacheManager; + private Ehcache nativeCache; + private EhCacheCache cache; @Before - public void setUp() throws Exception { - if (CacheManager.getInstance().cacheExists(CACHE_NAME)) { - nativeCache = CacheManager.getInstance().getEhcache(CACHE_NAME); - } - else { - nativeCache = new net.sf.ehcache.Cache(new CacheConfiguration(CACHE_NAME, 100)); - CacheManager.getInstance().addCache(nativeCache); - } + public void setUp() { + cacheManager = 
new CacheManager(new Configuration().name("EhCacheCacheTests") + .defaultCache(new CacheConfiguration("default", 100))); + nativeCache = new net.sf.ehcache.Cache(new CacheConfiguration(CACHE_NAME, 100)); + cacheManager.addCache(nativeCache); + cache = new EhCacheCache(nativeCache); - cache.clear(); } - - @Test - public void testCacheName() throws Exception { - assertEquals(CACHE_NAME, cache.getName()); + @After + public void tearDown() { + cacheManager.shutdown(); } - @Test - public void testNativeCache() throws Exception { - assertSame(nativeCache, cache.getNativeCache()); + @Override + protected EhCacheCache getCache() { + return cache; } + @Override + protected Ehcache getNativeCache() { + return nativeCache; + } @Test public void testCachePut() throws Exception { Object key = "enescu"; @@ -88,26 +87,6 @@ public void testCachePut() throws Exception { assertNull(cache.get(key, Object.class)); } - @Test - public void testCacheRemove() throws Exception { - Object key = "enescu"; - Object value = "george"; - - assertNull(cache.get(key)); - cache.put(key, value); - } - - @Test - public void testCacheClear() throws Exception { - assertNull(cache.get("enescu")); - cache.put("enescu", "george"); - assertNull(cache.get("vlaicu")); - cache.put("vlaicu", "aurel"); - cache.clear(); - assertNull(cache.get("vlaicu")); - assertNull(cache.get("enescu")); - } - @Test public void testExpiredElements() throws Exception { Assume.group(TestGroup.LONG_RUNNING); @@ -123,5 +102,4 @@ public void testExpiredElements() throws Exception { Thread.sleep(5 * 1000); assertNull(cache.get(key)); } - } diff --git a/spring-context-support/src/test/java/org/springframework/cache/guava/GuavaCacheTests.java b/spring-context-support/src/test/java/org/springframework/cache/guava/GuavaCacheTests.java new file mode 100644 index 000000000000..763e56abb2be --- /dev/null +++ b/spring-context-support/src/test/java/org/springframework/cache/guava/GuavaCacheTests.java @@ -0,0 +1,67 @@ +/* + * Copyright 
2002-2014 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cache.guava; + +import static org.junit.Assert.*; + +import com.google.common.cache.CacheBuilder; +import org.junit.Before; +import org.junit.Test; + +import org.springframework.cache.AbstractCacheTests; +import org.springframework.cache.Cache; + +/** + * @author Stephane Nicoll + */ +public class GuavaCacheTests extends AbstractCacheTests<GuavaCache> { + + private com.google.common.cache.Cache<Object, Object> nativeCache; + private GuavaCache cache; + + @Before + public void setUp() { + nativeCache = CacheBuilder.newBuilder().build(); + cache = new GuavaCache(CACHE_NAME, nativeCache); + } + + @Override + protected GuavaCache getCache() { + return cache; + } + + @Override + protected Object getNativeCache() { + return nativeCache; + } + + @Test + public void putIfAbsentNullValue() throws Exception { + GuavaCache cache = getCache(); + + Object key = new Object(); + Object value = null; + + assertNull(cache.get(key)); + assertNull(cache.putIfAbsent(key, value)); + assertEquals(value, cache.get(key).get()); + Cache.ValueWrapper wrapper = cache.putIfAbsent(key, "anotherValue"); + assertNotNull(wrapper); // A value is set but is 'null' + assertEquals(null, wrapper.get()); + assertEquals(value, cache.get(key).get()); // not changed + } +} diff --git 
a/spring-context-support/src/test/java/org/springframework/cache/transaction/TransactionAwareCacheDecoratorTests.java b/spring-context-support/src/test/java/org/springframework/cache/transaction/TransactionAwareCacheDecoratorTests.java index 553aa74b1607..e43c721bd33a 100644 --- a/spring-context-support/src/test/java/org/springframework/cache/transaction/TransactionAwareCacheDecoratorTests.java +++ b/spring-context-support/src/test/java/org/springframework/cache/transaction/TransactionAwareCacheDecoratorTests.java @@ -90,6 +90,18 @@ public void putTransactional() { assertEquals("123", target.get(key, String.class)); } + @Test + public void putIfAbsent() { // no transactional support for putIfAbsent + Cache target = new ConcurrentMapCache("testCache"); + Cache cache = new TransactionAwareCacheDecorator(target); + + Object key = new Object(); + assertNull(cache.putIfAbsent(key, "123")); + assertEquals("123", target.get(key, String.class)); + assertEquals("123", cache.putIfAbsent(key, "456").get()); + assertEquals("123", target.get(key, String.class)); // unchanged + } + @Test public void evictNonTransactional() { Cache target = new ConcurrentMapCache("testCache"); diff --git a/spring-context/src/main/java/org/springframework/cache/Cache.java b/spring-context/src/main/java/org/springframework/cache/Cache.java index bfe32443d1c7..61058eace1fc 100644 --- a/spring-context/src/main/java/org/springframework/cache/Cache.java +++ b/spring-context/src/main/java/org/springframework/cache/Cache.java @@ -80,6 +80,33 @@ public interface Cache { */ void put(Object key, Object value); + /** + * Atomically associate the specified value with the specified key in this cache if + * it is not set already. + * <p>This is equivalent to: + * <pre><code> + * Object existingValue = cache.get(key); + * if (existingValue == null) { + * cache.put(key, value); + * return null; + * } else { + * return existingValue; + * } + * </code></pre> + * except that the action is performed atomically. 
While all known providers are + * able to perform the put atomically, the returned value may be retrieved after + * the attempt to put (i.e. in a non atomic way). Check the documentation of + * the native cache implementation that you are using for more details. + * @param key the key with which the specified value is to be associated + * @param value the value to be associated with the specified key + * @return the value to which this cache maps the specified key (which may + * be {@code null} itself), or also {@code null} if the cache did not contain + * any mapping for that key prior to this call. Returning {@code null} is + * therefore an indicator that the given {@code value} has been associated + * with the key + */ + ValueWrapper putIfAbsent(Object key, Object value); + /** * Evict the mapping for this key from this cache if it is present. * @param key the key whose mapping is to be removed from the cache diff --git a/spring-context/src/main/java/org/springframework/cache/concurrent/ConcurrentMapCache.java b/spring-context/src/main/java/org/springframework/cache/concurrent/ConcurrentMapCache.java index 0cf8264dd04b..8601245ea826 100644 --- a/spring-context/src/main/java/org/springframework/cache/concurrent/ConcurrentMapCache.java +++ b/spring-context/src/main/java/org/springframework/cache/concurrent/ConcurrentMapCache.java @@ -103,7 +103,7 @@ public final boolean isAllowNullValues() { @Override public ValueWrapper get(Object key) { Object value = this.store.get(key); - return (value != null ? 
new SimpleValueWrapper(fromStoreValue(value)) : null); + return toWrapper(value); } @Override @@ -121,6 +121,12 @@ public void put(Object key, Object value) { this.store.put(key, toStoreValue(value)); } + @Override + public ValueWrapper putIfAbsent(Object key, Object value) { + Object existing = this.store.putIfAbsent(key, value); + return toWrapper(existing); + } + @Override public void evict(Object key) { this.store.remove(key); @@ -158,6 +164,9 @@ protected Object toStoreValue(Object userValue) { return userValue; } + private ValueWrapper toWrapper(Object value) { + return (value != null ? new SimpleValueWrapper(fromStoreValue(value)) : null); + } @SuppressWarnings("serial") private static class NullHolder implements Serializable { diff --git a/spring-context/src/main/java/org/springframework/cache/support/NoOpCacheManager.java b/spring-context/src/main/java/org/springframework/cache/support/NoOpCacheManager.java index c76c79731d16..0b486804a695 100644 --- a/spring-context/src/main/java/org/springframework/cache/support/NoOpCacheManager.java +++ b/spring-context/src/main/java/org/springframework/cache/support/NoOpCacheManager.java @@ -1,5 +1,5 @@ /* - * Copyright 2002-2013 the original author or authors. + * Copyright 2002-2014 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -34,6 +34,7 @@ * <p>Will simply accept any items into the cache not actually storing them. 
* * @author Costin Leau + * @author Stephane Nicoll * @since 3.1 * @see CompositeCacheManager */ @@ -111,6 +112,11 @@ public Object getNativeCache() { @Override public void put(Object key, Object value) { } + + @Override + public ValueWrapper putIfAbsent(Object key, Object value) { + return null; + } } } diff --git a/spring-context/src/test/java/org/springframework/cache/concurrent/ConcurrentCacheTests.java b/spring-context/src/test/java/org/springframework/cache/concurrent/ConcurrentCacheTests.java index a80bc9560b9f..cba7db7d517a 100644 --- a/spring-context/src/test/java/org/springframework/cache/concurrent/ConcurrentCacheTests.java +++ b/spring-context/src/test/java/org/springframework/cache/concurrent/ConcurrentCacheTests.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2014 the original author or authors. + * Copyright 2002-2014 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,6 +29,7 @@ /** * @author Costin Leau * @author Juergen Hoeller + * @author Stephane Nicoll */ public class ConcurrentCacheTests { @@ -79,6 +80,18 @@ public void testCachePut() throws Exception { assertNull(cache.get(key, Object.class)); } + @Test + public void testCachePutIfAbsent() throws Exception { + Object key = new Object(); + Object value = "initialValue"; + + assertNull(cache.get(key)); + assertNull(cache.putIfAbsent(key, value)); + assertEquals(value, cache.get(key).get()); + assertEquals("initialValue", cache.putIfAbsent(key, "anotherValue").get()); + assertEquals(value, cache.get(key).get()); // not changed + } + @Test public void testCacheRemove() throws Exception { Object key = "enescu";
efa7f1fccf51ef1a83266e815d124c782c2a0815
intellij-community
Typos--
p
https://github.com/JetBrains/intellij-community
diff --git a/platform/platform-impl/src/com/intellij/openapi/diff/impl/DiffPanelImpl.java b/platform/platform-impl/src/com/intellij/openapi/diff/impl/DiffPanelImpl.java index 1c706ff80a7b6..3cdc48f03a2a5 100644 --- a/platform/platform-impl/src/com/intellij/openapi/diff/impl/DiffPanelImpl.java +++ b/platform/platform-impl/src/com/intellij/openapi/diff/impl/DiffPanelImpl.java @@ -1,5 +1,5 @@ /* - * Copyright 2000-2009 JetBrains s.r.o. + * Copyright 2000-2012 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -75,7 +75,7 @@ public class DiffPanelImpl implements DiffPanelEx, ContentChangeListener, TwoSid private static final Logger LOG = Logger.getInstance("#com.intellij.openapi.diff.impl.DiffPanelImpl"); private final DiffSplitterI mySplitter; - private final DiffPanelOutterComponent myPanel; + private final DiffPanelOuterComponent myPanel; private final Window myOwnerWindow; private final DiffPanelOptions myOptions; @@ -91,7 +91,7 @@ public class DiffPanelImpl implements DiffPanelEx, ContentChangeListener, TwoSid private final FontSizeSynchronizer myFontSizeSynchronizer = new FontSizeSynchronizer(); private DiffRequest myDiffRequest; private boolean myIsRequestFocus = true; - private boolean myIsSynchScroll; + private boolean myIsSyncScroll; private static final DiffRequest.ToolbarAddons TOOL_BAR = new DiffRequest.ToolbarAddons() { public void customize(DiffToolbar toolbar) { @@ -102,20 +102,19 @@ public void customize(DiffToolbar toolbar) { private boolean myDisposed = false; private final GenericDataProvider myDataProvider; private final Project myProject; - private final boolean myIsHorisontal; + private final boolean myIsHorizontal; private CanNotCalculateDiffPanel myNotCalculateDiffPanel; - private DiffIsApproximate myDiffIsApproximate; private final VisibleAreaListener myVisibleAreaListener; - public DiffPanelImpl(final Window owner, Project project, boolean 
enableToolbar, boolean horisontal) { + public DiffPanelImpl(final Window owner, Project project, boolean enableToolbar, boolean horizontal) { myProject = project; - myIsHorisontal = horisontal; + myIsHorizontal = horizontal; myOptions = new DiffPanelOptions(this); - myPanel = new DiffPanelOutterComponent(TextDiffType.DIFF_TYPES, TOOL_BAR); + myPanel = new DiffPanelOuterComponent(TextDiffType.DIFF_TYPES, TOOL_BAR); myPanel.disableToolbar(!enableToolbar); if (enableToolbar) myPanel.resetToolbar(); myOwnerWindow = owner; - myIsSynchScroll = true; + myIsSyncScroll = true; myLeftSide = new DiffSideView("", this); myRightSide = new DiffSideView("", this); myLeftSide.becomeMaster(); @@ -123,9 +122,9 @@ public DiffPanelImpl(final Window owner, Project project, boolean enableToolbar, myData = createDiffPanelState(this); - if (horisontal) { + if (horizontal) { mySplitter = new DiffSplitter(myLeftSide.getComponent(), myRightSide.getComponent(), - new DiffDividerPaint(this, FragmentSide.SIDE1), myData); + new DiffDividerPaint(this, FragmentSide.SIDE1), myData); } else { mySplitter = new HorizontalDiffSplitter(myLeftSide.getComponent(), myRightSide.getComponent()); @@ -164,7 +163,7 @@ protected DiffPanelState createDiffPanelState(@NotNull Disposable parentDisposab } public boolean isHorisontal() { - return myIsHorisontal; + return myIsHorizontal; } public DiffPanelState getDiffPanelState() { @@ -172,7 +171,7 @@ public DiffPanelState getDiffPanelState() { } public void noSynchScroll() { - myIsSynchScroll = false; + myIsSyncScroll = false; } public DiffSplitterI getSplitter() { @@ -298,8 +297,7 @@ public void setTooBigFileErrorContents() { public void setPatchAppliedApproximately() { if (myNotCalculateDiffPanel == null) { - myDiffIsApproximate = new DiffIsApproximate(); - myPanel.insertTopComponent(myDiffIsApproximate); + myPanel.insertTopComponent(new DiffIsApproximate()); } } @@ -402,7 +400,7 @@ public Rediffers getDiffUpdater() { public void onContentChangedIn(EditorSource 
source) { myDiffUpdater.contentRemoved(source); final EditorEx editor = source.getEditor(); - if (myIsHorisontal && source.getSide() == FragmentSide.SIDE1 && editor != null) { + if (myIsHorizontal && source.getSide() == FragmentSide.SIDE1 && editor != null) { editor.setVerticalScrollbarOrientation(EditorEx.VERTICAL_SCROLLBAR_LEFT); } DiffSideView viewSide = getSideView(source.getSide()); @@ -425,7 +423,7 @@ public void onContentChangedIn(EditorSource source) { Editor editor1 = getEditor(FragmentSide.SIDE1); Editor editor2 = getEditor(FragmentSide.SIDE2); - if (editor1 != null && editor2 != null && myIsSynchScroll) { + if (editor1 != null && editor2 != null && myIsSyncScroll) { myScrollSupport.install(new EditingSides[]{this}); } @@ -572,7 +570,7 @@ public void setRequestFocus(boolean isRequestFocus) { myIsRequestFocus = isRequestFocus; } - private class MyScrollingPanel implements DiffPanelOutterComponent.ScrollingPanel { + private class MyScrollingPanel implements DiffPanelOuterComponent.ScrollingPanel { public void scrollEditors() { getOptions().onNewContent(myCurrentSide); diff --git a/platform/platform-impl/src/com/intellij/openapi/diff/impl/incrementalMerge/ui/MergePanel2.java b/platform/platform-impl/src/com/intellij/openapi/diff/impl/incrementalMerge/ui/MergePanel2.java index 45694aea4256f..37bcf703b30e8 100644 --- a/platform/platform-impl/src/com/intellij/openapi/diff/impl/incrementalMerge/ui/MergePanel2.java +++ b/platform/platform-impl/src/com/intellij/openapi/diff/impl/incrementalMerge/ui/MergePanel2.java @@ -1,5 +1,5 @@ /* - * Copyright 2000-2009 JetBrains s.r.o. + * Copyright 2000-2012 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -64,7 +64,7 @@ public class MergePanel2 implements DiffViewer { private static final Logger LOG = Logger.getInstance("#com.intellij.openapi.diff.impl.incrementalMerge.ui.MergePanel2"); - private final DiffPanelOutterComponent myPanel; + private final DiffPanelOuterComponent myPanel; private DiffRequest myData; private MergeList myMergeList; private boolean myDuringCreation = false; @@ -109,7 +109,7 @@ public void onEditorReleased(Editor releasedEditor) { myEditorsPanels[i].setComponent(editorPlace); } FontSizeSynchronizer.attachTo(editorPlaces); - myPanel = new DiffPanelOutterComponent(TextDiffType.MERGE_TYPES, TOOLBAR); + myPanel = new DiffPanelOuterComponent(TextDiffType.MERGE_TYPES, TOOLBAR); myPanel.insertDiffComponent(new ThreePanels(myEditorsPanels, myDividers), new MyScrollingPanel()); myProvider = new MyDataProvider(); myPanel.setDataProvider(myProvider); @@ -328,7 +328,7 @@ public LineBlocks getLineBlocks() { } } - private class MyScrollingPanel implements DiffPanelOutterComponent.ScrollingPanel { + private class MyScrollingPanel implements DiffPanelOuterComponent.ScrollingPanel { public void scrollEditors() { Editor centerEditor = getEditor(1); JComponent centerComponent = centerEditor.getContentComponent(); @@ -504,9 +504,9 @@ public void onChangeRemoved(ChangeList source) { } private static class StatusUpdater implements ChangeCounter.Listener { - private final DiffPanelOutterComponent myPanel; + private final DiffPanelOuterComponent myPanel; - private StatusUpdater(DiffPanelOutterComponent panel) { + private StatusUpdater(DiffPanelOuterComponent panel) { myPanel = panel; } @@ -527,7 +527,7 @@ public void dispose(@NotNull MergeList mergeList) { ChangeCounter.getOrCreate(mergeList).removeListener(this); } - public static StatusUpdater install(MergeList mergeList, DiffPanelOutterComponent panel) { + public static StatusUpdater install(MergeList mergeList, DiffPanelOuterComponent panel) { ChangeCounter counters = ChangeCounter.getOrCreate(mergeList); 
StatusUpdater updater = new StatusUpdater(panel); counters.addListener(updater); diff --git a/platform/platform-impl/src/com/intellij/openapi/diff/impl/util/DiffPanelOutterComponent.java b/platform/platform-impl/src/com/intellij/openapi/diff/impl/util/DiffPanelOuterComponent.java similarity index 93% rename from platform/platform-impl/src/com/intellij/openapi/diff/impl/util/DiffPanelOutterComponent.java rename to platform/platform-impl/src/com/intellij/openapi/diff/impl/util/DiffPanelOuterComponent.java index 889c5f9e84286..0e377859ca007 100644 --- a/platform/platform-impl/src/com/intellij/openapi/diff/impl/util/DiffPanelOutterComponent.java +++ b/platform/platform-impl/src/com/intellij/openapi/diff/impl/util/DiffPanelOuterComponent.java @@ -1,5 +1,5 @@ /* - * Copyright 2000-2009 JetBrains s.r.o. + * Copyright 2000-2012 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,7 +29,7 @@ import java.awt.*; import java.util.List; -public class DiffPanelOutterComponent extends JPanel implements DataProvider { +public class DiffPanelOuterComponent extends JPanel implements DataProvider { private final DiffStatusBar myStatusBar; private final DiffToolbarComponent myToolbar; private final DiffRequest.ToolbarAddons myDefaultActions; @@ -43,7 +43,7 @@ public class DiffPanelOutterComponent extends JPanel implements DataProvider { private int myPrefferedWidth; private Getter<Integer> myDefaultHeight; - public DiffPanelOutterComponent(List<TextDiffType> diffTypes, DiffRequest.ToolbarAddons defaultActions) { + public DiffPanelOuterComponent(List<TextDiffType> diffTypes, DiffRequest.ToolbarAddons defaultActions) { super(new BorderLayout()); myStatusBar = new DiffStatusBar(diffTypes); myBottomContainer = new JPanel(new BorderLayout()); @@ -203,7 +203,7 @@ public void addStatusBar() { private interface DeferScrollToFirstDiff { DeferScrollToFirstDiff scrollNow(ScrollingPanel 
panel, JComponent component); - void deferScroll(DiffPanelOutterComponent outter); + void deferScroll(DiffPanelOuterComponent outer); } public interface ScrollingPanel { @@ -215,7 +215,7 @@ public DeferScrollToFirstDiff scrollNow(ScrollingPanel panel, JComponent compone return NO_SCROLL_NEEDED; } - public void deferScroll(DiffPanelOutterComponent outter) { + public void deferScroll(DiffPanelOuterComponent outer) { } }; @@ -226,11 +226,11 @@ public DeferScrollToFirstDiff scrollNow(ScrollingPanel panel, JComponent compone return NO_SCROLL_NEEDED; } - public void deferScroll(final DiffPanelOutterComponent outter) { - if (!outter.isDisplayable()) return; + public void deferScroll(final DiffPanelOuterComponent outer) { + if (!outer.isDisplayable()) return; SwingUtilities.invokeLater(new Runnable() { public void run() { - outter.performScroll(); + outer.performScroll(); } }); }
4c73a29f99f0b9d2232b466972a55197758684db
spring-framework
polishing--
p
https://github.com/spring-projects/spring-framework
diff --git a/org.springframework.core/src/main/java/org/springframework/core/annotation/AnnotationUtils.java b/org.springframework.core/src/main/java/org/springframework/core/annotation/AnnotationUtils.java index ebbcd83fd23c..754ed3f57f9e 100644 --- a/org.springframework.core/src/main/java/org/springframework/core/annotation/AnnotationUtils.java +++ b/org.springframework.core/src/main/java/org/springframework/core/annotation/AnnotationUtils.java @@ -50,6 +50,7 @@ public abstract class AnnotationUtils { /** The attribute name for annotations with a single element */ static final String VALUE = "value"; + /** * Get all {@link Annotation Annotations} from the supplied {@link Method}. * <p>Correctly handles bridge {@link Method Methods} generated by the compiler. @@ -135,14 +136,16 @@ private static <A extends Annotation> A searchOnInterfaces(Method method, Class< } /** - * Find a single {@link Annotation} of <code>annotationType</code> from the supplied {@link Class}, traversing its - * interfaces and super classes if no annotation can be found on the given class itself. <p>This method explicitly - * handles class-level annotations which are not declared as {@link java.lang.annotation.Inherited inherited} <i>as - * well as annotations on interfaces</i>. <p>The algorithm operates as follows: Searches for an annotation on the given - * class and returns it if found. Else searches all interfaces that the given class declares, returning the annotation - * from the first matching candidate, if any. Else proceeds with introspection of the superclass of the given class, - * checking the superclass itself; if no annotation found there, proceeds with the interfaces that the superclass - * declares. Recursing up through the entire superclass hierarchy if no match is found. + * Find a single {@link Annotation} of <code>annotationType</code> from the supplied {@link Class}, + * traversing its interfaces and super classes if no annotation can be found on the given class itself. 
+ * <p>This method explicitly handles class-level annotations which are not declared as + * {@link Inherited inherited} <i>as well as annotations on interfaces</i>. + * <p>The algorithm operates as follows: Searches for an annotation on the given class and returns + * it if found. Else searches all interfaces that the given class declares, returning the annotation + * from the first matching candidate, if any. Else proceeds with introspection of the superclass + * of the given class, checking the superclass itself; if no annotation found there, proceeds + * with the interfaces that the superclass declares. Recursing up through the entire superclass + * hierarchy if no match is found. * @param clazz the class to look for annotations on * @param annotationType the annotation class to look for * @return the annotation found, or <code>null</code> if none found @@ -175,18 +178,21 @@ public static <A extends Annotation> A findAnnotation(Class<?> clazz, Class<A> a } /** - * Find the first {@link Class} in the inheritance hierarchy of the specified <code>clazz</code> (including the - * specified <code>clazz</code> itself) which declares an annotation for the specified <code>annotationType</code>, or - * <code>null</code> if not found. If the supplied <code>clazz</code> is <code>null</code>, <code>null</code> will be - * returned. <p>If the supplied <code>clazz</code> is an interface, only the interface itself will be checked; the - * inheritance hierarchy for interfaces will not be traversed. <p>The standard {@link Class} API does not provide a - * mechanism for determining which class in an inheritance hierarchy actually declares an {@link Annotation}, so we - * need to handle this explicitly. + * Find the first {@link Class} in the inheritance hierarchy of the specified <code>clazz</code> + * (including the specified <code>clazz</code> itself) which declares an annotation for the + * specified <code>annotationType</code>, or <code>null</code> if not found. 
If the supplied + * <code>clazz</code> is <code>null</code>, <code>null</code> will be returned. + * <p>If the supplied <code>clazz</code> is an interface, only the interface itself will be checked; + * the inheritance hierarchy for interfaces will not be traversed. + * <p>The standard {@link Class} API does not provide a mechanism for determining which class + * in an inheritance hierarchy actually declares an {@link Annotation}, so we need to handle + * this explicitly. * @param annotationType the Class object corresponding to the annotation type - * @param clazz the Class object corresponding to the class on which to check for the annotation, or - * <code>null</code>. - * @return the first {@link Class} in the inheritance hierarchy of the specified <code>clazz</code> which - * declares an annotation for the specified <code>annotationType</code>, or <code>null</code> if not found. + * @param clazz the Class object corresponding to the class on which to check for the annotation, + * or <code>null</code> + * @return the first {@link Class} in the inheritance hierarchy of the specified <code>clazz</code> + * which declares an annotation for the specified <code>annotationType</code>, or <code>null</code> + * if not found * @see Class#isAnnotationPresent(Class) * @see Class#getDeclaredAnnotations() */ diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerAdapter.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerAdapter.java index e6c22dd7aa52..d0916c2aaa22 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerAdapter.java +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerAdapter.java @@ -599,9 +599,9 @@ public Method resolveHandlerMethod(HttpServletRequest request) throws 
ServletExc if (oldMappedMethod != null) { throw new IllegalStateException( "Ambiguous handler methods mapped for HTTP path '" + lookupPath + "': {" + - oldMappedMethod + ", " + handlerMethod + - "}. If you intend to handle the same path in multiple methods, then factor " + - "them out into a dedicated handler class with that path mapped at the type level!"); + oldMappedMethod + ", " + handlerMethod + + "}. If you intend to handle the same path in multiple methods, then factor " + + "them out into a dedicated handler class with that path mapped at the type level!"); } } } @@ -1093,6 +1093,7 @@ public String toString() { } } + /** * Subclass of {@link RequestMappingInfo} that holds request-specific data. */ @@ -1119,22 +1120,21 @@ public void sortMatchedPatterns(Comparator<String> pathComparator) { public String bestMatchedPattern() { return (!this.matchedPatterns.isEmpty() ? this.matchedPatterns.get(0) : null); } - } /** - * Comparator capable of sorting {@link RequestSpecificMappingInfo}s (RHIs) so that sorting a list with this comparator will - * result in: + * Comparator capable of sorting {@link RequestSpecificMappingInfo}s (RHIs) so that + * sorting a list with this comparator will result in: * <ul> - * <li>RHIs with {@linkplain org.springframework.web.servlet.mvc.annotation.AnnotationMethodHandlerAdapter.RequestSpecificMappingInfo#matchedPatterns better matched paths} take prescedence - * over those with a weaker match (as expressed by the {@linkplain PathMatcher#getPatternComparator(String) path - * pattern comparator}.) Typically, this means that patterns without wild cards and uri templates will be ordered - * before those without.</li> + * <li>RHIs with {@linkplain AnnotationMethodHandlerAdapter.RequestSpecificMappingInfo#matchedPatterns better matched paths} + * take prescedence over those with a weaker match (as expressed by the {@linkplain PathMatcher#getPatternComparator(String) + * path pattern comparator}.) 
Typically, this means that patterns without wild cards and uri templates + * will be ordered before those without.</li> * <li>RHIs with one single {@linkplain RequestMappingInfo#methods request method} will be * ordered before those without a method, or with more than one method.</li> - * <li>RHIs with more {@linkplain RequestMappingInfo#params request parameters} will be ordered before those with - * less parameters</li> + * <li>RHIs with more {@linkplain RequestMappingInfo#params request parameters} will be ordered + * before those with less parameters</li> * </ol> */ static class RequestSpecificMappingInfoComparator implements Comparator<RequestSpecificMappingInfo> {
4155741f7f486537d4a5f7193d79098d523a6ae8
elasticsearch
BytesStreamOutput default size should be 2k- instead of 32k We changed the default of BytesStreamOutput (used in various- places in ES) to 32k from 1k with the assumption that most stream tend to be- large. This doesn't hold for example when indexing small documents and adding- them using XContentBuilder (which will have a large overhead).--Default the buffer size to 2k now, but be relatively aggressive in expanding the buffer when below 256k (double it), and just use oversize (1/8th) when larger to try and minimize garbage and buffer copies.--relates to -3624-closes -3638-
p
https://github.com/elastic/elasticsearch
diff --git a/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java index 378a5fecb7ac8..52756377b5a3e 100644 --- a/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java +++ b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java @@ -31,7 +31,9 @@ */ public class BytesStreamOutput extends StreamOutput implements BytesStream { - public static final int DEFAULT_SIZE = 32 * 1024; + public static final int DEFAULT_SIZE = 2 * 1024; + + public static final int OVERSIZE_LIMIT = 256 * 1024; /** * The buffer where data is stored. @@ -73,7 +75,7 @@ public void seek(long position) throws IOException { public void writeByte(byte b) throws IOException { int newcount = count + 1; if (newcount > buf.length) { - buf = ArrayUtil.grow(buf, newcount); + buf = grow(newcount); } buf[count] = b; count = newcount; @@ -82,7 +84,7 @@ public void writeByte(byte b) throws IOException { public void skip(int length) { int newcount = count + length; if (newcount > buf.length) { - buf = ArrayUtil.grow(buf, newcount); + buf = grow(newcount); } count = newcount; } @@ -94,12 +96,20 @@ public void writeBytes(byte[] b, int offset, int length) throws IOException { } int newcount = count + length; if (newcount > buf.length) { - buf = ArrayUtil.grow(buf, newcount); + buf = grow(newcount); } System.arraycopy(b, offset, buf, count, length); count = newcount; } + private byte[] grow(int newCount) { + // try and grow faster while we are small... 
+ if (newCount < OVERSIZE_LIMIT) { + newCount = Math.max(buf.length << 1, newCount); + } + return ArrayUtil.grow(buf, newCount); + } + public void seek(int seekTo) { count = seekTo; } @@ -108,6 +118,10 @@ public void reset() { count = 0; } + public int bufferSize() { + return buf.length; + } + @Override public void flush() throws IOException { // nothing to do there diff --git a/src/test/java/org/elasticsearch/test/unit/common/io/streams/BytesStreamsTests.java b/src/test/java/org/elasticsearch/test/unit/common/io/streams/BytesStreamsTests.java index c1f4dc450e30c..d4ee4fff5351f 100644 --- a/src/test/java/org/elasticsearch/test/unit/common/io/streams/BytesStreamsTests.java +++ b/src/test/java/org/elasticsearch/test/unit/common/io/streams/BytesStreamsTests.java @@ -60,4 +60,17 @@ public void testSimpleStreams() throws Exception { assertThat(in.readString(), equalTo("hello")); assertThat(in.readString(), equalTo("goodbye")); } + + @Test + public void testGrowLogic() throws Exception { + BytesStreamOutput out = new BytesStreamOutput(); + out.writeBytes(new byte[BytesStreamOutput.DEFAULT_SIZE - 5]); + assertThat(out.bufferSize(), equalTo(2048)); // remains the default + out.writeBytes(new byte[1 * 1024]); + assertThat(out.bufferSize(), equalTo(4608)); + out.writeBytes(new byte[32 * 1024]); + assertThat(out.bufferSize(), equalTo(40320)); + out.writeBytes(new byte[32 * 1024]); + assertThat(out.bufferSize(), equalTo(90720)); + } }
ebe1a4663d41adf670239bf2765b316a2e46f12d
elasticsearch
[TEST] renamed variables in ScriptServiceTests--
p
https://github.com/elastic/elasticsearch
diff --git a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index fad39746f9f93..6339874c52b94 100644 --- a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -99,10 +99,10 @@ public void testScriptsWithoutExtensions() throws IOException { @Test public void testScriptsSameNameDifferentLanguage() throws IOException { - Path testFileNoExt = scriptsFilePath.resolve("script.groovy"); - Path testFileWithExt = scriptsFilePath.resolve("script.expression"); - Streams.copy("10".getBytes("UTF-8"), Files.newOutputStream(testFileNoExt)); - Streams.copy("20".getBytes("UTF-8"), Files.newOutputStream(testFileWithExt)); + Path groovyScriptPath = scriptsFilePath.resolve("script.groovy"); + Path expScriptPath = scriptsFilePath.resolve("script.expression"); + Streams.copy("10".getBytes("UTF-8"), Files.newOutputStream(groovyScriptPath)); + Streams.copy("20".getBytes("UTF-8"), Files.newOutputStream(expScriptPath)); resourceWatcherService.notifyNow(); CompiledScript groovyScript = scriptService.compile(GroovyScriptEngineService.NAME, "script", ScriptService.ScriptType.FILE); @@ -120,8 +120,8 @@ public void testInlineScriptCompiledOnceMultipleLangAcronyms() throws IOExceptio @Test public void testFileScriptCompiledOnceMultipleLangAcronyms() throws IOException { - Path testFileWithExt = scriptsFilePath.resolve("test_script.tst"); - Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testFileWithExt)); + Path scriptPath = scriptsFilePath.resolve("test_script.tst"); + Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(scriptPath)); resourceWatcherService.notifyNow(); CompiledScript compiledScript1 = scriptService.compile("test", "test_script", ScriptService.ScriptType.FILE);
8b9e5a2edd935dd40e6dc30c14829e208d7945f2
drools
[BZ-1092084] raise a compilation error when the- same attribute is defined twice on a rule--
a
https://github.com/kiegroup/drools
diff --git a/drools-compiler/src/main/java/org/drools/compiler/lang/ParserHelper.java b/drools-compiler/src/main/java/org/drools/compiler/lang/ParserHelper.java index 69c80fa3646..a5c41bb374e 100644 --- a/drools-compiler/src/main/java/org/drools/compiler/lang/ParserHelper.java +++ b/drools-compiler/src/main/java/org/drools/compiler/lang/ParserHelper.java @@ -801,6 +801,12 @@ void setEnd( DescrBuilder< ? , ? > db ) { NamedConsequenceDescrBuilder.class.isAssignableFrom( clazz )) ) { popParaphrases(); } + + if (RuleDescrBuilder.class.isAssignableFrom(clazz)) { + RuleDescrBuilder ruleDescrBuilder = (RuleDescrBuilder)builder; + ruleDescrBuilder.end().getDescr().afterRuleAdded(ruleDescrBuilder.getDescr()); + } + setEnd( builder ); return (T) builder; } diff --git a/drools-compiler/src/main/java/org/drools/compiler/lang/descr/PackageDescr.java b/drools-compiler/src/main/java/org/drools/compiler/lang/descr/PackageDescr.java index 090cc979fe4..ed70694ad00 100644 --- a/drools-compiler/src/main/java/org/drools/compiler/lang/descr/PackageDescr.java +++ b/drools-compiler/src/main/java/org/drools/compiler/lang/descr/PackageDescr.java @@ -16,6 +16,10 @@ package org.drools.compiler.lang.descr; +import org.drools.core.rule.Namespaceable; +import org.kie.api.io.Resource; +import org.kie.internal.definition.KnowledgeDescr; + import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; @@ -27,10 +31,6 @@ import java.util.List; import java.util.Set; -import org.drools.core.rule.Namespaceable; -import org.kie.internal.definition.KnowledgeDescr; -import org.kie.api.io.Resource; - public class PackageDescr extends BaseDescr implements Namespaceable, @@ -199,6 +199,11 @@ public void addRule(final RuleDescr rule) { if (this.rules == Collections.EMPTY_LIST) { this.rules = new ArrayList<RuleDescr>(1); } + rule.setLoadOrder(rules.size()); + this.rules.add(rule); + } + + public void afterRuleAdded(RuleDescr rule) { for (final AttributeDescr at : attributes) { // check if 
rule overrides the attribute if (!rule.getAttributes().containsKey(at.getName())) { @@ -206,8 +211,6 @@ public void addRule(final RuleDescr rule) { rule.addAttribute(at); } } - rule.setLoadOrder(rules.size()); - this.rules.add(rule); } public List<RuleDescr> getRules() { diff --git a/drools-compiler/src/main/java/org/drools/compiler/lang/descr/RuleDescr.java b/drools-compiler/src/main/java/org/drools/compiler/lang/descr/RuleDescr.java index 9de6fce97f4..e9f0c901440 100644 --- a/drools-compiler/src/main/java/org/drools/compiler/lang/descr/RuleDescr.java +++ b/drools-compiler/src/main/java/org/drools/compiler/lang/descr/RuleDescr.java @@ -16,12 +16,16 @@ package org.drools.compiler.lang.descr; +import org.drools.core.rule.Dialectable; + import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; -import java.util.*; - -import org.drools.core.rule.Dialectable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; public class RuleDescr extends AnnotatedBaseDescr implements @@ -139,7 +143,11 @@ public Map<String, AttributeDescr> getAttributes() { public void addAttribute(final AttributeDescr attribute) { if ( attribute != null ) { - this.attributes.put( attribute.getName(), attribute ); + if (attributes.containsKey(attribute.getName())) { + addError("Duplicate attribute definition: " + attribute.getName()); + } else { + this.attributes.put( attribute.getName(), attribute ); + } } } @@ -165,15 +173,19 @@ public Map<String, Object> getNamedConsequences() { public void addNamedConsequences(String name, Object consequence) { if ( namedConsequence.containsKey(name) ) { - if (errors == null) { - errors = new ArrayList<String>(); - } - errors.add("Duplicate consequence name: " + name); + addError("Duplicate consequence name: " + name); } else { namedConsequence.put(name, consequence); } } + private void addError(String message) { + if (errors == null) { + errors = new 
ArrayList<String>(); + } + errors.add(message); + } + public void setConsequenceLocation(final int line, final int pattern) { this.consequenceLine = line; diff --git a/drools-compiler/src/test/java/org/drools/compiler/integrationtests/Misc2Test.java b/drools-compiler/src/test/java/org/drools/compiler/integrationtests/Misc2Test.java index 42a3209b3d0..0667ed50e98 100644 --- a/drools-compiler/src/test/java/org/drools/compiler/integrationtests/Misc2Test.java +++ b/drools-compiler/src/test/java/org/drools/compiler/integrationtests/Misc2Test.java @@ -5662,4 +5662,15 @@ public void testEvalInSubnetwork() { assertEquals(1, list.size()); assertEquals(0, (int)list.get(0)); } + + @Test + public void testRedeclaringRuleAttribute() { + // BZ-1092084 + String str = "rule R salience 10 salience 100 when then end\n"; + + KieServices ks = KieServices.Factory.get(); + KieFileSystem kfs = ks.newKieFileSystem().write( "src/main/resources/r1.drl", str ); + Results results = ks.newKieBuilder( kfs ).buildAll().getResults(); + assertEquals(1, results.getMessages().size()); + } } \ No newline at end of file diff --git a/drools-compiler/src/test/java/org/drools/compiler/lang/descr/PackageDescrTest.java b/drools-compiler/src/test/java/org/drools/compiler/lang/descr/PackageDescrTest.java index 5045640983e..9cd64b50001 100644 --- a/drools-compiler/src/test/java/org/drools/compiler/lang/descr/PackageDescrTest.java +++ b/drools-compiler/src/test/java/org/drools/compiler/lang/descr/PackageDescrTest.java @@ -1,5 +1,10 @@ package org.drools.compiler.lang.descr; +import org.drools.compiler.Person; +import org.drools.compiler.lang.api.DescrFactory; +import org.drools.compiler.lang.api.PackageDescrBuilder; +import org.junit.Test; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -10,13 +15,6 @@ import java.util.List; import java.util.Map; -import org.drools.compiler.Person; -import org.drools.compiler.lang.api.DescrFactory; -import 
org.drools.compiler.lang.api.PackageDescrBuilder; -import org.drools.compiler.lang.descr.AttributeDescr; -import org.drools.compiler.lang.descr.PackageDescr; -import org.drools.compiler.lang.descr.RuleDescr; -import org.junit.Test; import static org.junit.Assert.*; public class PackageDescrTest { @@ -39,7 +37,8 @@ public void testAttributeOverriding() { List pkgAts = desc.getAttributes(); assertEquals("bar", ((AttributeDescr)pkgAts.get( 0 )).getValue()); assertEquals("default", ((AttributeDescr)pkgAts.get( 1 )).getValue()); - + + desc.afterRuleAdded( rule ); Map<String, AttributeDescr> ruleAts = rule.getAttributes(); assertEquals("overridden", ((AttributeDescr)ruleAts.get( "foo" )).getValue());
393b67f7a03d76698375be3350ff9282661fbf21
intellij-community
tests repaired--
c
https://github.com/JetBrains/intellij-community
diff --git a/java/java-tests/testSrc/com/intellij/codeInsight/completion/MethodChainsCompletionTest.java b/java/java-tests/testSrc/com/intellij/codeInsight/completion/MethodChainsCompletionTest.java index 320c63cad8d51..49c1c33c10572 100644 --- a/java/java-tests/testSrc/com/intellij/codeInsight/completion/MethodChainsCompletionTest.java +++ b/java/java-tests/testSrc/com/intellij/codeInsight/completion/MethodChainsCompletionTest.java @@ -109,7 +109,7 @@ public void testResultsForSuperClassesShowed() { assertOneElement(doCompletion()); } - public void testInnerClasses() { + public void _testInnerClasses() { assertAdvisorLookupElementEquals("j.getEntry", 0, 8, 1, 0, assertOneElement(doCompletion())); }
1596f988357e666df08193bfb7f41a61f6397afe
hbase
HBASE-3746 Clean up CompressionTest to not- directly reference DistributedFileSystem--git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1089684 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/hbase
diff --git a/CHANGES.txt b/CHANGES.txt index ca26637919d4..032652cd313a 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -169,6 +169,12 @@ Release 0.91.0 - Unreleased HBASE-3488 Add CellCounter to count multiple versions of rows (Subbu M. Iyer via Stack) +Release 0.90.3 - Unreleased + + BUG FIXES + HBASE-3746 Clean up CompressionTest to not directly reference + DistributedFileSystem (todd) + Release 0.90.2 - Unreleased diff --git a/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index ee241918a173..d58d7b3c29ba 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -22,6 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; @@ -92,55 +93,48 @@ public static void testCompression(Compression.Algorithm algo) protected static Path path = new Path(".hfile-comp-test"); public static void usage() { - System.err.println("Usage: CompressionTest HDFS_PATH none|gz|lzo"); + System.err.println( + "Usage: CompressionTest <path> none|gz|lzo\n" + + "\n" + + "For example:\n" + + " hbase " + CompressionTest.class + " file:///tmp/testfile gz\n"); System.exit(1); } - protected static DistributedFileSystem openConnection(String urlString) - throws java.net.URISyntaxException, java.io.IOException { - URI dfsUri = new URI(urlString); - Configuration dfsConf = new Configuration(); - DistributedFileSystem dfs = new DistributedFileSystem(); - dfs.initialize(dfsUri, dfsConf); - return dfs; + public static void doSmokeTest(FileSystem fs, Path path, String codec) + throws Exception { + HFile.Writer writer = new HFile.Writer( + fs, path, HFile.DEFAULT_BLOCKSIZE, codec, 
null); + writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval")); + writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval")); + writer.close(); + + HFile.Reader reader = new HFile.Reader(fs, path, null, false, false); + reader.loadFileInfo(); + byte[] key = reader.getFirstKey(); + boolean rc = Bytes.toString(key).equals("testkey"); + reader.close(); + + if (!rc) { + throw new Exception("Read back incorrect result: " + + Bytes.toStringBinary(key)); + } } - protected static boolean closeConnection(DistributedFileSystem dfs) { - if (dfs != null) { - try { - dfs.close(); - } catch (Exception e) { - e.printStackTrace(); - } + public static void main(String[] args) throws Exception { + if (args.length != 2) { + usage(); + System.exit(1); } - return dfs == null; - } - public static void main(String[] args) { - if (args.length != 2) usage(); + Configuration conf = new Configuration(); + Path path = new Path(args[0]); + FileSystem fs = path.getFileSystem(conf); try { - DistributedFileSystem dfs = openConnection(args[0]); - dfs.delete(path, false); - HFile.Writer writer = new HFile.Writer(dfs, path, - HFile.DEFAULT_BLOCKSIZE, args[1], null); - writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval")); - writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval")); - writer.close(); - - HFile.Reader reader = new HFile.Reader(dfs, path, null, false, false); - reader.loadFileInfo(); - byte[] key = reader.getFirstKey(); - boolean rc = Bytes.toString(key).equals("testkey"); - reader.close(); - - dfs.delete(path, false); - closeConnection(dfs); - - if (rc) System.exit(0); - } catch (Exception e) { - e.printStackTrace(); + doSmokeTest(fs, path, args[1]); + } finally { + fs.delete(path, false); } - System.out.println("FAILED"); - System.exit(1); + System.out.println("SUCCESS"); } }
73b54f4efe43dbe674621ba81c7ab7e04e1157c8
spring-framework
SPR-6466 - ContentNegotiatingViewResolver can not- handle View implementations returning null as content type--
c
https://github.com/spring-projects/spring-framework
diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolver.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolver.java index 1ffbb93e744e..0ad603a851a3 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolver.java +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolver.java @@ -350,12 +350,15 @@ public View resolveViewName(String viewName, Locale locale) throws Exception { } for (View candidateView : candidateViews) { - MediaType viewMediaType = MediaType.parseMediaType(candidateView.getContentType()); - for (MediaType requestedMediaType : requestedMediaTypes) { - if (requestedMediaType.includes(viewMediaType)) { - if (!views.containsKey(requestedMediaType)) { - views.put(requestedMediaType, candidateView); - break; + String contentType = candidateView.getContentType(); + if (StringUtils.hasText(contentType)) { + MediaType viewMediaType = MediaType.parseMediaType(contentType); + for (MediaType requestedMediaType : requestedMediaTypes) { + if (requestedMediaType.includes(viewMediaType)) { + if (!views.containsKey(requestedMediaType)) { + views.put(requestedMediaType, candidateView); + break; + } } } } diff --git a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolverTests.java b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolverTests.java index 8c1ce84dd003..49d4a1869da4 100644 --- a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolverTests.java +++ b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/view/ContentNegotiatingViewResolverTests.java @@ -280,4 +280,29 @@ public void 
resolveViewNameFilenameDefaultView() throws Exception { verify(viewResolverMock1, viewResolverMock2, viewMock1, viewMock2, viewMock3); } + @Test + public void resolveViewContentTypeNull() throws Exception { + MockHttpServletRequest request = new MockHttpServletRequest("GET", "/test"); + request.addHeader("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"); + RequestContextHolder.setRequestAttributes(new ServletRequestAttributes(request)); + + ViewResolver viewResolverMock = createMock(ViewResolver.class); + viewResolver.setViewResolvers(Collections.singletonList(viewResolverMock)); + + View viewMock = createMock("application_xml", View.class); + + String viewName = "view"; + Locale locale = Locale.ENGLISH; + + expect(viewResolverMock.resolveViewName(viewName, locale)).andReturn(viewMock); + expect(viewMock.getContentType()).andReturn(null); + + replay(viewResolverMock, viewMock); + + View result = viewResolver.resolveViewName(viewName, locale); + assertNull("Invalid view", result); + + verify(viewResolverMock, viewMock); + } + }
9e6fab3a6dcbf26e3aad60aa5cf371adbdc3b47b
elasticsearch
Added support for acknowledgements to update- cluster settings api--As a side note, the internal reroute call is now part of the ack mechanism. That means that if the response contains acknowledged flag, the internal reroute that was eventually issued was acknowledged too. Also, even if the request is not acknowledged, the reroute is issued before returning, which means that there is no need to manually call reroute afterwards to make sure the new settings are immediately applied.--Closes -3995-
a
https://github.com/elastic/elasticsearch
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index ec018f8ca3b3d..73150e802b463 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -20,8 +20,9 @@ package org.elasticsearch.action.admin.cluster.settings; import org.elasticsearch.ElasticSearchGenerationException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.master.MasterNodeOperationRequest; +import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; @@ -39,8 +40,9 @@ import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream; /** + * Request for an update cluster settings action */ -public class ClusterUpdateSettingsRequest extends MasterNodeOperationRequest<ClusterUpdateSettingsRequest> { +public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpdateSettingsRequest> { private Settings transientSettings = EMPTY_SETTINGS; private Settings persistentSettings = EMPTY_SETTINGS; @@ -65,21 +67,34 @@ Settings persistentSettings() { return persistentSettings; } + /** + * Sets the transient settings to be updated. They will not survive a full cluster restart + */ public ClusterUpdateSettingsRequest transientSettings(Settings settings) { this.transientSettings = settings; return this; } + /** + * Sets the transient settings to be updated. 
They will not survive a full cluster restart + */ public ClusterUpdateSettingsRequest transientSettings(Settings.Builder settings) { this.transientSettings = settings.build(); return this; } + /** + * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart + */ public ClusterUpdateSettingsRequest transientSettings(String source) { this.transientSettings = ImmutableSettings.settingsBuilder().loadFromSource(source).build(); return this; } + /** + * Sets the transient settings to be updated. They will not survive a full cluster restart + */ + @SuppressWarnings("unchecked") public ClusterUpdateSettingsRequest transientSettings(Map source) { try { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); @@ -91,21 +106,34 @@ public ClusterUpdateSettingsRequest transientSettings(Map source) { return this; } + /** + * Sets the persistent settings to be updated. They will get applied cross restarts + */ public ClusterUpdateSettingsRequest persistentSettings(Settings settings) { this.persistentSettings = settings; return this; } + /** + * Sets the persistent settings to be updated. They will get applied cross restarts + */ public ClusterUpdateSettingsRequest persistentSettings(Settings.Builder settings) { this.persistentSettings = settings.build(); return this; } + /** + * Sets the source containing the persistent settings to be updated. They will get applied cross restarts + */ public ClusterUpdateSettingsRequest persistentSettings(String source) { this.persistentSettings = ImmutableSettings.settingsBuilder().loadFromSource(source).build(); return this; } + /** + * Sets the persistent settings to be updated. 
They will get applied cross restarts + */ + @SuppressWarnings("unchecked") public ClusterUpdateSettingsRequest persistentSettings(Map source) { try { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); @@ -117,12 +145,12 @@ public ClusterUpdateSettingsRequest persistentSettings(Map source) { return this; } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); transientSettings = readSettingsFromStream(in); persistentSettings = readSettingsFromStream(in); + readTimeout(in, Version.V_0_90_6); } @Override @@ -130,5 +158,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeSettingsToStream(transientSettings, out); writeSettingsToStream(persistentSettings, out); + writeTimeout(out, Version.V_0_90_6); } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index d7a849cc35853..991b9deae3b21 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.admin.cluster.settings; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.internal.InternalClusterAdminClient; import org.elasticsearch.common.settings.Settings; @@ -28,48 +28,73 @@ import java.util.Map; /** + * Builder for a cluster update settings request */ -public class ClusterUpdateSettingsRequestBuilder extends MasterNodeOperationRequestBuilder<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, 
ClusterUpdateSettingsRequestBuilder> { +public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder> { public ClusterUpdateSettingsRequestBuilder(ClusterAdminClient clusterClient) { super((InternalClusterAdminClient) clusterClient, new ClusterUpdateSettingsRequest()); } + /** + * Sets the transient settings to be updated. They will not survive a full cluster restart + */ public ClusterUpdateSettingsRequestBuilder setTransientSettings(Settings settings) { request.transientSettings(settings); return this; } + /** + * Sets the transient settings to be updated. They will not survive a full cluster restart + */ public ClusterUpdateSettingsRequestBuilder setTransientSettings(Settings.Builder settings) { request.transientSettings(settings); return this; } + /** + * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart + */ public ClusterUpdateSettingsRequestBuilder setTransientSettings(String settings) { request.transientSettings(settings); return this; } + /** + * Sets the transient settings to be updated. They will not survive a full cluster restart + */ public ClusterUpdateSettingsRequestBuilder setTransientSettings(Map settings) { request.transientSettings(settings); return this; } + /** + * Sets the persistent settings to be updated. They will get applied cross restarts + */ public ClusterUpdateSettingsRequestBuilder setPersistentSettings(Settings settings) { request.persistentSettings(settings); return this; } + /** + * Sets the persistent settings to be updated. They will get applied cross restarts + */ public ClusterUpdateSettingsRequestBuilder setPersistentSettings(Settings.Builder settings) { request.persistentSettings(settings); return this; } + /** + * Sets the source containing the persistent settings to be updated. 
They will get applied cross restarts + */ public ClusterUpdateSettingsRequestBuilder setPersistentSettings(String settings) { request.persistentSettings(settings); return this; } + /** + * Sets the persistent settings to be updated. They will get applied cross restarts + */ public ClusterUpdateSettingsRequestBuilder setPersistentSettings(Map settings) { request.persistentSettings(settings); return this; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index 96598394bc061..0b7ed0a5f76ab 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -19,18 +19,19 @@ package org.elasticsearch.action.admin.cluster.settings; -import java.io.IOException; - -import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.Version; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import java.io.IOException; + /** * A response for a cluster update settings action. 
*/ -public class ClusterUpdateSettingsResponse extends ActionResponse { +public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { Settings transientSettings; Settings persistentSettings; @@ -40,7 +41,8 @@ public class ClusterUpdateSettingsResponse extends ActionResponse { this.transientSettings = ImmutableSettings.EMPTY; } - ClusterUpdateSettingsResponse(Settings transientSettings, Settings persistentSettings) { + ClusterUpdateSettingsResponse(boolean acknowledged, Settings transientSettings, Settings persistentSettings) { + super(acknowledged); this.persistentSettings = persistentSettings; this.transientSettings = transientSettings; } @@ -50,6 +52,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); transientSettings = ImmutableSettings.readSettingsFromStream(in); persistentSettings = ImmutableSettings.readSettingsFromStream(in); + readAcknowledged(in, Version.V_0_90_6); } public Settings getTransientSettings() { @@ -65,5 +68,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); ImmutableSettings.writeSettingsToStream(transientSettings, out); ImmutableSettings.writeSettingsToStream(persistentSettings, out); + writeAcknowledged(out, Version.V_0_90_6); } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 4dd19c9d90ed4..c320636b8a24e 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -22,16 +22,17 @@ import org.elasticsearch.ElasticSearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; 
import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.TimeoutClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.settings.ClusterDynamicSettings; import org.elasticsearch.cluster.settings.DynamicSettings; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ImmutableSettings; @@ -42,6 +43,7 @@ import java.util.Map; +import static org.elasticsearch.cluster.ClusterState.builder; import static org.elasticsearch.cluster.ClusterState.newClusterStateBuilder; /** @@ -86,7 +88,91 @@ protected void masterOperation(final ClusterUpdateSettingsRequest request, final final ImmutableSettings.Builder transientUpdates = ImmutableSettings.settingsBuilder(); final ImmutableSettings.Builder persistentUpdates = ImmutableSettings.settingsBuilder(); - clusterService.submitStateUpdateTask("cluster_update_settings", Priority.URGENT, new TimeoutClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask() { + + private volatile boolean changed = false; + + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + return true; + } + + @Override + public void onAllNodesAcked(@Nullable Throwable t) { + if (changed) { + reroute(true); + } else { + listener.onResponse(new ClusterUpdateSettingsResponse(true, transientUpdates.build(), persistentUpdates.build())); + } + + } + + @Override + public void onAckTimeout() { + if (changed) { + reroute(false); + } 
else { + listener.onResponse(new ClusterUpdateSettingsResponse(false, transientUpdates.build(), persistentUpdates.build())); + } + } + + private void reroute(final boolean updateSettingsAcked) { + clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask() { + + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + //we wait for the reroute ack only if the update settings was acknowledged + return updateSettingsAcked; + } + + @Override + public void onAllNodesAcked(@Nullable Throwable t) { + //we return when the cluster reroute is acked (the acknowledged flag depends on whether the update settings was acknowledged) + listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build())); + } + + @Override + public void onAckTimeout() { + //we return when the cluster reroute ack times out (acknowledged false) + listener.onResponse(new ClusterUpdateSettingsResponse(false, transientUpdates.build(), persistentUpdates.build())); + } + + @Override + public TimeValue ackTimeout() { + return request.timeout(); + } + + @Override + public TimeValue timeout() { + return request.masterNodeTimeout(); + } + + @Override + public void onFailure(String source, Throwable t) { + //if the reroute fails we only log + logger.debug("failed to perform [{}]", t, source); + } + + @Override + public ClusterState execute(final ClusterState currentState) { + // now, reroute in case things that require it changed (e.g. 
number of replicas) + RoutingAllocation.Result routingResult = allocationService.reroute(currentState); + if (!routingResult.changed()) { + return currentState; + } + return newClusterStateBuilder().state(currentState).routingResult(routingResult).build(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + } + }); + } + + @Override + public TimeValue ackTimeout() { + return request.timeout(); + } @Override public TimeValue timeout() { @@ -101,7 +187,6 @@ public void onFailure(String source, Throwable t) { @Override public ClusterState execute(final ClusterState currentState) { - boolean changed = false; ImmutableSettings.Builder transientSettings = ImmutableSettings.settingsBuilder(); transientSettings.put(currentState.metaData().transientSettings()); for (Map.Entry<String, String> entry : request.transientSettings().getAsMap().entrySet()) { @@ -152,38 +237,12 @@ public ClusterState execute(final ClusterState currentState) { blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); } - return ClusterState.builder().state(currentState).metaData(metaData).blocks(blocks).build(); + return builder().state(currentState).metaData(metaData).blocks(blocks).build(); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (oldState == newState) { - // nothing changed... 
- listener.onResponse(new ClusterUpdateSettingsResponse(transientUpdates.build(), persistentUpdates.build())); - return; - } - // now, reroute - clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings", Priority.URGENT, new ClusterStateUpdateTask() { - @Override - public ClusterState execute(final ClusterState currentState) { - try { - // now, reroute in case things change that require it (like number of replicas) - RoutingAllocation.Result routingResult = allocationService.reroute(currentState); - if (!routingResult.changed()) { - return currentState; - } - return newClusterStateBuilder().state(currentState).routingResult(routingResult).build(); - } finally { - listener.onResponse(new ClusterUpdateSettingsResponse(transientUpdates.build(), persistentUpdates.build())); - } - } - @Override - public void onFailure(String source, Throwable t) { - logger.warn("unexpected failure during [{}]", t, source); - listener.onResponse(new ClusterUpdateSettingsResponse(transientUpdates.build(), persistentUpdates.build())); - } - }); } }); } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java index 6e62adae2de81..9be3a61be50e4 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.cluster.settings; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.client.Client; @@ -29,7 +28,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import 
org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.rest.*; -import org.elasticsearch.rest.action.support.RestXContentBuilder; import java.io.IOException; import java.util.Map; @@ -48,6 +46,7 @@ public RestClusterUpdateSettingsAction(Settings settings, Client client, RestCon public void handleRequest(final RestRequest request, final RestChannel channel) { final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = Requests.clusterUpdateSettingsRequest(); clusterUpdateSettingsRequest.listenerThreaded(false); + clusterUpdateSettingsRequest.timeout(request.paramAsTime("timeout", clusterUpdateSettingsRequest.timeout())); clusterUpdateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterUpdateSettingsRequest.masterNodeTimeout())); try { Map<String, Object> source = XContentFactory.xContent(request.content()).createParser(request.content()).mapAndClose(); @@ -66,31 +65,21 @@ public void handleRequest(final RestRequest request, final RestChannel channel) return; } - client.admin().cluster().updateSettings(clusterUpdateSettingsRequest, new ActionListener<ClusterUpdateSettingsResponse>() { - @Override - public void onResponse(ClusterUpdateSettingsResponse response) { - try { - XContentBuilder builder = RestXContentBuilder.restContentBuilder(request); - builder.startObject(); - - builder.startObject("persistent"); - for (Map.Entry<String, String> entry : response.getPersistentSettings().getAsMap().entrySet()) { - builder.field(entry.getKey(), entry.getValue()); - } - builder.endObject(); - - builder.startObject("transient"); - for (Map.Entry<String, String> entry : response.getTransientSettings().getAsMap().entrySet()) { - builder.field(entry.getKey(), entry.getValue()); - } - builder.endObject(); + client.admin().cluster().updateSettings(clusterUpdateSettingsRequest, new AcknowledgedRestResponseActionListener<ClusterUpdateSettingsResponse>(request, channel, logger) { - builder.endObject(); + @Override + protected void 
addCustomFields(XContentBuilder builder, ClusterUpdateSettingsResponse response) throws IOException { + builder.startObject("persistent"); + for (Map.Entry<String, String> entry : response.getPersistentSettings().getAsMap().entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); - channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder)); - } catch (Throwable e) { - onFailure(e); + builder.startObject("transient"); + for (Map.Entry<String, String> entry : response.getTransientSettings().getAsMap().entrySet()) { + builder.field(entry.getKey(), entry.getValue()); } + builder.endObject(); } @Override @@ -98,11 +87,7 @@ public void onFailure(Throwable e) { if (logger.isDebugEnabled()) { logger.debug("failed to handle cluster state", e); } - try { - channel.sendResponse(new XContentThrowableRestResponse(request, e)); - } catch (IOException e1) { - logger.error("Failed to send failure response", e1); - } + super.onFailure(e); } }); } diff --git a/src/test/java/org/elasticsearch/cluster/ack/AckTests.java b/src/test/java/org/elasticsearch/cluster/ack/AckTests.java index b2dc9332e9736..b4e36b3b23e8f 100644 --- a/src/test/java/org/elasticsearch/cluster/ack/AckTests.java +++ b/src/test/java/org/elasticsearch/cluster/ack/AckTests.java @@ -33,10 +33,7 @@ import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.routing.IndexRoutingTable; -import org.elasticsearch.cluster.routing.MutableShardRouting; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; @@ -238,4 +235,49 @@ private static 
MoveAllocationCommand getAllocationCommand() { return new MoveAllocationCommand(shardToBeMoved.shardId(), fromNodeId, toNodeId); } + + @Test + public void testClusterUpdateSettingsAcknowledgement() { + client().admin().indices().prepareCreate("test") + .setSettings(settingsBuilder() + .put("number_of_shards", atLeast(cluster().numNodes())) + .put("number_of_replicas", 0)).get(); + ensureGreen(); + + NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().get(); + String excludedNodeId = null; + for (NodeInfo nodeInfo : nodesInfo) { + if (nodeInfo.getNode().isDataNode()) { + excludedNodeId = nodesInfo.getAt(0).getNode().id(); + break; + } + } + assert excludedNodeId != null; + + ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster().prepareUpdateSettings() + .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get(); + assertThat(clusterUpdateSettingsResponse.isAcknowledged(), equalTo(true)); + assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId)); + + for (Client client : clients()) { + ClusterState clusterState = client.admin().cluster().prepareState().setLocal(true).get().getState(); + assertThat(clusterState.routingNodes().metaData().transientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId)); + for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : indexShardRoutingTable) { + if (clusterState.nodes().get(shardRouting.currentNodeId()).id().equals(excludedNodeId)) { + //if the shard is still there it must be relocating and all nodes need to know, since the request was acknowledged + assertThat(shardRouting.relocating(), equalTo(true)); + } + } + } + } + } + + //let's wait for the relocation to be completed, otherwise 
there can be issues with after test checks (mock directory wrapper etc.) + waitForRelocation(); + + //removes the allocation exclude settings + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", "")).get(); + } } diff --git a/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java b/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java index 23f92eaafa149..e1f4fdfaaec8f 100644 --- a/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java +++ b/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java @@ -204,9 +204,7 @@ public void testAwarenessZonesIncrementalNodes() throws InterruptedException { assertThat(counts.get(B_1), equalTo(2)); assertThat(counts.containsKey(noZoneNode), equalTo(false)); client().admin().cluster().prepareUpdateSettings().setTransientSettings(ImmutableSettings.settingsBuilder().put("cluster.routing.allocation.awareness.attributes", "").build()).get(); - - - client().admin().cluster().prepareReroute().get(); + health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet(); assertThat(health.isTimedOut(), equalTo(false)); diff --git a/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java b/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java index 4a03798c27dd0..9bbea5b043b3d 100644 --- a/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java +++ b/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java @@ -35,8 +35,6 @@ import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.hamcrest.Matchers.equalTo; -/** - */ @ClusterScope(scope=Scope.TEST, numNodes=0) public class 
FilteringAllocationTests extends AbstractIntegrationTest { @@ -65,9 +63,7 @@ public void testDecommissionNodeNoReplicas() throws Exception { client().admin().cluster().prepareUpdateSettings() .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._name", node_1)) .execute().actionGet(); - - client().admin().cluster().prepareReroute().get(); - ensureGreen(); + waitForRelocation(); logger.info("--> verify all are allocated on node1 now"); ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -140,7 +136,6 @@ public void testDisablingAllocationFiltering() throws Exception { client().admin().indices().prepareUpdateSettings("test") .setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", "")) .execute().actionGet(); - client().admin().cluster().prepareReroute().get(); ensureGreen();
642ed17a4808e36f1458546cc66d52e212cc5acf
hadoop
HADOOP-6951. Distinct minicluster services (e.g.- NN and JT) overwrite each other's service policies. Contributed by Aaron T.- Myers.--git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1002896 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hadoop
diff --git a/CHANGES.txt b/CHANGES.txt index 3850d6ecad5d9..0590fa7e4b981 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -250,6 +250,9 @@ Trunk (unreleased changes) HADOOP-6940. RawLocalFileSystem's markSupported method misnamed markSupport. (Tom White via eli). + HADOOP-6951. Distinct minicluster services (e.g. NN and JT) overwrite each + other's service policies. (Aaron T. Myers via tomwhite) + Release 0.21.0 - Unreleased INCOMPATIBLE CHANGES diff --git a/src/java/org/apache/hadoop/ipc/Server.java b/src/java/org/apache/hadoop/ipc/Server.java index e8ee049cb6025..01d76d886ae4f 100644 --- a/src/java/org/apache/hadoop/ipc/Server.java +++ b/src/java/org/apache/hadoop/ipc/Server.java @@ -60,6 +60,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.io.BytesWritable; @@ -78,6 +79,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.SecretManager; @@ -182,6 +184,7 @@ public static String getRemoteAddress() { private Configuration conf; private SecretManager<TokenIdentifier> secretManager; + private ServiceAuthorizationManager serviceAuthorizationManager = new ServiceAuthorizationManager(); private int maxQueueSize; private final int maxRespSize; @@ -239,6 +242,22 @@ public RpcMetrics getRpcMetrics() { return rpcMetrics; } + /** + * Refresh the service authorization ACL for the service handled by this server. 
+ */ + public void refreshServiceAcl(Configuration conf, PolicyProvider provider) { + serviceAuthorizationManager.refresh(conf, provider); + } + + /** + * Returns a handle to the serviceAuthorizationManager (required in tests) + * @return instance of ServiceAuthorizationManager for this server + */ + @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) + public ServiceAuthorizationManager getServiceAuthorizationManager() { + return serviceAuthorizationManager; + } + /** A call queued for handling. */ private static class Call { private int id; // the client's call id @@ -1652,7 +1671,7 @@ public void authorize(UserGroupInformation user, throw new AuthorizationException("Unknown protocol: " + connection.getProtocol()); } - ServiceAuthorizationManager.authorize(user, protocol, getConf(), hostname); + serviceAuthorizationManager.authorize(user, protocol, getConf(), hostname); } } diff --git a/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java b/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java index 3f78cf9ef2e4f..a73fa2cd9fec5 100644 --- a/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java +++ b/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.IdentityHashMap; import java.util.Map; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -43,7 +44,7 @@ public class ServiceAuthorizationManager { private static final Log LOG = LogFactory .getLog(ServiceAuthorizationManager.class); - private static Map<Class<?>, AccessControlList> protocolToAcl = + private Map<Class<?>, AccessControlList> protocolToAcl = new IdentityHashMap<Class<?>, AccessControlList>(); /** @@ -73,7 +74,7 @@ public class ServiceAuthorizationManager { * @param hostname fully qualified domain name of the client * @throws AuthorizationException on authorization failure */ 
- public static void authorize(UserGroupInformation user, + public void authorize(UserGroupInformation user, Class<?> protocol, Configuration conf, String hostname @@ -129,7 +130,7 @@ public static void authorize(UserGroupInformation user, AUDITLOG.info(AUTHZ_SUCCESSFULL_FOR + user + " for protocol="+protocol); } - public static synchronized void refresh(Configuration conf, + public synchronized void refresh(Configuration conf, PolicyProvider provider) { // Get the system property 'hadoop.policy.file' String policyFile = @@ -158,4 +159,9 @@ public static synchronized void refresh(Configuration conf, // Flip to the newly parsed permissions protocolToAcl = newAcls; } + + // Package-protected for use in tests. + Set<Class<?>> getProtocolsWithAcls() { + return protocolToAcl.keySet(); + } } diff --git a/src/test/core/org/apache/hadoop/ipc/TestRPC.java b/src/test/core/org/apache/hadoop/ipc/TestRPC.java index c87391e4d58b8..9ca6a6e936142 100644 --- a/src/test/core/org/apache/hadoop/ipc/TestRPC.java +++ b/src/test/core/org/apache/hadoop/ipc/TestRPC.java @@ -41,7 +41,6 @@ import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; -import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.hadoop.security.AccessControlException; import static org.mockito.Mockito.*; @@ -364,11 +363,11 @@ public Service[] getServices() { } private void doRPCs(Configuration conf, boolean expectFailure) throws Exception { - ServiceAuthorizationManager.refresh(conf, new TestPolicyProvider()); - Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, 0, 5, true, conf, null); + server.refreshServiceAcl(conf, new TestPolicyProvider()); + TestProtocol proxy = null; server.start();
4043b1d38140d531f5f97d4f87850f168283c240
spring-framework
Workaround Javadoc bug with JDK 8 (b112+)--Remove Javadoc linkplain to ExceptionHandler-value() from-AnnotationMethodHandlerExceptionResolver to work around JDK-Javadoc bug 9007707.-
c
https://github.com/spring-projects/spring-framework
diff --git a/spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerExceptionResolver.java b/spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerExceptionResolver.java index 1450d8a9683a..65bba17c3830 100644 --- a/spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerExceptionResolver.java +++ b/spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerExceptionResolver.java @@ -206,7 +206,7 @@ public void doWith(Method method) { /** * Returns all the exception classes handled by the given method. - * <p>The default implementation looks for exceptions in the {@linkplain ExceptionHandler#value() annotation}, + * <p>The default implementation looks for exceptions in the annotation, * or - if that annotation element is empty - any exceptions listed in the method parameters if the method * is annotated with {@code @ExceptionHandler}. * @param method the method
b370969690e5463374c4f47e6f8543c07c5ae4d9
spring-framework
added public "validateDatabaseSchema" method to- Hibernate LocalSessionFactoryBean (SPR- )--
a
https://github.com/spring-projects/spring-framework
diff --git a/org.springframework.orm/src/main/java/org/springframework/orm/hibernate3/LocalSessionFactoryBean.java b/org.springframework.orm/src/main/java/org/springframework/orm/hibernate3/LocalSessionFactoryBean.java index 07ede778ab49..5bc2c55ec2b5 100644 --- a/org.springframework.orm/src/main/java/org/springframework/orm/hibernate3/LocalSessionFactoryBean.java +++ b/org.springframework.orm/src/main/java/org/springframework/orm/hibernate3/LocalSessionFactoryBean.java @@ -1,5 +1,5 @@ /* - * Copyright 2002-2009 the original author or authors. + * Copyright 2002-2010 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -875,21 +875,7 @@ public final Configuration getConfiguration() { @Override protected void afterSessionFactoryCreation() throws Exception { if (this.schemaUpdate) { - DataSource dataSource = getDataSource(); - if (dataSource != null) { - // Make given DataSource available for the schema update, - // which unfortunately reinstantiates a ConnectionProvider. - configTimeDataSourceHolder.set(dataSource); - } - try { - updateDatabaseSchema(); - } - finally { - if (dataSource != null) { - // Reset DataSource holder. - configTimeDataSourceHolder.set(null); - } - } + updateDatabaseSchema(); } } @@ -916,6 +902,93 @@ public void destroy() throws HibernateException { } + /** + * Execute schema update script, determined by the Configuration object + * used for creating the SessionFactory. A replacement for Hibernate's + * SchemaUpdate class, for automatically executing schema update scripts + * on application startup. Can also be invoked manually. + * <p>Fetch the LocalSessionFactoryBean itself rather than the exposed + * SessionFactory to be able to invoke this method, e.g. via + * <code>LocalSessionFactoryBean lsfb = (LocalSessionFactoryBean) ctx.getBean("&mySessionFactory");</code>. 
+ * <p>Uses the SessionFactory that this bean generates for accessing a + * JDBC connection to perform the script. + * @throws DataAccessException in case of script execution errors + * @see #setSchemaUpdate + * @see org.hibernate.cfg.Configuration#generateSchemaUpdateScript + * @see org.hibernate.tool.hbm2ddl.SchemaUpdate + */ + public void updateDatabaseSchema() throws DataAccessException { + logger.info("Updating database schema for Hibernate SessionFactory"); + DataSource dataSource = getDataSource(); + if (dataSource != null) { + // Make given DataSource available for the schema update. + configTimeDataSourceHolder.set(dataSource); + } + try { + HibernateTemplate hibernateTemplate = new HibernateTemplate(getSessionFactory()); + hibernateTemplate.setFlushMode(HibernateTemplate.FLUSH_NEVER); + hibernateTemplate.execute( + new HibernateCallback<Object>() { + public Object doInHibernate(Session session) throws HibernateException, SQLException { + Connection con = session.connection(); + Dialect dialect = Dialect.getDialect(getConfiguration().getProperties()); + DatabaseMetadata metadata = new DatabaseMetadata(con, dialect); + String[] sql = getConfiguration().generateSchemaUpdateScript(dialect, metadata); + executeSchemaScript(con, sql); + return null; + } + } + ); + } + finally { + if (dataSource != null) { + configTimeDataSourceHolder.set(null); + } + } + } + + /** + * Execute schema creation script, determined by the Configuration object + * used for creating the SessionFactory. A replacement for Hibernate's + * SchemaValidator class, to be invoked after application startup. + * <p>Fetch the LocalSessionFactoryBean itself rather than the exposed + * SessionFactory to be able to invoke this method, e.g. via + * <code>LocalSessionFactoryBean lsfb = (LocalSessionFactoryBean) ctx.getBean("&mySessionFactory");</code>. + * <p>Uses the SessionFactory that this bean generates for accessing a + * JDBC connection to perform the script. 
+ * @throws DataAccessException in case of script execution errors + * @see org.hibernate.cfg.Configuration#validateSchema + * @see org.hibernate.tool.hbm2ddl.SchemaValidator + */ + public void validateDatabaseSchema() throws DataAccessException { + logger.info("Validating database schema for Hibernate SessionFactory"); + DataSource dataSource = getDataSource(); + if (dataSource != null) { + // Make given DataSource available for the schema update. + configTimeDataSourceHolder.set(dataSource); + } + try { + HibernateTemplate hibernateTemplate = new HibernateTemplate(getSessionFactory()); + hibernateTemplate.setFlushMode(HibernateTemplate.FLUSH_NEVER); + hibernateTemplate.execute( + new HibernateCallback<Object>() { + public Object doInHibernate(Session session) throws HibernateException, SQLException { + Connection con = session.connection(); + Dialect dialect = Dialect.getDialect(getConfiguration().getProperties()); + DatabaseMetadata metadata = new DatabaseMetadata(con, dialect, false); + getConfiguration().validateSchema(dialect, metadata); + return null; + } + } + ); + } + finally { + if (dataSource != null) { + configTimeDataSourceHolder.set(null); + } + } + } + /** * Execute schema drop script, determined by the Configuration object * used for creating the SessionFactory. A replacement for Hibernate's @@ -923,8 +996,8 @@ public void destroy() throws HibernateException { * <p>Fetch the LocalSessionFactoryBean itself rather than the exposed * SessionFactory to be able to invoke this method, e.g. via * <code>LocalSessionFactoryBean lsfb = (LocalSessionFactoryBean) ctx.getBean("&mySessionFactory");</code>. - * <p>Uses the SessionFactory that this bean generates for accessing a JDBC - * connection to perform the script. + * <p>Uses the SessionFactory that this bean generates for accessing a + * JDBC connection to perform the script. 
* @throws org.springframework.dao.DataAccessException in case of script execution errors * @see org.hibernate.cfg.Configuration#generateDropSchemaScript * @see org.hibernate.tool.hbm2ddl.SchemaExport#drop @@ -952,59 +1025,38 @@ public Object doInHibernate(Session session) throws HibernateException, SQLExcep * <p>Fetch the LocalSessionFactoryBean itself rather than the exposed * SessionFactory to be able to invoke this method, e.g. via * <code>LocalSessionFactoryBean lsfb = (LocalSessionFactoryBean) ctx.getBean("&mySessionFactory");</code>. - * <p>Uses the SessionFactory that this bean generates for accessing a JDBC - * connection to perform the script. + * <p>Uses the SessionFactory that this bean generates for accessing a + * JDBC connection to perform the script. * @throws DataAccessException in case of script execution errors * @see org.hibernate.cfg.Configuration#generateSchemaCreationScript * @see org.hibernate.tool.hbm2ddl.SchemaExport#create */ public void createDatabaseSchema() throws DataAccessException { logger.info("Creating database schema for Hibernate SessionFactory"); - HibernateTemplate hibernateTemplate = new HibernateTemplate(getSessionFactory()); - hibernateTemplate.execute( - new HibernateCallback<Object>() { - public Object doInHibernate(Session session) throws HibernateException, SQLException { - Connection con = session.connection(); - Dialect dialect = Dialect.getDialect(getConfiguration().getProperties()); - String[] sql = getConfiguration().generateSchemaCreationScript(dialect); - executeSchemaScript(con, sql); - return null; - } - } - ); - } - - /** - * Execute schema update script, determined by the Configuration object - * used for creating the SessionFactory. A replacement for Hibernate's - * SchemaUpdate class, for automatically executing schema update scripts - * on application startup. Can also be invoked manually. 
- * <p>Fetch the LocalSessionFactoryBean itself rather than the exposed - * SessionFactory to be able to invoke this method, e.g. via - * <code>LocalSessionFactoryBean lsfb = (LocalSessionFactoryBean) ctx.getBean("&mySessionFactory");</code>. - * <p>Uses the SessionFactory that this bean generates for accessing a JDBC - * connection to perform the script. - * @throws DataAccessException in case of script execution errors - * @see #setSchemaUpdate - * @see org.hibernate.cfg.Configuration#generateSchemaUpdateScript - * @see org.hibernate.tool.hbm2ddl.SchemaUpdate - */ - public void updateDatabaseSchema() throws DataAccessException { - logger.info("Updating database schema for Hibernate SessionFactory"); - HibernateTemplate hibernateTemplate = new HibernateTemplate(getSessionFactory()); - hibernateTemplate.setFlushMode(HibernateTemplate.FLUSH_NEVER); - hibernateTemplate.execute( - new HibernateCallback<Object>() { - public Object doInHibernate(Session session) throws HibernateException, SQLException { - Connection con = session.connection(); - Dialect dialect = Dialect.getDialect(getConfiguration().getProperties()); - DatabaseMetadata metadata = new DatabaseMetadata(con, dialect); - String[] sql = getConfiguration().generateSchemaUpdateScript(dialect, metadata); - executeSchemaScript(con, sql); - return null; + DataSource dataSource = getDataSource(); + if (dataSource != null) { + // Make given DataSource available for the schema update. 
+ configTimeDataSourceHolder.set(dataSource); + } + try { + HibernateTemplate hibernateTemplate = new HibernateTemplate(getSessionFactory()); + hibernateTemplate.execute( + new HibernateCallback<Object>() { + public Object doInHibernate(Session session) throws HibernateException, SQLException { + Connection con = session.connection(); + Dialect dialect = Dialect.getDialect(getConfiguration().getProperties()); + String[] sql = getConfiguration().generateSchemaCreationScript(dialect); + executeSchemaScript(con, sql); + return null; + } } + ); + } + finally { + if (dataSource != null) { + configTimeDataSourceHolder.set(null); } - ); + } } /**
d9d05e0b057335f3d1c7923cbee9d37c3a528d01
drools
JBRULES-85--git-svn-id: https://svn.jboss.org/repos/labs/trunk/labs/jbossrules@3162 c60d74c8-e8f6-0310-9e8f-d4a2fc68ab70-
c
https://github.com/kiegroup/drools
diff --git a/drools-core/src/main/java/org/drools/base/ClassFieldExtractorFactory.java b/drools-core/src/main/java/org/drools/base/ClassFieldExtractorFactory.java index 55618a83975..1d1d3b26154 100644 --- a/drools-core/src/main/java/org/drools/base/ClassFieldExtractorFactory.java +++ b/drools-core/src/main/java/org/drools/base/ClassFieldExtractorFactory.java @@ -29,9 +29,9 @@ import org.drools.asm.Opcodes; /** - * + * This is an alternative to FieldAccessorGenerator. * @author Alexander Bagerman - * + * TODO: Use this instead of FieldAccessorGenerator - it should be able to be more efficient. */ public class ClassFieldExtractorFactory { @@ -55,7 +55,7 @@ public static BaseClassFieldExtractor getClassFieldExtractor(Class clazz, String typeName = getTypeName(fieldType); // generating byte array to create target class byte[] bytes = dump(originalClassName, className, getterName, - typeName, fieldType); + typeName, fieldType, clazz.isInterface()); // use bytes to get a class ByteArrayClassLoader classLoader = new ByteArrayClassLoader(Thread .currentThread().getContextClassLoader()); @@ -86,7 +86,7 @@ protected static Class getFieldType(Class clazz, String name) } private static byte[] dump(String originalClassName, String className, - String getterName, String typeName, Class fieldType) + String getterName, String typeName, Class fieldType, boolean isInterface) throws Exception { ClassWriter cw = new ClassWriter(false); @@ -142,8 +142,15 @@ private static byte[] dump(String originalClassName, String className, mv.visitInsn(Opcodes.DUP); mv.visitVarInsn(Opcodes.ALOAD, 1); mv.visitTypeInsn(Opcodes.CHECKCAST, originalClassName); - mv.visitMethodInsn(Opcodes.INVOKEVIRTUAL, originalClassName, - getterName, "()" + primitiveTypeTag); + + if (isInterface) { + mv.visitMethodInsn(Opcodes.INVOKEINTERFACE, originalClassName, + getterName, "()" + primitiveTypeTag); + + } else { + mv.visitMethodInsn(Opcodes.INVOKEVIRTUAL, originalClassName, + getterName, "()" + primitiveTypeTag); + 
} mv.visitMethodInsn(Opcodes.INVOKESPECIAL, typeName, "<init>", "(" + primitiveTypeTag + ")V"); mv.visitInsn(Opcodes.ARETURN); @@ -164,8 +171,13 @@ private static byte[] dump(String originalClassName, String className, mv.visitLineNumber(15, l0); mv.visitVarInsn(Opcodes.ALOAD, 1); mv.visitTypeInsn(Opcodes.CHECKCAST, originalClassName); - mv.visitMethodInsn(Opcodes.INVOKEVIRTUAL, originalClassName, - getterName, "()L" + typeName + ";"); + if (isInterface) { + mv.visitMethodInsn(Opcodes.INVOKEINTERFACE, originalClassName, + getterName, "()L" + typeName + ";"); + } else { + mv.visitMethodInsn(Opcodes.INVOKEVIRTUAL, originalClassName, + getterName, "()L" + typeName + ";"); + } mv.visitInsn(Opcodes.ARETURN); Label l1 = new Label(); mv.visitLabel(l1); diff --git a/drools-core/src/main/java/org/drools/util/asm/ClassFieldInspector.java b/drools-core/src/main/java/org/drools/util/asm/ClassFieldInspector.java index 2df93743468..fde561e9388 100644 --- a/drools-core/src/main/java/org/drools/util/asm/ClassFieldInspector.java +++ b/drools-core/src/main/java/org/drools/util/asm/ClassFieldInspector.java @@ -99,14 +99,14 @@ public MethodVisitor visitMethod(int access, String[] exceptions) { //only want public methods that start with 'get' or 'is' //and have no args, and return a value - if (access == Opcodes.ACC_PUBLIC) { + if ((access & Opcodes.ACC_PUBLIC) > 0) { if (desc.startsWith( "()" ) && ( name.startsWith("get") || name.startsWith("is") ) ) { try { Method method = clazz.getMethod(name, null); if (method.getReturnType() != void.class) { int fieldIndex = methodList.size(); - methodList.add(method); - addToMapping(method, fieldIndex); + methodList.add(method); + addToMapping(method.getName(), fieldIndex); } } catch (NoSuchMethodException e) { throw new IllegalStateException("Error in getting field access method."); @@ -186,8 +186,8 @@ public FieldVisitor visitField(int arg0, - private void addToMapping(Method method, int index) { - String name = method.getName(); + private void 
addToMapping(String name, int index) { + if (name.startsWith("is")) { this.fieldNameMap.put(calcFieldName( name, 2 ), new Integer(index)); } else { diff --git a/drools-core/src/main/java/org/drools/util/asm/FieldAccessorGenerator.java b/drools-core/src/main/java/org/drools/util/asm/FieldAccessorGenerator.java index 515298e54ba..0889a014136 100644 --- a/drools-core/src/main/java/org/drools/util/asm/FieldAccessorGenerator.java +++ b/drools-core/src/main/java/org/drools/util/asm/FieldAccessorGenerator.java @@ -112,14 +112,14 @@ public static byte[] generateClass(Method getters[], Class targetClass, String g doConstructor( cw ); - doMethods( cw, Type.getInternalName(targetClass), getters ); + doMethods( cw, Type.getInternalName(targetClass), getters, targetClass.isInterface()); cw.visitEnd(); return cw.toByteArray(); } - private static void doMethods(ClassWriter cw, String targetType, Method[] getters) { + private static void doMethods(ClassWriter cw, String targetType, Method[] getters, boolean isInterface) { @@ -162,12 +162,13 @@ private static void doMethods(ClassWriter cw, String targetType, Method[] getter //START switch items for (int i= 0; i < getters.length; i++) { + Method method = getters[i]; if (method.getReturnType().isPrimitive()) { doSwitchItemBoxed( mv, switchItems[i], - target, targetType, method.getName(), method.getReturnType()); + target, targetType, method.getName(), method.getReturnType(), isInterface); } else { - doSwitchItemObject(mv, switchItems[i], target, targetType, method.getName(), method.getReturnType()); + doSwitchItemObject(mv, switchItems[i], target, targetType, method.getName(), method.getReturnType(), isInterface); } } @@ -186,7 +187,7 @@ private static void doMethods(ClassWriter cw, String targetType, Method[] getter /** a switch item that requires autoboxing */ private static void doSwitchItemBoxed(MethodVisitor mv, Label switchItem, int target, String targetType, String targetMethod, - Class scalarType) { + Class scalarType, 
boolean isInterface) { Class boxType = null; boxType = getBoxType( scalarType ); String scalarDescriptor = Type.getDescriptor(scalarType); @@ -197,10 +198,19 @@ private static void doSwitchItemBoxed(MethodVisitor mv, Label switchItem, mv.visitInsn( DUP ); mv.visitVarInsn( ALOAD, target ); - mv.visitMethodInsn( INVOKEVIRTUAL, - targetType, - targetMethod, - "()" + scalarDescriptor ); + if (isInterface) { + mv.visitMethodInsn( INVOKEINTERFACE, + targetType, + targetMethod, + "()" + scalarDescriptor ); + + } else { + mv.visitMethodInsn( INVOKEVIRTUAL, + targetType, + targetMethod, + "()" + scalarDescriptor ); + + } mv.visitMethodInsn( INVOKESPECIAL, internalBoxName, "<init>", @@ -237,16 +247,24 @@ private static Class getBoxType(Class scalarType) { /** A regular switch item, which doesn't require boxing */ private static void doSwitchItemObject(MethodVisitor mv, Label label, - int target, String targetType, String targetMethod, Class returnClass) { + int target, String targetType, + String targetMethod, Class returnClass, boolean isInterface) { String returnType = "()" + Type.getDescriptor(returnClass); mv.visitLabel( label ); mv.visitVarInsn( ALOAD, target ); - mv.visitMethodInsn( INVOKEVIRTUAL, - targetType, - targetMethod, - returnType ); + if (isInterface) { + mv.visitMethodInsn( INVOKEINTERFACE, + targetType, + targetMethod, + returnType ); + } else { + mv.visitMethodInsn( INVOKEVIRTUAL, + targetType, + targetMethod, + returnType ); + } mv.visitInsn( ARETURN ); } diff --git a/drools-core/src/test/java/org/drools/base/ClassFieldExtractorFactoryTest.java b/drools-core/src/test/java/org/drools/base/ClassFieldExtractorFactoryTest.java new file mode 100644 index 00000000000..9317c4b2997 --- /dev/null +++ b/drools-core/src/test/java/org/drools/base/ClassFieldExtractorFactoryTest.java @@ -0,0 +1,39 @@ +package org.drools.base; + +import org.drools.spi.FieldExtractor; +import org.drools.util.asm.TestAbstract; +import org.drools.util.asm.TestAbstractImpl; +import 
org.drools.util.asm.TestInterface; +import org.drools.util.asm.TestInterfaceImpl; + +import junit.framework.TestCase; + +public class ClassFieldExtractorFactoryTest extends TestCase { + + public void testIt() throws Exception { + FieldExtractor ex = ClassFieldExtractorFactory.getClassFieldExtractor( TestBean.class, "name" ); + assertEquals(0, ex.getIndex()); + assertEquals("michael", ex.getValue( new TestBean() )); + ex = ClassFieldExtractorFactory.getClassFieldExtractor( TestBean.class, "age" ); + assertEquals(1, ex.getIndex()); + assertEquals(new Integer(42), ex.getValue( new TestBean() )); + + } + + public void testInterface() throws Exception { + FieldExtractor ex = ClassFieldExtractorFactory.getClassFieldExtractor( TestInterface.class, "something" ); + assertEquals(0, ex.getIndex()); + assertEquals("foo", ex.getValue( new TestInterfaceImpl() )); + } + + public void testAbstract() throws Exception { + FieldExtractor ex = ClassFieldExtractorFactory.getClassFieldExtractor( TestAbstract.class, "something" ); + assertEquals(0, ex.getIndex()); + assertEquals("foo", ex.getValue( new TestAbstractImpl() )); + } + + + + +} + diff --git a/drools-core/src/test/java/org/drools/base/TestBean.java b/drools-core/src/test/java/org/drools/base/TestBean.java new file mode 100644 index 00000000000..2c1bf9dc406 --- /dev/null +++ b/drools-core/src/test/java/org/drools/base/TestBean.java @@ -0,0 +1,14 @@ +package org.drools.base; + +public class TestBean { + private String name = "michael"; + private int age = 42; + + public String getName() { + return name; + } + + public int getAge() { + return age; + } +} diff --git a/drools-core/src/test/java/org/drools/util/asm/ClassFieldInspectorTest.java b/drools-core/src/test/java/org/drools/util/asm/ClassFieldInspectorTest.java index adb768215a8..c7de4fe3fd9 100644 --- a/drools-core/src/test/java/org/drools/util/asm/ClassFieldInspectorTest.java +++ b/drools-core/src/test/java/org/drools/util/asm/ClassFieldInspectorTest.java @@ -23,6 +23,37 
@@ public void testIt() throws Exception { } + + public void testInterface() throws Exception { + ClassFieldInspector ext = new ClassFieldInspector( TestInterface.class ); + assertEquals(2, ext.getPropertyGetters().size()); + assertEquals("getSomething", ((Method) ext.getPropertyGetters().get(0)).getName()); + assertEquals("getAnother", ((Method) ext.getPropertyGetters().get(1)).getName()); + + + + Map names = ext.getFieldNames(); + assertNotNull(names); + assertEquals(2, names.size()); + assertEquals(0, ((Integer)names.get("something")).intValue()); + assertEquals(1, ((Integer)names.get("another")).intValue()); + + } + + public void testAbstract() throws Exception { + ClassFieldInspector ext = new ClassFieldInspector( TestAbstract.class ); + assertEquals(2, ext.getPropertyGetters().size()); + assertEquals("getSomething", ((Method) ext.getPropertyGetters().get(0)).getName()); + assertEquals("getAnother", ((Method) ext.getPropertyGetters().get(1)).getName()); + + Map names = ext.getFieldNames(); + assertNotNull(names); + assertEquals(2, names.size()); + assertEquals(0, ((Integer)names.get("something")).intValue()); + assertEquals(1, ((Integer)names.get("another")).intValue()); + + } + static class Person { private boolean happy; private String name; diff --git a/drools-core/src/test/java/org/drools/util/asm/FieldAccessorGeneratorTest.java b/drools-core/src/test/java/org/drools/util/asm/FieldAccessorGeneratorTest.java index 1d780e0c702..442fea486b7 100644 --- a/drools-core/src/test/java/org/drools/util/asm/FieldAccessorGeneratorTest.java +++ b/drools-core/src/test/java/org/drools/util/asm/FieldAccessorGeneratorTest.java @@ -46,4 +46,45 @@ public void testAnother() throws Exception { assertEquals(ac, ac2); } + public void testInterface() throws Exception { + FieldAccessorGenerator gen = FieldAccessorGenerator.getInstance(); + FieldAccessorMap map = gen.newInstanceFor(TestInterface.class); + FieldAccessor ac = map.getFieldAccessor(); + assertNotNull(ac); + + 
TestInterface obj = new TestInterfaceImpl(); + + assertEquals("foo", (String)ac.getFieldByIndex(obj, 0)); + assertEquals(42, ((Integer)ac.getFieldByIndex(obj, 1)).intValue()); + + Integer index = (Integer) map.getFieldNameMap().get("something"); + assertEquals(0, index.intValue()); + + index = (Integer) map.getFieldNameMap().get("another"); + assertEquals(1, index.intValue()); + + + } + + public void testAbstract() throws Exception { + FieldAccessorGenerator gen = FieldAccessorGenerator.getInstance(); + FieldAccessorMap map = gen.newInstanceFor(TestAbstract.class); + FieldAccessor ac = map.getFieldAccessor(); + assertNotNull(ac); + + TestAbstract obj = new TestAbstractImpl(); + + assertEquals(42, ((Integer)ac.getFieldByIndex(obj, 1)).intValue()); + assertEquals("foo", (String)ac.getFieldByIndex(obj, 0)); + + + Integer index = (Integer) map.getFieldNameMap().get("something"); + assertEquals(0, index.intValue()); + + index = (Integer) map.getFieldNameMap().get("another"); + assertEquals(1, index.intValue()); + + + } + } diff --git a/drools-core/src/test/java/org/drools/util/asm/TestAbstract.java b/drools-core/src/test/java/org/drools/util/asm/TestAbstract.java new file mode 100644 index 00000000000..a603dacc1bc --- /dev/null +++ b/drools-core/src/test/java/org/drools/util/asm/TestAbstract.java @@ -0,0 +1,10 @@ +package org.drools.util.asm; + +public abstract class TestAbstract { + + public abstract String getSomething(); + public int getAnother() { + return 42; + } + +} diff --git a/drools-core/src/test/java/org/drools/util/asm/TestAbstractImpl.java b/drools-core/src/test/java/org/drools/util/asm/TestAbstractImpl.java new file mode 100644 index 00000000000..2dad1e6f570 --- /dev/null +++ b/drools-core/src/test/java/org/drools/util/asm/TestAbstractImpl.java @@ -0,0 +1,9 @@ +package org.drools.util.asm; + +public class TestAbstractImpl extends TestAbstract { + + public String getSomething() { + return "foo"; + } + +} diff --git 
a/drools-core/src/test/java/org/drools/util/asm/TestInterface.java b/drools-core/src/test/java/org/drools/util/asm/TestInterface.java new file mode 100644 index 00000000000..c61d2fbd7a7 --- /dev/null +++ b/drools-core/src/test/java/org/drools/util/asm/TestInterface.java @@ -0,0 +1,8 @@ +package org.drools.util.asm; + +public interface TestInterface { + + public String getSomething(); + public int getAnother(); + +} diff --git a/drools-core/src/test/java/org/drools/util/asm/TestInterfaceImpl.java b/drools-core/src/test/java/org/drools/util/asm/TestInterfaceImpl.java new file mode 100644 index 00000000000..58fa21c4381 --- /dev/null +++ b/drools-core/src/test/java/org/drools/util/asm/TestInterfaceImpl.java @@ -0,0 +1,15 @@ +package org.drools.util.asm; + +public class TestInterfaceImpl + implements + TestInterface { + + public String getSomething() { + return "foo"; + } + + public int getAnother() { + return 42; + } + +}
d66956b1177df05de05644f53e33509d790b57bf
spring-framework
Changed use of AssertThrows to @Test(expected =- ...)--
p
https://github.com/spring-projects/spring-framework
diff --git a/org.springframework.testsuite/src/test/java/org/springframework/jdbc/core/namedparam/NamedParameterUtilsTests.java b/org.springframework.testsuite/src/test/java/org/springframework/jdbc/core/namedparam/NamedParameterUtilsTests.java index ccecc08a64e4..16382eb95497 100644 --- a/org.springframework.testsuite/src/test/java/org/springframework/jdbc/core/namedparam/NamedParameterUtilsTests.java +++ b/org.springframework.testsuite/src/test/java/org/springframework/jdbc/core/namedparam/NamedParameterUtilsTests.java @@ -20,19 +20,20 @@ import java.util.HashMap; import java.util.Map; -import junit.framework.TestCase; +import static org.junit.Assert.*; +import org.junit.Test; import org.springframework.dao.InvalidDataAccessApiUsageException; -import org.springframework.test.AssertThrows; /** * @author Thomas Risberg * @author Juergen Hoeller * @author Rick Evans */ -public class NamedParameterUtilsTests extends TestCase { +public class NamedParameterUtilsTests { - public void testParseSql() { + @Test + public void parseSql() { String sql = "xxx :a yyyy :b :c :a zzzzz"; ParsedSql psql = NamedParameterUtils.parseSqlStatement(sql); assertEquals("xxx ? yyyy ? ? ? zzzzz", NamedParameterUtils.substituteNamedParameters(psql, null)); @@ -57,21 +58,24 @@ public void testParseSql() { } - public void testSubstituteNamedParameters() { + @Test + public void substituteNamedParameters() { MapSqlParameterSource namedParams = new MapSqlParameterSource(); namedParams.addValue("a", "a").addValue("b", "b").addValue("c", "c"); assertEquals("xxx ? ? ?", NamedParameterUtils.substituteNamedParameters("xxx :a :b :c", namedParams)); - assertEquals("xxx ? ? ? xx ? ?", NamedParameterUtils.substituteNamedParameters("xxx :a :b :c xx :a :a", namedParams)); + assertEquals("xxx ? ? ? xx ? 
?", + NamedParameterUtils.substituteNamedParameters("xxx :a :b :c xx :a :a", namedParams)); } - public void testConvertParamMapToArray() { - Map paramMap = new HashMap(); + @Test + public void convertParamMapToArray() { + Map<String, String> paramMap = new HashMap<String, String>(); paramMap.put("a", "a"); paramMap.put("b", "b"); paramMap.put("c", "c"); - assertTrue(3 == NamedParameterUtils.buildValueArray("xxx :a :b :c", paramMap).length); - assertTrue(5 == NamedParameterUtils.buildValueArray("xxx :a :b :c xx :a :b", paramMap).length); - assertTrue(5 == NamedParameterUtils.buildValueArray("xxx :a :a :a xx :a :a", paramMap).length); + assertSame(3, NamedParameterUtils.buildValueArray("xxx :a :b :c", paramMap).length); + assertSame(5, NamedParameterUtils.buildValueArray("xxx :a :b :c xx :a :b", paramMap).length); + assertSame(5, NamedParameterUtils.buildValueArray("xxx :a :a :a xx :a :a", paramMap).length); assertEquals("b", NamedParameterUtils.buildValueArray("xxx :a :b :c xx :a :b", paramMap)[4]); try { NamedParameterUtils.buildValueArray("xxx :a :b ?", paramMap); @@ -81,31 +85,35 @@ public void testConvertParamMapToArray() { } } - public void testConvertTypeMapToArray() { + @Test + public void convertTypeMapToArray() { MapSqlParameterSource namedParams = new MapSqlParameterSource(); namedParams.addValue("a", "a", 1).addValue("b", "b", 2).addValue("c", "c", 3); - assertTrue(3 == NamedParameterUtils.buildSqlTypeArray(NamedParameterUtils.parseSqlStatement("xxx :a :b :c"), namedParams).length); - assertTrue(5 == NamedParameterUtils.buildSqlTypeArray(NamedParameterUtils.parseSqlStatement("xxx :a :b :c xx :a :b"), namedParams).length); - assertTrue(5 == NamedParameterUtils.buildSqlTypeArray(NamedParameterUtils.parseSqlStatement("xxx :a :a :a xx :a :a"), namedParams).length); - assertEquals(2, NamedParameterUtils.buildSqlTypeArray(NamedParameterUtils.parseSqlStatement("xxx :a :b :c xx :a :b"), namedParams)[4]); + assertSame(3, NamedParameterUtils + 
.buildSqlTypeArray(NamedParameterUtils.parseSqlStatement("xxx :a :b :c"), namedParams).length); + assertSame(5, NamedParameterUtils + .buildSqlTypeArray(NamedParameterUtils.parseSqlStatement("xxx :a :b :c xx :a :b"), namedParams).length); + assertSame(5, NamedParameterUtils + .buildSqlTypeArray(NamedParameterUtils.parseSqlStatement("xxx :a :a :a xx :a :a"), namedParams).length); + assertEquals(2, NamedParameterUtils + .buildSqlTypeArray(NamedParameterUtils.parseSqlStatement("xxx :a :b :c xx :a :b"), namedParams)[4]); } - public void testBuildValueArrayWithMissingParameterValue() throws Exception { - new AssertThrows(InvalidDataAccessApiUsageException.class) { - public void test() throws Exception { - String sql = "select count(0) from foo where id = :id"; - NamedParameterUtils.buildValueArray(sql, new HashMap()); - } - }.runTest(); + @Test(expected = InvalidDataAccessApiUsageException.class) + public void buildValueArrayWithMissingParameterValue() throws Exception { + String sql = "select count(0) from foo where id = :id"; + NamedParameterUtils.buildValueArray(sql, new HashMap()); } - public void testSubstituteNamedParametersWithStringContainingQuotes() throws Exception { + @Test + public void substituteNamedParametersWithStringContainingQuotes() throws Exception { String expectedSql = "select 'first name' from artists where id = ? and quote = 'exsqueeze me?'"; String sql = "select 'first name' from artists where id = :id and quote = 'exsqueeze me?'"; String newSql = NamedParameterUtils.substituteNamedParameters(sql, new MapSqlParameterSource()); assertEquals(expectedSql, newSql); } + @Test public void testParseSqlStatementWithStringContainingQuotes() throws Exception { String expectedSql = "select 'first name' from artists where id = ? 
and quote = 'exsqueeze me?'"; String sql = "select 'first name' from artists where id = :id and quote = 'exsqueeze me?'"; @@ -116,7 +124,8 @@ public void testParseSqlStatementWithStringContainingQuotes() throws Exception { /* * SPR-4789 */ - public void testParseSqlContainingComments() { + @Test + public void parseSqlContainingComments() { String sql1 = "/*+ HINT */ xxx /* comment ? */ :a yyyy :b :c :a zzzzz -- :xx XX\n"; ParsedSql psql1 = NamedParameterUtils.parseSqlStatement(sql1); assertEquals("/*+ HINT */ xxx /* comment ? */ ? yyyy ? ? ? zzzzz -- :xx XX\n", @@ -152,7 +161,8 @@ public void testParseSqlContainingComments() { /* * SPR-4612 */ - public void testParseSqlStatementWithPostgresCasting() throws Exception { + @Test + public void parseSqlStatementWithPostgresCasting() throws Exception { String expectedSql = "select 'first name' from artists where id = ? and birth_date=?::timestamp"; String sql = "select 'first name' from artists where id = :id and birth_date=:birthDate::timestamp"; ParsedSql parsedSql = NamedParameterUtils.parseSqlStatement(sql); @@ -162,7 +172,8 @@ public void testParseSqlStatementWithPostgresCasting() throws Exception { /* * SPR-2544 */ - public void testParseSqlStatementWithLogicalAnd() { + @Test + public void parseSqlStatementWithLogicalAnd() { String expectedSql = "xxx & yyyy"; ParsedSql parsedSql = NamedParameterUtils.parseSqlStatement(expectedSql); assertEquals(expectedSql, NamedParameterUtils.substituteNamedParameters(parsedSql, null)); @@ -171,7 +182,8 @@ public void testParseSqlStatementWithLogicalAnd() { /* * SPR-2544 */ - public void testSubstituteNamedParametersWithLogicalAnd() throws Exception { + @Test + public void substituteNamedParametersWithLogicalAnd() throws Exception { String expectedSql = "xxx & yyyy"; String newSql = NamedParameterUtils.substituteNamedParameters(expectedSql, new MapSqlParameterSource()); assertEquals(expectedSql, newSql); @@ -180,7 +192,8 @@ public void testSubstituteNamedParametersWithLogicalAnd() 
throws Exception { /* * SPR-3173 */ - public void testVariableAssignmentOperator() throws Exception { + @Test + public void variableAssignmentOperator() throws Exception { String expectedSql = "x := 1"; String newSql = NamedParameterUtils.substituteNamedParameters(expectedSql, new MapSqlParameterSource()); assertEquals(expectedSql, newSql);
0d456d2091f5f8dd3a954ab64e7b78a08246f892
camel
minor tidy up of test case--git-svn-id: https://svn.apache.org/repos/asf/activemq/camel/trunk@654441 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/camel
diff --git a/camel-core/src/test/java/org/apache/camel/component/file/FileRouteTest.java b/camel-core/src/test/java/org/apache/camel/component/file/FileRouteTest.java index dfbc0dcad536e..c92918e36c94e 100644 --- a/camel-core/src/test/java/org/apache/camel/component/file/FileRouteTest.java +++ b/camel-core/src/test/java/org/apache/camel/component/file/FileRouteTest.java @@ -28,13 +28,13 @@ public class FileRouteTest extends ContextTestSupport { protected String uri = "file:target/test-default-inbox"; public void testFileRoute() throws Exception { - MockEndpoint result = resolveMandatoryEndpoint("mock:result", MockEndpoint.class); + MockEndpoint result = getMockEndpoint("mock:result"); result.expectedBodiesReceived(expectedBody); result.setResultWaitTime(5000); template.sendBodyAndHeader(uri, expectedBody, "cheese", 123); - result.assertIsSatisfied(); + assertMockEndpointsSatisifed(); } @Override
1945713fd50d0e6740df94fb63358a0939105ab5
ReactiveX-RxJava
Fix unit test after last() changed behavior--
c
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/test/java/rx/operators/OperationTakeWhileTest.java b/rxjava-core/src/test/java/rx/operators/OperationTakeWhileTest.java index 08c6d8e83f..830dd59b33 100644 --- a/rxjava-core/src/test/java/rx/operators/OperationTakeWhileTest.java +++ b/rxjava-core/src/test/java/rx/operators/OperationTakeWhileTest.java @@ -119,7 +119,7 @@ public Subscription onSubscribe(Observer<? super String> observer) { public Boolean call(String s) { return false; } - })).toBlockingObservable().last(); + })).toBlockingObservable().lastOrDefault(""); } @Test
74fdcdcf5f41f420cf46f06ecfebce84ec8f36eb
camel
CAMEL-656: Polished dataset and timer component.- Added @deprecation to not used method. Removed unused imports.--git-svn-id: https://svn.apache.org/repos/asf/activemq/camel/trunk@712497 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/camel
diff --git a/camel-core/src/main/java/org/apache/camel/component/dataset/DataSet.java b/camel-core/src/main/java/org/apache/camel/component/dataset/DataSet.java index ea6509d94af44..b6333e8b7994c 100644 --- a/camel-core/src/main/java/org/apache/camel/component/dataset/DataSet.java +++ b/camel-core/src/main/java/org/apache/camel/component/dataset/DataSet.java @@ -29,8 +29,6 @@ public interface DataSet { /** * Populates a message exchange when using the DataSet as a source of messages - * - * @param exchange */ void populateMessage(Exchange exchange, long messageIndex) throws Exception; diff --git a/camel-core/src/main/java/org/apache/camel/component/dataset/DataSetConsumer.java b/camel-core/src/main/java/org/apache/camel/component/dataset/DataSetConsumer.java index 7e46571866b64..e90e5e7cce751 100644 --- a/camel-core/src/main/java/org/apache/camel/component/dataset/DataSetConsumer.java +++ b/camel-core/src/main/java/org/apache/camel/component/dataset/DataSetConsumer.java @@ -80,7 +80,7 @@ protected void sendMessages(long startIndex, long endIndex) { } } } catch (Exception e) { - LOG.error(e); + handleException(e); } } diff --git a/camel-core/src/main/java/org/apache/camel/component/timer/TimerConsumer.java b/camel-core/src/main/java/org/apache/camel/component/timer/TimerConsumer.java index 7ecc6b080f3a3..e20923af74b96 100644 --- a/camel-core/src/main/java/org/apache/camel/component/timer/TimerConsumer.java +++ b/camel-core/src/main/java/org/apache/camel/component/timer/TimerConsumer.java @@ -101,7 +101,7 @@ protected void sendTimerExchange() { try { getProcessor().process(exchange); } catch (Exception e) { - getExceptionHandler().handleException(e); + handleException(e); } } } diff --git a/camel-core/src/main/java/org/apache/camel/model/AggregatorType.java b/camel-core/src/main/java/org/apache/camel/model/AggregatorType.java index 57748c2ff1248..de74b3770d39a 100644 --- a/camel-core/src/main/java/org/apache/camel/model/AggregatorType.java +++ 
b/camel-core/src/main/java/org/apache/camel/model/AggregatorType.java @@ -34,9 +34,7 @@ import org.apache.camel.Predicate; import org.apache.camel.Processor; import org.apache.camel.Route; -import org.apache.camel.RuntimeCamelException; import org.apache.camel.builder.ExpressionClause; -import org.apache.camel.builder.xml.DefaultNamespaceContext; import org.apache.camel.model.language.ExpressionType; import org.apache.camel.processor.Aggregator; import org.apache.camel.processor.FilterProcessor; @@ -368,8 +366,11 @@ public List<ProcessorType<?>> getOutputs() { public void setOutputs(List<ProcessorType<?>> outputs) { this.outputs = outputs; - } + } + /** + * @deprecated not used. Will be removed in Camel 2.0. + */ protected FilterProcessor createFilterProcessor(RouteContext routeContext) throws Exception { Processor childProcessor = routeContext.createProcessor(this); return new FilterProcessor(getExpression().createPredicate(routeContext), childProcessor);
b9e1d6c698b589368f4a155134e8b2ba00608dc8
hbase
HBASE-3653 : Parallelize Server Requests on HBase- Client--git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1082648 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/hbase
diff --git a/CHANGES.txt b/CHANGES.txt index 21c60235fb3a..70366e5a2e90 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -77,6 +77,7 @@ Release 0.91.0 - Unreleased Export (Subbu M Iyer via Stack) HBASE-3440 Clean out load_table.rb and make sure all roads lead to completebulkload tool (Vidhyashankar Venkataraman via Stack) + HBASE-3653 Parallelize Server Requests on HBase Client TASK HBASE-3559 Move report of split to master OFF the heartbeat channel diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 3d2c5ea8b89d..644de1fa6bd3 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -240,6 +240,7 @@ static class HConnectionImplementation implements HConnection { private final Map<String, HRegionInterface> servers = new ConcurrentHashMap<String, HRegionInterface>(); + private final ConcurrentHashMap<String, String> connectionLock = new ConcurrentHashMap<String, String>(); /** * Map of table to table {@link HRegionLocation}s. The table key is made @@ -941,21 +942,30 @@ public HRegionInterface getHRegionConnection( getMaster(); } HRegionInterface server; - synchronized (this.servers) { - // See if we already have a connection - server = this.servers.get(regionServer.toString()); - if (server == null) { // Get a connection - try { - server = (HRegionInterface)HBaseRPC.waitForProxy( - serverInterfaceClass, HRegionInterface.VERSION, - regionServer.getInetSocketAddress(), this.conf, - this.maxRPCAttempts, this.rpcTimeout, this.rpcTimeout); - } catch (RemoteException e) { - LOG.warn("RemoteException connecting to RS", e); - // Throw what the RemoteException was carrying. 
- throw RemoteExceptionHandler.decodeRemoteException(e); + String rsName = regionServer.toString(); + // See if we already have a connection (common case) + server = this.servers.get(rsName); + if (server == null) { + // create a unique lock for this RS (if necessary) + this.connectionLock.putIfAbsent(rsName, rsName); + // get the RS lock + synchronized (this.connectionLock.get(rsName)) { + // do one more lookup in case we were stalled above + server = this.servers.get(rsName); + if (server == null) { + try { + // definitely a cache miss. establish an RPC for this RS + server = (HRegionInterface) HBaseRPC.waitForProxy( + serverInterfaceClass, HRegionInterface.VERSION, + regionServer.getInetSocketAddress(), this.conf, + this.maxRPCAttempts, this.rpcTimeout, this.rpcTimeout); + this.servers.put(rsName, server); + } catch (RemoteException e) { + LOG.warn("RemoteException connecting to RS", e); + // Throw what the RemoteException was carrying. + throw RemoteExceptionHandler.decodeRemoteException(e); + } } - this.servers.put(regionServer.toString(), server); } } return server;
431ccf96d2b8cfbea44518bc2ec9fa6937b421da
intellij-community
ui: use setter--
p
https://github.com/JetBrains/intellij-community
diff --git a/platform/editor-ui-api/src/com/intellij/openapi/actionSystem/AnAction.java b/platform/editor-ui-api/src/com/intellij/openapi/actionSystem/AnAction.java index dad449da8ba82..4d1a1ca35bcfe 100644 --- a/platform/editor-ui-api/src/com/intellij/openapi/actionSystem/AnAction.java +++ b/platform/editor-ui-api/src/com/intellij/openapi/actionSystem/AnAction.java @@ -16,7 +16,6 @@ package com.intellij.openapi.actionSystem; import com.intellij.openapi.Disposable; -import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.project.DumbAware; import com.intellij.openapi.project.PossiblyDumbAware; @@ -202,7 +201,7 @@ public final void copyFrom(@NotNull AnAction sourceAction){ } public final void copyShortcutFrom(@NotNull AnAction sourceAction) { - myShortcutSet = sourceAction.myShortcutSet; + setShortcutSet(sourceAction.getShortcutSet()); }
4e446e768912ca08b30d74b3c570ddb336b837a6
hbase
HBASE-1869 IndexedTable delete fails when used in- conjunction with RowLock()--git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@819060 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hbase
diff --git a/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegion.java b/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegion.java index 4d9563bc567b..fcd12873ccc5 100644 --- a/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegion.java +++ b/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegion.java @@ -244,7 +244,7 @@ public void delete(Delete delete, final Integer lockid, boolean writeToWAL) } } - Result oldRow = super.get(get, null); + Result oldRow = super.get(get, lockid); SortedMap<byte[], byte[]> oldColumnValues = convertToValueMap(oldRow); @@ -255,7 +255,7 @@ public void delete(Delete delete, final Integer lockid, boolean writeToWAL) // Handle if there is still a version visible. if (delete.getTimeStamp() != HConstants.LATEST_TIMESTAMP) { get.setTimeRange(1, delete.getTimeStamp()); - oldRow = super.get(get, null); + oldRow = super.get(get, lockid); SortedMap<byte[], byte[]> currentColumnValues = convertToValueMap(oldRow); for (IndexSpecification indexSpec : getIndexes()) { diff --git a/src/contrib/transactional/src/test/org/apache/hadoop/hbase/client/tableindexed/TestIndexedTable.java b/src/contrib/transactional/src/test/org/apache/hadoop/hbase/client/tableindexed/TestIndexedTable.java index 0cd440acd141..d7c1b15c020e 100644 --- a/src/contrib/transactional/src/test/org/apache/hadoop/hbase/client/tableindexed/TestIndexedTable.java +++ b/src/contrib/transactional/src/test/org/apache/hadoop/hbase/client/tableindexed/TestIndexedTable.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.RowLock; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.tableindexed.IndexedRegionServer; import org.apache.hadoop.hbase.util.Bytes; @@ -144,6 +145,24 @@ 
private void assertRowUpdated(int updatedRow, int expectedRowValue) Bytes.toString(persistedRowValue)); } + private void assertRowDeleted(int numRowsExpected) + throws IndexNotFoundException, IOException { + // Check the size of the primary table + ResultScanner scanner = table.getScanner(new Scan()); + int numRows = 0; + for (Result rowResult : scanner) { + byte[] colA = rowResult.getValue(FAMILY, QUAL_A); + LOG.info("primary scan : row [" + Bytes.toString(rowResult.getRow()) + + "] value [" + Bytes.toString(colA) + "]"); + numRows++; + } + scanner.close(); + Assert.assertEquals(numRowsExpected, numRows); + + // Check the size of the index tables + assertRowsInOrder(numRowsExpected); + } + private void updateRow(int row, int newValue) throws IOException { Put update = new Put(PerformanceEvaluation.format(row)); byte[] valueA = PerformanceEvaluation.format(newValue); @@ -220,4 +239,15 @@ public void testLockedRowUpdateNoAutoFlush() throws IOException { updateLockedRowNoAutoFlush(row, value); assertRowUpdated(row, value); } + + public void testLockedRowDelete() throws IOException { + writeInitalRows(); + // Delete the first row; + byte[] row = PerformanceEvaluation.format(0); + RowLock lock = table.lockRow(row); + table.delete(new Delete(row, HConstants.LATEST_TIMESTAMP, lock)); + table.unlockRow(lock); + + assertRowDeleted(NUM_ROWS - 1); + } }
538d245ba9744f57d66724982db4850e6d3ba226
ReactiveX-RxJava
Implement a cached thread scheduler using event- loops--
a
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/main/java/rx/schedulers/CachedThreadScheduler.java b/rxjava-core/src/main/java/rx/schedulers/CachedThreadScheduler.java new file mode 100644 index 0000000000..92dd486d92 --- /dev/null +++ b/rxjava-core/src/main/java/rx/schedulers/CachedThreadScheduler.java @@ -0,0 +1,180 @@ +/** + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package rx.schedulers; + +import rx.Scheduler; +import rx.Subscription; +import rx.functions.Action0; +import rx.subscriptions.CompositeSubscription; +import rx.subscriptions.Subscriptions; + +import java.util.Iterator; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +/* package */class CachedThreadScheduler extends Scheduler { + private static final class CachedWorkerPool { + final ThreadFactory factory = new ThreadFactory() { + final AtomicInteger counter = new AtomicInteger(); + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(r, "RxCachedThreadScheduler-" + counter.incrementAndGet()); + t.setDaemon(true); + return t; + } + }; + + private final long keepAliveTime; + private final ConcurrentLinkedQueue<PoolWorker> expiringQueue; + private final ScheduledExecutorService evictExpiredWorkerExecutor; + + CachedWorkerPool(long keepAliveTime, TimeUnit unit) { + this.keepAliveTime = unit.toNanos(keepAliveTime); + this.expiringQueue = new 
ConcurrentLinkedQueue<PoolWorker>(); + + evictExpiredWorkerExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() { + final AtomicInteger counter = new AtomicInteger(); + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(r, "RxCachedWorkerPoolEvictor-" + counter.incrementAndGet()); + t.setDaemon(true); + return t; + } + }); + evictExpiredWorkerExecutor.scheduleWithFixedDelay( + new Runnable() { + @Override + public void run() { + evictExpiredWorkers(); + } + }, this.keepAliveTime, this.keepAliveTime, TimeUnit.NANOSECONDS + ); + } + + private static CachedWorkerPool INSTANCE = new CachedWorkerPool( + 60L, TimeUnit.SECONDS + ); + + PoolWorker get() { + while (!expiringQueue.isEmpty()) { + PoolWorker poolWorker = expiringQueue.poll(); + if (poolWorker != null) { + return poolWorker; + } + } + + // No cached worker found, so create a new one. + return new PoolWorker(factory); + } + + void release(PoolWorker poolWorker) { + // Refresh expire time before putting worker back in pool + poolWorker.setExpirationTime(now() + keepAliveTime); + + expiringQueue.add(poolWorker); + } + + void evictExpiredWorkers() { + if (!expiringQueue.isEmpty()) { + long currentTimestamp = now(); + + Iterator<PoolWorker> poolWorkerIterator = expiringQueue.iterator(); + while (poolWorkerIterator.hasNext()) { + PoolWorker poolWorker = poolWorkerIterator.next(); + if (poolWorker.getExpirationTime() <= currentTimestamp) { + poolWorkerIterator.remove(); + poolWorker.unsubscribe(); + } else { + // Queue is ordered with the worker that will expire first in the beginning, so when we + // find a non-expired worker we can stop evicting. 
+ break; + } + } + } + } + + long now() { + return System.nanoTime(); + } + } + + @Override + public Worker createWorker() { + return new EventLoopWorker(CachedWorkerPool.INSTANCE.get()); + } + + private static class EventLoopWorker extends Scheduler.Worker { + private final CompositeSubscription innerSubscription = new CompositeSubscription(); + private final PoolWorker poolWorker; + private final AtomicBoolean releasePoolWorkerOnce = new AtomicBoolean(false); + + EventLoopWorker(PoolWorker poolWorker) { + this.poolWorker = poolWorker; + } + + @Override + public void unsubscribe() { + if (releasePoolWorkerOnce.compareAndSet(false, true)) { + // unsubscribe should be idempotent, so only do this once + CachedWorkerPool.INSTANCE.release(poolWorker); + } + innerSubscription.unsubscribe(); + } + + @Override + public boolean isUnsubscribed() { + return innerSubscription.isUnsubscribed(); + } + + @Override + public Subscription schedule(Action0 action) { + return schedule(action, 0, null); + } + + @Override + public Subscription schedule(Action0 action, long delayTime, TimeUnit unit) { + if (innerSubscription.isUnsubscribed()) { + // don't schedule, we are unsubscribed + return Subscriptions.empty(); + } + + NewThreadScheduler.NewThreadWorker.ScheduledAction s = poolWorker.scheduleActual(action, delayTime, unit); + innerSubscription.add(s); + s.addParent(innerSubscription); + return s; + } + } + + private static final class PoolWorker extends NewThreadScheduler.NewThreadWorker { + private long expirationTime; + + PoolWorker(ThreadFactory threadFactory) { + super(threadFactory); + this.expirationTime = 0L; + } + + public long getExpirationTime() { + return expirationTime; + } + + public void setExpirationTime(long expirationTime) { + this.expirationTime = expirationTime; + } + } +} diff --git a/rxjava-core/src/main/java/rx/schedulers/Schedulers.java b/rxjava-core/src/main/java/rx/schedulers/Schedulers.java index d7096b7751..53bed75151 100644 --- 
a/rxjava-core/src/main/java/rx/schedulers/Schedulers.java +++ b/rxjava-core/src/main/java/rx/schedulers/Schedulers.java @@ -15,11 +15,11 @@ */ package rx.schedulers; -import java.util.concurrent.Executor; - import rx.Scheduler; import rx.plugins.RxJavaPlugins; +import java.util.concurrent.Executor; + /** * Static factory methods for creating Schedulers. */ @@ -43,7 +43,7 @@ private Schedulers() { if (io != null) { ioScheduler = io; } else { - ioScheduler = NewThreadScheduler.instance(); // defaults to new thread + ioScheduler = new CachedThreadScheduler(); } Scheduler nt = RxJavaPlugins.getInstance().getDefaultSchedulers().getNewThreadScheduler(); diff --git a/rxjava-core/src/test/java/rx/schedulers/CachedThreadSchedulerTest.java b/rxjava-core/src/test/java/rx/schedulers/CachedThreadSchedulerTest.java new file mode 100644 index 0000000000..f9f8ca161c --- /dev/null +++ b/rxjava-core/src/test/java/rx/schedulers/CachedThreadSchedulerTest.java @@ -0,0 +1,60 @@ +/** + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package rx.schedulers; + +import org.junit.Test; +import rx.Observable; +import rx.Scheduler; +import rx.functions.Action1; +import rx.functions.Func1; + +import static org.junit.Assert.assertTrue; + +public class CachedThreadSchedulerTest extends AbstractSchedulerConcurrencyTests { + + @Override + protected Scheduler getScheduler() { + return Schedulers.io(); + } + + /** + * IO scheduler defaults to using CachedThreadScheduler + */ + @Test + public final void testIOScheduler() { + + Observable<Integer> o1 = Observable.from(1, 2, 3, 4, 5); + Observable<Integer> o2 = Observable.from(6, 7, 8, 9, 10); + Observable<String> o = Observable.merge(o1, o2).map(new Func1<Integer, String>() { + + @Override + public String call(Integer t) { + assertTrue(Thread.currentThread().getName().startsWith("RxCachedThreadScheduler")); + return "Value_" + t + "_Thread_" + Thread.currentThread().getName(); + } + }); + + o.subscribeOn(Schedulers.io()).toBlocking().forEach(new Action1<String>() { + + @Override + public void call(String t) { + System.out.println("t: " + t); + } + }); + } + +} \ No newline at end of file diff --git a/rxjava-core/src/test/java/rx/schedulers/NewThreadSchedulerTest.java b/rxjava-core/src/test/java/rx/schedulers/NewThreadSchedulerTest.java index 37b314a0dd..963ee50fa9 100644 --- a/rxjava-core/src/test/java/rx/schedulers/NewThreadSchedulerTest.java +++ b/rxjava-core/src/test/java/rx/schedulers/NewThreadSchedulerTest.java @@ -16,14 +16,7 @@ package rx.schedulers; -import static org.junit.Assert.assertTrue; - -import org.junit.Test; - -import rx.Observable; import rx.Scheduler; -import rx.functions.Action1; -import rx.functions.Func1; public class NewThreadSchedulerTest extends AbstractSchedulerConcurrencyTests { @@ -31,31 +24,4 @@ public class NewThreadSchedulerTest extends AbstractSchedulerConcurrencyTests { protected Scheduler getScheduler() { return Schedulers.newThread(); } - - /** - * IO scheduler defaults to using NewThreadScheduler - */ - @Test - 
public final void testIOScheduler() { - - Observable<Integer> o1 = Observable.<Integer> from(1, 2, 3, 4, 5); - Observable<Integer> o2 = Observable.<Integer> from(6, 7, 8, 9, 10); - Observable<String> o = Observable.<Integer> merge(o1, o2).map(new Func1<Integer, String>() { - - @Override - public String call(Integer t) { - assertTrue(Thread.currentThread().getName().startsWith("RxNewThreadScheduler")); - return "Value_" + t + "_Thread_" + Thread.currentThread().getName(); - } - }); - - o.subscribeOn(Schedulers.io()).toBlocking().forEach(new Action1<String>() { - - @Override - public void call(String t) { - System.out.println("t: " + t); - } - }); - } - }
8760be7d6428e3b54e6486106770fe1a1ac1e18c
spring-framework
ResponseEntity's HeadersBuilder allows for- specifying existing HttpHeaders--Issue: SPR-12324-
a
https://github.com/spring-projects/spring-framework
diff --git a/spring-web/src/main/java/org/springframework/http/ResponseEntity.java b/spring-web/src/main/java/org/springframework/http/ResponseEntity.java index d63752b4ac5d..2d7d4b812819 100644 --- a/spring-web/src/main/java/org/springframework/http/ResponseEntity.java +++ b/spring-web/src/main/java/org/springframework/http/ResponseEntity.java @@ -256,13 +256,22 @@ public interface HeadersBuilder<B extends HeadersBuilder<B>> { /** * Add the given, single header value under the given name. - * @param headerName the header name + * @param headerName the header name * @param headerValues the header value(s) * @return this builder * @see HttpHeaders#add(String, String) */ B header(String headerName, String... headerValues); + /** + * Copy the given headers into the entity's headers map. + * @param headers the existing HttpHeaders to copy from + * @return this builder + * @since 4.1.2 + * @see HttpHeaders#add(String, String) + */ + B headers(HttpHeaders headers); + /** * Set the set of allowed {@link HttpMethod HTTP methods}, as specified * by the {@code Allow} header. @@ -360,6 +369,12 @@ public BodyBuilder header(String headerName, String... headerValues) { return this; } + @Override + public BodyBuilder headers(HttpHeaders headers) { + this.headers.putAll(headers); + return this; + } + @Override public BodyBuilder allow(HttpMethod... allowedMethods) { this.headers.setAllow(new HashSet<HttpMethod>(Arrays.asList(allowedMethods)));
9ebbf1bfcea9942117727c08c6905dd444c230ae
hadoop
YARN-3361. CapacityScheduler side changes to- support non-exclusive node labels. Contributed by Wangda Tan (cherry picked- from commit 0fefda645bca935b87b6bb8ca63e6f18340d59f5)--
a
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 478d0aebacc5f..059c5a3d39e7d 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -24,6 +24,9 @@ Release 2.8.0 - UNRELEASED YARN-3443. Create a 'ResourceHandler' subsystem to ease addition of support for new resource types on the NM. (Sidharta Seethana via junping_du) + YARN-3361. CapacityScheduler side changes to support non-exclusive node + labels. (Wangda Tan via jianhe) + IMPROVEMENTS YARN-1880. Cleanup TestApplicationClientProtocolOnHA diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index 68d4ef9fe77aa..f2146c8b124be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -313,6 +313,7 @@ public static ResourceRequest newResourceRequest(ResourceRequest r) { request.setResourceName(r.getResourceName()); request.setCapability(r.getCapability()); request.setNumContainers(r.getNumContainers()); + request.setNodeLabelExpression(r.getNodeLabelExpression()); return request; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 1be1727e86599..1071831263ae1 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -146,7 +146,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { private ConcurrentMap<NodeId, List<ContainerStatus>> finishedContainersSentToAM = new ConcurrentHashMap<NodeId, List<ContainerStatus>>(); - private Container masterContainer; + private volatile Container masterContainer; private float progress = 0; private String host = "N/A"; @@ -762,13 +762,7 @@ public List<ContainerStatus> pullJustFinishedContainers() { @Override public Container getMasterContainer() { - this.readLock.lock(); - - try { - return this.masterContainer; - } finally { - this.readLock.unlock(); - } + return this.masterContainer; } @InterfaceAudience.Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java index 5521d47ed6076..5604f0f33965f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java @@ -73,10 +73,11 @@ public class AppSchedulingInfo { /* Allocated by scheduler */ boolean pending = true; // for app metrics + private ResourceUsage appResourceUsage; public 
AppSchedulingInfo(ApplicationAttemptId appAttemptId, String user, Queue queue, ActiveUsersManager activeUsersManager, - long epoch) { + long epoch, ResourceUsage appResourceUsage) { this.applicationAttemptId = appAttemptId; this.applicationId = appAttemptId.getApplicationId(); this.queue = queue; @@ -84,6 +85,7 @@ public AppSchedulingInfo(ApplicationAttemptId appAttemptId, this.user = user; this.activeUsersManager = activeUsersManager; this.containerIdCounter = new AtomicLong(epoch << EPOCH_BIT_SHIFT); + this.appResourceUsage = appResourceUsage; } public ApplicationId getApplicationId() { @@ -191,13 +193,19 @@ synchronized public void updateResourceRequests( lastRequestCapability); // update queue: + Resource increasedResource = Resources.multiply(request.getCapability(), + request.getNumContainers()); queue.incPendingResource( request.getNodeLabelExpression(), - Resources.multiply(request.getCapability(), - request.getNumContainers())); + increasedResource); + appResourceUsage.incPending(request.getNodeLabelExpression(), increasedResource); if (lastRequest != null) { + Resource decreasedResource = + Resources.multiply(lastRequestCapability, lastRequestContainers); queue.decPendingResource(lastRequest.getNodeLabelExpression(), - Resources.multiply(lastRequestCapability, lastRequestContainers)); + decreasedResource); + appResourceUsage.decPending(lastRequest.getNodeLabelExpression(), + decreasedResource); } } } @@ -385,6 +393,8 @@ synchronized private void decrementOutstanding( checkForDeactivation(); } + appResourceUsage.decPending(offSwitchRequest.getNodeLabelExpression(), + offSwitchRequest.getCapability()); queue.decPendingResource(offSwitchRequest.getNodeLabelExpression(), offSwitchRequest.getCapability()); } @@ -492,9 +502,10 @@ public synchronized void recoverContainer(RMContainer rmContainer) { } public ResourceRequest cloneResourceRequest(ResourceRequest request) { - ResourceRequest newRequest = ResourceRequest.newInstance( - request.getPriority(), 
request.getResourceName(), - request.getCapability(), 1, request.getRelaxLocality()); + ResourceRequest newRequest = + ResourceRequest.newInstance(request.getPriority(), + request.getResourceName(), request.getCapability(), 1, + request.getRelaxLocality(), request.getNodeLabelExpression()); return newRequest; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java index 36ee4daa1edbc..5169b78dd582f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java @@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.util.resource.Resources; /** @@ -250,6 +251,10 @@ private static Resource normalize(Resource res) { } private Resource _get(String label, ResourceType type) { + if (label == null) { + label = RMNodeLabelsManager.NO_LABEL; + } + try { readLock.lock(); UsageByLabel usage = usages.get(label); @@ -263,6 +268,9 @@ private Resource _get(String label, ResourceType type) { } private UsageByLabel getAndAddIfMissing(String label) { + if (label == null) { + label = RMNodeLabelsManager.NO_LABEL; + } if (!usages.containsKey(label)) { UsageByLabel u = new UsageByLabel(label); usages.put(label, u); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 5e0bbc7f9b48e..fccf7661a2ad4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -56,6 +56,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReservedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode; +import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; import com.google.common.base.Preconditions; @@ -108,14 +110,24 @@ public class SchedulerApplicationAttempt { private Set<ContainerId> pendingRelease = null; /** - * Count how many times the application has been given an opportunity - * to schedule a task at each priority. Each time the scheduler - * asks the application for a task at this priority, it is incremented, - * and each time the application successfully schedules a task, it + * Count how many times the application has been given an opportunity to + * schedule a task at each priority. 
Each time the scheduler asks the + * application for a task at this priority, it is incremented, and each time + * the application successfully schedules a task (at rack or node local), it * is reset to 0. */ Multiset<Priority> schedulingOpportunities = HashMultiset.create(); + /** + * Count how many times the application has been given an opportunity to + * schedule a non-partitioned resource request at each priority. Each time the + * scheduler asks the application for a task at this priority, it is + * incremented, and each time the application successfully schedules a task, + * it is reset to 0 when schedule any task at corresponding priority. + */ + Multiset<Priority> missedNonPartitionedRequestSchedulingOpportunity = + HashMultiset.create(); + // Time of the last container scheduled at the current allowed level protected Map<Priority, Long> lastScheduledContainer = new HashMap<Priority, Long>(); @@ -132,7 +144,7 @@ public SchedulerApplicationAttempt(ApplicationAttemptId applicationAttemptId, this.rmContext = rmContext; this.appSchedulingInfo = new AppSchedulingInfo(applicationAttemptId, user, queue, - activeUsersManager, rmContext.getEpoch()); + activeUsersManager, rmContext.getEpoch(), attemptResourceUsage); this.queue = queue; this.pendingRelease = new HashSet<ContainerId>(); this.attemptId = applicationAttemptId; @@ -489,6 +501,18 @@ public boolean isBlacklisted(String resourceName) { return this.appSchedulingInfo.isBlacklisted(resourceName); } + public synchronized int addMissedNonPartitionedRequestSchedulingOpportunity( + Priority priority) { + missedNonPartitionedRequestSchedulingOpportunity.add(priority); + return missedNonPartitionedRequestSchedulingOpportunity.count(priority); + } + + public synchronized void + resetMissedNonPartitionedRequestSchedulingOpportunity(Priority priority) { + missedNonPartitionedRequestSchedulingOpportunity.setCount(priority, 0); + } + + public synchronized void addSchedulingOpportunity(Priority priority) { 
schedulingOpportunities.setCount(priority, schedulingOpportunities.count(priority) + 1); @@ -518,6 +542,7 @@ public synchronized int getSchedulingOpportunities(Priority priority) { public synchronized void resetSchedulingOpportunities(Priority priority) { resetSchedulingOpportunities(priority, System.currentTimeMillis()); } + // used for continuous scheduling public synchronized void resetSchedulingOpportunities(Priority priority, long currentTimeMs) { @@ -669,4 +694,13 @@ public void recordContainerAllocationTime(long value) { public Set<String> getBlacklistedNodes() { return this.appSchedulingInfo.getBlackListCopy(); } + + @Private + public boolean hasPendingResourceRequest(ResourceCalculator rc, + String nodePartition, Resource cluster, + SchedulingMode schedulingMode) { + return SchedulerUtils.hasPendingResourceRequest(rc, + this.attemptResourceUsage, nodePartition, cluster, + schedulingMode); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index 248cc08b74853..7a1a5287a9959 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -37,11 +37,10 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.AccessType; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.collect.Sets; - /** * Utilities shared by schedulers. */ @@ -235,9 +234,13 @@ public static void validateResourceRequest(ResourceRequest resReq, if (labelExp == null && queueInfo != null && ResourceRequest.ANY.equals(resReq.getResourceName())) { labelExp = queueInfo.getDefaultNodeLabelExpression(); - resReq.setNodeLabelExpression(labelExp); } + // If labelExp still equals to null, set it to be NO_LABEL + resReq + .setNodeLabelExpression(labelExp == null ? RMNodeLabelsManager.NO_LABEL + : labelExp); + // we don't allow specify label expression other than resourceName=ANY now if (!ResourceRequest.ANY.equals(resReq.getResourceName()) && labelExp != null && !labelExp.trim().isEmpty()) { @@ -273,25 +276,6 @@ public static void validateResourceRequest(ResourceRequest resReq, } } - public static boolean checkQueueAccessToNode(Set<String> queueLabels, - Set<String> nodeLabels) { - // if queue's label is *, it can access any node - if (queueLabels != null && queueLabels.contains(RMNodeLabelsManager.ANY)) { - return true; - } - // any queue can access to a node without label - if (nodeLabels == null || nodeLabels.isEmpty()) { - return true; - } - // a queue can access to a node only if it contains any label of the node - if (queueLabels != null - && Sets.intersection(queueLabels, nodeLabels).size() > 0) { - return true; - } - // sorry, you cannot access - return false; - } - public static void checkIfLabelInClusterNodeLabels(RMNodeLabelsManager mgr, Set<String> labels) throws IOException { if (mgr == null) { @@ -311,26 +295,6 @@ public static void checkIfLabelInClusterNodeLabels(RMNodeLabelsManager mgr, } } } - - public static boolean checkNodeLabelExpression(Set<String> nodeLabels, - String labelExpression) { - // empty label expression can only 
allocate on node with empty labels - if (labelExpression == null || labelExpression.trim().isEmpty()) { - if (!nodeLabels.isEmpty()) { - return false; - } - } - - if (labelExpression != null) { - for (String str : labelExpression.split("&&")) { - if (!str.trim().isEmpty() - && (nodeLabels == null || !nodeLabels.contains(str.trim()))) { - return false; - } - } - } - return true; - } public static boolean checkQueueLabelExpression(Set<String> queueLabels, String labelExpression) { @@ -360,4 +324,43 @@ public static AccessType toAccessType(QueueACL acl) { } return null; } + + public static boolean checkResourceRequestMatchingNodePartition( + ResourceRequest offswitchResourceRequest, String nodePartition, + SchedulingMode schedulingMode) { + // We will only look at node label = nodeLabelToLookAt according to + // schedulingMode and partition of node. + String nodePartitionToLookAt = null; + if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) { + nodePartitionToLookAt = nodePartition; + } else { + nodePartitionToLookAt = RMNodeLabelsManager.NO_LABEL; + } + + String askedNodePartition = offswitchResourceRequest.getNodeLabelExpression(); + if (null == askedNodePartition) { + askedNodePartition = RMNodeLabelsManager.NO_LABEL; + } + return askedNodePartition.equals(nodePartitionToLookAt); + } + + private static boolean hasPendingResourceRequest(ResourceCalculator rc, + ResourceUsage usage, String partitionToLookAt, Resource cluster) { + if (Resources.greaterThan(rc, cluster, + usage.getPending(partitionToLookAt), Resources.none())) { + return true; + } + return false; + } + + @Private + public static boolean hasPendingResourceRequest(ResourceCalculator rc, + ResourceUsage usage, String nodePartition, Resource cluster, + SchedulingMode schedulingMode) { + String partitionToLookAt = nodePartition; + if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) { + partitionToLookAt = RMNodeLabelsManager.NO_LABEL; + } + return hasPendingResourceRequest(rc, 
usage, partitionToLookAt, cluster); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 42ea089d72afa..d95c45c79be87 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.HashMap; -import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -38,12 +37,12 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.security.AccessType; import org.apache.hadoop.yarn.security.PrivilegedEntity; import org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType; import org.apache.hadoop.yarn.security.YarnAuthorizationProvider; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; @@ -56,6 +55,11 @@ public abstract class AbstractCSQueue implements CSQueue { private static final 
Log LOG = LogFactory.getLog(AbstractCSQueue.class); + static final CSAssignment NULL_ASSIGNMENT = + new CSAssignment(Resources.createResource(0, 0), NodeType.NODE_LOCAL); + + static final CSAssignment SKIP_ASSIGNMENT = new CSAssignment(true); + CSQueue parent; final String queueName; volatile int numContainers; @@ -343,16 +347,8 @@ public Resource getMinimumAllocation() { } synchronized void allocateResource(Resource clusterResource, - Resource resource, Set<String> nodeLabels) { - - // Update usedResources by labels - if (nodeLabels == null || nodeLabels.isEmpty()) { - queueUsage.incUsed(resource); - } else { - for (String label : Sets.intersection(accessibleLabels, nodeLabels)) { - queueUsage.incUsed(label, resource); - } - } + Resource resource, String nodePartition) { + queueUsage.incUsed(nodePartition, resource); ++numContainers; CSQueueUtils.updateQueueStatistics(resourceCalculator, this, getParent(), @@ -360,15 +356,8 @@ synchronized void allocateResource(Resource clusterResource, } protected synchronized void releaseResource(Resource clusterResource, - Resource resource, Set<String> nodeLabels) { - // Update usedResources by labels - if (null == nodeLabels || nodeLabels.isEmpty()) { - queueUsage.decUsed(resource); - } else { - for (String label : Sets.intersection(accessibleLabels, nodeLabels)) { - queueUsage.decUsed(label, resource); - } - } + Resource resource, String nodePartition) { + queueUsage.decUsed(nodePartition, resource); CSQueueUtils.updateQueueStatistics(resourceCalculator, this, getParent(), clusterResource, minimumAllocation); @@ -434,103 +423,108 @@ private boolean isQueueHierarchyPreemptionDisabled(CSQueue q) { parentQ.getPreemptionDisabled()); } - private Resource getCurrentLimitResource(String nodeLabel, - Resource clusterResource, ResourceLimits currentResourceLimits) { - /* - * Current limit resource: For labeled resource: limit = queue-max-resource - * (TODO, this part need update when we support labeled-limit) For - * non-labeled 
resource: limit = min(queue-max-resource, - * limit-set-by-parent) - */ - Resource queueMaxResource = - Resources.multiplyAndNormalizeDown(resourceCalculator, - labelManager.getResourceByLabel(nodeLabel, clusterResource), - queueCapacities.getAbsoluteMaximumCapacity(nodeLabel), minimumAllocation); - if (nodeLabel.equals(RMNodeLabelsManager.NO_LABEL)) { - return Resources.min(resourceCalculator, clusterResource, - queueMaxResource, currentResourceLimits.getLimit()); + private Resource getCurrentLimitResource(String nodePartition, + Resource clusterResource, ResourceLimits currentResourceLimits, + SchedulingMode schedulingMode) { + if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) { + /* + * Current limit resource: For labeled resource: limit = queue-max-resource + * (TODO, this part need update when we support labeled-limit) For + * non-labeled resource: limit = min(queue-max-resource, + * limit-set-by-parent) + */ + Resource queueMaxResource = + Resources.multiplyAndNormalizeDown(resourceCalculator, + labelManager.getResourceByLabel(nodePartition, clusterResource), + queueCapacities.getAbsoluteMaximumCapacity(nodePartition), minimumAllocation); + if (nodePartition.equals(RMNodeLabelsManager.NO_LABEL)) { + return Resources.min(resourceCalculator, clusterResource, + queueMaxResource, currentResourceLimits.getLimit()); + } + return queueMaxResource; + } else if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) { + // When we doing non-exclusive resource allocation, maximum capacity of + // all queues on this label equals to total resource with the label. 
+ return labelManager.getResourceByLabel(nodePartition, clusterResource); } - return queueMaxResource; + + return Resources.none(); } synchronized boolean canAssignToThisQueue(Resource clusterResource, - Set<String> nodeLabels, ResourceLimits currentResourceLimits, - Resource nowRequired, Resource resourceCouldBeUnreserved) { - // Get label of this queue can access, it's (nodeLabel AND queueLabel) - Set<String> labelCanAccess; - if (null == nodeLabels || nodeLabels.isEmpty()) { - labelCanAccess = new HashSet<String>(); - // Any queue can always access any node without label - labelCanAccess.add(RMNodeLabelsManager.NO_LABEL); - } else { - labelCanAccess = new HashSet<String>( - accessibleLabels.contains(CommonNodeLabelsManager.ANY) ? nodeLabels - : Sets.intersection(accessibleLabels, nodeLabels)); - } - - for (String label : labelCanAccess) { - // New total resource = used + required - Resource newTotalResource = - Resources.add(queueUsage.getUsed(label), nowRequired); - - Resource currentLimitResource = - getCurrentLimitResource(label, clusterResource, currentResourceLimits); - - // if reservation continous looking enabled, check to see if could we - // potentially use this node instead of a reserved node if the application - // has reserved containers. 
- // TODO, now only consider reservation cases when the node has no label - if (this.reservationsContinueLooking - && label.equals(RMNodeLabelsManager.NO_LABEL) - && Resources.greaterThan(resourceCalculator, clusterResource, - resourceCouldBeUnreserved, Resources.none())) { - // resource-without-reserved = used - reserved - Resource newTotalWithoutReservedResource = - Resources.subtract(newTotalResource, resourceCouldBeUnreserved); - - // when total-used-without-reserved-resource < currentLimit, we still - // have chance to allocate on this node by unreserving some containers - if (Resources.lessThan(resourceCalculator, clusterResource, - newTotalWithoutReservedResource, currentLimitResource)) { - if (LOG.isDebugEnabled()) { - LOG.debug("try to use reserved: " + getQueueName() - + " usedResources: " + queueUsage.getUsed() - + ", clusterResources: " + clusterResource - + ", reservedResources: " + resourceCouldBeUnreserved - + ", capacity-without-reserved: " - + newTotalWithoutReservedResource + ", maxLimitCapacity: " - + currentLimitResource); - } - return true; + String nodePartition, ResourceLimits currentResourceLimits, + Resource nowRequired, Resource resourceCouldBeUnreserved, + SchedulingMode schedulingMode) { + // New total resource = used + required + Resource newTotalResource = + Resources.add(queueUsage.getUsed(nodePartition), nowRequired); + + // Get current limited resource: + // - When doing RESPECT_PARTITION_EXCLUSIVITY allocation, we will respect + // queues' max capacity. + // - When doing IGNORE_PARTITION_EXCLUSIVITY allocation, we will not respect + // queue's max capacity, queue's max capacity on the partition will be + // considered to be 100%. Which is a queue can use all resource in the + // partition. 
+ // Doing this because: for non-exclusive allocation, we make sure there's + // idle resource on the partition, to avoid wastage, such resource will be + // leveraged as much as we can, and preemption policy will reclaim it back + // when partitoned-resource-request comes back. + Resource currentLimitResource = + getCurrentLimitResource(nodePartition, clusterResource, + currentResourceLimits, schedulingMode); + + // if reservation continous looking enabled, check to see if could we + // potentially use this node instead of a reserved node if the application + // has reserved containers. + // TODO, now only consider reservation cases when the node has no label + if (this.reservationsContinueLooking + && nodePartition.equals(RMNodeLabelsManager.NO_LABEL) + && Resources.greaterThan(resourceCalculator, clusterResource, + resourceCouldBeUnreserved, Resources.none())) { + // resource-without-reserved = used - reserved + Resource newTotalWithoutReservedResource = + Resources.subtract(newTotalResource, resourceCouldBeUnreserved); + + // when total-used-without-reserved-resource < currentLimit, we still + // have chance to allocate on this node by unreserving some containers + if (Resources.lessThan(resourceCalculator, clusterResource, + newTotalWithoutReservedResource, currentLimitResource)) { + if (LOG.isDebugEnabled()) { + LOG.debug("try to use reserved: " + getQueueName() + + " usedResources: " + queueUsage.getUsed() + + ", clusterResources: " + clusterResource + + ", reservedResources: " + resourceCouldBeUnreserved + + ", capacity-without-reserved: " + + newTotalWithoutReservedResource + ", maxLimitCapacity: " + + currentLimitResource); } + return true; } - - // Otherwise, if any of the label of this node beyond queue limit, we - // cannot allocate on this node. Consider a small epsilon here. 
- if (Resources.greaterThan(resourceCalculator, clusterResource, - newTotalResource, currentLimitResource)) { - return false; - } + } - if (LOG.isDebugEnabled()) { - LOG.debug(getQueueName() - + "Check assign to queue, label=" + label - + " usedResources: " + queueUsage.getUsed(label) - + " clusterResources: " + clusterResource - + " currentUsedCapacity " - + Resources.divide(resourceCalculator, clusterResource, - queueUsage.getUsed(label), - labelManager.getResourceByLabel(label, clusterResource)) - + " max-capacity: " - + queueCapacities.getAbsoluteMaximumCapacity(label) - + ")"); - } - return true; + // Check if we over current-resource-limit computed. + if (Resources.greaterThan(resourceCalculator, clusterResource, + newTotalResource, currentLimitResource)) { + return false; } - - // Actually, this will not happen, since labelCanAccess will be always - // non-empty - return false; + + if (LOG.isDebugEnabled()) { + LOG.debug(getQueueName() + + "Check assign to queue, nodePartition=" + + nodePartition + + " usedResources: " + + queueUsage.getUsed(nodePartition) + + " clusterResources: " + + clusterResource + + " currentUsedCapacity " + + Resources.divide(resourceCalculator, clusterResource, + queueUsage.getUsed(nodePartition), + labelManager.getResourceByLabel(nodePartition, clusterResource)) + + " max-capacity: " + + queueCapacities.getAbsoluteMaximumCapacity(nodePartition) + ")"); + } + return true; } @Override @@ -556,4 +550,33 @@ public void decPendingResource(String nodeLabel, Resource resourceToDec) { parent.decPendingResource(nodeLabel, resourceToDec); } } + + /** + * Return if the queue has pending resource on given nodePartition and + * schedulingMode. 
+ */ + boolean hasPendingResourceRequest(String nodePartition, + Resource cluster, SchedulingMode schedulingMode) { + return SchedulerUtils.hasPendingResourceRequest(resourceCalculator, + queueUsage, nodePartition, cluster, schedulingMode); + } + + boolean accessibleToPartition(String nodePartition) { + // if queue's label is *, it can access any node + if (accessibleLabels != null + && accessibleLabels.contains(RMNodeLabelsManager.ANY)) { + return true; + } + // any queue can access to a node without label + if (nodePartition == null + || nodePartition.equals(RMNodeLabelsManager.NO_LABEL)) { + return true; + } + // a queue can access to a node only if it contains any label of the node + if (accessibleLabels != null && accessibleLabels.contains(nodePartition)) { + return true; + } + // sorry, you cannot access + return false; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java index 1a9448acaa148..b06a646cec973 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java @@ -190,10 +190,13 @@ public void finishApplicationAttempt(FiCaSchedulerApp application, * @param clusterResource the resource of the cluster. * @param node node on which resources are available * @param resourceLimits how much overall resource of this queue can use. 
+ * @param schedulingMode Type of exclusive check when assign container on a + * NodeManager, see {@link SchedulingMode}. * @return the assignment */ public CSAssignment assignContainers(Resource clusterResource, - FiCaSchedulerNode node, ResourceLimits resourceLimits); + FiCaSchedulerNode node, ResourceLimits resourceLimits, + SchedulingMode schedulingMode); /** * A container assigned to the queue has completed. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index e93c5291f2905..cfeee37d1e6ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -35,6 +35,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; @@ -1114,28 +1115,30 @@ private synchronized void allocateContainersToNode(FiCaSchedulerNode node) { if (reservedContainer != null) { FiCaSchedulerApp reservedApplication = getCurrentAttemptForContainer(reservedContainer.getContainerId()); - + // Try to fulfill the reservation - LOG.info("Trying to fulfill reservation for application " + - reservedApplication.getApplicationId() + " on node: " + - node.getNodeID()); - - LeafQueue queue 
= ((LeafQueue)reservedApplication.getQueue()); - assignment = queue.assignContainers( + LOG.info("Trying to fulfill reservation for application " + + reservedApplication.getApplicationId() + " on node: " + + node.getNodeID()); + + LeafQueue queue = ((LeafQueue) reservedApplication.getQueue()); + assignment = + queue.assignContainers( clusterResource, node, // TODO, now we only consider limits for parent for non-labeled // resources, should consider labeled resources as well. new ResourceLimits(labelManager.getResourceByLabel( - RMNodeLabelsManager.NO_LABEL, clusterResource))); + RMNodeLabelsManager.NO_LABEL, clusterResource)), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); if (assignment.isFulfilledReservation()) { CSAssignment tmp = new CSAssignment(reservedContainer.getReservedResource(), - assignment.getType()); + assignment.getType()); Resources.addTo(assignment.getAssignmentInformation().getAllocated(), - reservedContainer.getReservedResource()); + reservedContainer.getReservedResource()); tmp.getAssignmentInformation().addAllocationDetails( - reservedContainer.getContainerId(), queue.getQueuePath()); + reservedContainer.getContainerId(), queue.getQueuePath()); tmp.getAssignmentInformation().incrAllocations(); updateSchedulerHealth(lastNodeUpdateTime, node, tmp); schedulerHealth.updateSchedulerFulfilledReservationCounts(1); @@ -1143,16 +1146,13 @@ private synchronized void allocateContainersToNode(FiCaSchedulerNode node) { RMContainer excessReservation = assignment.getExcessReservation(); if (excessReservation != null) { - Container container = excessReservation.getContainer(); - queue.completedContainer( - clusterResource, assignment.getApplication(), node, - excessReservation, - SchedulerUtils.createAbnormalContainerStatus( - container.getId(), - SchedulerUtils.UNRESERVED_CONTAINER), - RMContainerEventType.RELEASED, null, true); + Container container = excessReservation.getContainer(); + queue.completedContainer(clusterResource, 
assignment.getApplication(), + node, excessReservation, SchedulerUtils + .createAbnormalContainerStatus(container.getId(), + SchedulerUtils.UNRESERVED_CONTAINER), + RMContainerEventType.RELEASED, null, true); } - } // Try to schedule more if there are no reservations to fulfill @@ -1163,22 +1163,61 @@ private synchronized void allocateContainersToNode(FiCaSchedulerNode node) { LOG.debug("Trying to schedule on node: " + node.getNodeName() + ", available: " + node.getAvailableResource()); } + assignment = root.assignContainers( clusterResource, node, // TODO, now we only consider limits for parent for non-labeled // resources, should consider labeled resources as well. new ResourceLimits(labelManager.getResourceByLabel( - RMNodeLabelsManager.NO_LABEL, clusterResource))); + RMNodeLabelsManager.NO_LABEL, clusterResource)), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + if (Resources.greaterThan(calculator, clusterResource, + assignment.getResource(), Resources.none())) { + updateSchedulerHealth(lastNodeUpdateTime, node, assignment); + return; + } + + // Only do non-exclusive allocation when node has node-labels. + if (StringUtils.equals(node.getPartition(), + RMNodeLabelsManager.NO_LABEL)) { + return; + } + + // Only do non-exclusive allocation when the node-label supports that + try { + if (rmContext.getNodeLabelManager().isExclusiveNodeLabel( + node.getPartition())) { + return; + } + } catch (IOException e) { + LOG.warn("Exception when trying to get exclusivity of node label=" + + node.getPartition(), e); + return; + } + + // Try to use NON_EXCLUSIVE + assignment = root.assignContainers( + clusterResource, + node, + // TODO, now we only consider limits for parent for non-labeled + // resources, should consider labeled resources as well. 
+ new ResourceLimits(labelManager.getResourceByLabel( + RMNodeLabelsManager.NO_LABEL, clusterResource)), + SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY); updateSchedulerHealth(lastNodeUpdateTime, node, assignment); + if (Resources.greaterThan(calculator, clusterResource, + assignment.getResource(), Resources.none())) { + return; + } } } else { - LOG.info("Skipping scheduling since node " + node.getNodeID() + - " is reserved by application " + - node.getReservedContainer().getContainerId().getApplicationAttemptId() - ); + LOG.info("Skipping scheduling since node " + + node.getNodeID() + + " is reserved by application " + + node.getReservedContainer().getContainerId() + .getApplicationAttemptId()); } - } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 102e5539f162a..4e8d61769ecdf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -319,6 +319,11 @@ public float getMaximumApplicationMasterResourcePerQueuePercent(String queue) { getMaximumApplicationMasterResourcePercent()); } + public void setMaximumApplicationMasterResourcePerQueuePercent(String queue, + float percent) { + setFloat(getQueuePrefix(queue) + MAXIMUM_AM_RESOURCE_SUFFIX, percent); + } + public float getNonLabeledQueueCapacity(String queue) { float capacity = 
queue.equals("root") ? 100.0f : getFloat( getQueuePrefix(queue) + CAPACITY, UNDEFINED); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 59a016f98140d..8a6a601f202f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -24,7 +24,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -58,6 +57,7 @@ import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.security.AccessType; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; @@ -718,39 +718,11 @@ private synchronized FiCaSchedulerApp getApplication( ApplicationAttemptId applicationAttemptId) { return applicationAttemptMap.get(applicationAttemptId); } - - private static final CSAssignment NULL_ASSIGNMENT = - new CSAssignment(Resources.createResource(0, 0), NodeType.NODE_LOCAL); - - private static final CSAssignment SKIP_ASSIGNMENT = new 
CSAssignment(true); - - private static Set<String> getRequestLabelSetByExpression( - String labelExpression) { - Set<String> labels = new HashSet<String>(); - if (null == labelExpression) { - return labels; - } - for (String l : labelExpression.split("&&")) { - if (l.trim().isEmpty()) { - continue; - } - labels.add(l.trim()); - } - return labels; - } - - private boolean checkResourceRequestMatchingNodeLabel(ResourceRequest offswitchResourceRequest, - FiCaSchedulerNode node) { - String askedNodeLabel = offswitchResourceRequest.getNodeLabelExpression(); - if (null == askedNodeLabel) { - askedNodeLabel = RMNodeLabelsManager.NO_LABEL; - } - return askedNodeLabel.equals(node.getPartition()); - } @Override public synchronized CSAssignment assignContainers(Resource clusterResource, - FiCaSchedulerNode node, ResourceLimits currentResourceLimits) { + FiCaSchedulerNode node, ResourceLimits currentResourceLimits, + SchedulingMode schedulingMode) { updateCurrentResourceLimits(currentResourceLimits, clusterResource); if(LOG.isDebugEnabled()) { @@ -758,12 +730,6 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, + " #applications=" + activeApplications.size()); } - // if our queue cannot access this node, just return - if (!SchedulerUtils.checkQueueAccessToNode(accessibleLabels, - node.getLabels())) { - return NULL_ASSIGNMENT; - } - // Check for reserved resources RMContainer reservedContainer = node.getReservedContainer(); if (reservedContainer != null) { @@ -771,8 +737,26 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, getApplication(reservedContainer.getApplicationAttemptId()); synchronized (application) { return assignReservedContainer(application, node, reservedContainer, - clusterResource); + clusterResource, schedulingMode); + } + } + + // if our queue cannot access this node, just return + if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY + && !accessibleToPartition(node.getPartition())) { + 
return NULL_ASSIGNMENT; + } + + // Check if this queue need more resource, simply skip allocation if this + // queue doesn't need more resources. + if (!hasPendingResourceRequest(node.getPartition(), + clusterResource, schedulingMode)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip this queue=" + getQueuePath() + + ", because it doesn't need more resource, schedulingMode=" + + schedulingMode.name() + " node-partition=" + node.getPartition()); } + return NULL_ASSIGNMENT; } // Try to assign containers to applications in order @@ -783,6 +767,17 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, + application.getApplicationId()); application.showRequests(); } + + // Check if application needs more resource, skip if it doesn't need more. + if (!application.hasPendingResourceRequest(resourceCalculator, + node.getPartition(), clusterResource, schedulingMode)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip app_attempt=" + application.getApplicationAttemptId() + + ", because it doesn't need more resource, schedulingMode=" + + schedulingMode.name() + " node-label=" + node.getPartition()); + } + continue; + } synchronized (application) { // Check if this resource is on the blacklist @@ -806,10 +801,27 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, continue; } + // AM container allocation doesn't support non-exclusive allocation to + // avoid painful of preempt an AM container + if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) { + RMAppAttempt rmAppAttempt = + csContext.getRMContext().getRMApps() + .get(application.getApplicationId()).getCurrentAppAttempt(); + if (null == rmAppAttempt.getMasterContainer()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip allocating AM container to app_attempt=" + + application.getApplicationAttemptId() + + ", don't allow to allocate AM container in non-exclusive mode"); + } + break; + } + } + // Is the node-label-expression of this offswitch resource request 
// matches the node's label? // If not match, jump to next priority. - if (!checkResourceRequestMatchingNodeLabel(anyRequest, node)) { + if (!SchedulerUtils.checkResourceRequestMatchingNodePartition( + anyRequest, node.getPartition(), schedulingMode)) { continue; } @@ -822,10 +834,6 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, } } - Set<String> requestedNodeLabels = - getRequestLabelSetByExpression(anyRequest - .getNodeLabelExpression()); - // Compute user-limit & set headroom // Note: We compute both user-limit & headroom with the highest // priority request as the target. @@ -833,27 +841,61 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, // before all higher priority ones are serviced. Resource userLimit = computeUserLimitAndSetHeadroom(application, clusterResource, - required, requestedNodeLabels); + required, node.getPartition(), schedulingMode); // Check queue max-capacity limit - if (!super.canAssignToThisQueue(clusterResource, node.getLabels(), - this.currentResourceLimits, required, application.getCurrentReservation())) { + if (!super.canAssignToThisQueue(clusterResource, node.getPartition(), + this.currentResourceLimits, required, + application.getCurrentReservation(), schedulingMode)) { return NULL_ASSIGNMENT; } // Check user limit if (!canAssignToUser(clusterResource, application.getUser(), userLimit, - application, true, requestedNodeLabels)) { + application, true, node.getPartition())) { break; } // Inform the application it is about to get a scheduling opportunity application.addSchedulingOpportunity(priority); + // Increase missed-non-partitioned-resource-request-opportunity. 
+ // This is to make sure non-partitioned-resource-request will prefer + // to be allocated to non-partitioned nodes + int missedNonPartitionedRequestSchedulingOpportunity = 0; + if (anyRequest.getNodeLabelExpression().equals( + RMNodeLabelsManager.NO_LABEL)) { + missedNonPartitionedRequestSchedulingOpportunity = + application + .addMissedNonPartitionedRequestSchedulingOpportunity(priority); + } + + if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) { + // Before doing allocation, we need to check scheduling opportunity to + // make sure : non-partitioned resource request should be scheduled to + // non-partitioned partition first. + if (missedNonPartitionedRequestSchedulingOpportunity < scheduler + .getNumClusterNodes()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip app_attempt=" + + application.getApplicationAttemptId() + + " priority=" + + priority + + " because missed-non-partitioned-resource-request" + + " opportunity under requred:" + + " Now=" + missedNonPartitionedRequestSchedulingOpportunity + + " required=" + + scheduler.getNumClusterNodes()); + } + + break; + } + } + // Try to schedule CSAssignment assignment = assignContainersOnNode(clusterResource, node, application, priority, - null); + null, schedulingMode); // Did the application skip this node? if (assignment.getSkipped()) { @@ -870,9 +912,9 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, // Book-keeping // Note: Update headroom to account for current allocation too... allocateResource(clusterResource, application, assigned, - node.getLabels()); + node.getPartition()); - // Don't reset scheduling opportunities for non-local assignments + // Don't reset scheduling opportunities for offswitch assignments // otherwise the app will be delayed for each non-local assignment. // This helps apps with many off-cluster requests schedule faster. 
if (assignment.getType() != NodeType.OFF_SWITCH) { @@ -881,6 +923,10 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, } application.resetSchedulingOpportunities(priority); } + // Non-exclusive scheduling opportunity is different: we need reset + // it every time to make sure non-labeled resource request will be + // most likely allocated on non-labeled nodes first. + application.resetMissedNonPartitionedRequestSchedulingOpportunity(priority); // Done return assignment; @@ -904,7 +950,8 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, private synchronized CSAssignment assignReservedContainer( FiCaSchedulerApp application, FiCaSchedulerNode node, - RMContainer rmContainer, Resource clusterResource) { + RMContainer rmContainer, Resource clusterResource, + SchedulingMode schedulingMode) { // Do we still need this reservation? Priority priority = rmContainer.getReservedPriority(); if (application.getTotalRequiredResources(priority) == 0) { @@ -915,7 +962,7 @@ private synchronized CSAssignment assignReservedContainer( // Try to assign if we have sufficient resources CSAssignment tmp = assignContainersOnNode(clusterResource, node, application, priority, - rmContainer); + rmContainer, schedulingMode); // Doesn't matter... 
since it's already charged for at time of reservation // "re-reservation" is *free* @@ -929,7 +976,8 @@ private synchronized CSAssignment assignReservedContainer( protected Resource getHeadroom(User user, Resource queueCurrentLimit, Resource clusterResource, FiCaSchedulerApp application, Resource required) { return getHeadroom(user, queueCurrentLimit, clusterResource, - computeUserLimit(application, clusterResource, required, user, null)); + computeUserLimit(application, clusterResource, required, user, + RMNodeLabelsManager.NO_LABEL, SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY)); } private Resource getHeadroom(User user, Resource currentResourceLimit, @@ -973,7 +1021,8 @@ private void setQueueResourceLimitsInfo( @Lock({LeafQueue.class, FiCaSchedulerApp.class}) Resource computeUserLimitAndSetHeadroom(FiCaSchedulerApp application, - Resource clusterResource, Resource required, Set<String> requestedLabels) { + Resource clusterResource, Resource required, String nodePartition, + SchedulingMode schedulingMode) { String user = application.getUser(); User queueUser = getUser(user); @@ -981,7 +1030,7 @@ Resource computeUserLimitAndSetHeadroom(FiCaSchedulerApp application, // TODO, need consider headroom respect labels also Resource userLimit = computeUserLimit(application, clusterResource, required, - queueUser, requestedLabels); + queueUser, nodePartition, schedulingMode); setQueueResourceLimitsInfo(clusterResource); @@ -1010,34 +1059,18 @@ Resource computeUserLimitAndSetHeadroom(FiCaSchedulerApp application, @Lock(NoLock.class) private Resource computeUserLimit(FiCaSchedulerApp application, Resource clusterResource, Resource required, User user, - Set<String> requestedLabels) { + String nodePartition, SchedulingMode schedulingMode) { // What is our current capacity? // * It is equal to the max(required, queue-capacity) if // we're running below capacity. 
The 'max' ensures that jobs in queues // with miniscule capacity (< 1 slot) make progress // * If we're running over capacity, then its // (usedResources + required) (which extra resources we are allocating) - Resource queueCapacity = Resource.newInstance(0, 0); - if (requestedLabels != null && !requestedLabels.isEmpty()) { - // if we have multiple labels to request, we will choose to use the first - // label - String firstLabel = requestedLabels.iterator().next(); - queueCapacity = - Resources - .max(resourceCalculator, clusterResource, queueCapacity, - Resources.multiplyAndNormalizeUp(resourceCalculator, - labelManager.getResourceByLabel(firstLabel, - clusterResource), - queueCapacities.getAbsoluteCapacity(firstLabel), - minimumAllocation)); - } else { - // else there's no label on request, just to use absolute capacity as - // capacity for nodes without label - queueCapacity = - Resources.multiplyAndNormalizeUp(resourceCalculator, labelManager - .getResourceByLabel(CommonNodeLabelsManager.NO_LABEL, clusterResource), - queueCapacities.getAbsoluteCapacity(), minimumAllocation); - } + Resource queueCapacity = + Resources.multiplyAndNormalizeUp(resourceCalculator, + labelManager.getResourceByLabel(nodePartition, clusterResource), + queueCapacities.getAbsoluteCapacity(nodePartition), + minimumAllocation); // Allow progress for queues with miniscule capacity queueCapacity = @@ -1047,33 +1080,56 @@ private Resource computeUserLimit(FiCaSchedulerApp application, required); Resource currentCapacity = - Resources.lessThan(resourceCalculator, clusterResource, - queueUsage.getUsed(), queueCapacity) ? - queueCapacity : Resources.add(queueUsage.getUsed(), required); + Resources.lessThan(resourceCalculator, clusterResource, + queueUsage.getUsed(nodePartition), queueCapacity) ? queueCapacity + : Resources.add(queueUsage.getUsed(nodePartition), required); // Never allow a single user to take more than the // queue's configured capacity * user-limit-factor. 
// Also, the queue's configured capacity should be higher than // queue-hard-limit * ulMin - final int activeUsers = activeUsersManager.getNumActiveUsers(); - - Resource limit = + final int activeUsers = activeUsersManager.getNumActiveUsers(); + + // User limit resource is determined by: + // max{currentCapacity / #activeUsers, currentCapacity * user-limit-percentage%) + Resource userLimitResource = Resources.max( + resourceCalculator, clusterResource, + Resources.divideAndCeil( + resourceCalculator, currentCapacity, activeUsers), + Resources.divideAndCeil( + resourceCalculator, + Resources.multiplyAndRoundDown( + currentCapacity, userLimit), + 100) + ); + + // User limit is capped by maxUserLimit + // - maxUserLimit = queueCapacity * user-limit-factor (RESPECT_PARTITION_EXCLUSIVITY) + // - maxUserLimit = total-partition-resource (IGNORE_PARTITION_EXCLUSIVITY) + // + // In IGNORE_PARTITION_EXCLUSIVITY mode, if a queue cannot access a + // partition, its guaranteed resource on that partition is 0. And + // user-limit-factor computation is based on queue's guaranteed capacity. So + // we will not cap user-limit as well as used resource when doing + // IGNORE_PARTITION_EXCLUSIVITY allocation. 
+ Resource maxUserLimit = Resources.none(); + if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) { + maxUserLimit = + Resources.multiplyAndRoundDown(queueCapacity, userLimitFactor); + } else if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) { + maxUserLimit = + labelManager.getResourceByLabel(nodePartition, clusterResource); + } + + // Cap final user limit with maxUserLimit + userLimitResource = Resources.roundUp( resourceCalculator, Resources.min( resourceCalculator, clusterResource, - Resources.max( - resourceCalculator, clusterResource, - Resources.divideAndCeil( - resourceCalculator, currentCapacity, activeUsers), - Resources.divideAndCeil( - resourceCalculator, - Resources.multiplyAndRoundDown( - currentCapacity, userLimit), - 100) - ), - Resources.multiplyAndRoundDown(queueCapacity, userLimitFactor) + userLimitResource, + maxUserLimit ), minimumAllocation); @@ -1081,11 +1137,11 @@ private Resource computeUserLimit(FiCaSchedulerApp application, String userName = application.getUser(); LOG.debug("User limit computation for " + userName + " in queue " + getQueueName() + - " userLimit=" + userLimit + + " userLimitPercent=" + userLimit + " userLimitFactor=" + userLimitFactor + " required: " + required + " consumed: " + user.getUsed() + - " limit: " + limit + + " user-limit-resource: " + userLimitResource + " queueCapacity: " + queueCapacity + " qconsumed: " + queueUsage.getUsed() + " currentCapacity: " + currentCapacity + @@ -1093,31 +1149,26 @@ private Resource computeUserLimit(FiCaSchedulerApp application, " clusterCapacity: " + clusterResource ); } - user.setUserResourceLimit(limit); - return limit; + user.setUserResourceLimit(userLimitResource); + return userLimitResource; } @Private protected synchronized boolean canAssignToUser(Resource clusterResource, String userName, Resource limit, FiCaSchedulerApp application, - boolean checkReservations, Set<String> requestLabels) { + boolean checkReservations, String nodePartition) { 
User user = getUser(userName); - - String label = CommonNodeLabelsManager.NO_LABEL; - if (requestLabels != null && !requestLabels.isEmpty()) { - label = requestLabels.iterator().next(); - } // Note: We aren't considering the current request since there is a fixed // overhead of the AM, but it's a > check, not a >= check, so... if (Resources .greaterThan(resourceCalculator, clusterResource, - user.getUsed(label), + user.getUsed(nodePartition), limit)) { // if enabled, check to see if could we potentially use this node instead // of a reserved node if the application has reserved containers if (this.reservationsContinueLooking && checkReservations - && label.equals(CommonNodeLabelsManager.NO_LABEL)) { + && nodePartition.equals(CommonNodeLabelsManager.NO_LABEL)) { if (Resources.lessThanOrEqual( resourceCalculator, clusterResource, @@ -1136,7 +1187,7 @@ protected synchronized boolean canAssignToUser(Resource clusterResource, if (LOG.isDebugEnabled()) { LOG.debug("User " + userName + " in queue " + getQueueName() + " will exceed limit - " + " consumed: " - + user.getUsed() + " limit: " + limit); + + user.getUsed(nodePartition) + " limit: " + limit); } return false; } @@ -1176,7 +1227,7 @@ resourceCalculator, required, getMaximumAllocation() private CSAssignment assignContainersOnNode(Resource clusterResource, FiCaSchedulerNode node, FiCaSchedulerApp application, Priority priority, - RMContainer reservedContainer) { + RMContainer reservedContainer, SchedulingMode schedulingMode) { CSAssignment assigned; @@ -1190,7 +1241,7 @@ private CSAssignment assignContainersOnNode(Resource clusterResource, assigned = assignNodeLocalContainers(clusterResource, nodeLocalResourceRequest, node, application, priority, reservedContainer, - allocatedContainer); + allocatedContainer, schedulingMode); if (Resources.greaterThan(resourceCalculator, clusterResource, assigned.getResource(), Resources.none())) { @@ -1219,7 +1270,7 @@ private CSAssignment assignContainersOnNode(Resource 
clusterResource, assigned = assignRackLocalContainers(clusterResource, rackLocalResourceRequest, node, application, priority, reservedContainer, - allocatedContainer); + allocatedContainer, schedulingMode); if (Resources.greaterThan(resourceCalculator, clusterResource, assigned.getResource(), Resources.none())) { @@ -1248,7 +1299,7 @@ private CSAssignment assignContainersOnNode(Resource clusterResource, assigned = assignOffSwitchContainers(clusterResource, offSwitchResourceRequest, node, application, priority, reservedContainer, - allocatedContainer); + allocatedContainer, schedulingMode); // update locality statistics if (allocatedContainer.getValue() != null) { @@ -1314,16 +1365,17 @@ protected boolean findNodeToUnreserve(Resource clusterResource, @Private protected boolean checkLimitsToReserve(Resource clusterResource, - FiCaSchedulerApp application, Resource capability) { + FiCaSchedulerApp application, Resource capability, String nodePartition, + SchedulingMode schedulingMode) { // we can't reserve if we got here based on the limit // checks assuming we could unreserve!!! 
Resource userLimit = computeUserLimitAndSetHeadroom(application, - clusterResource, capability, null); + clusterResource, capability, nodePartition, schedulingMode); // Check queue max-capacity limit, // TODO: Consider reservation on labels - if (!canAssignToThisQueue(clusterResource, null, - this.currentResourceLimits, capability, Resources.none())) { + if (!canAssignToThisQueue(clusterResource, RMNodeLabelsManager.NO_LABEL, + this.currentResourceLimits, capability, Resources.none(), schedulingMode)) { if (LOG.isDebugEnabled()) { LOG.debug("was going to reserve but hit queue limit"); } @@ -1332,7 +1384,7 @@ protected boolean checkLimitsToReserve(Resource clusterResource, // Check user limit if (!canAssignToUser(clusterResource, application.getUser(), userLimit, - application, false, null)) { + application, false, nodePartition)) { if (LOG.isDebugEnabled()) { LOG.debug("was going to reserve but hit user limit"); } @@ -1345,12 +1397,13 @@ protected boolean checkLimitsToReserve(Resource clusterResource, private CSAssignment assignNodeLocalContainers(Resource clusterResource, ResourceRequest nodeLocalResourceRequest, FiCaSchedulerNode node, FiCaSchedulerApp application, Priority priority, - RMContainer reservedContainer, MutableObject allocatedContainer) { + RMContainer reservedContainer, MutableObject allocatedContainer, + SchedulingMode schedulingMode) { if (canAssign(application, priority, node, NodeType.NODE_LOCAL, reservedContainer)) { return assignContainer(clusterResource, node, application, priority, nodeLocalResourceRequest, NodeType.NODE_LOCAL, reservedContainer, - allocatedContainer); + allocatedContainer, schedulingMode); } return new CSAssignment(Resources.none(), NodeType.NODE_LOCAL); @@ -1359,12 +1412,13 @@ private CSAssignment assignNodeLocalContainers(Resource clusterResource, private CSAssignment assignRackLocalContainers(Resource clusterResource, ResourceRequest rackLocalResourceRequest, FiCaSchedulerNode node, FiCaSchedulerApp application, Priority 
priority, - RMContainer reservedContainer, MutableObject allocatedContainer) { + RMContainer reservedContainer, MutableObject allocatedContainer, + SchedulingMode schedulingMode) { if (canAssign(application, priority, node, NodeType.RACK_LOCAL, reservedContainer)) { return assignContainer(clusterResource, node, application, priority, rackLocalResourceRequest, NodeType.RACK_LOCAL, reservedContainer, - allocatedContainer); + allocatedContainer, schedulingMode); } return new CSAssignment(Resources.none(), NodeType.RACK_LOCAL); @@ -1373,16 +1427,21 @@ private CSAssignment assignRackLocalContainers(Resource clusterResource, private CSAssignment assignOffSwitchContainers(Resource clusterResource, ResourceRequest offSwitchResourceRequest, FiCaSchedulerNode node, FiCaSchedulerApp application, Priority priority, - RMContainer reservedContainer, MutableObject allocatedContainer) { + RMContainer reservedContainer, MutableObject allocatedContainer, + SchedulingMode schedulingMode) { if (canAssign(application, priority, node, NodeType.OFF_SWITCH, reservedContainer)) { return assignContainer(clusterResource, node, application, priority, offSwitchResourceRequest, NodeType.OFF_SWITCH, reservedContainer, - allocatedContainer); + allocatedContainer, schedulingMode); } return new CSAssignment(Resources.none(), NodeType.OFF_SWITCH); } + + private int getActualNodeLocalityDelay() { + return Math.min(scheduler.getNumClusterNodes(), getNodeLocalityDelay()); + } boolean canAssign(FiCaSchedulerApp application, Priority priority, FiCaSchedulerNode node, NodeType type, RMContainer reservedContainer) { @@ -1417,10 +1476,7 @@ boolean canAssign(FiCaSchedulerApp application, Priority priority, if (type == NodeType.RACK_LOCAL) { // 'Delay' rack-local just a little bit... 
long missedOpportunities = application.getSchedulingOpportunities(priority); - return ( - Math.min(scheduler.getNumClusterNodes(), getNodeLocalityDelay()) < - missedOpportunities - ); + return getActualNodeLocalityDelay() < missedOpportunities; } // Check if we need containers on this host @@ -1460,7 +1516,7 @@ Container createContainer(FiCaSchedulerApp application, FiCaSchedulerNode node, private CSAssignment assignContainer(Resource clusterResource, FiCaSchedulerNode node, FiCaSchedulerApp application, Priority priority, ResourceRequest request, NodeType type, RMContainer rmContainer, - MutableObject createdContainer) { + MutableObject createdContainer, SchedulingMode schedulingMode) { if (LOG.isDebugEnabled()) { LOG.debug("assignContainers: node=" + node.getNodeName() + " application=" + application.getApplicationId() @@ -1469,9 +1525,8 @@ private CSAssignment assignContainer(Resource clusterResource, FiCaSchedulerNode } // check if the resource request can access the label - if (!SchedulerUtils.checkNodeLabelExpression( - node.getLabels(), - request.getNodeLabelExpression())) { + if (!SchedulerUtils.checkResourceRequestMatchingNodePartition(request, + node.getPartition(), schedulingMode)) { // this is a reserved container, but we cannot allocate it now according // to label not match. This can be caused by node label changed // We should un-reserve this container. @@ -1576,8 +1631,8 @@ private CSAssignment assignContainer(Resource clusterResource, FiCaSchedulerNode // If we're trying to reserve a container here, not container will be // unreserved for reserving the new one. 
Check limits again before // reserve the new container - if (!checkLimitsToReserve(clusterResource, - application, capability)) { + if (!checkLimitsToReserve(clusterResource, + application, capability, node.getPartition(), schedulingMode)) { return new CSAssignment(Resources.none(), type); } } @@ -1666,7 +1721,7 @@ public void completedContainer(Resource clusterResource, // Book-keeping if (removed) { releaseResource(clusterResource, application, - container.getResource(), node.getLabels()); + container.getResource(), node.getPartition()); LOG.info("completedContainer" + " container=" + container + " queue=" + this + @@ -1684,13 +1739,13 @@ public void completedContainer(Resource clusterResource, synchronized void allocateResource(Resource clusterResource, SchedulerApplicationAttempt application, Resource resource, - Set<String> nodeLabels) { - super.allocateResource(clusterResource, resource, nodeLabels); + String nodePartition) { + super.allocateResource(clusterResource, resource, nodePartition); // Update user metrics String userName = application.getUser(); User user = getUser(userName); - user.assignContainer(resource, nodeLabels); + user.assignContainer(resource, nodePartition); // Note this is a bit unconventional since it gets the object and modifies // it here, rather then using set routine Resources.subtractFrom(application.getHeadroom(), resource); // headroom @@ -1707,13 +1762,13 @@ synchronized void allocateResource(Resource clusterResource, } synchronized void releaseResource(Resource clusterResource, - FiCaSchedulerApp application, Resource resource, Set<String> nodeLabels) { - super.releaseResource(clusterResource, resource, nodeLabels); + FiCaSchedulerApp application, Resource resource, String nodePartition) { + super.releaseResource(clusterResource, resource, nodePartition); // Update user metrics String userName = application.getUser(); User user = getUser(userName); - user.releaseContainer(resource, nodeLabels); + user.releaseContainer(resource, 
nodePartition); metrics.setAvailableResourcesToUser(userName, application.getHeadroom()); LOG.info(getQueueName() + @@ -1723,7 +1778,8 @@ synchronized void releaseResource(Resource clusterResource, private void updateAbsoluteCapacityResource(Resource clusterResource) { absoluteCapacityResource = - Resources.multiplyAndNormalizeUp(resourceCalculator, clusterResource, + Resources.multiplyAndNormalizeUp(resourceCalculator, labelManager + .getResourceByLabel(RMNodeLabelsManager.NO_LABEL, clusterResource), queueCapacities.getAbsoluteCapacity(), minimumAllocation); } @@ -1769,8 +1825,9 @@ resourceCalculator, this, getParent(), clusterResource, // Update application properties for (FiCaSchedulerApp application : activeApplications) { synchronized (application) { - computeUserLimitAndSetHeadroom(application, clusterResource, - Resources.none(), null); + computeUserLimitAndSetHeadroom(application, clusterResource, + Resources.none(), RMNodeLabelsManager.NO_LABEL, + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); } } } @@ -1828,25 +1885,12 @@ public synchronized void finishApplication(boolean wasActive) { } } - public void assignContainer(Resource resource, - Set<String> nodeLabels) { - if (nodeLabels == null || nodeLabels.isEmpty()) { - userResourceUsage.incUsed(resource); - } else { - for (String label : nodeLabels) { - userResourceUsage.incUsed(label, resource); - } - } + public void assignContainer(Resource resource, String nodePartition) { + userResourceUsage.incUsed(nodePartition, resource); } - public void releaseContainer(Resource resource, Set<String> nodeLabels) { - if (nodeLabels == null || nodeLabels.isEmpty()) { - userResourceUsage.decUsed(resource); - } else { - for (String label : nodeLabels) { - userResourceUsage.decUsed(label, resource); - } - } + public void releaseContainer(Resource resource, String nodePartition) { + userResourceUsage.decUsed(nodePartition, resource); } public Resource getUserResourceLimit() { @@ -1869,7 +1913,7 @@ public void 
recoverContainer(Resource clusterResource, FiCaSchedulerNode node = scheduler.getNode(rmContainer.getContainer().getNodeId()); allocateResource(clusterResource, attempt, rmContainer.getContainer() - .getResource(), node.getLabels()); + .getResource(), node.getPartition()); } getParent().recoverContainer(clusterResource, attempt, rmContainer); } @@ -1909,7 +1953,7 @@ public void attachContainer(Resource clusterResource, FiCaSchedulerNode node = scheduler.getNode(rmContainer.getContainer().getNodeId()); allocateResource(clusterResource, application, rmContainer.getContainer() - .getResource(), node.getLabels()); + .getResource(), node.getPartition()); LOG.info("movedContainer" + " container=" + rmContainer.getContainer() + " resource=" + rmContainer.getContainer().getResource() + " queueMoveIn=" + this + " usedCapacity=" + getUsedCapacity() @@ -1927,7 +1971,7 @@ public void detachContainer(Resource clusterResource, FiCaSchedulerNode node = scheduler.getNode(rmContainer.getContainer().getNodeId()); releaseResource(clusterResource, application, rmContainer.getContainer() - .getResource(), node.getLabels()); + .getResource(), node.getPartition()); LOG.info("movedContainer" + " container=" + rmContainer.getContainer() + " resource=" + rmContainer.getContainer().getResource() + " queueMoveOut=" + this + " usedCapacity=" + getUsedCapacity() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 882498a6808f2..eb64d4384f0a7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -56,8 +56,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.util.resource.Resources; @@ -377,16 +375,29 @@ private synchronized void removeApplication(ApplicationId applicationId, @Override public synchronized CSAssignment assignContainers(Resource clusterResource, - FiCaSchedulerNode node, ResourceLimits resourceLimits) { - CSAssignment assignment = - new CSAssignment(Resources.createResource(0, 0), NodeType.NODE_LOCAL); - Set<String> nodeLabels = node.getLabels(); - + FiCaSchedulerNode node, ResourceLimits resourceLimits, + SchedulingMode schedulingMode) { // if our queue cannot access this node, just return - if (!SchedulerUtils.checkQueueAccessToNode(accessibleLabels, nodeLabels)) { - return assignment; + if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY + && !accessibleToPartition(node.getPartition())) { + return NULL_ASSIGNMENT; + } + + // Check if this queue need more resource, simply skip allocation if this + // queue doesn't need more resources. 
+ if (!super.hasPendingResourceRequest(node.getPartition(), + clusterResource, schedulingMode)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip this queue=" + getQueuePath() + + ", because it doesn't need more resource, schedulingMode=" + + schedulingMode.name() + " node-partition=" + node.getPartition()); + } + return NULL_ASSIGNMENT; } + CSAssignment assignment = + new CSAssignment(Resources.createResource(0, 0), NodeType.NODE_LOCAL); + while (canAssign(clusterResource, node)) { if (LOG.isDebugEnabled()) { LOG.debug("Trying to assign containers to child-queue of " @@ -396,15 +407,17 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, // Are we over maximum-capacity for this queue? // This will also consider parent's limits and also continuous reservation // looking - if (!super.canAssignToThisQueue(clusterResource, nodeLabels, resourceLimits, - minimumAllocation, Resources.createResource(getMetrics() - .getReservedMB(), getMetrics().getReservedVirtualCores()))) { + if (!super.canAssignToThisQueue(clusterResource, node.getPartition(), + resourceLimits, minimumAllocation, Resources.createResource( + getMetrics().getReservedMB(), getMetrics() + .getReservedVirtualCores()), schedulingMode)) { break; } // Schedule - CSAssignment assignedToChild = - assignContainersToChildQueues(clusterResource, node, resourceLimits); + CSAssignment assignedToChild = + assignContainersToChildQueues(clusterResource, node, resourceLimits, + schedulingMode); assignment.setType(assignedToChild.getType()); // Done if no child-queue assigned anything @@ -413,7 +426,7 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, assignedToChild.getResource(), Resources.none())) { // Track resource utilization for the parent-queue super.allocateResource(clusterResource, assignedToChild.getResource(), - nodeLabels); + node.getPartition()); // Track resource utilization in this pass of the scheduler Resources @@ -510,7 +523,8 @@ private 
ResourceLimits getResourceLimitsOfChild(CSQueue child, } private synchronized CSAssignment assignContainersToChildQueues( - Resource cluster, FiCaSchedulerNode node, ResourceLimits limits) { + Resource cluster, FiCaSchedulerNode node, ResourceLimits limits, + SchedulingMode schedulingMode) { CSAssignment assignment = new CSAssignment(Resources.createResource(0, 0), NodeType.NODE_LOCAL); @@ -523,12 +537,13 @@ private synchronized CSAssignment assignContainersToChildQueues( LOG.debug("Trying to assign to queue: " + childQueue.getQueuePath() + " stats: " + childQueue); } - + // Get ResourceLimits of child queue before assign containers ResourceLimits childLimits = getResourceLimitsOfChild(childQueue, cluster, limits); - assignment = childQueue.assignContainers(cluster, node, childLimits); + assignment = childQueue.assignContainers(cluster, node, + childLimits, schedulingMode); if(LOG.isDebugEnabled()) { LOG.debug("Assigned to queue: " + childQueue.getQueuePath() + " stats: " + childQueue + " --> " + @@ -584,7 +599,7 @@ public void completedContainer(Resource clusterResource, // Book keeping synchronized (this) { super.releaseResource(clusterResource, rmContainer.getContainer() - .getResource(), node.getLabels()); + .getResource(), node.getPartition()); LOG.info("completedContainer" + " queue=" + getQueueName() + @@ -653,7 +668,7 @@ public void recoverContainer(Resource clusterResource, FiCaSchedulerNode node = scheduler.getNode(rmContainer.getContainer().getNodeId()); super.allocateResource(clusterResource, rmContainer.getContainer() - .getResource(), node.getLabels()); + .getResource(), node.getPartition()); } if (parent != null) { parent.recoverContainer(clusterResource, attempt, rmContainer); @@ -681,7 +696,7 @@ public void attachContainer(Resource clusterResource, FiCaSchedulerNode node = scheduler.getNode(rmContainer.getContainer().getNodeId()); super.allocateResource(clusterResource, rmContainer.getContainer() - .getResource(), node.getLabels()); + 
.getResource(), node.getPartition()); LOG.info("movedContainer" + " queueMoveIn=" + getQueueName() + " usedCapacity=" + getUsedCapacity() + " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + " used=" + queueUsage.getUsed() + " cluster=" @@ -701,7 +716,7 @@ public void detachContainer(Resource clusterResource, scheduler.getNode(rmContainer.getContainer().getNodeId()); super.releaseResource(clusterResource, rmContainer.getContainer().getResource(), - node.getLabels()); + node.getPartition()); LOG.info("movedContainer" + " queueMoveOut=" + getQueueName() + " usedCapacity=" + getUsedCapacity() + " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + " used=" + queueUsage.getUsed() + " cluster=" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/SchedulingMode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/SchedulingMode.java new file mode 100644 index 0000000000000..7e7dc37c9bea1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/SchedulingMode.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +/** + * Scheduling modes, see below for detailed explanations + */ +public enum SchedulingMode { + /** + * <p> + * When a node has partition (say partition=x), only application in the queue + * can access to partition=x AND requires for partition=x resource can get + * chance to allocate on the node. + * </p> + * + * <p> + * When a node has no partition, only application requires non-partitioned + * resource can get chance to allocate on the node. + * </p> + */ + RESPECT_PARTITION_EXCLUSIVITY, + + /** + * Only used when a node has partition AND the partition isn't an exclusive + * partition AND application requires non-partitioned resource. 
+ */ + IGNORE_PARTITION_EXCLUSIVITY +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java index 76ede3940f825..9b7eb840dbecc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java @@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.Task.State; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; @@ -277,6 +278,9 @@ private synchronized void addResourceRequest( } else { request.setNumContainers(request.getNumContainers() + 1); } + if (request.getNodeLabelExpression() == null) { + request.setNodeLabelExpression(RMNodeLabelsManager.NO_LABEL); + } // Note this down for next interaction with ResourceManager ask.remove(request); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java index f62fdb3dcee22..5c107aa38bfb1 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java @@ -150,8 +150,14 @@ public AllocateResponse allocate( public AllocateResponse allocate( String host, int memory, int numContainers, List<ContainerId> releases, String labelExpression) throws Exception { + return allocate(host, memory, numContainers, 1, releases, labelExpression); + } + + public AllocateResponse allocate( + String host, int memory, int numContainers, int priority, + List<ContainerId> releases, String labelExpression) throws Exception { List<ResourceRequest> reqs = - createReq(new String[] { host }, memory, 1, numContainers, + createReq(new String[] { host }, memory, priority, numContainers, labelExpression); return allocate(reqs, releases); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 06c6b3275e334..f2b1d8646de51 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -21,6 +21,8 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.security.PrivilegedAction; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; @@ -200,10 +202,18 @@ public boolean waitForState(MockNM nm, ContainerId containerId, public 
boolean waitForState(MockNM nm, ContainerId containerId, RMContainerState containerState, int timeoutMillisecs) throws Exception { + return waitForState(Arrays.asList(nm), containerId, containerState, + timeoutMillisecs); + } + + public boolean waitForState(Collection<MockNM> nms, ContainerId containerId, + RMContainerState containerState, int timeoutMillisecs) throws Exception { RMContainer container = getResourceScheduler().getRMContainer(containerId); int timeoutSecs = 0; while(container == null && timeoutSecs++ < timeoutMillisecs / 100) { - nm.nodeHeartbeat(true); + for (MockNM nm : nms) { + nm.nodeHeartbeat(true); + } container = getResourceScheduler().getRMContainer(containerId); System.out.println("Waiting for container " + containerId + " to be allocated."); Thread.sleep(100); @@ -217,9 +227,11 @@ public boolean waitForState(MockNM nm, ContainerId containerId, && timeoutSecs++ < timeoutMillisecs / 100) { System.out.println("Container : " + containerId + " State is : " + container.getState() + " Waiting for state : " + containerState); - nm.nodeHeartbeat(true); + for (MockNM nm : nms) { + nm.nodeHeartbeat(true); + } Thread.sleep(100); - + if (timeoutMillisecs <= timeoutSecs * 100) { return false; } @@ -650,11 +662,28 @@ public static void finishAMAndVerifyAppState(RMApp rmApp, MockRM rm, MockNM nm, am.waitForState(RMAppAttemptState.FINISHED); rm.waitForState(rmApp.getApplicationId(), RMAppState.FINISHED); } + + @SuppressWarnings("rawtypes") + private static void waitForSchedulerAppAttemptAdded( + ApplicationAttemptId attemptId, MockRM rm) throws InterruptedException { + int tick = 0; + // Wait for at most 5 sec + while (null == ((AbstractYarnScheduler) rm.getResourceScheduler()) + .getApplicationAttempt(attemptId) && tick < 50) { + Thread.sleep(100); + if (tick % 10 == 0) { + System.out.println("waiting for SchedulerApplicationAttempt=" + + attemptId + " added."); + } + tick++; + } + } public static MockAM launchAM(RMApp app, MockRM rm, MockNM nm) throws 
Exception { rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED); RMAppAttempt attempt = app.getCurrentAppAttempt(); + waitForSchedulerAppAttemptAdded(attempt.getAppAttemptId(), rm); System.out.println("Launch AM " + attempt.getAppAttemptId()); nm.nodeHeartbeat(true); MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java index 1ca5c97a411a5..46167ca68596b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java @@ -612,7 +612,7 @@ public void testHeadroom() throws Exception { // Schedule to compute queue.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); Resource expectedHeadroom = Resources.createResource(10*16*GB, 1); assertEquals(expectedHeadroom, app_0_0.getHeadroom()); @@ -632,7 +632,7 @@ public void testHeadroom() throws Exception { // Schedule to compute queue.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); // Schedule to compute + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute assertEquals(expectedHeadroom, app_0_0.getHeadroom()); assertEquals(expectedHeadroom, app_0_1.getHeadroom());// no change @@ -652,7 +652,7 @@ public void testHeadroom() 
throws Exception { // Schedule to compute queue.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); // Schedule to compute + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute expectedHeadroom = Resources.createResource(10*16*GB / 2, 1); // changes assertEquals(expectedHeadroom, app_0_0.getHeadroom()); assertEquals(expectedHeadroom, app_0_1.getHeadroom()); @@ -661,7 +661,7 @@ public void testHeadroom() throws Exception { // Now reduce cluster size and check for the smaller headroom clusterResource = Resources.createResource(90*16*GB); queue.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); // Schedule to compute + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute expectedHeadroom = Resources.createResource(9*16*GB / 2, 1); // changes assertEquals(expectedHeadroom, app_0_0.getHeadroom()); assertEquals(expectedHeadroom, app_0_1.getHeadroom()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java index 23b31faeb8f7d..970a98ad576f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java @@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import 
org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; @@ -133,7 +134,7 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { final Resource allocatedResource = Resources.createResource(allocation); if (queue instanceof ParentQueue) { ((ParentQueue)queue).allocateResource(clusterResource, - allocatedResource, null); + allocatedResource, RMNodeLabelsManager.NO_LABEL); } else { FiCaSchedulerApp app1 = getMockApplication(0, ""); ((LeafQueue)queue).allocateResource(clusterResource, app1, @@ -145,7 +146,7 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { doReturn(new CSAssignment(Resources.none(), type)). when(queue) .assignContainers(eq(clusterResource), eq(node), - any(ResourceLimits.class)); + any(ResourceLimits.class), any(SchedulingMode.class)); // Mock the node's resource availability Resource available = node.getAvailableResource(); @@ -157,7 +158,7 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { } }). when(queue).assignContainers(eq(clusterResource), eq(node), - any(ResourceLimits.class)); + any(ResourceLimits.class), any(SchedulingMode.class)); doNothing().when(node).releaseContainer(any(Container.class)); } @@ -241,6 +242,14 @@ public void testSortedQueues() throws Exception { CSQueue b = queues.get(B); CSQueue c = queues.get(C); CSQueue d = queues.get(D); + + // Make a/b/c/d has >0 pending resource, so that allocation will continue. 
+ queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage() + .incPending(Resources.createResource(1 * GB)); + a.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); + b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); + c.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); + d.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); final String user_0 = "user_0"; @@ -275,7 +284,7 @@ public void testSortedQueues() throws Exception { stubQueueAllocation(c, clusterResource, node_0, 0*GB); stubQueueAllocation(d, clusterResource, node_0, 0*GB); root.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); for(int i=0; i < 2; i++) { stubQueueAllocation(a, clusterResource, node_0, 0*GB); @@ -283,7 +292,7 @@ public void testSortedQueues() throws Exception { stubQueueAllocation(c, clusterResource, node_0, 0*GB); stubQueueAllocation(d, clusterResource, node_0, 0*GB); root.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); } for(int i=0; i < 3; i++) { @@ -292,7 +301,7 @@ public void testSortedQueues() throws Exception { stubQueueAllocation(c, clusterResource, node_0, 1*GB); stubQueueAllocation(d, clusterResource, node_0, 0*GB); root.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); } for(int i=0; i < 4; i++) { @@ -301,7 +310,7 @@ public void testSortedQueues() throws Exception { stubQueueAllocation(c, clusterResource, node_0, 0*GB); stubQueueAllocation(d, clusterResource, node_0, 1*GB); root.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); } verifyQueueMetrics(a, 1*GB, clusterResource); 
verifyQueueMetrics(b, 2*GB, clusterResource); @@ -335,7 +344,7 @@ public void testSortedQueues() throws Exception { stubQueueAllocation(c, clusterResource, node_0, 0*GB); stubQueueAllocation(d, clusterResource, node_0, 0*GB); root.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); } verifyQueueMetrics(a, 3*GB, clusterResource); verifyQueueMetrics(b, 2*GB, clusterResource); @@ -363,7 +372,7 @@ public void testSortedQueues() throws Exception { stubQueueAllocation(c, clusterResource, node_0, 0*GB); stubQueueAllocation(d, clusterResource, node_0, 0*GB); root.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyQueueMetrics(a, 2*GB, clusterResource); verifyQueueMetrics(b, 3*GB, clusterResource); verifyQueueMetrics(c, 3*GB, clusterResource); @@ -390,7 +399,7 @@ public void testSortedQueues() throws Exception { stubQueueAllocation(c, clusterResource, node_0, 0*GB); stubQueueAllocation(d, clusterResource, node_0, 0*GB); root.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyQueueMetrics(a, 3*GB, clusterResource); verifyQueueMetrics(b, 2*GB, clusterResource); verifyQueueMetrics(c, 3*GB, clusterResource); @@ -405,12 +414,14 @@ public void testSortedQueues() throws Exception { stubQueueAllocation(c, clusterResource, node_0, 0*GB); stubQueueAllocation(d, clusterResource, node_0, 1*GB); root.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); InOrder allocationOrder = inOrder(d,b); - allocationOrder.verify(d).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), any(ResourceLimits.class)); - allocationOrder.verify(b).assignContainers(eq(clusterResource), - 
any(FiCaSchedulerNode.class), any(ResourceLimits.class)); + allocationOrder.verify(d).assignContainers(eq(clusterResource), + any(FiCaSchedulerNode.class), any(ResourceLimits.class), + any(SchedulingMode.class)); + allocationOrder.verify(b).assignContainers(eq(clusterResource), + any(FiCaSchedulerNode.class), any(ResourceLimits.class), + any(SchedulingMode.class)); verifyQueueMetrics(a, 3*GB, clusterResource); verifyQueueMetrics(b, 2*GB, clusterResource); verifyQueueMetrics(c, 3*GB, clusterResource); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java index 03b8f5c1fe195..54ba61724f95e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java @@ -19,6 +19,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; import java.util.List; import java.util.Set; @@ -32,6 +34,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -51,9 +54,13 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Assert; @@ -327,387 +334,4 @@ protected RMSecretManagerService createRMSecretManagerService() { rm1.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.ALLOCATED); MockRM.launchAndRegisterAM(app1, rm1, nm1); } - - private Configuration getConfigurationWithQueueLabels(Configuration config) { - CapacitySchedulerConfiguration conf = - new CapacitySchedulerConfiguration(config); - - // Define top-level queues - conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"}); - conf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100); - conf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "y", 100); - - final String A = CapacitySchedulerConfiguration.ROOT + ".a"; - conf.setCapacity(A, 10); - conf.setMaximumCapacity(A, 15); - conf.setAccessibleNodeLabels(A, toSet("x")); - conf.setCapacityByLabel(A, "x", 100); - - final String B = CapacitySchedulerConfiguration.ROOT + ".b"; - conf.setCapacity(B, 20); - conf.setAccessibleNodeLabels(B, toSet("y")); - 
conf.setCapacityByLabel(B, "y", 100); - - final String C = CapacitySchedulerConfiguration.ROOT + ".c"; - conf.setCapacity(C, 70); - conf.setMaximumCapacity(C, 70); - conf.setAccessibleNodeLabels(C, RMNodeLabelsManager.EMPTY_STRING_SET); - - // Define 2nd-level queues - final String A1 = A + ".a1"; - conf.setQueues(A, new String[] {"a1"}); - conf.setCapacity(A1, 100); - conf.setMaximumCapacity(A1, 100); - conf.setCapacityByLabel(A1, "x", 100); - - final String B1 = B + ".b1"; - conf.setQueues(B, new String[] {"b1"}); - conf.setCapacity(B1, 100); - conf.setMaximumCapacity(B1, 100); - conf.setCapacityByLabel(B1, "y", 100); - - final String C1 = C + ".c1"; - conf.setQueues(C, new String[] {"c1"}); - conf.setCapacity(C1, 100); - conf.setMaximumCapacity(C1, 100); - - return conf; - } - - private void checkTaskContainersHost(ApplicationAttemptId attemptId, - ContainerId containerId, ResourceManager rm, String host) { - YarnScheduler scheduler = rm.getRMContext().getScheduler(); - SchedulerAppReport appReport = scheduler.getSchedulerAppInfo(attemptId); - - Assert.assertTrue(appReport.getLiveContainers().size() > 0); - for (RMContainer c : appReport.getLiveContainers()) { - if (c.getContainerId().equals(containerId)) { - Assert.assertEquals(host, c.getAllocatedNode().getHost()); - } - } - } - - @SuppressWarnings("unchecked") - private <E> Set<E> toSet(E... 
elements) { - Set<E> set = Sets.newHashSet(elements); - return set; - } - - @Test (timeout = 300000) - public void testContainerAllocationWithSingleUserLimits() throws Exception { - final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager(); - mgr.init(conf); - - // set node -> label - mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); - mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"), - NodeId.newInstance("h2", 0), toSet("y"))); - - // inject node label manager - MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) { - @Override - public RMNodeLabelsManager createNodeLabelManager() { - return mgr; - } - }; - - rm1.getRMContext().setNodeLabelManager(mgr); - rm1.start(); - MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x - rm1.registerNode("h2:1234", 8000); // label = y - MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty> - - // launch an app to queue a1 (label = x), and check all container will - // be allocated in h1 - RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1"); - MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); - - // A has only 10% of x, so it can only allocate one container in label=empty - ContainerId containerId = - ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); - am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), ""); - Assert.assertTrue(rm1.waitForState(nm3, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - // Cannot allocate 2nd label=empty container - containerId = - ContainerId.newContainerId(am1.getApplicationAttemptId(), 3); - am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), ""); - Assert.assertFalse(rm1.waitForState(nm3, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - - // A has default user limit = 100, so it can use all resource in label = x - // We can allocate floor(8000 / 1024) = 7 containers - for (int id = 3; id <= 8; id++) { - containerId = - 
ContainerId.newContainerId(am1.getApplicationAttemptId(), id); - am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x"); - Assert.assertTrue(rm1.waitForState(nm1, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - } - rm1.close(); - } - - @Test(timeout = 300000) - public void testContainerAllocateWithComplexLabels() throws Exception { - /* - * Queue structure: - * root (*) - * ________________ - * / \ - * a x(100%), y(50%) b y(50%), z(100%) - * ________________ ______________ - * / / \ - * a1 (x,y) b1(no) b2(y,z) - * 100% y = 100%, z = 100% - * - * Node structure: - * h1 : x - * h2 : y - * h3 : y - * h4 : z - * h5 : NO - * - * Total resource: - * x: 4G - * y: 6G - * z: 2G - * *: 2G - * - * Resource of - * a1: x=4G, y=3G, NO=0.2G - * b1: NO=0.9G (max=1G) - * b2: y=3, z=2G, NO=0.9G (max=1G) - * - * Each node can only allocate two containers - */ - - // set node -> label - mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y", "z")); - mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), - toSet("x"), NodeId.newInstance("h2", 0), toSet("y"), - NodeId.newInstance("h3", 0), toSet("y"), NodeId.newInstance("h4", 0), - toSet("z"), NodeId.newInstance("h5", 0), - RMNodeLabelsManager.EMPTY_STRING_SET)); - - // inject node label manager - MockRM rm1 = new MockRM(TestUtils.getComplexConfigurationWithQueueLabels(conf)) { - @Override - public RMNodeLabelsManager createNodeLabelManager() { - return mgr; - } - }; - - rm1.getRMContext().setNodeLabelManager(mgr); - rm1.start(); - MockNM nm1 = rm1.registerNode("h1:1234", 2048); - MockNM nm2 = rm1.registerNode("h2:1234", 2048); - MockNM nm3 = rm1.registerNode("h3:1234", 2048); - MockNM nm4 = rm1.registerNode("h4:1234", 2048); - MockNM nm5 = rm1.registerNode("h5:1234", 2048); - - ContainerId containerId; - - // launch an app to queue a1 (label = x), and check all container will - // be allocated in h1 - RMApp app1 = rm1.submitApp(1024, "app", "user", null, "a1"); - MockAM am1 = MockRM.launchAndRegisterAM(app1, 
rm1, nm1); - - // request a container (label = y). can be allocated on nm2 - am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y"); - containerId = - ContainerId.newContainerId(am1.getApplicationAttemptId(), 2L); - Assert.assertTrue(rm1.waitForState(nm2, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, - "h2"); - - // launch an app to queue b1 (label = y), and check all container will - // be allocated in h5 - RMApp app2 = rm1.submitApp(1024, "app", "user", null, "b1"); - MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm5); - - // request a container for AM, will succeed - // and now b1's queue capacity will be used, cannot allocate more containers - // (Maximum capacity reached) - am2.allocate("*", 1024, 1, new ArrayList<ContainerId>()); - containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); - Assert.assertFalse(rm1.waitForState(nm4, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - Assert.assertFalse(rm1.waitForState(nm5, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - - // launch an app to queue b2 - RMApp app3 = rm1.submitApp(1024, "app", "user", null, "b2"); - MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm5); - - // request a container. try to allocate on nm1 (label = x) and nm3 (label = - // y,z). Will successfully allocate on nm3 - am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y"); - containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2); - Assert.assertFalse(rm1.waitForState(nm1, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - Assert.assertTrue(rm1.waitForState(nm3, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, - "h3"); - - // try to allocate container (request label = z) on nm4 (label = y,z). - // Will successfully allocate on nm4 only. 
- am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "z"); - containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 3L); - Assert.assertTrue(rm1.waitForState(nm4, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, - "h4"); - - rm1.close(); - } - - @Test (timeout = 120000) - public void testContainerAllocateWithLabels() throws Exception { - // set node -> label - mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); - mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"), - NodeId.newInstance("h2", 0), toSet("y"))); - - // inject node label manager - MockRM rm1 = new MockRM(getConfigurationWithQueueLabels(conf)) { - @Override - public RMNodeLabelsManager createNodeLabelManager() { - return mgr; - } - }; - - rm1.getRMContext().setNodeLabelManager(mgr); - rm1.start(); - MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x - MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y - MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty> - - ContainerId containerId; - - // launch an app to queue a1 (label = x), and check all container will - // be allocated in h1 - RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1"); - MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm3); - - // request a container. 
- am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x"); - containerId = - ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); - Assert.assertFalse(rm1.waitForState(nm2, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - Assert.assertTrue(rm1.waitForState(nm1, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, - "h1"); - - // launch an app to queue b1 (label = y), and check all container will - // be allocated in h2 - RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1"); - MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm3); - - // request a container. - am2.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y"); - containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); - Assert.assertFalse(rm1.waitForState(nm1, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - Assert.assertTrue(rm1.waitForState(nm2, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1, - "h2"); - - // launch an app to queue c1 (label = ""), and check all container will - // be allocated in h3 - RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1"); - MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3); - - // request a container. - am3.allocate("*", 1024, 1, new ArrayList<ContainerId>()); - containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2); - Assert.assertFalse(rm1.waitForState(nm2, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - Assert.assertTrue(rm1.waitForState(nm3, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, - "h3"); - - rm1.close(); - } - - @Test (timeout = 120000) - public void testContainerAllocateWithDefaultQueueLabels() throws Exception { - // This test is pretty much similar to testContainerAllocateWithLabel. 
- // Difference is, this test doesn't specify label expression in ResourceRequest, - // instead, it uses default queue label expression - - // set node -> label - mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); - mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"), - NodeId.newInstance("h2", 0), toSet("y"))); - - // inject node label manager - MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) { - @Override - public RMNodeLabelsManager createNodeLabelManager() { - return mgr; - } - }; - - rm1.getRMContext().setNodeLabelManager(mgr); - rm1.start(); - MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x - MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y - MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty> - - ContainerId containerId; - - // launch an app to queue a1 (label = x), and check all container will - // be allocated in h1 - RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1"); - MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); - - // request a container. - am1.allocate("*", 1024, 1, new ArrayList<ContainerId>()); - containerId = - ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); - Assert.assertFalse(rm1.waitForState(nm3, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - Assert.assertTrue(rm1.waitForState(nm1, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, - "h1"); - - // launch an app to queue b1 (label = y), and check all container will - // be allocated in h2 - RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1"); - MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2); - - // request a container. 
- am2.allocate("*", 1024, 1, new ArrayList<ContainerId>()); - containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); - Assert.assertFalse(rm1.waitForState(nm3, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - Assert.assertTrue(rm1.waitForState(nm2, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1, - "h2"); - - // launch an app to queue c1 (label = ""), and check all container will - // be allocated in h3 - RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1"); - MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3); - - // request a container. - am3.allocate("*", 1024, 1, new ArrayList<ContainerId>()); - containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2); - Assert.assertFalse(rm1.waitForState(nm2, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - Assert.assertTrue(rm1.waitForState(nm3, containerId, - RMContainerState.ALLOCATED, 10 * 1000)); - checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, - "h3"); - - rm1.close(); - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 972cabbf2cc2c..0b5250b4fae87 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -351,7 +351,7 @@ public void testSingleQueueOneUserMetrics() throws Exception { 
// Only 1 container a.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals( (int)(node_0.getTotalResource().getMemory() * a.getCapacity()) - (1*GB), a.getMetrics().getAvailableMB()); @@ -487,7 +487,7 @@ public void testSingleQueueWithOneUser() throws Exception { // Only 1 container a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(1*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -498,7 +498,7 @@ public void testSingleQueueWithOneUser() throws Exception { // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also // you can get one container more than user-limit a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -507,7 +507,7 @@ public void testSingleQueueWithOneUser() throws Exception { // Can't allocate 3rd due to user-limit a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -517,7 +517,7 @@ public void testSingleQueueWithOneUser() throws Exception { // Bump up user-limit-factor, now allocate should work a.setUserLimitFactor(10); a.assignContainers(clusterResource, node_0, - new 
ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(3*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -526,7 +526,7 @@ public void testSingleQueueWithOneUser() throws Exception { // One more should work, for app_1, due to user-limit-factor a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(4*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); @@ -537,7 +537,7 @@ public void testSingleQueueWithOneUser() throws Exception { // Now - no more allocs since we are at max-cap a.setMaxCapacity(0.5f); a.assignContainers(clusterResource, node_0, new ResourceLimits( - clusterResource)); + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(4*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); @@ -653,21 +653,21 @@ public void testUserLimits() throws Exception { // 1 container to user_0 a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); // Again one to user_0 since he hasn't exceeded user limit yet a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(3*GB, 
a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); // One more to user_0 since he is the only active user a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(4*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(2*GB, app_1.getCurrentConsumption().getMemory()); @@ -719,10 +719,10 @@ public void testComputeUserLimitAndSetHeadroom(){ 1, qb.getActiveUsersManager().getNumActiveUsers()); //get headroom qb.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); qb.computeUserLimitAndSetHeadroom(app_0, clusterResource, app_0 .getResourceRequest(u0Priority, ResourceRequest.ANY).getCapability(), - null); + "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); //maxqueue 16G, userlimit 13G, - 4G used = 9G assertEquals(9*GB,app_0.getHeadroom().getMemory()); @@ -739,10 +739,10 @@ public void testComputeUserLimitAndSetHeadroom(){ u1Priority, recordFactory))); qb.submitApplicationAttempt(app_2, user_1); qb.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); qb.computeUserLimitAndSetHeadroom(app_0, clusterResource, app_0 .getResourceRequest(u0Priority, ResourceRequest.ANY).getCapability(), - null); + "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8*GB, qb.getUsedResources().getMemory()); assertEquals(4*GB, app_0.getCurrentConsumption().getMemory()); @@ -782,12 +782,12 @@ public void testComputeUserLimitAndSetHeadroom(){ qb.submitApplicationAttempt(app_1, user_0); qb.submitApplicationAttempt(app_3, user_1); qb.assignContainers(clusterResource, 
node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); qb.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); qb.computeUserLimitAndSetHeadroom(app_3, clusterResource, app_3 .getResourceRequest(u1Priority, ResourceRequest.ANY).getCapability(), - null); + "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(4*GB, qb.getUsedResources().getMemory()); //maxqueue 16G, userlimit 7G, used (by each user) 2G, headroom 5G (both) assertEquals(5*GB, app_3.getHeadroom().getMemory()); @@ -803,13 +803,13 @@ public void testComputeUserLimitAndSetHeadroom(){ TestUtils.createResourceRequest(ResourceRequest.ANY, 6*GB, 1, true, u0Priority, recordFactory))); qb.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); qb.computeUserLimitAndSetHeadroom(app_4, clusterResource, app_4 .getResourceRequest(u0Priority, ResourceRequest.ANY).getCapability(), - null); + "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); qb.computeUserLimitAndSetHeadroom(app_3, clusterResource, app_3 .getResourceRequest(u1Priority, ResourceRequest.ANY).getCapability(), - null); + "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); //app3 is user1, active from last test case @@ -876,7 +876,7 @@ public void testUserHeadroomMultiApp() throws Exception { priority, recordFactory))); a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(1*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -893,7 +893,7 @@ public void testUserHeadroomMultiApp() throws Exception { 
priority, recordFactory))); a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); @@ -982,7 +982,7 @@ public void testHeadroomWithMaxCap() throws Exception { // 1 container to user_0 a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -993,7 +993,7 @@ public void testHeadroomWithMaxCap() throws Exception { // Again one to user_0 since he hasn't exceeded user limit yet a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(3*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); @@ -1010,7 +1010,7 @@ public void testHeadroomWithMaxCap() throws Exception { // No more to user_0 since he is already over user-limit // and no more containers to queue since it's already at max-cap a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(3*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); @@ -1024,7 +1024,7 @@ public void testHeadroomWithMaxCap() throws Exception { priority, recordFactory))); assertEquals(1, 
a.getActiveUsersManager().getNumActiveUsers()); a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(0*GB, app_2.getHeadroom().getMemory()); // hit queue max-cap } @@ -1095,7 +1095,7 @@ public void testSingleQueueWithMultipleUsers() throws Exception { // Only 1 container a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(1*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1103,7 +1103,7 @@ public void testSingleQueueWithMultipleUsers() throws Exception { // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also // you can get one container more than user-limit a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1111,7 +1111,7 @@ public void testSingleQueueWithMultipleUsers() throws Exception { // Can't allocate 3rd due to user-limit a.setUserLimit(25); a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1130,7 +1130,7 @@ public void testSingleQueueWithMultipleUsers() throws Exception { // user_0 is at limit inspite of high user-limit-factor a.setUserLimitFactor(10); 
a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(5*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1140,7 +1140,7 @@ public void testSingleQueueWithMultipleUsers() throws Exception { // Now allocations should goto app_0 since // user_0 is at user-limit not above it a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(6*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1151,7 +1151,7 @@ public void testSingleQueueWithMultipleUsers() throws Exception { // Now - no more allocs since we are at max-cap a.setMaxCapacity(0.5f); a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(6*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1163,7 +1163,7 @@ public void testSingleQueueWithMultipleUsers() throws Exception { a.setMaxCapacity(1.0f); a.setUserLimitFactor(1); a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(7*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1172,7 +1172,7 @@ public void testSingleQueueWithMultipleUsers() throws Exception { // Now we should assign to app_3 again 
since user_2 is under user-limit a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1272,7 +1272,7 @@ public void testReservation() throws Exception { // Only 1 container a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(1*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1283,7 +1283,7 @@ public void testReservation() throws Exception { // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also // you can get one container more than user-limit a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1292,7 +1292,7 @@ public void testReservation() throws Exception { // Now, reservation should kick in for app_1 a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(6*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1309,7 +1309,7 @@ public void testReservation() throws Exception { ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); 
a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(5*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1326,7 +1326,7 @@ public void testReservation() throws Exception { ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(4*GB, a.getUsedResources().getMemory()); assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); @@ -1394,7 +1394,7 @@ public void testStolenReservedContainer() throws Exception { // Start testing... a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1404,7 +1404,7 @@ public void testStolenReservedContainer() throws Exception { // Now, reservation should kick in for app_1 a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(6*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1418,7 +1418,7 @@ public void testStolenReservedContainer() throws Exception { doReturn(-1).when(a).getNodeLocalityDelay(); a.assignContainers(clusterResource, node_1, - new 
ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(10*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); @@ -1435,7 +1435,7 @@ public void testStolenReservedContainer() throws Exception { ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8*GB, a.getUsedResources().getMemory()); assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(8*GB, app_1.getCurrentConsumption().getMemory()); @@ -1504,7 +1504,7 @@ public void testReservationExchange() throws Exception { // Only 1 container a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(1*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1512,14 +1512,14 @@ public void testReservationExchange() throws Exception { // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also // you can get one container more than user-limit a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); // Now, reservation should kick in for app_1 a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), 
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(6*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1534,7 +1534,7 @@ public void testReservationExchange() throws Exception { ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(5*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1544,7 +1544,7 @@ public void testReservationExchange() throws Exception { // Re-reserve a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(5*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); @@ -1554,7 +1554,7 @@ public void testReservationExchange() throws Exception { // Try to schedule on node_1 now, should *move* the reservation a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(9*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); @@ -1572,7 +1572,7 @@ public void testReservationExchange() throws Exception { ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); CSAssignment assignment = a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), 
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8*GB, a.getUsedResources().getMemory()); assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); @@ -1644,7 +1644,7 @@ public void testLocalityScheduling() throws Exception { // Start with off switch, shouldn't allocate due to delay scheduling assignment = a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(1, app_0.getSchedulingOpportunities(priority)); @@ -1653,7 +1653,7 @@ public void testLocalityScheduling() throws Exception { // Another off switch, shouldn't allocate due to delay scheduling assignment = a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(2, app_0.getSchedulingOpportunities(priority)); @@ -1662,7 +1662,7 @@ public void testLocalityScheduling() throws Exception { // Another off switch, shouldn't allocate due to delay scheduling assignment = a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(3, app_0.getSchedulingOpportunities(priority)); @@ -1672,7 +1672,7 @@ public void testLocalityScheduling() throws Exception { // Another off switch, now we should allocate // since missedOpportunities=3 and reqdContainers=3 assignment = 
a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(4, app_0.getSchedulingOpportunities(priority)); // should NOT reset @@ -1681,7 +1681,7 @@ public void testLocalityScheduling() throws Exception { // NODE_LOCAL - node_0 assignment = a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset @@ -1690,7 +1690,7 @@ public void testLocalityScheduling() throws Exception { // NODE_LOCAL - node_1 assignment = a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset @@ -1719,14 +1719,14 @@ public void testLocalityScheduling() throws Exception { // Shouldn't assign RACK_LOCAL yet assignment = a.assignContainers(clusterResource, node_3, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(1, app_0.getSchedulingOpportunities(priority)); assertEquals(2, app_0.getTotalRequiredResources(priority)); assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL // Should assign RACK_LOCAL now assignment = a.assignContainers(clusterResource, node_3, - new ResourceLimits(clusterResource)); + new 
ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0).allocate(eq(NodeType.RACK_LOCAL), eq(node_3), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset @@ -1808,7 +1808,7 @@ public void testApplicationPriorityScheduling() throws Exception { // Start with off switch, shouldn't allocate P1 due to delay scheduling // thus, no P2 either! a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), eq(priority_1), any(ResourceRequest.class), any(Container.class)); assertEquals(1, app_0.getSchedulingOpportunities(priority_1)); @@ -1821,7 +1821,7 @@ public void testApplicationPriorityScheduling() throws Exception { // Another off-switch, shouldn't allocate P1 due to delay scheduling // thus, no P2 either! 
a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), eq(priority_1), any(ResourceRequest.class), any(Container.class)); assertEquals(2, app_0.getSchedulingOpportunities(priority_1)); @@ -1833,7 +1833,7 @@ public void testApplicationPriorityScheduling() throws Exception { // Another off-switch, shouldn't allocate OFF_SWITCH P1 a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_2), eq(priority_1), any(ResourceRequest.class), any(Container.class)); assertEquals(3, app_0.getSchedulingOpportunities(priority_1)); @@ -1845,7 +1845,7 @@ public void testApplicationPriorityScheduling() throws Exception { // Now, DATA_LOCAL for P1 a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_0), eq(priority_1), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority_1)); @@ -1857,7 +1857,7 @@ public void testApplicationPriorityScheduling() throws Exception { // Now, OFF_SWITCH for P2 a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_1), eq(priority_1), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority_1)); @@ -1934,7 +1934,7 @@ public void testSchedulingConstraints() throws Exception { // NODE_LOCAL - node_0_1 a.assignContainers(clusterResource, node_0_0, - new 
ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_0_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset @@ -1943,7 +1943,7 @@ public void testSchedulingConstraints() throws Exception { // No allocation on node_1_0 even though it's node/rack local since // required(ANY) == 0 a.assignContainers(clusterResource, node_1_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); // Still zero @@ -1960,7 +1960,7 @@ public void testSchedulingConstraints() throws Exception { // No allocation on node_0_1 even though it's node/rack local since // required(rack_1) == 0 a.assignContainers(clusterResource, node_0_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(1, app_0.getSchedulingOpportunities(priority)); @@ -1968,7 +1968,7 @@ public void testSchedulingConstraints() throws Exception { // NODE_LOCAL - node_1 a.assignContainers(clusterResource, node_1_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset @@ -2221,7 +2221,7 @@ public void testLocalityConstraints() throws Exception { 
// node_0_1 // Shouldn't allocate since RR(rack_0) = null && RR(ANY) = relax: false a.assignContainers(clusterResource, node_0_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_0_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should be 0 @@ -2244,7 +2244,7 @@ public void testLocalityConstraints() throws Exception { // node_1_1 // Shouldn't allocate since RR(rack_1) = relax: false a.assignContainers(clusterResource, node_1_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_0_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should be 0 @@ -2275,7 +2275,7 @@ public void testLocalityConstraints() throws Exception { // node_1_1 // Shouldn't allocate since node_1_1 is blacklisted a.assignContainers(clusterResource, node_1_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_1_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should be 0 @@ -2304,7 +2304,7 @@ public void testLocalityConstraints() throws Exception { // node_1_1 // Shouldn't allocate since rack_1 is blacklisted a.assignContainers(clusterResource, node_1_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0, never()).allocate(any(NodeType.class), eq(node_1_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); 
assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should be 0 @@ -2331,7 +2331,7 @@ public void testLocalityConstraints() throws Exception { // Now, should allocate since RR(rack_1) = relax: true a.assignContainers(clusterResource, node_1_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0,never()).allocate(eq(NodeType.RACK_LOCAL), eq(node_1_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); @@ -2362,7 +2362,7 @@ public void testLocalityConstraints() throws Exception { // host_1_1: 7G a.assignContainers(clusterResource, node_1_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); assertEquals(0, app_0.getSchedulingOpportunities(priority)); @@ -2445,7 +2445,7 @@ public void testAllocateContainerOnNodeWithoutOffSwitchSpecified() try { a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); } catch (NullPointerException e) { Assert.fail("NPE when allocating container on node but " + "forget to set off-switch request should be handled"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java new file mode 100644 index 0000000000000..cf1b26f37e9cf --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java @@ -0,0 +1,1027 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeLabel; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockAM; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import 
com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; + +public class TestNodeLabelContainerAllocation { + private final int GB = 1024; + + private YarnConfiguration conf; + + RMNodeLabelsManager mgr; + + @Before + public void setUp() throws Exception { + conf = new YarnConfiguration(); + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + mgr = new NullRMNodeLabelsManager(); + mgr.init(conf); + } + + private Configuration getConfigurationWithQueueLabels(Configuration config) { + CapacitySchedulerConfiguration conf = + new CapacitySchedulerConfiguration(config); + + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"}); + conf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100); + conf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "y", 100); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + conf.setCapacity(A, 10); + conf.setMaximumCapacity(A, 15); + conf.setAccessibleNodeLabels(A, toSet("x")); + conf.setCapacityByLabel(A, "x", 100); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + conf.setCapacity(B, 20); + conf.setAccessibleNodeLabels(B, toSet("y")); + conf.setCapacityByLabel(B, "y", 100); + + final String C = CapacitySchedulerConfiguration.ROOT + ".c"; + conf.setCapacity(C, 70); + conf.setMaximumCapacity(C, 70); + conf.setAccessibleNodeLabels(C, RMNodeLabelsManager.EMPTY_STRING_SET); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + conf.setQueues(A, new String[] {"a1"}); + conf.setCapacity(A1, 100); + conf.setMaximumCapacity(A1, 100); + conf.setCapacityByLabel(A1, "x", 100); + + final String B1 = B + ".b1"; + conf.setQueues(B, new String[] {"b1"}); + conf.setCapacity(B1, 100); + conf.setMaximumCapacity(B1, 100); + conf.setCapacityByLabel(B1, "y", 100); + + final String C1 = C + ".c1"; + conf.setQueues(C, new 
String[] {"c1"}); + conf.setCapacity(C1, 100); + conf.setMaximumCapacity(C1, 100); + + return conf; + } + + private void checkTaskContainersHost(ApplicationAttemptId attemptId, + ContainerId containerId, ResourceManager rm, String host) { + YarnScheduler scheduler = rm.getRMContext().getScheduler(); + SchedulerAppReport appReport = scheduler.getSchedulerAppInfo(attemptId); + + Assert.assertTrue(appReport.getLiveContainers().size() > 0); + for (RMContainer c : appReport.getLiveContainers()) { + if (c.getContainerId().equals(containerId)) { + Assert.assertEquals(host, c.getAllocatedNode().getHost()); + } + } + } + + @SuppressWarnings("unchecked") + private <E> Set<E> toSet(E... elements) { + Set<E> set = Sets.newHashSet(elements); + return set; + } + + + @Test (timeout = 300000) + public void testContainerAllocationWithSingleUserLimits() throws Exception { + final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager(); + mgr.init(conf); + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"), + NodeId.newInstance("h2", 0), toSet("y"))); + + // inject node label manager + MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x + rm1.registerNode("h2:1234", 8000); // label = y + MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty> + + // launch an app to queue a1 (label = x), and check all container will + // be allocated in h1 + RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // A has only 10% of x, so it can only allocate one container in label=empty + ContainerId containerId = + ContainerId.newContainerId(am1.getApplicationAttemptId(), 
2); + am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), ""); + Assert.assertTrue(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + // Cannot allocate 2nd label=empty container + containerId = + ContainerId.newContainerId(am1.getApplicationAttemptId(), 3); + am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), ""); + Assert.assertFalse(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + + // A has default user limit = 100, so it can use all resource in label = x + // We can allocate floor(8000 / 1024) = 7 containers + for (int id = 3; id <= 8; id++) { + containerId = + ContainerId.newContainerId(am1.getApplicationAttemptId(), id); + am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x"); + Assert.assertTrue(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + } + rm1.close(); + } + + @Test(timeout = 300000) + public void testContainerAllocateWithComplexLabels() throws Exception { + /* + * Queue structure: + * root (*) + * ________________ + * / \ + * a x(100%), y(50%) b y(50%), z(100%) + * ________________ ______________ + * / / \ + * a1 (x,y) b1(no) b2(y,z) + * 100% y = 100%, z = 100% + * + * Node structure: + * h1 : x + * h2 : y + * h3 : y + * h4 : z + * h5 : NO + * + * Total resource: + * x: 4G + * y: 6G + * z: 2G + * *: 2G + * + * Resource of + * a1: x=4G, y=3G, NO=0.2G + * b1: NO=0.9G (max=1G) + * b2: y=3, z=2G, NO=0.9G (max=1G) + * + * Each node can only allocate two containers + */ + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y", "z")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), + toSet("x"), NodeId.newInstance("h2", 0), toSet("y"), + NodeId.newInstance("h3", 0), toSet("y"), NodeId.newInstance("h4", 0), + toSet("z"), NodeId.newInstance("h5", 0), + RMNodeLabelsManager.EMPTY_STRING_SET)); + + // inject node label manager + MockRM rm1 = new MockRM(TestUtils.getComplexConfigurationWithQueueLabels(conf)) { + 
@Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 2048); + MockNM nm2 = rm1.registerNode("h2:1234", 2048); + MockNM nm3 = rm1.registerNode("h3:1234", 2048); + MockNM nm4 = rm1.registerNode("h4:1234", 2048); + MockNM nm5 = rm1.registerNode("h5:1234", 2048); + + ContainerId containerId; + + // launch an app to queue a1 (label = x), and check all container will + // be allocated in h1 + RMApp app1 = rm1.submitApp(1024, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // request a container (label = y). can be allocated on nm2 + am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y"); + containerId = + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2L); + Assert.assertTrue(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // launch an app to queue b1 (label = y), and check all container will + // be allocated in h5 + RMApp app2 = rm1.submitApp(1024, "app", "user", null, "b1"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm5); + + // request a container for AM, will succeed + // and now b1's queue capacity will be used, cannot allocate more containers + // (Maximum capacity reached) + am2.allocate("*", 1024, 1, new ArrayList<ContainerId>()); + containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm4, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertFalse(rm1.waitForState(nm5, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + + // launch an app to queue b2 + RMApp app3 = rm1.submitApp(1024, "app", "user", null, "b2"); + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm5); + + // request a container. 
try to allocate on nm1 (label = x) and nm3 (label = + // y,z). Will successfully allocate on nm3 + am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y"); + containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h3"); + + // try to allocate container (request label = z) on nm4 (label = y,z). + // Will successfully allocate on nm4 only. + am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "z"); + containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 3L); + Assert.assertTrue(rm1.waitForState(nm4, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h4"); + + rm1.close(); + } + + @Test (timeout = 120000) + public void testContainerAllocateWithLabels() throws Exception { + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"), + NodeId.newInstance("h2", 0), toSet("y"))); + + // inject node label manager + MockRM rm1 = new MockRM(getConfigurationWithQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y + MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty> + + ContainerId containerId; + + // launch an app to queue a1 (label = x), and check all container will + // be allocated in h1 + RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm3); + + // 
request a container. + am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x"); + containerId = + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + // launch an app to queue b1 (label = y), and check all container will + // be allocated in h2 + RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm3); + + // request a container. + am2.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y"); + containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // launch an app to queue c1 (label = ""), and check all container will + // be allocated in h3 + RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1"); + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3); + + // request a container. + am3.allocate("*", 1024, 1, new ArrayList<ContainerId>()); + containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h3"); + + rm1.close(); + } + + @Test (timeout = 120000) + public void testContainerAllocateWithDefaultQueueLabels() throws Exception { + // This test is pretty much similar to testContainerAllocateWithLabel. 
+ // Difference is, this test doesn't specify label expression in ResourceRequest, + // instead, it uses default queue label expression + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"), + NodeId.newInstance("h2", 0), toSet("y"))); + + // inject node label manager + MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y + MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty> + + ContainerId containerId; + + // launch an app to queue a1 (label = x), and check all container will + // be allocated in h1 + RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // request a container. + am1.allocate("*", 1024, 1, new ArrayList<ContainerId>()); + containerId = + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + // launch an app to queue b1 (label = y), and check all container will + // be allocated in h2 + RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2); + + // request a container. 
+ am2.allocate("*", 1024, 1, new ArrayList<ContainerId>()); + containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // launch an app to queue c1 (label = ""), and check all container will + // be allocated in h3 + RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1"); + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3); + + // request a container. + am3.allocate("*", 1024, 1, new ArrayList<ContainerId>()); + containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h3"); + + rm1.close(); + } + + private void checkPendingResource(MockRM rm, int priority, + ApplicationAttemptId attemptId, int memory) { + CapacityScheduler cs = (CapacityScheduler) rm.getRMContext().getScheduler(); + FiCaSchedulerApp app = cs.getApplicationAttempt(attemptId); + ResourceRequest rr = + app.getAppSchedulingInfo().getResourceRequest( + Priority.newInstance(priority), "*"); + Assert.assertEquals(memory, + rr.getCapability().getMemory() * rr.getNumContainers()); + } + + private void checkLaunchedContainerNumOnNode(MockRM rm, NodeId nodeId, + int numContainers) { + CapacityScheduler cs = (CapacityScheduler) rm.getRMContext().getScheduler(); + SchedulerNode node = cs.getSchedulerNode(nodeId); + Assert.assertEquals(numContainers, node.getNumContainers()); + } + + @Test + public void testPreferenceOfNeedyAppsTowardsNodePartitions() throws Exception { + /** + * Test case: Submit two application to 
a queue (app1 first then app2), app1 + * asked for no-label, app2 asked for label=y, when node1 has label=y + * doing heart beat, app2 will get allocation first, even if app2 submits later + * than app1 + */ + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); + // Makes y to be non-exclusive node labels + mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("y", false))); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("y"))); + + // inject node label manager + MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = y + MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty> + + // launch an app to queue b1 (label = y), AM container should be launched in nm2 + RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2); + + // launch another app to queue b1 (label = y), AM container should be launched in nm2 + RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "b1"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2); + + // request container and nm1 do heartbeat (nm1 has label=y), note that app1 + // request non-labeled container, and app2 request labeled container, app2 + // will get allocated first even if app1 submitted first.
+ am1.allocate("*", 1 * GB, 8, new ArrayList<ContainerId>()); + am2.allocate("*", 1 * GB, 8, new ArrayList<ContainerId>(), "y"); + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); + + // Do node heartbeats many times + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + } + + // App2 will get preference to be allocated on node1, and node1 will be all + // used by App2. + FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1.getApplicationAttemptId()); + FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(am2.getApplicationAttemptId()); + // app1 get nothing in nm1 (partition=y) + checkNumOfContainersInAnAppOnGivenNode(0, nm1.getNodeId(), schedulerApp1); + checkNumOfContainersInAnAppOnGivenNode(9, nm2.getNodeId(), schedulerApp1); + // app2 get all resource in nm1 (partition=y) + checkNumOfContainersInAnAppOnGivenNode(8, nm1.getNodeId(), schedulerApp2); + checkNumOfContainersInAnAppOnGivenNode(1, nm2.getNodeId(), schedulerApp2); + + rm1.close(); + } + + private void checkNumOfContainersInAnAppOnGivenNode(int expectedNum, + NodeId nodeId, FiCaSchedulerApp app) { + int num = 0; + for (RMContainer container : app.getLiveContainers()) { + if (container.getAllocatedNode().equals(nodeId)) { + num++; + } + } + Assert.assertEquals(expectedNum, num); + } + + @Test + public void + testPreferenceOfNeedyPrioritiesUnderSameAppTowardsNodePartitions() + throws Exception { + /** + * Test case: Submit one application, it asks label="" in priority=1 and + * label="x" in priority=2, when a node with label=x heartbeat, priority=2 + * will get allocation first even if there're pending resource in priority=1 + */ + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); + // Makes y to be non-exclusive 
node labels + mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("y", false))); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("y"))); + + // inject node label manager + MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = y + MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty> + + ContainerId nextContainerId; + + // launch an app to queue b1 (label = y), AM container should be launched in nm2 + RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2); + + // request containers from am1, priority=1 asks for "" and priority=2 asks + // for "y", "y" container should be allocated first + nextContainerId = + ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); + am1.allocate("*", 1 * GB, 1, 1, new ArrayList<ContainerId>(), ""); + am1.allocate("*", 1 * GB, 1, 2, new ArrayList<ContainerId>(), "y"); + Assert.assertTrue(rm1.waitForState(nm1, nextContainerId, + RMContainerState.ALLOCATED, 10 * 1000)); + + // Check pending resource for am1, priority=1 doesn't get allocated before + // priority=2 allocated + checkPendingResource(rm1, 1, am1.getApplicationAttemptId(), 1 * GB); + checkPendingResource(rm1, 2, am1.getApplicationAttemptId(), 0 * GB); + + rm1.close(); + } + + @Test + public void testNonLabeledResourceRequestGetPreferrenceToNonLabeledNode() + throws Exception { + /** + * Test case: Submit one application, it asks 6 label="" containers, NM1 + * with label=x and NM2 has no label, NM1/NM2 doing heartbeat together. Even + * if NM1 has idle resource, containers are all allocated to NM2 since + * non-labeled request should get allocation on non-labeled nodes first.
+ */ + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); + // Makes x to be non-exclusive node labels + mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("x", false))); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"))); + + // inject node label manager + MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty> + + ContainerId nextContainerId; + + // launch an app to queue b1 (label = y), AM container should be launched in nm2 + RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2); + + // request containers from am1, priority=1 asks for "" * 6 (id from 2 to 7), + // nm1/nm2 do + // heartbeat at the same time, check containers are always allocated to nm2. + // This is to verify when there's resource available in non-labeled + // partition, non-labeled resource should allocate to non-labeled partition + // first.
+ am1.allocate("*", 1 * GB, 6, 1, new ArrayList<ContainerId>(), ""); + for (int i = 2; i < 2 + 6; i++) { + nextContainerId = + ContainerId.newContainerId(am1.getApplicationAttemptId(), i); + Assert.assertTrue(rm1.waitForState(Arrays.asList(nm1, nm2), + nextContainerId, RMContainerState.ALLOCATED, 10 * 1000)); + } + // no more container allocated on nm1 + checkLaunchedContainerNumOnNode(rm1, nm1.getNodeId(), 0); + // all 7 (1 AM container + 6 task container) containers allocated on nm2 + checkLaunchedContainerNumOnNode(rm1, nm2.getNodeId(), 7); + + rm1.close(); + } + + @Test + public void testPreferenceOfQueuesTowardsNodePartitions() + throws Exception { + /** + * Test case: have a following queue structure: + * + * <pre> + * root + * / | \ + * a b c + * / \ / \ / \ + * a1 a2 b1 b2 c1 c2 + * (x) (x) (x) + * </pre> + * + * Only a1, b1, c1 can access label=x, and their default label=x Each each + * has one application, asks for 5 containers. NM1 has label=x + * + * NM1/NM2 doing heartbeat for 15 times, it should allocate all 15 + * containers with label=x + */ + + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(this.conf); + + // Define top-level queues + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"}); + csConf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + csConf.setCapacity(A, 33); + csConf.setAccessibleNodeLabels(A, toSet("x")); + csConf.setCapacityByLabel(A, "x", 33); + csConf.setQueues(A, new String[] {"a1", "a2"}); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + csConf.setCapacity(B, 33); + csConf.setAccessibleNodeLabels(B, toSet("x")); + csConf.setCapacityByLabel(B, "x", 33); + csConf.setQueues(B, new String[] {"b1", "b2"}); + + final String C = CapacitySchedulerConfiguration.ROOT + ".c"; + csConf.setCapacity(C, 34); + csConf.setAccessibleNodeLabels(C, toSet("x")); + 
csConf.setCapacityByLabel(C, "x", 34); + csConf.setQueues(C, new String[] {"c1", "c2"}); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + csConf.setCapacity(A1, 50); + csConf.setCapacityByLabel(A1, "x", 100); + csConf.setDefaultNodeLabelExpression(A1, "x"); + + final String A2 = A + ".a2"; + csConf.setCapacity(A2, 50); + csConf.setCapacityByLabel(A2, "x", 0); + + final String B1 = B + ".b1"; + csConf.setCapacity(B1, 50); + csConf.setCapacityByLabel(B1, "x", 100); + csConf.setDefaultNodeLabelExpression(B1, "x"); + + final String B2 = B + ".b2"; + csConf.setCapacity(B2, 50); + csConf.setCapacityByLabel(B2, "x", 0); + + final String C1 = C + ".c1"; + csConf.setCapacity(C1, 50); + csConf.setCapacityByLabel(C1, "x", 100); + csConf.setDefaultNodeLabelExpression(C1, "x"); + + final String C2 = C + ".c2"; + csConf.setCapacity(C2, 50); + csConf.setCapacityByLabel(C2, "x", 0); + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); + // Makes x to be non-exclusive node labels + mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("x", false))); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"))); + + // inject node label manager + MockRM rm1 = new MockRM(csConf) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty> + + // app1 -> a1 + RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // app2 -> a2 + RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "a2"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2); + + // app3 -> b1 + RMApp app3 = rm1.submitApp(1 * GB, "app", "user", null, "b1"); + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1); + + // app4 -> b2 + RMApp app4 = 
rm1.submitApp(1 * GB, "app", "user", null, "b2"); + MockAM am4 = MockRM.launchAndRegisterAM(app4, rm1, nm2); + + // app5 -> c1 + RMApp app5 = rm1.submitApp(1 * GB, "app", "user", null, "c1"); + MockAM am5 = MockRM.launchAndRegisterAM(app5, rm1, nm1); + + // app6 -> b2 + RMApp app6 = rm1.submitApp(1 * GB, "app", "user", null, "c2"); + MockAM am6 = MockRM.launchAndRegisterAM(app6, rm1, nm2); + + // Each application request 5 * 1GB container + am1.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>()); + am2.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>()); + am3.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>()); + am4.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>()); + am5.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>()); + am6.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>()); + + // NM1 do 15 heartbeats + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + for (int i = 0; i < 15; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + } + + // NM1 get 15 new containers (total is 18, 15 task containers and 3 AM + // containers) + checkLaunchedContainerNumOnNode(rm1, nm1.getNodeId(), 18); + + // Check pending resource each application + // APP1/APP3/APP5 get satisfied, and APP2/APP2/APP3 get nothing. 
+ checkPendingResource(rm1, 1, am1.getApplicationAttemptId(), 0 * GB); + checkPendingResource(rm1, 1, am2.getApplicationAttemptId(), 5 * GB); + checkPendingResource(rm1, 1, am3.getApplicationAttemptId(), 0 * GB); + checkPendingResource(rm1, 1, am4.getApplicationAttemptId(), 5 * GB); + checkPendingResource(rm1, 1, am5.getApplicationAttemptId(), 0 * GB); + checkPendingResource(rm1, 1, am6.getApplicationAttemptId(), 5 * GB); + + rm1.close(); + } + + @Test + public void testQueuesWithoutAccessUsingPartitionedNodes() throws Exception { + /** + * Test case: have a following queue structure: + * + * <pre> + * root + * / \ + * a b + * (x) + * </pre> + * + * Only a can access label=x, two nodes in the cluster, n1 has x and n2 has + * no-label. + * + * When user-limit-factor=5, submit one application in queue b and request + * for infinite containers should be able to use up all cluster resources. + */ + + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(this.conf); + + // Define top-level queues + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"}); + csConf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + csConf.setCapacity(A, 50); + csConf.setAccessibleNodeLabels(A, toSet("x")); + csConf.setCapacityByLabel(A, "x", 100); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + csConf.setCapacity(B, 50); + csConf.setAccessibleNodeLabels(B, new HashSet<String>()); + csConf.setUserLimitFactor(B, 5); + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x")); + // Makes x to be non-exclusive node labels + mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("x", false))); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"))); + + // inject node label manager + MockRM rm1 = new MockRM(csConf) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + 
+ rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB); // label = <empty> + + // app1 -> b + RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2); + + // Each application request 5 * 1GB container + am1.allocate("*", 1 * GB, 50, new ArrayList<ContainerId>()); + + // NM1 do 50 heartbeats + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); + + SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId()); + + // How much cycles we waited to be allocated when available resource only on + // partitioned node + int cycleWaited = 0; + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + if (schedulerNode1.getNumContainers() == 0) { + cycleWaited++; + } + } + // We will will 10 cycles before get allocated on partitioned node + // NM2 can allocate 10 containers totally, exclude already allocated AM + // container, we will wait 9 to fulfill non-partitioned node, and need wait + // one more cycle before allocating to non-partitioned node + Assert.assertEquals(10, cycleWaited); + + // Both NM1/NM2 launched 10 containers, cluster resource is exhausted + checkLaunchedContainerNumOnNode(rm1, nm1.getNodeId(), 10); + checkLaunchedContainerNumOnNode(rm1, nm2.getNodeId(), 10); + + rm1.close(); + } + + @Test + public void testAMContainerAllocationWillAlwaysBeExclusive() + throws Exception { + /** + * Test case: Submit one application without partition, trying to allocate a + * node has partition=x, it should fail to allocate since AM container will + * always respect exclusivity for partitions + */ + + // set node -> label + 
mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); + // Makes x to be non-exclusive node labels + mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("x", false))); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"))); + + // inject node label manager + MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x + + // launch an app to queue b1 (label = y), AM container should be launched in nm3 + rm1.submitApp(1 * GB, "app", "user", null, "b1"); + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + + // Heartbeat for many times, app1 should get nothing + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + } + + Assert.assertEquals(0, cs.getSchedulerNode(nm1.getNodeId()) + .getNumContainers()); + + rm1.close(); + } + + @Test + public void + testQueueMaxCapacitiesWillNotBeHonoredWhenNotRespectingExclusivity() + throws Exception { + /** + * Test case: have a following queue structure: + * + * <pre> + * root + * / \ + * a b + * (x) (x) + * </pre> + * + * a/b can access x, both of them has max-capacity-on-x = 50 + * + * When doing non-exclusive allocation, app in a (or b) can use 100% of x + * resource. 
+ */ + + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(this.conf); + + // Define top-level queues + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] { "a", + "b" }); + csConf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + csConf.setCapacity(A, 50); + csConf.setAccessibleNodeLabels(A, toSet("x")); + csConf.setCapacityByLabel(A, "x", 50); + csConf.setMaximumCapacityByLabel(A, "x", 50); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + csConf.setCapacity(B, 50); + csConf.setAccessibleNodeLabels(B, toSet("x")); + csConf.setCapacityByLabel(B, "x", 50); + csConf.setMaximumCapacityByLabel(B, "x", 50); + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x")); + // Makes x to be non-exclusive node labels + mgr.updateNodeLabels(Arrays.asList(NodeLabel.newInstance("x", false))); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"))); + + // inject node label manager + MockRM rm1 = new MockRM(csConf) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB); // label = <empty> + + // app1 -> a + RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2); + + // app1 asks for 10 partition= containers + am1.allocate("*", 1 * GB, 10, new ArrayList<ContainerId>()); + + // NM1 do 50 heartbeats + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + + SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId()); + + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + } + + // app1 gets 
all resource in partition=x + Assert.assertEquals(10, schedulerNode1.getNumContainers()); + + rm1.close(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java index 7da1c97fec0ef..52d0bc1241bed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java @@ -23,7 +23,6 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; @@ -45,6 +44,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.YarnAuthorizationProvider; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; @@ -146,7 +146,7 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { final Resource allocatedResource = Resources.createResource(allocation); if (queue instanceof ParentQueue) { 
((ParentQueue)queue).allocateResource(clusterResource, - allocatedResource, null); + allocatedResource, RMNodeLabelsManager.NO_LABEL); } else { FiCaSchedulerApp app1 = getMockApplication(0, ""); ((LeafQueue)queue).allocateResource(clusterResource, app1, @@ -157,7 +157,7 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { if (allocation > 0) { doReturn(new CSAssignment(Resources.none(), type)).when(queue) .assignContainers(eq(clusterResource), eq(node), - any(ResourceLimits.class)); + any(ResourceLimits.class), any(SchedulingMode.class)); // Mock the node's resource availability Resource available = node.getAvailableResource(); @@ -168,7 +168,7 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { return new CSAssignment(allocatedResource, type); } }).when(queue).assignContainers(eq(clusterResource), eq(node), - any(ResourceLimits.class)); + any(ResourceLimits.class), any(SchedulingMode.class)); } private float computeQueueAbsoluteUsedCapacity(CSQueue queue, @@ -228,11 +228,16 @@ public void testSingleLevelQueues() throws Exception { LeafQueue a = (LeafQueue)queues.get(A); LeafQueue b = (LeafQueue)queues.get(B); + a.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); + b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); + queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage() + .incPending(Resources.createResource(1 * GB)); + // Simulate B returning a container on node_0 stubQueueAllocation(a, clusterResource, node_0, 0*GB); stubQueueAllocation(b, clusterResource, node_0, 1*GB); root.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyQueueMetrics(a, 0*GB, clusterResource); verifyQueueMetrics(b, 1*GB, clusterResource); @@ -240,12 +245,12 @@ public void testSingleLevelQueues() throws Exception { stubQueueAllocation(a, clusterResource, node_1, 
2*GB); stubQueueAllocation(b, clusterResource, node_1, 1*GB); root.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); InOrder allocationOrder = inOrder(a, b); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); verifyQueueMetrics(a, 2*GB, clusterResource); verifyQueueMetrics(b, 2*GB, clusterResource); @@ -254,12 +259,12 @@ public void testSingleLevelQueues() throws Exception { stubQueueAllocation(a, clusterResource, node_0, 1*GB); stubQueueAllocation(b, clusterResource, node_0, 2*GB); root.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); allocationOrder = inOrder(b, a); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); verifyQueueMetrics(a, 3*GB, clusterResource); verifyQueueMetrics(b, 4*GB, clusterResource); @@ -268,12 +273,12 @@ public void testSingleLevelQueues() throws Exception { stubQueueAllocation(a, clusterResource, node_0, 0*GB); stubQueueAllocation(b, clusterResource, node_0, 4*GB); root.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), 
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); allocationOrder = inOrder(b, a); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); verifyQueueMetrics(a, 3*GB, clusterResource); verifyQueueMetrics(b, 8*GB, clusterResource); @@ -282,12 +287,12 @@ public void testSingleLevelQueues() throws Exception { stubQueueAllocation(a, clusterResource, node_1, 1*GB); stubQueueAllocation(b, clusterResource, node_1, 1*GB); root.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); allocationOrder = inOrder(a, b); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); verifyQueueMetrics(a, 4*GB, clusterResource); verifyQueueMetrics(b, 9*GB, clusterResource); } @@ -448,16 +453,27 @@ public void testMultiLevelQueues() throws Exception { // Start testing CSQueue a = queues.get(A); + a.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); CSQueue b = queues.get(B); + b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); CSQueue c = queues.get(C); + c.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); CSQueue d = queues.get(D); + d.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); CSQueue a1 = queues.get(A1); + 
a1.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); CSQueue a2 = queues.get(A2); + a2.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); CSQueue b1 = queues.get(B1); + b1.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); CSQueue b2 = queues.get(B2); + b2.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); CSQueue b3 = queues.get(B3); + b3.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); + queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage() + .incPending(Resources.createResource(1 * GB)); // Simulate C returning a container on node_0 stubQueueAllocation(a, clusterResource, node_0, 0*GB); @@ -465,7 +481,7 @@ public void testMultiLevelQueues() throws Exception { stubQueueAllocation(c, clusterResource, node_0, 1*GB); stubQueueAllocation(d, clusterResource, node_0, 0*GB); root.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyQueueMetrics(a, 0*GB, clusterResource); verifyQueueMetrics(b, 0*GB, clusterResource); verifyQueueMetrics(c, 1*GB, clusterResource); @@ -478,7 +494,7 @@ public void testMultiLevelQueues() throws Exception { stubQueueAllocation(b2, clusterResource, node_1, 4*GB); stubQueueAllocation(c, clusterResource, node_1, 0*GB); root.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyQueueMetrics(a, 0*GB, clusterResource); verifyQueueMetrics(b, 4*GB, clusterResource); verifyQueueMetrics(c, 1*GB, clusterResource); @@ -490,14 +506,14 @@ public void testMultiLevelQueues() throws Exception { stubQueueAllocation(b3, clusterResource, node_0, 2*GB); stubQueueAllocation(c, clusterResource, node_0, 2*GB); root.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new 
ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); InOrder allocationOrder = inOrder(a, c, b); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(c).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); verifyQueueMetrics(a, 1*GB, clusterResource); verifyQueueMetrics(b, 6*GB, clusterResource); verifyQueueMetrics(c, 3*GB, clusterResource); @@ -517,16 +533,16 @@ public void testMultiLevelQueues() throws Exception { stubQueueAllocation(b1, clusterResource, node_2, 1*GB); stubQueueAllocation(c, clusterResource, node_2, 1*GB); root.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); allocationOrder = inOrder(a, a2, a1, b, c); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(a2).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(c).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + 
any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); verifyQueueMetrics(a, 3*GB, clusterResource); verifyQueueMetrics(b, 8*GB, clusterResource); verifyQueueMetrics(c, 4*GB, clusterResource); @@ -622,12 +638,16 @@ public void testOffSwitchScheduling() throws Exception { // Start testing LeafQueue a = (LeafQueue)queues.get(A); LeafQueue b = (LeafQueue)queues.get(B); + a.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); + b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); + queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage() + .incPending(Resources.createResource(1 * GB)); // Simulate B returning a container on node_0 stubQueueAllocation(a, clusterResource, node_0, 0*GB, NodeType.OFF_SWITCH); stubQueueAllocation(b, clusterResource, node_0, 1*GB, NodeType.OFF_SWITCH); root.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyQueueMetrics(a, 0*GB, clusterResource); verifyQueueMetrics(b, 1*GB, clusterResource); @@ -636,12 +656,12 @@ public void testOffSwitchScheduling() throws Exception { stubQueueAllocation(a, clusterResource, node_1, 2*GB, NodeType.RACK_LOCAL); stubQueueAllocation(b, clusterResource, node_1, 1*GB, NodeType.OFF_SWITCH); root.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); InOrder allocationOrder = inOrder(a, b); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); verifyQueueMetrics(a, 2*GB, 
clusterResource); verifyQueueMetrics(b, 2*GB, clusterResource); @@ -651,12 +671,12 @@ public void testOffSwitchScheduling() throws Exception { stubQueueAllocation(a, clusterResource, node_0, 1*GB, NodeType.NODE_LOCAL); stubQueueAllocation(b, clusterResource, node_0, 2*GB, NodeType.OFF_SWITCH); root.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); allocationOrder = inOrder(b, a); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); verifyQueueMetrics(a, 2*GB, clusterResource); verifyQueueMetrics(b, 4*GB, clusterResource); @@ -691,12 +711,19 @@ public void testOffSwitchSchedulingMultiLevelQueues() throws Exception { // Start testing LeafQueue b3 = (LeafQueue)queues.get(B3); LeafQueue b2 = (LeafQueue)queues.get(B2); + b2.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); + b3.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); + queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage() + .incPending(Resources.createResource(1 * GB)); + + CSQueue b = queues.get(B); + b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB)); // Simulate B3 returning a container on node_0 stubQueueAllocation(b2, clusterResource, node_0, 0*GB, NodeType.OFF_SWITCH); stubQueueAllocation(b3, clusterResource, node_0, 1*GB, NodeType.OFF_SWITCH); root.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); verifyQueueMetrics(b2, 0*GB, clusterResource); 
verifyQueueMetrics(b3, 1*GB, clusterResource); @@ -705,12 +732,12 @@ public void testOffSwitchSchedulingMultiLevelQueues() throws Exception { stubQueueAllocation(b2, clusterResource, node_1, 1*GB, NodeType.RACK_LOCAL); stubQueueAllocation(b3, clusterResource, node_1, 1*GB, NodeType.OFF_SWITCH); root.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); InOrder allocationOrder = inOrder(b2, b3); allocationOrder.verify(b2).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(b3).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); verifyQueueMetrics(b2, 1*GB, clusterResource); verifyQueueMetrics(b3, 2*GB, clusterResource); @@ -720,12 +747,12 @@ public void testOffSwitchSchedulingMultiLevelQueues() throws Exception { stubQueueAllocation(b2, clusterResource, node_0, 1*GB, NodeType.NODE_LOCAL); stubQueueAllocation(b3, clusterResource, node_0, 1*GB, NodeType.OFF_SWITCH); root.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); allocationOrder = inOrder(b3, b2); allocationOrder.verify(b3).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); allocationOrder.verify(b2).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits(), any(SchedulingMode.class)); verifyQueueMetrics(b2, 1*GB, clusterResource); verifyQueueMetrics(b3, 3*GB, clusterResource); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java index e8a8243203365..47be61809881f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java @@ -48,10 +48,10 @@ import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; @@ -266,7 +266,7 @@ public void testReservation() throws Exception { // Start testing... 
// Only AM a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -278,7 +278,7 @@ public void testReservation() throws Exception { // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -290,7 +290,7 @@ public void testReservation() throws Exception { // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -305,7 +305,7 @@ public void testReservation() throws Exception { // try to assign reducer (5G on node 0 and should reserve) a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); @@ -321,7 +321,7 @@ public void testReservation() throws Exception { // assign reducer to node 2 a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), 
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(18 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); @@ -338,7 +338,7 @@ public void testReservation() throws Exception { // node_1 heartbeat and unreserves from node_0 in order to allocate // on node_1 a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(18 * GB, a.getUsedResources().getMemory()); assertEquals(18 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -422,7 +422,7 @@ public void testReservationNoContinueLook() throws Exception { // Start testing... // Only AM a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -434,7 +434,7 @@ public void testReservationNoContinueLook() throws Exception { // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -446,7 +446,7 @@ public void testReservationNoContinueLook() throws Exception { // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8 * GB, a.getUsedResources().getMemory()); 
assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -461,7 +461,7 @@ public void testReservationNoContinueLook() throws Exception { // try to assign reducer (5G on node 0 and should reserve) a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); @@ -477,7 +477,7 @@ public void testReservationNoContinueLook() throws Exception { // assign reducer to node 2 a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(18 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); @@ -494,7 +494,7 @@ public void testReservationNoContinueLook() throws Exception { // node_1 heartbeat and won't unreserve from node_0, potentially stuck // if AM doesn't handle a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(18 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); @@ -570,7 +570,7 @@ public void testAssignContainersNeedToUnreserve() throws Exception { // Start testing... 
// Only AM a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -581,7 +581,7 @@ public void testAssignContainersNeedToUnreserve() throws Exception { // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -592,7 +592,7 @@ public void testAssignContainersNeedToUnreserve() throws Exception { // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -606,7 +606,7 @@ public void testAssignContainersNeedToUnreserve() throws Exception { // try to assign reducer (5G on node 0 and should reserve) a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); @@ -621,7 +621,7 @@ public void testAssignContainersNeedToUnreserve() throws Exception { // could allocate but told need to unreserve first a.assignContainers(clusterResource, node_1, - new 
ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -823,7 +823,7 @@ public void testAssignToQueue() throws Exception { // Start testing... // Only AM a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -834,7 +834,7 @@ public void testAssignToQueue() throws Exception { // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -845,7 +845,7 @@ public void testAssignToQueue() throws Exception { // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -860,15 +860,16 @@ public void testAssignToQueue() throws Exception { Resource capability = Resources.createResource(32 * GB, 0); boolean res = a.canAssignToThisQueue(clusterResource, - CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( - clusterResource), capability, Resources.none()); + RMNodeLabelsManager.NO_LABEL, new 
ResourceLimits( + clusterResource), capability, Resources.none(), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertFalse(res); // now add in reservations and make sure it continues if config set // allocate to queue so that the potential new capacity is greater then // absoluteMaxCapacity a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); @@ -881,16 +882,17 @@ CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( capability = Resources.createResource(5 * GB, 0); res = a.canAssignToThisQueue(clusterResource, - CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( - clusterResource), capability, Resources - .createResource(5 * GB)); + RMNodeLabelsManager.NO_LABEL, new ResourceLimits( + clusterResource), capability, Resources.createResource(5 * GB), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertTrue(res); // tell to not check reservations res = a.canAssignToThisQueue(clusterResource, - CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( - clusterResource), capability, Resources.none()); + RMNodeLabelsManager.NO_LABEL, new ResourceLimits( + clusterResource), capability, Resources.none(), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertFalse(res); refreshQueuesTurnOffReservationsContLook(a, csConf); @@ -899,15 +901,16 @@ CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( // in since feature is off res = a.canAssignToThisQueue(clusterResource, - CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( - clusterResource), capability, Resources.none()); + RMNodeLabelsManager.NO_LABEL, new ResourceLimits( + clusterResource), capability, Resources.none(), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertFalse(res); 
res = a.canAssignToThisQueue(clusterResource, - CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( - clusterResource), capability, Resources - .createResource(5 * GB)); + RMNodeLabelsManager.NO_LABEL, new ResourceLimits( + clusterResource), capability, Resources.createResource(5 * GB), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertFalse(res); } @@ -1008,7 +1011,7 @@ public void testAssignToUser() throws Exception { // Start testing... // Only AM a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -1019,7 +1022,7 @@ public void testAssignToUser() throws Exception { // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -1030,7 +1033,7 @@ public void testAssignToUser() throws Exception { // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -1044,7 +1047,7 @@ public void testAssignToUser() throws Exception { // allocate to queue so that the potential new capacity is greater then // absoluteMaxCapacity a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new 
ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(5 * GB, app_0.getCurrentReservation().getMemory()); @@ -1059,18 +1062,18 @@ public void testAssignToUser() throws Exception { // set limit so subtrace reservations it can continue Resource limit = Resources.createResource(12 * GB, 0); boolean res = a.canAssignToUser(clusterResource, user_0, limit, app_0, - true, null); + true, ""); assertTrue(res); // tell it not to check for reservations and should fail as already over // limit - res = a.canAssignToUser(clusterResource, user_0, limit, app_0, false, null); + res = a.canAssignToUser(clusterResource, user_0, limit, app_0, false, ""); assertFalse(res); refreshQueuesTurnOffReservationsContLook(a, csConf); // should now return false since feature off - res = a.canAssignToUser(clusterResource, user_0, limit, app_0, true, null); + res = a.canAssignToUser(clusterResource, user_0, limit, app_0, true, ""); assertFalse(res); } @@ -1143,7 +1146,7 @@ public void testReservationsNoneAvailable() throws Exception { // Start testing... 
// Only AM a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -1155,7 +1158,7 @@ public void testReservationsNoneAvailable() throws Exception { // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -1167,7 +1170,7 @@ public void testReservationsNoneAvailable() throws Exception { // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -1183,7 +1186,7 @@ public void testReservationsNoneAvailable() throws Exception { // some resource. Even with continous reservation looking, we don't allow // unreserve resource to reserve container. a.assignContainers(clusterResource, node_0, - new ResourceLimits(Resources.createResource(10 * GB))); + new ResourceLimits(Resources.createResource(10 * GB)), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -1199,7 +1202,7 @@ public void testReservationsNoneAvailable() throws Exception { // used (8G) + required (5G). 
It will not reserved since it has to unreserve // some resource. Unfortunately, there's nothing to unreserve. a.assignContainers(clusterResource, node_2, - new ResourceLimits(Resources.createResource(10 * GB))); + new ResourceLimits(Resources.createResource(10 * GB)), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -1213,7 +1216,7 @@ public void testReservationsNoneAvailable() throws Exception { // let it assign 5G to node_2 a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); @@ -1226,7 +1229,7 @@ public void testReservationsNoneAvailable() throws Exception { // reserve 8G node_0 a.assignContainers(clusterResource, node_0, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(21 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(8 * GB, a.getMetrics().getReservedMB()); @@ -1241,7 +1244,7 @@ public void testReservationsNoneAvailable() throws Exception { // continued to try due to having reservation above, // but hits queue limits so can't reserve anymore. 
a.assignContainers(clusterResource, node_2, - new ResourceLimits(clusterResource)); + new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals(21 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); assertEquals(8 * GB, a.getMetrics().getReservedMB()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 62135b91df4d7..84abf4e5445bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -160,6 +160,7 @@ public static ResourceRequest createResourceRequest( request.setCapability(capability); request.setRelaxLocality(relaxLocality); request.setPriority(priority); + request.setNodeLabelExpression(RMNodeLabelsManager.NO_LABEL); return request; } @@ -273,6 +274,7 @@ public static Configuration getConfigurationWithQueueLabels(Configuration config conf.setCapacity(B1, 100); conf.setMaximumCapacity(B1, 100); conf.setCapacityByLabel(B1, "y", 100); + conf.setMaximumApplicationMasterResourcePerQueuePercent(B1, 1f); final String C1 = C + ".c1"; conf.setQueues(C, new String[] {"c1"});
97f4f27b14e6afe3833fecfce79ca39877f2227a
elasticsearch
remove equals/hashcode as part of Pipeline and- adapt tests--Only MutateProcessor implemented equals / hashcode hence we would only use that one in our tests, since they relied on them. Better to not rely on equals/hashcode, drop them and mock processor/pipeline in our tests that need them. That also allow to make MutateProcessor constructor package private as the other processors.-
p
https://github.com/elastic/elasticsearch
diff --git a/plugins/ingest/src/main/java/org/elasticsearch/ingest/Pipeline.java b/plugins/ingest/src/main/java/org/elasticsearch/ingest/Pipeline.java index b98a469d3c307..7b44f7d5a7f9f 100644 --- a/plugins/ingest/src/main/java/org/elasticsearch/ingest/Pipeline.java +++ b/plugins/ingest/src/main/java/org/elasticsearch/ingest/Pipeline.java @@ -71,21 +71,6 @@ public List<Processor> getProcessors() { return processors; } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Pipeline pipeline = (Pipeline) o; - return Objects.equals(id, pipeline.id) && - Objects.equals(description, pipeline.description) && - Objects.equals(processors, pipeline.processors); - } - - @Override - public int hashCode() { - return Objects.hash(id, description, processors); - } - public final static class Factory { public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorRegistry) throws IOException { diff --git a/plugins/ingest/src/main/java/org/elasticsearch/ingest/processor/mutate/MutateProcessor.java b/plugins/ingest/src/main/java/org/elasticsearch/ingest/processor/mutate/MutateProcessor.java index 4a950bea08368..2daadd5fee54a 100644 --- a/plugins/ingest/src/main/java/org/elasticsearch/ingest/processor/mutate/MutateProcessor.java +++ b/plugins/ingest/src/main/java/org/elasticsearch/ingest/processor/mutate/MutateProcessor.java @@ -45,7 +45,7 @@ public final class MutateProcessor implements Processor { private final List<String> uppercase; private final List<String> lowercase; - public MutateProcessor(Map<String, Object> update, Map<String, String> rename, Map<String, String> convert, + MutateProcessor(Map<String, Object> update, Map<String, String> rename, Map<String, String> convert, Map<String, String> split, List<GsubExpression> gsub, Map<String, String> join, List<String> remove, List<String> trim, List<String> uppercase, List<String> lowercase) { 
this.update = update; diff --git a/plugins/ingest/src/main/java/org/elasticsearch/plugin/ingest/transport/simulate/ParsedSimulateRequest.java b/plugins/ingest/src/main/java/org/elasticsearch/plugin/ingest/transport/simulate/ParsedSimulateRequest.java index 47af2db583d6d..50897435217c5 100644 --- a/plugins/ingest/src/main/java/org/elasticsearch/plugin/ingest/transport/simulate/ParsedSimulateRequest.java +++ b/plugins/ingest/src/main/java/org/elasticsearch/plugin/ingest/transport/simulate/ParsedSimulateRequest.java @@ -51,21 +51,6 @@ public boolean isVerbose() { return verbose; } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ParsedSimulateRequest that = (ParsedSimulateRequest) o; - return Objects.equals(verbose, that.verbose) && - Objects.equals(documents, that.documents) && - Objects.equals(pipeline, that.pipeline); - } - - @Override - public int hashCode() { - return Objects.hash(documents, pipeline, verbose); - } - public static class Parser { private static final Pipeline.Factory PIPELINE_FACTORY = new Pipeline.Factory(); public static final String SIMULATED_PIPELINE_ID = "_simulate_pipeline"; diff --git a/plugins/ingest/src/test/java/org/elasticsearch/ingest/DataTests.java b/plugins/ingest/src/test/java/org/elasticsearch/ingest/DataTests.java index 918064a17c7a1..d17a354b13977 100644 --- a/plugins/ingest/src/test/java/org/elasticsearch/ingest/DataTests.java +++ b/plugins/ingest/src/test/java/org/elasticsearch/ingest/DataTests.java @@ -26,7 +26,8 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; public class DataTests extends ESTestCase { @@ -86,28 +87,54 @@ public void testAddFieldOnExistingParent() { assertThat(data.getProperty("fizz.new"), equalTo("bar")); } - public void testEquals() { - Data otherData = new Data(data); - 
assertThat(otherData, equalTo(data)); - } - - public void testNotEqualsDiffIndex() { - Data otherData = new Data(data.getIndex() + "foo", data.getType(), data.getId(), data.getDocument()); - assertThat(otherData, not(equalTo(data))); - } - - public void testNotEqualsDiffType() { - Data otherData = new Data(data.getIndex(), data.getType() + "foo", data.getId(), data.getDocument()); - assertThat(otherData, not(equalTo(data))); - } - - public void testNotEqualsDiffId() { - Data otherData = new Data(data.getIndex(), data.getType(), data.getId() + "foo", data.getDocument()); - assertThat(otherData, not(equalTo(data))); - } - - public void testNotEqualsDiffDocument() { - Data otherData = new Data(data.getIndex(), data.getType(), data.getId(), Collections.emptyMap()); - assertThat(otherData, not(equalTo(data))); + public void testEqualsAndHashcode() throws Exception { + String index = randomAsciiOfLengthBetween(1, 10); + String type = randomAsciiOfLengthBetween(1, 10); + String id = randomAsciiOfLengthBetween(1, 10); + String fieldName = randomAsciiOfLengthBetween(1, 10); + String fieldValue = randomAsciiOfLengthBetween(1, 10); + Data data = new Data(index, type, id, Collections.singletonMap(fieldName, fieldValue)); + + boolean changed = false; + String otherIndex; + if (randomBoolean()) { + otherIndex = randomAsciiOfLengthBetween(1, 10); + changed = true; + } else { + otherIndex = index; + } + String otherType; + if (randomBoolean()) { + otherType = randomAsciiOfLengthBetween(1, 10); + changed = true; + } else { + otherType = type; + } + String otherId; + if (randomBoolean()) { + otherId = randomAsciiOfLengthBetween(1, 10); + changed = true; + } else { + otherId = id; + } + Map<String, Object> document; + if (randomBoolean()) { + document = Collections.singletonMap(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); + changed = true; + } else { + document = Collections.singletonMap(fieldName, fieldValue); + } + + Data otherData = new Data(otherIndex, 
otherType, otherId, document); + if (changed) { + assertThat(data, not(equalTo(otherData))); + assertThat(otherData, not(equalTo(data))); + } else { + assertThat(data, equalTo(otherData)); + assertThat(otherData, equalTo(data)); + Data thirdData = new Data(index, type, id, Collections.singletonMap(fieldName, fieldValue)); + assertThat(thirdData, equalTo(data)); + assertThat(data, equalTo(thirdData)); + } } } diff --git a/plugins/ingest/src/test/java/org/elasticsearch/ingest/PipelineTests.java b/plugins/ingest/src/test/java/org/elasticsearch/ingest/PipelineTests.java deleted file mode 100644 index 84d0953495be2..0000000000000 --- a/plugins/ingest/src/test/java/org/elasticsearch/ingest/PipelineTests.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.ingest; - -import org.elasticsearch.ingest.processor.Processor; -import org.elasticsearch.ingest.processor.mutate.MutateProcessor; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.util.*; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; -import static org.mockito.Mockito.mock; - -public class PipelineTests extends ESTestCase { - private Processor updateProcessor; - private Processor lowercaseProcessor; - private Pipeline pipeline; - - @Before - public void setup() { - Map<String, Object> update = Collections.singletonMap("foo", 123); - List<String> lowercase = Collections.singletonList("foo"); - updateProcessor = new MutateProcessor(update, null, null, null, null, null, null, null, null, null); - lowercaseProcessor = new MutateProcessor(null, null, null, null, null, null, null, null, null, lowercase); - pipeline = new Pipeline("id", "description", Arrays.asList(updateProcessor, lowercaseProcessor)); - } - - public void testEquals() throws Exception { - Pipeline other = new Pipeline(pipeline.getId(), pipeline.getDescription(), pipeline.getProcessors()); - assertThat(pipeline, equalTo(other)); - } - - public void testNotEqualsDiffId() throws Exception { - Pipeline other = new Pipeline(pipeline.getId() + "foo", pipeline.getDescription(), pipeline.getProcessors()); - assertThat(pipeline, not(equalTo(other))); - } - - public void testNotEqualsDiffDescription() throws Exception { - Pipeline other = new Pipeline(pipeline.getId(), pipeline.getDescription() + "foo", pipeline.getProcessors()); - assertThat(pipeline, not(equalTo(other))); - } - - public void testNotEqualsDiffProcessors() throws Exception { - Pipeline other = new Pipeline(pipeline.getId(), pipeline.getDescription() + "foo", Collections.singletonList(updateProcessor)); - assertThat(pipeline, not(equalTo(other))); - } -} diff --git 
a/plugins/ingest/src/test/java/org/elasticsearch/plugin/ingest/transport/TransportDataTests.java b/plugins/ingest/src/test/java/org/elasticsearch/plugin/ingest/transport/TransportDataTests.java index 89ef773132712..1cc3f6baadaf4 100644 --- a/plugins/ingest/src/test/java/org/elasticsearch/plugin/ingest/transport/TransportDataTests.java +++ b/plugins/ingest/src/test/java/org/elasticsearch/plugin/ingest/transport/TransportDataTests.java @@ -39,8 +39,7 @@ public void testEqualsAndHashcode() throws Exception { String id = randomAsciiOfLengthBetween(1, 10); String fieldName = randomAsciiOfLengthBetween(1, 10); String fieldValue = randomAsciiOfLengthBetween(1, 10); - Data data = new Data(index, type, id, Collections.singletonMap(fieldName, fieldValue)); - TransportData transportData = new TransportData(data); + TransportData transportData = new TransportData(new Data(index, type, id, Collections.singletonMap(fieldName, fieldValue))); boolean changed = false; String otherIndex; @@ -72,22 +71,14 @@ public void testEqualsAndHashcode() throws Exception { document = Collections.singletonMap(fieldName, fieldValue); } - Data otherData = new Data(otherIndex, otherType, otherId, document); - TransportData otherTransportData = new TransportData(otherData); + TransportData otherTransportData = new TransportData(new Data(otherIndex, otherType, otherId, document)); if (changed) { - assertThat(data, not(equalTo(otherData))); - assertThat(otherData, not(equalTo(data))); assertThat(transportData, not(equalTo(otherTransportData))); assertThat(otherTransportData, not(equalTo(transportData))); } else { - assertThat(data, equalTo(otherData)); - assertThat(otherData, equalTo(data)); assertThat(transportData, equalTo(otherTransportData)); assertThat(otherTransportData, equalTo(transportData)); - Data thirdData = new Data(index, type, id, Collections.singletonMap(fieldName, fieldValue)); - TransportData thirdTransportData = new TransportData(thirdData); - assertThat(thirdData, equalTo(data)); - 
assertThat(data, equalTo(thirdData)); + TransportData thirdTransportData = new TransportData(new Data(index, type, id, Collections.singletonMap(fieldName, fieldValue))); assertThat(thirdTransportData, equalTo(transportData)); assertThat(transportData, equalTo(thirdTransportData)); } diff --git a/plugins/ingest/src/test/java/org/elasticsearch/plugin/ingest/transport/simulate/ParsedSimulateRequestParserTests.java b/plugins/ingest/src/test/java/org/elasticsearch/plugin/ingest/transport/simulate/ParsedSimulateRequestParserTests.java index 09c145a56b62e..7f44fc08b9b9e 100644 --- a/plugins/ingest/src/test/java/org/elasticsearch/plugin/ingest/transport/simulate/ParsedSimulateRequestParserTests.java +++ b/plugins/ingest/src/test/java/org/elasticsearch/plugin/ingest/transport/simulate/ParsedSimulateRequestParserTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.ingest.Data; import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.ingest.processor.Processor; -import org.elasticsearch.ingest.processor.mutate.MutateProcessor; import org.elasticsearch.plugin.ingest.PipelineStore; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -30,70 +29,120 @@ import java.io.IOException; import java.util.*; +import static org.elasticsearch.plugin.ingest.transport.simulate.SimulatePipelineRequest.Fields; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.elasticsearch.plugin.ingest.transport.simulate.SimulatePipelineRequest.Fields; - public class ParsedSimulateRequestParserTests extends ESTestCase { + private PipelineStore store; - private ParsedSimulateRequest.Parser parser; - private Pipeline pipeline; - private Data data; @Before public void init() throws IOException { - parser = new ParsedSimulateRequest.Parser(); - List<String> uppercase = Collections.singletonList("foo"); - Processor processor = new 
MutateProcessor(null, null, null, null, null, null, null, null, uppercase, null); - pipeline = new Pipeline(ParsedSimulateRequest.Parser.SIMULATED_PIPELINE_ID, null, Arrays.asList(processor)); - data = new Data("_index", "_type", "_id", Collections.singletonMap("foo", "bar")); + Pipeline pipeline = new Pipeline(ParsedSimulateRequest.Parser.SIMULATED_PIPELINE_ID, null, Collections.singletonList(mock(Processor.class))); Map<String, Processor.Factory> processorRegistry = new HashMap<>(); - processorRegistry.put("mutate", new MutateProcessor.Factory()); + processorRegistry.put("mock_processor", mock(Processor.Factory.class)); store = mock(PipelineStore.class); - when(store.get("_id")).thenReturn(pipeline); + when(store.get(ParsedSimulateRequest.Parser.SIMULATED_PIPELINE_ID)).thenReturn(pipeline); when(store.getProcessorFactoryRegistry()).thenReturn(processorRegistry); } public void testParseUsingPipelineStore() throws Exception { - ParsedSimulateRequest expectedRequest = new ParsedSimulateRequest(pipeline, Collections.singletonList(data), false); + int numDocs = randomIntBetween(1, 10); - Map<String, Object> raw = new HashMap<>(); + Map<String, Object> requestContent = new HashMap<>(); List<Map<String, Object>> docs = new ArrayList<>(); - Map<String, Object> doc = new HashMap<>(); - doc.put(Fields.INDEX, "_index"); - doc.put(Fields.TYPE, "_type"); - doc.put(Fields.ID, "_id"); - doc.put(Fields.SOURCE, data.getDocument()); - docs.add(doc); - raw.put(Fields.DOCS, docs); - - ParsedSimulateRequest actualRequest = parser.parseWithPipelineId("_id", raw, false, store); - assertThat(actualRequest, equalTo(expectedRequest)); + List<Map<String, Object>> expectedDocs = new ArrayList<>(); + requestContent.put(Fields.DOCS, docs); + for (int i = 0; i < numDocs; i++) { + Map<String, Object> doc = new HashMap<>(); + String index = randomAsciiOfLengthBetween(1, 10); + String type = randomAsciiOfLengthBetween(1, 10); + String id = randomAsciiOfLengthBetween(1, 10); + 
doc.put(Fields.INDEX, index); + doc.put(Fields.TYPE, type); + doc.put(Fields.ID, id); + String fieldName = randomAsciiOfLengthBetween(1, 10); + String fieldValue = randomAsciiOfLengthBetween(1, 10); + doc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue)); + docs.add(doc); + Map<String, Object> expectedDoc = new HashMap<>(); + expectedDoc.put(Fields.INDEX, index); + expectedDoc.put(Fields.TYPE, type); + expectedDoc.put(Fields.ID, id); + expectedDoc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue)); + expectedDocs.add(expectedDoc); + } + + ParsedSimulateRequest actualRequest = new ParsedSimulateRequest.Parser().parseWithPipelineId(ParsedSimulateRequest.Parser.SIMULATED_PIPELINE_ID, requestContent, false, store); + assertThat(actualRequest.isVerbose(), equalTo(false)); + assertThat(actualRequest.getDocuments().size(), equalTo(numDocs)); + Iterator<Map<String, Object>> expectedDocsIterator = expectedDocs.iterator(); + for (Data data : actualRequest.getDocuments()) { + Map<String, Object> expectedDocument = expectedDocsIterator.next(); + assertThat(data.getDocument(), equalTo(expectedDocument.get(Fields.SOURCE))); + assertThat(data.getIndex(), equalTo(expectedDocument.get(Fields.INDEX))); + assertThat(data.getType(), equalTo(expectedDocument.get(Fields.TYPE))); + assertThat(data.getId(), equalTo(expectedDocument.get(Fields.ID))); + } + + assertThat(actualRequest.getPipeline().getId(), equalTo(ParsedSimulateRequest.Parser.SIMULATED_PIPELINE_ID)); + assertThat(actualRequest.getPipeline().getDescription(), nullValue()); + assertThat(actualRequest.getPipeline().getProcessors().size(), equalTo(1)); } public void testParseWithProvidedPipeline() throws Exception { - ParsedSimulateRequest expectedRequest = new ParsedSimulateRequest(pipeline, Collections.singletonList(data), false); + int numDocs = randomIntBetween(1, 10); - Map<String, Object> raw = new HashMap<>(); + Map<String, Object> requestContent = new HashMap<>(); List<Map<String, 
Object>> docs = new ArrayList<>(); - Map<String, Object> doc = new HashMap<>(); - doc.put(Fields.INDEX, "_index"); - doc.put(Fields.TYPE, "_type"); - doc.put(Fields.ID, "_id"); - doc.put(Fields.SOURCE, data.getDocument()); - docs.add(doc); - - Map<String, Object> processorConfig = new HashMap<>(); - processorConfig.put("uppercase", Arrays.asList("foo")); + List<Map<String, Object>> expectedDocs = new ArrayList<>(); + requestContent.put(Fields.DOCS, docs); + for (int i = 0; i < numDocs; i++) { + Map<String, Object> doc = new HashMap<>(); + String index = randomAsciiOfLengthBetween(1, 10); + String type = randomAsciiOfLengthBetween(1, 10); + String id = randomAsciiOfLengthBetween(1, 10); + doc.put(Fields.INDEX, index); + doc.put(Fields.TYPE, type); + doc.put(Fields.ID, id); + String fieldName = randomAsciiOfLengthBetween(1, 10); + String fieldValue = randomAsciiOfLengthBetween(1, 10); + doc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue)); + docs.add(doc); + Map<String, Object> expectedDoc = new HashMap<>(); + expectedDoc.put(Fields.INDEX, index); + expectedDoc.put(Fields.TYPE, type); + expectedDoc.put(Fields.ID, id); + expectedDoc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue)); + expectedDocs.add(expectedDoc); + } + Map<String, Object> pipelineConfig = new HashMap<>(); - pipelineConfig.put("processors", Collections.singletonList(Collections.singletonMap("mutate", processorConfig))); + List<Map<String, Object>> processors = new ArrayList<>(); + int numProcessors = randomIntBetween(1, 10); + for (int i = 0; i < numProcessors; i++) { + processors.add(Collections.singletonMap("mock_processor", Collections.emptyMap())); + } + pipelineConfig.put("processors", processors); + requestContent.put(Fields.PIPELINE, pipelineConfig); - raw.put(Fields.DOCS, docs); - raw.put(Fields.PIPELINE, pipelineConfig); + ParsedSimulateRequest actualRequest = new ParsedSimulateRequest.Parser().parse(requestContent, false, store); + 
assertThat(actualRequest.isVerbose(), equalTo(false)); + assertThat(actualRequest.getDocuments().size(), equalTo(numDocs)); + Iterator<Map<String, Object>> expectedDocsIterator = expectedDocs.iterator(); + for (Data data : actualRequest.getDocuments()) { + Map<String, Object> expectedDocument = expectedDocsIterator.next(); + assertThat(data.getDocument(), equalTo(expectedDocument.get(Fields.SOURCE))); + assertThat(data.getIndex(), equalTo(expectedDocument.get(Fields.INDEX))); + assertThat(data.getType(), equalTo(expectedDocument.get(Fields.TYPE))); + assertThat(data.getId(), equalTo(expectedDocument.get(Fields.ID))); + } - ParsedSimulateRequest actualRequest = parser.parse(raw, false, store); - assertThat(actualRequest, equalTo(expectedRequest)); + assertThat(actualRequest.getPipeline().getId(), equalTo(ParsedSimulateRequest.Parser.SIMULATED_PIPELINE_ID)); + assertThat(actualRequest.getPipeline().getDescription(), nullValue()); + assertThat(actualRequest.getPipeline().getProcessors().size(), equalTo(numProcessors)); } }
c00c4d3881ddd790a2dae2686bd64df40102332c
kotlin
KT-1808 Auto import offers private static Java- classes -KT-1808 fixed--
c
https://github.com/JetBrains/kotlin
diff --git a/idea/src/org/jetbrains/jet/plugin/caches/JetShortNamesCache.java b/idea/src/org/jetbrains/jet/plugin/caches/JetShortNamesCache.java index 4492e250cfc9f..9f4256bebb559 100644 --- a/idea/src/org/jetbrains/jet/plugin/caches/JetShortNamesCache.java +++ b/idea/src/org/jetbrains/jet/plugin/caches/JetShortNamesCache.java @@ -17,7 +17,6 @@ package org.jetbrains.jet.plugin.caches; import com.google.common.base.Function; -import com.google.common.base.Predicate; import com.google.common.collect.Collections2; import com.intellij.openapi.project.Project; import com.intellij.openapi.util.Condition; @@ -36,7 +35,10 @@ import org.jetbrains.jet.asJava.JavaElementFinder; import org.jetbrains.jet.lang.descriptors.*; import org.jetbrains.jet.lang.psi.*; -import org.jetbrains.jet.lang.resolve.*; +import org.jetbrains.jet.lang.resolve.BindingContext; +import org.jetbrains.jet.lang.resolve.BindingTraceContext; +import org.jetbrains.jet.lang.resolve.ImportPath; +import org.jetbrains.jet.lang.resolve.QualifiedExpressionResolver; import org.jetbrains.jet.lang.resolve.name.FqName; import org.jetbrains.jet.lang.resolve.scopes.JetScope; import org.jetbrains.jet.lang.types.JetType; @@ -48,7 +50,6 @@ import org.jetbrains.jet.plugin.stubindex.JetFullClassNameIndex; import org.jetbrains.jet.plugin.stubindex.JetShortClassNameIndex; import org.jetbrains.jet.plugin.stubindex.JetShortFunctionNameIndex; -import org.jetbrains.jet.util.QualifiedNamesUtil; import java.util.*; @@ -93,7 +94,7 @@ public PsiClass[] getClassesByName(@NotNull @NonNls String name, @NotNull Global List<PsiClass> result = new ArrayList<PsiClass>(); for (String fqName : JetFullClassNameIndex.getInstance().getAllKeys(project)) { - if (QualifiedNamesUtil.fqnToShortName(new FqName(fqName)).getName().equals(name)) { + if ((new FqName(fqName)).shortName().getName().equals(name)) { PsiClass psiClass = javaElementFinder.findClass(fqName, scope); if (psiClass != null) { result.add(psiClass); @@ -129,17 +130,6 @@ public 
DeclarationDescriptor apply(@Nullable ClassDescriptor classDescriptor) { return standardTypes; } - @NotNull - public Collection<FqName> getFQNamesByName(@NotNull final String name, JetFile file, @NotNull GlobalSearchScope scope) { - BindingContext context = WholeProjectAnalyzerFacade.analyzeProjectWithCacheOnAFile(file).getBindingContext(); - return Collections2.filter(context.getKeys(BindingContext.FQNAME_TO_CLASS_DESCRIPTOR), new Predicate<FqName>() { - @Override - public boolean apply(@Nullable FqName fqName) { - return fqName != null && QualifiedNamesUtil.isShortNameForFQN(name, fqName); - } - }); - } - /** * Get jet non-extension top-level function names. Method is allowed to give invalid names - all result should be * checked with getTopLevelFunctionDescriptorsByName(). diff --git a/idea/src/org/jetbrains/jet/plugin/quickfix/ImportClassAndFunFix.java b/idea/src/org/jetbrains/jet/plugin/quickfix/ImportClassAndFunFix.java index 25a9e4c8b53e3..fb49e881f31ca 100644 --- a/idea/src/org/jetbrains/jet/plugin/quickfix/ImportClassAndFunFix.java +++ b/idea/src/org/jetbrains/jet/plugin/quickfix/ImportClassAndFunFix.java @@ -30,28 +30,32 @@ import com.intellij.openapi.project.Project; import com.intellij.openapi.util.Condition; import com.intellij.openapi.util.text.StringUtil; -import com.intellij.openapi.vfs.VirtualFile; import com.intellij.psi.PsiClass; import com.intellij.psi.PsiFile; -import com.intellij.psi.search.DelegatingGlobalSearchScope; +import com.intellij.psi.PsiMember; +import com.intellij.psi.PsiModifier; import com.intellij.psi.search.GlobalSearchScope; import com.intellij.psi.search.PsiShortNamesCache; import com.intellij.util.IncorrectOperationException; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import org.jetbrains.jet.asJava.JetLightClass; +import org.jetbrains.jet.lang.descriptors.ClassDescriptor; import org.jetbrains.jet.lang.descriptors.DeclarationDescriptor; import 
org.jetbrains.jet.lang.descriptors.FunctionDescriptor; +import org.jetbrains.jet.lang.descriptors.Visibilities; import org.jetbrains.jet.lang.diagnostics.Diagnostic; import org.jetbrains.jet.lang.psi.JetFile; import org.jetbrains.jet.lang.psi.JetSimpleNameExpression; +import org.jetbrains.jet.lang.resolve.BindingContext; import org.jetbrains.jet.lang.resolve.DescriptorUtils; -import org.jetbrains.jet.lang.resolve.name.FqName; import org.jetbrains.jet.lang.resolve.ImportPath; +import org.jetbrains.jet.lang.resolve.name.FqName; import org.jetbrains.jet.plugin.JetBundle; -import org.jetbrains.jet.plugin.JetFileType; import org.jetbrains.jet.plugin.actions.JetAddImportAction; import org.jetbrains.jet.plugin.caches.JetCacheManager; import org.jetbrains.jet.plugin.caches.JetShortNamesCache; +import org.jetbrains.jet.plugin.project.WholeProjectAnalyzerFacade; import java.util.Collection; import java.util.Collections; @@ -100,8 +104,10 @@ public boolean apply(@Nullable FqName fqName) { } }); } - - private static Collection<FqName> getJetTopLevelFunctions(@NotNull String referenceName, JetSimpleNameExpression expression, @NotNull Project project) { + + private static Collection<FqName> getJetTopLevelFunctions(@NotNull String referenceName, + JetSimpleNameExpression expression, + @NotNull Project project) { JetShortNamesCache namesCache = JetCacheManager.getInstance(project).getNamesCache(); Collection<FunctionDescriptor> topLevelFunctions = namesCache.getTopLevelFunctionDescriptorsByName( referenceName, @@ -148,33 +154,58 @@ public FqName apply(@Nullable DeclarationDescriptor declarationDescriptor) { public static Collection<FqName> getClassNames(@NotNull String referenceName, @NotNull JetFile file) { final GlobalSearchScope scope = GlobalSearchScope.allScope(file.getProject()); Set<FqName> possibleResolveNames = Sets.newHashSet(); - possibleResolveNames.addAll(JetCacheManager.getInstance(file.getProject()).getNamesCache().getFQNamesByName(referenceName, file, scope)); + 
possibleResolveNames.addAll(getJavaClasses(referenceName, file.getProject(), scope)); // TODO: Do appropriate sorting return Lists.newArrayList(possibleResolveNames); } - private static Collection<FqName> getJavaClasses(@NotNull final String typeName, @NotNull Project project, final GlobalSearchScope scope) { + private static Collection<FqName> getJavaClasses(@NotNull final String typeName, + @NotNull Project project, + final GlobalSearchScope scope) { PsiShortNamesCache cache = PsiShortNamesCache.getInstance(project); - PsiClass[] classes = cache.getClassesByName(typeName, new DelegatingGlobalSearchScope(scope) { + PsiClass[] classes = cache.getClassesByName(typeName, scope); + + Collection<PsiClass> accessibleClasses = Collections2.filter(Lists.newArrayList(classes), new Predicate<PsiClass>() { @Override - public boolean contains(@NotNull VirtualFile file) { - return myBaseScope.contains(file) && file.getFileType() != JetFileType.INSTANCE; + public boolean apply(PsiClass psiClass) { + assert psiClass != null; + return isAccessible(psiClass); } }); - return Collections2.transform(Lists.newArrayList(classes), new Function<PsiClass, FqName>() { + return Collections2.transform(accessibleClasses, new Function<PsiClass, FqName>() { @Nullable @Override public FqName apply(@Nullable PsiClass javaClass) { assert javaClass != null; - return new FqName(javaClass.getQualifiedName()); + String qualifiedName = javaClass.getQualifiedName(); + assert qualifiedName != null; + return new FqName(qualifiedName); } }); } + private static boolean isAccessible(PsiMember member) { + if (member instanceof JetLightClass) { + // TODO: Now light class can't losing accessibility information + JetLightClass lightClass = (JetLightClass) member; + BindingContext context = WholeProjectAnalyzerFacade.analyzeProjectWithCacheOnAFile((JetFile) lightClass.getContainingFile()).getBindingContext(); + ClassDescriptor descriptor = context.get(BindingContext.FQNAME_TO_CLASS_DESCRIPTOR, 
lightClass.getFqName()); + + if (descriptor != null) { + return descriptor.getVisibility() == Visibilities.PUBLIC || descriptor.getVisibility() == Visibilities.INTERNAL; + } + else { + assert false : "Descriptor of the class isn't found in the binding context"; + } + } + + return member.hasModifierProperty(PsiModifier.PUBLIC) || member.hasModifierProperty(PsiModifier.PROTECTED); + } + @Override public boolean showHint(@NotNull Editor editor) { if (suggestions.isEmpty()) { @@ -220,7 +251,8 @@ public boolean isAvailable(@NotNull Project project, Editor editor, PsiFile file } @Override - public void invoke(@NotNull final Project project, @NotNull final Editor editor, final PsiFile file) throws IncorrectOperationException { + public void invoke(@NotNull final Project project, @NotNull final Editor editor, final PsiFile file) + throws IncorrectOperationException { CommandProcessor.getInstance().runUndoTransparentAction(new Runnable() { @Override public void run() { diff --git a/idea/testData/quickfix/autoImports/noImportForPrivateClass.after.kt b/idea/testData/quickfix/autoImports/noImportForPrivateClass.after.kt new file mode 100644 index 0000000000000..880e647c55971 --- /dev/null +++ b/idea/testData/quickfix/autoImports/noImportForPrivateClass.after.kt @@ -0,0 +1,5 @@ +// "Import Class" "false" + +fun test() { + PrivateClass +} \ No newline at end of file diff --git a/idea/testData/quickfix/autoImports/noImportForPrivateClass.before.Main.kt b/idea/testData/quickfix/autoImports/noImportForPrivateClass.before.Main.kt new file mode 100644 index 0000000000000..926bed4da6bc0 --- /dev/null +++ b/idea/testData/quickfix/autoImports/noImportForPrivateClass.before.Main.kt @@ -0,0 +1,5 @@ +// "Import" "false" + +fun test() { + <caret>PrivateClass +} \ No newline at end of file diff --git a/idea/testData/quickfix/autoImports/noImportForPrivateClass.before.data.Sample.kt b/idea/testData/quickfix/autoImports/noImportForPrivateClass.before.data.Sample.kt new file mode 100644 index 
0000000000000..cbd83c6a1c2b5 --- /dev/null +++ b/idea/testData/quickfix/autoImports/noImportForPrivateClass.before.data.Sample.kt @@ -0,0 +1,3 @@ +package sometest + +private class PrivateClass \ No newline at end of file diff --git a/idea/tests/org/jetbrains/jet/plugin/quickfix/AutoImportFixTest.java b/idea/tests/org/jetbrains/jet/plugin/quickfix/AutoImportFixTest.java index a3c9ca3f74150..920f5a9e83d73 100644 --- a/idea/tests/org/jetbrains/jet/plugin/quickfix/AutoImportFixTest.java +++ b/idea/tests/org/jetbrains/jet/plugin/quickfix/AutoImportFixTest.java @@ -38,6 +38,10 @@ public void testFunctionImport() throws Exception { doTest(); } + public void testNoImportForPrivateClass() throws Exception { + doTest(); + } + @Override protected String getCheckFileName() { return getTestName(true) + ".after.kt";
c93b0290e8efe32d4844d10adc78c70b802fde18
orientdb
fixed minor issue with thread local management--
c
https://github.com/orientechnologies/orientdb
diff --git a/core/src/main/java/com/orientechnologies/orient/core/config/OGlobalConfiguration.java b/core/src/main/java/com/orientechnologies/orient/core/config/OGlobalConfiguration.java index efd9b38a74a..a8ab992f831 100755 --- a/core/src/main/java/com/orientechnologies/orient/core/config/OGlobalConfiguration.java +++ b/core/src/main/java/com/orientechnologies/orient/core/config/OGlobalConfiguration.java @@ -432,7 +432,7 @@ public void change(final Object iCurrentValue, final Object iNewValue) { Level.class, Level.SEVERE), SERVER_LOG_DUMP_CLIENT_EXCEPTION_FULLSTACKTRACE("server.log.dumpClientExceptionFullStackTrace", - "Dumps the full stack trace of the exception to sent to the client", Level.class, Boolean.FALSE), + "Dumps the full stack trace of the exception to sent to the client", Boolean.class, Boolean.FALSE), // DISTRIBUTED DISTRIBUTED_CRUD_TASK_SYNCH_TIMEOUT("distributed.crudTaskTimeout", diff --git a/graphdb/src/main/java/com/tinkerpop/blueprints/impls/orient/OrientBaseGraph.java b/graphdb/src/main/java/com/tinkerpop/blueprints/impls/orient/OrientBaseGraph.java index b4cecdad580..e70b253a20b 100755 --- a/graphdb/src/main/java/com/tinkerpop/blueprints/impls/orient/OrientBaseGraph.java +++ b/graphdb/src/main/java/com/tinkerpop/blueprints/impls/orient/OrientBaseGraph.java @@ -284,11 +284,8 @@ public static void clearInitStack() { final ThreadLocal<OrientBaseGraph> ag = activeGraph; if (ag != null) - ag.set(null); + ag.remove(); - final ODatabaseRecordThreadLocal dbtl = ODatabaseRecordThreadLocal.INSTANCE; - if (dbtl != null) - dbtl.set(null); } /** @@ -354,7 +351,7 @@ public static String decodeClassName(String iClassName) { protected static void checkForGraphSchema(final ODatabaseDocumentTx iDatabase) { final OSchema schema = iDatabase.getMetadata().getSchema(); -// schema.getOrCreateClass(OMVRBTreeRIDProvider.PERSISTENT_CLASS_NAME); + // schema.getOrCreateClass(OMVRBTreeRIDProvider.PERSISTENT_CLASS_NAME); final OClass vertexBaseClass = 
schema.getClass(OrientVertexType.CLASS_NAME); final OClass edgeBaseClass = schema.getClass(OrientEdgeType.CLASS_NAME); diff --git a/server/src/main/java/com/orientechnologies/orient/server/network/protocol/binary/ONetworkProtocolBinary.java b/server/src/main/java/com/orientechnologies/orient/server/network/protocol/binary/ONetworkProtocolBinary.java index 2d1a13447f3..61e6af8d638 100755 --- a/server/src/main/java/com/orientechnologies/orient/server/network/protocol/binary/ONetworkProtocolBinary.java +++ b/server/src/main/java/com/orientechnologies/orient/server/network/protocol/binary/ONetworkProtocolBinary.java @@ -213,8 +213,8 @@ protected void onBeforeRequest() throws IOException { } if (connection != null) { - ODatabaseRecordThreadLocal.INSTANCE.set(connection.database); if (connection.database != null) { + connection.database.activateOnCurrentThread(); connection.data.lastDatabase = connection.database.getName(); connection.data.lastUser = connection.database.getUser() != null ? connection.database.getUser().getName() : null; } else {
c94e990f4670af28bc87528c1874946e920eabab
orientdb
OOM reported by NMSWorks was fixed.--
c
https://github.com/orientechnologies/orientdb
diff --git a/core/src/main/java/com/orientechnologies/common/concur/lock/ODistributedCounter.java b/core/src/main/java/com/orientechnologies/common/concur/lock/ODistributedCounter.java new file mode 100755 index 00000000000..b211899b797 --- /dev/null +++ b/core/src/main/java/com/orientechnologies/common/concur/lock/ODistributedCounter.java @@ -0,0 +1,117 @@ +package com.orientechnologies.common.concur.lock; + +import com.orientechnologies.orient.core.OOrientListenerAbstract; +import com.orientechnologies.orient.core.Orient; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +/** + * * @author Andrey Lomakin (a.lomakin-at-orientechnologies.com) + */ +public class ODistributedCounter extends OOrientListenerAbstract { + private static final int HASH_INCREMENT = 0x61c88647; + + private static final AtomicInteger nextHashCode = new AtomicInteger(); + private final AtomicBoolean poolBusy = new AtomicBoolean(); + private final int maxPartitions = Runtime.getRuntime().availableProcessors() << 3; + private final int MAX_RETRIES = 8; + + private final ThreadLocal<Integer> threadHashCode = new ThreadHashCode(); + private volatile AtomicLong[] counters = new AtomicLong[2]; + + public ODistributedCounter() { + for (int i = 0; i < counters.length; i++) { + counters[i] = new AtomicLong(); + } + + Orient.instance().registerWeakOrientStartupListener(this); + Orient.instance().registerWeakOrientShutdownListener(this); + } + + public void increment() { + updateCounter(+1); + } + + public void decrement() { + updateCounter(-1); + } + + private void updateCounter(int delta) { + final int hashCode = threadHashCode.get(); + + while (true) { + final AtomicLong[] cts = counters; + final int index = (cts.length - 1) & hashCode; + + AtomicLong counter = cts[index]; + + if (counter == null) { + if (!poolBusy.get() && poolBusy.compareAndSet(false, true)) { + if (cts == counters) { + counter = 
cts[index]; + + if (counter == null) + cts[index] = new AtomicLong(); + } + + poolBusy.set(false); + } + + continue; + } else { + long v = counter.get(); + int retries = 0; + + if (cts.length < maxPartitions) { + while (retries < MAX_RETRIES) { + if (!counter.compareAndSet(v, v + delta)) { + retries++; + v = counter.get(); + } else { + return; + } + } + } else { + counter.addAndGet(delta); + return; + } + + if (!poolBusy.get() && poolBusy.compareAndSet(false, true)) { + if (cts == counters) { + if (cts.length < maxPartitions) { + counters = new AtomicLong[cts.length << 1]; + System.arraycopy(cts, 0, counters, 0, cts.length); + } + } + + poolBusy.set(false); + } + + continue; + } + } + } + + public boolean isEmpty() { + long sum = 0; + + for (AtomicLong counter : counters) + if (counter != null) + sum += counter.get(); + + return sum == 0; + } + + private static int nextHashCode() { + return nextHashCode.getAndAdd(HASH_INCREMENT); + } + + private static class ThreadHashCode extends ThreadLocal<Integer> { + @Override + protected Integer initialValue() { + return nextHashCode(); + } + } +} diff --git a/core/src/main/java/com/orientechnologies/common/concur/lock/OReadersWriterSpinLock.java b/core/src/main/java/com/orientechnologies/common/concur/lock/OReadersWriterSpinLock.java old mode 100644 new mode 100755 index 5bacf186405..5cc90542279 --- a/core/src/main/java/com/orientechnologies/common/concur/lock/OReadersWriterSpinLock.java +++ b/core/src/main/java/com/orientechnologies/common/concur/lock/OReadersWriterSpinLock.java @@ -36,13 +36,13 @@ * @since 8/18/14 */ public class OReadersWriterSpinLock extends AbstractOwnableSynchronizer implements OOrientStartupListener, OOrientShutdownListener { - private final OThreadCountersHashTable threadCountersHashTable = new OThreadCountersHashTable(); + private final ODistributedCounter distributedCounter = new ODistributedCounter(); - private final AtomicReference<WNode> tail = new AtomicReference<WNode>(); - private volatile 
ThreadLocal<OModifiableInteger> lockHolds = new InitOModifiableInteger(); + private final AtomicReference<WNode> tail = new AtomicReference<WNode>(); + private volatile ThreadLocal<OModifiableInteger> lockHolds = new InitOModifiableInteger(); - private volatile ThreadLocal<WNode> myNode = new InitWNode(); - private volatile ThreadLocal<WNode> predNode = new ThreadLocal<WNode>(); + private volatile ThreadLocal<WNode> myNode = new InitWNode(); + private volatile ThreadLocal<WNode> predNode = new ThreadLocal<WNode>(); public OReadersWriterSpinLock() { final WNode wNode = new WNode(); @@ -67,11 +67,11 @@ public void acquireReadLock() { return; } - threadCountersHashTable.increment(); + distributedCounter.increment(); WNode wNode = tail.get(); while (wNode.locked) { - threadCountersHashTable.decrement(); + distributedCounter.decrement(); while (wNode.locked && wNode == tail.get()) { wNode.waitingReaders.add(Thread.currentThread()); @@ -82,7 +82,7 @@ public void acquireReadLock() { wNode = tail.get(); } - threadCountersHashTable.increment(); + distributedCounter.increment(); wNode = tail.get(); } @@ -102,7 +102,7 @@ public void releaseReadLock() { return; } - threadCountersHashTable.decrement(); + distributedCounter.decrement(); lHolds.decrement(); assert lHolds.intValue() == 0; @@ -131,7 +131,7 @@ public void acquireWriteLock() { pNode.waitingWriter = null; - while (!threadCountersHashTable.isEmpty()) + while (!distributedCounter.isEmpty()) ; setExclusiveOwnerThread(Thread.currentThread()); diff --git a/core/src/main/java/com/orientechnologies/orient/core/db/OPartitionedDatabasePool.java b/core/src/main/java/com/orientechnologies/orient/core/db/OPartitionedDatabasePool.java old mode 100644 new mode 100755 index cdc6d31b598..f0d536ee542 --- a/core/src/main/java/com/orientechnologies/orient/core/db/OPartitionedDatabasePool.java +++ b/core/src/main/java/com/orientechnologies/orient/core/db/OPartitionedDatabasePool.java @@ -84,12 +84,7 @@ public class 
OPartitionedDatabasePool extends OOrientListenerAbstract { private final String userName; private final String password; private final int maxSize; - private final ThreadLocal<PoolData> poolData = new ThreadLocal<PoolData>() { - @Override - protected PoolData initialValue() { - return new PoolData(); - } - }; + private final ThreadLocal<PoolData> poolData = new ThreadPoolData(); private final AtomicBoolean poolBusy = new AtomicBoolean(); private final int maxPartitions = Runtime.getRuntime().availableProcessors() << 3; private volatile PoolPartition[] partitions; @@ -111,6 +106,13 @@ private static final class PoolPartition { private final ConcurrentLinkedQueue<DatabaseDocumentTxPolled> queue = new ConcurrentLinkedQueue<DatabaseDocumentTxPolled>(); } + private static class ThreadPoolData extends ThreadLocal<PoolData> { + @Override + protected PoolData initialValue() { + return new PoolData(); + } + } + private final class DatabaseDocumentTxPolled extends ODatabaseDocumentTx { private PoolPartition partition;
36c3025c974288b8d2322dd9a830d7bff0b155cb
hbase
HBASE-3313 Table name isn't checked in- isTableEnabled/isTableDisabled--git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1081431 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/hbase
diff --git a/CHANGES.txt b/CHANGES.txt index 8713b854d534..60d682f45a15 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -66,6 +66,8 @@ Release 0.91.0 - Unreleased not exit HBASE-3626 Update instructions in thrift demo files (Moaz Reyad via Stack) HBASE-3538 Column families allow to have slashes in name (Ian Knome via Stack) + HBASE-3313 Table name isn't checked in isTableEnabled/isTableDisabled + (Ted Yu via Stack) IMPROVEMENTS HBASE-3290 Max Compaction Size (Nicolas Spiegelberg via Stack) diff --git a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 82993a7b3013..69b14a54ed91 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -590,6 +590,7 @@ public boolean isTableEnabled(String tableName) throws IOException { * @throws IOException if a remote or network exception occurs */ public boolean isTableEnabled(byte[] tableName) throws IOException { + HTableDescriptor.isLegalTableName(tableName); return connection.isTableEnabled(tableName); } @@ -608,6 +609,7 @@ public boolean isTableDisabled(final String tableName) throws IOException { * @throws IOException if a remote or network exception occurs */ public boolean isTableDisabled(byte[] tableName) throws IOException { + HTableDescriptor.isLegalTableName(tableName); return connection.isTableDisabled(tableName); }
adb90c7f52be4c443a1050b2bfcbcb5cdf8542f5
hadoop
YARN-2821. Fixed a problem that DistributedShell AM- may hang if restarted. Contributed by Varun Vasudev (cherry picked from- commit 7438966586f1896ab3e8b067d47a4af28a894106)--
c
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index c97df938c81ad..16cb27b8361f7 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -375,6 +375,9 @@ Release 2.8.0 - UNRELEASED YARN-3302. TestDockerContainerExecutor should run automatically if it can detect docker in the usual place (Ravindra Kumar Naik via raviprak) + YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted. + (Varun Vasudev via jianhe) + Release 2.7.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml index 24f8bcc000e6e..6ac8bf134d2f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml @@ -116,6 +116,11 @@ <type>test-jar</type> <scope>test</scope> </dependency> + <dependency> + <groupId>org.mockito</groupId> + <artifactId>mockito-all</artifactId> + <scope>test</scope> + </dependency> </dependencies> <build> diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index b62c24cbd711f..b28c0c925c3d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -30,10 +30,12 @@ import java.nio.ByteBuffer; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.Vector; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -277,6 +279,10 @@ public static enum DSEntity { private final String linux_bash_command = "bash"; private final String windows_command = "cmd /c"; + @VisibleForTesting + protected final Set<ContainerId> launchedContainers = + Collections.newSetFromMap(new ConcurrentHashMap<ContainerId, Boolean>()); + /** * @param args Command line args */ @@ -601,8 +607,12 @@ public void run() throws YarnException, IOException, InterruptedException { response.getContainersFromPreviousAttempts(); LOG.info(appAttemptID + " received " + previousAMRunningContainers.size() + " previous attempts' running containers on AM registration."); + for(Container container: previousAMRunningContainers) { + launchedContainers.add(container.getId()); + } numAllocatedContainers.addAndGet(previousAMRunningContainers.size()); + int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size(); // Setup ask for containers from RM @@ -715,8 +725,9 @@ protected boolean finish() { return success; } - - private class RMCallbackHandler implements AMRMClientAsync.CallbackHandler { + + @VisibleForTesting + class RMCallbackHandler implements AMRMClientAsync.CallbackHandler { @SuppressWarnings("unchecked") @Override public void onContainersCompleted(List<ContainerStatus> completedContainers) { @@ -731,6 +742,14 @@ public void onContainersCompleted(List<ContainerStatus> completedContainers) { // non complete 
containers should not be here assert (containerStatus.getState() == ContainerState.COMPLETE); + // ignore containers we know nothing about - probably from a previous + // attempt + if (!launchedContainers.contains(containerStatus.getContainerId())) { + LOG.info("Ignoring completed status of " + + containerStatus.getContainerId() + + "; unknown container(probably launched by previous attempt)"); + continue; + } // increment counters for completed/failed containers int exitStatus = containerStatus.getExitStatus(); @@ -796,14 +815,13 @@ public void onContainersAllocated(List<Container> allocatedContainers) { // + ", containerToken" // +allocatedContainer.getContainerToken().getIdentifier().toString()); - LaunchContainerRunnable runnableLaunchContainer = - new LaunchContainerRunnable(allocatedContainer, containerListener); - Thread launchThread = new Thread(runnableLaunchContainer); + Thread launchThread = createLaunchContainerThread(allocatedContainer); // launch and start the container on a separate thread to keep // the main thread unblocked // as all containers may not be allocated at one go. 
launchThreads.add(launchThread); + launchedContainers.add(allocatedContainer.getId()); launchThread.start(); } } @@ -1150,4 +1168,30 @@ private static void publishApplicationAttemptEvent( + appAttemptId.toString(), e); } } + + RMCallbackHandler getRMCallbackHandler() { + return new RMCallbackHandler(); + } + + @VisibleForTesting + void setAmRMClient(AMRMClientAsync client) { + this.amRMClient = client; + } + + @VisibleForTesting + int getNumCompletedContainers() { + return numCompletedContainers.get(); + } + + @VisibleForTesting + boolean getDone() { + return done; + } + + @VisibleForTesting + Thread createLaunchContainerThread(Container allocatedContainer) { + LaunchContainerRunnable runnableLaunchContainer = + new LaunchContainerRunnable(allocatedContainer, containerListener); + return new Thread(runnableLaunchContainer); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java index 11e840a8c90ad..0fed14d02cc00 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java @@ -20,13 +20,143 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerExitStatus; +import org.apache.hadoop.yarn.api.records.ContainerId; +import 
org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Assert; import org.junit.Test; +import org.mockito.Matchers; +import org.mockito.Mockito; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * A bunch of tests to make sure that the container allocations + * and releases occur correctly. + */ public class TestDSAppMaster { + static class TestAppMaster extends ApplicationMaster { + private int threadsLaunched = 0; + + @Override + protected Thread createLaunchContainerThread(Container allocatedContainer) { + threadsLaunched++; + launchedContainers.add(allocatedContainer.getId()); + return new Thread(); + } + + void setNumTotalContainers(int numTotalContainers) { + this.numTotalContainers = numTotalContainers; + } + + int getAllocatedContainers() { + return this.numAllocatedContainers.get(); + } + + @Override + void startTimelineClient(final Configuration conf) throws YarnException, + IOException, InterruptedException { + timelineClient = null; + } + } + + @SuppressWarnings("unchecked") + @Test + public void testDSAppMasterAllocateHandler() throws Exception { + + TestAppMaster master = new TestAppMaster(); + int targetContainers = 2; + AMRMClientAsync mockClient = Mockito.mock(AMRMClientAsync.class); + master.setAmRMClient(mockClient); + master.setNumTotalContainers(targetContainers); + Mockito.doNothing().when(mockClient) + 
.addContainerRequest(Matchers.any(AMRMClient.ContainerRequest.class)); + + ApplicationMaster.RMCallbackHandler handler = master.getRMCallbackHandler(); + + List<Container> containers = new ArrayList<>(1); + ContainerId id1 = BuilderUtils.newContainerId(1, 1, 1, 1); + containers.add(generateContainer(id1)); + + master.numRequestedContainers.set(targetContainers); + + // first allocate a single container, everything should be fine + handler.onContainersAllocated(containers); + Assert.assertEquals("Wrong container allocation count", 1, + master.getAllocatedContainers()); + Mockito.verifyZeroInteractions(mockClient); + Assert.assertEquals("Incorrect number of threads launched", 1, + master.threadsLaunched); + + // now send 3 extra containers + containers.clear(); + ContainerId id2 = BuilderUtils.newContainerId(1, 1, 1, 2); + containers.add(generateContainer(id2)); + ContainerId id3 = BuilderUtils.newContainerId(1, 1, 1, 3); + containers.add(generateContainer(id3)); + ContainerId id4 = BuilderUtils.newContainerId(1, 1, 1, 4); + containers.add(generateContainer(id4)); + handler.onContainersAllocated(containers); + Assert.assertEquals("Wrong final container allocation count", 4, + master.getAllocatedContainers()); + + Assert.assertEquals("Incorrect number of threads launched", 4, + master.threadsLaunched); + + // make sure we handle completion events correctly + List<ContainerStatus> status = new ArrayList<>(); + status.add(generateContainerStatus(id1, ContainerExitStatus.SUCCESS)); + status.add(generateContainerStatus(id2, ContainerExitStatus.SUCCESS)); + status.add(generateContainerStatus(id3, ContainerExitStatus.ABORTED)); + status.add(generateContainerStatus(id4, ContainerExitStatus.ABORTED)); + handler.onContainersCompleted(status); + + Assert.assertEquals("Unexpected number of completed containers", + targetContainers, master.getNumCompletedContainers()); + Assert.assertTrue("Master didn't finish containers as expected", + master.getDone()); + + // test for events 
from containers we know nothing about + // these events should be ignored + status = new ArrayList<>(); + ContainerId id5 = BuilderUtils.newContainerId(1, 1, 1, 5); + status.add(generateContainerStatus(id5, ContainerExitStatus.ABORTED)); + Assert.assertEquals("Unexpected number of completed containers", + targetContainers, master.getNumCompletedContainers()); + Assert.assertTrue("Master didn't finish containers as expected", + master.getDone()); + status.add(generateContainerStatus(id5, ContainerExitStatus.SUCCESS)); + Assert.assertEquals("Unexpected number of completed containers", + targetContainers, master.getNumCompletedContainers()); + Assert.assertTrue("Master didn't finish containers as expected", + master.getDone()); + } + + private Container generateContainer(ContainerId cid) { + return Container.newInstance(cid, NodeId.newInstance("host", 5000), + "host:80", Resource.newInstance(1024, 1), Priority.newInstance(0), null); + } + + private ContainerStatus + generateContainerStatus(ContainerId id, int exitStatus) { + return ContainerStatus.newInstance(id, ContainerState.COMPLETE, "", + exitStatus); + } + @Test public void testTimelineClientInDSAppMaster() throws Exception { ApplicationMaster appMaster = new ApplicationMaster();
040fa2581a8a9b51fb154a5e5ae8aff6c8cd291d
elasticsearch
Added GeoDistance test which verifies the- difference in behaviour between ARC and PLANE, causing elliptical results--
p
https://github.com/elastic/elasticsearch
diff --git a/src/test/java/org/elasticsearch/test/unit/index/search/geo/GeoDistanceTests.java b/src/test/java/org/elasticsearch/test/unit/index/search/geo/GeoDistanceTests.java index 3783357c3fe18..649769e26d5cf 100644 --- a/src/test/java/org/elasticsearch/test/unit/index/search/geo/GeoDistanceTests.java +++ b/src/test/java/org/elasticsearch/test/unit/index/search/geo/GeoDistanceTests.java @@ -21,10 +21,13 @@ import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.index.search.geo.GeoDistance; +import org.elasticsearch.index.search.geo.Point; import org.testng.annotations.Test; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; /** */ @@ -35,18 +38,33 @@ public class GeoDistanceTests { public void testDistanceCheck() { // Note, is within is an approximation, so, even though 0.52 is outside 50mi, we still get "true" GeoDistance.DistanceBoundingCheck check = GeoDistance.distanceBoundingCheck(0, 0, 50, DistanceUnit.MILES); - //System.out.println("Dist: " + GeoDistance.ARC.calculate(0, 0, 0.5, 0.5, DistanceUnit.MILES)); assertThat(check.isWithin(0.5, 0.5), equalTo(true)); - //System.out.println("Dist: " + GeoDistance.ARC.calculate(0, 0, 0.52, 0.52, DistanceUnit.MILES)); assertThat(check.isWithin(0.52, 0.52), equalTo(true)); - //System.out.println("Dist: " + GeoDistance.ARC.calculate(0, 0, 1, 1, DistanceUnit.MILES)); assertThat(check.isWithin(1, 1), equalTo(false)); - check = GeoDistance.distanceBoundingCheck(0, 179, 200, DistanceUnit.MILES); - //System.out.println("Dist: " + GeoDistance.ARC.calculate(0, 179, 0, -179, DistanceUnit.MILES)); assertThat(check.isWithin(0, -179), equalTo(true)); - //System.out.println("Dist: " + GeoDistance.ARC.calculate(0, 179, 0, -178, DistanceUnit.MILES)); assertThat(check.isWithin(0, -178), equalTo(false)); } + + @Test + public void testArcDistanceVsPlaneInEllipsis() { + Point 
centre = new Point(48.8534100, 2.3488000); + Point northernPoint = new Point(48.8801108681, 2.35152032666); + Point westernPoint = new Point(48.85265, 2.308896); + + // With GeoDistance.ARC both the northern and western points are within the 4km range + assertThat(GeoDistance.ARC.calculate(centre.lat, centre.lon, northernPoint.lat, + northernPoint.lon, DistanceUnit.KILOMETERS), lessThan(4D)); + assertThat(GeoDistance.ARC.calculate(centre.lat, centre.lon, westernPoint.lat, + westernPoint.lon, DistanceUnit.KILOMETERS), lessThan(4D)); + + // With GeoDistance.PLANE, only the northern point is within the 4km range, + // the western point is outside of the range due to the simple math it employs, + // meaning results will appear elliptical + assertThat(GeoDistance.PLANE.calculate(centre.lat, centre.lon, northernPoint.lat, + northernPoint.lon, DistanceUnit.KILOMETERS), lessThan(4D)); + assertThat(GeoDistance.PLANE.calculate(centre.lat, centre.lon, westernPoint.lat, + westernPoint.lon, DistanceUnit.KILOMETERS), greaterThan(4D)); + } }
439413c6269bbc9655f7e86c70dc85dd7e150f63
elasticsearch
Introduced common test methods in- MatchedQueriesTests (e.g. createIndex, ensureGreen, refresh, assertHitCount)--
p
https://github.com/elastic/elasticsearch
diff --git a/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java b/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java index 515014f525acd..831cb160da032 100644 --- a/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java +++ b/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java @@ -19,16 +19,15 @@ package org.elasticsearch.search.matchedqueries; +import org.elasticsearch.AbstractSharedClusterTest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.Priority; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.AbstractSharedClusterTest; import org.junit.Test; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.FilterBuilders.*; import static org.elasticsearch.index.query.QueryBuilders.*; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItemInArray; @@ -40,8 +39,8 @@ public class MatchedQueriesTests extends AbstractSharedClusterTest { @Test public void simpleMatchedQueryFromFilteredQuery() throws Exception { - client().admin().indices().prepareCreate("test").execute().actionGet(); - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + createIndex("test"); + ensureGreen(); client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject() .field("name", "test1") @@ -58,15 +57,14 @@ public void simpleMatchedQueryFromFilteredQuery() throws Exception { .field("number", 3) .endObject()).execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); + refresh(); SearchResponse searchResponse = client().prepareSearch() 
.setQuery(filteredQuery(matchAllQuery(), orFilter(rangeFilter("number").lte(2).filterName("test1"), rangeFilter("number").gt(2).filterName("test2")))) .execute().actionGet(); + assertHitCount(searchResponse, 3l); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo(3l)); for (SearchHit hit : searchResponse.getHits()) { if (hit.id().equals("1") || hit.id().equals("2")) { assertThat(hit.matchedQueries().length, equalTo(1)); @@ -84,8 +82,8 @@ public void simpleMatchedQueryFromFilteredQuery() throws Exception { .execute().actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo(3l)); + assertHitCount(searchResponse, 3l); + for (SearchHit hit : searchResponse.getHits()) { if (hit.id().equals("1") || hit.id().equals("2")) { assertThat(hit.matchedQueries().length, equalTo(1)); @@ -102,8 +100,8 @@ public void simpleMatchedQueryFromFilteredQuery() throws Exception { @Test public void simpleMatchedQueryFromTopLevelFilter() throws Exception { - client().admin().indices().prepareCreate("test").execute().actionGet(); - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + createIndex("test"); + ensureGreen(); client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject() .field("name", "test") @@ -118,7 +116,7 @@ public void simpleMatchedQueryFromTopLevelFilter() throws Exception { .field("name", "test") .endObject()).execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); + refresh(); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -127,8 +125,7 @@ public void simpleMatchedQueryFromTopLevelFilter() throws Exception { termFilter("title", "title1").filterName("title"))) .execute().actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo(3l)); + 
assertHitCount(searchResponse, 3l); for (SearchHit hit : searchResponse.getHits()) { if (hit.id().equals("1")) { @@ -150,8 +147,7 @@ public void simpleMatchedQueryFromTopLevelFilter() throws Exception { .should(termQuery("title", "title1").queryName("title")))) .execute().actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo(3l)); + assertHitCount(searchResponse, 3l); for (SearchHit hit : searchResponse.getHits()) { if (hit.id().equals("1")) { @@ -170,8 +166,8 @@ public void simpleMatchedQueryFromTopLevelFilter() throws Exception { @Test public void simpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Exception { - client().admin().indices().prepareCreate("test").execute().actionGet(); - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + createIndex("test"); + ensureGreen(); client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject() .field("name", "test") @@ -188,15 +184,14 @@ public void simpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Except .field("title", "title3") .endObject()).execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); + refresh(); SearchResponse searchResponse = client().prepareSearch() .setQuery(filteredQuery(matchAllQuery(), termsFilter("title", "title1", "title2", "title3").filterName("title"))) .setFilter(termFilter("name", "test").filterName("name")) .execute().actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo(3l)); + assertHitCount(searchResponse, 3l); for (SearchHit hit : searchResponse.getHits()) { if (hit.id().equals("1") || hit.id().equals("2") || hit.id().equals("3")) { @@ -213,8 +208,7 @@ public void simpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Except .setFilter(queryFilter(matchQuery("name", "test").queryName("name"))) .execute().actionGet(); - 
assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo(3l)); + assertHitCount(searchResponse, 3l); for (SearchHit hit : searchResponse.getHits()) { if (hit.id().equals("1") || hit.id().equals("2") || hit.id().equals("3")) {
982eebf70ff4aebfc200c6373511f1ce0e8667ed
kotlin
better root ns--
p
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaBridgeConfiguration.java b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaBridgeConfiguration.java index 10f6b61ae373a..c41515e932cb0 100644 --- a/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaBridgeConfiguration.java +++ b/compiler/frontend.java/src/org/jetbrains/jet/lang/resolve/java/JavaBridgeConfiguration.java @@ -19,7 +19,6 @@ import com.intellij.openapi.project.Project; import org.jetbrains.annotations.NotNull; import org.jetbrains.jet.lang.ModuleConfiguration; -import org.jetbrains.jet.lang.descriptors.DeclarationDescriptor; import org.jetbrains.jet.lang.descriptors.NamespaceDescriptor; import org.jetbrains.jet.lang.descriptors.annotations.AnnotationDescriptor; import org.jetbrains.jet.lang.psi.JetImportDirective; @@ -72,24 +71,4 @@ public void extendNamespaceScope(@NotNull BindingTrace trace, @NotNull Namespace } - @Override - public NamespaceDescriptor getTopLevelNamespace(@NotNull String shortName) { - NamespaceDescriptor namespaceDescriptor = javaSemanticServices.getDescriptorResolver().resolveNamespace(FqName.topLevel(shortName)); - if (namespaceDescriptor != null) { - return namespaceDescriptor; - } - return delegateConfiguration.getTopLevelNamespace(shortName); - } - - @Override - public void addAllTopLevelNamespacesTo(@NotNull Collection<? 
super NamespaceDescriptor> topLevelNamespaces) { - NamespaceDescriptor defaultPackage = javaSemanticServices.getDescriptorResolver().resolveNamespace(FqName.ROOT); - assert defaultPackage != null : "Cannot resolve Java's default package"; - for (DeclarationDescriptor declarationDescriptor : defaultPackage.getMemberScope().getAllDescriptors()) { - if (declarationDescriptor instanceof NamespaceDescriptor) { - NamespaceDescriptor namespaceDescriptor = (NamespaceDescriptor) declarationDescriptor; - topLevelNamespaces.add(namespaceDescriptor); - } - } - } } diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/DefaultModuleConfiguration.java b/compiler/frontend/src/org/jetbrains/jet/lang/DefaultModuleConfiguration.java index 790b1b184ee79..82bf46581b5ae 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/DefaultModuleConfiguration.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/DefaultModuleConfiguration.java @@ -53,13 +53,4 @@ public void addDefaultImports(@NotNull WritableScope rootScope, @NotNull Collect public void extendNamespaceScope(@NotNull BindingTrace trace, @NotNull NamespaceDescriptor namespaceDescriptor, @NotNull WritableScope namespaceMemberScope) { } - @Override - public NamespaceDescriptor getTopLevelNamespace(@NotNull String shortName) { - return null; - } - - @Override - public void addAllTopLevelNamespacesTo(@NotNull Collection<? 
super NamespaceDescriptor> topLevelNamespaces) { - } - } diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/ModuleConfiguration.java b/compiler/frontend/src/org/jetbrains/jet/lang/ModuleConfiguration.java index 0ed59fcb0383d..5c9c34bedb3e9 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/ModuleConfiguration.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/ModuleConfiguration.java @@ -17,8 +17,6 @@ package org.jetbrains.jet.lang; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; -import org.jetbrains.jet.lang.descriptors.ModuleDescriptor; import org.jetbrains.jet.lang.descriptors.NamespaceDescriptor; import org.jetbrains.jet.lang.psi.JetImportDirective; import org.jetbrains.jet.lang.resolve.BindingTrace; @@ -39,14 +37,6 @@ public void addDefaultImports(@NotNull WritableScope rootScope, @NotNull Collect public void extendNamespaceScope(@NotNull BindingTrace trace, @NotNull NamespaceDescriptor namespaceDescriptor, @NotNull WritableScope namespaceMemberScope) { } - @Override - public NamespaceDescriptor getTopLevelNamespace(@NotNull String shortName) { - return null; - } - - @Override - public void addAllTopLevelNamespacesTo(@NotNull Collection<? super NamespaceDescriptor> topLevelNamespaces) { - } }; void addDefaultImports(@NotNull WritableScope rootScope, @NotNull Collection<JetImportDirective> directives); @@ -57,10 +47,4 @@ public void addAllTopLevelNamespacesTo(@NotNull Collection<? 
super NamespaceDesc */ void extendNamespaceScope(@NotNull BindingTrace trace, @NotNull NamespaceDescriptor namespaceDescriptor, @NotNull WritableScope namespaceMemberScope); - /** This method is called only if no namespace with the same short name is declared in the module itself, or to merge namespaces */ - @Nullable - NamespaceDescriptor getTopLevelNamespace(@NotNull String shortName); - - /** Add all the top-level namespaces from the dependencies of this module into the given collection */ - void addAllTopLevelNamespacesTo(@NotNull Collection<? super NamespaceDescriptor> topLevelNamespaces); } diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TopDownAnalyzer.java b/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TopDownAnalyzer.java index 408fc93b1b543..1b6e1b055d7eb 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TopDownAnalyzer.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TopDownAnalyzer.java @@ -288,28 +288,11 @@ private void doAnalyzeFilesWithGivenTrance2(Collection<JetFile> files) { // Import the lang package scope.importScope(JetStandardLibrary.getInstance().getLibraryScope()); + NamespaceDescriptorImpl rootNs = typeHierarchyResolver.createNamespaceDescriptorIfNeeded(null, moduleDescriptor, "<root>", true); + // Import a scope that contains all top-level namespaces that come from dependencies // This makes the namespaces visible at all, does not import themselves - scope.importScope(new JetScopeAdapter(JetScope.EMPTY) { - @Override - public NamespaceDescriptor getNamespace(@NotNull String name) { - // Is it a top-level namespace coming from the dependencies? 
- NamespaceDescriptor topLevelNamespaceFromConfiguration = configuration.getTopLevelNamespace(name); - if (topLevelNamespaceFromConfiguration != null) { - return topLevelNamespaceFromConfiguration; - } - // Should be null, we are delegating to EMPTY - return super.getNamespace(name); - } - - @NotNull - @Override - public Collection<DeclarationDescriptor> getAllDescriptors() { - List<DeclarationDescriptor> allDescriptors = Lists.newArrayList(); - configuration.addAllTopLevelNamespacesTo(allDescriptors); - return allDescriptors; - } - }); + scope.importScope(rootNs.getMemberScope()); // dummy builder is used because "root" is module descriptor, // namespaces added to module explicitly in diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TypeHierarchyResolver.java b/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TypeHierarchyResolver.java index da814eeaa06d1..63a49dd678514 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TypeHierarchyResolver.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TypeHierarchyResolver.java @@ -263,7 +263,7 @@ private NamespaceDescriptorImpl createNamespaceDescriptorPathIfNeeded(JetFile fi } @NotNull - private NamespaceDescriptorImpl createNamespaceDescriptorIfNeeded(@Nullable JetFile file, @NotNull NamespaceDescriptorParent owner, @NotNull String name, boolean root) { + public NamespaceDescriptorImpl createNamespaceDescriptorIfNeeded(@Nullable JetFile file, @NotNull NamespaceDescriptorParent owner, @NotNull String name, boolean root) { FqName fqName; NamespaceDescriptorImpl namespaceDescriptor; diff --git a/js/js.translator/src/org/jetbrains/k2js/analyze/AnalyzerFacadeForJS.java b/js/js.translator/src/org/jetbrains/k2js/analyze/AnalyzerFacadeForJS.java index a001856ead064..76209a93f713a 100644 --- a/js/js.translator/src/org/jetbrains/k2js/analyze/AnalyzerFacadeForJS.java +++ b/js/js.translator/src/org/jetbrains/k2js/analyze/AnalyzerFacadeForJS.java @@ -129,13 +129,5 @@ public void 
extendNamespaceScope(@NotNull BindingTrace trace, @NotNull Namespace @NotNull WritableScope namespaceMemberScope) { } - @Override - public NamespaceDescriptor getTopLevelNamespace(@NotNull String shortName) { - return null; - } - - @Override - public void addAllTopLevelNamespacesTo(@NotNull Collection<? super NamespaceDescriptor> topLevelNamespaces) { - } } }
6ebe0c30ec99df77b9d9260e8dda5d0d9a975877
kotlin
fix KT-9299 In a project with circular- dependencies between modules, IDE reports error on use of internal class from- another module, but the corresponding code still compiles and runs.---KT-9299 Fixed-
c
https://github.com/JetBrains/kotlin
diff --git a/compiler/cli/src/org/jetbrains/kotlin/cli/common/moduleVisibilityImpl.kt b/compiler/cli/src/org/jetbrains/kotlin/cli/common/moduleVisibilityImpl.kt index 2000ecc8421ce..7b9f68cef34b7 100644 --- a/compiler/cli/src/org/jetbrains/kotlin/cli/common/moduleVisibilityImpl.kt +++ b/compiler/cli/src/org/jetbrains/kotlin/cli/common/moduleVisibilityImpl.kt @@ -24,6 +24,7 @@ import org.jetbrains.kotlin.load.kotlin.ModuleVisibilityManager import org.jetbrains.kotlin.load.kotlin.getSourceElement import org.jetbrains.kotlin.load.kotlin.isContainedByCompiledPartOfOurModule import org.jetbrains.kotlin.modules.Module +import org.jetbrains.kotlin.psi.KtFile import org.jetbrains.kotlin.resolve.lazy.descriptors.LazyPackageDescriptor import org.jetbrains.kotlin.resolve.source.KotlinSourceElement import org.jetbrains.kotlin.util.ModuleVisibilityHelper @@ -44,8 +45,18 @@ class ModuleVisibilityHelperImpl : ModuleVisibilityHelper { val moduleVisibilityManager = ModuleVisibilityManager.SERVICE.getInstance(project) + fun findModule(kotlinFile: KtFile): Module? 
= moduleVisibilityManager.chunk.firstOrNull { it.getSourceFiles().containsRaw(kotlinFile.virtualFile.path) } + val whatSource = getSourceElement(what) - if (whatSource is KotlinSourceElement) return true + if (whatSource is KotlinSourceElement) { + if (moduleVisibilityManager.chunk.size > 1 && fromSource is KotlinSourceElement) { + val fromSourceKotlinFile = fromSource.psi.getContainingJetFile() + val whatSourceKotlinFile = whatSource.psi.getContainingJetFile() + return findModule(whatSourceKotlinFile) === findModule(fromSourceKotlinFile) + } + + return true + } moduleVisibilityManager.friendPaths.forEach { if (isContainedByCompiledPartOfOurModule(what, File(it))) return true diff --git a/compiler/frontend/src/org/jetbrains/kotlin/resolve/QualifiedExpressionResolver.kt b/compiler/frontend/src/org/jetbrains/kotlin/resolve/QualifiedExpressionResolver.kt index 613aee751284c..338506b33d371 100644 --- a/compiler/frontend/src/org/jetbrains/kotlin/resolve/QualifiedExpressionResolver.kt +++ b/compiler/frontend/src/org/jetbrains/kotlin/resolve/QualifiedExpressionResolver.kt @@ -29,6 +29,7 @@ import org.jetbrains.kotlin.resolve.scopes.LexicalScope import org.jetbrains.kotlin.resolve.scopes.receivers.QualifierReceiver import org.jetbrains.kotlin.resolve.scopes.receivers.ReceiverValue import org.jetbrains.kotlin.resolve.scopes.utils.findClassifier +import org.jetbrains.kotlin.resolve.source.KotlinSourceElement import org.jetbrains.kotlin.resolve.validation.SymbolUsageValidator import org.jetbrains.kotlin.utils.addIfNotNull import org.jetbrains.kotlin.utils.addToStdlib.check @@ -122,11 +123,19 @@ public class QualifiedExpressionResolver(val symbolUsageValidator: SymbolUsageVa val importedReference = importDirective.importedReference ?: return null val path = importedReference.asQualifierPartList(trace) val lastPart = path.lastOrNull() ?: return null + val packageFragmentForCheck = + if (packageFragmentForVisibilityCheck is DeclarationDescriptorWithSource && 
packageFragmentForVisibilityCheck.source == SourceElement.NO_SOURCE) { + PackageFragmentWithCustomSource(packageFragmentForVisibilityCheck, KotlinSourceElement(importDirective.getContainingJetFile())) + } + + else { + packageFragmentForVisibilityCheck + } if (!importDirective.isAllUnder) { - return processSingleImport(moduleDescriptor, trace, importDirective, path, lastPart, packageFragmentForVisibilityCheck) + return processSingleImport(moduleDescriptor, trace, importDirective, path, lastPart, packageFragmentForCheck) } - val packageOrClassDescriptor = resolveToPackageOrClass(path, moduleDescriptor, trace, packageFragmentForVisibilityCheck, + val packageOrClassDescriptor = resolveToPackageOrClass(path, moduleDescriptor, trace, packageFragmentForCheck, scopeForFirstPart = null, inImport = true) ?: return null if (packageOrClassDescriptor is ClassDescriptor && packageOrClassDescriptor.kind.isSingleton) { trace.report(Errors.CANNOT_ALL_UNDER_IMPORT_FROM_SINGLETON.on(lastPart.expression, packageOrClassDescriptor)) // todo report on star @@ -398,8 +407,17 @@ public class QualifiedExpressionResolver(val symbolUsageValidator: SymbolUsageVa symbolUsageValidator.validateTypeUsage(descriptor, trace, referenceExpression) } - if (descriptor is DeclarationDescriptorWithVisibility && !isVisible(descriptor, shouldBeVisibleFrom, inImport)) { - trace.report(Errors.INVISIBLE_REFERENCE.on(referenceExpression, descriptor, descriptor.visibility, descriptor)) + if (descriptor is DeclarationDescriptorWithVisibility) { + val fromToCheck = + if (shouldBeVisibleFrom is PackageFragmentDescriptor && shouldBeVisibleFrom.source == SourceElement.NO_SOURCE) { + PackageFragmentWithCustomSource(shouldBeVisibleFrom, KotlinSourceElement(referenceExpression.getContainingJetFile())) + } + else { + shouldBeVisibleFrom + } + if (!isVisible(descriptor, fromToCheck, inImport)) { + trace.report(Errors.INVISIBLE_REFERENCE.on(referenceExpression, descriptor, descriptor.visibility, descriptor)) + } } if 
(isQualifier) { @@ -429,3 +447,11 @@ public class QualifiedExpressionResolver(val symbolUsageValidator: SymbolUsageVa return Visibilities.isVisible(ReceiverValue.IRRELEVANT_RECEIVER, descriptor, shouldBeVisibleFrom) } } + +/* + This purpose of this class is to pass information about source file for current package fragment in order for check visibilities between modules + (see ModuleVisibilityHelperImpl.isInFriendModule). + */ +private class PackageFragmentWithCustomSource(private val original: PackageFragmentDescriptor, private val source: SourceElement) : PackageFragmentDescriptor by original { + override fun getSource(): SourceElement = source +} diff --git a/jps-plugin/test/org/jetbrains/kotlin/jps/build/IncrementalJpsTestGenerated.java b/jps-plugin/test/org/jetbrains/kotlin/jps/build/IncrementalJpsTestGenerated.java index 00730fed6a476..896dbdd010d82 100644 --- a/jps-plugin/test/org/jetbrains/kotlin/jps/build/IncrementalJpsTestGenerated.java +++ b/jps-plugin/test/org/jetbrains/kotlin/jps/build/IncrementalJpsTestGenerated.java @@ -79,6 +79,18 @@ public void testSimpleDependency() throws Exception { doTest(fileName); } + @TestMetadata("simpleDependencyErrorOnAccessToInternal1") + public void testSimpleDependencyErrorOnAccessToInternal1() throws Exception { + String fileName = JetTestUtils.navigationMetadata("jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/"); + doTest(fileName); + } + + @TestMetadata("simpleDependencyErrorOnAccessToInternal2") + public void testSimpleDependencyErrorOnAccessToInternal2() throws Exception { + String fileName = JetTestUtils.navigationMetadata("jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/"); + doTest(fileName); + } + @TestMetadata("simpleDependencyUnchanged") public void testSimpleDependencyUnchanged() throws Exception { String fileName = JetTestUtils.navigationMetadata("jps-plugin/testData/incremental/multiModule/simpleDependencyUnchanged/"); diff --git 
a/jps-plugin/test/org/jetbrains/kotlin/jps/build/KotlinJpsBuildTest.kt b/jps-plugin/test/org/jetbrains/kotlin/jps/build/KotlinJpsBuildTest.kt index f5fe840ffaaa4..b4264cd694ac8 100644 --- a/jps-plugin/test/org/jetbrains/kotlin/jps/build/KotlinJpsBuildTest.kt +++ b/jps-plugin/test/org/jetbrains/kotlin/jps/build/KotlinJpsBuildTest.kt @@ -494,20 +494,14 @@ public class KotlinJpsBuildTest : AbstractKotlinJpsBuildTestCase() { initProject() val result = makeAll() result.assertFailed() - - val actualErrors = result.getMessages(BuildMessage.Kind.ERROR) - .map { it as CompilerMessage } - .map { "${it.messageText} at line ${it.line}, column ${it.column}" }.sorted().joinToString("\n") - val projectRoot = File(AbstractKotlinJpsBuildTestCase.TEST_DATA_PATH + "general/" + getTestName(false)) - val expectedFile = File(projectRoot, "errors.txt") - JetTestUtils.assertEqualsToFile(expectedFile, actualErrors) + result.checkErrors() } - // TODO See KT-9299 In a project with circular dependencies between modules, IDE reports error on use of internal class from another module, but the corresponding code still compiles and runs. 
public fun testCircularDependenciesInternalFromAnotherModule() { initProject() val result = makeAll() - result.assertSuccessful() + result.assertFailed() + result.checkErrors() } public fun testCircularDependencyWithReferenceToOldVersionLib() { @@ -679,6 +673,15 @@ public class KotlinJpsBuildTest : AbstractKotlinJpsBuildTestCase() { assertFalse(File(storageRoot, "targets/java-production/module2/kotlin").exists()) } + private fun BuildResult.checkErrors() { + val actualErrors = getMessages(BuildMessage.Kind.ERROR) + .map { it as CompilerMessage } + .map { "${it.messageText} at line ${it.line}, column ${it.column}" }.sorted().joinToString("\n") + val projectRoot = File(AbstractKotlinJpsBuildTestCase.TEST_DATA_PATH + "general/" + getTestName(false)) + val expectedFile = File(projectRoot, "errors.txt") + JetTestUtils.assertEqualsToFile(expectedFile, actualErrors) + } + private fun buildCustom(canceledStatus: CanceledStatus, logger: TestProjectBuilderLogger,buildResult: BuildResult) { val scopeBuilder = CompileScopeTestBuilder.make().all() val descriptor = this.createProjectDescriptor(BuildLoggingManager(logger)) diff --git a/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/errors.txt b/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/errors.txt new file mode 100644 index 0000000000000..331aaf1316d76 --- /dev/null +++ b/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/errors.txt @@ -0,0 +1,7 @@ +'internal open val member: kotlin.Int defined in test.ClassBB1' has no access to 'internal abstract val member: kotlin.Int defined in test.ClassB1', so it cannot override it at line 14, column 14 +Cannot access 'InternalClass1': it is 'internal' in 'test' at line 5, column 13 +Cannot access 'InternalClass1': it is 'internal' in 'test' at line 8, column 36 +Cannot access 'InternalClass2': it is 'internal' in 'test' at line 19, column 15 +Cannot access 'InternalClassAnnotation': it is 'internal' in 'test' 
at line 10, column 2 +Cannot access 'InternalFileAnnotation': it is 'internal' in 'test' at line 1, column 7 +Cannot access 'member': it is 'invisible_fake' in 'ClassAA1' at line 27, column 25 \ No newline at end of file diff --git a/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/module1/src/module1.kt b/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/module1/src/module1.kt index 30fcc94d78088..9a7bdb153a3d6 100644 --- a/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/module1/src/module1.kt +++ b/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/module1/src/module1.kt @@ -2,9 +2,18 @@ package test internal open class InternalClass1 +@Target(AnnotationTarget.FILE) +@Retention(AnnotationRetention.SOURCE) +internal annotation class InternalFileAnnotation() + +@Target(AnnotationTarget.CLASS) +@Retention(AnnotationRetention.SOURCE) +internal annotation class InternalClassAnnotation() + abstract class ClassA1(internal val member: Int) abstract class ClassB1 { internal abstract val member: Int } +class ClassD: InternalClass2() \ No newline at end of file diff --git a/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/module2/src/module2.kt b/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/module2/src/module2.kt index 1f1647ddad4dc..ece01121bd2cc 100644 --- a/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/module2/src/module2.kt +++ b/jps-plugin/testData/general/CircularDependenciesInternalFromAnotherModule/module2/src/module2.kt @@ -1,11 +1,13 @@ -// ERROR: 'internal open val member: kotlin.Int defined in test.ClassBB1' has no access to 'internal abstract val member: kotlin.Int defined in test.ClassB1', so it cannot override it -// ERROR: Cannot access 'InternalClass1': it is 'internal' in 'test' -// ERROR: Cannot access 'member': it is 'invisible_fake' in 'ClassAA1' +@file:InternalFileAnnotation 
+ package test +import test.InternalClass1 + // InternalClass1, ClassA1, ClassB1 are in module1 class ClassInheritedFromInternal1: InternalClass1() +@InternalClassAnnotation class ClassAA1 : ClassA1(10) class ClassBB1 : ClassB1() { diff --git a/jps-plugin/testData/general/InternalFromAnotherModule/errors.txt b/jps-plugin/testData/general/InternalFromAnotherModule/errors.txt index 13cd0c3cbf138..2fae260968a70 100644 --- a/jps-plugin/testData/general/InternalFromAnotherModule/errors.txt +++ b/jps-plugin/testData/general/InternalFromAnotherModule/errors.txt @@ -1,4 +1,6 @@ -'internal open val member: kotlin.Int defined in test.ClassBB1' has no access to 'internal abstract val member: kotlin.Int defined in test.ClassB1', so it cannot override it at line 11, column 14 -Cannot access 'InternalClass1': it is 'internal' in 'test' at line 3, column 13 -Cannot access 'InternalClass1': it is 'internal' in 'test' at line 6, column 36 -Cannot access 'member': it is 'invisible_fake' in 'ClassAA1' at line 24, column 25 \ No newline at end of file +'internal open val member: kotlin.Int defined in test.ClassBB1' has no access to 'internal abstract val member: kotlin.Int defined in test.ClassB1', so it cannot override it at line 14, column 14 +Cannot access 'InternalClass1': it is 'internal' in 'test' at line 5, column 13 +Cannot access 'InternalClass1': it is 'internal' in 'test' at line 8, column 36 +Cannot access 'InternalClassAnnotation': it is 'internal' in 'test' at line 10, column 2 +Cannot access 'InternalTestAnnotation': it is 'internal' in 'test' at line 1, column 7 +Cannot access 'member': it is 'invisible_fake' in 'ClassAA1' at line 27, column 25 \ No newline at end of file diff --git a/jps-plugin/testData/general/InternalFromAnotherModule/module1/src/module1.kt b/jps-plugin/testData/general/InternalFromAnotherModule/module1/src/module1.kt index 30fcc94d78088..e2fc0309302f5 100644 --- a/jps-plugin/testData/general/InternalFromAnotherModule/module1/src/module1.kt +++ 
b/jps-plugin/testData/general/InternalFromAnotherModule/module1/src/module1.kt @@ -1,4 +1,13 @@ package test +@Target(AnnotationTarget.FILE) +@Retention(AnnotationRetention.SOURCE) +internal annotation class InternalTestAnnotation() + +@Target(AnnotationTarget.CLASS) +@Retention(AnnotationRetention.SOURCE) +internal annotation class InternalClassAnnotation() + +private class PrivateClass1 internal open class InternalClass1 diff --git a/jps-plugin/testData/general/InternalFromAnotherModule/module2/src/module2.kt b/jps-plugin/testData/general/InternalFromAnotherModule/module2/src/module2.kt index 64f4bb6cf1f45..155a4c3a341a9 100644 --- a/jps-plugin/testData/general/InternalFromAnotherModule/module2/src/module2.kt +++ b/jps-plugin/testData/general/InternalFromAnotherModule/module2/src/module2.kt @@ -1,3 +1,5 @@ +@file:InternalTestAnnotation + package test import test.InternalClass1 @@ -5,6 +7,7 @@ import test.InternalClass1 // InternalClass1, ClassA1, ClassB1 are in module1 class ClassInheritedFromInternal1: InternalClass1() +@InternalClassAnnotation class ClassAA1 : ClassA1(10) class ClassBB1 : ClassB1() { diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/build.log b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/build.log new file mode 100644 index 0000000000000..df01db10bfd62 --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/build.log @@ -0,0 +1,26 @@ +Cleaning output files: +out/production/module1/META-INF/module1.kotlin_module +out/production/module1/a/A.class +out/production/module1/a/ClassAnnotation.class +out/production/module1/a/FileAnnotation.class +out/production/module1/a/Module1_aKt.class +End of files +Compiling files: +module1/src/module1_a.kt +End of files +Cleaning output files: +out/production/module2/META-INF/module2.kotlin_module +out/production/module2/b/B.class +out/production/module2/b/Module2_bKt.class 
+End of files +Compiling files: +module2/src/module2_b.kt +End of files +COMPILATION FAILED +Cannot access 'FileAnnotation': it is 'internal' in 'a' +Cannot access 'A': it is 'internal' in 'a' +Cannot access 'FileAnnotation': it is 'internal' in 'a' +Cannot access 'ClassAnnotation': it is 'internal' in 'a' +Cannot access 'ClassAnnotation': it is 'internal' in 'a' +Cannot access 'A': it is 'internal' in 'a' +Cannot access 'a': it is 'internal' in 'a' \ No newline at end of file diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/dependencies.txt b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/dependencies.txt new file mode 100644 index 0000000000000..827bf04cc589b --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/dependencies.txt @@ -0,0 +1,2 @@ +module1-> +module2->module1 diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module1_a.kt b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module1_a.kt new file mode 100644 index 0000000000000..621ede2ee51bf --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module1_a.kt @@ -0,0 +1,14 @@ +package a + +@Target(AnnotationTarget.FILE) +@Retention(AnnotationRetention.SOURCE) +annotation class FileAnnotation() + +@Target(AnnotationTarget.CLASS) +@Retention(AnnotationRetention.SOURCE) +annotation class ClassAnnotation() + +class A + +fun a() { +} diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module1_a.kt.new b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module1_a.kt.new new file mode 100644 index 0000000000000..e514959008cac --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module1_a.kt.new @@ -0,0 +1,15 @@ 
+package a + +@Target(AnnotationTarget.FILE) +@Retention(AnnotationRetention.SOURCE) +internal annotation class FileAnnotation() + +@Target(AnnotationTarget.CLASS) +@Retention(AnnotationRetention.SOURCE) +internal annotation class ClassAnnotation() + +internal class A + +internal fun a(): String { + return ":)" +} diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module2_b.kt b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module2_b.kt new file mode 100644 index 0000000000000..9573f5d051783 --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module2_b.kt @@ -0,0 +1,13 @@ +@file:FileAnnotation +package b + +import a.A +import a.FileAnnotation +import a.ClassAnnotation + +@ClassAnnotation +class B + +fun b(param: a.A) { + a.a() +} diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module2_c.kt b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module2_c.kt new file mode 100644 index 0000000000000..c4fab8fb00b47 --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal1/module2_c.kt @@ -0,0 +1,5 @@ +package c + +fun c() { + // This file doesn't use anything from module1, so it won't be recompiled after change +} \ No newline at end of file diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/build.log b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/build.log new file mode 100644 index 0000000000000..1a5350845e0b1 --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/build.log @@ -0,0 +1,26 @@ +Cleaning output files: +out/production/module1/META-INF/module1.kotlin_module +out/production/module1/a/A.class +out/production/module1/a/InternalA.class 
+out/production/module1/a/InternalClassAnnotation.class +out/production/module1/a/InternalFileAnnotation.class +out/production/module1/a/Module1_aKt.class +End of files +Compiling files: +module1/src/module1_a.kt +End of files +Cleaning output files: +out/production/module2/META-INF/module2.kotlin_module +out/production/module2/b/B.class +out/production/module2/b/Module2_bKt.class +End of files +Compiling files: +module2/src/module2_b.kt +End of files +COMPILATION FAILED +Cannot access 'InternalFileAnnotation': it is 'internal' in 'a' +Cannot access 'InternalFileAnnotation': it is 'internal' in 'a' +Cannot access 'InternalClassAnnotation': it is 'internal' in 'a' +Cannot access 'InternalClassAnnotation': it is 'internal' in 'a' +Unresolved reference: InternalA +Unresolved reference: internalA \ No newline at end of file diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/dependencies.txt b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/dependencies.txt new file mode 100644 index 0000000000000..827bf04cc589b --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/dependencies.txt @@ -0,0 +1,2 @@ +module1-> +module2->module1 diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module1_a.kt b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module1_a.kt new file mode 100644 index 0000000000000..e7392045a0bc3 --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module1_a.kt @@ -0,0 +1,19 @@ +package a + +@Target(AnnotationTarget.FILE) +@Retention(AnnotationRetention.SOURCE) +internal annotation class InternalFileAnnotation() + +@Target(AnnotationTarget.CLASS) +@Retention(AnnotationRetention.SOURCE) +internal annotation class InternalClassAnnotation() + +class A + +internal class InternalA + +fun a() { +} + 
+internal fun internalA() { +} diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module1_a.kt.new b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module1_a.kt.new new file mode 100644 index 0000000000000..bc69686475069 --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module1_a.kt.new @@ -0,0 +1,19 @@ +package a + +@Target(AnnotationTarget.FILE) +@Retention(AnnotationRetention.SOURCE) +internal annotation class InternalFileAnnotation() + +@Target(AnnotationTarget.CLASS) +@Retention(AnnotationRetention.SOURCE) +internal annotation class InternalClassAnnotation() + +class A + +internal class AA + +fun a() { +} + +internal fun aa() { +} diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module2_b.kt b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module2_b.kt new file mode 100644 index 0000000000000..436721358e970 --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module2_b.kt @@ -0,0 +1,7 @@ +package b + +class B + +fun b(param: a.A) { + a.a() +} diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module2_b.kt.new b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module2_b.kt.new new file mode 100644 index 0000000000000..961d6541b9c5a --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module2_b.kt.new @@ -0,0 +1,12 @@ +@file:InternalFileAnnotation +package b + +import a.InternalFileAnnotation +import a.InternalClassAnnotation + +@InternalClassAnnotation +class B + +fun b(param: a.InternalA) { + a.internalA() +} diff --git a/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module2_c.kt 
b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module2_c.kt new file mode 100644 index 0000000000000..c4fab8fb00b47 --- /dev/null +++ b/jps-plugin/testData/incremental/multiModule/simpleDependencyErrorOnAccessToInternal2/module2_c.kt @@ -0,0 +1,5 @@ +package c + +fun c() { + // This file doesn't use anything from module1, so it won't be recompiled after change +} \ No newline at end of file
29e1e664f1bcaa2af3488096d33d84e328099f3c
camel
CAMEL-5370: Added direct-vm component to act as- synchronous direct calls between multiple camel contexts in the same JVM (eg- like direct + vm together). Can be used to support transactions spanning- multiple camel contextes / bundles.--git-svn-id: https://svn.apache.org/repos/asf/camel/trunk@1350591 13f79535-47bb-0310-9956-ffa450edef68-
a
https://github.com/apache/camel
diff --git a/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmComponent.java b/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmComponent.java index c7560c84e9250..c05e613ecda80 100644 --- a/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmComponent.java +++ b/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmComponent.java @@ -35,7 +35,7 @@ public class DirectVmComponent extends DefaultComponent { // must keep a map of consumers on the component to ensure endpoints can lookup old consumers // later in case the DirectEndpoint was re-created due the old was evicted from the endpoints LRUCache // on DefaultCamelContext - private static final ConcurrentMap<String, DirectVmConsumer> consumers = new ConcurrentHashMap<String, DirectVmConsumer>(); + private static final ConcurrentMap<String, DirectVmConsumer> CONSUMERS = new ConcurrentHashMap<String, DirectVmConsumer>(); @Override protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception { @@ -46,22 +46,22 @@ protected Endpoint createEndpoint(String uri, String remaining, Map<String, Obje public DirectVmConsumer getConsumer(DirectVmEndpoint endpoint) { String key = getConsumerKey(endpoint.getEndpointUri()); - return consumers.get(key); + return CONSUMERS.get(key); } public void addConsumer(DirectVmEndpoint endpoint, DirectVmConsumer consumer) { String key = getConsumerKey(endpoint.getEndpointUri()); - DirectVmConsumer existing = consumers.putIfAbsent(key, consumer); + DirectVmConsumer existing = CONSUMERS.putIfAbsent(key, consumer); if (existing != null) { String contextId = existing.getEndpoint().getCamelContext().getName(); throw new IllegalStateException("A consumer " + existing + " already exists from CamelContext: " + contextId + ". 
Multiple consumers not supported"); } - consumers.put(key, consumer); + CONSUMERS.put(key, consumer); } public void removeConsumer(DirectVmEndpoint endpoint, DirectVmConsumer consumer) { String key = getConsumerKey(endpoint.getEndpointUri()); - consumers.remove(key); + CONSUMERS.remove(key); } private static String getConsumerKey(String uri) { @@ -82,7 +82,7 @@ protected void doStart() throws Exception { protected void doStop() throws Exception { if (START_COUNTER.decrementAndGet() <= 0) { // clear queues when no more direct-vm components in use - consumers.clear(); + CONSUMERS.clear(); } } diff --git a/camel-core/src/main/java/org/apache/camel/component/directvm/package.html b/camel-core/src/main/java/org/apache/camel/component/directvm/package.html index ebcecc9feac27..80219af01cf19 100644 --- a/camel-core/src/main/java/org/apache/camel/component/directvm/package.html +++ b/camel-core/src/main/java/org/apache/camel/component/directvm/package.html @@ -19,8 +19,8 @@ </head> <body> -The <a href="http://camel.apache.org/directvm.html">Direct VM Component</a> which synchronously invokes -the consumer when a producer sends an exchange to the endpoint. This also known as <i>strait through processing</i>. +The <a href="http://camel.apache.org/direct-vm.html">Direct VM Component</a> which synchronously invokes +the consumer when a producer sends an exchange to the endpoint. This also known as <i>strait through processing</i>. <p/> This component supports messaging within the current JVM; so across CamelContext instances. Note that this communication can only take place between ClassLoaders which share the same camel-core.jar. 
diff --git a/camel-core/src/test/java/org/apache/camel/component/directvm/DirectVmTwoCamelContextTest.java b/camel-core/src/test/java/org/apache/camel/component/directvm/DirectVmTwoCamelContextTest.java index 09f5104e2752c..6865215cc562d 100644 --- a/camel-core/src/test/java/org/apache/camel/component/directvm/DirectVmTwoCamelContextTest.java +++ b/camel-core/src/test/java/org/apache/camel/component/directvm/DirectVmTwoCamelContextTest.java @@ -25,6 +25,8 @@ public class DirectVmTwoCamelContextTest extends AbstractDirectVmTestSupport { public void testTwoCamelContext() throws Exception { getMockEndpoint("mock:result").expectedBodiesReceived("Bye World"); + getMockEndpoint("mock:result").expectedHeaderReceived("name1", context.getName()); + getMockEndpoint("mock:result").expectedHeaderReceived("name2", context2.getName()); String out = template2.requestBody("direct:start", "Hello World", String.class); assertEquals("Bye World", out); @@ -39,6 +41,7 @@ protected RouteBuilder createRouteBuilder() throws Exception { public void configure() throws Exception { from("direct-vm:foo") .transform(constant("Bye World")) + .setHeader("name1", simple("${camelId}")) .log("Running on Camel ${camelId} on thread ${threadName} with message ${body}") .to("mock:result"); } @@ -51,6 +54,7 @@ protected RouteBuilder createRouteBuilderForSecondContext() throws Exception { @Override public void configure() throws Exception { from("direct:start") + .setHeader("name2", simple("${camelId}")) .log("Running on Camel ${camelId} on thread ${threadName} with message ${body}") .to("direct-vm:foo"); }
d585683650c8e2664b86ccc0aeef080d3b417d1d
restlet-framework-java
ServerResource now supports empty POST and PUT- requests.--
a
https://github.com/restlet/restlet-framework-java
diff --git a/build/tmpl/text/changes.txt b/build/tmpl/text/changes.txt index ea4508a850..79ade3d4bf 100644 --- a/build/tmpl/text/changes.txt +++ b/build/tmpl/text/changes.txt @@ -16,6 +16,8 @@ Changes log media type. - ServerResource now rejects unconvertable parameters with a 415 status. + - ServerResource now supports empty POST and PUT requests. + - Bugs fixed - Fixed issue in DomRepresentation with createTransformer() method. Fix contributed by Remi Dewitte. diff --git a/modules/org.restlet/src/org/restlet/resource/ServerResource.java b/modules/org.restlet/src/org/restlet/resource/ServerResource.java index b20fa02cd3..3441e8d3d4 100644 --- a/modules/org.restlet/src/org/restlet/resource/ServerResource.java +++ b/modules/org.restlet/src/org/restlet/resource/ServerResource.java @@ -130,7 +130,7 @@ protected Representation delete() throws ResourceException { AnnotationInfo annotationInfo = getAnnotation(Method.DELETE); if (annotationInfo != null) { - result = doHandle(annotationInfo); + result = doHandle(annotationInfo, null); } else { setStatus(Status.CLIENT_ERROR_METHOD_NOT_ALLOWED); } @@ -276,7 +276,7 @@ private RepresentationInfo doGetInfo() throws ResourceException { AnnotationInfo annotationInfo = getAnnotation(Method.GET); if (annotationInfo != null) { - result = doHandle(annotationInfo); + result = doHandle(annotationInfo, null); } else { result = getInfo(); } @@ -347,7 +347,7 @@ protected Representation doHandle() throws ResourceException { AnnotationInfo annotationInfo = getAnnotation(method); if (annotationInfo != null) { - result = doHandle(annotationInfo); + result = doHandle(annotationInfo, null); } else { setStatus(Status.CLIENT_ERROR_METHOD_NOT_ALLOWED); } @@ -360,74 +360,6 @@ protected Representation doHandle() throws ResourceException { return result; } - /** - * Effectively handle a call using an annotated method. - * - * @param annotationInfo - * The annotation descriptor. - * @return The response entity. 
- * @throws ResourceException - */ - private Representation doHandle(AnnotationInfo annotationInfo) - throws ResourceException { - Representation result = null; - ConverterService cs = getConverterService(); - Class<?>[] parameterTypes = annotationInfo.getJavaInputTypes(); - List<Object> parameters = null; - Object resultObject = null; - Object parameter = null; - - try { - if (parameterTypes.length > 0) { - parameters = new ArrayList<Object>(); - - for (Class<?> parameterType : parameterTypes) { - if (getRequestEntity() != null) { - try { - parameter = cs.toObject(getRequestEntity(), - parameterType, this); - - if (parameter != null) { - parameters.add(parameter); - } else { - throw new ResourceException( - Status.CLIENT_ERROR_UNSUPPORTED_MEDIA_TYPE); - } - } catch (IOException e) { - e.printStackTrace(); - parameters.add(null); - } - } else { - parameters.add(null); - } - } - - // Actually invoke the method - resultObject = annotationInfo.getJavaMethod().invoke(this, - parameters.toArray()); - } else { - // Actually invoke the method - resultObject = annotationInfo.getJavaMethod().invoke(this); - } - } catch (IllegalArgumentException e) { - throw new ResourceException(e); - } catch (IllegalAccessException e) { - throw new ResourceException(e); - } catch (InvocationTargetException e) { - if (e.getTargetException() instanceof ResourceException) { - throw (ResourceException) e.getTargetException(); - } - - throw new ResourceException(e.getTargetException()); - } - - if (resultObject != null) { - result = cs.toRepresentation(resultObject); - } - - return result; - } - /** * Effectively handles a call with content negotiation of the response * entity using an annotated method. @@ -435,31 +367,32 @@ private Representation doHandle(AnnotationInfo annotationInfo) * @param annotationInfo * The annotation descriptor. * @param variant - * The response variant expected. + * The response variant expected (can be null). * @return The response entity. 
* @throws ResourceException */ private Representation doHandle(AnnotationInfo annotationInfo, Variant variant) throws ResourceException { Representation result = null; + Class<?>[] parameterTypes = annotationInfo.getJavaInputTypes(); ConverterService cs = getConverterService(); - Object resultObject = null; + // Invoke the annoted method and get the resulting object. + Object resultObject = null; try { - if ((annotationInfo.getJavaInputTypes() != null) - && (annotationInfo.getJavaInputTypes().length > 0)) { + if (parameterTypes.length > 0) { List<Object> parameters = new ArrayList<Object>(); Object parameter = null; - for (Class<?> param : annotationInfo.getJavaInputTypes()) { - if (Variant.class.equals(param)) { + for (Class<?> parameterType : parameterTypes) { + if (Variant.class.equals(parameterType)) { parameters.add(variant); } else { - if (getRequestEntity().isAvailable()) { + if (getRequestEntity() != null + && getRequestEntity().isAvailable()) { try { parameter = cs.toObject(getRequestEntity(), - param, this); - + parameterType, this); } catch (IOException e) { parameter = null; } @@ -481,8 +414,6 @@ private Representation doHandle(AnnotationInfo annotationInfo, } else { resultObject = annotationInfo.getJavaMethod().invoke(this); } - - result = cs.toRepresentation(resultObject, variant, this); } catch (IllegalArgumentException e) { throw new ResourceException(e); } catch (IllegalAccessException e) { @@ -495,6 +426,14 @@ private Representation doHandle(AnnotationInfo annotationInfo, throw new ResourceException(e.getTargetException()); } + if (resultObject != null) { + if (variant != null) { + result = cs.toRepresentation(resultObject, variant, this); + } else { + result = cs.toRepresentation(resultObject); + } + } + return result; } @@ -613,7 +552,7 @@ protected Representation get() throws ResourceException { AnnotationInfo annotationInfo = getAnnotation(Method.GET); if (annotationInfo != null) { - result = doHandle(annotationInfo); + result = 
doHandle(annotationInfo, null); } else { setStatus(Status.CLIENT_ERROR_METHOD_NOT_ALLOWED); } @@ -967,7 +906,7 @@ protected Representation options() throws ResourceException { AnnotationInfo annotationInfo = getAnnotation(Method.OPTIONS); if (annotationInfo != null) { - result = doHandle(annotationInfo); + result = doHandle(annotationInfo, null); } else { setStatus(Status.CLIENT_ERROR_METHOD_NOT_ALLOWED); } @@ -1029,7 +968,7 @@ protected Representation post(Representation entity) AnnotationInfo annotationInfo = getAnnotation(Method.POST); if (annotationInfo != null) { - result = doHandle(annotationInfo); + result = doHandle(annotationInfo, null); } else { setStatus(Status.CLIENT_ERROR_METHOD_NOT_ALLOWED); } @@ -1096,7 +1035,7 @@ protected Representation put(Representation representation) AnnotationInfo annotationInfo = getAnnotation(Method.PUT); if (annotationInfo != null) { - result = doHandle(annotationInfo); + result = doHandle(annotationInfo, null); } else { setStatus(Status.CLIENT_ERROR_METHOD_NOT_ALLOWED); }
4a6101a697475b6f0f5b5da7cf280ad372725ebb
spring-framework
Guard against null in -visitInnerClass--Issue: SPR-8358,SPR-8186-
c
https://github.com/spring-projects/spring-framework
diff --git a/org.springframework.core/src/main/java/org/springframework/core/type/classreading/ClassMetadataReadingVisitor.java b/org.springframework.core/src/main/java/org/springframework/core/type/classreading/ClassMetadataReadingVisitor.java index 4e21320fbd06..d69a75bb3d17 100644 --- a/org.springframework.core/src/main/java/org/springframework/core/type/classreading/ClassMetadataReadingVisitor.java +++ b/org.springframework.core/src/main/java/org/springframework/core/type/classreading/ClassMetadataReadingVisitor.java @@ -80,9 +80,9 @@ public void visitOuterClass(String owner, String name, String desc) { } public void visitInnerClass(String name, String outerName, String innerName, int access) { - String fqName = ClassUtils.convertResourcePathToClassName(name); - String fqOuterName = ClassUtils.convertResourcePathToClassName(outerName); if (outerName != null) { + String fqName = ClassUtils.convertResourcePathToClassName(name); + String fqOuterName = ClassUtils.convertResourcePathToClassName(outerName); if (this.className.equals(fqName)) { this.enclosingClassName = fqOuterName; this.independentInnerClass = ((access & Opcodes.ACC_STATIC) != 0);
bc9f5da47e74b6b474c2792797cbd26a4f427685
kotlin
Rendering nice unicode arrow for function types- in UI.--
p
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend/src/org/jetbrains/jet/renderer/DescriptorRendererImpl.java b/compiler/frontend/src/org/jetbrains/jet/renderer/DescriptorRendererImpl.java index 02b8c922c33c2..ccaa3f9355778 100644 --- a/compiler/frontend/src/org/jetbrains/jet/renderer/DescriptorRendererImpl.java +++ b/compiler/frontend/src/org/jetbrains/jet/renderer/DescriptorRendererImpl.java @@ -116,6 +116,17 @@ private String lt() { return escape("<"); } + @NotNull + private String arrow() { + switch (textFormat) { + case PLAIN: + return escape("->"); + case HTML: + return "&rarr;"; + } + throw new IllegalStateException("Unexpected textFormat: " + textFormat); + } + @NotNull private String renderMessage(@NotNull String message) { switch (textFormat) { @@ -188,7 +199,7 @@ private String renderTypeWithoutEscape(@NotNull JetType type) { return "???"; } else if (type == CallResolverUtil.PLACEHOLDER_FUNCTION_TYPE) { - return "??? -> ???"; + return "??? " + arrow() + " ???"; } else if (ErrorUtils.isErrorType(type)) { return type.toString(); @@ -258,7 +269,7 @@ private String renderFunctionType(@NotNull JetType type) { sb.append("("); appendTypeProjections(KotlinBuiltIns.getInstance().getParameterTypeProjectionsFromFunctionType(type), sb); - sb.append(") -> "); + sb.append(") " + arrow() + " "); sb.append(renderType(KotlinBuiltIns.getInstance().getReturnTypeFromFunctionType(type))); if (type.isNullable()) { diff --git a/idea/testData/diagnosticMessage/functionPlaceholder1.html b/idea/testData/diagnosticMessage/functionPlaceholder1.html index a41e3eb40d789..de44d6314a1ac 100644 --- a/idea/testData/diagnosticMessage/functionPlaceholder1.html +++ b/idea/testData/diagnosticMessage/functionPlaceholder1.html @@ -36,7 +36,7 @@ <td align="right" style="white-space:nowrap;"> <font color=red> <b> -??? -&gt; ???</b> +??? 
&rarr; ???</b> </font> </td> <td style="white-space:nowrap;"> diff --git a/idea/testData/diagnosticMessage/functionPlaceholder2.html b/idea/testData/diagnosticMessage/functionPlaceholder2.html index cebdd26caece7..af0eba78cf393 100644 --- a/idea/testData/diagnosticMessage/functionPlaceholder2.html +++ b/idea/testData/diagnosticMessage/functionPlaceholder2.html @@ -36,7 +36,7 @@ <td align="right" style="white-space:nowrap;"> <font color=red> <b> -(???) -&gt; ???</b> +(???) &rarr; ???</b> </font> </td> <td style="white-space:nowrap;"> diff --git a/idea/testData/diagnosticMessage/functionPlaceholder3.html b/idea/testData/diagnosticMessage/functionPlaceholder3.html index 465a3c5a818bf..f1a742a340662 100644 --- a/idea/testData/diagnosticMessage/functionPlaceholder3.html +++ b/idea/testData/diagnosticMessage/functionPlaceholder3.html @@ -36,7 +36,7 @@ <td align="right" style="white-space:nowrap;"> <font color=red> <b> -(jet.Int) -&gt; ???</b> +(jet.Int) &rarr; ???</b> </font> </td> <td style="white-space:nowrap;">
a2a9029c7ee322dc77a85f581f0e584138278840
drools
-Make TupleSourceTest work--git-svn-id: https://svn.jboss.org/repos/labs/trunk/labs/jbossrules@2284 c60d74c8-e8f6-0310-9e8f-d4a2fc68ab70-
c
https://github.com/kiegroup/drools
diff --git a/drools-core/src/test/java/org/drools/reteoo/TupleSourceTest.java b/drools-core/src/test/java/org/drools/reteoo/TupleSourceTest.java index b657a06fe85..2e5965ea78c 100644 --- a/drools-core/src/test/java/org/drools/reteoo/TupleSourceTest.java +++ b/drools-core/src/test/java/org/drools/reteoo/TupleSourceTest.java @@ -47,9 +47,7 @@ public void testPropagateAssertTuple() throws Exception { assertLength( 0, sink1.getAsserted() ); - ReteTuple tuple1 = new ReteTuple( 0, - new FactHandleImpl( 1 ), - workingMemory ); + ReteTuple tuple1 = new ReteTuple( new FactHandleImpl( 1 ) ); source.propagateAssertTuple( tuple1, context, @@ -66,9 +64,7 @@ public void testPropagateAssertTuple() throws Exception { assertSame( workingMemory, list[2] ); - ReteTuple tuple2 = new ReteTuple( 0, - new FactHandleImpl( 1 ), - workingMemory ); + ReteTuple tuple2 = new ReteTuple( new FactHandleImpl( 1 ) ); MockTupleSink sink2 = new MockTupleSink(); source.addTupleSink( sink2 ); @@ -130,9 +126,7 @@ public void testAttachNewNode() { // Only the last added TupleSink should receive facts source.attachingNewNode = true; - ReteTuple tuple1 = new ReteTuple( 0, - new FactHandleImpl( 2 ), - workingMemory ); + ReteTuple tuple1 = new ReteTuple( new FactHandleImpl( 2 ) ); source.propagateAssertTuple( tuple1, context, @@ -146,9 +140,7 @@ public void testAttachNewNode() { // Now all sinks should receive values source.attachingNewNode = false; - ReteTuple tuple2 = new ReteTuple( 0, - new FactHandleImpl( 3 ), - workingMemory ); + ReteTuple tuple2 = new ReteTuple( new FactHandleImpl( 3 ) ); source.propagateAssertTuple( tuple2, context,
c8fdea3a62ecf92c159ca8811b7d4a1039edd546
orientdb
Fixed issue -2472 about null values in Lists--
c
https://github.com/orientechnologies/orientdb
diff --git a/core/src/main/java/com/orientechnologies/orient/core/db/record/ORecordLazyList.java b/core/src/main/java/com/orientechnologies/orient/core/db/record/ORecordLazyList.java index 60cce8473b2..8157ec110d5 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/db/record/ORecordLazyList.java +++ b/core/src/main/java/com/orientechnologies/orient/core/db/record/ORecordLazyList.java @@ -79,7 +79,9 @@ public boolean addAll(Collection<? extends OIdentifiable> c) { while (it.hasNext()) { Object o = it.next(); - if (o instanceof OIdentifiable) + if (o == null) + add(null); + else if (o instanceof OIdentifiable) add((OIdentifiable) o); else OMultiValue.add(this, o); @@ -407,9 +409,9 @@ public boolean lazyLoad(final boolean iInvalidateStream) { for (String item : items) { if (item.length() == 0) - continue; - - super.add(new ORecordId(item)); + super.add(new ORecordId()); + else + super.add(new ORecordId(item)); } modCount = currentModCount; diff --git a/core/src/main/java/com/orientechnologies/orient/core/fetch/OFetchHelper.java b/core/src/main/java/com/orientechnologies/orient/core/fetch/OFetchHelper.java index 021e97ea0cc..ab800be333d 100755 --- a/core/src/main/java/com/orientechnologies/orient/core/fetch/OFetchHelper.java +++ b/core/src/main/java/com/orientechnologies/orient/core/fetch/OFetchHelper.java @@ -298,6 +298,9 @@ private static void processRecord(final ODocument record, final Object iUserObje final String iFieldPathFromRoot, final OFetchListener iListener, final OFetchContext iContext, final String iFormat) throws IOException { + if (record == null) + return; + Object fieldValue; iContext.onBeforeFetch(record); @@ -363,7 +366,6 @@ private static void processRecord(final ODocument record, final Object iUserObje } } catch (Exception e) { - e.printStackTrace(); OLogManager.instance().error(null, "Fetching error on record %s", e, record.getIdentity()); } } @@ -531,7 +533,9 @@ else if (fieldValue instanceof Iterable<?> || fieldValue instanceof 
ORidBag) { removeParsedFromMap(parsedRecords, d); d = d.getRecord(); - if (!(d instanceof ODocument)) { + if (d == null) + iListener.processStandardField(null, d, null, iContext, iUserObject, ""); + else if (!(d instanceof ODocument)) { iListener.processStandardField(null, d, fieldName, iContext, iUserObject, ""); } else { iContext.onBeforeDocument(iRootRecord, (ODocument) d, fieldName, iUserObject); diff --git a/core/src/main/java/com/orientechnologies/orient/core/fetch/json/OJSONFetchContext.java b/core/src/main/java/com/orientechnologies/orient/core/fetch/json/OJSONFetchContext.java index c5773e46570..3178cc3eeb5 100755 --- a/core/src/main/java/com/orientechnologies/orient/core/fetch/json/OJSONFetchContext.java +++ b/core/src/main/java/com/orientechnologies/orient/core/fetch/json/OJSONFetchContext.java @@ -16,12 +16,6 @@ */ package com.orientechnologies.orient.core.fetch.json; -import java.io.IOException; -import java.math.BigDecimal; -import java.util.Date; -import java.util.Set; -import java.util.Stack; - import com.orientechnologies.orient.core.config.OGlobalConfiguration; import com.orientechnologies.orient.core.db.record.OIdentifiable; import com.orientechnologies.orient.core.db.record.ridbag.ORidBag; @@ -35,6 +29,12 @@ import com.orientechnologies.orient.core.serialization.serializer.record.string.ORecordSerializerJSON.FormatSettings; import com.orientechnologies.orient.core.version.ODistributedVersion; +import java.io.IOException; +import java.math.BigDecimal; +import java.util.Date; +import java.util.Set; +import java.util.Stack; + /** * @author luca.molino * @@ -174,6 +174,11 @@ private void appendType(final StringBuilder iBuffer, final String iFieldName, fi } public void writeSignature(final OJSONWriter json, final ORecordInternal<?> record) throws IOException { + if( record == null ) { + json.write("null"); + return; + } + boolean firstAttribute = true; if (settings.includeType) { diff --git 
a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/OJSONWriter.java b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/OJSONWriter.java index fd34670cf43..067310a12dc 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/OJSONWriter.java +++ b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/OJSONWriter.java @@ -85,8 +85,13 @@ else if (iValue instanceof OIdentifiable) { } else { if (iFormat != null && iFormat.contains("shallow")) buffer.append("{}"); - else - buffer.append(linked.getRecord().toJSON(iFormat)); + else { + final ORecord<?> rec = linked.getRecord(); + if (rec != null) + buffer.append(rec.toJSON(iFormat)); + else + buffer.append("null"); + } } } else if (iValue.getClass().isArray()) { @@ -374,8 +379,10 @@ public OJSONWriter writeAttribute(final int iIdentLevel, final boolean iNewLine, format(iIdentLevel, iNewLine); - out.append(writeValue(iName, iFormat)); - out.append(":"); + if (iName != null) { + out.append(writeValue(iName, iFormat)); + out.append(":"); + } if (iFormat.contains("graph") && (iValue == null || iValue instanceof OIdentifiable) && (iName.startsWith("in_") || iName.startsWith("out_"))) { diff --git a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/string/ORecordSerializerCSVAbstract.java b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/string/ORecordSerializerCSVAbstract.java index e7d5cd862a7..8d21461f7f7 100755 --- a/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/string/ORecordSerializerCSVAbstract.java +++ b/core/src/main/java/com/orientechnologies/orient/core/serialization/serializer/record/string/ORecordSerializerCSVAbstract.java @@ -15,13 +15,6 @@ */ package com.orientechnologies.orient.core.serialization.serializer.record.string; -import java.util.Collection; -import java.util.Iterator; -import 
java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - import com.orientechnologies.common.collection.OLazyIterator; import com.orientechnologies.common.collection.OMultiCollectionIterator; import com.orientechnologies.common.collection.OMultiValue; @@ -61,6 +54,13 @@ import com.orientechnologies.orient.core.serialization.serializer.string.OStringSerializerEmbedded; import com.orientechnologies.orient.core.type.tree.OMVRBTreeRIDSet; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + @SuppressWarnings({ "unchecked", "serial" }) public abstract class ORecordSerializerCSVAbstract extends ORecordSerializerStringAbstract { public static final char FIELD_VALUE_SEPARATOR = ':'; @@ -689,7 +689,7 @@ else if (item.length() > 2 && item.charAt(0) == OStringSerializerHelper.EMBEDDED } } else { if (linkedType == null) { - final char begin = item.charAt(0); + final char begin = item.length() > 0 ? item.charAt(0) : OStringSerializerHelper.LINK; // AUTO-DETERMINE LINKED TYPE if (begin == OStringSerializerHelper.LINK)
687d4dbd1d26c3f6cfd24ee37e872a1fef9552d1
hadoop
HADOOP-6682. NetUtils:normalizeHostName does not- process hostnames starting with [a-f] correctly.--git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@953929 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hadoop
diff --git a/CHANGES.txt b/CHANGES.txt index 65c4823c3471e..24da48af1d886 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -87,6 +87,9 @@ Trunk (unreleased changes) HADOOP-6613. Moves the RPC version check ahead of the AuthMethod check. (Kan Zhang via ddas) + HADOOP-6682. NetUtils:normalizeHostName does not process hostnames starting + with [a-f] correctly. (jghoman) + Release 0.21.0 - Unreleased INCOMPATIBLE CHANGES diff --git a/src/java/org/apache/hadoop/net/NetUtils.java b/src/java/org/apache/hadoop/net/NetUtils.java index e8590618726dc..bc81d5b79a88f 100644 --- a/src/java/org/apache/hadoop/net/NetUtils.java +++ b/src/java/org/apache/hadoop/net/NetUtils.java @@ -398,7 +398,7 @@ public static void connect(Socket socket, * @return its IP address in the string format */ public static String normalizeHostName(String name) { - if (Character.digit(name.charAt(0), 16) != -1) { // it is an IP + if (Character.digit(name.charAt(0), 10) != -1) { // it is an IP return name; } else { try {
92fe7f9d32f2c53767ecfea631aae79c9f3ac9d4
ReactiveX-RxJava
made the public window methods more generic via the- basic (lol) super/extends fluff; also simplified api by removing a few- useless super definitions (there's no super of Opening and Closing)--
p
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/main/java/rx/Observable.java b/rxjava-core/src/main/java/rx/Observable.java index 25073ca729..e4b64fc28a 100644 --- a/rxjava-core/src/main/java/rx/Observable.java +++ b/rxjava-core/src/main/java/rx/Observable.java @@ -1337,7 +1337,7 @@ public Observable<List<T>> buffer(Func0<? extends Observable<? extends Closing>> * @return * An {@link Observable} which produces buffers which are created and emitted when the specified {@link Observable}s publish certain objects. */ - public Observable<List<T>> buffer(Observable<? extends Opening> bufferOpenings, Func1<? super Opening, ? extends Observable<? extends Closing>> bufferClosingSelector) { + public Observable<List<T>> buffer(Observable<? extends Opening> bufferOpenings, Func1<Opening, ? extends Observable<? extends Closing>> bufferClosingSelector) { return create(OperationBuffer.buffer(this, bufferOpenings, bufferClosingSelector)); } @@ -1520,7 +1520,7 @@ public Observable<List<T>> buffer(long timespan, long timeshift, TimeUnit unit, * An {@link Observable} which produces connected non-overlapping windows, which are emitted * when the current {@link Observable} created with the {@link Func0} argument produces a {@link rx.util.Closing} object. */ - public Observable<Observable<T>> window(Observable<T> source, Func0<Observable<Closing>> closingSelector) { + public Observable<Observable<T>> window(Observable<? extends T> source, Func0<? extends Observable<? extends Closing>> closingSelector) { return create(OperationWindow.window(source, closingSelector)); } @@ -1542,7 +1542,7 @@ public Observable<Observable<T>> window(Observable<T> source, Func0<Observable<C * @return * An {@link Observable} which produces windows which are created and emitted when the specified {@link Observable}s publish certain objects. 
*/ - public Observable<Observable<T>> window(Observable<T> source, Observable<Opening> windowOpenings, Func1<Opening, Observable<Closing>> closingSelector) { + public Observable<Observable<T>> window(Observable<? extends T> source, Observable<? extends Opening> windowOpenings, Func1<Opening, ? extends Observable<? extends Closing>> closingSelector) { return create(OperationWindow.window(source, windowOpenings, closingSelector)); } @@ -1559,7 +1559,7 @@ public Observable<Observable<T>> window(Observable<T> source, Observable<Opening * An {@link Observable} which produces connected non-overlapping windows containing at most * "count" produced values. */ - public Observable<Observable<T>> window(Observable<T> source, int count) { + public Observable<Observable<T>> window(Observable<? extends T> source, int count) { return create(OperationWindow.window(source, count)); } @@ -1579,7 +1579,7 @@ public Observable<Observable<T>> window(Observable<T> source, int count) { * An {@link Observable} which produces windows every "skipped" values containing at most * "count" produced values. */ - public Observable<Observable<T>> window(Observable<T> source, int count, int skip) { + public Observable<Observable<T>> window(Observable<? extends T> source, int count, int skip) { return create(OperationWindow.window(source, count, skip)); } @@ -1598,7 +1598,7 @@ public Observable<Observable<T>> window(Observable<T> source, int count, int ski * @return * An {@link Observable} which produces connected non-overlapping windows with a fixed duration. */ - public Observable<Observable<T>> window(Observable<T> source, long timespan, TimeUnit unit) { + public Observable<Observable<T>> window(Observable<? 
extends T> source, long timespan, TimeUnit unit) { return create(OperationWindow.window(source, timespan, unit)); } @@ -1619,7 +1619,7 @@ public Observable<Observable<T>> window(Observable<T> source, long timespan, Tim * @return * An {@link Observable} which produces connected non-overlapping windows with a fixed duration. */ - public Observable<Observable<T>> window(Observable<T> source, long timespan, TimeUnit unit, Scheduler scheduler) { + public Observable<Observable<T>> window(Observable<? extends T> source, long timespan, TimeUnit unit, Scheduler scheduler) { return create(OperationWindow.window(source, timespan, unit, scheduler)); } @@ -1642,7 +1642,7 @@ public Observable<Observable<T>> window(Observable<T> source, long timespan, Tim * An {@link Observable} which produces connected non-overlapping windows which are emitted after * a fixed duration or when the window has reached maximum capacity (which ever occurs first). */ - public Observable<Observable<T>> window(Observable<T> source, long timespan, TimeUnit unit, int count) { + public Observable<Observable<T>> window(Observable<? extends T> source, long timespan, TimeUnit unit, int count) { return create(OperationWindow.window(source, timespan, unit, count)); } @@ -1667,7 +1667,7 @@ public Observable<Observable<T>> window(Observable<T> source, long timespan, Tim * An {@link Observable} which produces connected non-overlapping windows which are emitted after * a fixed duration or when the window has reached maximum capacity (which ever occurs first). */ - public Observable<Observable<T>> window(Observable<T> source, long timespan, TimeUnit unit, int count, Scheduler scheduler) { + public Observable<Observable<T>> window(Observable<? 
extends T> source, long timespan, TimeUnit unit, int count, Scheduler scheduler) { return create(OperationWindow.window(source, timespan, unit, count, scheduler)); } @@ -1689,7 +1689,7 @@ public Observable<Observable<T>> window(Observable<T> source, long timespan, Tim * An {@link Observable} which produces new windows periodically, and these are emitted after * a fixed timespan has elapsed. */ - public Observable<Observable<T>> window(Observable<T> source, long timespan, long timeshift, TimeUnit unit) { + public Observable<Observable<T>> window(Observable<? extends T> source, long timespan, long timeshift, TimeUnit unit) { return create(OperationWindow.window(source, timespan, timeshift, unit)); } @@ -1713,7 +1713,7 @@ public Observable<Observable<T>> window(Observable<T> source, long timespan, lon * An {@link Observable} which produces new windows periodically, and these are emitted after * a fixed timespan has elapsed. */ - public Observable<Observable<T>> window(Observable<T> source, long timespan, long timeshift, TimeUnit unit, Scheduler scheduler) { + public Observable<Observable<T>> window(Observable<? extends T> source, long timespan, long timeshift, TimeUnit unit, Scheduler scheduler) { return create(OperationWindow.window(source, timespan, timeshift, unit, scheduler)); } diff --git a/rxjava-core/src/main/java/rx/operators/ChunkedOperation.java b/rxjava-core/src/main/java/rx/operators/ChunkedOperation.java index f0990425bb..1b5cf8d0d1 100644 --- a/rxjava-core/src/main/java/rx/operators/ChunkedOperation.java +++ b/rxjava-core/src/main/java/rx/operators/ChunkedOperation.java @@ -495,7 +495,7 @@ protected static class ObservableBasedMultiChunkCreator<T, C> implements ChunkCr private final SafeObservableSubscription subscription = new SafeObservableSubscription(); - public ObservableBasedMultiChunkCreator(final OverlappingChunks<T, C> chunks, Observable<? extends Opening> openings, final Func1<? super Opening, ? extends Observable<? 
extends Closing>> chunkClosingSelector) { + public ObservableBasedMultiChunkCreator(final OverlappingChunks<T, C> chunks, Observable<? extends Opening> openings, final Func1<Opening, ? extends Observable<? extends Closing>> chunkClosingSelector) { subscription.wrap(openings.subscribe(new Action1<Opening>() { @Override public void call(Opening opening) { diff --git a/rxjava-core/src/main/java/rx/operators/OperationBuffer.java b/rxjava-core/src/main/java/rx/operators/OperationBuffer.java index c692ed6cee..1cc559fb57 100644 --- a/rxjava-core/src/main/java/rx/operators/OperationBuffer.java +++ b/rxjava-core/src/main/java/rx/operators/OperationBuffer.java @@ -42,12 +42,14 @@ public final class OperationBuffer extends ChunkedOperation { - private static final Func0 BUFFER_MAKER = new Func0() { - @Override - public Object call() { - return new Buffer(); - } - }; + private static <T> Func0<Buffer<T>> bufferMaker() { + return new Func0<Buffer<T>>() { + @Override + public Buffer<T> call() { + return new Buffer<T>(); + } + }; + } /** * <p>This method creates a {@link Func1} object which represents the buffer operation. This operation takes @@ -74,7 +76,7 @@ public static <T> OnSubscribeFunc<List<T>> buffer(final Observable<T> source, fi @Override public Subscription onSubscribe(Observer<? super List<T>> observer) { - NonOverlappingChunks<T, List<T>> buffers = new NonOverlappingChunks<T, List<T>>(observer, BUFFER_MAKER); + NonOverlappingChunks<T, List<T>> buffers = new NonOverlappingChunks<T, List<T>>(observer, OperationBuffer.<T>bufferMaker()); ChunkCreator creator = new ObservableBasedSingleChunkCreator<T, List<T>>(buffers, bufferClosingSelector); return source.subscribe(new ChunkObserver<T, List<T>>(buffers, observer, creator)); } @@ -106,11 +108,11 @@ public Subscription onSubscribe(Observer<? super List<T>> observer) { * @return * the {@link Func1} object representing the specified buffer operation. 
*/ - public static <T> OnSubscribeFunc<List<T>> buffer(final Observable<T> source, final Observable<? extends Opening> bufferOpenings, final Func1<? super Opening, ? extends Observable<? extends Closing>> bufferClosingSelector) { + public static <T> OnSubscribeFunc<List<T>> buffer(final Observable<T> source, final Observable<? extends Opening> bufferOpenings, final Func1<Opening, ? extends Observable<? extends Closing>> bufferClosingSelector) { return new OnSubscribeFunc<List<T>>() { @Override public Subscription onSubscribe(final Observer<? super List<T>> observer) { - OverlappingChunks<T, List<T>> buffers = new OverlappingChunks<T, List<T>>(observer, BUFFER_MAKER); + OverlappingChunks<T, List<T>> buffers = new OverlappingChunks<T, List<T>>(observer, OperationBuffer.<T>bufferMaker()); ChunkCreator creator = new ObservableBasedMultiChunkCreator<T, List<T>>(buffers, bufferOpenings, bufferClosingSelector); return source.subscribe(new ChunkObserver<T, List<T>>(buffers, observer, creator)); } @@ -165,7 +167,7 @@ public static <T> OnSubscribeFunc<List<T>> buffer(final Observable<T> source, fi return new OnSubscribeFunc<List<T>>() { @Override public Subscription onSubscribe(final Observer<? super List<T>> observer) { - Chunks<T, List<T>> chunks = new SizeBasedChunks<T, List<T>>(observer, BUFFER_MAKER, count); + Chunks<T, List<T>> chunks = new SizeBasedChunks<T, List<T>>(observer, OperationBuffer.<T>bufferMaker(), count); ChunkCreator creator = new SkippingChunkCreator<T, List<T>>(chunks, skip); return source.subscribe(new ChunkObserver<T, List<T>>(chunks, observer, creator)); } @@ -220,7 +222,7 @@ public static <T> OnSubscribeFunc<List<T>> buffer(final Observable<T> source, fi return new OnSubscribeFunc<List<T>>() { @Override public Subscription onSubscribe(final Observer<? 
super List<T>> observer) { - NonOverlappingChunks<T, List<T>> buffers = new NonOverlappingChunks<T, List<T>>(observer, BUFFER_MAKER); + NonOverlappingChunks<T, List<T>> buffers = new NonOverlappingChunks<T, List<T>>(observer, OperationBuffer.<T>bufferMaker()); ChunkCreator creator = new TimeBasedChunkCreator<T, List<T>>(buffers, timespan, unit, scheduler); return source.subscribe(new ChunkObserver<T, List<T>>(buffers, observer, creator)); } @@ -281,7 +283,7 @@ public static <T> OnSubscribeFunc<List<T>> buffer(final Observable<T> source, fi return new OnSubscribeFunc<List<T>>() { @Override public Subscription onSubscribe(final Observer<? super List<T>> observer) { - Chunks<T, List<T>> chunks = new TimeAndSizeBasedChunks<T, List<T>>(observer, BUFFER_MAKER, count, timespan, unit, scheduler); + Chunks<T, List<T>> chunks = new TimeAndSizeBasedChunks<T, List<T>>(observer, OperationBuffer.<T>bufferMaker(), count, timespan, unit, scheduler); ChunkCreator creator = new SingleChunkCreator<T, List<T>>(chunks); return source.subscribe(new ChunkObserver<T, List<T>>(chunks, observer, creator)); } @@ -342,7 +344,7 @@ public static <T> OnSubscribeFunc<List<T>> buffer(final Observable<T> source, fi return new OnSubscribeFunc<List<T>>() { @Override public Subscription onSubscribe(final Observer<? 
super List<T>> observer) { - OverlappingChunks<T, List<T>> buffers = new TimeBasedChunks<T, List<T>>(observer, BUFFER_MAKER, timespan, unit, scheduler); + OverlappingChunks<T, List<T>> buffers = new TimeBasedChunks<T, List<T>>(observer, OperationBuffer.<T>bufferMaker(), timespan, unit, scheduler); ChunkCreator creator = new TimeBasedChunkCreator<T, List<T>>(buffers, timeshift, unit, scheduler); return source.subscribe(new ChunkObserver<T, List<T>>(buffers, observer, creator)); } diff --git a/rxjava-core/src/main/java/rx/operators/OperationWindow.java b/rxjava-core/src/main/java/rx/operators/OperationWindow.java index ab15ed2352..5cffda9661 100644 --- a/rxjava-core/src/main/java/rx/operators/OperationWindow.java +++ b/rxjava-core/src/main/java/rx/operators/OperationWindow.java @@ -72,7 +72,7 @@ public Window<T> call() { * @return * the {@link rx.util.functions.Func1} object representing the specified window operation. */ - public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<T> source, final Func0<Observable<Closing>> windowClosingSelector) { + public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<? extends T> source, final Func0<? extends Observable<? extends Closing>> windowClosingSelector) { return new OnSubscribeFunc<Observable<T>>() { @Override public Subscription onSubscribe(final Observer<? super Observable<T>> observer) { @@ -109,7 +109,7 @@ public Subscription onSubscribe(final Observer<? super Observable<T>> observer) * @return * the {@link rx.util.functions.Func1} object representing the specified window operation. */ - public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<T> source, final Observable<Opening> windowOpenings, final Func1<Opening, Observable<Closing>> windowClosingSelector) { + public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<? extends T> source, final Observable<? extends Opening> windowOpenings, final Func1<Opening, ? extends Observable<? 
extends Closing>> windowClosingSelector) { return new OnSubscribeFunc<Observable<T>>() { @Override public Subscription onSubscribe(final Observer<? super Observable<T>> observer) { @@ -137,7 +137,7 @@ public Subscription onSubscribe(final Observer<? super Observable<T>> observer) * @return * the {@link rx.util.functions.Func1} object representing the specified window operation. */ - public static <T> OnSubscribeFunc<Observable<T>> window(Observable<T> source, int count) { + public static <T> OnSubscribeFunc<Observable<T>> window(Observable<? extends T> source, int count) { return window(source, count, count); } @@ -164,7 +164,7 @@ public static <T> OnSubscribeFunc<Observable<T>> window(Observable<T> source, in * @return * the {@link rx.util.functions.Func1} object representing the specified window operation. */ - public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<T> source, final int count, final int skip) { + public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<? extends T> source, final int count, final int skip) { return new OnSubscribeFunc<Observable<T>>() { @Override public Subscription onSubscribe(final Observer<? super Observable<T>> observer) { @@ -194,7 +194,7 @@ public Subscription onSubscribe(final Observer<? super Observable<T>> observer) * @return * the {@link rx.util.functions.Func1} object representing the specified window operation. */ - public static <T> OnSubscribeFunc<Observable<T>> window(Observable<T> source, long timespan, TimeUnit unit) { + public static <T> OnSubscribeFunc<Observable<T>> window(Observable<? extends T> source, long timespan, TimeUnit unit) { return window(source, timespan, unit, Schedulers.threadPoolForComputation()); } @@ -219,7 +219,7 @@ public static <T> OnSubscribeFunc<Observable<T>> window(Observable<T> source, lo * @return * the {@link rx.util.functions.Func1} object representing the specified window operation. 
*/ - public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<T> source, final long timespan, final TimeUnit unit, final Scheduler scheduler) { + public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<? extends T> source, final long timespan, final TimeUnit unit, final Scheduler scheduler) { return new OnSubscribeFunc<Observable<T>>() { @Override public Subscription onSubscribe(final Observer<? super Observable<T>> observer) { @@ -252,7 +252,7 @@ public Subscription onSubscribe(final Observer<? super Observable<T>> observer) * @return * the {@link rx.util.functions.Func1} object representing the specified window operation. */ - public static <T> OnSubscribeFunc<Observable<T>> window(Observable<T> source, long timespan, TimeUnit unit, int count) { + public static <T> OnSubscribeFunc<Observable<T>> window(Observable<? extends T> source, long timespan, TimeUnit unit, int count) { return window(source, timespan, unit, count, Schedulers.threadPoolForComputation()); } @@ -280,7 +280,7 @@ public static <T> OnSubscribeFunc<Observable<T>> window(Observable<T> source, lo * @return * the {@link rx.util.functions.Func1} object representing the specified window operation. */ - public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<T> source, final long timespan, final TimeUnit unit, final int count, final Scheduler scheduler) { + public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<? extends T> source, final long timespan, final TimeUnit unit, final int count, final Scheduler scheduler) { return new OnSubscribeFunc<Observable<T>>() { @Override public Subscription onSubscribe(final Observer<? super Observable<T>> observer) { @@ -313,7 +313,7 @@ public Subscription onSubscribe(final Observer<? super Observable<T>> observer) * @return * the {@link rx.util.functions.Func1} object representing the specified window operation. 
*/ - public static <T> OnSubscribeFunc<Observable<T>> window(Observable<T> source, long timespan, long timeshift, TimeUnit unit) { + public static <T> OnSubscribeFunc<Observable<T>> window(Observable<? extends T> source, long timespan, long timeshift, TimeUnit unit) { return window(source, timespan, timeshift, unit, Schedulers.threadPoolForComputation()); } @@ -341,7 +341,7 @@ public static <T> OnSubscribeFunc<Observable<T>> window(Observable<T> source, lo * @return * the {@link rx.util.functions.Func1} object representing the specified window operation. */ - public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<T> source, final long timespan, final long timeshift, final TimeUnit unit, final Scheduler scheduler) { + public static <T> OnSubscribeFunc<Observable<T>> window(final Observable<? extends T> source, final long timespan, final long timeshift, final TimeUnit unit, final Scheduler scheduler) { return new OnSubscribeFunc<Observable<T>>() { @Override public Subscription onSubscribe(final Observer<? super Observable<T>> observer) {
f0bae099e625f74dac8cb391811188fadffb07bf
drools
JBRULES-2375: fixing deadlock on queued actions--git-svn-id: https://svn.jboss.org/repos/labs/labs/jbossrules/trunk@33500 c60d74c8-e8f6-0310-9e8f-d4a2fc68ab70-
c
https://github.com/kiegroup/drools
diff --git a/drools-core/src/main/java/org/drools/common/AbstractWorkingMemory.java b/drools-core/src/main/java/org/drools/common/AbstractWorkingMemory.java index b28c673ce7f..0795aa1188c 100644 --- a/drools-core/src/main/java/org/drools/common/AbstractWorkingMemory.java +++ b/drools-core/src/main/java/org/drools/common/AbstractWorkingMemory.java @@ -31,6 +31,7 @@ import java.util.Queue; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -174,7 +175,7 @@ public abstract class AbstractWorkingMemory protected Queue<WorkingMemoryAction> actionQueue; - protected volatile boolean evaluatingActionQueue; + protected AtomicBoolean evaluatingActionQueue; protected ReentrantLock lock; @@ -353,7 +354,8 @@ public AbstractWorkingMemory(final int id, this.initialFactHandle = initialFactHandle; } - this.actionQueue = new LinkedList<WorkingMemoryAction>(); + this.actionQueue = new ConcurrentLinkedQueue<WorkingMemoryAction>(); + this.evaluatingActionQueue = new AtomicBoolean( false ); this.addRemovePropertyChangeListenerArgs = new Object[]{this}; this.workingMemoryEventSupport = workingMemoryEventSupport; @@ -1459,9 +1461,8 @@ public void update(org.drools.FactHandle factHandle, public void executeQueuedActions() { try { startOperation(); - synchronized ( this.actionQueue ) { - if ( !this.actionQueue.isEmpty() && !evaluatingActionQueue ) { - evaluatingActionQueue = true; + if( evaluatingActionQueue.compareAndSet( false, true ) ) { + if ( !this.actionQueue.isEmpty() ) { WorkingMemoryAction action = null; while ( (action = actionQueue.poll()) != null ) { @@ -1472,10 +1473,10 @@ public void executeQueuedActions() { e ); } } - evaluatingActionQueue = false; } } } finally { + evaluatingActionQueue.compareAndSet( true, false ); endOperation(); } } @@ -1485,14 +1486,12 @@ 
public Queue<WorkingMemoryAction> getActionQueue() { } public void queueWorkingMemoryAction(final WorkingMemoryAction action) { - synchronized ( this.actionQueue ) { - try { - startOperation(); - this.actionQueue.add( action ); - this.agenda.notifyHalt(); - } finally { - endOperation(); - } + try { + startOperation(); + this.actionQueue.add( action ); + this.agenda.notifyHalt(); + } finally { + endOperation(); } } diff --git a/drools-core/src/main/java/org/drools/process/instance/event/DefaultSignalManager.java b/drools-core/src/main/java/org/drools/process/instance/event/DefaultSignalManager.java index 6af11ad39d2..c3371f6d039 100644 --- a/drools-core/src/main/java/org/drools/process/instance/event/DefaultSignalManager.java +++ b/drools-core/src/main/java/org/drools/process/instance/event/DefaultSignalManager.java @@ -52,7 +52,6 @@ public void removeEventListener(String type, EventListener eventListener) { public void signalEvent(String type, Object event) { ((InternalWorkingMemory) workingMemory).queueWorkingMemoryAction(new SignalAction(type, event)); - workingMemory.fireAllRules(); } public void internalSignalEvent(String type, Object event) { diff --git a/drools-core/src/test/java/org/drools/reteoo/RuleFlowGroupTest.java b/drools-core/src/test/java/org/drools/reteoo/RuleFlowGroupTest.java index 6fa79a49202..f48a5080b95 100644 --- a/drools-core/src/test/java/org/drools/reteoo/RuleFlowGroupTest.java +++ b/drools-core/src/test/java/org/drools/reteoo/RuleFlowGroupTest.java @@ -78,6 +78,7 @@ public void testRuleFlowGroup() { public void evaluate(KnowledgeHelper knowledgeHelper, WorkingMemory workingMemory) { + System.out.println( knowledgeHelper.getRule() ); list.add( knowledgeHelper.getRule() ); }
8d07116266f2a2cb19dc303e5937bb6f248b81fb
hadoop
YARN-2331. Distinguish shutdown during supervision- vs. shutdown for rolling upgrade. Contributed by Jason Lowe--(cherry picked from commit 088156de43abb07bec590a3fcd1a5af2feb02cd2)-
p
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 02f61fb3a0049..4e577e989e302 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -161,6 +161,9 @@ Release 2.8.0 - UNRELEASED yarn.scheduler.capacity.node-locality-delay in code and default xml file. (Nijel SF via vinodkv) + YARN-2331. Distinguish shutdown during supervision vs. shutdown for + rolling upgrade. (Jason Lowe via xgong) + OPTIMIZATIONS YARN-3339. TestDockerContainerExecutor should pull a single image and not diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 9c1db1744f788..315903dcbf2f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1158,6 +1158,10 @@ private static void addDeprecatedKeys() { public static final String NM_RECOVERY_DIR = NM_RECOVERY_PREFIX + "dir"; + public static final String NM_RECOVERY_SUPERVISED = + NM_RECOVERY_PREFIX + "supervised"; + public static final boolean DEFAULT_NM_RECOVERY_SUPERVISED = false; + //////////////////////////////// // Web Proxy Configs //////////////////////////////// diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index e1e0ebd3d777b..4d74f7622f89e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -1192,6 +1192,15 @@ <value>${hadoop.tmp.dir}/yarn-nm-recovery</value> </property> + <property> + <description>Whether the 
nodemanager is running under supervision. A + nodemanager that supports recovery and is running under supervision + will not try to cleanup containers as it exits with the assumption + it will be immediately be restarted and recover containers.</description> + <name>yarn.nodemanager.recovery.supervised</name> + <value>false</value> + </property> + <!--Docker configuration--> <property> diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index c48df64bd9f46..494fa8fbd88a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -530,8 +530,11 @@ public void cleanUpApplicationsOnNMShutDown() { if (this.context.getNMStateStore().canRecover() && !this.context.getDecommissioned()) { - // do not cleanup apps as they can be recovered on restart - return; + if (getConfig().getBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, + YarnConfiguration.DEFAULT_NM_RECOVERY_SUPERVISED)) { + // do not cleanup apps as they can be recovered on restart + return; + } } List<ApplicationId> appIds = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java index 0018d56421470..dbbfcd5deb21d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java @@ -145,10 +145,13 @@ protected void serviceStop() throws Exception { private void stopAggregators() { threadPool.shutdown(); + boolean supervised = getConfig().getBoolean( + YarnConfiguration.NM_RECOVERY_SUPERVISED, + YarnConfiguration.DEFAULT_NM_RECOVERY_SUPERVISED); // if recovery on restart is supported then leave outstanding aggregations // to the next restart boolean shouldAbort = context.getNMStateStore().canRecover() - && !context.getDecommissioned(); + && !context.getDecommissioned() && supervised; // politely ask to finish for (AppLogAggregator aggregator : appLogAggregators.values()) { if (shouldAbort) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java index c45ffbb93ddef..781950e08d27f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java @@ -22,7 +22,11 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.isA; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; import java.nio.ByteBuffer; import java.security.PrivilegedExceptionAction; @@ -68,6 +72,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; @@ -82,27 +87,18 @@ public class TestContainerManagerRecovery { public void testApplicationRecovery() throws Exception { YarnConfiguration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); + conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, true); conf.set(YarnConfiguration.NM_ADDRESS, "localhost:1234"); conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); conf.set(YarnConfiguration.YARN_ADMIN_ACL, "yarn_admin_user"); NMStateStoreService stateStore = new NMMemoryStateStoreService(); stateStore.init(conf); stateStore.start(); - Context context = new NMContext(new NMContainerTokenSecretManager( - conf), new NMTokenSecretManagerInNM(), null, - new ApplicationACLsManager(conf), stateStore); + Context context 
= createContext(conf, stateStore); ContainerManagerImpl cm = createContainerManager(context); cm.init(conf); cm.start(); - // simulate registration with RM - MasterKey masterKey = new MasterKeyPBImpl(); - masterKey.setKeyId(123); - masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) - .byteValue() })); - context.getContainerTokenSecretManager().setMasterKey(masterKey); - context.getNMTokenSecretManager().setMasterKey(masterKey); - // add an application by starting a container String appUser = "app_user1"; String modUser = "modify_user1"; @@ -155,9 +151,7 @@ public void testApplicationRecovery() throws Exception { // reset container manager and verify app recovered with proper acls cm.stop(); - context = new NMContext(new NMContainerTokenSecretManager( - conf), new NMTokenSecretManagerInNM(), null, - new ApplicationACLsManager(conf), stateStore); + context = createContext(conf, stateStore); cm = createContainerManager(context); cm.init(conf); cm.start(); @@ -201,9 +195,7 @@ public void testApplicationRecovery() throws Exception { // restart and verify app is marked for finishing cm.stop(); - context = new NMContext(new NMContainerTokenSecretManager( - conf), new NMTokenSecretManagerInNM(), null, - new ApplicationACLsManager(conf), stateStore); + context = createContext(conf, stateStore); cm = createContainerManager(context); cm.init(conf); cm.start(); @@ -233,9 +225,7 @@ public void testApplicationRecovery() throws Exception { // restart and verify app is no longer present after recovery cm.stop(); - context = new NMContext(new NMContainerTokenSecretManager( - conf), new NMTokenSecretManagerInNM(), null, - new ApplicationACLsManager(conf), stateStore); + context = createContext(conf, stateStore); cm = createContainerManager(context); cm.init(conf); cm.start(); @@ -243,6 +233,95 @@ public void testApplicationRecovery() throws Exception { cm.stop(); } + @Test + public void testContainerCleanupOnShutdown() throws Exception { + ApplicationId appId = 
ApplicationId.newInstance(0, 1); + ApplicationAttemptId attemptId = + ApplicationAttemptId.newInstance(appId, 1); + ContainerId cid = ContainerId.newContainerId(attemptId, 1); + Map<String, LocalResource> localResources = Collections.emptyMap(); + Map<String, String> containerEnv = Collections.emptyMap(); + List<String> containerCmds = Collections.emptyList(); + Map<String, ByteBuffer> serviceData = Collections.emptyMap(); + Credentials containerCreds = new Credentials(); + DataOutputBuffer dob = new DataOutputBuffer(); + containerCreds.writeTokenStorageToStream(dob); + ByteBuffer containerTokens = ByteBuffer.wrap(dob.getData(), 0, + dob.getLength()); + Map<ApplicationAccessType, String> acls = Collections.emptyMap(); + ContainerLaunchContext clc = ContainerLaunchContext.newInstance( + localResources, containerEnv, containerCmds, serviceData, + containerTokens, acls); + // create the logAggregationContext + LogAggregationContext logAggregationContext = + LogAggregationContext.newInstance("includePattern", "excludePattern"); + + // verify containers are stopped on shutdown without recovery + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false); + conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, false); + conf.set(YarnConfiguration.NM_ADDRESS, "localhost:1234"); + Context context = createContext(conf, new NMNullStateStoreService()); + ContainerManagerImpl cm = spy(createContainerManager(context)); + cm.init(conf); + cm.start(); + StartContainersResponse startResponse = startContainer(context, cm, cid, + clc, logAggregationContext); + assertEquals(1, startResponse.getSuccessfullyStartedContainers().size()); + cm.stop(); + verify(cm).handle(isA(CMgrCompletedAppsEvent.class)); + + // verify containers are stopped on shutdown with unsupervised recovery + conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); + conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, false); + 
NMMemoryStateStoreService memStore = new NMMemoryStateStoreService(); + memStore.init(conf); + memStore.start(); + context = createContext(conf, memStore); + cm = spy(createContainerManager(context)); + cm.init(conf); + cm.start(); + startResponse = startContainer(context, cm, cid, + clc, logAggregationContext); + assertEquals(1, startResponse.getSuccessfullyStartedContainers().size()); + cm.stop(); + memStore.close(); + verify(cm).handle(isA(CMgrCompletedAppsEvent.class)); + + // verify containers are not stopped on shutdown with supervised recovery + conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); + conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, true); + memStore = new NMMemoryStateStoreService(); + memStore.init(conf); + memStore.start(); + context = createContext(conf, memStore); + cm = spy(createContainerManager(context)); + cm.init(conf); + cm.start(); + startResponse = startContainer(context, cm, cid, + clc, logAggregationContext); + assertEquals(1, startResponse.getSuccessfullyStartedContainers().size()); + cm.stop(); + memStore.close(); + verify(cm, never()).handle(isA(CMgrCompletedAppsEvent.class)); + } + + private NMContext createContext(YarnConfiguration conf, + NMStateStoreService stateStore) { + NMContext context = new NMContext(new NMContainerTokenSecretManager( + conf), new NMTokenSecretManagerInNM(), null, + new ApplicationACLsManager(conf), stateStore); + + // simulate registration with RM + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + context.getContainerTokenSecretManager().setMasterKey(masterKey); + context.getNMTokenSecretManager().setMasterKey(masterKey); + return context; + } + private StartContainersResponse startContainer(Context context, final ContainerManagerImpl cm, ContainerId cid, ContainerLaunchContext clc, LogAggregationContext logAggregationContext)
a09d266b247b185054d0ab0dfdb6e8dc2e8898bc
orientdb
Minor: removed some warnings--
p
https://github.com/orientechnologies/orientdb
diff --git a/core/src/main/java/com/orientechnologies/orient/core/index/OIndexManager.java b/core/src/main/java/com/orientechnologies/orient/core/index/OIndexManager.java index f5fe061175a..dcf7d2ae48b 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/index/OIndexManager.java +++ b/core/src/main/java/com/orientechnologies/orient/core/index/OIndexManager.java @@ -32,13 +32,13 @@ public interface OIndexManager { public void create(); - public Collection<? extends OIndex> getIndexes(); + public Collection<? extends OIndex<?>> getIndexes(); - public OIndex getIndex(final String iName); + public OIndex<?> getIndex(final String iName); - public OIndex getIndex(final ORID iRID); + public OIndex<?> getIndex(final ORID iRID); - public OIndex createIndex(final String iName, final String iType, final OType iKeyType, final int[] iClusterIdsToIndex, + public OIndex<?> createIndex(final String iName, final String iType, final OType iKeyType, final int[] iClusterIdsToIndex, OIndexCallback iCallback, final OProgressListener iProgressListener, final boolean iAutomatic); public OIndexManager dropIndex(final String iIndexName); diff --git a/core/src/main/java/com/orientechnologies/orient/core/index/OIndexManagerProxy.java b/core/src/main/java/com/orientechnologies/orient/core/index/OIndexManagerProxy.java index d0a9eadd979..7b403ff8d30 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/index/OIndexManagerProxy.java +++ b/core/src/main/java/com/orientechnologies/orient/core/index/OIndexManagerProxy.java @@ -47,24 +47,24 @@ public void create() { delegate.create(); } - public Collection<? extends OIndex> getIndexes() { + public Collection<? 
extends OIndex<?>> getIndexes() { return delegate.getIndexes(); } - public OIndex getIndex(String iName) { + public OIndex<?> getIndex(String iName) { return delegate.getIndex(iName); } - public OIndex getIndex(ORID iRID) { + public OIndex<?> getIndex(ORID iRID) { return delegate.getIndex(iRID); } - public OIndex createIndex(String iName, String iType, final OType iKeyType, int[] iClusterIdsToIndex, OIndexCallback iCallback, + public OIndex<?> createIndex(String iName, String iType, final OType iKeyType, int[] iClusterIdsToIndex, OIndexCallback iCallback, OProgressListener iProgressListener, boolean iAutomatic) { return delegate.createIndex(iName, iType, iKeyType, iClusterIdsToIndex, iCallback, iProgressListener, iAutomatic); } - public OIndex getIndexInternal(final String iName) { + public OIndex<?> getIndexInternal(final String iName) { return ((OIndexManagerShared) delegate).getIndexInternal(iName); } diff --git a/core/src/main/java/com/orientechnologies/orient/core/sql/OCommandExecutorSQLSelect.java b/core/src/main/java/com/orientechnologies/orient/core/sql/OCommandExecutorSQLSelect.java index d85b127e2d9..10e8f89b848 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/sql/OCommandExecutorSQLSelect.java +++ b/core/src/main/java/com/orientechnologies/orient/core/sql/OCommandExecutorSQLSelect.java @@ -108,9 +108,9 @@ public class OCommandExecutorSQLSelect extends OCommandExecutorSQLAbstract imple private static final class OSearchInIndexTriple { private OQueryOperator indexOperator; private Object key; - private OIndex index; + private OIndex<?> index; - private OSearchInIndexTriple(final OQueryOperator indexOperator, final Object key, final OIndex index) { + private OSearchInIndexTriple(final OQueryOperator indexOperator, final Object key, final OIndex<?> index) { this.indexOperator = indexOperator; this.key = key; this.index = index; @@ -120,7 +120,6 @@ private OSearchInIndexTriple(final OQueryOperator indexOperator, final Object ke /** * Compile 
the filter conditions only the first time. */ - @SuppressWarnings("unchecked") public OCommandExecutorSQLSelect parse(final OCommandRequestText iRequest) { iRequest.getDatabase().checkSecurity(ODatabaseSecurityResources.COMMAND, ORole.PERMISSION_READ); @@ -386,7 +385,7 @@ private boolean searchForIndexes(final List<ORecord<?>> iResultSet, final OClass return false; for (OSearchInIndexTriple indexTriple : searchInIndexTriples) { - final OIndex idx = indexTriple.index.getInternal(); + final OIndex<?> idx = indexTriple.index.getInternal(); final OQueryOperator operator = indexTriple.indexOperator; final Object key = indexTriple.key; @@ -482,7 +481,7 @@ private boolean searchIndexedProperty(OClass iSchemaClass, final OSQLFilterCondi if (prop != null && prop.isIndexed()) { final Object origValue = iCondition.getLeft() == iItem ? iCondition.getRight() : iCondition.getLeft(); - final OIndex underlyingIndex = prop.getIndex().getUnderlying(); + final OIndex<?> underlyingIndex = prop.getIndex().getUnderlying(); if (iCondition.getOperator() instanceof OQueryOperatorBetween) { iSearchInIndexTriples.add(new OSearchInIndexTriple(iCondition.getOperator(), origValue, underlyingIndex)); @@ -788,7 +787,8 @@ record = database.load(rid); } private void searchInIndex() { - final OIndex<Object> index = database.getMetadata().getIndexManager().getIndex(compiledFilter.getTargetIndex()); + final OIndex<Object> index = (OIndex<Object>) database.getMetadata().getIndexManager() + .getIndex(compiledFilter.getTargetIndex()); if (index == null) throw new OCommandExecutionException("Target index '" + compiledFilter.getTargetIndex() + "' not found"); diff --git a/tests/src/test/java/com/orientechnologies/orient/test/database/auto/IndexTest.java b/tests/src/test/java/com/orientechnologies/orient/test/database/auto/IndexTest.java index 280c175c157..dac497b5198 100644 --- a/tests/src/test/java/com/orientechnologies/orient/test/database/auto/IndexTest.java +++ 
b/tests/src/test/java/com/orientechnologies/orient/test/database/auto/IndexTest.java @@ -706,7 +706,7 @@ public void createNotUniqueIndexOnNick() { public void LongTypes() { database.getMetadata().getSchema().getClass("Profile").createProperty("hash", OType.LONG).createIndex(INDEX_TYPE.UNIQUE); - OIndex<OIdentifiable> idx = database.getMetadata().getIndexManager().getIndex("Profile.hash"); + OIndex<OIdentifiable> idx = (OIndex<OIdentifiable>) database.getMetadata().getIndexManager().getIndex("Profile.hash"); for (int i = 0; i < 5; i++) { Profile profile = new Profile("HashTest1").setHash(100l + i);
9cedaa31ffbb5af791ea46548e8d5e75ca9fe738
drools
Fixing test--git-svn-id: https://svn.jboss.org/repos/labs/labs/jbossrules/trunk@20387 c60d74c8-e8f6-0310-9e8f-d4a2fc68ab70-
c
https://github.com/kiegroup/drools
diff --git a/drools-compiler/src/test/java/org/drools/integrationtests/MiscTest.java b/drools-compiler/src/test/java/org/drools/integrationtests/MiscTest.java index ea5368c6539..4ed34a49808 100644 --- a/drools-compiler/src/test/java/org/drools/integrationtests/MiscTest.java +++ b/drools-compiler/src/test/java/org/drools/integrationtests/MiscTest.java @@ -553,8 +553,8 @@ public void testGeneratedBeans1() throws Exception { "stilton" ); // just documenting toString() result: - assertEquals( "Cheese( type=stilton )", - cheese.toString() ); +// assertEquals( "Cheese( type=stilton )", +// cheese.toString() ); // reading the field attribute, using the method chain assertEquals( "stilton", @@ -594,8 +594,8 @@ public void testGeneratedBeans1() throws Exception { 7 ); // just documenting toString() result: - assertEquals( "Person( age=7, likes=Cheese( type=stilton ) )", - person.toString() ); +// assertEquals( "Person( age=7, likes=Cheese( type=stilton ) )", +// person.toString() ); // inserting fact wm.insert( person );
d2a3f29496d22df52ba4bf66aa5c408cf286d4bc
ReactiveX-RxJava
Implement Scheduler method with dueTime--- added method: schedule(T state, Func2<Scheduler, T, Subscription> action, Date dueTime)-
a
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/main/java/rx/Scheduler.java b/rxjava-core/src/main/java/rx/Scheduler.java index f3edfd6a0a..afc35b6a32 100644 --- a/rxjava-core/src/main/java/rx/Scheduler.java +++ b/rxjava-core/src/main/java/rx/Scheduler.java @@ -15,6 +15,7 @@ */ package rx; +import java.util.Date; import java.util.concurrent.TimeUnit; import rx.subscriptions.Subscriptions; @@ -37,7 +38,8 @@ * <ol> * <li>Java doesn't support extension methods and there are many overload methods needing default implementations.</li> * <li>Virtual extension methods aren't available until Java8 which RxJava will not set as a minimum target for a long time.</li> - * <li>If only an interface were used Scheduler implementations would then need to extend from an AbstractScheduler pair that gives all of the functionality unless they intend on copy/pasting the functionality.</li> + * <li>If only an interface were used Scheduler implementations would then need to extend from an AbstractScheduler pair that gives all of the functionality unless they intend on copy/pasting the + * functionality.</li> * <li>Without virtual extension methods even additive changes are breaking and thus severely impede library maintenance.</li> * </ol> */ @@ -69,6 +71,27 @@ public abstract class Scheduler { */ public abstract <T> Subscription schedule(T state, Func2<Scheduler, T, Subscription> action, long delayTime, TimeUnit unit); + /** + * Schedules a cancelable action to be executed at dueTime. + * + * @param state + * State to pass into the action. + * @param action + * Action to schedule. + * @param dueTime + * Time the action is to be executed. If in the past it will be executed immediately. + * @return a subscription to be able to unsubscribe from action. 
+ */ + public <T> Subscription schedule(T state, Func2<Scheduler, T, Subscription> action, Date dueTime) { + long scheduledTime = dueTime.getTime(); + long timeInFuture = scheduledTime - now(); + if (timeInFuture <= 0) { + return schedule(state, action); + } else { + return schedule(state, action, timeInFuture, TimeUnit.MILLISECONDS); + } + } + /** * Schedules a cancelable action to be executed. * diff --git a/rxjava-core/src/test/java/rx/concurrency/TestSchedulers.java b/rxjava-core/src/test/java/rx/concurrency/TestSchedulers.java index a4760ff65e..59edfade39 100644 --- a/rxjava-core/src/test/java/rx/concurrency/TestSchedulers.java +++ b/rxjava-core/src/test/java/rx/concurrency/TestSchedulers.java @@ -18,6 +18,7 @@ import static org.junit.Assert.*; import static org.mockito.Mockito.*; +import java.util.Date; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -359,4 +360,39 @@ public void onNext(Integer args) { assertTrue(completed.get()); } + @Test + public void testSchedulingWithDueTime() throws InterruptedException { + + final CountDownLatch latch = new CountDownLatch(5); + final AtomicInteger counter = new AtomicInteger(); + + long start = System.currentTimeMillis(); + + Schedulers.threadPoolForComputation().schedule(null, new Func2<Scheduler, String, Subscription>() { + + @Override + public Subscription call(Scheduler scheduler, String state) { + System.out.println("doing work"); + latch.countDown(); + counter.incrementAndGet(); + if (latch.getCount() == 0) { + return Subscriptions.empty(); + } else { + return scheduler.schedule(state, this, new Date(System.currentTimeMillis() + 50)); + } + } + }, new Date(System.currentTimeMillis() + 100)); + + if (!latch.await(3000, TimeUnit.MILLISECONDS)) { + fail("didn't execute ... 
timed out"); + } + + long end = System.currentTimeMillis(); + + assertEquals(5, counter.get()); + if ((end - start) < 250) { + fail("it should have taken over 250ms since each step was scheduled 50ms in the future"); + } + } + }
4a9f2fb05a3163e11ed35d85a33a6b8e216dde77
ReactiveX-RxJava
TakeWhile protect calls to predicate--
c
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/main/java/rx/operators/OperationTakeWhile.java b/rxjava-core/src/main/java/rx/operators/OperationTakeWhile.java index f45efabc92..1bad2d36e5 100644 --- a/rxjava-core/src/main/java/rx/operators/OperationTakeWhile.java +++ b/rxjava-core/src/main/java/rx/operators/OperationTakeWhile.java @@ -123,7 +123,15 @@ public void onError(Exception e) { @Override public void onNext(T args) { - if (predicate.call(args, counter.getAndIncrement())) { + Boolean isSelected; + try { + isSelected = predicate.call(args, counter.getAndIncrement()); + } + catch (Exception e) { + observer.onError(e); + return; + } + if (isSelected) { observer.onNext(args); } else { observer.onCompleted(); @@ -238,6 +246,35 @@ public Boolean call(String s) })).last(); } + @Test + public void testTakeWhileProtectsPredicateCall() { + TestObservable source = new TestObservable(mock(Subscription.class), "one"); + final RuntimeException testException = new RuntimeException("test exception"); + + @SuppressWarnings("unchecked") + Observer<String> aObserver = mock(Observer.class); + Observable<String> take = Observable.create(takeWhile(source, new Func1<String, Boolean>() + { + @Override + public Boolean call(String s) + { + throw testException; + } + })); + take.subscribe(aObserver); + + // wait for the Observable to complete + try { + source.t.join(); + } catch (Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + verify(aObserver, never()).onNext(any(String.class)); + verify(aObserver, times(1)).onError(testException); + } + @Test public void testUnsubscribeAfterTake() { Subscription s = mock(Subscription.class);
b923c291b474c246afa9f37ae5d9f6bdbbfef9d2
hadoop
YARN-2608. FairScheduler: Potential deadlocks in- loading alloc files and clock access. (Wei Yan via kasha)--(cherry picked from commit c9811af09a3d3f9f2f1b86fc9d6f2763d3225e44)-
c
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 421fac6cd831d..2022204d9fc9a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -420,6 +420,9 @@ Release 2.6.0 - UNRELEASED YARN-2523. ResourceManager UI showing negative value for "Decommissioned Nodes" field (Rohith via jlowe) + YARN-2608. FairScheduler: Potential deadlocks in loading alloc files and + clock access. (Wei Yan via kasha) + Release 2.5.1 - 2014-09-05 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 296d8844373d5..d6339813487ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -117,7 +117,7 @@ public class FairScheduler extends private Resource incrAllocation; private QueueManager queueMgr; - private Clock clock; + private volatile Clock clock; private boolean usePortForNodeName; private static final Log LOG = LogFactory.getLog(FairScheduler.class); @@ -555,11 +555,12 @@ public synchronized int getContinuousSchedulingSleepMs() { return continuousSchedulingSleepMs; } - public synchronized Clock getClock() { + public Clock getClock() { return clock; } - protected synchronized void setClock(Clock clock) { + @VisibleForTesting + void setClock(Clock clock) { this.clock = clock; } @@ -1204,64 +1205,65 @@ public synchronized void setRMContext(RMContext rmContext) { 
this.rmContext = rmContext; } - private synchronized void initScheduler(Configuration conf) - throws IOException { - this.conf = new FairSchedulerConfiguration(conf); - validateConf(this.conf); - minimumAllocation = this.conf.getMinimumAllocation(); - maximumAllocation = this.conf.getMaximumAllocation(); - incrAllocation = this.conf.getIncrementAllocation(); - continuousSchedulingEnabled = this.conf.isContinuousSchedulingEnabled(); - continuousSchedulingSleepMs = - this.conf.getContinuousSchedulingSleepMs(); - nodeLocalityThreshold = this.conf.getLocalityThresholdNode(); - rackLocalityThreshold = this.conf.getLocalityThresholdRack(); - nodeLocalityDelayMs = this.conf.getLocalityDelayNodeMs(); - rackLocalityDelayMs = this.conf.getLocalityDelayRackMs(); - preemptionEnabled = this.conf.getPreemptionEnabled(); - preemptionUtilizationThreshold = - this.conf.getPreemptionUtilizationThreshold(); - assignMultiple = this.conf.getAssignMultiple(); - maxAssign = this.conf.getMaxAssign(); - sizeBasedWeight = this.conf.getSizeBasedWeight(); - preemptionInterval = this.conf.getPreemptionInterval(); - waitTimeBeforeKill = this.conf.getWaitTimeBeforeKill(); - usePortForNodeName = this.conf.getUsePortForNodeName(); - - updateInterval = this.conf.getUpdateInterval(); - if (updateInterval < 0) { - updateInterval = FairSchedulerConfiguration.DEFAULT_UPDATE_INTERVAL_MS; - LOG.warn(FairSchedulerConfiguration.UPDATE_INTERVAL_MS - + " is invalid, so using default value " + - + FairSchedulerConfiguration.DEFAULT_UPDATE_INTERVAL_MS - + " ms instead"); - } - - rootMetrics = FSQueueMetrics.forQueue("root", null, true, conf); - fsOpDurations = FSOpDurations.getInstance(true); - - // This stores per-application scheduling information - this.applications = new ConcurrentHashMap< - ApplicationId, SchedulerApplication<FSAppAttempt>>(); - this.eventLog = new FairSchedulerEventLog(); - eventLog.init(this.conf); - - allocConf = new AllocationConfiguration(conf); - try { - queueMgr.initialize(conf); - 
} catch (Exception e) { - throw new IOException("Failed to start FairScheduler", e); - } + private void initScheduler(Configuration conf) throws IOException { + synchronized (this) { + this.conf = new FairSchedulerConfiguration(conf); + validateConf(this.conf); + minimumAllocation = this.conf.getMinimumAllocation(); + maximumAllocation = this.conf.getMaximumAllocation(); + incrAllocation = this.conf.getIncrementAllocation(); + continuousSchedulingEnabled = this.conf.isContinuousSchedulingEnabled(); + continuousSchedulingSleepMs = + this.conf.getContinuousSchedulingSleepMs(); + nodeLocalityThreshold = this.conf.getLocalityThresholdNode(); + rackLocalityThreshold = this.conf.getLocalityThresholdRack(); + nodeLocalityDelayMs = this.conf.getLocalityDelayNodeMs(); + rackLocalityDelayMs = this.conf.getLocalityDelayRackMs(); + preemptionEnabled = this.conf.getPreemptionEnabled(); + preemptionUtilizationThreshold = + this.conf.getPreemptionUtilizationThreshold(); + assignMultiple = this.conf.getAssignMultiple(); + maxAssign = this.conf.getMaxAssign(); + sizeBasedWeight = this.conf.getSizeBasedWeight(); + preemptionInterval = this.conf.getPreemptionInterval(); + waitTimeBeforeKill = this.conf.getWaitTimeBeforeKill(); + usePortForNodeName = this.conf.getUsePortForNodeName(); + + updateInterval = this.conf.getUpdateInterval(); + if (updateInterval < 0) { + updateInterval = FairSchedulerConfiguration.DEFAULT_UPDATE_INTERVAL_MS; + LOG.warn(FairSchedulerConfiguration.UPDATE_INTERVAL_MS + + " is invalid, so using default value " + + +FairSchedulerConfiguration.DEFAULT_UPDATE_INTERVAL_MS + + " ms instead"); + } - updateThread = new UpdateThread(); - updateThread.setName("FairSchedulerUpdateThread"); - updateThread.setDaemon(true); + rootMetrics = FSQueueMetrics.forQueue("root", null, true, conf); + fsOpDurations = FSOpDurations.getInstance(true); - if (continuousSchedulingEnabled) { - // start continuous scheduling thread - schedulingThread = new ContinuousSchedulingThread(); - 
schedulingThread.setName("FairSchedulerContinuousScheduling"); - schedulingThread.setDaemon(true); + // This stores per-application scheduling information + this.applications = new ConcurrentHashMap< + ApplicationId, SchedulerApplication<FSAppAttempt>>(); + this.eventLog = new FairSchedulerEventLog(); + eventLog.init(this.conf); + + allocConf = new AllocationConfiguration(conf); + try { + queueMgr.initialize(conf); + } catch (Exception e) { + throw new IOException("Failed to start FairScheduler", e); + } + + updateThread = new UpdateThread(); + updateThread.setName("FairSchedulerUpdateThread"); + updateThread.setDaemon(true); + + if (continuousSchedulingEnabled) { + // start continuous scheduling thread + schedulingThread = new ContinuousSchedulingThread(); + schedulingThread.setName("FairSchedulerContinuousScheduling"); + schedulingThread.setDaemon(true); + } } allocsLoader.init(conf); @@ -1321,7 +1323,7 @@ public void serviceStop() throws Exception { } @Override - public synchronized void reinitialize(Configuration conf, RMContext rmContext) + public void reinitialize(Configuration conf, RMContext rmContext) throws IOException { try { allocsLoader.reloadAllocations();
ac32fa187cf37e5a51fd579e052105662ab3c411
hadoop
YARN-3457. NPE when NodeManager.serviceInit fails- and stopRecoveryStore called. Contributed by Bibin A Chundatt.--(cherry picked from commit dd852f5b8c8fe9e52d15987605f36b5b60f02701)-
c
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index a4673bdc2dc5d..d9e1754c8ef23 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -105,6 +105,9 @@ Release 2.8.0 - UNRELEASED YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R via xgong) + YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore called. + (Bibin A Chundatt via ozawa) + Release 2.7.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 5727f102d4bdf..d54180a3627b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -176,16 +176,18 @@ private void initAndStartRecoveryStore(Configuration conf) private void stopRecoveryStore() throws IOException { nmStore.stop(); - if (context.getDecommissioned() && nmStore.canRecover()) { - LOG.info("Removing state store due to decommission"); - Configuration conf = getConfig(); - Path recoveryRoot = new Path( - conf.get(YarnConfiguration.NM_RECOVERY_DIR)); - LOG.info("Removing state store at " + recoveryRoot - + " due to decommission"); - FileSystem recoveryFs = FileSystem.getLocal(conf); - if (!recoveryFs.delete(recoveryRoot, true)) { - LOG.warn("Unable to delete " + recoveryRoot); + if (null != context) { + if (context.getDecommissioned() && nmStore.canRecover()) { + LOG.info("Removing state store due to decommission"); + Configuration conf = getConfig(); + Path recoveryRoot = + new 
Path(conf.get(YarnConfiguration.NM_RECOVERY_DIR)); + LOG.info("Removing state store at " + recoveryRoot + + " due to decommission"); + FileSystem recoveryFs = FileSystem.getLocal(conf); + if (!recoveryFs.delete(recoveryRoot, true)) { + LOG.warn("Unable to delete " + recoveryRoot); + } } } }
b07d6d56a0d2ace5413bc5b61ad2aa9884294924
kotlin
Escaping keywords used as identifiers in- DescriptorRenderer.-- -KT-2810 fixed-
c
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend/src/org/jetbrains/jet/resolve/DescriptorRenderer.java b/compiler/frontend/src/org/jetbrains/jet/resolve/DescriptorRenderer.java index 852a414388ed2..35b528d27c8ba 100644 --- a/compiler/frontend/src/org/jetbrains/jet/resolve/DescriptorRenderer.java +++ b/compiler/frontend/src/org/jetbrains/jet/resolve/DescriptorRenderer.java @@ -16,6 +16,9 @@ package org.jetbrains.jet.resolve; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import com.intellij.psi.tree.IElementType; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.jetbrains.jet.lang.descriptors.*; @@ -23,8 +26,10 @@ import org.jetbrains.jet.lang.resolve.DescriptorUtils; import org.jetbrains.jet.lang.resolve.name.FqName; import org.jetbrains.jet.lang.resolve.name.FqNameUnsafe; +import org.jetbrains.jet.lang.resolve.name.Name; import org.jetbrains.jet.lang.types.*; import org.jetbrains.jet.lang.types.lang.KotlinBuiltIns; +import org.jetbrains.jet.lexer.JetKeywordToken; import org.jetbrains.jet.lexer.JetTokens; import java.util.*; @@ -33,6 +38,14 @@ * @author abreslav */ public class DescriptorRenderer implements Renderer<DeclarationDescriptor> { + private static final Set<String> KEYWORDS = Sets.newHashSet(); + static { + for (IElementType elementType : JetTokens.KEYWORDS.getTypes()) { + assert elementType instanceof JetKeywordToken; + assert !((JetKeywordToken) elementType).isSoft(); + KEYWORDS.add(((JetKeywordToken) elementType).getValue()); + } + } public static final DescriptorRenderer COMPACT_WITH_MODIFIERS = new DescriptorRenderer() { @Override @@ -112,6 +125,28 @@ protected String renderKeyword(String keyword) { return keyword; } + private String renderName(Name identifier) { + String asString = identifier.toString(); + return escape(KEYWORDS.contains(asString) ? 
'`' + asString + '`' : asString); + } + + @NotNull + private String renderFqName(@NotNull FqNameUnsafe fqName) { + return renderFqName(fqName.pathSegments()); + } + + @NotNull + private String renderFqName(@NotNull List<Name> pathSegments) { + StringBuilder buf = new StringBuilder(); + for (Name element : pathSegments) { + if (buf.length() != 0) { + buf.append("."); + } + buf.append(renderName(element)); + } + return buf.toString(); + } + public String renderType(JetType type) { return renderType(type, false); } @@ -156,25 +191,32 @@ private String renderDefaultType(JetType type, boolean shortNamesOnly) { return sb.toString(); } - private static String renderTypeName(@NotNull TypeConstructor typeConstructor, boolean shortNamesOnly) { + private String renderTypeName(@NotNull TypeConstructor typeConstructor, boolean shortNamesOnly) { ClassifierDescriptor cd = typeConstructor.getDeclarationDescriptor(); - if (cd == null || cd instanceof TypeParameterDescriptor) { + if (cd == null) { return typeConstructor.toString(); } + else if (cd instanceof TypeParameterDescriptor) { + return renderName(cd.getName()); + } else { if (shortNamesOnly) { - Object typeNameObject; + List<Name> qualifiedNameElements = Lists.newArrayList(); + // for nested classes qualified name should be used - typeNameObject = cd.getName(); - DeclarationDescriptor parent = cd.getContainingDeclaration(); - while (parent instanceof ClassDescriptor) { - typeNameObject = parent.getName() + "." 
+ typeNameObject; - parent = parent.getContainingDeclaration(); + DeclarationDescriptor current = cd; + do { + qualifiedNameElements.add(current.getName()); + current = current.getContainingDeclaration(); } - return typeNameObject.toString(); + while (current instanceof ClassDescriptor); + + Collections.reverse(qualifiedNameElements); + + return renderFqName(qualifiedNameElements); } else { - return DescriptorUtils.getFQName(cd).getFqName(); + return renderFqName(DescriptorUtils.getFQName(cd)); } } } @@ -281,7 +323,7 @@ private void appendDefinedIn(DeclarationDescriptor declarationDescriptor, String final DeclarationDescriptor containingDeclaration = declarationDescriptor.getContainingDeclaration(); if (containingDeclaration != null) { FqNameUnsafe fqName = DescriptorUtils.getFQName(containingDeclaration); - stringBuilder.append(FqName.ROOT.equalsTo(fqName) ? "root package" : escape(fqName.getFqName())); + stringBuilder.append(FqName.ROOT.equalsTo(fqName) ? "root package" : renderFqName(fqName)); } } @@ -627,7 +669,7 @@ public void renderClassDescriptor(ClassDescriptor descriptor, StringBuilder buil } protected void renderName(DeclarationDescriptor descriptor, StringBuilder stringBuilder) { - stringBuilder.append(escape(descriptor.getName().getName())); + stringBuilder.append(escape(DescriptorRenderer.this.renderName(descriptor.getName()))); } protected void renderTypeParameter(TypeParameterDescriptor descriptor, StringBuilder builder, boolean topLevel) { diff --git a/compiler/testData/renderer/KeywordsInNames.kt b/compiler/testData/renderer/KeywordsInNames.kt new file mode 100644 index 0000000000000..9f72fd68c8f08 --- /dev/null +++ b/compiler/testData/renderer/KeywordsInNames.kt @@ -0,0 +1,23 @@ +val `val` = 5 + +trait `trait` + +class `class`<`in`>(p: `in`?) 
{ + class `class` +} + +val `is` = `class`.`class`() +val `in` = `class`<`trait`>(null) + +fun `trait`.`fun`(`false`: `trait`): `trait` + +//internal final val `val` : jet.Int defined in root package +//internal trait `trait` defined in root package +//internal final class `class`<`in`> defined in root package +//<`in`> defined in `class` +//value-parameter val p : `in`? defined in `class`.<init> +//internal final class `class` defined in `class` +//internal final val `is` : `class`.`class` defined in root package +//internal final val `in` : `class`<`trait`> defined in root package +//internal final fun `trait`.`fun`(`false` : `trait`) : `trait` defined in root package +//value-parameter val `false` : `trait` defined in `fun` \ No newline at end of file diff --git a/compiler/tests/org/jetbrains/jet/jvm/compiler/LoadJavaTestGenerated.java b/compiler/tests/org/jetbrains/jet/jvm/compiler/LoadJavaTestGenerated.java index 8f30906d08e65..26ae420df5961 100644 --- a/compiler/tests/org/jetbrains/jet/jvm/compiler/LoadJavaTestGenerated.java +++ b/compiler/tests/org/jetbrains/jet/jvm/compiler/LoadJavaTestGenerated.java @@ -15,13 +15,16 @@ */ package org.jetbrains.jet.jvm.compiler; +import junit.framework.Assert; import junit.framework.Test; import junit.framework.TestSuite; + +import java.io.File; import org.jetbrains.jet.JetTestUtils; import org.jetbrains.jet.test.InnerTestClasses; import org.jetbrains.jet.test.TestMetadata; -import java.io.File; +import org.jetbrains.jet.jvm.compiler.AbstractLoadJavaTest; /** This class is generated by {@link org.jetbrains.jet.generators.tests.GenerateTests}. 
DO NOT MODIFY MANUALLY */ @TestMetadata("compiler/testData/loadJava") diff --git a/compiler/tests/org/jetbrains/jet/lang/resolve/lazy/LazyResolveDescriptorRendererTestGenerated.java b/compiler/tests/org/jetbrains/jet/lang/resolve/lazy/LazyResolveDescriptorRendererTestGenerated.java index e7e3a46b2b8c2..0bf4581aaaa42 100644 --- a/compiler/tests/org/jetbrains/jet/lang/resolve/lazy/LazyResolveDescriptorRendererTestGenerated.java +++ b/compiler/tests/org/jetbrains/jet/lang/resolve/lazy/LazyResolveDescriptorRendererTestGenerated.java @@ -70,6 +70,11 @@ public void testInheritedMembersVisibility() throws Exception { doTest("compiler/testData/renderer/InheritedMembersVisibility.kt"); } + @TestMetadata("KeywordsInNames.kt") + public void testKeywordsInNames() throws Exception { + doTest("compiler/testData/renderer/KeywordsInNames.kt"); + } + @TestMetadata("TupleTypes.kt") public void testTupleTypes() throws Exception { doTest("compiler/testData/renderer/TupleTypes.kt"); diff --git a/compiler/tests/org/jetbrains/jet/resolve/DescriptorRendererTest.java b/compiler/tests/org/jetbrains/jet/resolve/DescriptorRendererTest.java index 10bac3b80658a..ad3c5d5345c84 100644 --- a/compiler/tests/org/jetbrains/jet/resolve/DescriptorRendererTest.java +++ b/compiler/tests/org/jetbrains/jet/resolve/DescriptorRendererTest.java @@ -80,6 +80,10 @@ public void testInheritedMembersVisibility() throws IOException { doTest(); } + public void testKeywordsInNames() throws IOException { + doTest(); + } + @Override protected String getTestDataPath() { return JetTestCaseBuilder.getTestDataPathBase() + "/renderer";
69902e6dedbfeb4e44eace12a101169cdfb97def
restlet-framework-java
Add SpringRouter-style supplemental direct routing- via the attachments property. This allows for multiple routes for the same- bean or routes which would be illegal/unmatched as bean names.--
a
https://github.com/restlet/restlet-framework-java
diff --git a/modules/org.restlet.ext.spring/src/org/restlet/ext/spring/SpringBeanRouter.java b/modules/org.restlet.ext.spring/src/org/restlet/ext/spring/SpringBeanRouter.java index 1101183992..b686612d6a 100644 --- a/modules/org.restlet.ext.spring/src/org/restlet/ext/spring/SpringBeanRouter.java +++ b/modules/org.restlet.ext.spring/src/org/restlet/ext/spring/SpringBeanRouter.java @@ -41,6 +41,8 @@ import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextAware; +import java.util.Map; + /** * Restlet {@link Router} which behaves like Spring's * {@link org.springframework.web.servlet.handler.BeanNameUrlHandlerMapping}. It @@ -85,6 +87,9 @@ public class SpringBeanRouter extends Router implements /** If beans should be searched for higher up in the BeanFactory hierarchy */ private volatile boolean findInAncestors = true; + /** Supplemental explicit mappings */ + private Map<String, String> attachments; + /** * Creates an instance of {@link SpringBeanFinder}. This can be overriden if * necessary. @@ -130,7 +135,14 @@ public void postProcessBeanFactory(ConfigurableListableBeanFactory factory) for (final String name : names) { final String uri = resolveUri(name, factory); if (uri != null) { - attach(uri, createFinder(bf, name)); + if (this.attachments == null || !this.attachments.containsKey(uri)) { + attach(uri, createFinder(bf, name)); + } + } + } + if (this.attachments != null) { + for (Map.Entry<String, String> attachment : this.attachments.entrySet()) { + attach(attachment.getKey(), createFinder(bf, attachment.getValue())); } } } @@ -180,4 +192,16 @@ public void setApplicationContext(ApplicationContext applicationContext) public void setFindInAncestors(boolean findInAncestors) { this.findInAncestors = findInAncestors; } + + /** + * Sets an explicit mapping of URI templates to bean IDs to use + * in addition to the usual bean name mapping behavior. 
If a URI template + * appears in both this mapping and as a bean name, the bean it is mapped + * to here is the one that will be used. + * + * @see SpringRouter + */ + public void setAttachments(Map<String, String> attachments) { + this.attachments = attachments; + } } diff --git a/modules/org.restlet.test/src/org/restlet/test/ext/spring/SpringBeanRouterTestCase.java b/modules/org.restlet.test/src/org/restlet/test/ext/spring/SpringBeanRouterTestCase.java index b9bd592724..847a959a5c 100644 --- a/modules/org.restlet.test/src/org/restlet/test/ext/spring/SpringBeanRouterTestCase.java +++ b/modules/org.restlet.test/src/org/restlet/test/ext/spring/SpringBeanRouterTestCase.java @@ -31,6 +31,9 @@ package org.restlet.test.ext.spring; import org.restlet.Restlet; +import org.restlet.data.Method; +import org.restlet.data.Request; +import org.restlet.data.Response; import org.restlet.ext.spring.SpringBeanFinder; import org.restlet.ext.spring.SpringBeanRouter; import org.restlet.resource.Resource; @@ -41,9 +44,10 @@ import org.springframework.beans.factory.support.DefaultListableBeanFactory; import org.springframework.beans.factory.support.RootBeanDefinition; +import java.util.Collections; import java.util.HashSet; -import java.util.Set; import java.util.List; +import java.util.Set; /** * @author Rhett Sutphin @@ -57,7 +61,7 @@ public class SpringBeanRouterTestCase extends RestletTestCase { private SpringBeanRouter router; private void assertFinderForBean(String expectedBeanName, Restlet restlet) { - assertTrue("Restlet is not a bean finder restlet", + assertTrue("Restlet is not a bean finder restlet: " + restlet.getClass().getName(), restlet instanceof SpringBeanFinder); final SpringBeanFinder actualFinder = (SpringBeanFinder) restlet; assertEquals("Finder does not point to correct bean", expectedBeanName, @@ -90,6 +94,11 @@ private void registerResourceBeanDefinition(String id, String alias) { } } + private RouteList actualRoutes() { + 
this.router.postProcessBeanFactory(this.factory); + return this.router.getRoutes(); + } + private Set<String> routeUris(List<Route> routes) { final Set<String> uris = new HashSet<String>(); for (final Route actualRoute : routes) { @@ -119,15 +128,8 @@ public void testRoutesCreatedForBeanIdsIfAppropriate() throws Exception { public void testRoutesPointToFindersForBeans() throws Exception { final RouteList actualRoutes = actualRoutes(); assertEquals("Wrong number of routes", 2, actualRoutes.size()); - Route oreRoute = null, fishRoute = null; - for (final Route actualRoute : actualRoutes) { - if (actualRoute.getTemplate().getPattern().equals(FISH_URI)) { - fishRoute = actualRoute; - } - if (actualRoute.getTemplate().getPattern().equals(ORE_URI)) { - oreRoute = actualRoute; - } - } + Route oreRoute = matchRouteFor(ORE_URI); + Route fishRoute = matchRouteFor(FISH_URI); assertNotNull("ore route not present: " + actualRoutes, oreRoute); assertNotNull("fish route not present: " + actualRoutes, fishRoute); @@ -135,13 +137,6 @@ public void testRoutesPointToFindersForBeans() throws Exception { assertFinderForBean("fish", fishRoute.getNext()); } - private RouteList actualRoutes() { - this.router.postProcessBeanFactory(this.factory); - - final RouteList actualRoutes = this.router.getRoutes(); - return actualRoutes; - } - public void testRoutingSkipsResourcesWithoutAppropriateAliases() throws Exception { final BeanDefinition bd = new RootBeanDefinition(Resource.class); @@ -153,4 +148,35 @@ public void testRoutingSkipsResourcesWithoutAppropriateAliases() assertEquals("Timber resource should have been skipped", 2, actualRoutes.size()); } + + public void testRoutingIncludesSpringRouterStyleExplicitlyMappedBeans() throws Exception { + final BeanDefinition bd = new RootBeanDefinition(Resource.class); + bd.setScope(BeanDefinition.SCOPE_PROTOTYPE); + this.factory.registerBeanDefinition("timber", bd); + this.factory.registerAlias("timber", "no-slash"); + + String expectedTemplate = 
"/renewable/timber/{farm_type}"; + router.setAttachments(Collections.singletonMap(expectedTemplate, "timber")); + final RouteList actualRoutes = actualRoutes(); + + assertEquals("Wrong number of routes", 3, actualRoutes.size()); + Route timberRoute = matchRouteFor(expectedTemplate); + assertNotNull("Missing timber route: " + actualRoutes, timberRoute); + assertFinderForBean("timber", timberRoute.getNext()); + } + + public void testExplicitAttachmentsTrumpBeanNames() throws Exception { + this.router.setAttachments(Collections.singletonMap(ORE_URI, "fish")); + RouteList actualRoutes = actualRoutes(); + assertEquals("Wrong number of routes", 2, actualRoutes.size()); + + Route oreRoute = matchRouteFor(ORE_URI); + assertNotNull("No route for " + ORE_URI, oreRoute); + assertFinderForBean("fish", oreRoute.getNext()); + } + + private Route matchRouteFor(String uri) { + Request req = new Request(Method.GET, uri); + return (Route) router.getNext(req, new Response(req)); + } }
c6cf7489853d603bb1a77ad699b3068cb4779bbf
hadoop
YARN-2958. Made RMStateStore not update the last- sequence number when updating the delegation token. Contributed by Varun- Saxena.--(cherry picked from commit 562a701945be3a672f9cb5a52cc6db2c1589ba2b)-
c
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 06fcedce51ff0..83acc08bfe3e2 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -279,6 +279,9 @@ Release 2.7.0 - UNRELEASED YARN-2922. ConcurrentModificationException in CapacityScheduler's LeafQueue. (Rohith Sharmaks via ozawa) + YARN-2958. Made RMStateStore not update the last sequence number when updating the + delegation token. (Varun Saxena via zjshen) + Release 2.6.0 - 2014-11-18 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index 51e3916cf2307..77836620b2967 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -60,8 +60,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.EpochPBImpl; -import org.apache.hadoop.yarn.util.ConverterUtils; - import com.google.common.annotations.VisibleForTesting; @Private @@ -452,11 +450,10 @@ public synchronized void removeApplicationStateInternal( } @Override - public synchronized void storeRMDelegationTokenAndSequenceNumberState( - RMDelegationTokenIdentifier identifier, Long renewDate, 
- int latestSequenceNumber) throws Exception { - storeOrUpdateRMDelegationTokenAndSequenceNumberState( - identifier, renewDate,latestSequenceNumber, false); + public synchronized void storeRMDelegationTokenState( + RMDelegationTokenIdentifier identifier, Long renewDate) + throws Exception { + storeOrUpdateRMDelegationTokenState(identifier, renewDate, false); } @Override @@ -469,16 +466,15 @@ public synchronized void removeRMDelegationTokenState( } @Override - protected void updateRMDelegationTokenAndSequenceNumberInternal( - RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber) throws Exception { - storeOrUpdateRMDelegationTokenAndSequenceNumberState( - rmDTIdentifier, renewDate,latestSequenceNumber, true); + protected void updateRMDelegationTokenState( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) + throws Exception { + storeOrUpdateRMDelegationTokenState(rmDTIdentifier, renewDate, true); } - private void storeOrUpdateRMDelegationTokenAndSequenceNumberState( + private void storeOrUpdateRMDelegationTokenState( RMDelegationTokenIdentifier identifier, Long renewDate, - int latestSequenceNumber, boolean isUpdate) throws Exception { + boolean isUpdate) throws Exception { Path nodeCreatePath = getNodePath(rmDTSecretManagerRoot, DELEGATION_TOKEN_PREFIX + identifier.getSequenceNumber()); @@ -490,23 +486,24 @@ private void storeOrUpdateRMDelegationTokenAndSequenceNumberState( } else { LOG.info("Storing RMDelegationToken_" + identifier.getSequenceNumber()); writeFile(nodeCreatePath, identifierData.toByteArray()); - } - // store sequence number - Path latestSequenceNumberPath = getNodePath(rmDTSecretManagerRoot, - DELEGATION_TOKEN_SEQUENCE_NUMBER_PREFIX + latestSequenceNumber); - LOG.info("Storing " + DELEGATION_TOKEN_SEQUENCE_NUMBER_PREFIX - + latestSequenceNumber); - if (dtSequenceNumberPath == null) { - if (!createFile(latestSequenceNumberPath)) { - throw new Exception("Failed to create " + latestSequenceNumberPath); - } - 
} else { - if (!renameFile(dtSequenceNumberPath, latestSequenceNumberPath)) { - throw new Exception("Failed to rename " + dtSequenceNumberPath); + // store sequence number + Path latestSequenceNumberPath = getNodePath(rmDTSecretManagerRoot, + DELEGATION_TOKEN_SEQUENCE_NUMBER_PREFIX + + identifier.getSequenceNumber()); + LOG.info("Storing " + DELEGATION_TOKEN_SEQUENCE_NUMBER_PREFIX + + identifier.getSequenceNumber()); + if (dtSequenceNumberPath == null) { + if (!createFile(latestSequenceNumberPath)) { + throw new Exception("Failed to create " + latestSequenceNumberPath); + } + } else { + if (!renameFile(dtSequenceNumberPath, latestSequenceNumberPath)) { + throw new Exception("Failed to rename " + dtSequenceNumberPath); + } } + dtSequenceNumberPath = latestSequenceNumberPath; } - dtSequenceNumberPath = latestSequenceNumberPath; } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java index 0f880c8b0fba9..2c927146480e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java @@ -544,31 +544,30 @@ protected void removeApplicationStateInternal(ApplicationStateData appState) throw new IOException(e); } } - - @Override - protected void storeRMDelegationTokenAndSequenceNumberState( - RMDelegationTokenIdentifier tokenId, Long renewDate, - int latestSequenceNumber) throws IOException { + + private void 
storeOrUpdateRMDT(RMDelegationTokenIdentifier tokenId, + Long renewDate, boolean isUpdate) throws IOException { String tokenKey = getRMDTTokenNodeKey(tokenId); RMDelegationTokenIdentifierData tokenData = new RMDelegationTokenIdentifierData(tokenId, renewDate); - ByteArrayOutputStream bs = new ByteArrayOutputStream(); - DataOutputStream ds = new DataOutputStream(bs); - try { - ds.writeInt(latestSequenceNumber); - } finally { - ds.close(); - } if (LOG.isDebugEnabled()) { LOG.debug("Storing token to " + tokenKey); - LOG.debug("Storing " + latestSequenceNumber + " to " - + RM_DT_SEQUENCE_NUMBER_KEY); } try { WriteBatch batch = db.createWriteBatch(); try { batch.put(bytes(tokenKey), tokenData.toByteArray()); - batch.put(bytes(RM_DT_SEQUENCE_NUMBER_KEY), bs.toByteArray()); + if(!isUpdate) { + ByteArrayOutputStream bs = new ByteArrayOutputStream(); + try (DataOutputStream ds = new DataOutputStream(bs)) { + ds.writeInt(tokenId.getSequenceNumber()); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Storing " + tokenId.getSequenceNumber() + " to " + + RM_DT_SEQUENCE_NUMBER_KEY); + } + batch.put(bytes(RM_DT_SEQUENCE_NUMBER_KEY), bs.toByteArray()); + } db.write(batch); } finally { batch.close(); @@ -579,11 +578,17 @@ protected void storeRMDelegationTokenAndSequenceNumberState( } @Override - protected void updateRMDelegationTokenAndSequenceNumberInternal( - RMDelegationTokenIdentifier tokenId, Long renewDate, - int latestSequenceNumber) throws IOException { - storeRMDelegationTokenAndSequenceNumberState(tokenId, renewDate, - latestSequenceNumber); + protected void storeRMDelegationTokenState( + RMDelegationTokenIdentifier tokenId, Long renewDate) + throws IOException { + storeOrUpdateRMDT(tokenId, renewDate, false); + } + + @Override + protected void updateRMDelegationTokenState( + RMDelegationTokenIdentifier tokenId, Long renewDate) + throws IOException { + storeOrUpdateRMDT(tokenId, renewDate, true); } @Override diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java index 917fdc13a3816..3646949b60492 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java @@ -149,23 +149,30 @@ public synchronized void removeApplicationStateInternal( } } - @Override - public synchronized void storeRMDelegationTokenAndSequenceNumberState( - RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber) throws Exception { + private void storeOrUpdateRMDT(RMDelegationTokenIdentifier rmDTIdentifier, + Long renewDate, boolean isUpdate) throws Exception { Map<RMDelegationTokenIdentifier, Long> rmDTState = state.rmSecretManagerState.getTokenState(); if (rmDTState.containsKey(rmDTIdentifier)) { IOException e = new IOException("RMDelegationToken: " + rmDTIdentifier - + "is already stored."); + + "is already stored."); LOG.info("Error storing info for RMDelegationToken: " + rmDTIdentifier, e); throw e; } rmDTState.put(rmDTIdentifier, renewDate); - state.rmSecretManagerState.dtSequenceNumber = latestSequenceNumber; + if(!isUpdate) { + state.rmSecretManagerState.dtSequenceNumber = + rmDTIdentifier.getSequenceNumber(); + } LOG.info("Store RMDT with sequence number " - + rmDTIdentifier.getSequenceNumber() - + ". 
And the latest sequence number is " + latestSequenceNumber); + + rmDTIdentifier.getSequenceNumber()); + } + + @Override + public synchronized void storeRMDelegationTokenState( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) + throws Exception { + storeOrUpdateRMDT(rmDTIdentifier, renewDate, false); } @Override @@ -179,12 +186,11 @@ public synchronized void removeRMDelegationTokenState( } @Override - protected void updateRMDelegationTokenAndSequenceNumberInternal( - RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber) throws Exception { + protected void updateRMDelegationTokenState( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) + throws Exception { removeRMDelegationTokenState(rmDTIdentifier); - storeRMDelegationTokenAndSequenceNumberState( - rmDTIdentifier, renewDate, latestSequenceNumber); + storeOrUpdateRMDT(rmDTIdentifier, renewDate, true); LOG.info("Update RMDT with sequence number " + rmDTIdentifier.getSequenceNumber()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java index f80c497e80af5..d2c1e9d71f351 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java @@ -77,9 +77,9 @@ protected void removeApplicationStateInternal(ApplicationStateData appState) } @Override - public void storeRMDelegationTokenAndSequenceNumberState( - RMDelegationTokenIdentifier rmDTIdentifier, 
Long renewDate, - int latestSequenceNumber) throws Exception { + public void storeRMDelegationTokenState( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) + throws Exception { // Do nothing } @@ -90,9 +90,9 @@ public void removeRMDelegationTokenState(RMDelegationTokenIdentifier rmDTIdentif } @Override - protected void updateRMDelegationTokenAndSequenceNumberInternal( - RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber) throws Exception { + protected void updateRMDelegationTokenState( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) + throws Exception { // Do nothing } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index 3966dc495dd2a..bccde53ddff39 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -296,9 +296,8 @@ public void transition(RMStateStore store, RMStateStoreEvent event) { RMStateStoreRMDTEvent dtEvent = (RMStateStoreRMDTEvent) event; try { LOG.info("Storing RMDelegationToken and SequenceNumber"); - store.storeRMDelegationTokenAndSequenceNumberState( - dtEvent.getRmDTIdentifier(), dtEvent.getRenewDate(), - dtEvent.getLatestSequenceNumber()); + store.storeRMDelegationTokenState( + dtEvent.getRmDTIdentifier(), dtEvent.getRenewDate()); } catch (Exception e) { LOG.error("Error While Storing RMDelegationToken and SequenceNumber ", e); @@ -341,9 +340,8 @@ public void 
transition(RMStateStore store, RMStateStoreEvent event) { RMStateStoreRMDTEvent dtEvent = (RMStateStoreRMDTEvent) event; try { LOG.info("Updating RMDelegationToken and SequenceNumber"); - store.updateRMDelegationTokenAndSequenceNumberInternal( - dtEvent.getRmDTIdentifier(), dtEvent.getRenewDate(), - dtEvent.getLatestSequenceNumber()); + store.updateRMDelegationTokenState( + dtEvent.getRmDTIdentifier(), dtEvent.getRenewDate()); } catch (Exception e) { LOG.error("Error While Updating RMDelegationToken and SequenceNumber ", e); @@ -672,11 +670,10 @@ protected abstract void updateApplicationAttemptStateInternal( * RMDTSecretManager call this to store the state of a delegation token * and sequence number */ - public void storeRMDelegationTokenAndSequenceNumber( - RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber) { + public void storeRMDelegationToken( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) { handleStoreEvent(new RMStateStoreRMDTEvent(rmDTIdentifier, renewDate, - latestSequenceNumber, RMStateStoreEventType.STORE_DELEGATION_TOKEN)); + RMStateStoreEventType.STORE_DELEGATION_TOKEN)); } /** @@ -684,17 +681,17 @@ public void storeRMDelegationTokenAndSequenceNumber( * Derived classes must implement this method to store the state of * RMDelegationToken and sequence number */ - protected abstract void storeRMDelegationTokenAndSequenceNumberState( - RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber) throws Exception; + protected abstract void storeRMDelegationTokenState( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) + throws Exception; /** * RMDTSecretManager call this to remove the state of a delegation token */ public void removeRMDelegationToken( - RMDelegationTokenIdentifier rmDTIdentifier, int sequenceNumber) { + RMDelegationTokenIdentifier rmDTIdentifier) { handleStoreEvent(new RMStateStoreRMDTEvent(rmDTIdentifier, null, - sequenceNumber, 
RMStateStoreEventType.REMOVE_DELEGATION_TOKEN)); + RMStateStoreEventType.REMOVE_DELEGATION_TOKEN)); } /** @@ -708,11 +705,10 @@ protected abstract void removeRMDelegationTokenState( * RMDTSecretManager call this to update the state of a delegation token * and sequence number */ - public void updateRMDelegationTokenAndSequenceNumber( - RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber) { + public void updateRMDelegationToken( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) { handleStoreEvent(new RMStateStoreRMDTEvent(rmDTIdentifier, renewDate, - latestSequenceNumber, RMStateStoreEventType.UPDATE_DELEGATION_TOKEN)); + RMStateStoreEventType.UPDATE_DELEGATION_TOKEN)); } /** @@ -720,9 +716,9 @@ public void updateRMDelegationTokenAndSequenceNumber( * Derived classes must implement this method to update the state of * RMDelegationToken and sequence number */ - protected abstract void updateRMDelegationTokenAndSequenceNumberInternal( - RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber) throws Exception; + protected abstract void updateRMDelegationTokenState( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) + throws Exception; /** * RMDTSecretManager call this to store the state of a master key diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreRMDTEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreRMDTEvent.java index 4cd4d2e25415b..a3519ff6f8fe1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreRMDTEvent.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreRMDTEvent.java @@ -23,18 +23,16 @@ public class RMStateStoreRMDTEvent extends RMStateStoreEvent { private RMDelegationTokenIdentifier rmDTIdentifier; private Long renewDate; - private int latestSequenceNumber; public RMStateStoreRMDTEvent(RMStateStoreEventType type) { super(type); } public RMStateStoreRMDTEvent(RMDelegationTokenIdentifier rmDTIdentifier, - Long renewDate, int latestSequenceNumber, RMStateStoreEventType type) { + Long renewDate, RMStateStoreEventType type) { this(type); this.rmDTIdentifier = rmDTIdentifier; this.renewDate = renewDate; - this.latestSequenceNumber = latestSequenceNumber; } public RMDelegationTokenIdentifier getRmDTIdentifier() { @@ -44,8 +42,4 @@ public RMDelegationTokenIdentifier getRmDTIdentifier() { public Long getRenewDate() { return renewDate; } - - public int getLatestSequenceNumber() { - return latestSequenceNumber; - } -} +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index 2babc82a1d598..f3da21e51609e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -698,12 +698,11 @@ public synchronized void removeApplicationStateInternal( } @Override - protected synchronized void 
storeRMDelegationTokenAndSequenceNumberState( - RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber) throws Exception { + protected synchronized void storeRMDelegationTokenState( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) + throws Exception { ArrayList<Op> opList = new ArrayList<Op>(); - addStoreOrUpdateOps( - opList, rmDTIdentifier, renewDate, latestSequenceNumber, false); + addStoreOrUpdateOps(opList, rmDTIdentifier, renewDate, false); doMultiWithRetries(opList); } @@ -727,29 +726,27 @@ protected synchronized void removeRMDelegationTokenState( } @Override - protected synchronized void updateRMDelegationTokenAndSequenceNumberInternal( - RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber) throws Exception { + protected synchronized void updateRMDelegationTokenState( + RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate) + throws Exception { ArrayList<Op> opList = new ArrayList<Op>(); String nodeRemovePath = getNodePath(delegationTokensRootPath, DELEGATION_TOKEN_PREFIX + rmDTIdentifier.getSequenceNumber()); if (existsWithRetries(nodeRemovePath, true) == null) { // in case znode doesn't exist - addStoreOrUpdateOps( - opList, rmDTIdentifier, renewDate, latestSequenceNumber, false); + addStoreOrUpdateOps(opList, rmDTIdentifier, renewDate, false); LOG.debug("Attempted to update a non-existing znode " + nodeRemovePath); } else { // in case znode exists - addStoreOrUpdateOps( - opList, rmDTIdentifier, renewDate, latestSequenceNumber, true); + addStoreOrUpdateOps(opList, rmDTIdentifier, renewDate, true); } doMultiWithRetries(opList); } private void addStoreOrUpdateOps(ArrayList<Op> opList, RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate, - int latestSequenceNumber, boolean isUpdate) throws Exception { + boolean isUpdate) throws Exception { // store RM delegation token String nodeCreatePath = getNodePath(delegationTokensRootPath, DELEGATION_TOKEN_PREFIX @@ -769,16 
+766,15 @@ private void addStoreOrUpdateOps(ArrayList<Op> opList, } else { opList.add(Op.create(nodeCreatePath, identifierData.toByteArray(), zkAcl, CreateMode.PERSISTENT)); + // Update Sequence number only while storing DT + seqOut.writeInt(rmDTIdentifier.getSequenceNumber()); + if (LOG.isDebugEnabled()) { + LOG.debug((isUpdate ? "Storing " : "Updating ") + + dtSequenceNumberPath + ". SequenceNumber: " + + rmDTIdentifier.getSequenceNumber()); + } + opList.add(Op.setData(dtSequenceNumberPath, seqOs.toByteArray(), -1)); } - - - seqOut.writeInt(latestSequenceNumber); - if (LOG.isDebugEnabled()) { - LOG.debug((isUpdate ? "Storing " : "Updating ") + dtSequenceNumberPath + - ". SequenceNumber: " + latestSequenceNumber); - } - - opList.add(Op.setData(dtSequenceNumberPath, seqOs.toByteArray(), -1)); } finally { seqOs.close(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java index 90706ff8c94b5..83defc5424707 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java @@ -29,10 +29,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.security.token.SecretManager.InvalidToken; import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; import org.apache.hadoop.security.token.delegation.DelegationKey; -import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -109,8 +107,7 @@ protected void storeNewToken(RMDelegationTokenIdentifier identifier, try { LOG.info("storing RMDelegation token with sequence number: " + identifier.getSequenceNumber()); - rmContext.getStateStore().storeRMDelegationTokenAndSequenceNumber( - identifier, renewDate, identifier.getSequenceNumber()); + rmContext.getStateStore().storeRMDelegationToken(identifier, renewDate); } catch (Exception e) { LOG.error("Error in storing RMDelegationToken with sequence number: " + identifier.getSequenceNumber()); @@ -124,11 +121,10 @@ protected void updateStoredToken(RMDelegationTokenIdentifier id, try { LOG.info("updating RMDelegation token with sequence number: " + id.getSequenceNumber()); - rmContext.getStateStore().updateRMDelegationTokenAndSequenceNumber(id, - renewDate, id.getSequenceNumber()); + rmContext.getStateStore().updateRMDelegationToken(id, renewDate); } catch (Exception e) { - LOG.error("Error in updating persisted RMDelegationToken with sequence number: " - + id.getSequenceNumber()); + LOG.error("Error in updating persisted RMDelegationToken" + + " with sequence number: " + id.getSequenceNumber()); ExitUtil.terminate(1, e); } } @@ -139,8 +135,7 @@ protected void removeStoredToken(RMDelegationTokenIdentifier ident) try { LOG.info("removing RMDelegation token with sequence number: " + ident.getSequenceNumber()); - rmContext.getStateStore().removeRMDelegationToken(ident, - delegationTokenSequenceNumber); + rmContext.getStateStore().removeRMDelegationToken(ident); } catch (Exception e) { LOG.error("Error in 
removing RMDelegationToken with sequence number: " + ident.getSequenceNumber()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java index 82ecac01ed03a..b01969bd08525 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java @@ -411,16 +411,15 @@ public void testRMDTSecretManagerStateStore( RMDelegationTokenIdentifier dtId1 = new RMDelegationTokenIdentifier(new Text("owner1"), new Text("renewer1"), new Text("realuser1")); + int sequenceNumber = 1111; + dtId1.setSequenceNumber(sequenceNumber); byte[] tokenBeforeStore = dtId1.getBytes(); Long renewDate1 = new Long(System.currentTimeMillis()); - int sequenceNumber = 1111; - store.storeRMDelegationTokenAndSequenceNumber(dtId1, renewDate1, - sequenceNumber); + store.storeRMDelegationToken(dtId1, renewDate1); modifyRMDelegationTokenState(); Map<RMDelegationTokenIdentifier, Long> token1 = new HashMap<RMDelegationTokenIdentifier, Long>(); token1.put(dtId1, renewDate1); - // store delegation key; DelegationKey key = new DelegationKey(1234, 4321 , "keyBytes".getBytes()); HashSet<DelegationKey> keySet = new HashSet<DelegationKey>(); @@ -440,9 +439,7 @@ public void testRMDTSecretManagerStateStore( // update RM delegation token; renewDate1 = new Long(System.currentTimeMillis()); - ++sequenceNumber; - store.updateRMDelegationTokenAndSequenceNumber( - dtId1, renewDate1, sequenceNumber); + 
store.updateRMDelegationToken(dtId1, renewDate1); token1.put(dtId1, renewDate1); RMDTSecretManagerState updateSecretManagerState = @@ -463,7 +460,7 @@ public void testRMDTSecretManagerStateStore( noKeySecretManagerState.getDTSequenceNumber()); // check to delete delegationToken - store.removeRMDelegationToken(dtId1, sequenceNumber); + store.removeRMDelegationToken(dtId1); RMDTSecretManagerState noKeyAndTokenSecretManagerState = store.loadState().getRMDTSecretManagerState(); token1.clear(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java index 204348478ab64..87df3d6fbceb2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java @@ -337,20 +337,18 @@ public void testFencedState() throws Exception { RMDelegationTokenIdentifier dtId1 = new RMDelegationTokenIdentifier(new Text("owner1"), new Text("renewer1"), new Text("realuser1")); - Long renewDate1 = new Long(System.currentTimeMillis()); - int sequenceNumber = 1111; - store.storeRMDelegationTokenAndSequenceNumber(dtId1, renewDate1, - sequenceNumber); + Long renewDate1 = new Long(System.currentTimeMillis()); + dtId1.setSequenceNumber(1111); + store.storeRMDelegationToken(dtId1, renewDate1); assertEquals("RMStateStore should have been in fenced state", true, store.isFencedState()); - store.updateRMDelegationTokenAndSequenceNumber(dtId1, renewDate1, - sequenceNumber); + 
store.updateRMDelegationToken(dtId1, renewDate1); assertEquals("RMStateStore should have been in fenced state", true, store.isFencedState()); // remove delegation key; - store.removeRMDelegationToken(dtId1, sequenceNumber); + store.removeRMDelegationToken(dtId1); assertEquals("RMStateStore should have been in fenced state", true, store.isFencedState());
5148146dafe9a89c5d21af420a179603399e9c40
hbase
HBASE-1938 Make in-memory table scanning faster ;- reverted 20110726_1938_MemStore.patch till we figure why it seems to slow- tests--git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1151653 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/hbase
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index 49faa92d94aa..1cf46fccf5f3 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -646,15 +646,11 @@ protected class MemStoreScanner implements KeyValueScanner { private KeyValue snapshotNextRow = null; // iterator based scanning. - private Iterator<KeyValue> kvsetIt; - private Iterator<KeyValue> snapshotIt; + Iterator<KeyValue> kvsetIt; + Iterator<KeyValue> snapshotIt; // number of iterations in this reseek operation - private int numIterReseek; - - - // the pre-calculated KeyValue to be returned by peek() or next() - private KeyValue theNext; + int numIterReseek; /* Some notes... @@ -680,9 +676,9 @@ protected class MemStoreScanner implements KeyValueScanner { //DebugPrint.println(" MS new@" + hashCode()); } - protected KeyValue getNext(Iterator<KeyValue> it, long readPoint) { + protected KeyValue getNext(Iterator<KeyValue> it) { KeyValue ret = null; - //long readPoint = ReadWriteConsistencyControl.getThreadReadPoint(); + long readPoint = ReadWriteConsistencyControl.getThreadReadPoint(); //DebugPrint.println( " MS@" + hashCode() + ": threadpoint = " + readPoint); while (ret == null && it.hasNext()) { @@ -714,11 +710,9 @@ public synchronized boolean seek(KeyValue key) { kvsetIt = kvTail.iterator(); snapshotIt = snapshotTail.iterator(); - long readPoint = ReadWriteConsistencyControl.getThreadReadPoint(); - kvsetNextRow = getNext(kvsetIt, readPoint); - snapshotNextRow = getNext(snapshotIt, readPoint); + kvsetNextRow = getNext(kvsetIt); + snapshotNextRow = getNext(snapshotIt); - theNext = getLowest(); //long readPoint = ReadWriteConsistencyControl.getThreadReadPoint(); //DebugPrint.println( " MS@" + hashCode() + " kvset seek: " + kvsetNextRow + " with size = " + @@ -726,18 +720,19 @@ public synchronized boolean seek(KeyValue 
key) { //DebugPrint.println( " MS@" + hashCode() + " snapshot seek: " + snapshotNextRow + " with size = " + // snapshot.size() + " threadread = " + readPoint); - // has data - return (theNext != null); + + KeyValue lowest = getLowest(); + + // has data := (lowest != null) + return lowest != null; } @Override - public synchronized boolean reseek(KeyValue key) { - + public boolean reseek(KeyValue key) { numIterReseek = reseekNumKeys; while (kvsetNextRow != null && comparator.compare(kvsetNextRow, key) < 0) { - kvsetNextRow = getNext(kvsetIt, - ReadWriteConsistencyControl.getThreadReadPoint()); + kvsetNextRow = getNext(kvsetIt); // if we scanned enough entries but still not able to find the // kv we are looking for, better cut our costs and do a tree // scan using seek. @@ -748,8 +743,7 @@ public synchronized boolean reseek(KeyValue key) { while (snapshotNextRow != null && comparator.compare(snapshotNextRow, key) < 0) { - snapshotNextRow = getNext(snapshotIt, - ReadWriteConsistencyControl.getThreadReadPoint()); + snapshotNextRow = getNext(snapshotIt); // if we scanned enough entries but still not able to find the // kv we are looking for, better cut our costs and do a tree // scan using seek. 
@@ -757,48 +751,38 @@ public synchronized boolean reseek(KeyValue key) { return seek(key); } } - - // Calculate the next value - theNext = getLowest(); - - return (theNext != null); + return (kvsetNextRow != null || snapshotNextRow != null); } - @Override public synchronized KeyValue peek() { //DebugPrint.println(" MS@" + hashCode() + " peek = " + getLowest()); - return theNext; + return getLowest(); } - @Override public synchronized KeyValue next() { + KeyValue theNext = getLowest(); if (theNext == null) { return null; } - KeyValue ret = theNext; - // Advance one of the iterators - long readPoint = ReadWriteConsistencyControl.getThreadReadPoint(); if (theNext == kvsetNextRow) { - kvsetNextRow = getNext(kvsetIt, readPoint); + kvsetNextRow = getNext(kvsetIt); } else { - snapshotNextRow = getNext(snapshotIt, readPoint); + snapshotNextRow = getNext(snapshotIt); } - // Calculate the next value - theNext = getLowest(); - - //readpoint = ReadWriteConsistencyControl.getThreadReadPoint(); - //DebugPrint.println(" MS@" + hashCode() + " next: " + theNext + - // " next_next: " + getLowest() + " threadpoint=" + readpoint); - return ret; + //long readpoint = ReadWriteConsistencyControl.getThreadReadPoint(); + //DebugPrint.println(" MS@" + hashCode() + " next: " + theNext + " next_next: " + + // getLowest() + " threadpoint=" + readpoint); + return theNext; } protected KeyValue getLowest() { - return getLower(kvsetNextRow, snapshotNextRow); + return getLower(kvsetNextRow, + snapshotNextRow); } /* @@ -807,15 +791,14 @@ protected KeyValue getLowest() { * comparator. */ protected KeyValue getLower(KeyValue first, KeyValue second) { - if (first == null) { - return second; + if (first == null && second == null) { + return null; } - if (second == null) { - return first; + if (first != null && second != null) { + int compare = comparator.compare(first, second); + return (compare <= 0 ? first : second); } - - int compare = comparator.compare(first, second); - return (compare <= 0 ? 
first : second); + return (first != null ? first : second); } public synchronized void close() {
0fdcf884987f1906a6ebfe2a2cb7cc86e05440ed
hadoop
HDFS-1330 and HADOOP-6889. Added additional unit- tests. Contributed by John George.--git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1163464 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/hadoop
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 595f4678ce6c4..9ddcdf8bfa8c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1084,6 +1084,7 @@ Release 0.22.0 - Unreleased (jghoman) HDFS-1330. Make RPCs to DataNodes timeout. (hairong) + Added additional unit tests per HADOOP-6889. (John George via mattf) HDFS-202. HDFS support of listLocatedStatus introduced in HADOOP-6870. HDFS piggyback block locations to each file status when listing a diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 05fa648653a75..714bce7045e3a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -25,7 +25,12 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; +import java.net.SocketTimeoutException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.LongWritable; import java.io.IOException; +import java.net.InetSocketAddress; import java.io.InputStream; import java.io.OutputStream; import java.security.MessageDigest; @@ -44,6 +49,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -52,6 +59,11 @@ import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.io.IOUtils; import 
org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.Client; +import org.apache.hadoop.ipc.ProtocolSignature; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.net.NetUtils; import org.mockito.internal.stubbing.answers.ThrowsException; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -61,9 +73,51 @@ * properly in case of errors. */ public class TestDFSClientRetries extends TestCase { + private static final String ADDRESS = "0.0.0.0"; + final static private int PING_INTERVAL = 1000; + final static private int MIN_SLEEP_TIME = 1000; public static final Log LOG = LogFactory.getLog(TestDFSClientRetries.class.getName()); - + final static private Configuration conf = new HdfsConfiguration(); + + private static class TestServer extends Server { + private boolean sleep; + private Class<? extends Writable> responseClass; + + public TestServer(int handlerCount, boolean sleep) throws IOException { + this(handlerCount, sleep, LongWritable.class, null); + } + + public TestServer(int handlerCount, boolean sleep, + Class<? extends Writable> paramClass, + Class<? extends Writable> responseClass) + throws IOException { + super(ADDRESS, 0, paramClass, handlerCount, conf); + this.sleep = sleep; + this.responseClass = responseClass; + } + + @Override + public Writable call(Class<?> protocol, Writable param, long receiveTime) + throws IOException { + if (sleep) { + // sleep a bit + try { + Thread.sleep(PING_INTERVAL + MIN_SLEEP_TIME); + } catch (InterruptedException e) {} + } + if (responseClass != null) { + try { + return responseClass.newInstance(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } else { + return param; // echo param as result + } + } + } + // writes 'len' bytes of data to out. 
private static void writeData(OutputStream out, int len) throws IOException { byte [] buf = new byte[4096*16]; @@ -80,8 +134,6 @@ private static void writeData(OutputStream out, int len) throws IOException { */ public void testWriteTimeoutAtDataNode() throws IOException, InterruptedException { - Configuration conf = new HdfsConfiguration(); - final int writeTimeout = 100; //milliseconds. // set a very short write timeout for datanode, so that tests runs fast. conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, writeTimeout); @@ -136,7 +188,6 @@ public void testNotYetReplicatedErrors() throws IOException { final String exceptionMsg = "Nope, not replicated yet..."; final int maxRetries = 1; // Allow one retry (total of two calls) - Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries); NameNode mockNN = mock(NameNode.class); @@ -182,7 +233,6 @@ public void testFailuresArePerOperation() throws Exception long fileSize = 4096; Path file = new Path("/testFile"); - Configuration conf = new Configuration(); // Set short retry timeout so this test runs faster conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); @@ -379,7 +429,6 @@ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, in long blockSize = 128*1024*1024; // DFS block size int bufferSize = 4096; - Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, xcievers); conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, retries); @@ -540,7 +589,6 @@ public void testGetFileChecksum() throws Exception { final String f = "/testGetFileChecksum"; final Path p = new Path(f); - final Configuration conf = new Configuration(); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); try { cluster.waitActive(); @@ -566,5 +614,39 
@@ public void testGetFileChecksum() throws Exception { cluster.shutdown(); } } + + /** Test that timeout occurs when DN does not respond to RPC. + * Start up a server and ask it to sleep for n seconds. Make an + * RPC to the server and set rpcTimeout to less than n and ensure + * that socketTimeoutException is obtained + */ + public void testClientDNProtocolTimeout() throws IOException { + final Server server = new TestServer(1, true); + server.start(); + + final InetSocketAddress addr = NetUtils.getConnectAddress(server); + DatanodeID fakeDnId = new DatanodeID( + "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort()); + + ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L)); + LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]); + + ClientDatanodeProtocol proxy = null; + + try { + proxy = DFSUtil.createClientDatanodeProtocolProxy( + fakeDnId, conf, 500, fakeBlock); + + proxy.getReplicaVisibleLength(null); + fail ("Did not get expected exception: SocketTimeoutException"); + } catch (SocketTimeoutException e) { + LOG.info("Got the expected Exception: SocketTimeoutException"); + } finally { + if (proxy != null) { + RPC.stopProxy(proxy); + } + server.stop(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java index eb58f7f195afc..b1f1fc911c9a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java @@ -22,6 +22,20 @@ import java.io.IOException; import java.util.List; +import java.net.InetSocketAddress; + +import java.net.SocketTimeoutException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Writable; +import 
org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.StringUtils; + +import org.apache.hadoop.ipc.Client; +import org.apache.hadoop.ipc.ProtocolSignature; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -38,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; @@ -48,6 +63,50 @@ * This tests InterDataNodeProtocol for block handling. */ public class TestInterDatanodeProtocol { + private static final String ADDRESS = "0.0.0.0"; + final static private int PING_INTERVAL = 1000; + final static private int MIN_SLEEP_TIME = 1000; + private static Configuration conf = new HdfsConfiguration(); + + + private static class TestServer extends Server { + private boolean sleep; + private Class<? extends Writable> responseClass; + + public TestServer(int handlerCount, boolean sleep) throws IOException { + this(handlerCount, sleep, LongWritable.class, null); + } + + public TestServer(int handlerCount, boolean sleep, + Class<? extends Writable> paramClass, + Class<? 
extends Writable> responseClass) + throws IOException { + super(ADDRESS, 0, paramClass, handlerCount, conf); + this.sleep = sleep; + this.responseClass = responseClass; + } + + @Override + public Writable call(Class<?> protocol, Writable param, long receiveTime) + throws IOException { + if (sleep) { + // sleep a bit + try { + Thread.sleep(PING_INTERVAL + MIN_SLEEP_TIME); + } catch (InterruptedException e) {} + } + if (responseClass != null) { + try { + return responseClass.newInstance(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } else { + return param; // echo param as result + } + } + } + public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException { Block metainfo = dn.data.getStoredBlock(b.getBlockPoolId(), b.getBlockId()); Assert.assertEquals(b.getBlockId(), metainfo.getBlockId()); @@ -73,7 +132,6 @@ public static LocatedBlock getLastLocatedBlock( */ @Test public void testBlockMetaDataInfo() throws Exception { - Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; try { @@ -222,7 +280,6 @@ public void testInitReplicaRecovery() throws IOException { * */ @Test public void testUpdateReplicaUnderRecovery() throws IOException { - final Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; try { @@ -291,4 +348,33 @@ public void testUpdateReplicaUnderRecovery() throws IOException { if (cluster != null) cluster.shutdown(); } } + + /** Test to verify that InterDatanode RPC timesout as expected when + * the server DN does not respond. 
+ */ + @Test + public void testInterDNProtocolTimeout() throws Exception { + final Server server = new TestServer(1, true); + server.start(); + + final InetSocketAddress addr = NetUtils.getConnectAddress(server); + DatanodeID fakeDnId = new DatanodeID( + "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort()); + DatanodeInfo dInfo = new DatanodeInfo(fakeDnId); + InterDatanodeProtocol proxy = null; + + try { + proxy = DataNode.createInterDataNodeProtocolProxy( + dInfo, conf, 500); + proxy.initReplicaRecovery(null); + fail ("Expected SocketTimeoutException exception, but did not get."); + } catch (SocketTimeoutException e) { + DataNode.LOG.info("Got expected Exception: SocketTimeoutException" + e); + } finally { + if (proxy != null) { + RPC.stopProxy(proxy); + } + server.stop(); + } + } }
d344d95b5c0d4a45640cd01d6d1e828b96587e6e
restlet-framework-java
Fixed bug in DomRepresentation causing the loss of- both public and system DocType. Reported by Lee Saferite.--
c
https://github.com/restlet/restlet-framework-java
diff --git a/build/tmpl/text/changes.txt b/build/tmpl/text/changes.txt index 26393df071..2f75338220 100644 --- a/build/tmpl/text/changes.txt +++ b/build/tmpl/text/changes.txt @@ -9,8 +9,10 @@ Changes log - Fixed issue in the selection of connectors. The last connector in the classpath for a given protocol was selected instead of the first one, leading to counter-intuitive behavior when multiple connectors were present in the classpath. - -[Enhancements] +- Fixed bug in DomRepresentation causing the loss of both public and system DocType. + Reported by Lee Saferite. +[ +Enhancements] - Added a getApplication() method to Context and Resource classes. - Added a new Grizzly based connector (full NIO). - Upgraded Apache MINA to version 1.1.0. diff --git a/modules/org.restlet/src/org/restlet/resource/DomRepresentation.java b/modules/org.restlet/src/org/restlet/resource/DomRepresentation.java index f869d58999..59b45caac0 100644 --- a/modules/org.restlet/src/org/restlet/resource/DomRepresentation.java +++ b/modules/org.restlet/src/org/restlet/resource/DomRepresentation.java @@ -154,6 +154,8 @@ public void write(OutputStream outputStream) throws IOException { .newTransformer(); transformer.setOutputProperty(OutputKeys.DOCTYPE_SYSTEM, getDocument().getDoctype().getSystemId()); + transformer.setOutputProperty(OutputKeys.DOCTYPE_PUBLIC, + getDocument().getDoctype().getPublicId()); transformer.transform(new DOMSource(getDocument()), new StreamResult(outputStream)); } catch (TransformerConfigurationException tce) {
cf5fb324e3d006d4360c0ae1bba0b2f57d3b6097
restlet-framework-java
Updated Ranges test case.--
p
https://github.com/restlet/restlet-framework-java
diff --git a/modules/org.restlet.test/src/org/restlet/test/RangeTestCase.java b/modules/org.restlet.test/src/org/restlet/test/RangeTestCase.java index 81b1fa31fb..6846d24ed0 100644 --- a/modules/org.restlet.test/src/org/restlet/test/RangeTestCase.java +++ b/modules/org.restlet.test/src/org/restlet/test/RangeTestCase.java @@ -103,7 +103,7 @@ public void handle(Request request, Response response) { String[] tab = value.split("-"); if (tab.length == 2) { index = Long.parseLong(tab[0]); - length = index + Long.parseLong(tab[1]); + length = Long.parseLong(tab[1]) - index; } } @@ -182,7 +182,6 @@ public void testRanges() { assertEquals(Status.SUCCESS_OK, client.handle(request).getStatus()); // Test partial Get. -/* request = new Request(Method.GET, "http://localhost:8182/testGet"); Response response = client.handle(request); assertEquals(Status.SUCCESS_OK, response.getStatus()); @@ -199,12 +198,12 @@ public void testRanges() { assertEquals(Status.SUCCESS_OK, response.getStatus()); assertEquals("12", response.getEntity().getText()); - request.setRanges(Arrays.asList(new Range(3, 2))); + request.setRanges(Arrays.asList(new Range(2, 2))); response = client.handle(request); assertEquals(Status.SUCCESS_OK, response.getStatus()); assertEquals("34", response.getEntity().getText()); - request.setRanges(Arrays.asList(new Range(3, 7))); + request.setRanges(Arrays.asList(new Range(2, 7))); response = client.handle(request); assertEquals(Status.SUCCESS_OK, response.getStatus()); assertEquals("3456789", response.getEntity().getText()); @@ -213,7 +212,7 @@ public void testRanges() { response = client.handle(request); assertEquals(Status.SUCCESS_OK, response.getStatus()); assertEquals("4567890", response.getEntity().getText()); -*/ + component.stop(); } catch (Exception e) { e.printStackTrace();
b9feaa999969e0c81ce4a84e10db110c37a97535
elasticsearch
Simplify TestCluster--TestCluster now doesn't use any reference counting anymore and-testcluster names are based on creation time to prevent confilcts if-builds hang.-
p
https://github.com/elastic/elasticsearch
diff --git a/src/test/java/org/elasticsearch/test/integration/AbstractSharedClusterTest.java b/src/test/java/org/elasticsearch/test/integration/AbstractSharedClusterTest.java index 8fe09c8c1b58c..5c38750105c6e 100644 --- a/src/test/java/org/elasticsearch/test/integration/AbstractSharedClusterTest.java +++ b/src/test/java/org/elasticsearch/test/integration/AbstractSharedClusterTest.java @@ -124,9 +124,8 @@ public ClusterService clusterService() { @AfterClass protected static void afterClass() { - TestCluster toRelease = cluster; cluster = null; - ClusterManager.releaseCluster(toRelease); + ClusterManager.releaseCluster(); } public static Client client() { diff --git a/src/test/java/org/elasticsearch/test/integration/ClusterManager.java b/src/test/java/org/elasticsearch/test/integration/ClusterManager.java index 652a75a3b1e75..9fb0d28b490c3 100644 --- a/src/test/java/org/elasticsearch/test/integration/ClusterManager.java +++ b/src/test/java/org/elasticsearch/test/integration/ClusterManager.java @@ -18,41 +18,21 @@ */ package org.elasticsearch.test.integration; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; public class ClusterManager { - private static final ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor(); private static TestCluster cluster; - private static int generation = 0; public synchronized static TestCluster accquireCluster() { if (cluster == null) { - cluster = new TestCluster(generation++); + cluster = new TestCluster(); } - TestCluster c = cluster; - if (!c.tryAccquire()) { - c = new TestCluster(generation++); - boolean tryAccquire = c.tryAccquire(); - assert tryAccquire; - cluster = c; - } - - c.reset(); - return c; + cluster.reset(); + return cluster; } - public static synchronized void releaseCluster(final TestCluster toRelease) { - toRelease.decrementReference(); - // TODO find a better way -// service.schedule(new Runnable() { -// 
@Override -// public void run() { -// toRelease.close(); -// } -// }, 3, TimeUnit.MINUTES); + public static synchronized void releaseCluster() { + // doNothing } } diff --git a/src/test/java/org/elasticsearch/test/integration/TestCluster.java b/src/test/java/org/elasticsearch/test/integration/TestCluster.java index 885bca75738b0..f44f4bc04f7f5 100644 --- a/src/test/java/org/elasticsearch/test/integration/TestCluster.java +++ b/src/test/java/org/elasticsearch/test/integration/TestCluster.java @@ -35,7 +35,6 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import org.apache.lucene.util.IOUtils; import org.elasticsearch.client.Client; @@ -69,15 +68,15 @@ public class TestCluster { * "action.auto_create_index" * "node.local" */ - - protected final ESLogger logger = Loggers.getLogger(getClass()); private Map<String, NodeAndClient> nodes = newHashMap(); - private final AtomicInteger refCount = new AtomicInteger(1); - private final String clusterName; + + private final AtomicBoolean open = new AtomicBoolean(true); + + private final Settings defaultSettings; @@ -85,8 +84,8 @@ public class TestCluster { private NodeAndClient clientNode; - public TestCluster(int generation) { - this("simple-test-cluster-" + NetworkUtils.getLocalAddress().getHostName() + "_gen_" + generation, ImmutableSettings.settingsBuilder().build()); + public TestCluster() { + this("simple-test-cluster-" + NetworkUtils.getLocalAddress().getHostName() + "_" + System.currentTimeMillis(), ImmutableSettings.settingsBuilder().build()); } private TestCluster(String clusterName, Settings defaultSettings) { @@ -103,25 +102,11 @@ private TestCluster(String clusterName, Settings defaultSettings) { this.defaultSettings = ImmutableSettings.settingsBuilder().put(defaultSettings).put("cluster.name", clusterName).build(); } - boolean tryAccquire() { - int refs = this.refCount.get(); - while (refs > 0) { - if 
(this.refCount.compareAndSet(refs, refs + 1)) { - return true; - } - } - return false; - } private void ensureOpen() { - if (this.refCount.get() == 0) { + if (!open.get()) { throw new RuntimeException("Cluster is already closed"); } - assert this.refCount.get() >= 0; - } - - int decrementReference() { - return this.refCount.decrementAndGet(); } public Node getOneNode() { @@ -201,15 +186,13 @@ public Client client() { public void close() { ensureOpen(); - while (this.refCount.get() == 1) { - if (this.refCount.compareAndSet(1, 0)) { - IOUtils.closeWhileHandlingException(nodes.values()); - nodes.clear(); + if (this.open.compareAndSet(true, false)) { + IOUtils.closeWhileHandlingException(nodes.values()); + nodes.clear(); + if (clientNode != null) { + IOUtils.closeWhileHandlingException(clientNode); } } - if (clientNode != null) { - IOUtils.closeWhileHandlingException(clientNode); - } } public ImmutableSet<ClusterBlock> waitForNoBlocks(TimeValue timeout, Node node) throws InterruptedException {
395959bf216574cad6a1632b13f472f4565c5cfb
kotlin
reformat code, optimize imports--
p
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidUIXmlParser.kt b/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidUIXmlParser.kt index 977cea50c740d..c9352a22b4daa 100644 --- a/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidUIXmlParser.kt +++ b/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidUIXmlParser.kt @@ -33,7 +33,6 @@ import com.intellij.openapi.project.Project import java.util.concurrent.ConcurrentLinkedQueue import com.intellij.openapi.util.Key import com.intellij.testFramework.LightVirtualFile -import com.intellij.openapi.vfs.VirtualFileSystem import com.intellij.psi.PsiManager import java.io.FileInputStream import org.xml.sax.helpers.DefaultHandler @@ -44,20 +43,17 @@ import com.intellij.psi.util.PsiModificationTracker import com.intellij.psi.impl.PsiModificationTrackerImpl import java.util.Queue import com.intellij.psi.PsiFile -import org.xml.sax.InputSource -import java.io.ByteArrayInputStream -import com.intellij.openapi.diagnostic.Log -import org.xml.sax.SAXException import com.intellij.openapi.diagnostic.Logger import com.intellij.psi.PsiElement abstract class AndroidUIXmlParser { - inner class NoUIXMLsFound: Exception("No android UI xmls found in $searchPath") - class NoAndroidManifestFound: Exception("No android manifest file found in project root") + inner class NoUIXMLsFound : Exception("No android UI xmls found in $searchPath") + class NoAndroidManifestFound : Exception("No android manifest file found in project root") class ManifestParsingFailed - enum class CacheAction { HIT; MISS } + enum class CacheAction { HIT; MISS + } val androidImports = arrayListOf("android.app.Activity", "android.view.View", @@ -105,11 +101,13 @@ abstract class AndroidUIXmlParser { psiFile.putUserData(ANDROID_USER_PACKAGE, androidAppPackage) lastCachedPsi = psiFile psiFile - } catch (e: Exception) { + } + catch (e: Exception) { invalidateCaches() 
null } - } else lastCachedPsi + } + else lastCachedPsi } private fun isAndroidUIXml(file: File): Boolean { @@ -128,7 +126,8 @@ abstract class AndroidUIXmlParser { if (!path.exists()) continue; if (path.isFile() && isAndroidUIXml(path)) { res.add(path) - } else if (path.isDirectory()) { + } + else if (path.isDirectory()) { res.addAll(searchForUIXml(path.listFiles()?.toArrayList())) } } @@ -190,7 +189,7 @@ abstract class AndroidUIXmlParser { val fileManager = VirtualFileManager.getInstance() val watchDir = fileManager.findFileByUrl("file://" + searchPath) val psiManager = PsiManager.getInstance(project) - return watchDir?.getChildren()?.toArrayList()?.map { psiManager.findFile(it) } ?.mapNotNull { it } ?: ArrayList(0) + return watchDir?.getChildren()?.toArrayList()?.map { psiManager.findFile(it) }?.mapNotNull { it } ?: ArrayList(0) } protected fun populateQueue(project: Project) { @@ -210,11 +209,13 @@ abstract class AndroidUIXmlParser { _package = attributes.toMap()["package"] ?: "" } }) - } catch (e: Exception) { + } + catch (e: Exception) { throw e } return AndroidManifest(_package) - } catch (e: Exception) { + } + catch (e: Exception) { throw NoAndroidManifestFound() } } @@ -223,9 +224,9 @@ abstract class AndroidUIXmlParser { for (id in ids) { val body = arrayListOf("return findViewById(0) as ${id.className}") kw.writeImmutableExtensionProperty(receiver = "Activity", - name = id.id, - retType = id.className, - getterBody = body ) + name = id.id, + retType = id.className, + getterBody = body) } return kw.output() } diff --git a/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidUtil.kt b/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidUtil.kt index 0aed83cc2551e..06d62b12ef1b6 100644 --- a/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidUtil.kt +++ b/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidUtil.kt @@ -16,13 +16,12 @@ package 
org.jetbrains.jet.lang.resolve.android -import com.intellij.openapi.vfs.VirtualFile import com.intellij.psi.PsiFile import com.intellij.psi.PsiElement trait AndroidResource -class AndroidID(val rawID: String): AndroidResource { +class AndroidID(val rawID: String) : AndroidResource { override fun equals(other: Any?): Boolean { return other is AndroidID && this.rawID == other.rawID @@ -35,20 +34,11 @@ class AndroidID(val rawID: String): AndroidResource { } } -class AndroidWidget(val id: String, val className: String): AndroidResource +class AndroidWidget(val id: String, val className: String) : AndroidResource -class AndroidManifest(val _package: String): AndroidResource - -//fun isAndroidSyntheticFile(f: VirtualFile?): Boolean { -// if (f?.getName() == AndroidConst.SYNTHETIC_FILENAME) { -// val userData = f?.getUserData(AndroidConst.ANDROID_SYNTHETIC); -// return (userData != null && userData.equals("OK")) -// } -// return false -//} +class AndroidManifest(val _package: String) : AndroidResource fun isAndroidSyntheticFile(f: PsiFile?): Boolean { -// return isAndroidSyntheticFile(f?.getVirtualFile()) if (f?.getName() == AndroidConst.SYNTHETIC_FILENAME) { val userData = f?.getUserData(AndroidConst.ANDROID_SYNTHETIC); return (userData != null && userData.equals("OK")) diff --git a/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidXmlHandler.kt b/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidXmlHandler.kt index 0576fe04f5acf..c09e4e887fe62 100644 --- a/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidXmlHandler.kt +++ b/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/AndroidXmlHandler.kt @@ -20,7 +20,7 @@ import org.xml.sax.helpers.DefaultHandler import org.xml.sax.Attributes import java.util.HashMap -class AndroidXmlHandler(val elementCallback: (String, String)-> Unit): DefaultHandler() { +class AndroidXmlHandler(val elementCallback: (String, String) -> Unit) : 
DefaultHandler() { override fun startDocument() { super<DefaultHandler>.startDocument() @@ -46,7 +46,7 @@ class AndroidXmlHandler(val elementCallback: (String, String)-> Unit): DefaultHa public fun Attributes.toMap(): HashMap<String, String> { val res = HashMap<String, String>() - for (index in 0..getLength()-1) { + for (index in 0..getLength() - 1) { val attrName = getLocalName(index)!! val attrVal = getValue(index)!! res[attrName] = attrVal diff --git a/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/CliAndroidUIXmlParser.kt b/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/CliAndroidUIXmlParser.kt index 6e84f67d8459c..8484a90afba86 100644 --- a/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/CliAndroidUIXmlParser.kt +++ b/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/CliAndroidUIXmlParser.kt @@ -16,17 +16,12 @@ package org.jetbrains.jet.lang.resolve.android -import com.intellij.openapi.vfs.VirtualFileManager import java.util.ArrayList import com.intellij.openapi.project.Project import com.intellij.psi.PsiFile -import java.io.ByteArrayInputStream -import org.xml.sax.InputSource -import javax.xml.parsers.SAXParser -import javax.xml.parsers.SAXParserFactory import com.intellij.psi.PsiElement -class CliAndroidUIXmlParser(val project: Project, override val searchPath: String?): AndroidUIXmlParser() { +class CliAndroidUIXmlParser(val project: Project, override val searchPath: String?) 
: AndroidUIXmlParser() { override var androidAppPackage: String = "" @@ -42,7 +37,8 @@ class CliAndroidUIXmlParser(val project: Project, override val searchPath: Strin try { saxParser.parse(file.getVirtualFile()?.getInputStream()!!, handler) return produceKotlinProperties(KotlinStringWriter(), ids).toString() - } catch (e: Throwable) { + } + catch (e: Throwable) { LOG.error(e) return "" } diff --git a/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/KotlinWriter.kt b/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/KotlinWriter.kt index 261eebcca5a21..096ddaad1f6e1 100644 --- a/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/KotlinWriter.kt +++ b/compiler/frontend.android/src/org/jetbrains/jet/lang/resolve/android/KotlinWriter.kt @@ -62,7 +62,8 @@ class KotlinStringWriter : KotlinWriter { } body.decIndent() body.writeln("}") - } else { + } + else { body.writeNoIndent("=") body.writeNoIndent(getterBody.join("").replace("return", "")) body.newLine() diff --git a/idea/src/org/jetbrains/jet/plugin/android/AndroidGotoDeclarationHandler.kt b/idea/src/org/jetbrains/jet/plugin/android/AndroidGotoDeclarationHandler.kt index d9cbf0c1fa19a..ff2ca51dcb539 100644 --- a/idea/src/org/jetbrains/jet/plugin/android/AndroidGotoDeclarationHandler.kt +++ b/idea/src/org/jetbrains/jet/plugin/android/AndroidGotoDeclarationHandler.kt @@ -20,15 +20,11 @@ import com.intellij.codeInsight.navigation.actions.GotoDeclarationHandler import com.intellij.psi.PsiElement import com.intellij.openapi.editor.Editor import com.intellij.openapi.actionSystem.DataContext -import org.jetbrains.jet.lang.psi.JetProperty import com.intellij.openapi.components.ServiceManager import org.jetbrains.jet.lang.resolve.android.AndroidUIXmlParser -import com.intellij.psi.util.PsiTreeUtil -import com.intellij.psi.PsiReferenceExpression -import com.intellij.psi.PsiIdentifier import com.intellij.psi.impl.source.tree.LeafPsiElement -public class 
AndroidGotoDeclarationHandler: GotoDeclarationHandler { +public class AndroidGotoDeclarationHandler : GotoDeclarationHandler { override fun getGotoDeclarationTargets(sourceElement: PsiElement?, offset: Int, editor: Editor?): Array<PsiElement>? { if (sourceElement is LeafPsiElement) { val parser = ServiceManager.getService(sourceElement.getProject(), javaClass<AndroidUIXmlParser>()) @@ -37,7 +33,8 @@ public class AndroidGotoDeclarationHandler: GotoDeclarationHandler { return array(psiElement) } else return null - } else return null + } + else return null } override fun getActionText(context: DataContext?): String? { diff --git a/idea/src/org/jetbrains/jet/plugin/android/AndroidXmlVisitor.kt b/idea/src/org/jetbrains/jet/plugin/android/AndroidXmlVisitor.kt index 03b9ac3368487..5ffb43963db5e 100644 --- a/idea/src/org/jetbrains/jet/plugin/android/AndroidXmlVisitor.kt +++ b/idea/src/org/jetbrains/jet/plugin/android/AndroidXmlVisitor.kt @@ -21,7 +21,7 @@ import com.intellij.psi.PsiElement import com.intellij.psi.xml.XmlElement import com.intellij.psi.xml.XmlTag -class AndroidXmlVisitor(val elementCallback: (String, String)-> Unit): XmlElementVisitor() { +class AndroidXmlVisitor(val elementCallback: (String, String) -> Unit) : XmlElementVisitor() { override fun visitElement(element: PsiElement) { element.acceptChildren(this) diff --git a/idea/src/org/jetbrains/jet/plugin/android/IDEAndroidUIXmlParser.kt b/idea/src/org/jetbrains/jet/plugin/android/IDEAndroidUIXmlParser.kt index 8526bcd3b189e..ce1f2f787fd36 100644 --- a/idea/src/org/jetbrains/jet/plugin/android/IDEAndroidUIXmlParser.kt +++ b/idea/src/org/jetbrains/jet/plugin/android/IDEAndroidUIXmlParser.kt @@ -18,10 +18,7 @@ package org.jetbrains.jet.plugin.android import org.jetbrains.jet.lang.resolve.android.AndroidUIXmlParser import com.intellij.openapi.project.Project -import com.intellij.openapi.vfs.VirtualFileManager import java.util.ArrayList -import com.intellij.openapi.vfs.VirtualFileAdapter -import 
com.intellij.openapi.vfs.VirtualFileEvent import com.intellij.psi.PsiFile import org.jetbrains.jet.lang.resolve.android.AndroidWidget import org.jetbrains.jet.lang.resolve.android.KotlinStringWriter @@ -30,9 +27,8 @@ import com.intellij.psi.XmlElementVisitor import com.intellij.psi.xml.XmlTag import com.intellij.psi.PsiElement import java.util.HashMap -import com.intellij.psi.xml.XmlAttribute -class IDEAndroidUIXmlParser(val project: Project): AndroidUIXmlParser() { +class IDEAndroidUIXmlParser(val project: Project) : AndroidUIXmlParser() { override val searchPath: String? = project.getBasePath() + "/res/layout/" override var androidAppPackage: String = "" @@ -74,7 +70,7 @@ class IDEAndroidUIXmlParser(val project: Project): AndroidUIXmlParser() { override fun parseSingleFileImpl(file: PsiFile): String { val ids: MutableCollection<AndroidWidget> = ArrayList() - file.accept(AndroidXmlVisitor({ id, wClass -> ids.add(AndroidWidget(id, wClass))})) + file.accept(AndroidXmlVisitor({ id, wClass -> ids.add(AndroidWidget(id, wClass)) })) return produceKotlinProperties(KotlinStringWriter(), ids).toString() } @@ -89,7 +85,7 @@ class IDEAndroidUIXmlParser(val project: Project): AndroidUIXmlParser() { val idPrefix = "@+id/" val attribute = tag?.getAttribute("android:id") if (attribute != null && attribute.getValue() == idPrefix + oldName) { - allRenames[XmlAttributeValueWrapper(attribute.getValueElement()!!)] = idPrefix+newName + allRenames[XmlAttributeValueWrapper(attribute.getValueElement()!!)] = idPrefix + newName } tag?.acceptChildren(this) } diff --git a/idea/src/org/jetbrains/jet/plugin/android/XmlAttributeValueWrapper.java b/idea/src/org/jetbrains/jet/plugin/android/XmlAttributeValueWrapper.java index dc6c326baaad0..d82abfcf5b9cf 100644 --- a/idea/src/org/jetbrains/jet/plugin/android/XmlAttributeValueWrapper.java +++ b/idea/src/org/jetbrains/jet/plugin/android/XmlAttributeValueWrapper.java @@ -30,13 +30,9 @@ import com.intellij.psi.search.GlobalSearchScope; import 
com.intellij.psi.search.PsiElementProcessor; import com.intellij.psi.search.SearchScope; -import com.intellij.psi.util.PsiTreeUtil; import com.intellij.psi.xml.XmlAttribute; import com.intellij.psi.xml.XmlAttributeValue; -import com.intellij.psi.xml.XmlTag; import com.intellij.util.IncorrectOperationException; -import com.intellij.util.xml.DomElement; -import com.intellij.util.xml.DomManager; import org.jetbrains.annotations.NonNls; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -384,25 +380,14 @@ public Icon getIcon(int flags) { @Override public String getName() { - String value = myWrappee.getValue(); return ((NavigationItem) myWrappee).getName(); } @Override @Nullable public PsiElement setName(@NonNls @NotNull String name) throws IncorrectOperationException { - //if (AndroidResourceUtil.isIdDeclaration(myWrappee)) { - XmlAttribute attribute = (XmlAttribute) myWrappee.getParent(); - attribute.setValue(name); - //} - //else { - // // then it is a value resource - // XmlTag tag = PsiTreeUtil.getParentOfType(myWrappee, XmlTag.class); - // DomElement domElement = DomManager.getDomManager(getProject()).getDomElement(tag); - // assert domElement instanceof ResourceElement; - // ResourceElement resElement = (ResourceElement) domElement; - // resElement.getName().setValue(name); - //} + XmlAttribute attribute = (XmlAttribute) myWrappee.getParent(); + attribute.setValue(name); return null; }
12aa442f95c567f3bc1488bb641fca1e9636e9f1
restlet-framework-java
Fixed issue 210 : a Language tag is composed of a- list of subtags--
c
https://github.com/restlet/restlet-framework-java
diff --git a/module/org.restlet/src/org/restlet/data/Language.java b/module/org.restlet/src/org/restlet/data/Language.java index 050fea006c..cae571f3ef 100644 --- a/module/org.restlet/src/org/restlet/data/Language.java +++ b/module/org.restlet/src/org/restlet/data/Language.java @@ -60,9 +60,6 @@ public final class Language extends Metadata { public static final Language SPANISH = new Language("es", "Spanish language"); - /** The metadata main tag taken from the metadata name like "en" for "en-us". */ - private String primaryTag; - /** The metadata main list of subtags taken from the metadata name. */ private List<String> subTags; @@ -117,15 +114,7 @@ public Language(final String name) { */ public Language(final String name, final String description) { super(name, description); - String[] tags = getName().split("-"); - subTags = new ArrayList<String>(); - - if (tags.length > 0) { - primaryTag = tags[0]; - for (int i = 1; i < tags.length; i++) { - subTags.add(tags[i]); - } - } + this.subTags = null; } /** {@inheritDoc} */ @@ -141,7 +130,29 @@ public boolean equals(final Object object) { * @return The primary tag. */ public String getPrimaryTag() { - return this.primaryTag; + int separator = getName().indexOf('-'); + + if (separator == -1) { + return getName(); + } else { + return getName().substring(0, separator); + } + } + + /** + * Returns the main tag. + * + * @return The main tag. + */ + @Deprecated + public String getMainTag() { + int separator = getName().indexOf('-'); + + if (separator == -1) { + return getName(); + } else { + return getName().substring(0, separator); + } } /** @@ -150,6 +161,17 @@ public String getPrimaryTag() { * @return The list of subtags for this language Tag. */ public List<String> getSubTags() { + if (subTags == null) { + String[] tags = getName().split("-"); + subTags = new ArrayList<String>(); + + if (tags.length > 0) { + for (int i = 1; i < tags.length; i++) { + subTags.add(tags[i]); + } + } + } + return subTags; }
f49f42476b5931574e1d7f86ac7f3d763f344343
camel
Removed the System.out.print line from RunMojo--git-svn-id: https://svn.apache.org/repos/asf/camel/trunk@758544 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/camel
diff --git a/tooling/maven/camel-maven-plugin/src/main/java/org/apache/camel/maven/RunMojo.java b/tooling/maven/camel-maven-plugin/src/main/java/org/apache/camel/maven/RunMojo.java index 3e7feaba3948a..09ce2cb0e2b4f 100644 --- a/tooling/maven/camel-maven-plugin/src/main/java/org/apache/camel/maven/RunMojo.java +++ b/tooling/maven/camel-maven-plugin/src/main/java/org/apache/camel/maven/RunMojo.java @@ -400,8 +400,6 @@ public void execute() throws MojoExecutionException, MojoFailureException { getLog().info("Using the org.apache.camel.spring.javaconfig.Main to initate a camel context"); } - System.out.println("The mainClass is " + mainClass); - if (getLog().isDebugEnabled()) { StringBuffer msg = new StringBuffer("Invoking : "); msg.append(mainClass);
992ba4c79ce11b0b72f9e15463a3b88505a41b88
hadoop
merge YARN-360 from trunk. Allow apps to- concurrently register tokens for renewal. Contributed by Daryn Sharp.--git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1442442 13f79535-47bb-0310-9956-ffa450edef68-
a
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 01f1bcbe45cc5..6c8f2e26bf5e1 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -90,6 +90,9 @@ Release 2.0.3-alpha - Unreleased YARN-277. Use AMRMClient in DistributedShell to exemplify the approach. (Bikas Saha via hitesh) + YARN-360. Allow apps to concurrently register tokens for renewal. + (Daryn Sharp via sseth) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 082df54630510..8526c9a737639 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -167,6 +167,11 @@ <Field name="minimumAllocation" /> <Bug pattern="IS2_INCONSISTENT_SYNC" /> </Match> + <Match> + <Class name="org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer"/> + <Field name="renewalTimer" /> + <Bug code="IS"/> + </Match> <!-- Don't care if putIfAbsent value is ignored --> <Match> diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index 9232190ba3bec..066a0a5b969d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -261,7 +261,7 @@ private void addTokenToList(DelegationTokenToRenew 
t) { * done else false. * @throws IOException */ - public synchronized void addApplication( + public void addApplication( ApplicationId applicationId, Credentials ts, boolean shouldCancelAtEnd) throws IOException { if (ts == null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index ad127a9264d9d..c59625361ca8c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -21,11 +21,17 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.Collections; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -50,6 +56,8 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; /** * unit test - @@ -541,4 +549,54 @@ public void testDTKeepAlive2() throws Exception { fail("Renewal of cancelled token should have failed"); } catch (InvalidToken ite) {} } + + 
@Test(timeout=2000) + public void testConncurrentAddApplication() + throws IOException, InterruptedException, BrokenBarrierException { + final CyclicBarrier startBarrier = new CyclicBarrier(2); + final CyclicBarrier endBarrier = new CyclicBarrier(2); + + // this token uses barriers to block during renew + final Credentials creds1 = new Credentials(); + final Token<?> token1 = mock(Token.class); + creds1.addToken(new Text("token"), token1); + doReturn(true).when(token1).isManaged(); + doAnswer(new Answer<Long>() { + public Long answer(InvocationOnMock invocation) + throws InterruptedException, BrokenBarrierException { + startBarrier.await(); + endBarrier.await(); + return Long.MAX_VALUE; + }}).when(token1).renew(any(Configuration.class)); + + // this dummy token fakes renewing + final Credentials creds2 = new Credentials(); + final Token<?> token2 = mock(Token.class); + creds2.addToken(new Text("token"), token2); + doReturn(true).when(token2).isManaged(); + doReturn(Long.MAX_VALUE).when(token2).renew(any(Configuration.class)); + + // fire up the renewer + final DelegationTokenRenewer dtr = new DelegationTokenRenewer(); + dtr.init(conf); + dtr.start(); + + // submit a job that blocks during renewal + Thread submitThread = new Thread() { + @Override + public void run() { + try { + dtr.addApplication(mock(ApplicationId.class), creds1, false); + } catch (IOException e) {} + } + }; + submitThread.start(); + + // wait till 1st submit blocks, then submit another + startBarrier.await(); + dtr.addApplication(mock(ApplicationId.class), creds2, false); + // signal 1st to complete + endBarrier.await(); + submitThread.join(); + } }
6e15ed3153e6fbcacd2b8798b9c5ebea29768210
kotlin
WithDeferredResolve removed (it was never used)--
p
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/MutableClassDescriptorLite.java b/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/MutableClassDescriptorLite.java index 01dab6767eb70..da643a50d59df 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/MutableClassDescriptorLite.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/MutableClassDescriptorLite.java @@ -37,8 +37,7 @@ /** * @author Stepan Koltsov */ -public abstract class MutableClassDescriptorLite extends ClassDescriptorBase - implements WithDeferredResolve { +public abstract class MutableClassDescriptorLite extends ClassDescriptorBase { private List<AnnotationDescriptor> annotations = Lists.newArrayList(); @@ -68,16 +67,6 @@ public MutableClassDescriptorLite(@NotNull DeclarationDescriptor containingDecla this.kind = kind; } - @Override - public void forceResolve() { - - } - - @Override - public boolean isAlreadyResolved() { - return false; - } - @NotNull @Override public DeclarationDescriptor getContainingDeclaration() { diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/NamespaceDescriptorImpl.java b/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/NamespaceDescriptorImpl.java index e97dccd31d767..77345674a3657 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/NamespaceDescriptorImpl.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/NamespaceDescriptorImpl.java @@ -28,7 +28,7 @@ /** * @author abreslav */ -public class NamespaceDescriptorImpl extends AbstractNamespaceDescriptorImpl implements WithDeferredResolve { +public class NamespaceDescriptorImpl extends AbstractNamespaceDescriptorImpl { private WritableScope memberScope; @@ -102,14 +102,4 @@ public ClassObjectStatus setClassObjectDescriptor(@NotNull MutableClassDescripto return builder; } - - @Override - public void forceResolve() { - - } - - @Override - public boolean isAlreadyResolved() { - return false; - } } diff --git 
a/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/WithDeferredResolve.java b/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/WithDeferredResolve.java deleted file mode 100644 index da3177fa2e45b..0000000000000 --- a/compiler/frontend/src/org/jetbrains/jet/lang/descriptors/WithDeferredResolve.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2010-2012 JetBrains s.r.o. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.jetbrains.jet.lang.descriptors; - -/** - * @author Nikolay Krasko - */ -public interface WithDeferredResolve { - void forceResolve(); - boolean isAlreadyResolved(); -} diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TopDownAnalysisContext.java b/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TopDownAnalysisContext.java index bb5ec6502428e..ed81b3ddcba82 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TopDownAnalysisContext.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TopDownAnalysisContext.java @@ -47,7 +47,7 @@ public class TopDownAnalysisContext implements BodiesResolveContext { // File scopes - package scope extended with imports protected final Map<JetFile, WritableScope> namespaceScopes = Maps.newHashMap(); - public final Map<JetDeclarationContainer, WithDeferredResolve> forDeferredResolver = Maps.newHashMap(); + public final Map<JetDeclarationContainer, DeclarationDescriptor> forDeferredResolver = Maps.newHashMap(); public final 
Map<JetDeclarationContainer, JetScope> normalScope = Maps.newHashMap(); diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TypeHierarchyResolver.java b/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TypeHierarchyResolver.java index 09edbc218c6c0..f207cd7a421be 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TypeHierarchyResolver.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/resolve/TypeHierarchyResolver.java @@ -112,7 +112,7 @@ public void process( JetDeclarationContainer declarationContainer = forDeferredResolve.poll(); assert declarationContainer != null; - WithDeferredResolve descriptorForDeferredResolve = context.forDeferredResolver.get(declarationContainer); + DeclarationDescriptor descriptorForDeferredResolve = context.forDeferredResolver.get(declarationContainer); JetScope scope = context.normalScope.get(declarationContainer); // Even more temp code @@ -668,12 +668,12 @@ private ConstructorDescriptorImpl createPrimaryConstructorForObject( private void prepareForDeferredCall( @NotNull JetScope outerScope, - @NotNull WithDeferredResolve withDeferredResolve, + @NotNull DeclarationDescriptor descriptorForDeferredResolve, @NotNull JetDeclarationContainer container ) { forDeferredResolve.add(container); context.normalScope.put(container, outerScope); - context.forDeferredResolver.put(container, withDeferredResolve); + context.forDeferredResolver.put(container, descriptorForDeferredResolve); } @Nullable
31f4ff749c34c8ff573a1d6e1688c39ea1b4463c
kotlin
Type annotations supported in Java elements--Reflection-related implementations are pending-
a
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/components/PsiBasedExternalAnnotationResolver.java b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/components/PsiBasedExternalAnnotationResolver.java index 4bd2daea519fb..0b8ff30a6a078 100644 --- a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/components/PsiBasedExternalAnnotationResolver.java +++ b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/components/PsiBasedExternalAnnotationResolver.java @@ -24,8 +24,8 @@ import org.jetbrains.kotlin.load.java.structure.JavaAnnotation; import org.jetbrains.kotlin.load.java.structure.JavaAnnotationOwner; import org.jetbrains.kotlin.load.java.structure.impl.JavaAnnotationImpl; -import org.jetbrains.kotlin.load.java.structure.impl.JavaAnnotationOwnerImpl; import org.jetbrains.kotlin.load.java.structure.impl.JavaElementCollectionFromPsiArrayUtil; +import org.jetbrains.kotlin.load.java.structure.impl.JavaModifierListOwnerImpl; import org.jetbrains.kotlin.name.FqName; import java.util.Collection; @@ -35,18 +35,25 @@ public class PsiBasedExternalAnnotationResolver implements ExternalAnnotationRes @Nullable @Override public JavaAnnotation findExternalAnnotation(@NotNull JavaAnnotationOwner owner, @NotNull FqName fqName) { - PsiAnnotation psiAnnotation = findExternalAnnotation(((JavaAnnotationOwnerImpl) owner).getPsi(), fqName); - return psiAnnotation == null ? null : new JavaAnnotationImpl(psiAnnotation); + if (owner instanceof JavaModifierListOwnerImpl) { + JavaModifierListOwnerImpl modifierListOwner = (JavaModifierListOwnerImpl) owner; + PsiAnnotation psiAnnotation = findExternalAnnotation(modifierListOwner.getPsi(), fqName); + return psiAnnotation == null ? 
null : new JavaAnnotationImpl(psiAnnotation); + } + return null; } @NotNull @Override public Collection<JavaAnnotation> findExternalAnnotations(@NotNull JavaAnnotationOwner owner) { - PsiModifierListOwner psiOwner = ((JavaAnnotationOwnerImpl) owner).getPsi(); - PsiAnnotation[] annotations = ExternalAnnotationsManager.getInstance(psiOwner.getProject()).findExternalAnnotations(psiOwner); - return annotations == null - ? Collections.<JavaAnnotation>emptyList() - : JavaElementCollectionFromPsiArrayUtil.annotations(annotations); + if (owner instanceof JavaModifierListOwnerImpl) { + PsiModifierListOwner psiOwner = ((JavaModifierListOwnerImpl) owner).getPsi(); + PsiAnnotation[] annotations = ExternalAnnotationsManager.getInstance(psiOwner.getProject()).findExternalAnnotations(psiOwner); + return annotations == null + ? Collections.<JavaAnnotation>emptyList() + : JavaElementCollectionFromPsiArrayUtil.annotations(annotations); + } + return Collections.emptyList(); } @Nullable diff --git a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaAnnotationOwnerImpl.java b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaAnnotationOwnerImpl.java index dab67466412c0..c13860e33d61d 100644 --- a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaAnnotationOwnerImpl.java +++ b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaAnnotationOwnerImpl.java @@ -16,11 +16,11 @@ package org.jetbrains.kotlin.load.java.structure.impl; -import com.intellij.psi.PsiModifierListOwner; -import org.jetbrains.annotations.NotNull; +import com.intellij.psi.PsiAnnotationOwner; +import org.jetbrains.annotations.Nullable; import org.jetbrains.kotlin.load.java.structure.JavaAnnotationOwner; public interface JavaAnnotationOwnerImpl extends JavaAnnotationOwner { - @NotNull - PsiModifierListOwner getPsi(); + @Nullable + PsiAnnotationOwner getAnnotationOwnerPsi(); } diff --git 
a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaClassImpl.java b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaClassImpl.java index 57db2aef0371b..d7f9e5d13dd79 100644 --- a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaClassImpl.java +++ b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaClassImpl.java @@ -146,18 +146,6 @@ public Visibility getVisibility() { return JavaElementUtil.getVisibility(this); } - @NotNull - @Override - public Collection<JavaAnnotation> getAnnotations() { - return JavaElementUtil.getAnnotations(this); - } - - @Nullable - @Override - public JavaAnnotation findAnnotation(@NotNull FqName fqName) { - return JavaElementUtil.findAnnotation(this, fqName); - } - @Override @NotNull public JavaClassifierType getDefaultType() { diff --git a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaClassifierImpl.java b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaClassifierImpl.java index cbca1c0ecaf74..f94bd71652e28 100644 --- a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaClassifierImpl.java +++ b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaClassifierImpl.java @@ -16,16 +16,28 @@ package org.jetbrains.kotlin.load.java.structure.impl; +import com.intellij.psi.PsiAnnotationOwner; import com.intellij.psi.PsiClass; import com.intellij.psi.PsiTypeParameter; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.jetbrains.kotlin.load.java.structure.JavaAnnotation; import org.jetbrains.kotlin.load.java.structure.JavaClassifier; +import org.jetbrains.kotlin.name.FqName; -public abstract class JavaClassifierImpl<Psi extends PsiClass> extends JavaElementImpl<Psi> implements JavaClassifier { +import java.util.Collection; + +public abstract class JavaClassifierImpl<Psi extends 
PsiClass> extends JavaElementImpl<Psi> implements JavaClassifier, JavaAnnotationOwnerImpl { protected JavaClassifierImpl(@NotNull Psi psiClass) { super(psiClass); } + @NotNull + @Override + public PsiAnnotationOwner getAnnotationOwnerPsi() { + return getPsi().getModifierList(); + } + @NotNull /* package */ static JavaClassifier create(@NotNull PsiClass psiClass) { if (psiClass instanceof PsiTypeParameter) { @@ -35,4 +47,16 @@ protected JavaClassifierImpl(@NotNull Psi psiClass) { return new JavaClassImpl(psiClass); } } + + @NotNull + @Override + public Collection<JavaAnnotation> getAnnotations() { + return JavaElementUtil.getAnnotations(this); + } + + @Nullable + @Override + public JavaAnnotation findAnnotation(@NotNull FqName fqName) { + return JavaElementUtil.findAnnotation(this, fqName); + } } diff --git a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaElementUtil.java b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaElementUtil.java index 03c6cdc46d60a..c0a225744c313 100644 --- a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaElementUtil.java +++ b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaElementUtil.java @@ -16,10 +16,7 @@ package org.jetbrains.kotlin.load.java.structure.impl; -import com.intellij.psi.PsiAnnotation; -import com.intellij.psi.PsiModifier; -import com.intellij.psi.PsiModifierList; -import com.intellij.psi.PsiModifierListOwner; +import com.intellij.psi.*; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.jetbrains.kotlin.descriptors.Visibilities; @@ -66,18 +63,18 @@ public static Visibility getVisibility(@NotNull JavaModifierListOwnerImpl owner) @NotNull public static Collection<JavaAnnotation> getAnnotations(@NotNull JavaAnnotationOwnerImpl owner) { - PsiModifierList modifierList = owner.getPsi().getModifierList(); - if (modifierList != null) { - return 
annotations(modifierList.getAnnotations()); + PsiAnnotationOwner annotationOwnerPsi = owner.getAnnotationOwnerPsi(); + if (annotationOwnerPsi != null) { + return annotations(annotationOwnerPsi.getAnnotations()); } return Collections.emptyList(); } @Nullable public static JavaAnnotation findAnnotation(@NotNull JavaAnnotationOwnerImpl owner, @NotNull FqName fqName) { - PsiModifierList modifierList = owner.getPsi().getModifierList(); - if (modifierList != null) { - PsiAnnotation psiAnnotation = modifierList.findAnnotation(fqName.asString()); + PsiAnnotationOwner annotationOwnerPsi = owner.getAnnotationOwnerPsi(); + if (annotationOwnerPsi != null) { + PsiAnnotation psiAnnotation = annotationOwnerPsi.findAnnotation(fqName.asString()); return psiAnnotation == null ? null : new JavaAnnotationImpl(psiAnnotation); } return null; diff --git a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaMemberImpl.java b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaMemberImpl.java index 03989fb0a0b0d..f730d42fc57c9 100644 --- a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaMemberImpl.java +++ b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaMemberImpl.java @@ -16,6 +16,7 @@ package org.jetbrains.kotlin.load.java.structure.impl; +import com.intellij.psi.PsiAnnotationOwner; import com.intellij.psi.PsiClass; import com.intellij.psi.PsiMember; import org.jetbrains.annotations.NotNull; @@ -35,6 +36,12 @@ protected JavaMemberImpl(@NotNull Psi psiMember) { super(psiMember); } + @Nullable + @Override + public PsiAnnotationOwner getAnnotationOwnerPsi() { + return getPsi().getModifierList(); + } + @NotNull @Override public Name getName() { diff --git a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaTypeImpl.java b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaTypeImpl.java index 1b55598c52969..e2874bafc5af7 100644 --- 
a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaTypeImpl.java +++ b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaTypeImpl.java @@ -19,10 +19,14 @@ import com.intellij.psi.*; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import org.jetbrains.kotlin.load.java.structure.JavaAnnotation; import org.jetbrains.kotlin.load.java.structure.JavaArrayType; import org.jetbrains.kotlin.load.java.structure.JavaType; +import org.jetbrains.kotlin.name.FqName; -public abstract class JavaTypeImpl<Psi extends PsiType> implements JavaType { +import java.util.Collection; + +public abstract class JavaTypeImpl<Psi extends PsiType> implements JavaType, JavaAnnotationOwnerImpl { private final Psi psiType; public JavaTypeImpl(@NotNull Psi psiType) { @@ -34,6 +38,12 @@ public Psi getPsi() { return psiType; } + @Nullable + @Override + public PsiAnnotationOwner getAnnotationOwnerPsi() { + return getPsi(); + } + @NotNull public static JavaTypeImpl<?> create(@NotNull PsiType psiType) { return psiType.accept(new PsiTypeVisitor<JavaTypeImpl<?>>() { @@ -75,6 +85,19 @@ public JavaArrayType createArrayType() { return new JavaArrayTypeImpl(getPsi().createArrayType()); } + @NotNull + @Override + public Collection<JavaAnnotation> getAnnotations() { + return JavaElementUtil.getAnnotations(this); + } + + @Nullable + @Override + public JavaAnnotation findAnnotation(@NotNull FqName fqName) { + return JavaElementUtil.findAnnotation(this, fqName); + } + + @Override public int hashCode() { return getPsi().hashCode(); diff --git a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaValueParameterImpl.java b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaValueParameterImpl.java index d54774cbed419..d8a03f5ad37b7 100644 --- a/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaValueParameterImpl.java +++ 
b/compiler/frontend.java/src/org/jetbrains/kotlin/load/java/structure/impl/JavaValueParameterImpl.java @@ -16,10 +16,13 @@ package org.jetbrains.kotlin.load.java.structure.impl; +import com.intellij.psi.PsiAnnotationOwner; import com.intellij.psi.PsiParameter; import com.intellij.psi.impl.compiled.ClsParameterImpl; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import org.jetbrains.kotlin.descriptors.Visibilities; +import org.jetbrains.kotlin.descriptors.Visibility; import org.jetbrains.kotlin.load.java.structure.JavaAnnotation; import org.jetbrains.kotlin.load.java.structure.JavaType; import org.jetbrains.kotlin.load.java.structure.JavaValueParameter; @@ -28,11 +31,39 @@ import java.util.Collection; -public class JavaValueParameterImpl extends JavaElementImpl<PsiParameter> implements JavaValueParameter, JavaAnnotationOwnerImpl { +public class JavaValueParameterImpl extends JavaElementImpl<PsiParameter> + implements JavaValueParameter, JavaAnnotationOwnerImpl, JavaModifierListOwnerImpl { public JavaValueParameterImpl(@NotNull PsiParameter psiParameter) { super(psiParameter); } + @Nullable + @Override + public PsiAnnotationOwner getAnnotationOwnerPsi() { + return getPsi().getModifierList(); + } + + @Override + public boolean isAbstract() { + return false; + } + + @Override + public boolean isStatic() { + return false; + } + + @Override + public boolean isFinal() { + return false; + } + + @NotNull + @Override + public Visibility getVisibility() { + return Visibilities.LOCAL; + } + @NotNull @Override public Collection<JavaAnnotation> getAnnotations() { diff --git a/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/java/structure/JavaClassifierType.java b/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/java/structure/JavaClassifierType.java index d776e0de3c86d..02711b38a9b1d 100644 --- a/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/java/structure/JavaClassifierType.java +++ 
b/core/descriptor.loader.java/src/org/jetbrains/kotlin/load/java/structure/JavaClassifierType.java @@ -23,7 +23,7 @@ import java.util.Collection; import java.util.List; -public interface JavaClassifierType extends JavaType { +public interface JavaClassifierType extends JavaType, JavaAnnotationOwner { @Nullable JavaClassifier getClassifier(); diff --git a/core/descriptors.runtime/src/org/jetbrains/kotlin/load/java/structure/reflect/ReflectJavaClassifierType.kt b/core/descriptors.runtime/src/org/jetbrains/kotlin/load/java/structure/reflect/ReflectJavaClassifierType.kt index f18d036a06f14..f7e863fe164c4 100644 --- a/core/descriptors.runtime/src/org/jetbrains/kotlin/load/java/structure/reflect/ReflectJavaClassifierType.kt +++ b/core/descriptors.runtime/src/org/jetbrains/kotlin/load/java/structure/reflect/ReflectJavaClassifierType.kt @@ -16,10 +16,8 @@ package org.jetbrains.kotlin.load.java.structure.reflect -import org.jetbrains.kotlin.load.java.structure.JavaClassifier -import org.jetbrains.kotlin.load.java.structure.JavaClassifierType -import org.jetbrains.kotlin.load.java.structure.JavaType -import org.jetbrains.kotlin.load.java.structure.JavaTypeSubstitutor +import org.jetbrains.kotlin.load.java.structure.* +import org.jetbrains.kotlin.name.FqName import java.lang.reflect.ParameterizedType import java.lang.reflect.Type import java.lang.reflect.TypeVariable @@ -48,4 +46,12 @@ public class ReflectJavaClassifierType(public override val type: Type) : Reflect override fun getTypeArguments(): List<JavaType> { return (type as? ParameterizedType)?.getActualTypeArguments()?.map { ReflectJavaType.create(it) } ?: listOf() } + + override fun getAnnotations(): Collection<JavaAnnotation> { + return emptyList() // TODO + } + + override fun findAnnotation(fqName: FqName): JavaAnnotation? { + return null // TODO + } }
c9b6c3c36c184a03de3b5a1860f336ed834f1f04
hadoop
YARN-1635. Implemented a Leveldb based- ApplicationTimelineStore. Contributed by Billie Rinaldi. svn merge- --ignore-ancestry -c 1565868 ../../trunk/--git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1565869 13f79535-47bb-0310-9956-ffa450edef68-
a
https://github.com/apache/hadoop
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 3dc6476c85f39..80ecc54ab0999 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -778,6 +778,12 @@ <artifactId>grizzly-http-servlet</artifactId> <version>2.1.2</version> </dependency> + + <dependency> + <groupId>org.fusesource.leveldbjni</groupId> + <artifactId>leveldbjni-all</artifactId> + <version>1.8</version> + </dependency> </dependencies> </dependencyManagement> diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 4da20a6fb808a..0359628850607 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -87,6 +87,9 @@ Release 2.4.0 - UNRELEASED YARN-1566. Changed Distributed Shell to retain containers across application attempts. (Jian He via vinodkv) + YARN-1635. Implemented a Leveldb based ApplicationTimelineStore. (Billie + Rinaldi via zjshen) + IMPROVEMENTS YARN-1007. Enhance History Reader interface for Containers. (Mayank Bansal via diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSPutErrors.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSPutErrors.java index 91458e1419f0f..d330eb41dff1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSPutErrors.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSPutErrors.java @@ -94,9 +94,21 @@ public void setErrors(List<ATSPutError> errors) { @Public @Unstable public static class ATSPutError { + /** + * Error code returned when no start time can be found when putting an + * entity. This occurs when the entity does not already exist in the + * store and it is put with no start time or events specified. 
+ */ + public static final int NO_START_TIME = 1; + /** + * Error code returned if an IOException is encountered when putting an + * entity. + */ + public static final int IO_EXCEPTION = 2; + private String entityId; private String entityType; - private Integer errorCode; + private int errorCode; /** * Get the entity Id @@ -144,7 +156,7 @@ public void setEntityType(String entityType) { * @return an error code */ @XmlElement(name = "errorcode") - public Integer getErrorCode() { + public int getErrorCode() { return errorCode; } @@ -154,7 +166,7 @@ public Integer getErrorCode() { * @param errorCode * an error code */ - public void setErrorCode(Integer errorCode) { + public void setErrorCode(int errorCode) { this.errorCode = errorCode; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 5322ccd5de6a6..8c8ad16e8e46e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1041,6 +1041,10 @@ public class YarnConfiguration extends Configuration { /** ATS store class */ public static final String ATS_STORE = ATS_PREFIX + "store.class"; + /** ATS leveldb path */ + public static final String ATS_LEVELDB_PATH_PROPERTY = + ATS_PREFIX + "leveldb-apptimeline-store.path"; + //////////////////////////////// // Other Configs //////////////////////////////// diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index c50ea7b7087c5..cc8b12437ea03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -1145,7 +1145,13 @@ <property> <description>Store class name for application timeline store</description> <name>yarn.ats.store.class</name> - <value>org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.MemoryApplicationTimelineStore</value> + <value>org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.LeveldbApplicationTimelineStore</value> + </property> + + <property> + <description>Store file name for leveldb application timeline store</description> + <name>yarn.ats.leveldb-apptimeline-store.path</name> + <value>${yarn.log.dir}/ats</value> </property> <!-- Other configuration --> diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/apptimeline/TestApplicationTimelineRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/apptimeline/TestApplicationTimelineRecords.java index f2a6d3ef46131..24d1ce91e626a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/apptimeline/TestApplicationTimelineRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/apptimeline/TestApplicationTimelineRecords.java @@ -117,14 +117,14 @@ public void testATSPutErrors() { ATSPutError error1 = new ATSPutError(); error1.setEntityId("entity id 1"); error1.setEntityId("entity type 1"); - error1.setErrorCode(1); + error1.setErrorCode(ATSPutError.NO_START_TIME); atsPutErrors.addError(error1); List<ATSPutError> errors = new ArrayList<ATSPutError>(); errors.add(error1); ATSPutError error2 = new ATSPutError(); error2.setEntityId("entity id 2"); error2.setEntityId("entity type 2"); - error2.setErrorCode(2); + error2.setErrorCode(ATSPutError.IO_EXCEPTION); errors.add(error2); atsPutErrors.addErrors(errors); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml index 843afe3260abd..b8f43eca7e694 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml @@ -167,6 +167,25 @@ <artifactId>jersey-test-framework-grizzly2</artifactId> <scope>test</scope> </dependency> + + <dependency> + <groupId>org.codehaus.jackson</groupId> + <artifactId>jackson-core-asl</artifactId> + </dependency> + <dependency> + <groupId>org.codehaus.jackson</groupId> + <artifactId>jackson-mapper-asl</artifactId> + </dependency> + + <dependency> + <groupId>commons-collections</groupId> + <artifactId>commons-collections</artifactId> + </dependency> + + <dependency> + <groupId>org.fusesource.leveldbjni</groupId> + <artifactId>leveldbjni-all</artifactId> + </dependency> </dependencies> </project> diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineReader.java index 97a217dc98a0b..e448ba8bcad9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineReader.java 
@@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline; +import java.io.IOException; import java.util.Collection; import java.util.EnumSet; import java.util.Set; @@ -78,13 +79,15 @@ enum Field { * retrieve (see {@link Field}). If the set of fields * contains {@link Field#LAST_EVENT_ONLY} and not * {@link Field#EVENTS}, the most recent event for - * each entity is retrieved. + * each entity is retrieved. If null, retrieves all + * fields. * @return An {@link ATSEntities} object. + * @throws IOException */ ATSEntities getEntities(String entityType, Long limit, Long windowStart, Long windowEnd, NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters, - EnumSet<Field> fieldsToRetrieve); + EnumSet<Field> fieldsToRetrieve) throws IOException; /** * This method retrieves the entity information for a given entity. @@ -95,11 +98,13 @@ ATSEntities getEntities(String entityType, * retrieve (see {@link Field}). If the set of * fields contains {@link Field#LAST_EVENT_ONLY} and * not {@link Field#EVENTS}, the most recent event - * for each entity is retrieved. + * for each entity is retrieved. If null, retrieves + * all fields. * @return An {@link ATSEntity} object. + * @throws IOException */ ATSEntity getEntity(String entity, String entityType, EnumSet<Field> - fieldsToRetrieve); + fieldsToRetrieve) throws IOException; /** * This method retrieves the events for a list of entities all of the same @@ -118,8 +123,9 @@ ATSEntity getEntity(String entity, String entityType, EnumSet<Field> * @param eventTypes Restricts the events returned to the given types. If * null, events of all types will be returned. * @return An {@link ATSEvents} object. 
+ * @throws IOException */ ATSEvents getEntityTimelines(String entityType, SortedSet<String> entityIds, Long limit, Long windowStart, - Long windowEnd, Set<String> eventTypes); + Long windowEnd, Set<String> eventTypes) throws IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineWriter.java index b7bd0708e43e8..2a16833d98066 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineWriter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineWriter.java @@ -23,6 +23,8 @@ import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities; import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors; +import java.io.IOException; + /** * This interface is for storing application timeline information. */ @@ -37,7 +39,8 @@ public interface ApplicationTimelineWriter { * * @param data An {@link ATSEntities} object. * @return An {@link ATSPutErrors} object. 
+ * @throws IOException */ - ATSPutErrors put(ATSEntities data); + ATSPutErrors put(ATSEntities data) throws IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/EntityId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/EntityIdentifier.java similarity index 91% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/EntityId.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/EntityIdentifier.java index 26431f875693a..d22e616fd1c78 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/EntityId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/EntityIdentifier.java @@ -26,12 +26,12 @@ */ @Private @Unstable -public class EntityId implements Comparable<EntityId> { +public class EntityIdentifier implements Comparable<EntityIdentifier> { private String id; private String type; - public EntityId(String id, String type) { + public EntityIdentifier(String id, String type) { this.id = id; this.type = type; } @@ -53,7 +53,7 @@ public String getType() { } @Override - public int compareTo(EntityId other) { + public int compareTo(EntityIdentifier other) { int c = type.compareTo(other.type); if (c != 0) return c; return id.compareTo(other.id); @@ -78,7 
+78,7 @@ public boolean equals(Object obj) { return false; if (getClass() != obj.getClass()) return false; - EntityId other = (EntityId) obj; + EntityIdentifier other = (EntityIdentifier) obj; if (id == null) { if (other.id != null) return false; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/GenericObjectMapper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/GenericObjectMapper.java new file mode 100644 index 0000000000000..38ceb30c7d49a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/GenericObjectMapper.java @@ -0,0 +1,208 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.io.WritableUtils; +import org.codehaus.jackson.map.ObjectMapper; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +/** + * A utility class providing methods for serializing and deserializing + * objects. The {@link #write(Object)}, {@link #read(byte[])} and {@link + * #write(java.io.DataOutputStream, Object)}, {@link + * #read(java.io.DataInputStream)} methods are used by the + * {@link LeveldbApplicationTimelineStore} to store and retrieve arbitrary + * JSON, while the {@link #writeReverseOrderedLong} and {@link + * #readReverseOrderedLong} methods are used to sort entities in descending + * start time order. + */ [email protected] [email protected] +public class GenericObjectMapper { + private static final byte[] EMPTY_BYTES = new byte[0]; + + private static final byte LONG = 0x1; + private static final byte INTEGER = 0x2; + private static final byte DOUBLE = 0x3; + private static final byte STRING = 0x4; + private static final byte BOOLEAN = 0x5; + private static final byte LIST = 0x6; + private static final byte MAP = 0x7; + + /** + * Serializes an Object into a byte array. Along with {@link #read(byte[]) }, + * can be used to serialize an Object and deserialize it into an Object of + * the same type without needing to specify the Object's type, + * as long as it is one of the JSON-compatible objects Long, Integer, + * Double, String, Boolean, List, or Map. 
The current implementation uses + * ObjectMapper to serialize complex objects (List and Map) while using + * Writable to serialize simpler objects, to produce fewer bytes. + * + * @param o An Object + * @return A byte array representation of the Object + * @throws IOException + */ + public static byte[] write(Object o) throws IOException { + if (o == null) + return EMPTY_BYTES; + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + write(new DataOutputStream(baos), o); + return baos.toByteArray(); + } + + /** + * Serializes an Object and writes it to a DataOutputStream. Along with + * {@link #read(java.io.DataInputStream)}, can be used to serialize an Object + * and deserialize it into an Object of the same type without needing to + * specify the Object's type, as long as it is one of the JSON-compatible + * objects Long, Integer, Double, String, Boolean, List, or Map. The current + * implementation uses ObjectMapper to serialize complex objects (List and + * Map) while using Writable to serialize simpler objects, to produce fewer + * bytes. 
+ * + * @param dos A DataOutputStream + * @param o An Object + * @throws IOException + */ + public static void write(DataOutputStream dos, Object o) + throws IOException { + if (o == null) + return; + if (o instanceof Long) { + dos.write(LONG); + WritableUtils.writeVLong(dos, (Long) o); + } else if(o instanceof Integer) { + dos.write(INTEGER); + WritableUtils.writeVInt(dos, (Integer) o); + } else if(o instanceof Double) { + dos.write(DOUBLE); + dos.writeDouble((Double) o); + } else if (o instanceof String) { + dos.write(STRING); + WritableUtils.writeString(dos, (String) o); + } else if (o instanceof Boolean) { + dos.write(BOOLEAN); + dos.writeBoolean((Boolean) o); + } else if (o instanceof List) { + dos.write(LIST); + ObjectMapper mapper = new ObjectMapper(); + mapper.writeValue(dos, o); + } else if (o instanceof Map) { + dos.write(MAP); + ObjectMapper mapper = new ObjectMapper(); + mapper.writeValue(dos, o); + } else { + throw new IOException("Couldn't serialize object"); + } + } + + /** + * Deserializes an Object from a byte array created with + * {@link #write(Object)}. + * + * @param b A byte array + * @return An Object + * @throws IOException + */ + public static Object read(byte[] b) throws IOException { + if (b == null || b.length == 0) + return null; + ByteArrayInputStream bais = new ByteArrayInputStream(b); + return read(new DataInputStream(bais)); + } + + /** + * Reads an Object from a DataInputStream whose data has been written with + * {@link #write(java.io.DataOutputStream, Object)}. 
+ * + * @param dis A DataInputStream + * @return An Object, null if an unrecognized type + * @throws IOException + */ + public static Object read(DataInputStream dis) throws IOException { + byte code = (byte)dis.read(); + ObjectMapper mapper; + switch (code) { + case LONG: + return WritableUtils.readVLong(dis); + case INTEGER: + return WritableUtils.readVInt(dis); + case DOUBLE: + return dis.readDouble(); + case STRING: + return WritableUtils.readString(dis); + case BOOLEAN: + return dis.readBoolean(); + case LIST: + mapper = new ObjectMapper(); + return mapper.readValue(dis, ArrayList.class); + case MAP: + mapper = new ObjectMapper(); + return mapper.readValue(dis, HashMap.class); + default: + return null; + } + } + + /** + * Converts a long to a 8-byte array so that lexicographic ordering of the + * produced byte arrays sort the longs in descending order. + * + * @param l A long + * @return A byte array + */ + public static byte[] writeReverseOrderedLong(long l) { + byte[] b = new byte[8]; + b[0] = (byte)(0x7f ^ ((l >> 56) & 0xff)); + for (int i = 1; i < 7; i++) + b[i] = (byte)(0xff ^ ((l >> 8*(7-i)) & 0xff)); + b[7] = (byte)(0xff ^ (l & 0xff)); + return b; + } + + /** + * Reads 8 bytes from an array starting at the specified offset and + * converts them to a long. The bytes are assumed to have been created + * with {@link #writeReverseOrderedLong}. 
+ * + * @param b A byte array + * @param offset An offset into the byte array + * @return A long + */ + public static long readReverseOrderedLong(byte[] b, int offset) { + long l = b[offset] & 0xff; + for (int i = 1; i < 8; i++) { + l = l << 8; + l = l | (b[offset+i]&0xff); + } + return l ^ 0x7fffffffffffffffl; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/LeveldbApplicationTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/LeveldbApplicationTimelineStore.java new file mode 100644 index 0000000000000..c2e93cab94860 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/LeveldbApplicationTimelineStore.java @@ -0,0 +1,854 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeMap; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.collections.map.LRUMap; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvent; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents.ATSEventsOfOneEntity; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors.ATSPutError; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.fusesource.leveldbjni.JniDBFactory; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBIterator; +import org.iq80.leveldb.Options; +import org.iq80.leveldb.WriteBatch; + +import static org.apache.hadoop.yarn.server.applicationhistoryservice + .apptimeline.GenericObjectMapper.readReverseOrderedLong; +import static org.apache.hadoop.yarn.server.applicationhistoryservice + 
.apptimeline.GenericObjectMapper.writeReverseOrderedLong; + +/** + * An implementation of an application timeline store backed by leveldb. + */ [email protected] [email protected] +public class LeveldbApplicationTimelineStore extends AbstractService + implements ApplicationTimelineStore { + private static final Log LOG = LogFactory + .getLog(LeveldbApplicationTimelineStore.class); + + private static final String FILENAME = "leveldb-apptimeline-store.ldb"; + + private static final byte[] START_TIME_LOOKUP_PREFIX = "k".getBytes(); + private static final byte[] ENTITY_ENTRY_PREFIX = "e".getBytes(); + private static final byte[] INDEXED_ENTRY_PREFIX = "i".getBytes(); + + private static final byte[] PRIMARY_FILTER_COLUMN = "f".getBytes(); + private static final byte[] OTHER_INFO_COLUMN = "i".getBytes(); + private static final byte[] RELATED_COLUMN = "r".getBytes(); + private static final byte[] TIME_COLUMN = "t".getBytes(); + + private static final byte[] EMPTY_BYTES = new byte[0]; + + private static final int START_TIME_CACHE_SIZE = 10000; + + @SuppressWarnings("unchecked") + private final Map<EntityIdentifier, Long> startTimeCache = + Collections.synchronizedMap(new LRUMap(START_TIME_CACHE_SIZE)); + + private DB db; + + public LeveldbApplicationTimelineStore() { + super(LeveldbApplicationTimelineStore.class.getName()); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + Options options = new Options(); + options.createIfMissing(true); + JniDBFactory factory = new JniDBFactory(); + String path = conf.get(YarnConfiguration.ATS_LEVELDB_PATH_PROPERTY); + File p = new File(path); + if (!p.exists()) + if (!p.mkdirs()) + throw new IOException("Couldn't create directory for leveldb " + + "application timeline store " + path); + LOG.info("Using leveldb path " + path); + db = factory.open(new File(path, FILENAME), options); + super.serviceInit(conf); + } + + @Override + protected void serviceStop() throws Exception { + IOUtils.cleanup(LOG, 
db); + super.serviceStop(); + } + + private static class KeyBuilder { + private static final int MAX_NUMBER_OF_KEY_ELEMENTS = 10; + private byte[][] b; + private boolean[] useSeparator; + private int index; + private int length; + + public KeyBuilder(int size) { + b = new byte[size][]; + useSeparator = new boolean[size]; + index = 0; + length = 0; + } + + public static KeyBuilder newInstance() { + return new KeyBuilder(MAX_NUMBER_OF_KEY_ELEMENTS); + } + + public KeyBuilder add(String s) { + return add(s.getBytes(), true); + } + + public KeyBuilder add(byte[] t) { + return add(t, false); + } + + public KeyBuilder add(byte[] t, boolean sep) { + b[index] = t; + useSeparator[index] = sep; + length += t.length; + if (sep) + length++; + index++; + return this; + } + + public byte[] getBytes() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(length); + for (int i = 0; i < index; i++) { + baos.write(b[i]); + if (i < index-1 && useSeparator[i]) + baos.write(0x0); + } + return baos.toByteArray(); + } + + public byte[] getBytesForLookup() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(length); + for (int i = 0; i < index; i++) { + baos.write(b[i]); + if (useSeparator[i]) + baos.write(0x0); + } + return baos.toByteArray(); + } + } + + private static class KeyParser { + private final byte[] b; + private int offset; + + public KeyParser(byte[] b, int offset) { + this.b = b; + this.offset = offset; + } + + public String getNextString() throws IOException { + if (offset >= b.length) + throw new IOException( + "tried to read nonexistent string from byte array"); + int i = 0; + while (offset+i < b.length && b[offset+i] != 0x0) + i++; + String s = new String(b, offset, i); + offset = offset + i + 1; + return s; + } + + public long getNextLong() throws IOException { + if (offset+8 >= b.length) + throw new IOException("byte array ran out when trying to read long"); + long l = readReverseOrderedLong(b, offset); + offset += 8; + 
return l; + } + + public int getOffset() { + return offset; + } + } + + @Override + public ATSEntity getEntity(String entity, String entityType, + EnumSet<Field> fields) throws IOException { + DBIterator iterator = null; + try { + byte[] revStartTime = getStartTime(entity, entityType, null, null, null); + if (revStartTime == null) + return null; + byte[] prefix = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX) + .add(entityType).add(revStartTime).add(entity).getBytesForLookup(); + + iterator = db.iterator(); + iterator.seek(prefix); + + return getEntity(entity, entityType, + readReverseOrderedLong(revStartTime, 0), fields, iterator, prefix, + prefix.length); + } finally { + IOUtils.cleanup(LOG, iterator); + } + } + + /** + * Read entity from a db iterator. If no information is found in the + * specified fields for this entity, return null. + */ + private static ATSEntity getEntity(String entity, String entityType, + Long startTime, EnumSet<Field> fields, DBIterator iterator, + byte[] prefix, int prefixlen) throws IOException { + if (fields == null) + fields = EnumSet.allOf(Field.class); + + ATSEntity atsEntity = new ATSEntity(); + boolean events = false; + boolean lastEvent = false; + if (fields.contains(Field.EVENTS)) { + events = true; + atsEntity.setEvents(new ArrayList<ATSEvent>()); + } else if (fields.contains(Field.LAST_EVENT_ONLY)) { + lastEvent = true; + atsEntity.setEvents(new ArrayList<ATSEvent>()); + } + else { + atsEntity.setEvents(null); + } + boolean relatedEntities = false; + if (fields.contains(Field.RELATED_ENTITIES)) { + relatedEntities = true; + atsEntity.setRelatedEntities(new HashMap<String, List<String>>()); + } else { + atsEntity.setRelatedEntities(null); + } + boolean primaryFilters = false; + if (fields.contains(Field.PRIMARY_FILTERS)) { + primaryFilters = true; + atsEntity.setPrimaryFilters(new HashMap<String, Object>()); + } else { + atsEntity.setPrimaryFilters(null); + } + boolean otherInfo = false; + if 
(fields.contains(Field.OTHER_INFO)) { + otherInfo = true; + atsEntity.setOtherInfo(new HashMap<String, Object>()); + } else { + atsEntity.setOtherInfo(null); + } + + // iterate through the entity's entry, parsing information if it is part + // of a requested field + for (; iterator.hasNext(); iterator.next()) { + byte[] key = iterator.peekNext().getKey(); + if (!prefixMatches(prefix, prefixlen, key)) + break; + if (key[prefixlen] == PRIMARY_FILTER_COLUMN[0]) { + if (primaryFilters) { + atsEntity.addPrimaryFilter(parseRemainingKey(key, + prefixlen + PRIMARY_FILTER_COLUMN.length), + GenericObjectMapper.read(iterator.peekNext().getValue())); + } + } else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) { + if (otherInfo) { + atsEntity.addOtherInfo(parseRemainingKey(key, + prefixlen + OTHER_INFO_COLUMN.length), + GenericObjectMapper.read(iterator.peekNext().getValue())); + } + } else if (key[prefixlen] == RELATED_COLUMN[0]) { + if (relatedEntities) { + addRelatedEntity(atsEntity, key, + prefixlen + RELATED_COLUMN.length); + } + } else if (key[prefixlen] == TIME_COLUMN[0]) { + if (events || (lastEvent && atsEntity.getEvents().size() == 0)) { + ATSEvent event = getEntityEvent(null, key, prefixlen + + TIME_COLUMN.length, iterator.peekNext().getValue()); + if (event != null) { + atsEntity.addEvent(event); + } + } + } else { + LOG.warn(String.format("Found unexpected column for entity %s of " + + "type %s (0x%02x)", entity, entityType, key[prefixlen])); + } + } + + atsEntity.setEntityId(entity); + atsEntity.setEntityType(entityType); + atsEntity.setStartTime(startTime); + + return atsEntity; + } + + @Override + public ATSEvents getEntityTimelines(String entityType, + SortedSet<String> entityIds, Long limit, Long windowStart, + Long windowEnd, Set<String> eventType) throws IOException { + ATSEvents atsEvents = new ATSEvents(); + if (entityIds == null || entityIds.isEmpty()) + return atsEvents; + // create a lexicographically-ordered map from start time to entities + Map<byte[], 
List<EntityIdentifier>> startTimeMap = new TreeMap<byte[], + List<EntityIdentifier>>(new Comparator<byte[]>() { + @Override + public int compare(byte[] o1, byte[] o2) { + return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0, + o2.length); + } + }); + DBIterator iterator = null; + try { + // look up start times for the specified entities + // skip entities with no start time + for (String entity : entityIds) { + byte[] startTime = getStartTime(entity, entityType, null, null, null); + if (startTime != null) { + List<EntityIdentifier> entities = startTimeMap.get(startTime); + if (entities == null) { + entities = new ArrayList<EntityIdentifier>(); + startTimeMap.put(startTime, entities); + } + entities.add(new EntityIdentifier(entity, entityType)); + } + } + for (Entry<byte[], List<EntityIdentifier>> entry : + startTimeMap.entrySet()) { + // look up the events matching the given parameters (limit, + // start time, end time, event types) for entities whose start times + // were found and add the entities to the return list + byte[] revStartTime = entry.getKey(); + for (EntityIdentifier entity : entry.getValue()) { + ATSEventsOfOneEntity atsEntity = new ATSEventsOfOneEntity(); + atsEntity.setEntityId(entity.getId()); + atsEntity.setEntityType(entityType); + atsEvents.addEvent(atsEntity); + KeyBuilder kb = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX) + .add(entityType).add(revStartTime).add(entity.getId()) + .add(TIME_COLUMN); + byte[] prefix = kb.getBytesForLookup(); + if (windowEnd == null) { + windowEnd = Long.MAX_VALUE; + } + byte[] revts = writeReverseOrderedLong(windowEnd); + kb.add(revts); + byte[] first = kb.getBytesForLookup(); + byte[] last = null; + if (windowStart != null) { + last = KeyBuilder.newInstance().add(prefix) + .add(writeReverseOrderedLong(windowStart)).getBytesForLookup(); + } + if (limit == null) { + limit = DEFAULT_LIMIT; + } + iterator = db.iterator(); + for (iterator.seek(first); atsEntity.getEvents().size() < limit && + 
iterator.hasNext(); iterator.next()) { + byte[] key = iterator.peekNext().getKey(); + if (!prefixMatches(prefix, prefix.length, key) || (last != null && + WritableComparator.compareBytes(key, 0, key.length, last, 0, + last.length) > 0)) + break; + ATSEvent event = getEntityEvent(eventType, key, prefix.length, + iterator.peekNext().getValue()); + if (event != null) + atsEntity.addEvent(event); + } + } + } + } finally { + IOUtils.cleanup(LOG, iterator); + } + return atsEvents; + } + + /** + * Returns true if the byte array begins with the specified prefix. + */ + private static boolean prefixMatches(byte[] prefix, int prefixlen, + byte[] b) { + if (b.length < prefixlen) + return false; + return WritableComparator.compareBytes(prefix, 0, prefixlen, b, 0, + prefixlen) == 0; + } + + @Override + public ATSEntities getEntities(String entityType, + Long limit, Long windowStart, Long windowEnd, + NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters, + EnumSet<Field> fields) throws IOException { + if (primaryFilter == null) { + // if no primary filter is specified, prefix the lookup with + // ENTITY_ENTRY_PREFIX + return getEntityByTime(ENTITY_ENTRY_PREFIX, entityType, limit, + windowStart, windowEnd, secondaryFilters, fields); + } else { + // if a primary filter is specified, prefix the lookup with + // INDEXED_ENTRY_PREFIX + primaryFilterName + primaryFilterValue + + // ENTITY_ENTRY_PREFIX + byte[] base = KeyBuilder.newInstance().add(INDEXED_ENTRY_PREFIX) + .add(primaryFilter.getName()) + .add(GenericObjectMapper.write(primaryFilter.getValue()), true) + .add(ENTITY_ENTRY_PREFIX).getBytesForLookup(); + return getEntityByTime(base, entityType, limit, windowStart, windowEnd, + secondaryFilters, fields); + } + } + + /** + * Retrieves a list of entities satisfying given parameters. 
+ * + * @param base A byte array prefix for the lookup + * @param entityType The type of the entity + * @param limit A limit on the number of entities to return + * @param starttime The earliest entity start time to retrieve (exclusive) + * @param endtime The latest entity start time to retrieve (inclusive) + * @param secondaryFilters Filter pairs that the entities should match + * @param fields The set of fields to retrieve + * @return A list of entities + * @throws IOException + */ + private ATSEntities getEntityByTime(byte[] base, + String entityType, Long limit, Long starttime, Long endtime, + Collection<NameValuePair> secondaryFilters, EnumSet<Field> fields) + throws IOException { + DBIterator iterator = null; + try { + KeyBuilder kb = KeyBuilder.newInstance().add(base).add(entityType); + // only db keys matching the prefix (base + entity type) will be parsed + byte[] prefix = kb.getBytesForLookup(); + if (endtime == null) { + // if end time is null, place no restriction on end time + endtime = Long.MAX_VALUE; + } + // using end time, construct a first key that will be seeked to + byte[] revts = writeReverseOrderedLong(endtime); + kb.add(revts); + byte[] first = kb.getBytesForLookup(); + byte[] last = null; + if (starttime != null) { + // if start time is not null, set a last key that will not be + // iterated past + last = KeyBuilder.newInstance().add(base).add(entityType) + .add(writeReverseOrderedLong(starttime)).getBytesForLookup(); + } + if (limit == null) { + // if limit is not specified, use the default + limit = DEFAULT_LIMIT; + } + + ATSEntities atsEntities = new ATSEntities(); + iterator = db.iterator(); + iterator.seek(first); + // iterate until one of the following conditions is met: limit is + // reached, there are no more keys, the key prefix no longer matches, + // or a start time has been specified and reached/exceeded + while (atsEntities.getEntities().size() < limit && iterator.hasNext()) { + byte[] key = iterator.peekNext().getKey(); + if 
(!prefixMatches(prefix, prefix.length, key) || (last != null && + WritableComparator.compareBytes(key, 0, key.length, last, 0, + last.length) > 0)) + break; + // read the start time and entity from the current key + KeyParser kp = new KeyParser(key, prefix.length); + Long startTime = kp.getNextLong(); + String entity = kp.getNextString(); + // parse the entity that owns this key, iterating over all keys for + // the entity + ATSEntity atsEntity = getEntity(entity, entityType, startTime, + fields, iterator, key, kp.getOffset()); + if (atsEntity == null) + continue; + // determine if the retrieved entity matches the provided secondary + // filters, and if so add it to the list of entities to return + boolean filterPassed = true; + if (secondaryFilters != null) { + for (NameValuePair filter : secondaryFilters) { + Object v = atsEntity.getOtherInfo().get(filter.getName()); + if (v == null) + v = atsEntity.getPrimaryFilters().get(filter.getName()); + if (v == null || !v.equals(filter.getValue())) { + filterPassed = false; + break; + } + } + } + if (filterPassed) + atsEntities.addEntity(atsEntity); + } + return atsEntities; + } finally { + IOUtils.cleanup(LOG, iterator); + } + } + + /** + * Put a single entity. If there is an error, add a PutError to the given + * response. 
+ */ + private void put(ATSEntity atsEntity, ATSPutErrors response) { + WriteBatch writeBatch = null; + try { + writeBatch = db.createWriteBatch(); + List<ATSEvent> events = atsEntity.getEvents(); + // look up the start time for the entity + byte[] revStartTime = getStartTime(atsEntity.getEntityId(), + atsEntity.getEntityType(), atsEntity.getStartTime(), events, + writeBatch); + if (revStartTime == null) { + // if no start time is found, add an error and return + ATSPutError error = new ATSPutError(); + error.setEntityId(atsEntity.getEntityId()); + error.setEntityType(atsEntity.getEntityType()); + error.setErrorCode(ATSPutError.NO_START_TIME); + response.addError(error); + return; + } + Long revStartTimeLong = readReverseOrderedLong(revStartTime, 0); + Map<String, Object> primaryFilters = atsEntity.getPrimaryFilters(); + + // write event entries + if (events != null && !events.isEmpty()) { + for (ATSEvent event : events) { + byte[] revts = writeReverseOrderedLong(event.getTimestamp()); + byte[] key = createEntityEventKey(atsEntity.getEntityId(), + atsEntity.getEntityType(), revStartTime, revts, + event.getEventType()); + byte[] value = GenericObjectMapper.write(event.getEventInfo()); + writeBatch.put(key, value); + writePrimaryFilterEntries(writeBatch, primaryFilters, key, value); + } + } + + // write related entity entries + Map<String,List<String>> relatedEntities = + atsEntity.getRelatedEntities(); + if (relatedEntities != null && !relatedEntities.isEmpty()) { + for (Entry<String, List<String>> relatedEntityList : + relatedEntities.entrySet()) { + String relatedEntityType = relatedEntityList.getKey(); + for (String relatedEntityId : relatedEntityList.getValue()) { + // look up start time of related entity + byte[] relatedEntityStartTime = getStartTime(relatedEntityId, + relatedEntityType, null, null, writeBatch); + if (relatedEntityStartTime == null) { + // if start time is not found, set start time of the related + // entity to the start time of this entity, 
and write it to the + // db and the cache + relatedEntityStartTime = revStartTime; + writeBatch.put(createStartTimeLookupKey(relatedEntityId, + relatedEntityType), relatedEntityStartTime); + startTimeCache.put(new EntityIdentifier(relatedEntityId, + relatedEntityType), revStartTimeLong); + } + // write reverse entry (related entity -> entity) + byte[] key = createReleatedEntityKey(relatedEntityId, + relatedEntityType, relatedEntityStartTime, + atsEntity.getEntityId(), atsEntity.getEntityType()); + writeBatch.put(key, EMPTY_BYTES); + // TODO: write forward entry (entity -> related entity)? + } + } + } + + // write primary filter entries + if (primaryFilters != null && !primaryFilters.isEmpty()) { + for (Entry<String, Object> primaryFilter : primaryFilters.entrySet()) { + byte[] key = createPrimaryFilterKey(atsEntity.getEntityId(), + atsEntity.getEntityType(), revStartTime, primaryFilter.getKey()); + byte[] value = GenericObjectMapper.write(primaryFilter.getValue()); + writeBatch.put(key, value); + writePrimaryFilterEntries(writeBatch, primaryFilters, key, value); + } + } + + // write other info entries + Map<String, Object> otherInfo = atsEntity.getOtherInfo(); + if (otherInfo != null && !otherInfo.isEmpty()) { + for (Entry<String, Object> i : otherInfo.entrySet()) { + byte[] key = createOtherInfoKey(atsEntity.getEntityId(), + atsEntity.getEntityType(), revStartTime, i.getKey()); + byte[] value = GenericObjectMapper.write(i.getValue()); + writeBatch.put(key, value); + writePrimaryFilterEntries(writeBatch, primaryFilters, key, value); + } + } + db.write(writeBatch); + } catch (IOException e) { + LOG.error("Error putting entity " + atsEntity.getEntityId() + + " of type " + atsEntity.getEntityType(), e); + ATSPutError error = new ATSPutError(); + error.setEntityId(atsEntity.getEntityId()); + error.setEntityType(atsEntity.getEntityType()); + error.setErrorCode(ATSPutError.IO_EXCEPTION); + response.addError(error); + } finally { + IOUtils.cleanup(LOG, writeBatch); + } + 
} + + /** + * For a given key / value pair that has been written to the db, + * write additional entries to the db for each primary filter. + */ + private static void writePrimaryFilterEntries(WriteBatch writeBatch, + Map<String, Object> primaryFilters, byte[] key, byte[] value) + throws IOException { + if (primaryFilters != null && !primaryFilters.isEmpty()) { + for (Entry<String, Object> p : primaryFilters.entrySet()) { + writeBatch.put(addPrimaryFilterToKey(p.getKey(), p.getValue(), + key), value); + } + } + } + + @Override + public ATSPutErrors put(ATSEntities atsEntities) { + ATSPutErrors response = new ATSPutErrors(); + for (ATSEntity atsEntity : atsEntities.getEntities()) { + put(atsEntity, response); + } + return response; + } + + /** + * Get the unique start time for a given entity as a byte array that sorts + * the timestamps in reverse order (see {@link + * GenericObjectMapper#writeReverseOrderedLong(long)}). + * + * @param entityId The id of the entity + * @param entityType The type of the entity + * @param startTime The start time of the entity, or null + * @param events A list of events for the entity, or null + * @param writeBatch A leveldb write batch, if the method is called by a + * put as opposed to a get + * @return A byte array + * @throws IOException + */ + private byte[] getStartTime(String entityId, String entityType, + Long startTime, List<ATSEvent> events, WriteBatch writeBatch) + throws IOException { + EntityIdentifier entity = new EntityIdentifier(entityId, entityType); + if (startTime == null) { + // start time is not provided, so try to look it up + if (startTimeCache.containsKey(entity)) { + // found the start time in the cache + startTime = startTimeCache.get(entity); + } else { + // try to look up the start time in the db + byte[] b = createStartTimeLookupKey(entity.getId(), entity.getType()); + byte[] v = db.get(b); + if (v == null) { + // did not find the start time in the db + // if this is a put, try to set it from the provided 
events + if (events == null || writeBatch == null) { + // no events, or not a put, so return null + return null; + } + Long min = Long.MAX_VALUE; + for (ATSEvent e : events) + if (min > e.getTimestamp()) + min = e.getTimestamp(); + startTime = min; + // selected start time as minimum timestamp of provided events + // write start time to db and cache + writeBatch.put(b, writeReverseOrderedLong(startTime)); + startTimeCache.put(entity, startTime); + } else { + // found the start time in the db + startTime = readReverseOrderedLong(v, 0); + if (writeBatch != null) { + // if this is a put, re-add the start time to the cache + startTimeCache.put(entity, startTime); + } + } + } + } else { + // start time is provided + // TODO: verify start time in db as well as cache? + if (startTimeCache.containsKey(entity)) { + // if the start time is already in the cache, + // and it is different from the provided start time, + // use the one from the cache + if (!startTime.equals(startTimeCache.get(entity))) + startTime = startTimeCache.get(entity); + } else if (writeBatch != null) { + // if this is a put, write the provided start time to the db and the + // cache + byte[] b = createStartTimeLookupKey(entity.getId(), entity.getType()); + writeBatch.put(b, writeReverseOrderedLong(startTime)); + startTimeCache.put(entity, startTime); + } + } + return writeReverseOrderedLong(startTime); + } + + /** + * Creates a key for looking up the start time of a given entity, + * of the form START_TIME_LOOKUP_PREFIX + entitytype + entity. + */ + private static byte[] createStartTimeLookupKey(String entity, + String entitytype) throws IOException { + return KeyBuilder.newInstance().add(START_TIME_LOOKUP_PREFIX) + .add(entitytype).add(entity).getBytes(); + } + + /** + * Creates an index entry for the given key of the form + * INDEXED_ENTRY_PREFIX + primaryfiltername + primaryfiltervalue + key. 
+ */ + private static byte[] addPrimaryFilterToKey(String primaryFilterName, + Object primaryFilterValue, byte[] key) throws IOException { + return KeyBuilder.newInstance().add(INDEXED_ENTRY_PREFIX) + .add(primaryFilterName) + .add(GenericObjectMapper.write(primaryFilterValue), true).add(key) + .getBytes(); + } + + /** + * Creates an event key, serializing ENTITY_ENTRY_PREFIX + entitytype + + * revstarttime + entity + TIME_COLUMN + reveventtimestamp + eventtype. + */ + private static byte[] createEntityEventKey(String entity, String entitytype, + byte[] revStartTime, byte[] reveventtimestamp, String eventtype) + throws IOException { + return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX) + .add(entitytype).add(revStartTime).add(entity).add(TIME_COLUMN) + .add(reveventtimestamp).add(eventtype).getBytes(); + } + + /** + * Creates an event object from the given key, offset, and value. If the + * event type is not contained in the specified set of event types, + * returns null. + */ + private static ATSEvent getEntityEvent(Set<String> eventTypes, byte[] key, + int offset, byte[] value) throws IOException { + KeyParser kp = new KeyParser(key, offset); + long ts = kp.getNextLong(); + String tstype = kp.getNextString(); + if (eventTypes == null || eventTypes.contains(tstype)) { + ATSEvent event = new ATSEvent(); + event.setTimestamp(ts); + event.setEventType(tstype); + Object o = GenericObjectMapper.read(value); + if (o == null) { + event.setEventInfo(null); + } else if (o instanceof Map) { + @SuppressWarnings("unchecked") + Map<String, Object> m = (Map<String, Object>) o; + event.setEventInfo(m); + } else { + throw new IOException("Couldn't deserialize event info map"); + } + return event; + } + return null; + } + + /** + * Creates a primary filter key, serializing ENTITY_ENTRY_PREFIX + + * entitytype + revstarttime + entity + PRIMARY_FILTER_COLUMN + name. 
+ */ + private static byte[] createPrimaryFilterKey(String entity, + String entitytype, byte[] revStartTime, String name) throws IOException { + return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entitytype) + .add(revStartTime).add(entity).add(PRIMARY_FILTER_COLUMN).add(name) + .getBytes(); + } + + /** + * Creates an other info key, serializing ENTITY_ENTRY_PREFIX + entitytype + + * revstarttime + entity + OTHER_INFO_COLUMN + name. + */ + private static byte[] createOtherInfoKey(String entity, String entitytype, + byte[] revStartTime, String name) throws IOException { + return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entitytype) + .add(revStartTime).add(entity).add(OTHER_INFO_COLUMN).add(name) + .getBytes(); + } + + /** + * Creates a string representation of the byte array from the given offset + * to the end of the array (for parsing other info keys). + */ + private static String parseRemainingKey(byte[] b, int offset) { + return new String(b, offset, b.length - offset); + } + + /** + * Creates a related entity key, serializing ENTITY_ENTRY_PREFIX + + * entitytype + revstarttime + entity + RELATED_COLUMN + relatedentitytype + + * relatedentity. + */ + private static byte[] createReleatedEntityKey(String entity, + String entitytype, byte[] revStartTime, String relatedEntity, + String relatedEntityType) throws IOException { + return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entitytype) + .add(revStartTime).add(entity).add(RELATED_COLUMN) + .add(relatedEntityType).add(relatedEntity).getBytes(); + } + + /** + * Parses the related entity from the given key at the given offset and + * adds it to the given entity. 
+ */ + private static void addRelatedEntity(ATSEntity atsEntity, byte[] key, + int offset) throws IOException { + KeyParser kp = new KeyParser(key, offset); + String type = kp.getNextString(); + String id = kp.getNextString(); + atsEntity.addRelatedEntity(type, id); + } + + /** + * Clears the cache to test reloading start times from leveldb (only for + * testing). + */ + @VisibleForTesting + void clearStartTimeCache() { + startTimeCache.clear(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/MemoryApplicationTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/MemoryApplicationTimelineStore.java index 45f0a11d764d0..1c8e392cfe289 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/MemoryApplicationTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/MemoryApplicationTimelineStore.java @@ -53,8 +53,8 @@ public class MemoryApplicationTimelineStore extends AbstractService implements ApplicationTimelineStore { - private Map<EntityId, ATSEntity> entities = - new HashMap<EntityId, ATSEntity>(); + private Map<EntityIdentifier, ATSEntity> entities = + new HashMap<EntityIdentifier, ATSEntity>(); public MemoryApplicationTimelineStore() { super(MemoryApplicationTimelineStore.class.getName()); @@ -125,7 +125,7 @@ public ATSEntity getEntity(String entityId, String entityType, if (fieldsToRetrieve == null) { fieldsToRetrieve = EnumSet.allOf(Field.class); } - ATSEntity entity = entities.get(new 
EntityId(entityId, entityType)); + ATSEntity entity = entities.get(new EntityIdentifier(entityId, entityType)); if (entity == null) { return null; } else { @@ -152,7 +152,7 @@ public ATSEvents getEntityTimelines(String entityType, windowEnd = Long.MAX_VALUE; } for (String entityId : entityIds) { - EntityId entityID = new EntityId(entityId, entityType); + EntityIdentifier entityID = new EntityIdentifier(entityId, entityType); ATSEntity entity = entities.get(entityID); if (entity == null) { continue; @@ -184,8 +184,8 @@ public ATSEvents getEntityTimelines(String entityType, public ATSPutErrors put(ATSEntities data) { ATSPutErrors errors = new ATSPutErrors(); for (ATSEntity entity : data.getEntities()) { - EntityId entityId = - new EntityId(entity.getEntityId(), entity.getEntityType()); + EntityIdentifier entityId = + new EntityIdentifier(entity.getEntityId(), entity.getEntityType()); // store entity info in memory ATSEntity existingEntity = entities.get(entityId); if (existingEntity == null) { @@ -210,7 +210,7 @@ public ATSPutErrors put(ATSEntities data) { ATSPutError error = new ATSPutError(); error.setEntityId(entityId.getId()); error.setEntityType(entityId.getType()); - error.setErrorCode(1); + error.setErrorCode(ATSPutError.NO_START_TIME); errors.addError(error); entities.remove(entityId); continue; @@ -242,12 +242,20 @@ public ATSPutErrors put(ATSEntities data) { continue; } for (String idStr : partRelatedEntities.getValue()) { - EntityId relatedEntityId = - new EntityId(idStr, partRelatedEntities.getKey()); + EntityIdentifier relatedEntityId = + new EntityIdentifier(idStr, partRelatedEntities.getKey()); ATSEntity relatedEntity = entities.get(relatedEntityId); if (relatedEntity != null) { relatedEntity.addRelatedEntity( existingEntity.getEntityType(), existingEntity.getEntityId()); + } else { + relatedEntity = new ATSEntity(); + relatedEntity.setEntityId(relatedEntityId.getId()); + relatedEntity.setEntityType(relatedEntityId.getType()); + 
relatedEntity.setStartTime(existingEntity.getStartTime()); + relatedEntity.addRelatedEntity(existingEntity.getEntityType(), + existingEntity.getEntityId()); + entities.put(relatedEntityId, relatedEntity); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ATSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ATSWebServices.java index 4ea501d89a845..063b67afd07fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ATSWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ATSWebServices.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.EnumSet; @@ -45,6 +46,8 @@ import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities; @@ -64,6 +67,8 @@ //TODO: support XML serialization/deserialization public class ATSWebServices { + private static final Log LOG = LogFactory.getLog(ATSWebServices.class); + private ApplicationTimelineStore store; @Inject @@ -143,6 +148,10 @@ public ATSEntities getEntities( "windowStart, windowEnd or limit is not a numeric value."); } catch 
(IllegalArgumentException e) { throw new BadRequestException("requested invalid field."); + } catch (IOException e) { + LOG.error("Error getting entities", e); + throw new WebApplicationException(e, + Response.Status.INTERNAL_SERVER_ERROR); } if (entities == null) { return new ATSEntities(); @@ -171,6 +180,10 @@ public ATSEntity getEntity( } catch (IllegalArgumentException e) { throw new BadRequestException( "requested invalid field."); + } catch (IOException e) { + LOG.error("Error getting entity", e); + throw new WebApplicationException(e, + Response.Status.INTERNAL_SERVER_ERROR); } if (entity == null) { throw new WebApplicationException(Response.Status.NOT_FOUND); @@ -206,6 +219,10 @@ public ATSEvents getEvents( } catch (NumberFormatException e) { throw new BadRequestException( "windowStart, windowEnd or limit is not a numeric value."); + } catch (IOException e) { + LOG.error("Error getting entity timelines", e); + throw new WebApplicationException(e, + Response.Status.INTERNAL_SERVER_ERROR); } if (events == null) { return new ATSEvents(); @@ -228,7 +245,13 @@ public ATSPutErrors postEntities( if (entities == null) { return new ATSPutErrors(); } - return store.put(entities); + try { + return store.put(entities); + } catch (IOException e) { + LOG.error("Error putting entities", e); + throw new WebApplicationException(e, + Response.Status.INTERNAL_SERVER_ERROR); + } } private void init(HttpServletResponse response) { @@ -275,7 +298,17 @@ private static EnumSet<Field> parseFieldsStr(String str, String delimiter) { String[] strs = str.split(delimiter); List<Field> fieldList = new ArrayList<Field>(); for (String s : strs) { - fieldList.add(Field.valueOf(s.toUpperCase())); + s = s.trim().toUpperCase(); + if (s.equals("EVENTS")) + fieldList.add(Field.EVENTS); + else if (s.equals("LASTEVENTONLY")) + fieldList.add(Field.LAST_EVENT_ONLY); + else if (s.equals("RELATEDENTITIES")) + fieldList.add(Field.RELATED_ENTITIES); + else if (s.equals("PRIMARYFILTERS")) + 
fieldList.add(Field.PRIMARY_FILTERS); + else if (s.equals("OTHERINFO")) + fieldList.add(Field.OTHER_INFO); } if (fieldList.size() == 0) return null; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineStoreTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineStoreTestUtils.java index 5825af192b8c2..9afa5c0234a07 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineStoreTestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineStoreTestUtils.java @@ -21,6 +21,8 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import java.io.File; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -71,7 +73,7 @@ public class ApplicationTimelineStoreTestUtils { /** * Load test data into the given store */ - protected void loadTestData() { + protected void loadTestData() throws IOException { ATSEntities atsEntities = new ATSEntities(); Map<String, Object> primaryFilters = new HashMap<String, Object>(); primaryFilters.put("user", "username"); @@ -126,7 +128,7 @@ protected void loadTestData() { response = store.put(atsEntities); assertEquals(0, response.getErrors().size()); atsEntities.setEntities(Collections.singletonList(createEntity(entity1b, - entityType1, 123l, Collections.singletonList(ev2), null, + entityType1, 789l, Collections.singletonList(ev2), 
null, primaryFilters, otherInfo2))); response = store.put(atsEntities); assertEquals(0, response.getErrors().size()); @@ -138,11 +140,11 @@ protected void loadTestData() { ATSPutError error = response.getErrors().get(0); assertEquals("badentityid", error.getEntityId()); assertEquals("badentity", error.getEntityType()); - assertEquals((Integer) 1, error.getErrorCode()); + assertEquals(ATSPutError.NO_START_TIME, error.getErrorCode()); } /** - * Load veification data + * Load verification data */ protected void loadVerificationData() throws Exception { userFilter = new NameValuePair("user", @@ -197,7 +199,7 @@ protected void loadVerificationData() throws Exception { events2.add(ev4); } - public void testGetSingleEntity() { + public void testGetSingleEntity() throws IOException { // test getting entity info verifyEntityInfo(null, null, null, null, null, null, store.getEntity("id_1", "type_2", EnumSet.allOf(Field.class))); @@ -222,6 +224,10 @@ public void testGetSingleEntity() { null, null, null, store.getEntity(entity1, entityType1, EnumSet.of(Field.LAST_EVENT_ONLY))); + verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES, + primaryFilters, otherInfo, store.getEntity(entity1b, entityType1, + null)); + verifyEntityInfo(entity1, entityType1, null, null, primaryFilters, null, store.getEntity(entity1, entityType1, EnumSet.of(Field.PRIMARY_FILTERS))); @@ -234,7 +240,7 @@ public void testGetSingleEntity() { EnumSet.of(Field.RELATED_ENTITIES))); } - public void testGetEntities() { + public void testGetEntities() throws IOException { // test getting entities assertEquals("nonzero entities size for nonexistent type", 0, store.getEntities("type_0", null, null, null, null, null, @@ -305,7 +311,7 @@ public void testGetEntities() { primaryFilters, otherInfo, entities.get(1)); } - public void testGetEntitiesWithPrimaryFilters() { + public void testGetEntitiesWithPrimaryFilters() throws IOException { // test using primary filter assertEquals("nonzero entities size for 
primary filter", 0, store.getEntities("type_1", null, null, null, @@ -361,7 +367,7 @@ public void testGetEntitiesWithPrimaryFilters() { primaryFilters, otherInfo, entities.get(1)); } - public void testGetEntitiesWithSecondaryFilters() { + public void testGetEntitiesWithSecondaryFilters() throws IOException { // test using secondary filter List<ATSEntity> entities = store.getEntities("type_1", null, null, null, null, goodTestingFilters, EnumSet.allOf(Field.class)).getEntities(); @@ -388,7 +394,7 @@ public void testGetEntitiesWithSecondaryFilters() { assertEquals(0, entities.size()); } - public void testGetEvents() { + public void testGetEvents() throws IOException { // test getting entity timelines SortedSet<String> sortedSet = new TreeSet<String>(); sortedSet.add(entity1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestGenericObjectMapper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestGenericObjectMapper.java new file mode 100644 index 0000000000000..4bb453a41be4d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestGenericObjectMapper.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.io.WritableComparator; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; + [email protected] [email protected] +public class TestGenericObjectMapper { + + @Test + public void testEncoding() { + testEncoding(Long.MAX_VALUE); + testEncoding(Long.MIN_VALUE); + testEncoding(0l); + testEncoding(128l); + testEncoding(256l); + testEncoding(512l); + testEncoding(-256l); + } + + private static void testEncoding(long l) { + byte[] b = GenericObjectMapper.writeReverseOrderedLong(l); + assertEquals("error decoding", l, + GenericObjectMapper.readReverseOrderedLong(b, 0)); + byte[] buf = new byte[16]; + System.arraycopy(b, 0, buf, 5, 8); + assertEquals("error decoding at offset", l, + GenericObjectMapper.readReverseOrderedLong(buf, 5)); + if (l > Long.MIN_VALUE) { + byte[] a = GenericObjectMapper.writeReverseOrderedLong(l-1); + assertEquals("error preserving ordering", 1, + WritableComparator.compareBytes(a, 0, a.length, b, 0, b.length)); + } + if (l < Long.MAX_VALUE) { + byte[] c = GenericObjectMapper.writeReverseOrderedLong(l+1); + assertEquals("error preserving ordering", 1, + WritableComparator.compareBytes(b, 0, b.length, c, 0, c.length)); + } + } + + private static void 
verify(Object o) throws IOException { + assertEquals(o, GenericObjectMapper.read(GenericObjectMapper.write(o))); + } + + @Test + public void testValueTypes() throws IOException { + verify(42l); + verify(42); + verify(1.23); + verify("abc"); + verify(true); + List<String> list = new ArrayList<String>(); + list.add("123"); + list.add("abc"); + verify(list); + Map<String,String> map = new HashMap<String,String>(); + map.put("k1","v1"); + map.put("k2","v2"); + verify(map); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestLeveldbApplicationTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestLeveldbApplicationTimelineStore.java new file mode 100644 index 0000000000000..b868049c4fbaa --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestLeveldbApplicationTimelineStore.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline; + +import java.io.File; +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors; +import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors.ATSPutError; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + [email protected] [email protected] +public class TestLeveldbApplicationTimelineStore + extends ApplicationTimelineStoreTestUtils { + private FileContext fsContext; + private File fsPath; + + @Before + public void setup() throws Exception { + fsContext = FileContext.getLocalFSFileContext(); + Configuration conf = new Configuration(); + fsPath = new File("target", this.getClass().getSimpleName() + + "-tmpDir").getAbsoluteFile(); + fsContext.delete(new Path(fsPath.getAbsolutePath()), true); + conf.set(YarnConfiguration.ATS_LEVELDB_PATH_PROPERTY, + fsPath.getAbsolutePath()); + store = new LeveldbApplicationTimelineStore(); + store.init(conf); + store.start(); + loadTestData(); + loadVerificationData(); + } + + @After + public void tearDown() throws Exception { + store.stop(); + fsContext.delete(new Path(fsPath.getAbsolutePath()), true); + } + + @Test + public void testGetSingleEntity() throws IOException { + super.testGetSingleEntity(); + ((LeveldbApplicationTimelineStore)store).clearStartTimeCache(); + super.testGetSingleEntity(); + 
} + + @Test + public void testGetEntities() throws IOException { + super.testGetEntities(); + } + + @Test + public void testGetEntitiesWithPrimaryFilters() throws IOException { + super.testGetEntitiesWithPrimaryFilters(); + } + + @Test + public void testGetEntitiesWithSecondaryFilters() throws IOException { + super.testGetEntitiesWithSecondaryFilters(); + } + + @Test + public void testGetEvents() throws IOException { + super.testGetEvents(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestMemoryApplicationTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestMemoryApplicationTimelineStore.java index aa88b74a90100..07a3955bf67ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestMemoryApplicationTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestMemoryApplicationTimelineStore.java @@ -23,6 +23,7 @@ import org.junit.Before; import org.junit.Test; +import java.io.IOException; public class TestMemoryApplicationTimelineStore extends ApplicationTimelineStoreTestUtils { @@ -46,27 +47,27 @@ public ApplicationTimelineStore getApplicationTimelineStore() { } @Test - public void testGetSingleEntity() { + public void testGetSingleEntity() throws IOException { super.testGetSingleEntity(); } @Test - public void testGetEntities() { + public void testGetEntities() throws IOException { super.testGetEntities(); } @Test - public void testGetEntitiesWithPrimaryFilters() { + public void 
testGetEntitiesWithPrimaryFilters() throws IOException { super.testGetEntitiesWithPrimaryFilters(); } @Test - public void testGetEntitiesWithSecondaryFilters() { + public void testGetEntitiesWithSecondaryFilters() throws IOException { super.testGetEntitiesWithSecondaryFilters(); } @Test - public void testGetEvents() { + public void testGetEvents() throws IOException { super.testGetEvents(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestATSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestATSWebServices.java index 1ff73ff35a22a..58a826c9ac033 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestATSWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestATSWebServices.java @@ -156,6 +156,43 @@ public void testGetEntity() throws Exception { Assert.assertEquals(4, entity.getOtherInfo().size()); } + @Test + public void testGetEntityFields1() throws Exception { + WebResource r = resource(); + ClientResponse response = r.path("ws").path("v1").path("apptimeline") + .path("type_1").path("id_1").queryParam("fields", "events,otherinfo") + .accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + ATSEntity entity = response.getEntity(ATSEntity.class); + Assert.assertNotNull(entity); + Assert.assertEquals("id_1", entity.getEntityId()); + Assert.assertEquals("type_1", entity.getEntityType()); + Assert.assertEquals(123l, 
entity.getStartTime().longValue()); + Assert.assertEquals(2, entity.getEvents().size()); + Assert.assertEquals(0, entity.getPrimaryFilters().size()); + Assert.assertEquals(4, entity.getOtherInfo().size()); + } + + @Test + public void testGetEntityFields2() throws Exception { + WebResource r = resource(); + ClientResponse response = r.path("ws").path("v1").path("apptimeline") + .path("type_1").path("id_1").queryParam("fields", "lasteventonly," + + "primaryfilters,relatedentities") + .accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + ATSEntity entity = response.getEntity(ATSEntity.class); + Assert.assertNotNull(entity); + Assert.assertEquals("id_1", entity.getEntityId()); + Assert.assertEquals("type_1", entity.getEntityType()); + Assert.assertEquals(123l, entity.getStartTime().longValue()); + Assert.assertEquals(1, entity.getEvents().size()); + Assert.assertEquals(2, entity.getPrimaryFilters().size()); + Assert.assertEquals(0, entity.getOtherInfo().size()); + } + @Test public void testGetEvents() throws Exception { WebResource r = resource();
ccedd8f8e0e01c6472cd32d371d8f579f60af9fc
orientdb
fixed cluster id selection in distributed mode.--
c
https://github.com/orientechnologies/orientdb
diff --git a/server/src/main/java/com/orientechnologies/orient/server/distributed/task/OCreateRecordTask.java b/server/src/main/java/com/orientechnologies/orient/server/distributed/task/OCreateRecordTask.java index 8a2b793b258..113f0231f3d 100644 --- a/server/src/main/java/com/orientechnologies/orient/server/distributed/task/OCreateRecordTask.java +++ b/server/src/main/java/com/orientechnologies/orient/server/distributed/task/OCreateRecordTask.java @@ -20,6 +20,8 @@ package com.orientechnologies.orient.server.distributed.task; import com.orientechnologies.orient.core.Orient; +import com.orientechnologies.orient.core.db.ODatabaseDocumentInternal; +import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal; import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx; import com.orientechnologies.orient.core.db.record.OPlaceholder; import com.orientechnologies.orient.core.id.ORID; @@ -28,6 +30,7 @@ import com.orientechnologies.orient.core.record.ORecord; import com.orientechnologies.orient.core.record.ORecordInternal; import com.orientechnologies.orient.core.record.impl.ODocument; +import com.orientechnologies.orient.core.record.impl.ODocumentInternal; import com.orientechnologies.orient.core.version.ORecordVersion; import com.orientechnologies.orient.server.OServer; import com.orientechnologies.orient.server.distributed.ODistributedRequest; @@ -65,11 +68,14 @@ public OCreateRecordTask(final ORecordId iRid, final byte[] iContent, final ORec public OCreateRecordTask(final ORecord record) { this((ORecordId) record.getIdentity(), record.toStream(), record.getRecordVersion(), ORecordInternal.getRecordType(record)); - if (rid.getClusterId() == ORID.CLUSTER_ID_INVALID && record instanceof ODocument) { - final OClass clazz = ((ODocument) record).getSchemaClass(); - if (clazz != null) { + if (rid.getClusterId() == ORID.CLUSTER_ID_INVALID) { + final OClass clazz; + if (record instanceof ODocument && (clazz = 
ODocumentInternal.getImmutableSchemaClass((ODocument) record)) != null) { // PRE-ASSIGN THE CLUSTER ID ON CALLER NODE clusterId = clazz.getClusterSelection().getCluster(clazz, (ODocument) record); + } else { + ODatabaseDocumentInternal db = ODatabaseRecordThreadLocal.INSTANCE.get(); + clusterId = db.getDefaultClusterId(); } } }
6368578020adf4796ac899ef2b0b9a695c59601c
ReactiveX-RxJava
unit tests for covariance--- refactoring so not everything for the entire Observable ends up in a single class-
p
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/test/java/rx/CombineLatestTests.java b/rxjava-core/src/test/java/rx/CombineLatestTests.java new file mode 100644 index 0000000000..78dbd4c4e3 --- /dev/null +++ b/rxjava-core/src/test/java/rx/CombineLatestTests.java @@ -0,0 +1,53 @@ +package rx; + +import org.junit.Test; + +import rx.CovarianceTest.CoolRating; +import rx.CovarianceTest.ExtendedResult; +import rx.CovarianceTest.HorrorMovie; +import rx.CovarianceTest.Media; +import rx.CovarianceTest.Movie; +import rx.CovarianceTest.Rating; +import rx.CovarianceTest.Result; +import rx.util.functions.Action1; +import rx.util.functions.Func2; + +public class CombineLatestTests { + /** + * This won't compile if super/extends isn't done correctly on generics + */ + @Test + public void testCovarianceOfCombineLatest() { + Observable<HorrorMovie> horrors = Observable.from(new HorrorMovie()); + Observable<CoolRating> ratings = Observable.from(new CoolRating()); + + Observable.<Movie, CoolRating, Result> combineLatest(horrors, ratings, combine).toBlockingObservable().forEach(action); + Observable.<Movie, CoolRating, Result> combineLatest(horrors, ratings, combine).toBlockingObservable().forEach(action); + Observable.<Media, Rating, ExtendedResult> combineLatest(horrors, ratings, combine).toBlockingObservable().forEach(extendedAction); + Observable.<Media, Rating, Result> combineLatest(horrors, ratings, combine).toBlockingObservable().forEach(action); + Observable.<Media, Rating, ExtendedResult> combineLatest(horrors, ratings, combine).toBlockingObservable().forEach(action); + + Observable.<Movie, CoolRating, Result> combineLatest(horrors, ratings, combine); + } + + Func2<Media, Rating, ExtendedResult> combine = new Func2<Media, Rating, ExtendedResult>() { + @Override + public ExtendedResult call(Media m, Rating r) { + return new ExtendedResult(); + } + }; + + Action1<Result> action = new Action1<Result>() { + @Override + public void call(Result t1) { + System.out.println("Result: " + t1); + } + }; + 
+ Action1<ExtendedResult> extendedAction = new Action1<ExtendedResult>() { + @Override + public void call(ExtendedResult t1) { + System.out.println("Result: " + t1); + } + }; +} diff --git a/rxjava-core/src/test/java/rx/ConcatTests.java b/rxjava-core/src/test/java/rx/ConcatTests.java index 524b7fd73f..29a51d8dfe 100644 --- a/rxjava-core/src/test/java/rx/ConcatTests.java +++ b/rxjava-core/src/test/java/rx/ConcatTests.java @@ -7,6 +7,12 @@ import org.junit.Test; +import rx.CovarianceTest.HorrorMovie; +import rx.CovarianceTest.Media; +import rx.CovarianceTest.Movie; +import rx.Observable.OnSubscribeFunc; +import rx.subscriptions.Subscriptions; + public class ConcatTests { @Test @@ -54,4 +60,62 @@ public void testConcatWithIterableOfObservable() { assertEquals("three", values.get(2)); assertEquals("four", values.get(3)); } + + @Test + public void testConcatCovariance() { + Observable<Media> o1 = Observable.<Media> from(new HorrorMovie(), new Movie()); + Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); + + Observable<Observable<Media>> os = Observable.from(o1, o2); + + List<Media> values = Observable.concat(os).toList().toBlockingObservable().single(); + } + + @Test + public void testConcatCovariance2() { + Observable<Media> o1 = Observable.from(new HorrorMovie(), new Movie(), new Media()); + Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); + + Observable<Observable<Media>> os = Observable.from(o1, o2); + + List<Media> values = Observable.concat(os).toList().toBlockingObservable().single(); + } + + @Test + public void testConcatCovariance3() { + Observable<Movie> o1 = Observable.from(new HorrorMovie(), new Movie()); + Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); + + List<Media> values = Observable.concat(o1, o2).toList().toBlockingObservable().single(); + + assertTrue(values.get(0) instanceof HorrorMovie); + assertTrue(values.get(1) instanceof Movie); + assertTrue(values.get(2) instanceof Media); + 
assertTrue(values.get(3) instanceof HorrorMovie); + } + + @Test + public void testConcatCovariance4() { + + Observable<Movie> o1 = Observable.create(new OnSubscribeFunc<Movie>() { + + @Override + public Subscription onSubscribe(Observer<? super Movie> o) { + o.onNext(new HorrorMovie()); + o.onNext(new Movie()); + // o.onNext(new Media()); // correctly doesn't compile + o.onCompleted(); + return Subscriptions.empty(); + } + }); + + Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); + + List<Media> values = Observable.concat(o1, o2).toList().toBlockingObservable().single(); + + assertTrue(values.get(0) instanceof HorrorMovie); + assertTrue(values.get(1) instanceof Movie); + assertTrue(values.get(2) instanceof Media); + assertTrue(values.get(3) instanceof HorrorMovie); + } } diff --git a/rxjava-core/src/test/java/rx/CovarianceTest.java b/rxjava-core/src/test/java/rx/CovarianceTest.java index 4eb31f0da1..69110b6c6a 100644 --- a/rxjava-core/src/test/java/rx/CovarianceTest.java +++ b/rxjava-core/src/test/java/rx/CovarianceTest.java @@ -1,17 +1,9 @@ package rx; -import static org.junit.Assert.*; - import java.util.ArrayList; -import java.util.List; import org.junit.Test; -import rx.Observable.OnSubscribeFunc; -import rx.subscriptions.Subscriptions; -import rx.util.functions.Action1; -import rx.util.functions.Func2; - /** * Test super/extends of generics. 
* @@ -29,187 +21,9 @@ public void testCovarianceOfFrom() { // Observable.<HorrorMovie>from(new Movie()); // may not compile } - /** - * This won't compile if super/extends isn't done correctly on generics - */ - @Test - public void testCovarianceOfMerge() { - Observable<HorrorMovie> horrors = Observable.from(new HorrorMovie()); - Observable<Observable<HorrorMovie>> metaHorrors = Observable.just(horrors); - Observable.<Media> merge(metaHorrors); - } - - /** - * This won't compile if super/extends isn't done correctly on generics - */ - @Test - public void testCovarianceOfZip() { - Observable<HorrorMovie> horrors = Observable.from(new HorrorMovie()); - Observable<CoolRating> ratings = Observable.from(new CoolRating()); - - Observable.<Movie, CoolRating, Result> zip(horrors, ratings, combine).toBlockingObservable().forEach(action); - Observable.<Movie, CoolRating, Result> zip(horrors, ratings, combine).toBlockingObservable().forEach(action); - Observable.<Media, Rating, ExtendedResult> zip(horrors, ratings, combine).toBlockingObservable().forEach(extendedAction); - Observable.<Media, Rating, Result> zip(horrors, ratings, combine).toBlockingObservable().forEach(action); - Observable.<Media, Rating, ExtendedResult> zip(horrors, ratings, combine).toBlockingObservable().forEach(action); - - Observable.<Movie, CoolRating, Result> zip(horrors, ratings, combine); - } - - /** - * This won't compile if super/extends isn't done correctly on generics + /* + * Most tests are moved into their applicable classes such as [Operator]Tests.java */ - @Test - public void testCovarianceOfCombineLatest() { - Observable<HorrorMovie> horrors = Observable.from(new HorrorMovie()); - Observable<CoolRating> ratings = Observable.from(new CoolRating()); - - Observable.<Movie, CoolRating, Result> combineLatest(horrors, ratings, combine).toBlockingObservable().forEach(action); - Observable.<Movie, CoolRating, Result> combineLatest(horrors, ratings, combine).toBlockingObservable().forEach(action); - 
Observable.<Media, Rating, ExtendedResult> combineLatest(horrors, ratings, combine).toBlockingObservable().forEach(extendedAction); - Observable.<Media, Rating, Result> combineLatest(horrors, ratings, combine).toBlockingObservable().forEach(action); - Observable.<Media, Rating, ExtendedResult> combineLatest(horrors, ratings, combine).toBlockingObservable().forEach(action); - - Observable.<Movie, CoolRating, Result> combineLatest(horrors, ratings, combine); - } - - @Test - public void testConcatCovariance() { - Observable<Media> o1 = Observable.<Media> from(new HorrorMovie(), new Movie()); - Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); - - Observable<Observable<Media>> os = Observable.from(o1, o2); - - List<Media> values = Observable.concat(os).toList().toBlockingObservable().single(); - } - - @Test - public void testConcatCovariance2() { - Observable<Media> o1 = Observable.from(new HorrorMovie(), new Movie(), new Media()); - Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); - - Observable<Observable<Media>> os = Observable.from(o1, o2); - - List<Media> values = Observable.concat(os).toList().toBlockingObservable().single(); - } - - @Test - public void testConcatCovariance3() { - Observable<Movie> o1 = Observable.from(new HorrorMovie(), new Movie()); - Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); - - List<Media> values = Observable.concat(o1, o2).toList().toBlockingObservable().single(); - - assertTrue(values.get(0) instanceof HorrorMovie); - assertTrue(values.get(1) instanceof Movie); - assertTrue(values.get(2) instanceof Media); - assertTrue(values.get(3) instanceof HorrorMovie); - } - - @Test - public void testConcatCovariance4() { - - Observable<Movie> o1 = Observable.create(new OnSubscribeFunc<Movie>() { - - @Override - public Subscription onSubscribe(Observer<? 
super Movie> o) { - o.onNext(new HorrorMovie()); - o.onNext(new Movie()); - // o.onNext(new Media()); // correctly doesn't compile - o.onCompleted(); - return Subscriptions.empty(); - } - }); - - Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); - - List<Media> values = Observable.concat(o1, o2).toList().toBlockingObservable().single(); - - assertTrue(values.get(0) instanceof HorrorMovie); - assertTrue(values.get(1) instanceof Movie); - assertTrue(values.get(2) instanceof Media); - assertTrue(values.get(3) instanceof HorrorMovie); - } - - - @Test - public void testMergeCovariance() { - Observable<Media> o1 = Observable.<Media> from(new HorrorMovie(), new Movie()); - Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); - - Observable<Observable<Media>> os = Observable.from(o1, o2); - - List<Media> values = Observable.merge(os).toList().toBlockingObservable().single(); - } - - @Test - public void testMergeCovariance2() { - Observable<Media> o1 = Observable.from(new HorrorMovie(), new Movie(), new Media()); - Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); - - Observable<Observable<Media>> os = Observable.from(o1, o2); - - List<Media> values = Observable.merge(os).toList().toBlockingObservable().single(); - } - - @Test - public void testMergeCovariance3() { - Observable<Movie> o1 = Observable.from(new HorrorMovie(), new Movie()); - Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); - - List<Media> values = Observable.merge(o1, o2).toList().toBlockingObservable().single(); - - assertTrue(values.get(0) instanceof HorrorMovie); - assertTrue(values.get(1) instanceof Movie); - assertTrue(values.get(2) instanceof Media); - assertTrue(values.get(3) instanceof HorrorMovie); - } - - @Test - public void testMergeCovariance4() { - - Observable<Movie> o1 = Observable.create(new OnSubscribeFunc<Movie>() { - - @Override - public Subscription onSubscribe(Observer<? 
super Movie> o) { - o.onNext(new HorrorMovie()); - o.onNext(new Movie()); - // o.onNext(new Media()); // correctly doesn't compile - o.onCompleted(); - return Subscriptions.empty(); - } - }); - - Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); - - List<Media> values = Observable.merge(o1, o2).toList().toBlockingObservable().single(); - - assertTrue(values.get(0) instanceof HorrorMovie); - assertTrue(values.get(1) instanceof Movie); - assertTrue(values.get(2) instanceof Media); - assertTrue(values.get(3) instanceof HorrorMovie); - } - - Func2<Media, Rating, ExtendedResult> combine = new Func2<Media, Rating, ExtendedResult>() { - @Override - public ExtendedResult call(Media m, Rating r) { - return new ExtendedResult(); - } - }; - - Action1<Result> action = new Action1<Result>() { - @Override - public void call(Result t1) { - System.out.println("Result: " + t1); - } - }; - - Action1<ExtendedResult> extendedAction = new Action1<ExtendedResult>() { - @Override - public void call(ExtendedResult t1) { - System.out.println("Result: " + t1); - } - }; static class Media { } diff --git a/rxjava-core/src/test/java/rx/MergeTests.java b/rxjava-core/src/test/java/rx/MergeTests.java new file mode 100644 index 0000000000..11f30c7908 --- /dev/null +++ b/rxjava-core/src/test/java/rx/MergeTests.java @@ -0,0 +1,86 @@ +package rx; + +import static org.junit.Assert.*; + +import java.util.List; + +import org.junit.Test; + +import rx.CovarianceTest.HorrorMovie; +import rx.CovarianceTest.Media; +import rx.CovarianceTest.Movie; +import rx.Observable.OnSubscribeFunc; +import rx.subscriptions.Subscriptions; + +public class MergeTests { + + /** + * This won't compile if super/extends isn't done correctly on generics + */ + @Test + public void testCovarianceOfMerge() { + Observable<HorrorMovie> horrors = Observable.from(new HorrorMovie()); + Observable<Observable<HorrorMovie>> metaHorrors = Observable.just(horrors); + Observable.<Media> merge(metaHorrors); + } + + @Test + 
public void testMergeCovariance() { + Observable<Media> o1 = Observable.<Media> from(new HorrorMovie(), new Movie()); + Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); + + Observable<Observable<Media>> os = Observable.from(o1, o2); + + List<Media> values = Observable.merge(os).toList().toBlockingObservable().single(); + } + + @Test + public void testMergeCovariance2() { + Observable<Media> o1 = Observable.from(new HorrorMovie(), new Movie(), new Media()); + Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); + + Observable<Observable<Media>> os = Observable.from(o1, o2); + + List<Media> values = Observable.merge(os).toList().toBlockingObservable().single(); + } + + @Test + public void testMergeCovariance3() { + Observable<Movie> o1 = Observable.from(new HorrorMovie(), new Movie()); + Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); + + List<Media> values = Observable.merge(o1, o2).toList().toBlockingObservable().single(); + + assertTrue(values.get(0) instanceof HorrorMovie); + assertTrue(values.get(1) instanceof Movie); + assertTrue(values.get(2) instanceof Media); + assertTrue(values.get(3) instanceof HorrorMovie); + } + + @Test + public void testMergeCovariance4() { + + Observable<Movie> o1 = Observable.create(new OnSubscribeFunc<Movie>() { + + @Override + public Subscription onSubscribe(Observer<? 
super Movie> o) { + o.onNext(new HorrorMovie()); + o.onNext(new Movie()); + // o.onNext(new Media()); // correctly doesn't compile + o.onCompleted(); + return Subscriptions.empty(); + } + }); + + Observable<Media> o2 = Observable.from(new Media(), new HorrorMovie()); + + List<Media> values = Observable.merge(o1, o2).toList().toBlockingObservable().single(); + + assertTrue(values.get(0) instanceof HorrorMovie); + assertTrue(values.get(1) instanceof Movie); + assertTrue(values.get(2) instanceof Media); + assertTrue(values.get(3) instanceof HorrorMovie); + } + + +} diff --git a/rxjava-core/src/test/java/rx/ReduceTests.java b/rxjava-core/src/test/java/rx/ReduceTests.java new file mode 100644 index 0000000000..822001e615 --- /dev/null +++ b/rxjava-core/src/test/java/rx/ReduceTests.java @@ -0,0 +1,69 @@ +package rx; + +import static org.junit.Assert.*; + +import org.junit.Test; + +import rx.CovarianceTest.HorrorMovie; +import rx.CovarianceTest.Movie; +import rx.operators.OperationScan; +import rx.util.functions.Func2; + +public class ReduceTests { + + @Test + public void reduceInts() { + Observable<Integer> o = Observable.from(1, 2, 3); + int value = o.reduce(new Func2<Integer, Integer, Integer>() { + + @Override + public Integer call(Integer t1, Integer t2) { + return t1 + t2; + } + }).toBlockingObservable().single(); + + assertEquals(6, value); + } + + @Test + public void reduceWithObjects() { + Observable<HorrorMovie> horrorMovies = Observable.from(new HorrorMovie()); + + Func2<Movie, Movie, Movie> chooseSecondMovie = + new Func2<Movie, Movie, Movie>() { + public Movie call(Movie t1, Movie t2) { + return t2; + } + }; + + Observable<Movie> reduceResult = Observable.create(OperationScan.scan(horrorMovies, chooseSecondMovie)).takeLast(1); + + //TODO this isn't compiling + // Observable<Movie> reduceResult2 = horrorMovies.reduce(chooseSecondMovie); + } + + @Test + public void reduceCovariance() { + Observable<HorrorMovie> horrorMovies = Observable.from(new HorrorMovie()); 
+ + // do something with horrorMovies, relying on the fact that all are HorrorMovies + // and not just any Movies... + + // pass it to library (works because it takes Observable<? extends Movie>) + libraryFunctionActingOnMovieObservables(horrorMovies); + } + + public void libraryFunctionActingOnMovieObservables(Observable<? extends Movie> obs) { + Func2<Movie, Movie, Movie> chooseSecondMovie = + new Func2<Movie, Movie, Movie>() { + public Movie call(Movie t1, Movie t2) { + return t2; + } + }; + + //TODO this isn't compiling + // Observable<Movie> reduceResult = obs.reduce((Func2<? super Movie, ? super Movie, ? extends Movie>) chooseSecondMovie); + // do something with reduceResult... + } + +} diff --git a/rxjava-core/src/test/java/rx/ZipTests.java b/rxjava-core/src/test/java/rx/ZipTests.java new file mode 100644 index 0000000000..ee25bb6ade --- /dev/null +++ b/rxjava-core/src/test/java/rx/ZipTests.java @@ -0,0 +1,54 @@ +package rx; + +import org.junit.Test; + +import rx.CovarianceTest.CoolRating; +import rx.CovarianceTest.ExtendedResult; +import rx.CovarianceTest.HorrorMovie; +import rx.CovarianceTest.Media; +import rx.CovarianceTest.Movie; +import rx.CovarianceTest.Rating; +import rx.CovarianceTest.Result; +import rx.util.functions.Action1; +import rx.util.functions.Func2; + +public class ZipTests { + + /** + * This won't compile if super/extends isn't done correctly on generics + */ + @Test + public void testCovarianceOfZip() { + Observable<HorrorMovie> horrors = Observable.from(new HorrorMovie()); + Observable<CoolRating> ratings = Observable.from(new CoolRating()); + + Observable.<Movie, CoolRating, Result> zip(horrors, ratings, combine).toBlockingObservable().forEach(action); + Observable.<Movie, CoolRating, Result> zip(horrors, ratings, combine).toBlockingObservable().forEach(action); + Observable.<Media, Rating, ExtendedResult> zip(horrors, ratings, combine).toBlockingObservable().forEach(extendedAction); + Observable.<Media, Rating, Result> zip(horrors, 
ratings, combine).toBlockingObservable().forEach(action); + Observable.<Media, Rating, ExtendedResult> zip(horrors, ratings, combine).toBlockingObservable().forEach(action); + + Observable.<Movie, CoolRating, Result> zip(horrors, ratings, combine); + } + + Func2<Media, Rating, ExtendedResult> combine = new Func2<Media, Rating, ExtendedResult>() { + @Override + public ExtendedResult call(Media m, Rating r) { + return new ExtendedResult(); + } + }; + + Action1<Result> action = new Action1<Result>() { + @Override + public void call(Result t1) { + System.out.println("Result: " + t1); + } + }; + + Action1<ExtendedResult> extendedAction = new Action1<ExtendedResult>() { + @Override + public void call(ExtendedResult t1) { + System.out.println("Result: " + t1); + } + }; +}
5a2052fad3f92e6675ba6362028f5f5851934f01
camel
CAMEL-4059: Fixed test on windows--git-svn-id: https://svn.apache.org/repos/asf/camel/trunk@1132659 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/camel
diff --git a/components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsTestConnectionOnStartupTest.java b/components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsTestConnectionOnStartupTest.java index 8b24f76a2c209..045aed17b0efd 100644 --- a/components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsTestConnectionOnStartupTest.java +++ b/components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsTestConnectionOnStartupTest.java @@ -46,7 +46,6 @@ public void configure() throws Exception { context.start(); fail("Should have thrown an exception"); } catch (FailedToCreateConsumerException e) { - // expected assertEquals("Failed to create Consumer for endpoint: Endpoint[activemq://queue:foo?testConnectionOnStartup=true]. " + "Reason: Cannot get JMS Connection on startup for destination foo", e.getMessage()); } @@ -65,12 +64,8 @@ public void configure() throws Exception { context.start(); fail("Should have thrown an exception"); } catch (FailedToCreateProducerException e) { - // expected - assertEquals("Failed to create Producer for endpoint: Endpoint[activemq://queue:foo?testConnectionOnStartup=true]. " - + "Reason: org.apache.camel.FailedToCreateProducerException: Failed to create Producer for endpoint: " - + "Endpoint[activemq://queue:foo?testConnectionOnStartup=true]. Reason: javax.jms.JMSException: " - + "Could not connect to broker URL: tcp://localhost:61111. Reason: java.net.ConnectException: Connection refused", - e.getMessage()); + assertTrue(e.getMessage().startsWith("Failed to create Producer for endpoint: Endpoint[activemq://queue:foo?testConnectionOnStartup=true].")); + assertTrue(e.getMessage().contains("java.net.ConnectException")); } }
8d056194e11076f0da84e62f63f86d2abcbcbb60
hbase
HBASE-4861 Fix some misspells and extraneous- characters in logs; set some to TRACE--git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1205732 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/hbase
diff --git a/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 0536b6e24aea..0c1fa3fe4c21 100644 --- a/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -655,9 +655,8 @@ public boolean isSplitParent() { */ @Override public String toString() { - return "REGION => {" + HConstants.NAME + " => '" + + return "{" + HConstants.NAME + " => '" + this.regionNameStr - + "', TableName => '" + Bytes.toStringBinary(this.tableName) + "', STARTKEY => '" + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + diff --git a/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java b/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java index dc1e872be17a..19fee5c4acdc 100644 --- a/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java +++ b/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java @@ -289,8 +289,9 @@ public static void deleteDaughtersReferencesInParent(CatalogTracker catalogTrack delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER); deleteMetaTable(catalogTracker, delete); - LOG.info("Deleted daughters references, qualifier=" + Bytes.toStringBinary(HConstants.SPLITA_QUALIFIER) + " and qualifier=" - + Bytes.toStringBinary(HConstants.SPLITA_QUALIFIER) + ", from parent " + parent.getRegionNameAsString()); + LOG.info("Deleted daughters references, qualifier=" + Bytes.toStringBinary(HConstants.SPLITA_QUALIFIER) + + " and qualifier=" + Bytes.toStringBinary(HConstants.SPLITB_QUALIFIER) + + ", from parent " + parent.getRegionNameAsString()); } public static HRegionInfo getHRegionInfo( @@ -317,4 +318,4 @@ private static Put addLocation(final Put p, final ServerName sn) { Bytes.toBytes(sn.getStartcode())); return p; } -} \ No newline at end of file +} diff --git 
a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index e4de22ad9d84..6af1f82092aa 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -1816,7 +1816,7 @@ public void close() { } else { close(true); } - LOG.debug("The connection to " + this.zooKeeper + " has been closed."); + if (LOG.isTraceEnabled()) LOG.debug("" + this.zooKeeper + " closed."); } /** diff --git a/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 8cf220b5052f..3f6ccb6fece3 100644 --- a/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -746,8 +746,8 @@ public long writeIndexBlocks(FSDataOutputStream out) throws IOException { totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader(); - if (LOG.isDebugEnabled()) { - LOG.debug("Wrote a " + numLevels + "-level index with root level at pos " + if (LOG.isTraceEnabled()) { + LOG.trace("Wrote a " + numLevels + "-level index with root level at pos " + out.getPos() + ", " + rootChunk.getNumEntries() + " root-level entries, " + totalNumEntries + " total entries, " + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) + @@ -782,9 +782,11 @@ public void writeSingleLevelIndex(DataOutput out, String description) rootChunk = curInlineChunk; curInlineChunk = new BlockIndexChunk(); - LOG.info("Wrote a single-level " + description + " index with " + if (LOG.isTraceEnabled()) { + LOG.trace("Wrote a single-level " + description + " index with " + rootChunk.getNumEntries() + " entries, " + rootChunk.getRootSize() + " bytes"); + } rootChunk.writeRoot(out); } diff --git a/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java 
b/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index 1bae2615ab69..c1f304e9df63 100644 --- a/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -331,8 +331,10 @@ public void close(boolean evictOnClose) throws IOException { if (evictOnClose && cacheConf.isBlockCacheEnabled()) { int numEvicted = cacheConf.getBlockCache().evictBlocksByPrefix(name + HFile.CACHE_KEY_SEPARATOR); - LOG.debug("On close, file=" + name + " evicted=" + numEvicted + if (LOG.isTraceEnabled()) { + LOG.trace("On close, file=" + name + " evicted=" + numEvicted + " block(s)"); + } } if (closeIStream && istream != null) { istream.close(); diff --git a/src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java b/src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java index f3812344137c..2d544dd155f9 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java +++ b/src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java @@ -110,7 +110,7 @@ public void process() { parent.getEncodedName() + ")", e); } } - LOG.info("Handled SPLIT report); parent=" + + LOG.info("Handled SPLIT event; parent=" + this.parent.getRegionNameAsString() + " daughter a=" + this.daughters.get(0).getRegionNameAsString() + "daughter b=" + this.daughters.get(1).getRegionNameAsString()); diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 6f37b84bc637..94a8c1d46f43 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1580,7 +1580,6 @@ public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct, // Add to online regions if all above was successful. 
addToOnlineRegions(r); - LOG.info("addToOnlineRegions is done" + r.getRegionInfo()); // Update ZK, ROOT or META if (r.getRegionInfo().isRootRegion()) { RootLocationEditor.setRootLocation(getZooKeeper(), @@ -1598,7 +1597,7 @@ public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct, this.serverNameFromMasterPOV); } } - LOG.info("Done with post open deploy taks for region=" + + LOG.info("Done with post open deploy task for region=" + r.getRegionNameAsString() + ", daughter=" + daughter); } diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index 0cc2f63a6b8c..08b7de316320 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -393,7 +393,7 @@ private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) { // that it's possible for the master to miss an event. do { if (spins % 10 == 0) { - LOG.info("Still waiting on the master to process the split for " + + LOG.debug("Still waiting on the master to process the split for " + this.parent.getRegionInfo().getEncodedName()); } Thread.sleep(100); diff --git a/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java b/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java index 79c72208291f..418bd16ae9c1 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java +++ b/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java @@ -173,12 +173,12 @@ public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf, CacheConfig cacheConf, BloomType bloomType, int maxKeys, HFile.Writer writer) { if (!isGeneralBloomEnabled(conf)) { - LOG.debug("Bloom filters are disabled by configuration for " + LOG.trace("Bloom filters are disabled by configuration for " + writer.getPath() + (conf == null ? 
" (configuration is null)" : "")); return null; } else if (bloomType == BloomType.NONE) { - LOG.debug("Bloom filter is turned off for the column family"); + LOG.trace("Bloom filter is turned off for the column family"); return null; }
5804249b521c4edda47d0dffc514075cd6fb4503
intellij-community
Mac laf: add support for search controls with- history popups--
a
https://github.com/JetBrains/intellij-community
diff --git a/platform/platform-impl/src/com/intellij/ide/ui/laf/intellij/MacIntelliJTextFieldUI.java b/platform/platform-impl/src/com/intellij/ide/ui/laf/intellij/MacIntelliJTextFieldUI.java index 21ec9d3211230..3190fac057a9b 100644 --- a/platform/platform-impl/src/com/intellij/ide/ui/laf/intellij/MacIntelliJTextFieldUI.java +++ b/platform/platform-impl/src/com/intellij/ide/ui/laf/intellij/MacIntelliJTextFieldUI.java @@ -127,13 +127,15 @@ protected void paintSearchField(Graphics2D g, JTextComponent c, Rectangle r) { gg.dispose(); right.paintIcon(c, g, stop, r.y); - Icon label = MacIntelliJIconCache.getIcon("searchFieldLabel"); - if (StringUtil.isEmpty(c.getText()) && !c.hasFocus()) { + boolean withHistoryPopup = isSearchFieldWithHistoryPopup(c); + Icon label = MacIntelliJIconCache.getIcon(withHistoryPopup ? "searchFieldWithHistory" : "searchFieldLabel"); + if (StringUtil.isEmpty(c.getText()) && !c.hasFocus() && !withHistoryPopup) { label.paintIcon(c, g, r.x + (r.width - label.getIconWidth())/ 2, r.y); } else { gg = g.create(0, 0, c.getWidth(), c.getHeight()); - gg.setClip(r.x + 8, r.y, StringUtil.isEmpty(c.getText()) ? label.getIconWidth() : 16, label.getIconHeight()); - label.paintIcon(c, gg, r.x + 8, r.y); + int offset = withHistoryPopup ? 5 : 8; + gg.setClip(r.x + offset, r.y, StringUtil.isEmpty(c.getText()) ? label.getIconWidth() : 16, label.getIconHeight()); + label.paintIcon(c, gg, r.x + offset, r.y); } if (!StringUtil.isEmpty(c.getText())) { @@ -145,7 +147,7 @@ protected void paintSearchField(Graphics2D g, JTextComponent c, Rectangle r) { @Override protected Rectangle getVisibleEditorRect() { Rectangle rect = super.getVisibleEditorRect(); - if (isSearchField(myTextField)) { + if (rect != null && isSearchField(myTextField)) { rect.width -= 36; rect.x += 19; rect.y +=1;
c512c4594c95d08edf0a63cb30ed643a4ed69fcc
drools
[JBRULES-3668] format and shorten kproject.xml file--
p
https://github.com/kiegroup/drools
diff --git a/drools-compiler/src/main/java/org/drools/cdi/KProjectExtension.java b/drools-compiler/src/main/java/org/drools/cdi/KProjectExtension.java index 5bc78adf1fd..1e7ddc2d3bd 100644 --- a/drools-compiler/src/main/java/org/drools/cdi/KProjectExtension.java +++ b/drools-compiler/src/main/java/org/drools/cdi/KProjectExtension.java @@ -33,6 +33,7 @@ import org.drools.kproject.KBaseImpl; import org.drools.kproject.KProject; +import org.drools.kproject.KProjectImpl; import org.drools.kproject.KSessionImpl; import org.kie.KnowledgeBase; import org.kie.KnowledgeBaseFactory; @@ -473,8 +474,7 @@ public void buildKProjects() { while ( e.hasMoreElements() ) { URL url = e.nextElement();; try { - XStream xstream = new XStream(); - KProject kProject = (KProject) xstream.fromXML( url ); + KProject kProject = KProjectImpl.fromXML(url); String kProjectId = kProject.getGroupArtifactVersion().getGroupId() + ":" + kProject.getGroupArtifactVersion().getArtifactId(); urls.put( kProjectId, fixURL( url ) ); kProjects.put( kProjectId, kProject ); diff --git a/drools-compiler/src/main/java/org/drools/kproject/KBaseImpl.java b/drools-compiler/src/main/java/org/drools/kproject/KBaseImpl.java index 4a75bcb012c..85d915f492d 100644 --- a/drools-compiler/src/main/java/org/drools/kproject/KBaseImpl.java +++ b/drools-compiler/src/main/java/org/drools/kproject/KBaseImpl.java @@ -10,9 +10,14 @@ import java.util.Map; import java.util.Set; -import org.drools.RuleBaseConfiguration.AssertBehaviour; +import com.thoughtworks.xstream.converters.MarshallingContext; +import com.thoughtworks.xstream.converters.UnmarshallingContext; +import com.thoughtworks.xstream.io.HierarchicalStreamReader; +import com.thoughtworks.xstream.io.HierarchicalStreamWriter; +import org.drools.core.util.AbstractXStreamConverter; import org.kie.conf.AssertBehaviorOption; import org.kie.conf.EventProcessingOption; +import org.kie.runtime.conf.ClockTypeOption; public class KBaseImpl implements @@ -37,6 +42,11 @@ public class 
KBaseImpl private transient PropertyChangeListener listener; + private KBaseImpl() { + this.includes = new HashSet<String>(); + this.files = new ArrayList<String>(); + } + public KBaseImpl(KProjectImpl kProject, String namespace, String name) { @@ -267,4 +277,60 @@ public String toString() { return "KBase [namespace=" + namespace + ", name=" + name + ", files=" + files + ", annotations=" + annotations + ", equalsBehaviour=" + equalsBehavior + ", eventProcessingMode=" + eventProcessingMode + ", ksessions=" + kSessions + "]"; } + public static class KBaseConverter extends AbstractXStreamConverter { + + public KBaseConverter() { + super(KBaseImpl.class); + } + + public void marshal(Object value, HierarchicalStreamWriter writer, MarshallingContext context) { + KBaseImpl kBase = (KBaseImpl) value; + writer.addAttribute("name", kBase.getName()); + writer.addAttribute("namespace", kBase.getNamespace()); + if (kBase.getEventProcessingMode() != null) { + writer.addAttribute("eventProcessingMode", kBase.getEventProcessingMode().getMode()); + } + if (kBase.getEqualsBehavior() != null) { + writer.addAttribute("equalsBehavior", kBase.getEqualsBehavior().toString()); + } + writeList(writer, "files", "file", kBase.getFiles()); + writeList(writer, "includes", "include", kBase.getIncludes()); + writeObjectList(writer, context, "ksessions", "ksession", kBase.getKSessions().values()); + } + + public Object unmarshal(HierarchicalStreamReader reader, final UnmarshallingContext context) { + final KBaseImpl kBase = new KBaseImpl(); + kBase.setName(reader.getAttribute("name")); + kBase.setNamespace(reader.getAttribute("namespace")); + + String eventMode = reader.getAttribute("eventProcessingMode"); + if (eventMode != null) { + kBase.setEventProcessingMode(EventProcessingOption.determineEventProcessingMode(eventMode)); + } + String equalsBehavior = reader.getAttribute("equalsBehavior"); + if (equalsBehavior != null) { + kBase.setEqualsBehavior(AssertBehaviorOption.valueOf(equalsBehavior)); 
+ } + + readNodes(reader, new AbstractXStreamConverter.NodeReader() { + public void onNode(HierarchicalStreamReader reader, String name, String value) { + if ("ksessions".equals(name)) { + Map<String, KSession> kSessions = new HashMap<String, KSession>(); + for (KSessionImpl kSession : readObjectList(reader, context, KSessionImpl.class)) { + kSession.setKBase(kBase); + kSessions.put( kSession.getQName(), kSession ); + } + kBase.setKSessions(kSessions); + } else if ("files".equals(name)) { + kBase.setFiles(readList(reader)); + } else if ("includes".equals(name)) { + for (String include : readList(reader)) { + kBase.addInclude(include); + } + } + } + }); + return kBase; + } + } } diff --git a/drools-compiler/src/main/java/org/drools/kproject/KProjectImpl.java b/drools-compiler/src/main/java/org/drools/kproject/KProjectImpl.java index fcb73c87302..a02ab1516ff 100644 --- a/drools-compiler/src/main/java/org/drools/kproject/KProjectImpl.java +++ b/drools-compiler/src/main/java/org/drools/kproject/KProjectImpl.java @@ -1,6 +1,14 @@ package org.drools.kproject; import com.thoughtworks.xstream.XStream; +import com.thoughtworks.xstream.converters.MarshallingContext; +import com.thoughtworks.xstream.converters.UnmarshallingContext; +import com.thoughtworks.xstream.io.HierarchicalStreamReader; +import com.thoughtworks.xstream.io.HierarchicalStreamWriter; +import com.thoughtworks.xstream.io.xml.DomDriver; +import org.drools.core.util.AbstractXStreamConverter; +import org.kie.conf.AssertBehaviorOption; +import org.kie.conf.EventProcessingOption; import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeListener; @@ -177,18 +185,87 @@ public String toString() { } public String toXML() { - return new XStream().toXML(this); + return MARSHALLER.toXML(this); } public static KProject fromXML(InputStream kProjectStream) { - return (KProject)new XStream().fromXML(kProjectStream); + return MARSHALLER.fromXML(kProjectStream); } public static KProject fromXML(java.io.File 
kProjectFile) { - return (KProject)new XStream().fromXML(kProjectFile); + return MARSHALLER.fromXML(kProjectFile); } public static KProject fromXML(URL kProjectUrl) { - return (KProject)new XStream().fromXML(kProjectUrl); + return MARSHALLER.fromXML(kProjectUrl); } -} + + private static final KProjectMarshaller MARSHALLER = new KProjectMarshaller(); + + private static class KProjectMarshaller { + private final XStream xStream = new XStream(new DomDriver()); + + private KProjectMarshaller() { + xStream.registerConverter(new KProjectConverter()); + xStream.registerConverter(new KBaseImpl.KBaseConverter()); + xStream.registerConverter(new KSessionImpl.KSessionConverter()); + xStream.alias("kproject", KProjectImpl.class); + xStream.alias("kbase", KBaseImpl.class); + xStream.alias("ksession", KSessionImpl.class); + } + + public String toXML(KProject kProject) { + return xStream.toXML(kProject); + } + + public KProject fromXML(InputStream kProjectStream) { + return (KProject)xStream.fromXML(kProjectStream); + } + + public KProject fromXML(java.io.File kProjectFile) { + return (KProject)xStream.fromXML(kProjectFile); + } + + public KProject fromXML(URL kProjectUrl) { + return (KProject)xStream.fromXML(kProjectUrl); + } + } + + public static class KProjectConverter extends AbstractXStreamConverter { + + public KProjectConverter() { + super(KProjectImpl.class); + } + + public void marshal(Object value, HierarchicalStreamWriter writer, MarshallingContext context) { + KProjectImpl kProject = (KProjectImpl) value; + writeAttribute(writer, "kBasesPath", kProject.getKBasesPath()); + writeAttribute(writer, "kProjectPath", kProject.getKProjectPath()); + writeObject(writer, context, "groupArtifactVersion", kProject.getGroupArtifactVersion()); + writeObjectList(writer, context, "kbases", "kbase", kProject.getKBases().values()); + } + + public Object unmarshal(HierarchicalStreamReader reader, final UnmarshallingContext context) { + final KProjectImpl kProject = new KProjectImpl(); + 
kProject.setKBasesPath(reader.getAttribute("kBasesPath")); + kProject.setKProjectPath(reader.getAttribute("kProjectPath")); + + readNodes(reader, new AbstractXStreamConverter.NodeReader() { + public void onNode(HierarchicalStreamReader reader, String name, String value) { + if ("groupArtifactVersion".equals(name)) { + kProject.setGroupArtifactVersion((GroupArtifactVersion) context.convertAnother(reader.getValue(), GroupArtifactVersion.class)); + } else if ("kbases".equals(name)) { + Map<String, KBase> kBases = new HashMap<String, KBase>(); + for (KBaseImpl kBase : readObjectList(reader, context, KBaseImpl.class)) { + kBase.setKProject(kProject); + kBases.put(kBase.getQName(), kBase); + } + kProject.setKBases(kBases); + } + } + }); + + return kProject; + } + } +} \ No newline at end of file diff --git a/drools-compiler/src/main/java/org/drools/kproject/KSessionImpl.java b/drools-compiler/src/main/java/org/drools/kproject/KSessionImpl.java index d9548f7aeea..d43f11163ea 100644 --- a/drools-compiler/src/main/java/org/drools/kproject/KSessionImpl.java +++ b/drools-compiler/src/main/java/org/drools/kproject/KSessionImpl.java @@ -1,13 +1,18 @@ package org.drools.kproject; +import com.thoughtworks.xstream.converters.MarshallingContext; +import com.thoughtworks.xstream.converters.UnmarshallingContext; +import com.thoughtworks.xstream.io.HierarchicalStreamReader; +import com.thoughtworks.xstream.io.HierarchicalStreamWriter; +import org.drools.core.util.AbstractXStreamConverter; +import org.kie.conf.AssertBehaviorOption; +import org.kie.runtime.conf.ClockTypeOption; + import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeListener; import java.util.ArrayList; import java.util.List; -import org.drools.ClockType; -import org.kie.runtime.conf.ClockTypeOption; - public class KSessionImpl implements KSession { @@ -23,6 +28,8 @@ public class KSessionImpl private transient PropertyChangeListener listener; + private KSessionImpl() { } + public KSessionImpl(KBaseImpl 
kBase, String namespace, String name) { @@ -157,4 +164,33 @@ public String toString() { return "KSession [namespace=" + namespace + ", name=" + name + ", clockType=" + clockType + ", annotations=" + annotations + "]"; } -} + public static class KSessionConverter extends AbstractXStreamConverter { + + public KSessionConverter() { + super(KSessionImpl.class); + } + + public void marshal(Object value, HierarchicalStreamWriter writer, MarshallingContext context) { + KSessionImpl kSession = (KSessionImpl) value; + writer.addAttribute("name", kSession.getName()); + writer.addAttribute("namespace", kSession.getNamespace()); + writer.addAttribute("type", kSession.getType()); + if (kSession.getClockType() != null) { + writer.addAttribute("clockType", kSession.getClockType().getClockType()); + } + } + + public Object unmarshal(HierarchicalStreamReader reader, UnmarshallingContext context) { + KSessionImpl kSession = new KSessionImpl(); + kSession.setName(reader.getAttribute("name")); + kSession.setNamespace(reader.getAttribute("namespace")); + kSession.setType(reader.getAttribute("type")); + + String clockType = reader.getAttribute("clockType"); + if (clockType != null) { + kSession.setClockType(ClockTypeOption.get(clockType)); + } + return kSession; + } + } +} \ No newline at end of file diff --git a/drools-compiler/src/test/java/org/drools/kproject/AbstractKnowledgeTest.java b/drools-compiler/src/test/java/org/drools/kproject/AbstractKnowledgeTest.java index 2491b71b265..01476f07b4a 100644 --- a/drools-compiler/src/test/java/org/drools/kproject/AbstractKnowledgeTest.java +++ b/drools-compiler/src/test/java/org/drools/kproject/AbstractKnowledgeTest.java @@ -102,9 +102,8 @@ public void createKProjectJar(String namespace, File fle2 = fld2.getFile( "beans.xml" ); fle2.create( new ByteArrayInputStream( generateBeansXML( kproj ).getBytes() ) ); - XStream xstream = new XStream(); fle2 = fld2.getFile( "kproject.xml" ); - fle2.create( new ByteArrayInputStream( xstream.toXML( kproj 
).getBytes() ) ); + fle2.create( new ByteArrayInputStream( ((KProjectImpl)kproj).toXML().getBytes() ) ); String kBase1R1 = getRule( namespace + ".test1", "rule1" ); String kBase1R2 = getRule( namespace + ".test1", "rule2" ); diff --git a/drools-compiler/src/test/java/org/drools/kproject/KJarTest.java b/drools-compiler/src/test/java/org/drools/kproject/KJarTest.java index 04d7e403c70..08ecb7ccb12 100644 --- a/drools-compiler/src/test/java/org/drools/kproject/KJarTest.java +++ b/drools-compiler/src/test/java/org/drools/kproject/KJarTest.java @@ -113,8 +113,7 @@ private void createKJar() throws IOException { .setAnnotations( asList( "@ApplicationScoped; @Inject" ) ) .setClockType( ClockTypeOption.get("realtime") ); - XStream xstream = new XStream(); - fileManager.write(fileManager.newFile(KnowledgeContainerImpl.KPROJECT_RELATIVE_PATH), xstream.toXML( kproj )); + fileManager.write( fileManager.newFile(KnowledgeContainerImpl.KPROJECT_RELATIVE_PATH), ((KProjectImpl)kproj).toXML() ); KnowledgeContainer kcontainer = KnowledgeBuilderFactory.newKnowledgeContainer(); diff --git a/drools-compiler/src/test/java/org/drools/kproject/KnowledgeContainerTest.java b/drools-compiler/src/test/java/org/drools/kproject/KnowledgeContainerTest.java index 51545347550..d55c02a3bcd 100644 --- a/drools-compiler/src/test/java/org/drools/kproject/KnowledgeContainerTest.java +++ b/drools-compiler/src/test/java/org/drools/kproject/KnowledgeContainerTest.java @@ -104,7 +104,7 @@ private File createKJar(KnowledgeContainer kContainer, String kjarName, String.. 
.setAnnotations( asList( "@ApplicationScoped; @Inject" ) ) .setClockType( ClockTypeOption.get("realtime") ); - fileManager.write(fileManager.newFile(KnowledgeContainerImpl.KPROJECT_RELATIVE_PATH), new XStream().toXML(kproj)); + fileManager.write(fileManager.newFile(KnowledgeContainerImpl.KPROJECT_RELATIVE_PATH), ((KProjectImpl)kproj).toXML() ); return kContainer.buildKJar(fileManager.getRootDirectory(), fileManager.getRootDirectory(), kjarName); } diff --git a/drools-core/src/main/java/org/drools/core/util/AbstractXStreamConverter.java b/drools-core/src/main/java/org/drools/core/util/AbstractXStreamConverter.java new file mode 100644 index 00000000000..b129d5e7ac6 --- /dev/null +++ b/drools-core/src/main/java/org/drools/core/util/AbstractXStreamConverter.java @@ -0,0 +1,105 @@ +package org.drools.core.util; + +import com.thoughtworks.xstream.converters.Converter; +import com.thoughtworks.xstream.converters.MarshallingContext; +import com.thoughtworks.xstream.converters.UnmarshallingContext; +import com.thoughtworks.xstream.io.HierarchicalStreamReader; +import com.thoughtworks.xstream.io.HierarchicalStreamWriter; + +import java.util.*; + +public abstract class AbstractXStreamConverter implements Converter { + private final Class type; + + protected AbstractXStreamConverter(Class type) { + this.type = type; + } + + public boolean canConvert(Class clazz) { + return type.isAssignableFrom(clazz); + } + + protected void writeAttribute(HierarchicalStreamWriter writer, String name, String value) { + if (value != null) { + writer.addAttribute(name, value); + } + } + + protected void writeString(HierarchicalStreamWriter writer, String name, String value) { + if (value != null) { + writer.startNode(name); + writer.setValue(value); + writer.endNode(); + } + } + + protected void writeObject(HierarchicalStreamWriter writer, MarshallingContext context, String name, Object value) { + if (value != null) { + writer.startNode(name); + context.convertAnother(value); + 
writer.endNode(); + } + } + + protected void writeList(HierarchicalStreamWriter writer, String listName, String itemName, Iterable<String> list) { + if (list != null) { + java.util.Iterator<String> i = list.iterator(); + if (i.hasNext()) { + writer.startNode(listName); + while (i.hasNext()) { + writer.startNode(itemName); + writer.setValue(i.next()); + writer.endNode(); + } + writer.endNode(); + } + + } + } + + protected void writeObjectList(HierarchicalStreamWriter writer, MarshallingContext context, String listName, String itemName, Iterable<? extends Object> list) { + if (list != null) { + java.util.Iterator<? extends Object> i = list.iterator(); + if (i.hasNext()) { + writer.startNode(listName); + while (i.hasNext()) { + writeObject(writer, context, itemName, i.next()); + } + writer.endNode(); + } + + } + } + + protected void readNodes(HierarchicalStreamReader reader, NodeReader nodeReader) { + while (reader.hasMoreChildren()) { + reader.moveDown(); + nodeReader.onNode(reader, reader.getNodeName(), reader.getValue()); + reader.moveUp(); + } + } + + protected List<String> readList(HierarchicalStreamReader reader) { + List<String> list = new ArrayList<String>(); + while (reader.hasMoreChildren()) { + reader.moveDown(); + list.add(reader.getValue()); + reader.moveUp(); + } + return list; + } + + protected <T> List<T> readObjectList(HierarchicalStreamReader reader, UnmarshallingContext context, Class<? 
extends T> clazz) { + List<T> list = new ArrayList<T>(); + while (reader.hasMoreChildren()) { + reader.moveDown(); + list.add((T) context.convertAnother(reader.getValue(), clazz)); + reader.moveUp(); + } + return list; + } + + public interface NodeReader { + void onNode(HierarchicalStreamReader reader, String name, String value); + } +} diff --git a/drools-maven-plugin-example/src/main/resources/META-INF/kproject.xml b/drools-maven-plugin-example/src/main/resources/META-INF/kproject.xml index a5f271f9573..f8d80417680 100644 --- a/drools-maven-plugin-example/src/main/resources/META-INF/kproject.xml +++ b/drools-maven-plugin-example/src/main/resources/META-INF/kproject.xml @@ -1,56 +1,24 @@ -<org.drools.kproject.KProjectImpl> - <kBases> - <entry> - <string>org.drools.FireAlarmKBase</string> - <org.drools.kproject.KBaseImpl> - <namespace>org.drools</namespace> - <name>FireAlarmKBase</name> - <includes/> - <files> - <string>org.drools.sample/alarm.drl</string> - <string>org.drools.sample/rules.drl</string> - <string>org.drools.sample/rules2.drl</string> - </files> - <kSessions> - <entry> - <string>org.drools.FireAlarmKBase.session</string> - <org.drools.kproject.KSessionImpl> - <namespace>org.drools</namespace> - <name>FireAlarmKBase.session</name> - <type>stateful</type> - <annotations/> - <kBase reference="../../../.."/> - </org.drools.kproject.KSessionImpl> - </entry> - </kSessions> - <kProject reference="../../../.."/> - </org.drools.kproject.KBaseImpl> - </entry> - <entry> - <string>org.test.KBase1</string> - <org.drools.kproject.KBaseImpl> - <namespace>org.test</namespace> - <name>KBase1</name> - <includes/> - <files> - <string>org.drools.test/rule.drl</string> - <string>org.drools.test/decA.drl</string> - <string>org.drools.test/decB.drl</string> - </files> - <kSessions> - <entry> - <string>org.test.KBase1.session</string> - <org.drools.kproject.KSessionImpl> - <namespace>org.test</namespace> - <name>KBase1.session</name> - <type>stateful</type> - <annotations/> 
- <kBase reference="../../../.."/> - </org.drools.kproject.KSessionImpl> - </entry> - </kSessions> - <kProject reference="../../../.."/> - </org.drools.kproject.KBaseImpl> - </entry> - </kBases> -</org.drools.kproject.KProjectImpl> \ No newline at end of file +<kproject> + <kbases> + <kbase name="FireAlarmKBase" namespace="org.drools"> + <files> + <file>org.drools.sample/alarm.drl</file> + <file>org.drools.sample/rules.drl</file> + <file>org.drools.sample/rules2.drl</file> + </files> + <ksessions> + <ksession name="FireAlarmKBase.session" namespace="org.drools" type="stateful"/> + </ksessions> + </kbase> + <kbase name="KBase1" namespace="org.test"> + <files> + <file>org.drools.test/rule.drl</file> + <file>org.drools.test/decA.drl</file> + <file>org.drools.test/decB.drl</file> + </files> + <ksessions> + <ksession name="KBase1.session" namespace="org.test" type="stateful"/> + </ksessions> + </kbase> + </kbases> +</kproject> \ No newline at end of file
3e714ddd9f062e534c1ee028ac8b2269a062004b
intellij-community
IDEADEV-11483 IE conditional comments support- [in progress]--
a
https://github.com/JetBrains/intellij-community
diff --git a/platform-resources/src/META-INF/XmlPlugin.xml b/platform-resources/src/META-INF/XmlPlugin.xml index 100f7071318fa..2955152781070 100644 --- a/platform-resources/src/META-INF/XmlPlugin.xml +++ b/platform-resources/src/META-INF/XmlPlugin.xml @@ -254,7 +254,7 @@ <xml.xmlSuppressionProvider implementation="com.intellij.codeInspection.DefaultXmlSuppressionProvider" order="last"/> - <!--<multiHostInjector implementation="com.intellij.psi.impl.source.html.HtmlConditionalCommentInjector"/>--> + <multiHostInjector implementation="com.intellij.psi.impl.source.html.HtmlConditionalCommentInjector"/> <intentionAction> <category>XML</category> diff --git a/xml/impl/src/com/intellij/ide/highlighter/HtmlFileHighlighter.java b/xml/impl/src/com/intellij/ide/highlighter/HtmlFileHighlighter.java index b510d831222b9..330ec39728e4d 100644 --- a/xml/impl/src/com/intellij/ide/highlighter/HtmlFileHighlighter.java +++ b/xml/impl/src/com/intellij/ide/highlighter/HtmlFileHighlighter.java @@ -56,6 +56,10 @@ public class HtmlFileHighlighter extends SyntaxHighlighterBase { keys1.put(XmlTokenType.XML_COMMENT_START, XmlHighlighterColors.HTML_COMMENT); keys1.put(XmlTokenType.XML_COMMENT_END, XmlHighlighterColors.HTML_COMMENT); keys1.put(XmlTokenType.XML_COMMENT_CHARACTERS, XmlHighlighterColors.HTML_COMMENT); + keys1.put(XmlTokenType.XML_CONDITIONAL_COMMENT_END, XmlHighlighterColors.HTML_COMMENT); + keys1.put(XmlTokenType.XML_CONDITIONAL_COMMENT_END_START, XmlHighlighterColors.HTML_COMMENT); + keys1.put(XmlTokenType.XML_CONDITIONAL_COMMENT_START, XmlHighlighterColors.HTML_COMMENT); + keys1.put(XmlTokenType.XML_CONDITIONAL_COMMENT_START_END, XmlHighlighterColors.HTML_COMMENT); keys1.put(XmlTokenType.XML_START_TAG_START, XmlHighlighterColors.HTML_TAG); keys1.put(XmlTokenType.XML_END_TAG_START, XmlHighlighterColors.HTML_TAG); diff --git a/xml/impl/src/com/intellij/lang/html/HtmlParsing.java b/xml/impl/src/com/intellij/lang/html/HtmlParsing.java index 49774b0b6c746..ce9108a27525b 100644 --- 
a/xml/impl/src/com/intellij/lang/html/HtmlParsing.java +++ b/xml/impl/src/com/intellij/lang/html/HtmlParsing.java @@ -416,7 +416,9 @@ protected void parseComment() { advance(); while (true) { final IElementType tt = token(); - if (tt == XmlTokenType.XML_COMMENT_CHARACTERS || tt == XmlTokenType.XML_CHAR_ENTITY_REF) { + if (tt == XmlTokenType.XML_COMMENT_CHARACTERS || tt == XmlTokenType.XML_CHAR_ENTITY_REF || tt == XmlTokenType.XML_CONDITIONAL_COMMENT_START + || tt == XmlTokenType.XML_CONDITIONAL_COMMENT_START_END || tt == XmlTokenType.XML_CONDITIONAL_COMMENT_END_START + || tt == XmlTokenType.XML_CONDITIONAL_COMMENT_END) { advance(); continue; } diff --git a/xml/impl/src/com/intellij/lexer/_HtmlLexer.java b/xml/impl/src/com/intellij/lexer/_HtmlLexer.java index 9e1954ed1ba7e..0911995fa5282 100644 --- a/xml/impl/src/com/intellij/lexer/_HtmlLexer.java +++ b/xml/impl/src/com/intellij/lexer/_HtmlLexer.java @@ -1,4 +1,4 @@ -/* The following code was generated by JFlex 1.4.1 on 12/18/07 7:25 PM */ +/* The following code was generated by JFlex 1.4.1 on 1/20/09 3:22 PM */ /* It's an automatically generated code. Do not modify it. 
*/ package com.intellij.lexer; @@ -8,9 +8,9 @@ /** - * This class is a scanner generated by + * This class is a scanner generated by * <a href="http://www.jflex.de/">JFlex</a> 1.4.1 - * on 12/18/07 7:25 PM from the specification file + * on 1/20/09 3:22 PM from the specification file * <tt>/Users/spleaner/workspace/IDEA/tools/lexer/_HtmlLexer.flex</tt> */ public class _HtmlLexer implements FlexLexer { @@ -19,6 +19,7 @@ public class _HtmlLexer implements FlexLexer { /** lexical states */ public static final int PROCESSING_INSTRUCTION = 9; + public static final int C_COMMENT_END = 14; public static final int END_TAG_NAME = 4; public static final int START_TAG_NAME = 3; public static final int ATTRIBUTE_VALUE_SQ = 8; @@ -30,98 +31,100 @@ public class _HtmlLexer implements FlexLexer { public static final int TAG_ATTRIBUTES = 5; public static final int COMMENT = 2; public static final int ATTRIBUTE_VALUE_START = 6; + public static final int C_COMMENT_START = 13; public static final int END_TAG_NAME2 = 11; - /** + /** * Translates characters to character classes */ - private static final String ZZ_CMAP_PACKED = - "\11\0\2\3\1\0\2\3\22\0\1\3\1\12\1\7\1\6\1\37"+ - "\1\0\1\47\1\10\5\0\1\5\1\4\1\44\12\2\1\1\1\50"+ - "\1\11\1\46\1\42\1\43\1\0\1\57\1\34\1\16\1\13\1\24"+ - "\1\57\1\1\1\25\1\36\2\1\1\30\1\26\1\1\1\14\1\22"+ - "\3\1\1\17\1\32\3\1\1\21\1\1\1\0\1\45\2\0\1\1"+ - "\1\0\1\52\1\35\1\16\1\13\1\24\1\57\1\51\1\25\1\36"+ - "\2\1\1\31\1\27\1\55\1\15\1\23\1\54\1\1\1\53\1\20"+ - "\1\33\2\1\1\56\1\21\1\1\1\40\1\0\1\41\54\0\1\1"+ - "\12\0\1\1\4\0\1\1\5\0\27\1\1\0\37\1\1\0\u013f\1"+ - "\31\0\162\1\4\0\14\1\16\0\5\1\11\0\1\1\213\0\1\1"+ - "\13\0\1\1\1\0\3\1\1\0\1\1\1\0\24\1\1\0\54\1"+ - "\1\0\46\1\1\0\5\1\4\0\202\1\10\0\105\1\1\0\46\1"+ - "\2\0\2\1\6\0\20\1\41\0\46\1\2\0\1\1\7\0\47\1"+ - "\110\0\33\1\5\0\3\1\56\0\32\1\5\0\13\1\43\0\2\1"+ - "\1\0\143\1\1\0\1\1\17\0\2\1\7\0\2\1\12\0\3\1"+ - "\2\0\1\1\20\0\1\1\1\0\36\1\35\0\3\1\60\0\46\1"+ - 
"\13\0\1\1\u0152\0\66\1\3\0\1\1\22\0\1\1\7\0\12\1"+ - "\43\0\10\1\2\0\2\1\2\0\26\1\1\0\7\1\1\0\1\1"+ - "\3\0\4\1\3\0\1\1\36\0\2\1\1\0\3\1\16\0\2\1"+ - "\23\0\6\1\4\0\2\1\2\0\26\1\1\0\7\1\1\0\2\1"+ - "\1\0\2\1\1\0\2\1\37\0\4\1\1\0\1\1\23\0\3\1"+ - "\20\0\11\1\1\0\3\1\1\0\26\1\1\0\7\1\1\0\2\1"+ - "\1\0\5\1\3\0\1\1\22\0\1\1\17\0\2\1\43\0\10\1"+ - "\2\0\2\1\2\0\26\1\1\0\7\1\1\0\2\1\1\0\5\1"+ - "\3\0\1\1\36\0\2\1\1\0\3\1\17\0\1\1\21\0\1\1"+ - "\1\0\6\1\3\0\3\1\1\0\4\1\3\0\2\1\1\0\1\1"+ - "\1\0\2\1\3\0\2\1\3\0\3\1\3\0\10\1\1\0\3\1"+ - "\113\0\10\1\1\0\3\1\1\0\27\1\1\0\12\1\1\0\5\1"+ - "\46\0\2\1\43\0\10\1\1\0\3\1\1\0\27\1\1\0\12\1"+ - "\1\0\5\1\3\0\1\1\40\0\1\1\1\0\2\1\43\0\10\1"+ - "\1\0\3\1\1\0\27\1\1\0\20\1\46\0\2\1\43\0\22\1"+ - "\3\0\30\1\1\0\11\1\1\0\1\1\2\0\7\1\72\0\60\1"+ - "\1\0\2\1\14\0\7\1\72\0\2\1\1\0\1\1\2\0\2\1"+ - "\1\0\1\1\2\0\1\1\6\0\4\1\1\0\7\1\1\0\3\1"+ - "\1\0\1\1\1\0\1\1\2\0\2\1\1\0\4\1\1\0\2\1"+ - "\11\0\1\1\2\0\5\1\1\0\1\1\25\0\2\1\42\0\1\1"+ - "\77\0\10\1\1\0\42\1\35\0\4\1\164\0\42\1\1\0\5\1"+ - "\1\0\2\1\45\0\6\1\112\0\46\1\12\0\51\1\7\0\132\1"+ - "\5\0\104\1\5\0\122\1\6\0\7\1\1\0\77\1\1\0\1\1"+ - "\1\0\4\1\2\0\7\1\1\0\1\1\1\0\4\1\2\0\47\1"+ - "\1\0\1\1\1\0\4\1\2\0\37\1\1\0\1\1\1\0\4\1"+ - "\2\0\7\1\1\0\1\1\1\0\4\1\2\0\7\1\1\0\7\1"+ - "\1\0\27\1\1\0\37\1\1\0\1\1\1\0\4\1\2\0\7\1"+ - "\1\0\47\1\1\0\23\1\105\0\125\1\14\0\u026c\1\2\0\10\1"+ - "\12\0\32\1\5\0\113\1\25\0\15\1\1\0\4\1\16\0\22\1"+ - "\16\0\22\1\16\0\15\1\1\0\3\1\17\0\64\1\43\0\1\1"+ - "\4\0\1\1\103\0\130\1\10\0\51\1\127\0\35\1\63\0\36\1"+ - "\2\0\5\1\u038b\0\154\1\224\0\234\1\4\0\132\1\6\0\26\1"+ - "\2\0\6\1\2\0\46\1\2\0\6\1\2\0\10\1\1\0\1\1"+ - "\1\0\1\1\1\0\1\1\1\0\37\1\2\0\65\1\1\0\7\1"+ - "\1\0\1\1\3\0\3\1\1\0\7\1\3\0\4\1\2\0\6\1"+ - "\4\0\15\1\5\0\3\1\1\0\7\1\164\0\1\1\15\0\1\1"+ - "\202\0\1\1\4\0\1\1\2\0\12\1\1\0\1\1\3\0\5\1"+ - "\6\0\1\1\1\0\1\1\1\0\1\1\1\0\4\1\1\0\3\1"+ - "\1\0\7\1\3\0\3\1\5\0\5\1\u0ebb\0\2\1\52\0\5\1"+ - "\5\0\2\1\4\0\126\1\6\0\3\1\1\0\132\1\1\0\4\1"+ - 
"\5\0\50\1\4\0\136\1\21\0\30\1\70\0\20\1\u0200\0\u19b6\1"+ - "\112\0\u51a6\1\132\0\u048d\1\u0773\0\u2ba4\1\u215c\0\u012e\1\2\0\73\1"+ - "\225\0\7\1\14\0\5\1\5\0\1\1\1\0\12\1\1\0\15\1"+ - "\1\0\5\1\1\0\1\1\1\0\2\1\1\0\2\1\1\0\154\1"+ - "\41\0\u016b\1\22\0\100\1\2\0\66\1\50\0\14\1\164\0\5\1"+ - "\1\0\207\1\44\0\32\1\6\0\32\1\13\0\131\1\3\0\6\1"+ - "\2\0\6\1\2\0\6\1\2\0\3\1\43\0"; - - /** + private static final String ZZ_CMAP_PACKED = + "\11\0\2\3\1\0\2\3\22\0\1\3\1\13\1\10\1\7\1\40"+ + "\1\0\1\45\1\11\1\44\1\44\3\0\1\6\1\5\1\47\12\2"+ + "\1\4\1\54\1\12\1\53\1\43\1\46\1\0\1\63\1\35\1\17"+ + "\1\14\1\25\1\63\1\1\1\26\1\37\2\1\1\31\1\27\1\1"+ + "\1\15\1\23\3\1\1\20\1\33\3\1\1\22\1\1\1\50\1\52"+ + "\1\51\1\0\1\4\1\0\1\56\1\36\1\17\1\14\1\25\1\63"+ + "\1\55\1\26\1\37\2\1\1\32\1\30\1\61\1\16\1\24\1\60"+ + "\1\1\1\57\1\21\1\34\2\1\1\62\1\22\1\1\1\41\1\44"+ + "\1\42\54\0\1\1\12\0\1\1\4\0\1\1\5\0\27\1\1\0"+ + "\37\1\1\0\u013f\1\31\0\162\1\4\0\14\1\16\0\5\1\11\0"+ + "\1\1\213\0\1\1\13\0\1\1\1\0\3\1\1\0\1\1\1\0"+ + "\24\1\1\0\54\1\1\0\46\1\1\0\5\1\4\0\202\1\10\0"+ + "\105\1\1\0\46\1\2\0\2\1\6\0\20\1\41\0\46\1\2\0"+ + "\1\1\7\0\47\1\110\0\33\1\5\0\3\1\56\0\32\1\5\0"+ + "\13\1\43\0\2\1\1\0\143\1\1\0\1\1\17\0\2\1\7\0"+ + "\2\1\12\0\3\1\2\0\1\1\20\0\1\1\1\0\36\1\35\0"+ + "\3\1\60\0\46\1\13\0\1\1\u0152\0\66\1\3\0\1\1\22\0"+ + "\1\1\7\0\12\1\43\0\10\1\2\0\2\1\2\0\26\1\1\0"+ + "\7\1\1\0\1\1\3\0\4\1\3\0\1\1\36\0\2\1\1\0"+ + "\3\1\16\0\2\1\23\0\6\1\4\0\2\1\2\0\26\1\1\0"+ + "\7\1\1\0\2\1\1\0\2\1\1\0\2\1\37\0\4\1\1\0"+ + "\1\1\23\0\3\1\20\0\11\1\1\0\3\1\1\0\26\1\1\0"+ + "\7\1\1\0\2\1\1\0\5\1\3\0\1\1\22\0\1\1\17\0"+ + "\2\1\43\0\10\1\2\0\2\1\2\0\26\1\1\0\7\1\1\0"+ + "\2\1\1\0\5\1\3\0\1\1\36\0\2\1\1\0\3\1\17\0"+ + "\1\1\21\0\1\1\1\0\6\1\3\0\3\1\1\0\4\1\3\0"+ + "\2\1\1\0\1\1\1\0\2\1\3\0\2\1\3\0\3\1\3\0"+ + "\10\1\1\0\3\1\113\0\10\1\1\0\3\1\1\0\27\1\1\0"+ + "\12\1\1\0\5\1\46\0\2\1\43\0\10\1\1\0\3\1\1\0"+ + "\27\1\1\0\12\1\1\0\5\1\3\0\1\1\40\0\1\1\1\0"+ + 
"\2\1\43\0\10\1\1\0\3\1\1\0\27\1\1\0\20\1\46\0"+ + "\2\1\43\0\22\1\3\0\30\1\1\0\11\1\1\0\1\1\2\0"+ + "\7\1\72\0\60\1\1\0\2\1\14\0\7\1\72\0\2\1\1\0"+ + "\1\1\2\0\2\1\1\0\1\1\2\0\1\1\6\0\4\1\1\0"+ + "\7\1\1\0\3\1\1\0\1\1\1\0\1\1\2\0\2\1\1\0"+ + "\4\1\1\0\2\1\11\0\1\1\2\0\5\1\1\0\1\1\25\0"+ + "\2\1\42\0\1\1\77\0\10\1\1\0\42\1\35\0\4\1\164\0"+ + "\42\1\1\0\5\1\1\0\2\1\45\0\6\1\112\0\46\1\12\0"+ + "\51\1\7\0\132\1\5\0\104\1\5\0\122\1\6\0\7\1\1\0"+ + "\77\1\1\0\1\1\1\0\4\1\2\0\7\1\1\0\1\1\1\0"+ + "\4\1\2\0\47\1\1\0\1\1\1\0\4\1\2\0\37\1\1\0"+ + "\1\1\1\0\4\1\2\0\7\1\1\0\1\1\1\0\4\1\2\0"+ + "\7\1\1\0\7\1\1\0\27\1\1\0\37\1\1\0\1\1\1\0"+ + "\4\1\2\0\7\1\1\0\47\1\1\0\23\1\105\0\125\1\14\0"+ + "\u026c\1\2\0\10\1\12\0\32\1\5\0\113\1\25\0\15\1\1\0"+ + "\4\1\16\0\22\1\16\0\22\1\16\0\15\1\1\0\3\1\17\0"+ + "\64\1\43\0\1\1\4\0\1\1\103\0\130\1\10\0\51\1\127\0"+ + "\35\1\63\0\36\1\2\0\5\1\u038b\0\154\1\224\0\234\1\4\0"+ + "\132\1\6\0\26\1\2\0\6\1\2\0\46\1\2\0\6\1\2\0"+ + "\10\1\1\0\1\1\1\0\1\1\1\0\1\1\1\0\37\1\2\0"+ + "\65\1\1\0\7\1\1\0\1\1\3\0\3\1\1\0\7\1\3\0"+ + "\4\1\2\0\6\1\4\0\15\1\5\0\3\1\1\0\7\1\164\0"+ + "\1\1\15\0\1\1\202\0\1\1\4\0\1\1\2\0\12\1\1\0"+ + "\1\1\3\0\5\1\6\0\1\1\1\0\1\1\1\0\1\1\1\0"+ + "\4\1\1\0\3\1\1\0\7\1\3\0\3\1\5\0\5\1\u0ebb\0"+ + "\2\1\52\0\5\1\5\0\2\1\4\0\126\1\6\0\3\1\1\0"+ + "\132\1\1\0\4\1\5\0\50\1\4\0\136\1\21\0\30\1\70\0"+ + "\20\1\u0200\0\u19b6\1\112\0\u51a6\1\132\0\u048d\1\u0773\0\u2ba4\1\u215c\0"+ + "\u012e\1\2\0\73\1\225\0\7\1\14\0\5\1\5\0\1\1\1\0"+ + "\12\1\1\0\15\1\1\0\5\1\1\0\1\1\1\0\2\1\1\0"+ + "\2\1\1\0\154\1\41\0\u016b\1\22\0\100\1\2\0\66\1\50\0"+ + "\14\1\164\0\5\1\1\0\207\1\44\0\32\1\6\0\32\1\13\0"+ + "\131\1\3\0\6\1\2\0\6\1\2\0\6\1\2\0\3\1\43\0"; + + /** * Translates characters to character classes */ private static final char [] ZZ_CMAP = zzUnpackCMap(ZZ_CMAP_PACKED); - /** + /** * Translates DFA states to action switch labels. 
*/ private static final int [] ZZ_ACTION = zzUnpackAction(); private static final String ZZ_ACTION_PACKED_0 = - "\1\1\5\0\1\2\2\0\1\3\3\0\1\1\1\4"+ - "\5\1\1\5\1\6\4\5\1\7\1\5\3\10\1\11"+ - "\1\12\1\13\2\11\1\14\1\15\1\11\1\16\1\2"+ - "\1\17\1\20\2\2\1\21\1\22\4\21\1\3\1\23"+ - "\1\5\1\24\3\25\1\26\1\27\1\0\1\30\1\31"+ - "\14\0\1\31\1\32\2\2\5\0\1\33\1\34\1\35"+ - "\1\36\11\0\1\37\1\0\1\40\1\2\1\40\1\41"+ - "\1\0\1\42\3\0\1\14\3\0\1\43\2\0\1\44"; + "\1\1\5\0\1\2\2\0\1\3\5\0\1\1\1\4"+ + "\5\1\1\5\1\6\4\5\1\7\1\5\4\10\1\11"+ + "\1\12\1\13\1\14\2\12\1\15\1\16\1\12\1\17"+ + "\1\2\1\20\1\21\2\2\1\22\1\23\4\22\1\3"+ + "\1\24\1\5\1\25\3\26\1\27\1\10\2\27\1\30"+ + "\1\31\1\32\1\0\1\33\1\34\15\0\1\34\1\35"+ + "\2\2\3\0\1\36\2\0\1\37\1\40\1\41\1\42"+ + "\11\0\1\43\1\44\1\0\1\45\1\2\1\45\1\46"+ + "\1\0\1\47\3\0\1\15\3\0\1\50\2\0\1\51"; private static int [] zzUnpackAction() { - int [] result = new int[116]; + int [] result = new int[128]; int offset = 0; offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result); return result; @@ -140,30 +143,31 @@ private static int zzUnpackAction(String packed, int offset, int [] result) { } - /** + /** * Translates a state to a row index in the transition table */ private static final int [] ZZ_ROWMAP = zzUnpackRowMap(); private static final String ZZ_ROWMAP_PACKED_0 = - "\0\0\0\60\0\140\0\220\0\300\0\360\0\u0120\0\u0150"+ - "\0\u0180\0\u01b0\0\u01e0\0\u0210\0\u0240\0\u0270\0\u02a0\0\u02d0"+ - "\0\u0300\0\u0330\0\u0360\0\u0390\0\u02d0\0\u03c0\0\u03f0\0\u0420"+ - "\0\u0450\0\u0480\0\u02d0\0\u0390\0\u02d0\0\u04b0\0\u0390\0\u02d0"+ - "\0\u04e0\0\u02d0\0\u0390\0\u0510\0\u0540\0\u02d0\0\u0570\0\u02d0"+ - "\0\u05a0\0\u02d0\0\u02d0\0\u05d0\0\u0600\0\u02d0\0\u02d0\0\u0630"+ - "\0\u0660\0\u0390\0\u0690\0\u06c0\0\u02d0\0\u06f0\0\u0720\0\u02d0"+ - "\0\u0570\0\u0390\0\u0750\0\u0780\0\u07b0\0\u02d0\0\u07e0\0\u0810"+ - "\0\u0840\0\u0870\0\u08a0\0\u08d0\0\u0900\0\u0930\0\u03f0\0\u0420"+ - 
"\0\u0960\0\u0990\0\u09c0\0\u02d0\0\u02d0\0\u09f0\0\u0a20\0\u0a50"+ - "\0\u0a80\0\u0ab0\0\u0ae0\0\u0b10\0\u0b40\0\u0b70\0\u02d0\0\u02d0"+ - "\0\u0ba0\0\u0bd0\0\u0c00\0\u0c30\0\u0c60\0\u0c90\0\u0cc0\0\u0cf0"+ - "\0\u0d20\0\u02d0\0\u0d50\0\u05a0\0\u0d80\0\u02d0\0\u02d0\0\u0db0"+ - "\0\u02d0\0\u0de0\0\u0e10\0\u0e40\0\u02d0\0\u0e70\0\u0ea0\0\u0ed0"+ - "\0\u02d0\0\u0f00\0\u0f30\0\u02d0"; + "\0\0\0\64\0\150\0\234\0\320\0\u0104\0\u0138\0\u016c"+ + "\0\u01a0\0\u01d4\0\u0208\0\u023c\0\u0270\0\u02a4\0\u02d8\0\u030c"+ + "\0\u0340\0\u0374\0\u03a8\0\u03dc\0\u0410\0\u0444\0\u0374\0\u0478"+ + "\0\u04ac\0\u04e0\0\u0514\0\u0548\0\u0374\0\u0410\0\u0374\0\u057c"+ + "\0\u05b0\0\u0410\0\u0374\0\u0374\0\u05e4\0\u0374\0\u0410\0\u0618"+ + "\0\u064c\0\u0374\0\u0680\0\u0374\0\u06b4\0\u0374\0\u0374\0\u06e8"+ + "\0\u071c\0\u0374\0\u0374\0\u0750\0\u0410\0\u0784\0\u07b8\0\u07ec"+ + "\0\u0374\0\u0820\0\u0854\0\u0374\0\u0410\0\u0680\0\u0374\0\u0888"+ + "\0\u0410\0\u08bc\0\u0374\0\u08f0\0\u0924\0\u0958\0\u0374\0\u098c"+ + "\0\u09c0\0\u09f4\0\u0a28\0\u0a5c\0\u0a90\0\u0ac4\0\u0af8\0\u04ac"+ + "\0\u04e0\0\u0b2c\0\u0b60\0\u0b94\0\u0bc8\0\u0374\0\u0374\0\u0bfc"+ + "\0\u0c30\0\u0c64\0\u0c98\0\u0ccc\0\u0374\0\u0d00\0\u0d34\0\u0d68"+ + "\0\u0d9c\0\u0374\0\u0374\0\u0dd0\0\u0e04\0\u0e38\0\u0e6c\0\u0ea0"+ + "\0\u0ed4\0\u0f08\0\u0f3c\0\u0f70\0\u0374\0\u0374\0\u0fa4\0\u06b4"+ + "\0\u0fd8\0\u0374\0\u0374\0\u100c\0\u0374\0\u1040\0\u1074\0\u10a8"+ + "\0\u0374\0\u10dc\0\u1110\0\u1144\0\u0374\0\u1178\0\u11ac\0\u0374"; private static int [] zzUnpackRowMap() { - int [] result = new int[116]; + int [] result = new int[128]; int offset = 0; offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result); return result; @@ -180,83 +184,89 @@ private static int zzUnpackRowMap(String packed, int offset, int [] result) { return j; } - /** + /** * The transition table of the DFA */ private static final int [] ZZ_TRANS = zzUnpackTrans(); private static final String ZZ_TRANS_PACKED_0 = - 
"\3\16\1\17\2\16\1\20\2\16\1\21\25\16\1\22"+ - "\5\16\1\23\1\16\1\24\10\16\3\25\1\26\3\25"+ - "\1\27\1\30\11\25\2\31\1\25\1\32\14\25\1\33"+ - "\4\25\1\34\10\25\5\35\1\36\41\35\1\37\10\35"+ - "\1\40\1\41\1\40\1\26\5\40\1\42\1\40\24\41"+ - "\10\40\1\43\1\40\7\41\1\40\1\41\1\40\1\26"+ - "\5\40\1\44\1\40\24\41\10\40\1\43\1\40\7\41"+ - "\1\40\1\45\1\40\1\26\7\40\24\45\3\40\1\46"+ - "\1\40\1\47\1\40\1\50\1\43\1\40\7\45\3\51"+ - "\1\26\3\51\1\52\1\53\26\51\1\54\2\51\1\46"+ - "\1\51\1\55\13\51\7\56\1\57\27\56\1\60\5\56"+ - "\1\61\1\56\1\62\20\56\1\57\26\56\1\63\5\56"+ - "\1\61\1\56\1\62\10\56\42\64\1\65\1\66\14\64"+ - "\6\25\1\67\2\25\1\42\35\25\1\34\10\25\3\40"+ - "\1\26\2\40\1\45\2\40\1\44\30\40\1\46\4\40"+ - "\1\43\10\40\3\70\1\26\5\70\1\42\30\70\1\46"+ - "\1\70\1\71\2\70\1\72\10\70\3\16\1\0\2\16"+ - "\1\0\2\16\1\0\25\16\1\0\5\16\1\23\1\16"+ - "\1\0\10\16\3\0\1\17\135\0\1\73\4\0\1\74"+ - "\3\0\1\75\24\73\4\0\1\76\1\77\4\0\7\73"+ - "\40\0\1\100\17\0\3\16\1\0\5\16\1\0\33\16"+ - "\1\23\1\16\1\0\10\16\1\0\1\101\4\0\1\102"+ - "\4\0\16\101\1\103\5\101\12\0\1\103\1\104\1\101"+ - "\1\105\1\106\2\101\3\0\1\26\54\0\7\107\1\56"+ - "\50\107\10\110\1\56\47\110\32\0\2\111\43\0\2\112"+ - "\44\0\1\113\53\0\2\41\1\0\2\41\5\0\24\41"+ - "\12\0\7\41\44\0\1\114\14\0\2\45\1\0\2\45"+ - "\5\0\24\45\12\0\7\45\42\0\1\115\15\0\3\51"+ - "\1\0\3\51\2\0\31\51\1\0\1\51\1\116\16\51"+ - "\1\0\3\51\2\0\27\51\1\117\1\51\1\0\1\51"+ - "\1\116\55\51\1\115\1\51\1\116\13\51\40\0\1\120"+ - "\56\0\1\56\60\0\1\121\17\0\42\64\1\0\1\122"+ - "\56\64\1\65\15\64\1\0\2\67\1\0\2\67\5\0"+ - "\24\67\12\0\7\67\1\0\2\73\1\0\2\73\5\0"+ - "\24\73\12\0\7\73\1\0\2\74\1\0\2\74\5\0"+ - "\24\74\12\0\7\74\5\0\1\123\5\0\1\124\45\0"+ - "\1\125\4\0\1\126\4\0\24\125\12\0\7\125\41\100"+ - "\1\127\16\100\1\0\2\101\1\0\2\101\5\0\24\101"+ - "\11\0\1\130\7\101\2\0\1\131\53\0\1\132\2\0"+ - "\2\101\1\0\2\101\5\0\5\101\1\133\16\101\11\0"+ - "\1\130\7\101\1\0\2\101\1\0\2\101\5\0\10\101"+ - "\1\134\3\101\1\135\7\101\11\0\1\130\7\101\1\0"+ - 
"\2\101\1\0\2\101\5\0\20\101\1\136\3\101\11\0"+ - "\1\130\7\101\1\0\2\101\1\0\2\101\5\0\22\101"+ - "\1\137\1\101\11\0\1\130\7\101\34\0\2\140\50\0"+ - "\2\141\33\0\1\113\36\0\1\142\15\0\42\51\1\0"+ - "\1\51\1\116\13\51\3\117\1\143\3\117\2\143\30\117"+ - "\1\144\1\143\1\117\1\145\13\117\7\120\1\0\31\120"+ - "\1\146\16\120\10\121\1\0\30\121\1\146\16\121\42\64"+ - "\1\0\15\64\5\0\1\147\66\0\2\150\43\0\2\125"+ - "\1\0\2\125\5\0\24\125\12\0\7\125\1\0\2\126"+ - "\1\0\2\126\5\0\24\126\12\0\7\126\2\0\1\131"+ - "\45\0\1\151\11\0\1\152\10\0\1\152\2\0\1\152"+ - "\5\0\1\152\7\0\2\152\14\0\1\152\4\0\1\152"+ - "\1\0\2\101\1\0\2\101\5\0\24\101\11\0\1\151"+ - "\7\101\1\0\2\101\1\0\2\101\5\0\2\101\1\153"+ - "\21\101\11\0\1\130\7\101\1\0\2\101\1\0\2\101"+ - "\5\0\10\101\1\133\13\101\11\0\1\130\7\101\1\0"+ - "\2\101\1\0\2\101\5\0\2\101\1\103\21\101\11\0"+ - "\1\130\7\101\1\0\2\101\1\0\2\101\5\0\24\101"+ - "\11\0\1\130\2\101\1\135\4\101\30\0\2\154\56\0"+ - "\2\155\26\0\41\143\1\146\16\143\41\117\1\144\1\143"+ - "\1\117\1\145\13\117\16\0\1\156\43\0\1\152\10\0"+ - "\1\152\2\0\1\152\5\0\1\152\7\0\2\152\12\0"+ - "\1\151\1\0\1\152\4\0\1\152\1\0\2\101\1\0"+ - "\2\101\5\0\24\101\11\0\1\130\2\101\1\133\4\101"+ - "\36\0\1\157\40\0\2\160\55\0\1\161\62\0\1\162"+ - "\60\0\2\163\60\0\1\164\33\0"; + "\3\20\1\21\3\20\1\22\2\20\1\23\25\20\1\24"+ + "\4\20\1\25\4\20\1\26\11\20\3\27\1\30\4\27"+ + "\1\31\1\32\11\27\2\33\1\27\1\34\14\27\1\35"+ + "\1\27\1\36\16\27\6\37\1\40\3\37\1\41\32\37"+ + "\1\42\2\37\1\43\13\37\1\44\1\45\1\44\1\30"+ + "\1\45\5\44\1\46\1\44\24\45\5\44\1\47\7\44"+ + "\7\45\1\44\1\45\1\44\1\30\1\45\5\44\1\50"+ + "\1\44\24\45\5\44\1\47\7\44\7\45\1\44\1\51"+ + "\1\44\1\30\1\51\7\44\24\51\3\44\1\52\1\44"+ + "\1\47\1\44\1\53\3\44\1\54\1\44\7\51\3\55"+ + "\1\30\4\55\1\56\1\57\26\55\1\60\2\55\1\52"+ + "\3\55\1\61\14\55\10\62\1\63\27\62\1\64\4\62"+ + "\1\65\4\62\1\66\22\62\1\63\26\62\1\67\4\62"+ + "\1\65\4\62\1\66\11\62\43\70\1\71\2\70\1\72"+ + "\15\70\7\27\1\73\2\27\1\46\32\27\1\36\16\27"+ + 
"\3\44\1\30\3\44\1\51\2\44\1\50\30\44\1\52"+ + "\1\44\1\47\16\44\3\74\1\30\6\74\1\46\30\74"+ + "\1\52\1\74\1\75\1\74\1\76\14\74\1\77\1\100"+ + "\12\77\24\100\5\77\1\101\3\77\1\102\3\77\7\100"+ + "\1\77\1\100\12\77\24\100\5\77\1\101\3\77\1\103"+ + "\3\77\7\100\3\20\1\0\3\20\1\0\2\20\1\0"+ + "\25\20\1\0\4\20\1\0\4\20\1\26\11\20\3\0"+ + "\1\21\145\0\1\104\2\0\1\104\2\0\1\105\3\0"+ + "\1\106\24\104\6\0\1\107\1\110\5\0\7\104\41\0"+ + "\1\111\23\0\1\112\2\0\1\112\2\0\1\113\4\0"+ + "\16\112\1\114\5\112\15\0\1\114\1\115\1\112\1\116"+ + "\1\117\2\112\3\20\1\0\6\20\1\0\32\20\1\0"+ + "\4\20\1\26\11\20\3\0\1\30\60\0\10\120\1\62"+ + "\53\120\11\121\1\62\52\121\33\0\2\122\47\0\2\123"+ + "\50\0\1\124\70\0\1\125\51\0\2\45\1\0\3\45"+ + "\5\0\24\45\15\0\7\45\47\0\1\126\15\0\2\51"+ + "\1\0\3\51\5\0\24\51\15\0\7\51\43\0\1\127"+ + "\20\0\3\55\1\0\4\55\2\0\31\55\1\0\3\55"+ + "\1\130\17\55\1\0\4\55\2\0\27\55\1\131\1\55"+ + "\1\0\3\55\1\130\57\55\1\127\3\55\1\130\14\55"+ + "\41\0\1\132\62\0\1\62\64\0\1\133\22\0\43\70"+ + "\1\0\2\70\1\134\60\70\1\71\20\70\1\0\2\73"+ + "\1\0\3\73\5\0\24\73\15\0\7\73\1\0\3\100"+ + "\1\0\1\100\5\0\25\100\4\0\2\100\7\0\7\100"+ + "\43\0\1\135\21\0\2\104\1\0\3\104\5\0\24\104"+ + "\15\0\7\104\1\0\2\105\1\0\3\105\5\0\24\105"+ + "\15\0\7\105\6\0\1\136\5\0\1\137\50\0\1\140"+ + "\2\0\1\140\2\0\1\141\4\0\24\140\15\0\7\140"+ + "\42\111\1\142\21\111\1\0\2\112\1\0\3\112\5\0"+ + "\24\112\14\0\1\143\7\112\2\0\1\144\57\0\1\145"+ + "\2\0\2\112\1\0\3\112\5\0\5\112\1\146\16\112"+ + "\14\0\1\143\7\112\1\0\2\112\1\0\3\112\5\0"+ + "\10\112\1\147\3\112\1\150\7\112\14\0\1\143\7\112"+ + "\1\0\2\112\1\0\3\112\5\0\20\112\1\151\3\112"+ + "\14\0\1\143\7\112\1\0\2\112\1\0\3\112\5\0"+ + "\22\112\1\152\1\112\14\0\1\143\7\112\35\0\2\153"+ + "\54\0\2\154\36\0\1\124\37\0\1\155\70\0\1\156"+ + "\13\0\43\55\1\0\3\55\1\130\14\55\3\131\1\157"+ + "\4\131\2\157\30\131\1\160\1\157\3\131\1\161\14\131"+ + "\10\132\1\0\31\132\1\162\21\132\11\133\1\0\30\133"+ + 
"\1\162\21\133\43\70\1\0\20\70\6\0\1\163\72\0"+ + "\2\164\46\0\2\140\1\0\3\140\5\0\24\140\15\0"+ + "\7\140\1\0\2\141\1\0\3\141\5\0\24\141\15\0"+ + "\7\141\2\0\1\144\51\0\1\165\11\0\1\166\11\0"+ + "\1\166\2\0\1\166\5\0\1\166\7\0\2\166\17\0"+ + "\1\166\4\0\1\166\1\0\2\112\1\0\3\112\5\0"+ + "\24\112\14\0\1\165\7\112\1\0\2\112\1\0\3\112"+ + "\5\0\2\112\1\167\21\112\14\0\1\143\7\112\1\0"+ + "\2\112\1\0\3\112\5\0\10\112\1\146\13\112\14\0"+ + "\1\143\7\112\1\0\2\112\1\0\3\112\5\0\2\112"+ + "\1\114\21\112\14\0\1\143\7\112\1\0\2\112\1\0"+ + "\3\112\5\0\24\112\14\0\1\143\2\112\1\150\4\112"+ + "\31\0\2\170\62\0\2\171\31\0\42\157\1\162\21\157"+ + "\42\131\1\160\1\157\3\131\1\161\14\131\17\0\1\172"+ + "\46\0\1\166\11\0\1\166\2\0\1\166\5\0\1\166"+ + "\7\0\2\166\15\0\1\165\1\0\1\166\4\0\1\166"+ + "\1\0\2\112\1\0\3\112\5\0\24\112\14\0\1\143"+ + "\2\112\1\146\4\112\37\0\1\173\44\0\2\174\61\0"+ + "\1\175\66\0\1\176\64\0\2\177\64\0\1\200\36\0"; private static int [] zzUnpackTrans() { - int [] result = new int[3936]; + int [] result = new int[4576]; int offset = 0; offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result); return result; @@ -297,16 +307,17 @@ private static int zzUnpackTrans(String packed, int offset, int [] result) { private static final int [] ZZ_ATTRIBUTE = zzUnpackAttribute(); private static final String ZZ_ATTRIBUTE_PACKED_0 = - "\1\1\5\0\1\1\2\0\1\1\3\0\2\1\1\11"+ - "\4\1\1\11\5\1\1\11\1\1\1\11\2\1\1\11"+ + "\1\1\5\0\1\1\2\0\1\1\5\0\2\1\1\11"+ + "\4\1\1\11\5\1\1\11\1\1\1\11\3\1\2\11"+ "\1\1\1\11\3\1\1\11\1\1\1\11\1\1\2\11"+ - "\2\1\2\11\5\1\1\11\2\1\1\11\4\1\1\0"+ - "\1\11\1\1\14\0\2\11\2\1\5\0\2\1\2\11"+ - "\11\0\1\11\1\0\2\1\2\11\1\0\1\11\3\0"+ - "\1\11\3\0\1\11\2\0\1\11"; + "\2\1\2\11\5\1\1\11\2\1\1\11\2\1\1\11"+ + "\3\1\1\11\2\1\1\0\1\11\1\1\15\0\2\11"+ + "\2\1\3\0\1\11\2\0\2\1\2\11\11\0\2\11"+ + "\1\0\2\1\2\11\1\0\1\11\3\0\1\11\3\0"+ + "\1\11\2\0\1\11"; private static int [] zzUnpackAttribute() { - int [] result = new int[116]; + int [] result = new 
int[128]; int offset = 0; offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result); return result; @@ -387,7 +398,7 @@ public _HtmlLexer(java.io.InputStream in) { this(new java.io.InputStreamReader(in)); } - /** + /** * Unpacks the compressed character translation table. * * @param packed the packed character translation table @@ -397,7 +408,7 @@ public _HtmlLexer(java.io.InputStream in) { char [] map = new char[0x10000]; int i = 0; /* index in packed string */ int j = 0; /* index in unpacked array */ - while (i < 1254) { + while (i < 1260) { int count = packed.charAt(i++); char value = packed.charAt(i++); do map[j++] = value; while (--count > 0); @@ -427,7 +438,7 @@ public void reset(CharSequence buffer, int start, int end,int initialState){ // For Demetra compatibility public void reset(CharSequence buffer, int initialState){ zzBuffer = buffer; - zzBufferArray = null; + zzBufferArray = null; zzCurrentPos = zzMarkedPos = zzStartRead = 0; zzPushbackPos = 0; zzAtEOF = false; @@ -548,7 +559,7 @@ public void yypushback(int number) { private void zzDoEOF() { if (!zzEOFDone) { zzEOFDone = true; - + } } @@ -631,150 +642,170 @@ else if (zzAtEOF) { zzMarkedPos = zzMarkedPosL; switch (zzAction < 0 ? 
zzAction : ZZ_ACTION[zzAction]) { - case 17: + case 18: { return XmlTokenType.XML_ATTRIBUTE_VALUE_TOKEN; } - case 37: break; - case 22: + case 42: break; + case 25: { yybegin(START_TAG_NAME); yypushback(yylength()); } - case 38: break; - case 21: + case 43: break; + case 22: { return XmlTokenType.XML_TAG_CHARACTERS; } - case 39: break; - case 14: + case 44: break; + case 15: { yybegin(ATTRIBUTE_VALUE_START); return XmlTokenType.XML_EQ; } - case 40: break; - case 29: + case 45: break; + case 33: { return elTokenType; } - case 41: break; - case 12: + case 46: break; + case 13: { return XmlTokenType.XML_NAME; } - case 42: break; - case 18: + case 47: break; + case 19: { yybegin(TAG_ATTRIBUTES); return XmlTokenType.XML_ATTRIBUTE_VALUE_END_DELIMITER; } - case 43: break; - case 9: + case 48: break; + case 10: { yybegin(YYINITIAL); yypushback(1); break; } - case 44: break; - case 35: + case 49: break; + case 40: { return XmlTokenType.XML_DOCTYPE_PUBLIC; } - case 45: break; - case 33: + case 50: break; + case 38: { yybegin(COMMENT); return XmlTokenType.XML_COMMENT_START; } - case 46: break; - case 4: + case 51: break; + case 4: { return XmlTokenType.XML_REAL_WHITE_SPACE; } - case 47: break; - case 27: + case 52: break; + case 31: { yybegin(END_TAG_NAME); yypushback(yylength()); } - case 48: break; - case 1: + case 53: break; + case 1: { return XmlTokenType.XML_DATA_CHARACTERS; } - case 49: break; - case 28: + case 54: break; + case 24: + { yybegin(COMMENT); return XmlTokenType.XML_CONDITIONAL_COMMENT_END; + } + case 55: break; + case 32: { yybegin(END_TAG_NAME2); yypushback(yylength()); } - case 50: break; - case 5: + case 56: break; + case 5: { return XmlTokenType.XML_BAD_CHARACTER; } - case 51: break; - case 13: + case 57: break; + case 14: { yybegin(YYINITIAL); return XmlTokenType.XML_TAG_END; } - case 52: break; - case 25: + case 58: break; + case 30: + { yybegin(COMMENT); return XmlTokenType.XML_CONDITIONAL_COMMENT_START_END; + } + case 59: break; + case 28: { return 
XmlTokenType.XML_END_TAG_START; } - case 53: break; - case 31: + case 60: break; + case 35: { yybegin(YYINITIAL); return XmlTokenType.XML_COMMENT_END; } - case 54: break; - case 36: + case 61: break; + case 41: { yybegin(DOC_TYPE); return XmlTokenType.XML_DOCTYPE_START; } - case 55: break; - case 3: + case 62: break; + case 3: { return XmlTokenType.XML_PI_TARGET; } - case 56: break; - case 26: + case 63: break; + case 29: { yybegin(YYINITIAL); return XmlTokenType.XML_EMPTY_ELEMENT_END; } - case 57: break; - case 24: + case 64: break; + case 36: + { yybegin(C_COMMENT_END); return XmlTokenType.XML_CONDITIONAL_COMMENT_END_START; + } + case 65: break; + case 27: { yybegin(PROCESSING_INSTRUCTION); return XmlTokenType.XML_PI_START; } - case 58: break; - case 20: + case 66: break; + case 9: + { yybegin(C_COMMENT_START); return XmlTokenType.XML_CONDITIONAL_COMMENT_START; + } + case 67: break; + case 21: { yybegin(TAG_CHARACTERS); return XmlTokenType.XML_NAME; } - case 59: break; - case 7: + case 68: break; + case 7: { yybegin(YYINITIAL); return XmlTokenType.XML_DOCTYPE_END; } - case 60: break; - case 34: + case 69: break; + case 39: { return XmlTokenType.XML_CHAR_ENTITY_REF; } - case 61: break; - case 11: + case 70: break; + case 12: { return XmlTokenType.XML_START_TAG_START; } - case 62: break; - case 6: + case 71: break; + case 6: { return XmlTokenType.XML_WHITE_SPACE; } - case 63: break; - case 30: + case 72: break; + case 34: { return XmlTokenType.XML_ENTITY_REF_TOKEN; } - case 64: break; - case 32: + case 73: break; + case 23: + { yybegin(COMMENT); return XmlTokenType.XML_COMMENT_CHARACTERS; + } + case 74: break; + case 37: { return elTokenType2; } - case 65: break; - case 19: + case 75: break; + case 20: { yybegin(YYINITIAL); return XmlTokenType.XML_PI_END; } - case 66: break; - case 15: + case 76: break; + case 16: { yybegin(ATTRIBUTE_VALUE_DQ); return XmlTokenType.XML_ATTRIBUTE_VALUE_START_DELIMITER; } - case 67: break; - case 16: + case 77: break; + case 17: { 
yybegin(ATTRIBUTE_VALUE_SQ); return XmlTokenType.XML_ATTRIBUTE_VALUE_START_DELIMITER; } - case 68: break; - case 10: + case 78: break; + case 11: { yybegin(TAG_ATTRIBUTES); return XmlTokenType.XML_NAME; } - case 69: break; - case 23: + case 79: break; + case 26: { yybegin(START_TAG_NAME2); yypushback(yylength()); } - case 70: break; - case 2: + case 80: break; + case 2: { yybegin(TAG_ATTRIBUTES); return XmlTokenType.XML_ATTRIBUTE_VALUE_TOKEN; } - case 71: break; - case 8: + case 81: break; + case 8: { return XmlTokenType.XML_COMMENT_CHARACTERS; } - case 72: break; + case 82: break; default: if (zzInput == YYEOF && zzStartRead == zzCurrentPos) { zzAtEOF = true; diff --git a/xml/impl/src/com/intellij/psi/impl/source/html/HtmlConditionalCommentInjector.java b/xml/impl/src/com/intellij/psi/impl/source/html/HtmlConditionalCommentInjector.java new file mode 100644 index 0000000000000..5708b1853aa7e --- /dev/null +++ b/xml/impl/src/com/intellij/psi/impl/source/html/HtmlConditionalCommentInjector.java @@ -0,0 +1,55 @@ +package com.intellij.psi.impl.source.html; + +import com.intellij.lang.ASTNode; +import com.intellij.lang.html.HTMLLanguage; +import com.intellij.lang.injection.MultiHostInjector; +import com.intellij.lang.injection.MultiHostRegistrar; +import com.intellij.openapi.util.TextRange; +import com.intellij.psi.PsiComment; +import com.intellij.psi.PsiElement; +import com.intellij.psi.PsiLanguageInjectionHost; +import com.intellij.psi.tree.TokenSet; +import com.intellij.psi.xml.XmlComment; +import com.intellij.psi.xml.XmlTokenType; +import org.jetbrains.annotations.NotNull; + +import java.util.Arrays; +import java.util.List; + +/** + * @author spleaner + */ +public class HtmlConditionalCommentInjector implements MultiHostInjector { + public void getLanguagesToInject(@NotNull final MultiHostRegistrar registrar, @NotNull final PsiElement host) { + if (host instanceof XmlComment) { + final ASTNode comment = host.getNode(); + if (comment != null) { + final ASTNode[] 
conditionalStarts = comment.getChildren(TokenSet.create(XmlTokenType.XML_CONDITIONAL_COMMENT_START_END)); + if (conditionalStarts.length > 0) { + final ASTNode[] conditionalEnds = comment.getChildren(TokenSet.create(XmlTokenType.XML_CONDITIONAL_COMMENT_END_START)); + if (conditionalEnds.length > 0) { + final ASTNode[] endOfEnd = comment.getChildren(TokenSet.create(XmlTokenType.XML_CONDITIONAL_COMMENT_END)); + if (endOfEnd.length > 0) { + final TextRange textRange = host.getTextRange(); + final int startOffset = textRange.getStartOffset(); + + final ASTNode start = conditionalStarts[0]; + final ASTNode end = conditionalEnds[0]; + registrar.startInjecting(HTMLLanguage.INSTANCE).addPlace(null, null, (PsiLanguageInjectionHost)host, + new TextRange(start.getTextRange().getEndOffset() - startOffset, + end.getStartOffset() - startOffset)).doneInjecting(); + } + } + } + } + } + + + } + + + @NotNull + public List<? extends Class<? extends PsiElement>> elementsToInjectIn() { + return Arrays.asList(PsiComment.class); + } +} diff --git a/xml/impl/src/com/intellij/psi/impl/source/tree/injected/XmlCommentLiteralEscaper.java b/xml/impl/src/com/intellij/psi/impl/source/tree/injected/XmlCommentLiteralEscaper.java new file mode 100644 index 0000000000000..029c05fbef686 --- /dev/null +++ b/xml/impl/src/com/intellij/psi/impl/source/tree/injected/XmlCommentLiteralEscaper.java @@ -0,0 +1,40 @@ +package com.intellij.psi.impl.source.tree.injected; + +import com.intellij.psi.LiteralTextEscaper; +import com.intellij.psi.impl.source.xml.XmlCommentImpl; +import com.intellij.openapi.util.TextRange; +import com.intellij.openapi.util.ProperTextRange; +import com.intellij.lang.Commenter; +import com.intellij.lang.LanguageCommenters; +import com.intellij.lang.CodeDocumentationAwareCommenter; +import org.jetbrains.annotations.NotNull; + +/** + * @author spleaner + */ +public class XmlCommentLiteralEscaper extends LiteralTextEscaper<XmlCommentImpl> { + public XmlCommentLiteralEscaper(@NotNull 
XmlCommentImpl host) { + super(host); + } + + public boolean decode(@NotNull final TextRange rangeInsideHost, @NotNull final StringBuilder outChars) { + ProperTextRange.assertProperRange(rangeInsideHost); + outChars.append(myHost.getText(), rangeInsideHost.getStartOffset(), rangeInsideHost.getEndOffset()); + return true; + } + + public int getOffsetInHost(final int offsetInDecoded, @NotNull final TextRange rangeInsideHost) { + int offset = offsetInDecoded + rangeInsideHost.getStartOffset(); + if (offset < rangeInsideHost.getStartOffset()) offset = rangeInsideHost.getStartOffset(); + if (offset > rangeInsideHost.getEndOffset()) offset = rangeInsideHost.getEndOffset(); + return offset; + } + + public boolean isOneLine() { + final Commenter commenter = LanguageCommenters.INSTANCE.forLanguage(myHost.getLanguage()); + if (commenter instanceof CodeDocumentationAwareCommenter) { + return myHost.getTokenType() == ((CodeDocumentationAwareCommenter) commenter).getLineCommentTokenType(); + } + return false; + } +} diff --git a/xml/impl/src/com/intellij/psi/impl/source/xml/XmlCommentImpl.java b/xml/impl/src/com/intellij/psi/impl/source/xml/XmlCommentImpl.java index 87e51cc525d30..7b6ed010ae5a6 100644 --- a/xml/impl/src/com/intellij/psi/impl/source/xml/XmlCommentImpl.java +++ b/xml/impl/src/com/intellij/psi/impl/source/xml/XmlCommentImpl.java @@ -1,24 +1,26 @@ package com.intellij.psi.impl.source.xml; -import com.intellij.psi.PsiElementVisitor; -import com.intellij.psi.PsiReference; -import com.intellij.psi.XmlElementVisitor; +import com.intellij.openapi.util.Pair; +import com.intellij.openapi.util.TextRange; +import com.intellij.psi.*; +import com.intellij.psi.util.PsiTreeUtil; import com.intellij.psi.impl.meta.MetaRegistry; import com.intellij.psi.impl.source.resolve.reference.ReferenceProvidersRegistry; +import com.intellij.psi.impl.source.tree.injected.InjectedLanguageUtil; +import com.intellij.psi.impl.source.tree.injected.XmlCommentLiteralEscaper; import 
com.intellij.psi.meta.PsiMetaData; import com.intellij.psi.meta.PsiMetaOwner; import com.intellij.psi.tree.IElementType; -import com.intellij.psi.xml.XmlComment; -import com.intellij.psi.xml.XmlElementType; -import com.intellij.psi.xml.XmlTag; -import com.intellij.psi.xml.XmlTagChild; +import com.intellij.psi.xml.*; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import java.util.List; + /** * @author Mike */ -public class XmlCommentImpl extends XmlElementImpl implements XmlComment, XmlElementType, PsiMetaOwner { +public class XmlCommentImpl extends XmlElementImpl implements XmlComment, XmlElementType, PsiMetaOwner, PsiLanguageInjectionHost { public XmlCommentImpl() { super(XML_COMMENT); } @@ -60,4 +62,32 @@ public PsiReference[] getReferences() { public PsiMetaData getMetaData() { return MetaRegistry.getMetaBase(this); } + + public List<Pair<PsiElement, TextRange>> getInjectedPsi() { + return InjectedLanguageUtil.getInjectedPsiFiles(this); + } + + public void processInjectedPsi(@NotNull final InjectedPsiVisitor visitor) { + InjectedLanguageUtil.enumerate(this, visitor); + } + + public PsiLanguageInjectionHost updateText(@NotNull final String text) { + final PsiFile psiFile = getContainingFile(); + + final XmlDocument document = + ((XmlFile)PsiFileFactory.getInstance(getProject()).createFileFromText("dummy", psiFile.getFileType(), text)).getDocument(); + assert document != null; + + final XmlComment comment = PsiTreeUtil.getChildOfType(document, XmlComment.class); + + assert comment != null; + replaceAllChildrenToChildrenOf(comment.getNode()); + + return this; + } + + @NotNull + public LiteralTextEscaper<? 
extends PsiLanguageInjectionHost> createLiteralTextEscaper() { + return new XmlCommentLiteralEscaper(this); + } } diff --git a/xml/openapi/src/com/intellij/psi/xml/XmlTokenType.java b/xml/openapi/src/com/intellij/psi/xml/XmlTokenType.java index 721c8d5bc8cc3..9a43f4058f0fc 100644 --- a/xml/openapi/src/com/intellij/psi/xml/XmlTokenType.java +++ b/xml/openapi/src/com/intellij/psi/xml/XmlTokenType.java @@ -92,6 +92,11 @@ public interface XmlTokenType { IElementType XML_BAD_CHARACTER = new IXmlLeafElementType("XML_BAD_CHARACTER"); + IElementType XML_CONDITIONAL_COMMENT_START = new IXmlLeafElementType("CONDITIONAL_COMMENT_START"); + IElementType XML_CONDITIONAL_COMMENT_START_END = new IXmlLeafElementType("CONDITIONAL_COMMENT_START_END"); + IElementType XML_CONDITIONAL_COMMENT_END_START = new IXmlLeafElementType("CONDITIONAL_COMMENT_END_START"); + IElementType XML_CONDITIONAL_COMMENT_END = new IXmlLeafElementType("CONDITIONAL_COMMENT_END"); + TokenSet COMMENTS = TokenSet.create(XML_COMMENT_START, XML_COMMENT_CHARACTERS, XML_COMMENT_END); TokenSet WHITESPACES = TokenSet.create(XML_WHITE_SPACE); }
bb8a666b6dd51330be8a88cb48f923a57264a673
elasticsearch
make test less evil--
p
https://github.com/elastic/elasticsearch
diff --git a/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java b/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java index 162250153f97e..a9268e2a25368 100644 --- a/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java +++ b/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java @@ -491,9 +491,9 @@ public void testRandomIDsAndVersions() throws Exception { int numIDs; if (isNightly()) { - numIDs = scaledRandomIntBetween(3000, 10000); + numIDs = scaledRandomIntBetween(300, 1000); } else { - numIDs = scaledRandomIntBetween(500, 1000); + numIDs = scaledRandomIntBetween(50, 100); } while (idsSet.size() < numIDs) { @@ -506,7 +506,7 @@ public void testRandomIDsAndVersions() throws Exception { // Attach random versions to them: long version = 0; - final IDAndVersion[] idVersions = new IDAndVersion[TestUtil.nextInt(random, numIDs/2, numIDs*(isNightly() ? 4 : 2))]; + final IDAndVersion[] idVersions = new IDAndVersion[TestUtil.nextInt(random, numIDs/2, numIDs*(isNightly() ? 8 : 2))]; final Map<String,IDAndVersion> truth = new HashMap<>(); if (VERBOSE) {
69438cc0d3444c064f3555ef465176ef5c28f69e
orientdb
Fixed issue on missed saving of the configuration- when Local Data has multiple files. Reported by Ed Barbeau:- http://groups.google.com/group/orient-database/msg/0299391834b65b73--
c
https://github.com/orientechnologies/orientdb
diff --git a/core/src/main/java/com/orientechnologies/orient/core/config/OStorageConfiguration.java b/core/src/main/java/com/orientechnologies/orient/core/config/OStorageConfiguration.java index 2c004364862..6b0b13cd8a2 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/config/OStorageConfiguration.java +++ b/core/src/main/java/com/orientechnologies/orient/core/config/OStorageConfiguration.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.Locale; +import com.orientechnologies.orient.core.exception.OConfigurationException; import com.orientechnologies.orient.core.id.ORecordId; import com.orientechnologies.orient.core.record.impl.ORecordBytes; import com.orientechnologies.orient.core.serialization.OSerializableStream; @@ -55,6 +56,8 @@ public class OStorageConfiguration implements OSerializableStream { private transient OStorage storage; private transient byte[] record; + private static final int FIXED_CONFIG_SIZE = 20000; + public OStorageConfiguration load() throws IOException { record = storage.readRecord(-1, storage.getClusterIdByName(OStorage.CLUSTER_INTERNAL_NAME), CONFIG_RECORD_NUM).buffer; fromStream(record); @@ -136,7 +139,7 @@ public OSerializableStream fromStream(byte[] iStream) throws IOException { clusterType = read(values[index++]); // PHYSICAL CLUSTER if (clusterType.equals("p")) { - phyCluster = new OStoragePhysicalClusterConfiguration(i); + phyCluster = new OStoragePhysicalClusterConfiguration(this, i); index = phySegmentFromStream(values, index, phyCluster); phyCluster.holeFile = new OStorageClusterHoleConfiguration(phyCluster, read(values[index++]), read(values[index++]), read(values[index++])); @@ -153,7 +156,7 @@ public OSerializableStream fromStream(byte[] iStream) throws IOException { dataSegments = new ArrayList<OStorageDataConfiguration>(size); OStorageDataConfiguration data; for (int i = 0; i < size; ++i) { - data = new OStorageDataConfiguration(); + data = new OStorageDataConfiguration(this); index = 
phySegmentFromStream(values, index, data); data.holeFile = new OStorageDataHoleConfiguration(data, read(values[index++]), read(values[index++]), read(values[index++])); dataSegments.add(data); @@ -211,6 +214,13 @@ public byte[] toStream() throws IOException { for (OEntryConfiguration e : properties) entryToStream(buffer, e); + if (buffer.length() > FIXED_CONFIG_SIZE) + throw new OConfigurationException("Configuration data exceeded size limit: " + FIXED_CONFIG_SIZE + " bytes"); + + // ALLOCATE ENOUGHT SPACE TO REUSE IT EVERY TIME + buffer.append("|"); + buffer.setLength(FIXED_CONFIG_SIZE); + return buffer.toString().getBytes(); } diff --git a/core/src/main/java/com/orientechnologies/orient/core/config/OStorageDataConfiguration.java b/core/src/main/java/com/orientechnologies/orient/core/config/OStorageDataConfiguration.java index 3e411c1f253..280cafd0895 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/config/OStorageDataConfiguration.java +++ b/core/src/main/java/com/orientechnologies/orient/core/config/OStorageDataConfiguration.java @@ -22,7 +22,8 @@ public class OStorageDataConfiguration extends OStorageSegmentConfiguration { private static final String START_SIZE = "10Mb"; private static final String INCREMENT_SIZE = "100%"; - public OStorageDataConfiguration() { + public OStorageDataConfiguration(final OStorageConfiguration iStorageConfiguration) { + root = iStorageConfiguration; fileStartSize = START_SIZE; fileIncrementSize = INCREMENT_SIZE; } diff --git a/core/src/main/java/com/orientechnologies/orient/core/config/OStoragePhysicalClusterConfiguration.java b/core/src/main/java/com/orientechnologies/orient/core/config/OStoragePhysicalClusterConfiguration.java index 3ee268b90db..52fb3eafb03 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/config/OStoragePhysicalClusterConfiguration.java +++ b/core/src/main/java/com/orientechnologies/orient/core/config/OStoragePhysicalClusterConfiguration.java @@ -22,7 +22,8 @@ public class 
OStoragePhysicalClusterConfiguration extends OStorageSegmentConfigu private static final String START_SIZE = "1Mb"; - public OStoragePhysicalClusterConfiguration(final int iId) { + public OStoragePhysicalClusterConfiguration(final OStorageConfiguration iStorageConfiguration, final int iId) { + root = iStorageConfiguration; fileStartSize = START_SIZE; id = iId; } diff --git a/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OMultiFileSegment.java b/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OMultiFileSegment.java index 72a405972d3..8e34ec57e6c 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OMultiFileSegment.java +++ b/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OMultiFileSegment.java @@ -27,15 +27,15 @@ import com.orientechnologies.orient.core.storage.fs.OFileFactory; public class OMultiFileSegment extends OSegment { - protected OFile[] files = new OFile[0]; - private String fileExtension; - private String type; - private long maxSize; - private String defrag; - private int fileStartSize; - private int fileMaxSize; - private int fileIncrementSize; - private OStorageSegmentConfiguration config; + protected OStorageSegmentConfiguration config; + protected OFile[] files = new OFile[0]; + private String fileExtension; + private String type; + private long maxSize; + private String defrag; + private int fileStartSize; + private int fileMaxSize; + private int fileIncrementSize; public OMultiFileSegment(final OStorageLocal iStorage, final OStorageSegmentConfiguration iConfig, final String iFileExtension, final int iRoundMaxSize) throws IOException { @@ -206,6 +206,8 @@ protected int[] allocateSpace(final int iRecordSize) throws IOException { file = createNewFile(); file.allocateSpace(iRecordSize); + config.root.update(); + return new int[] { files.length - 1, 0 }; } @@ -232,7 +234,7 @@ protected int[] getRelativePosition(final long iPosition) { if (fileNum 
>= files.length) throw new ODatabaseException("Record position #" + iPosition + " was bound to file #" + fileNum - + " that is out if limit (current=" + files.length + ")"); + + " that is out of limit (current=" + (files.length - 1) + ")"); final int fileRec = (int) (iPosition % fileMaxSize); @@ -257,7 +259,7 @@ private OFile createNewFile() throws IOException { return file; } - private void addInfoFileConfigEntry(final OFile file) { + private void addInfoFileConfigEntry(final OFile file) throws IOException { OStorageFileConfiguration[] newConfigFiles = new OStorageFileConfiguration[config.infoFiles.length + 1]; for (int i = 0; i < config.infoFiles.length; ++i) newConfigFiles[i] = config.infoFiles[i]; diff --git a/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OStorageLocal.java b/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OStorageLocal.java index a8cbd078607..e90d87db627 100644 --- a/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OStorageLocal.java +++ b/core/src/main/java/com/orientechnologies/orient/core/storage/impl/local/OStorageLocal.java @@ -35,6 +35,7 @@ import com.orientechnologies.orient.core.config.OStorageDataConfiguration; import com.orientechnologies.orient.core.config.OStorageLogicalClusterConfiguration; import com.orientechnologies.orient.core.config.OStoragePhysicalClusterConfiguration; +import com.orientechnologies.orient.core.config.OStorageSegmentConfiguration; import com.orientechnologies.orient.core.db.record.ODatabaseRecord; import com.orientechnologies.orient.core.dictionary.ODictionary; import com.orientechnologies.orient.core.exception.OCommandExecutionException; @@ -631,8 +632,11 @@ protected int registerDataSegment(final OStorageDataConfiguration iConfig) throw // CHECK FOR DUPLICATION OF NAMES for (ODataLocal data : dataSegments) - if (data.getName().equals(iConfig.name)) + if (data.getName().equals(iConfig.name)) { + // OVERWRITE CONFIG + data.config = 
iConfig; return -1; + } pos = dataSegments.length; // CREATE AND ADD THE NEW REF SEGMENT @@ -654,9 +658,11 @@ protected int registerDataSegment(final OStorageDataConfiguration iConfig) throw * @throws IOException */ private int createClusterFromConfig(final OStorageClusterConfiguration iConfig) throws IOException { - if (clusterMap.containsKey(iConfig.getName())) - // ALREADY CONFIGURED + if (clusterMap.containsKey(iConfig.getName())) { + // ALREADY CONFIGURED, JUST OVERWRITE CONFIG + ((OClusterLocal) clusterMap.get(iConfig.getName())).config = (OStorageSegmentConfiguration) iConfig; return -1; + } final OCluster cluster; diff --git a/tests/src/test/java/com/orientechnologies/orient/test/database/speed/SQLSynchQuerySpeedTest.java b/tests/src/test/java/com/orientechnologies/orient/test/database/speed/SQLSynchQuerySpeedTest.java index 0f0e8354cf5..258c216dff5 100644 --- a/tests/src/test/java/com/orientechnologies/orient/test/database/speed/SQLSynchQuerySpeedTest.java +++ b/tests/src/test/java/com/orientechnologies/orient/test/database/speed/SQLSynchQuerySpeedTest.java @@ -41,20 +41,7 @@ public SQLSynchQuerySpeedTest(String iURL) { public void cycle() throws UnsupportedEncodingException { System.out.println("1 ----------------------"); List<ODocument> result = database.command( - new OSQLSynchQuery<ODocument>("select * from animal where id = 10 and name like 'G%'")).execute(); - - OrientTest.printRecords(result); - - System.out.println("2 ----------------------"); - result = database.command( - new OSQLSynchQuery<ODocument>("select * from animal where column(0) < 5 or column(0) >= 3 and column(5) < 7")).execute(); - - OrientTest.printRecords(result); - - /* - * System.out.println("3 ----------------------"); printResults((List<String>) database.query(new OSQLSynchQuery<String>( - * "select * from animal where column(0) < 5 and column(0) >= 3 or column(0) < 7"), 1000)); - */ + new OSQLSynchQuery<ODocument>("select * from Account where id = 10 and name like 
'G%'")).execute(); } public boolean result(final Object iRecord) {
8c3065c971a65448b29ee600d5c889b33e2afdb1
hadoop
MAPREDUCE-2766. Fixed NM to set secure permissions- for files and directories in distributed-cache. Contributed by Hitesh Shah.- svn merge -c r1195340 --ignore-ancestry ../../trunk/--git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1195341 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hadoop
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 41be54ab5bfae..223ff031ebfd8 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -1826,6 +1826,9 @@ Release 0.23.0 - Unreleased MAPREDUCE-3313. Fixed initialization of ClusterMetrics which was failing TestResourceTrackerService sometimes. (Hitesh Shah via vinodkv) + MAPREDUCE-2766. Fixed NM to set secure permissions for files and directories + in distributed-cache. (Hitesh Shah via vinodkv) + Release 0.22.0 - Unreleased INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FSDownload.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FSDownload.java index 671f3ae30b8bf..85d1690701615 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FSDownload.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FSDownload.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.RunJar; import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.util.ConverterUtils; /** @@ -56,7 +57,13 @@ public class FSDownload implements Callable<Path> { private Configuration conf; private LocalResource resource; private LocalDirAllocator dirs; - private FsPermission cachePerms = new FsPermission((short) 0755); + private static final FsPermission cachePerms = new FsPermission( + (short) 
0755); + static final FsPermission PUBLIC_FILE_PERMS = new FsPermission((short) 0555); + static final FsPermission PRIVATE_FILE_PERMS = new FsPermission( + (short) 0500); + static final FsPermission PUBLIC_DIR_PERMS = new FsPermission((short) 0755); + static final FsPermission PRIVATE_DIR_PERMS = new FsPermission((short) 0700); FSDownload(FileContext files, UserGroupInformation ugi, Configuration conf, LocalDirAllocator dirs, LocalResource resource, Random rand) { @@ -150,6 +157,7 @@ public Path run() throws Exception { }; }); unpack(new File(dTmp.toUri()), new File(dFinal.toUri())); + changePermissions(dFinal.getFileSystem(conf), dFinal); files.rename(dst_work, dst, Rename.OVERWRITE); } catch (Exception e) { try { files.delete(dst, true); } catch (IOException ignore) { } @@ -163,11 +171,56 @@ public Path run() throws Exception { conf = null; resource = null; dirs = null; - cachePerms = null; } return files.makeQualified(new Path(dst, sCopy.getName())); } + /** + * Recursively change permissions of all files/dirs on path based + * on resource visibility. + * Change to 755 or 700 for dirs, 555 or 500 for files. + * @param fs FileSystem + * @param path Path to modify perms for + * @throws IOException + * @throws InterruptedException + */ + private void changePermissions(FileSystem fs, final Path path) + throws IOException, InterruptedException { + FileStatus fStatus = fs.getFileStatus(path); + FsPermission perm = cachePerms; + // set public perms as 755 or 555 based on dir or file + if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) { + perm = fStatus.isDirectory() ? PUBLIC_DIR_PERMS : PUBLIC_FILE_PERMS; + } + // set private perms as 700 or 500 + else { + // PRIVATE: + // APPLICATION: + perm = fStatus.isDirectory() ? 
PRIVATE_DIR_PERMS : PRIVATE_FILE_PERMS; + } + LOG.debug("Changing permissions for path " + path + + " to perm " + perm); + final FsPermission fPerm = perm; + if (null == userUgi) { + files.setPermission(path, perm); + } + else { + userUgi.doAs(new PrivilegedExceptionAction<Void>() { + public Void run() throws Exception { + files.setPermission(path, fPerm); + return null; + } + }); + } + if (fStatus.isDirectory() + && !fStatus.isSymlink()) { + FileStatus[] statuses = fs.listStatus(path); + for (FileStatus status : statuses) { + changePermissions(fs, status.getPath()); + } + } + } + private static long getEstimatedSize(LocalResource rsrc) { if (rsrc.getSize() < 0) { return -1; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java index 35f24851f7a10..7754baa73c250 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java @@ -33,6 +33,7 @@ public class LocalResourceRequest private final Path loc; private final long timestamp; private final LocalResourceType type; + private final LocalResourceVisibility visibility; /** * Wrap API resource to match against cache of localized resources. 
@@ -43,13 +44,16 @@ public LocalResourceRequest(LocalResource resource) throws URISyntaxException { this(ConverterUtils.getPathFromYarnURL(resource.getResource()), resource.getTimestamp(), - resource.getType()); + resource.getType(), + resource.getVisibility()); } - LocalResourceRequest(Path loc, long timestamp, LocalResourceType type) { + LocalResourceRequest(Path loc, long timestamp, LocalResourceType type, + LocalResourceVisibility visibility) { this.loc = loc; this.timestamp = timestamp; this.type = type; + this.visibility = visibility; } @Override @@ -114,7 +118,7 @@ public long getSize() { @Override public LocalResourceVisibility getVisibility() { - throw new UnsupportedOperationException(); + return visibility; } @Override diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestFSDownload.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestFSDownload.java index fda8817d99459..74aa57052c0d9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestFSDownload.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestFSDownload.java @@ -18,6 +18,12 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; +import static org.apache.hadoop.fs.CreateFlag.CREATE; +import static org.apache.hadoop.fs.CreateFlag.OVERWRITE; +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; import java.net.URISyntaxException; import java.util.EnumSet; @@ -28,29 +34,35 @@ import 
java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.jar.JarOutputStream; +import java.util.jar.Manifest; + +import junit.framework.Assert; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileContext; -import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalDirAllocator; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.FSDownload; import org.apache.hadoop.yarn.util.ConverterUtils; - -import static org.apache.hadoop.fs.CreateFlag.*; - - import org.junit.AfterClass; import org.junit.Test; -import static org.junit.Assert.*; public class TestFSDownload { + private static final Log LOG = LogFactory.getLog(TestFSDownload.class); + @AfterClass public static void deleteTestDir() throws IOException { FileContext fs = FileContext.getLocalFSFileContext(); @@ -61,7 +73,7 @@ public static void deleteTestDir() throws IOException { RecordFactoryProvider.getRecordFactory(null); static LocalResource createFile(FileContext files, Path p, int len, - Random r) throws IOException, URISyntaxException { + Random r, LocalResourceVisibility vis) throws IOException { FSDataOutputStream out = null; try { byte[] bytes = new byte[len]; @@ -75,10 +87,30 @@ static LocalResource 
createFile(FileContext files, Path p, int len, ret.setResource(ConverterUtils.getYarnUrlFromPath(p)); ret.setSize(len); ret.setType(LocalResourceType.FILE); + ret.setVisibility(vis); ret.setTimestamp(files.getFileStatus(p).getModificationTime()); return ret; } + static LocalResource createJar(FileContext files, Path p, + LocalResourceVisibility vis) throws IOException { + LOG.info("Create jar file " + p); + File jarFile = new File((files.makeQualified(p)).toUri()); + FileOutputStream stream = new FileOutputStream(jarFile); + LOG.info("Create jar out stream "); + JarOutputStream out = new JarOutputStream(stream, new Manifest()); + LOG.info("Done writing jar stream "); + out.close(); + LocalResource ret = recordFactory.newRecordInstance(LocalResource.class); + ret.setResource(ConverterUtils.getYarnUrlFromPath(p)); + FileStatus status = files.getFileStatus(p); + ret.setSize(status.getLen()); + ret.setTimestamp(status.getModificationTime()); + ret.setType(LocalResourceType.ARCHIVE); + ret.setVisibility(vis); + return ret; + } + @Test public void testDownload() throws IOException, URISyntaxException, InterruptedException { @@ -88,6 +120,9 @@ public void testDownload() throws IOException, URISyntaxException, TestFSDownload.class.getSimpleName())); files.mkdir(basedir, null, true); conf.setStrings(TestFSDownload.class.getName(), basedir.toString()); + + Map<LocalResource, LocalResourceVisibility> rsrcVis = + new HashMap<LocalResource, LocalResourceVisibility>(); Random rand = new Random(); long sharedSeed = rand.nextLong(); @@ -102,8 +137,19 @@ public void testDownload() throws IOException, URISyntaxException, int[] sizes = new int[10]; for (int i = 0; i < 10; ++i) { sizes[i] = rand.nextInt(512) + 512; + LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC; + switch (i%3) { + case 1: + vis = LocalResourceVisibility.PRIVATE; + break; + case 2: + vis = LocalResourceVisibility.APPLICATION; + break; + } + LocalResource rsrc = createFile(files, new Path(basedir, "" + 
i), - sizes[i], rand); + sizes[i], rand, vis); + rsrcVis.put(rsrc, vis); FSDownload fsd = new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, dirs, rsrc, new Random(sharedSeed)); @@ -115,6 +161,22 @@ public void testDownload() throws IOException, URISyntaxException, Path localized = p.getValue().get(); assertEquals(sizes[Integer.valueOf(localized.getName())], p.getKey() .getSize()); + FileStatus status = files.getFileStatus(localized); + FsPermission perm = status.getPermission(); + System.out.println("File permission " + perm + + " for rsrc vis " + p.getKey().getVisibility().name()); + assert(rsrcVis.containsKey(p.getKey())); + switch (rsrcVis.get(p.getKey())) { + case PUBLIC: + Assert.assertTrue("Public file should be 555", + perm.toShort() == FSDownload.PUBLIC_FILE_PERMS.toShort()); + break; + case PRIVATE: + case APPLICATION: + Assert.assertTrue("Private file should be 500", + perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort()); + break; + } } } catch (ExecutionException e) { throw new IOException("Failed exec", e); @@ -122,5 +184,101 @@ public void testDownload() throws IOException, URISyntaxException, exec.shutdown(); } } + + private void verifyPermsRecursively(FileSystem fs, + FileContext files, Path p, + LocalResourceVisibility vis) throws IOException { + FileStatus status = files.getFileStatus(p); + if (status.isDirectory()) { + if (vis == LocalResourceVisibility.PUBLIC) { + Assert.assertTrue(status.getPermission().toShort() == + FSDownload.PUBLIC_DIR_PERMS.toShort()); + } + else { + Assert.assertTrue(status.getPermission().toShort() == + FSDownload.PRIVATE_DIR_PERMS.toShort()); + } + if (!status.isSymlink()) { + FileStatus[] statuses = fs.listStatus(p); + for (FileStatus stat : statuses) { + verifyPermsRecursively(fs, files, stat.getPath(), vis); + } + } + } + else { + if (vis == LocalResourceVisibility.PUBLIC) { + Assert.assertTrue(status.getPermission().toShort() == + FSDownload.PUBLIC_FILE_PERMS.toShort()); + } + else { + 
Assert.assertTrue(status.getPermission().toShort() == + FSDownload.PRIVATE_FILE_PERMS.toShort()); + } + } + } + + @Test + public void testDirDownload() throws IOException, InterruptedException { + Configuration conf = new Configuration(); + FileContext files = FileContext.getLocalFSFileContext(conf); + final Path basedir = files.makeQualified(new Path("target", + TestFSDownload.class.getSimpleName())); + files.mkdir(basedir, null, true); + conf.setStrings(TestFSDownload.class.getName(), basedir.toString()); + + Map<LocalResource, LocalResourceVisibility> rsrcVis = + new HashMap<LocalResource, LocalResourceVisibility>(); + + Random rand = new Random(); + long sharedSeed = rand.nextLong(); + rand.setSeed(sharedSeed); + System.out.println("SEED: " + sharedSeed); + + Map<LocalResource,Future<Path>> pending = + new HashMap<LocalResource,Future<Path>>(); + ExecutorService exec = Executors.newSingleThreadExecutor(); + LocalDirAllocator dirs = + new LocalDirAllocator(TestFSDownload.class.getName()); + for (int i = 0; i < 5; ++i) { + LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC; + switch (rand.nextInt()%3) { + case 1: + vis = LocalResourceVisibility.PRIVATE; + break; + case 2: + vis = LocalResourceVisibility.APPLICATION; + break; + } + + LocalResource rsrc = createJar(files, new Path(basedir, "dir" + i + + ".jar"), vis); + rsrcVis.put(rsrc, vis); + FSDownload fsd = + new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, + dirs, rsrc, new Random(sharedSeed)); + pending.put(rsrc, exec.submit(fsd)); + } + + try { + + for (Map.Entry<LocalResource,Future<Path>> p : pending.entrySet()) { + Path localized = p.getValue().get(); + FileStatus status = files.getFileStatus(localized); + System.out.println("Testing path " + localized); + assert(status.isDirectory()); + assert(rsrcVis.containsKey(p.getKey())); + + verifyPermsRecursively(localized.getFileSystem(conf), + files, localized, rsrcVis.get(p.getKey())); + } + } catch (ExecutionException e) { + throw 
new IOException("Failed exec", e); + } finally { + exec.shutdown(); + } + + + + } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java index 092ab1674cdc6..c425eb59ffb8e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.junit.Test; @@ -82,7 +83,7 @@ LocalResourcesTracker createMockTracker(String user, final long rsrcSize, for (int i = 0; i < nRsrcs; ++i) { final LocalResourceRequest req = new LocalResourceRequest( new Path("file:///" + user + "/rsrc" + i), timestamp + i * tsstep, - LocalResourceType.FILE); + LocalResourceType.FILE, LocalResourceVisibility.PUBLIC); final long ts = timestamp + i * tsstep; final Path p = new Path("file:///local/" + user + "/rsrc" + i); LocalizedResource rsrc = new LocalizedResource(req, null) {
6bece88c6941a754a8cc054bbe9946f1e7c0c615
ReactiveX-RxJava
Added ChannelObservable.get--
a
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-contrib/rxjava-quasar/src/main/java/rx/quasar/ChannelObservable.java b/rxjava-contrib/rxjava-quasar/src/main/java/rx/quasar/ChannelObservable.java index 45bcaa01c6..97e9af950b 100644 --- a/rxjava-contrib/rxjava-quasar/src/main/java/rx/quasar/ChannelObservable.java +++ b/rxjava-contrib/rxjava-quasar/src/main/java/rx/quasar/ChannelObservable.java @@ -14,6 +14,7 @@ * limitations under the License. */package rx.quasar; +import co.paralleluniverse.fibers.FiberAsync; import co.paralleluniverse.fibers.SuspendExecution; import co.paralleluniverse.fibers.Suspendable; import co.paralleluniverse.strands.Strand; @@ -21,6 +22,9 @@ import co.paralleluniverse.strands.channels.Channels; import co.paralleluniverse.strands.channels.ReceivePort; import co.paralleluniverse.strands.channels.SendPort; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import rx.Observable; import rx.Observer; import rx.Scheduler; @@ -45,7 +49,7 @@ private ChannelObservable() { * @return an Observable that emits each message received on the source {@link ReceivePort} * @see <a href="https://github.com/Netflix/RxJava/wiki/Creating-Observables#wiki-from">RxJava Wiki: from()</a> */ - public final static <T> Observable<T> from(ReceivePort<T> channel) { + public static <T> Observable<T> from(ReceivePort<T> channel) { return Observable.create(new OnSubscribeFromChannel<T>(channel)); } @@ -67,7 +71,7 @@ public final static <T> Observable<T> from(ReceivePort<T> channel) { * @see <a href="https://github.com/Netflix/RxJava/wiki/Creating-Observables#wiki-from">RxJava Wiki: from()</a> * @see <a href="http://msdn.microsoft.com/en-us/library/hh212140.aspx">MSDN: Observable.ToObservable</a> */ - public final static <T> Observable<T> from(ReceivePort<T> channel, Scheduler scheduler) { + public static <T> Observable<T> from(ReceivePort<T> channel, Scheduler scheduler) { return Observable.create(new 
OnSubscribeFromChannel<T>(channel)).subscribeOn(scheduler); } @@ -79,7 +83,7 @@ public final static <T> Observable<T> from(ReceivePort<T> channel, Scheduler sch * @param channel the target {@link SendPort} * @return */ - public final static <T> Observer<T> to(final SendPort<T> channel) { + public static <T> Observer<T> to(final SendPort<T> channel) { return new Observer<T>() { @Override @@ -115,9 +119,9 @@ public void onError(Throwable e) { * @param o the observable * @return A new channel with the given buffer size and overflow policy that will receive all events emitted by the observable. */ - public final static <T> ReceivePort<T> subscribe(int bufferSize, Channels.OverflowPolicy policy, Observable<T> o) { + public static <T> ReceivePort<T> subscribe(int bufferSize, Channels.OverflowPolicy policy, Observable<T> o) { final Channel<T> channel = Channels.newChannel(bufferSize, policy); - + o.subscribe(new Observer<T>() { @Override @Suspendable @@ -143,4 +147,72 @@ public void onError(Throwable e) { }); return channel; } + + /** + * Takes an observable that generates <i>at most one value</i>, blocks until it completes and returns the result. + * If the observable completes before a value has been emitted, this method returns {@code null}. + * It the observable fails, this function throws an {@link ExecutionException} that wraps the observable's exception. + * + * @param o the observable + * @return the observable's result, or {@code null} if the observable completes before a value is emitted. + * @throws ExecutionException if the observable fails + */ + public static <T> T get(final Observable<T> o) throws ExecutionException, SuspendExecution, InterruptedException { + return new AsyncObservable<T>(o).run(); + } + + /** + * Takes an observable that generates <i>at most one value</i>, blocks until it completes or the timeout expires, and returns the result. + * If the observable completes before a value has been emitted, this method returns {@code null}. 
+ * It the observable fails, this function throws an {@link ExecutionException} that wraps the observable's exception. + * + * @param o the observable + * @param timeout the maximum time this method will blcok + * @param unit the timeout's time unit + * @return the observable's result, or {@code null} if the observable completes before a value is emitted. + * @throws ExecutionException if the observable fails + * @throws TimeoutException if the timeout expires before the observable completes + */ + public static <T> T get(final Observable<T> o, long timeout, TimeUnit unit) throws ExecutionException, SuspendExecution, InterruptedException, TimeoutException { + return new AsyncObservable<T>(o).run(timeout, unit); + } + + private static class AsyncObservable<T> extends FiberAsync<T, Void, ExecutionException> implements Observer<T> { + private final Observable<T> o; + + public AsyncObservable(Observable<T> o) { + this.o = o; + } + + @Override + protected Void requestAsync() { + o.subscribe(this); + return null; + } + + @Override + public void onNext(T t) { + if (isCompleted()) + throw new IllegalStateException("Operation already completed"); + asyncCompleted(t); + } + + @Override + public void onError(Throwable e) { + if (isCompleted()) + throw new IllegalStateException("Operation already completed"); + asyncFailed(e); + } + + @Override + public void onCompleted() { + if (!isCompleted()) + asyncCompleted(null); + } + + @Override + protected ExecutionException wrapException(Throwable t) { + return new ExecutionException(t); + } + } } diff --git a/rxjava-contrib/rxjava-quasar/src/test/java/rx/quasar/ChannelObservableTest.java b/rxjava-contrib/rxjava-quasar/src/test/java/rx/quasar/ChannelObservableTest.java index cce90923de..63ef447af3 100644 --- a/rxjava-contrib/rxjava-quasar/src/test/java/rx/quasar/ChannelObservableTest.java +++ b/rxjava-contrib/rxjava-quasar/src/test/java/rx/quasar/ChannelObservableTest.java @@ -15,9 +15,11 @@ */ package rx.quasar; +import 
co.paralleluniverse.fibers.Fiber; import co.paralleluniverse.fibers.SuspendExecution; import co.paralleluniverse.fibers.Suspendable; import co.paralleluniverse.strands.Strand; +import co.paralleluniverse.strands.SuspendableCallable; import co.paralleluniverse.strands.channels.Channel; import co.paralleluniverse.strands.channels.Channels; import co.paralleluniverse.strands.channels.ProducerException; @@ -26,6 +28,7 @@ import java.util.Arrays; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.CoreMatchers.*; import static org.junit.Assert.*; @@ -113,18 +116,91 @@ public void testObserverChannelWithError() throws Exception { o.onNext("a"); o.onError(new MyException()); o.onNext("c"); - + assertThat(c.receive(), equalTo("a")); try { c.receive(); fail(); - } catch(ProducerException e) { + } catch (ProducerException e) { assertThat(e.getCause(), instanceOf(MyException.class)); } assertThat(c.isClosed(), is(true)); } - + + @Test + public void whenGetThenBlockAndReturnResult() throws Exception { + final PublishSubject<String> o = PublishSubject.create(); + + Fiber<String> f = new Fiber<String>(new SuspendableCallable<String>() { + + @Override + public String run() throws SuspendExecution, InterruptedException { + try { + return ChannelObservable.get(o); + } catch (ExecutionException e) { + throw new AssertionError(); + } + } + }).start(); + + Thread.sleep(100); + + o.onNext("foo"); + o.onCompleted(); + + assertThat(f.get(), equalTo("foo")); + } + + @Test + public void whenGetAndObservableFailsThenThrowExecutionException() throws Exception { + final PublishSubject<String> o = PublishSubject.create(); + + Fiber<String> f = new Fiber<String>(new SuspendableCallable<String>() { + + @Override + public String run() throws SuspendExecution, InterruptedException { + try { + return ChannelObservable.get(o); + } catch 
(ExecutionException e) { + return e.getCause().getMessage(); + } + } + }).start(); + + Thread.sleep(100); + + o.onError(new Exception("ohoh")); + + assertThat(f.get(), equalTo("ohoh")); + } + + @Test + public void whenGetAndObservableEmitsTwoValuesThenBlowup() throws Exception { + final PublishSubject<String> o = PublishSubject.create(); + + Fiber<String> f = new Fiber<String>(new SuspendableCallable<String>() { + + @Override + public String run() throws SuspendExecution, InterruptedException { + try { + return ChannelObservable.get(o); + } catch (ExecutionException e) { + throw new AssertionError(); + } + } + }).start(); + + Thread.sleep(100); + + o.onNext("foo"); + try { + o.onNext("bar"); + fail(); + } catch (Exception e) { + } + } + static class MyException extends RuntimeException { - + } }
c225b44f3440d8799f1be96de7e27131ad9086c3
spring-framework
SPR-5636 - @RequestMapping matching should be- insensitive to trailing slashes--
c
https://github.com/spring-projects/spring-framework
diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerAdapter.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerAdapter.java index 5ba30a4d49e2..2b936b5de378 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerAdapter.java +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/AnnotationMethodHandlerAdapter.java @@ -503,7 +503,15 @@ private boolean isPathMatchInternal(String pattern, String lookupPath) { if (pattern.equals(lookupPath) || pathMatcher.match(pattern, lookupPath)) { return true; } - return !(pattern.indexOf('.') != -1) && pathMatcher.match(pattern + ".*", lookupPath); + boolean hasSuffix = pattern.indexOf('.') != -1; + if (!hasSuffix && pathMatcher.match(pattern + ".*", lookupPath)) { + return true; + } + boolean endsWithSlash = pattern.endsWith("/"); + if (!endsWithSlash && pathMatcher.match(pattern + "/", lookupPath)) { + return true; + } + return false; } private boolean checkParameters(RequestMappingInfo mapping, HttpServletRequest request) { diff --git a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/DefaultAnnotationHandlerMapping.java b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/DefaultAnnotationHandlerMapping.java index 07c136ffb688..f2087c192a0f 100644 --- a/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/DefaultAnnotationHandlerMapping.java +++ b/org.springframework.web.servlet/src/main/java/org/springframework/web/servlet/mvc/annotation/DefaultAnnotationHandlerMapping.java @@ -86,10 +86,10 @@ public class DefaultAnnotationHandlerMapping extends AbstractDetectingUrlHandler /** * Set whether to register paths using the default suffix pattern 
as well: - * i.e. whether "/users" should be registered as "/users.*" too. + * i.e. whether "/users" should be registered as "/users.*" and "/users/" too. * <p>Default is "true". Turn this convention off if you intend to interpret * your <code>@RequestMapping</code> paths strictly. - * <p>Note that paths which include a ".xxx" suffix already will not be + * <p>Note that paths which include a ".xxx" suffix or end with "/" already will not be * transformed using the default suffix pattern in any case. */ public void setUseDefaultSuffixPattern(boolean useDefaultSuffixPattern) { @@ -168,8 +168,9 @@ public void doWith(Method method) { */ protected void addUrlsForPath(Set<String> urls, String path) { urls.add(path); - if (this.useDefaultSuffixPattern && path.indexOf('.') == -1) { + if (this.useDefaultSuffixPattern && path.indexOf('.') == -1 && !path.endsWith("/")) { urls.add(path + ".*"); + urls.add(path + "/"); } } diff --git a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/mvc/annotation/UriTemplateServletAnnotationControllerTests.java b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/mvc/annotation/UriTemplateServletAnnotationControllerTests.java index fd87705ddf32..587f70a9bf9e 100644 --- a/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/mvc/annotation/UriTemplateServletAnnotationControllerTests.java +++ b/org.springframework.web.servlet/src/test/java/org/springframework/web/servlet/mvc/annotation/UriTemplateServletAnnotationControllerTests.java @@ -152,6 +152,11 @@ public void crud() throws Exception { servlet.service(request, response); assertEquals("list", response.getContentAsString()); + request = new MockHttpServletRequest("GET", "/hotels/"); + response = new MockHttpServletResponse(); + servlet.service(request, response); + assertEquals("list", response.getContentAsString()); + request = new MockHttpServletRequest("POST", "/hotels"); response = new 
MockHttpServletResponse(); servlet.service(request, response); @@ -162,6 +167,11 @@ public void crud() throws Exception { servlet.service(request, response); assertEquals("show-42", response.getContentAsString()); + request = new MockHttpServletRequest("GET", "/hotels/42/"); + response = new MockHttpServletResponse(); + servlet.service(request, response); + assertEquals("show-42", response.getContentAsString()); + request = new MockHttpServletRequest("PUT", "/hotels/42"); response = new MockHttpServletResponse(); servlet.service(request, response);
2a211705a3db21df6acc2e2d2e059cd9cd5e4ea6
elasticsearch
Catch and Log RejectedExecutionException in async- ping--
p
https://github.com/elastic/elasticsearch
diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java index b261d8496db75..2981e9a34c284 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java @@ -175,14 +175,18 @@ public void run() { threadPool.schedule(TimeValue.timeValueMillis(timeout.millis() / 2), ThreadPool.Names.GENERIC, new Runnable() { @Override public void run() { - sendPings(timeout, TimeValue.timeValueMillis(timeout.millis() / 2), sendPingsHandler); - ConcurrentMap<DiscoveryNode, PingResponse> responses = receivedResponses.remove(sendPingsHandler.id()); - sendPingsHandler.close(); - for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) { - logger.trace("[{}] disconnecting from {}", sendPingsHandler.id(), node); - transportService.disconnectFromNode(node); + try { + sendPings(timeout, TimeValue.timeValueMillis(timeout.millis() / 2), sendPingsHandler); + ConcurrentMap<DiscoveryNode, PingResponse> responses = receivedResponses.remove(sendPingsHandler.id()); + sendPingsHandler.close(); + for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) { + logger.trace("[{}] disconnecting from {}", sendPingsHandler.id(), node); + transportService.disconnectFromNode(node); + } + listener.onPing(responses.values().toArray(new PingResponse[responses.size()])); + } catch (RejectedExecutionException ex) { + logger.info("Ping execution ejected", ex); } - listener.onPing(responses.values().toArray(new PingResponse[responses.size()])); } }); }
0f30646656428e3f2eb7c3a598fdf9cb1edee919
hbase
HBASE-7579 HTableDescriptor equals method fails- if results are returned in a different order; REVERT -- OVERCOMMITTED--git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1471053 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hbase
diff --git a/bin/hbase-daemon.sh b/bin/hbase-daemon.sh index 91a15ec87d0d..e45054b5ac72 100755 --- a/bin/hbase-daemon.sh +++ b/bin/hbase-daemon.sh @@ -36,7 +36,7 @@ usage="Usage: hbase-daemon.sh [--config <conf-dir>]\ (start|stop|restart|autorestart) <hbase-command> \ - [--formatZK] [--formatFS] <args...>" + <args...>" # if no args specified, show usage if [ $# -le 1 ]; then @@ -57,19 +57,6 @@ shift command=$1 shift -if [ "$startStop" = "start" ];then - for i in 1 2 - do - if [ "$1" = "--formatZK" ];then - formatzk=$1 - shift - elif [ "$1" = "--formatFS" ];then - formatfs=$1 - shift - fi - done -fi - hbase_rotate_log () { log=$1; @@ -111,10 +98,6 @@ check_before_start(){ fi } -clear_hbase_data() { - $bin/hbase-cleanup.sh $formatzk $formatfs -} - wait_until_done () { p=$1 @@ -189,7 +172,6 @@ case $startStop in (start) check_before_start - clear_hbase_data nohup $thiscmd --config "${HBASE_CONF_DIR}" internal_start $command $args < /dev/null > /dev/null 2>&1 & ;; diff --git a/bin/start-hbase.sh b/bin/start-hbase.sh index 672a0e89ed01..8fca03ca7769 100755 --- a/bin/start-hbase.sh +++ b/bin/start-hbase.sh @@ -24,7 +24,7 @@ # Start hadoop hbase daemons. # Run this on master node. 
-usage="Usage: start-hbase.sh [autorestart] [--formatZK] [--formatFS]" +usage="Usage: start-hbase.sh" bin=`dirname "${BASH_SOURCE-$0}"` bin=`cd "$bin">/dev/null; pwd` @@ -37,19 +37,12 @@ if [ $errCode -ne 0 ] then exit $errCode fi -for i in 1 2 3 -do - if [ "$1" = "autorestart" ];then - commandToRun="autorestart" - elif [ "$1" = "--formatZK" ];then - formatzk=$1 - elif [ "$1" = "--formatFS" ];then - formatfs=$1 - fi - shift -done -if [ "$commandToRun" = "" ];then + +if [ "$1" = "autorestart" ] +then + commandToRun="autorestart" +else commandToRun="start" fi @@ -59,10 +52,10 @@ distMode=`$bin/hbase --config "$HBASE_CONF_DIR" org.apache.hadoop.hbase.util.HBa if [ "$distMode" == 'false' ] then - "$bin"/hbase-daemon.sh $commandToRun master $formatzk $formatfs + "$bin"/hbase-daemon.sh $commandToRun master else "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" $commandToRun zookeeper - "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" $commandToRun master $formatzk $formatfs + "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" $commandToRun master "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \ --hosts "${HBASE_REGIONSERVERS}" $commandToRun regionserver "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 4e2c2a829429..ecb0826b5ed8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -1125,10 +1125,12 @@ public void write(DataOutput out) throws IOException { public int compareTo(HColumnDescriptor o) { int result = Bytes.compareTo(this.name, o.getName()); if (result == 0) { - // The maps interface should compare values, even if they're in different orders - if (!this.values.equals(o.values)) { - return 1; - } + // punt on comparison for ordering, just calculate difference + 
result = this.values.hashCode() - o.values.hashCode(); + if (result < 0) + result = -1; + else if (result > 0) + result = 1; } if (result == 0) { result = this.configuration.hashCode() - o.configuration.hashCode(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index b27826856f7c..8d2afcf36eff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -225,7 +225,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> { * catalog tables, <code>.META.</code> and <code>-ROOT-</code>. */ protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) { - setName(name); + this.name = name.clone(); + this.nameAsString = Bytes.toString(this.name); + setMetaFlags(name); for(HColumnDescriptor descriptor : families) { this.families.put(descriptor.getName(), descriptor); } @@ -237,7 +239,12 @@ protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) { */ protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families, Map<ImmutableBytesWritable,ImmutableBytesWritable> values) { - this(name.clone(), families); + this.name = name.clone(); + this.nameAsString = Bytes.toString(this.name); + setMetaFlags(name); + for(HColumnDescriptor descriptor : families) { + this.families.put(descriptor.getName(), descriptor); + } for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry: values.entrySet()) { setValue(entry.getKey(), entry.getValue()); @@ -277,7 +284,9 @@ public HTableDescriptor(final String name) { */ public HTableDescriptor(final byte [] name) { super(); - setName(name); + setMetaFlags(this.name); + this.name = this.isMetaRegion()? 
name: isLegalTableName(name); + this.nameAsString = Bytes.toString(this.name); } /** @@ -289,7 +298,9 @@ public HTableDescriptor(final byte [] name) { */ public HTableDescriptor(final HTableDescriptor desc) { super(); - setName(desc.name.clone()); + this.name = desc.name.clone(); + this.nameAsString = Bytes.toString(this.name); + setMetaFlags(this.name); for (HColumnDescriptor c: desc.families.values()) { this.families.put(c.getName(), new HColumnDescriptor(c)); } @@ -639,13 +650,9 @@ public String getRegionSplitPolicyClassName() { * Set the name of the table. * * @param name name of table - * @throws IllegalArgumentException if passed a table name - * that is made of other than 'word' characters, underscore or period: i.e. - * <code>[a-zA-Z_0-9.]. - * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a> */ public void setName(byte[] name) { - this.name = isMetaTable(name) ? name : isLegalTableName(name); + this.name = name; this.nameAsString = Bytes.toString(this.name); setMetaFlags(this.name); } @@ -980,34 +987,39 @@ public void write(DataOutput out) throws IOException { */ @Override public int compareTo(final HTableDescriptor other) { - // Check name matches int result = Bytes.compareTo(this.name, other.name); - if (result != 0) return result; - - // Check size matches - result = families.size() - other.families.size(); - if (result != 0) return result; - - // Compare that all column families - for (Iterator<HColumnDescriptor> it = families.values().iterator(), - it2 = other.families.values().iterator(); it.hasNext(); ) { - result = it.next().compareTo(it2.next()); - if (result != 0) { - return result; + if (result == 0) { + result = families.size() - other.families.size(); + } + if (result == 0 && families.size() != other.families.size()) { + result = Integer.valueOf(families.size()).compareTo( + Integer.valueOf(other.families.size())); + } + if (result == 0) { + for (Iterator<HColumnDescriptor> it = families.values().iterator(), + it2 = 
other.families.values().iterator(); it.hasNext(); ) { + result = it.next().compareTo(it2.next()); + if (result != 0) { + break; + } } } - - // Compare values - if (!values.equals(other.values)) { - return 1; + if (result == 0) { + // punt on comparison for ordering, just calculate difference + result = this.values.hashCode() - other.values.hashCode(); + if (result < 0) + result = -1; + else if (result > 0) + result = 1; } - - // Compare configuration - if (!configuration.equals(other.configuration)) { - return 1; + if (result == 0) { + result = this.configuration.hashCode() - other.configuration.hashCode(); + if (result < 0) + result = -1; + else if (result > 0) + result = 1; } - - return 0; + return result; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java index fb4506555bbd..253460936dd2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java @@ -18,14 +18,12 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.regionserver.BloomType; import org.junit.experimental.categories.Category; @@ -96,71 +94,4 @@ public void testAddGetRemoveConfiguration() throws Exception { desc.removeConfiguration(key); assertEquals(null, desc.getConfigurationValue(key)); } - - @Test - public void testEqualsWithFamilyName() { - final String name1 = "someFamilyName"; - HColumnDescriptor hcd1 = new 
HColumnDescriptor(name1); - HColumnDescriptor hcd2 = new HColumnDescriptor("someOtherFamilyName"); - HColumnDescriptor hcd3 = new HColumnDescriptor(name1); - - assertFalse(hcd1.equals(hcd2)); - assertFalse(hcd2.equals(hcd1)); - - assertTrue(hcd3.equals(hcd1)); - assertTrue(hcd1.equals(hcd3)); - } - - @Test - public void testEqualsWithAdditionalProperties() { - final String name1 = "someFamilyName"; - HColumnDescriptor hcd1 = new HColumnDescriptor(name1); - HColumnDescriptor hcd2 = new HColumnDescriptor(name1); - hcd2.setBlocksize(4); - - assertFalse(hcd1.equals(hcd2)); - assertFalse(hcd2.equals(hcd1)); - - hcd1.setBlocksize(4); - - assertTrue(hcd2.equals(hcd1)); - assertTrue(hcd1.equals(hcd2)); - } - - @Test - public void testEqualsWithDifferentNumberOfProperties() { - final String name1 = "someFamilyName"; - HColumnDescriptor hcd1 = new HColumnDescriptor(name1); - HColumnDescriptor hcd2 = new HColumnDescriptor(name1); - hcd2.setBlocksize(4); - hcd1.setBlocksize(4); - - assertTrue(hcd2.equals(hcd1)); - assertTrue(hcd1.equals(hcd2)); - - hcd2.setBloomFilterType(BloomType.ROW); - - assertFalse(hcd1.equals(hcd2)); - assertFalse(hcd2.equals(hcd1)); - } - - @Test - public void testEqualsWithDifferentOrderingOfProperties() { - final String name1 = "someFamilyName"; - HColumnDescriptor hcd1 = new HColumnDescriptor(name1); - HColumnDescriptor hcd2 = new HColumnDescriptor(name1); - hcd2.setBlocksize(4); - hcd2.setBloomFilterType(BloomType.ROW); - hcd1.setBloomFilterType(BloomType.ROW); - hcd1.setBlocksize(4); - - assertTrue(hcd2.equals(hcd1)); - assertTrue(hcd1.equals(hcd2)); - } - - @Test - public void testEqualityWithSameObject() { - HColumnDescriptor hcd1 = new HColumnDescriptor("someName"); - assertTrue(hcd1.equals(hcd1)); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java index 5f06b429c91a..bc8e72c8689c 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java @@ -198,173 +198,4 @@ public void testAddGetRemoveConfiguration() throws Exception { desc.removeConfiguration(key); assertEquals(null, desc.getConfigurationValue(key)); } - - @Test - public void testEqualsWithDifferentProperties() { - // Test basic property difference - HTableDescriptor h1 = new HTableDescriptor(); - h1.setName(Bytes.toBytes("n1")); - - HTableDescriptor h2 = new HTableDescriptor(); - h2.setName(Bytes.toBytes("n2")); - - assertFalse(h2.equals(h1)); - assertFalse(h1.equals(h2)); - - h2.setName(Bytes.toBytes("n1")); - assertTrue(h2.equals(h1)); - assertTrue(h1.equals(h2)); - } - - @Test - public void testEqualsWithDifferentNumberOfItems() { - HTableDescriptor h1 = new HTableDescriptor(); - HTableDescriptor h2 = new HTableDescriptor(); - - // Test diff # of items - h1 = new HTableDescriptor(); - h1.setName(Bytes.toBytes("n1")); - - h2 = new HTableDescriptor(); - h2.setName(Bytes.toBytes("n1")); - - HColumnDescriptor hcd1 = new HColumnDescriptor(Bytes.toBytes("someName")); - HColumnDescriptor hcd2 = new HColumnDescriptor(Bytes.toBytes("someOtherName")); - - h1.addFamily(hcd1); - h2.addFamily(hcd1); - h1.addFamily(hcd2); - - assertFalse(h2.equals(h1)); - assertFalse(h1.equals(h2)); - - h2.addFamily(hcd2); - - assertTrue(h2.equals(h1)); - assertTrue(h1.equals(h2)); - } - - @Test - public void testNotEqualsWithDifferentHCDs() { - HTableDescriptor h1 = new HTableDescriptor(); - HTableDescriptor h2 = new HTableDescriptor(); - - // Test diff # of items - h1 = new HTableDescriptor(); - h1.setName(Bytes.toBytes("n1")); - - h2 = new HTableDescriptor(); - h2.setName(Bytes.toBytes("n1")); - - HColumnDescriptor hcd1 = new HColumnDescriptor(Bytes.toBytes("someName")); - HColumnDescriptor hcd2 = new HColumnDescriptor(Bytes.toBytes("someOtherName")); - - h1.addFamily(hcd1); - h2.addFamily(hcd2); - - 
assertFalse(h2.equals(h1)); - assertFalse(h1.equals(h2)); - } - - @Test - public void testEqualsWithDifferentHCDObjects() { - HTableDescriptor h1 = new HTableDescriptor(); - HTableDescriptor h2 = new HTableDescriptor(); - - // Test diff # of items - h1 = new HTableDescriptor(); - h1.setName(Bytes.toBytes("n1")); - - h2 = new HTableDescriptor(); - h2.setName(Bytes.toBytes("n1")); - - HColumnDescriptor hcd1 = new HColumnDescriptor(Bytes.toBytes("someName")); - HColumnDescriptor hcd2 = new HColumnDescriptor(Bytes.toBytes("someName")); - - h1.addFamily(hcd1); - h2.addFamily(hcd2); - - assertTrue(h2.equals(h1)); - assertTrue(h1.equals(h2)); - } - - @Test - public void testNotEqualsWithDifferentItems() { - HTableDescriptor h1 = new HTableDescriptor(); - HTableDescriptor h2 = new HTableDescriptor(); - - // Test diff # of items - h1 = new HTableDescriptor(); - h1.setName(Bytes.toBytes("n1")); - - h2 = new HTableDescriptor(); - h2.setName(Bytes.toBytes("n1")); - - HColumnDescriptor hcd1 = new HColumnDescriptor(Bytes.toBytes("someName")); - HColumnDescriptor hcd2 = new HColumnDescriptor(Bytes.toBytes("someOtherName")); - h1.addFamily(hcd1); - h2.addFamily(hcd2); - - assertFalse(h2.equals(h1)); - assertFalse(h1.equals(h2)); - } - - @Test - public void testEqualsWithDifferentOrderingsOfItems() { - HTableDescriptor h1 = new HTableDescriptor(); - HTableDescriptor h2 = new HTableDescriptor(); - - //Test diff # of items - h1 = new HTableDescriptor(); - h1.setName(Bytes.toBytes("n1")); - - h2 = new HTableDescriptor(); - h2.setName(Bytes.toBytes("n1")); - - HColumnDescriptor hcd1 = new HColumnDescriptor(Bytes.toBytes("someName")); - HColumnDescriptor hcd2 = new HColumnDescriptor(Bytes.toBytes("someOtherName")); - h1.addFamily(hcd1); - h2.addFamily(hcd2); - h1.addFamily(hcd2); - h2.addFamily(hcd1); - - assertTrue(h2.equals(h1)); - assertTrue(h1.equals(h2)); - } - - @Test - public void testSingleItemEquals() { - HTableDescriptor h1 = new HTableDescriptor(); - HTableDescriptor h2 = new 
HTableDescriptor(); - - //Test diff # of items - h1 = new HTableDescriptor(); - h1.setName(Bytes.toBytes("n1")); - - h2 = new HTableDescriptor(); - h2.setName(Bytes.toBytes("n1")); - - HColumnDescriptor hcd1 = new HColumnDescriptor(Bytes.toBytes("someName")); - HColumnDescriptor hcd2 = new HColumnDescriptor(Bytes.toBytes("someName")); - h1.addFamily(hcd1); - h2.addFamily(hcd2); - - assertTrue(h2.equals(h1)); - assertTrue(h1.equals(h2)); - } - - @Test - public void testEmptyEquals() { - HTableDescriptor h1 = new HTableDescriptor(); - HTableDescriptor h2 = new HTableDescriptor(); - - assertTrue(h2.equals(h1)); - assertTrue(h1.equals(h2)); - } - - @Test - public void testEqualityWithSameObject() { - HTableDescriptor htd = new HTableDescriptor("someName"); - assertTrue(htd.equals(htd)); - } }
252399b9a9aeecc11a689c045295470c04d19608
kotlin
Invert if condition intention - smaller- availability range + refactored--
p
https://github.com/JetBrains/kotlin
diff --git a/.idea/dictionaries/valentin.xml b/.idea/dictionaries/valentin.xml index fd8f381047854..45c154aad825c 100644 --- a/.idea/dictionaries/valentin.xml +++ b/.idea/dictionaries/valentin.xml @@ -4,6 +4,7 @@ <w>funs</w> <w>initializers</w> <w>inserter</w> + <w>negatable</w> <w>pparent</w> <w>processings</w> <w>rbracket</w> diff --git a/compiler/frontend/src/org/jetbrains/kotlin/psi/JetIfExpression.java b/compiler/frontend/src/org/jetbrains/kotlin/psi/JetIfExpression.java index b4f7517505e3b..c77b8b19e70ac 100644 --- a/compiler/frontend/src/org/jetbrains/kotlin/psi/JetIfExpression.java +++ b/compiler/frontend/src/org/jetbrains/kotlin/psi/JetIfExpression.java @@ -62,4 +62,9 @@ public JetExpression getElse() { public PsiElement getElseKeyword() { return findChildByType(JetTokens.ELSE_KEYWORD); } + + @NotNull + public PsiElement getIfKeyword() { + return findChildByType(JetTokens.IF_KEYWORD); + } } diff --git a/idea/idea-analysis/src/org/jetbrains/kotlin/idea/JetBundle.properties b/idea/idea-analysis/src/org/jetbrains/kotlin/idea/JetBundle.properties index 17b34f0200106..2d95d68f798d4 100644 --- a/idea/idea-analysis/src/org/jetbrains/kotlin/idea/JetBundle.properties +++ b/idea/idea-analysis/src/org/jetbrains/kotlin/idea/JetBundle.properties @@ -324,8 +324,6 @@ make.type.explicit.in.lambda=Make types explicit in lambda make.type.explicit.in.lambda.family=Make Types Explicit In Lambda make.type.implicit.in.lambda=Make types implicit in lambda (may break code) make.type.implicit.in.lambda.family=Make Types Implicit In Lambda (May Break Code) -invert.if.condition=Invert If Condition -invert.if.condition.family=Invert If Condition replace.java.class.argument=Replace javaClass<T>() with T::class replace.java.class.argument.family=Replace javaClass<T>() with T::class diff --git a/idea/src/org/jetbrains/kotlin/idea/intentions/InvertIfConditionIntention.kt b/idea/src/org/jetbrains/kotlin/idea/intentions/InvertIfConditionIntention.kt index 083f40c3a5e7f..53828a3d44bd4 
100644 --- a/idea/src/org/jetbrains/kotlin/idea/intentions/InvertIfConditionIntention.kt +++ b/idea/src/org/jetbrains/kotlin/idea/intentions/InvertIfConditionIntention.kt @@ -16,153 +16,100 @@ package org.jetbrains.kotlin.idea.intentions -import org.jetbrains.kotlin.psi.JetIfExpression import com.intellij.openapi.editor.Editor -import org.jetbrains.kotlin.psi.JetExpression -import org.jetbrains.kotlin.psi.JetUnaryExpression -import org.jetbrains.kotlin.psi.JetBinaryExpression -import org.jetbrains.kotlin.lexer.JetTokens -import org.jetbrains.kotlin.lexer.JetToken import com.intellij.psi.tree.IElementType -import org.jetbrains.kotlin.psi.JetConstantExpression -import org.jetbrains.kotlin.psi.JetPsiFactory -import org.jetbrains.kotlin.psi.JetPsiUtil -import org.jetbrains.kotlin.psi.JetParenthesizedExpression -import org.jetbrains.kotlin.lexer.JetSingleValueToken -import org.jetbrains.kotlin.lexer.JetKeywordToken -import org.jetbrains.kotlin.psi.JetNamedFunction -import org.jetbrains.kotlin.psi.JetBlockExpression - -public class InvertIfConditionIntention : JetSelfTargetingOffsetIndependentIntention<JetIfExpression>("invert.if.condition", javaClass()) { - fun checkForNegation(element: JetUnaryExpression): Boolean { - return element.getOperationReference().getReferencedName().equals("!") - } - - override fun isApplicableTo(element: JetIfExpression): Boolean { - val condition = element.getCondition() - val thenBranch = element.getThen() - - return condition != null && thenBranch != null && when (condition) { - is JetUnaryExpression -> { - when { - checkForNegation(condition) -> { - val baseExpression = condition.getBaseExpression() +import org.jetbrains.kotlin.lexer.JetTokens +import org.jetbrains.kotlin.psi.* - when (baseExpression) { - is JetParenthesizedExpression -> baseExpression.getExpression() != null - else -> condition.getBaseExpression() != null - } - } - else -> true - } - } - is JetBinaryExpression -> { - condition.getOperationToken() != null && 
condition.getLeft() != null && condition.getRight() != null - } - else -> true - } +public class InvertIfConditionIntention : JetSelfTargetingIntention<JetIfExpression>(javaClass(), "Invert 'if' condition") { + override fun isApplicableTo(element: JetIfExpression, caretOffset: Int): Boolean { + if (!element.getIfKeyword().getTextRange().containsOffset(caretOffset)) return false + return element.getCondition() != null && element.getThen() != null } override fun applyTo(element: JetIfExpression, editor: Editor) { val psiFactory = JetPsiFactory(element) - fun isNegatableOperator(token: IElementType): Boolean { - return token in array(JetTokens.EQEQ, JetTokens.EXCLEQ, JetTokens.EQEQEQ, JetTokens.EXCLEQEQEQ, JetTokens.IS_KEYWORD, JetTokens.NOT_IS, JetTokens.IN_KEYWORD, JetTokens.NOT_IN, JetTokens.LT, JetTokens.LTEQ, JetTokens.GT, JetTokens.GTEQ) - } - - fun getNegatedOperator(token: IElementType): JetToken { - return when { - token == JetTokens.EQEQ -> JetTokens.EXCLEQ - token == JetTokens.EXCLEQ -> JetTokens.EQEQ - token == JetTokens.EQEQEQ -> JetTokens.EXCLEQEQEQ - token == JetTokens.EXCLEQEQEQ -> JetTokens.EQEQEQ - token == JetTokens.IS_KEYWORD -> JetTokens.NOT_IS - token == JetTokens.NOT_IS -> JetTokens.IS_KEYWORD - token == JetTokens.IN_KEYWORD -> JetTokens.NOT_IN - token == JetTokens.NOT_IN -> JetTokens.IN_KEYWORD - token == JetTokens.LT -> JetTokens.GTEQ - token == JetTokens.LTEQ -> JetTokens.GT - token == JetTokens.GT -> JetTokens.LTEQ - token == JetTokens.GTEQ -> JetTokens.LT - else -> throw IllegalArgumentException("The token $token does not have a negated equivalent.") - } - } + val condition = element.getCondition()!! 
+ val newCondition = negate(condition) - fun getTokenText(token: JetToken): String { - return when (token) { - is JetSingleValueToken -> token.getValue() - is JetKeywordToken -> token.getValue() - else -> throw IllegalArgumentException("The token $token does not have an applicable string value.") - } - } + val thenBranch = element.getThen() + val elseBranch = element.getElse() ?: psiFactory.createEmptyBody() - fun negateExpressionText(element: JetExpression): String { - val negatedParenthesizedExpressionText = "!(${element.getText()})" - val possibleNewExpression = psiFactory.createExpression(negatedParenthesizedExpressionText) as JetUnaryExpression - val innerExpression = possibleNewExpression.getBaseExpression() as JetParenthesizedExpression + val newThen = if (elseBranch is JetIfExpression) + psiFactory.wrapInABlock(elseBranch) + else + elseBranch - return when { - JetPsiUtil.areParenthesesUseless(innerExpression) -> "!${element.getText()}" - else -> negatedParenthesizedExpressionText - } - } + val newElse = if (thenBranch is JetBlockExpression && thenBranch.getStatements().isEmpty()) + null + else + thenBranch - fun getNegation(element: JetExpression): JetExpression { - return psiFactory.createExpression(when (element) { - is JetBinaryExpression -> { - val operator = element.getOperationToken()!! 
+ element.replace(psiFactory.createIf(newCondition, newThen, newElse)) + } - when { - isNegatableOperator(operator) -> "${element.getLeft()!!.getText()} ${getTokenText(getNegatedOperator(operator))} ${element.getRight()!!.getText()}" - else -> negateExpressionText(element) - } - } - is JetConstantExpression -> { - when { - element.textMatches("true") -> "false" - element.textMatches("false") -> "true" - else -> negateExpressionText(element) - } - } - else -> negateExpressionText(element) - }) + companion object { + private val NEGATABLE_OPERATORS = setOf(JetTokens.EQEQ, JetTokens.EXCLEQ, JetTokens.EQEQEQ, + JetTokens.EXCLEQEQEQ, JetTokens.IS_KEYWORD, JetTokens.NOT_IS, JetTokens.IN_KEYWORD, + JetTokens.NOT_IN, JetTokens.LT, JetTokens.LTEQ, JetTokens.GT, JetTokens.GTEQ) + + private fun getNegatedOperatorText(token: IElementType): String { + return when(token) { + JetTokens.EQEQ -> JetTokens.EXCLEQ.getValue() + JetTokens.EXCLEQ -> JetTokens.EQEQ.getValue() + JetTokens.EQEQEQ -> JetTokens.EXCLEQEQEQ.getValue() + JetTokens.EXCLEQEQEQ -> JetTokens.EQEQEQ.getValue() + JetTokens.IS_KEYWORD -> JetTokens.NOT_IS.getValue() + JetTokens.NOT_IS -> JetTokens.IS_KEYWORD.getValue() + JetTokens.IN_KEYWORD -> JetTokens.NOT_IN.getValue() + JetTokens.NOT_IN -> JetTokens.IN_KEYWORD.getValue() + JetTokens.LT -> JetTokens.GTEQ.getValue() + JetTokens.LTEQ -> JetTokens.GT.getValue() + JetTokens.GT -> JetTokens.LTEQ.getValue() + JetTokens.GTEQ -> JetTokens.LT.getValue() + else -> throw IllegalArgumentException("The token $token does not have a negated equivalent.") + } } - fun removeNegation(element: JetUnaryExpression): JetExpression { - val baseExpression = element.getBaseExpression()!! + private fun negate(expression: JetExpression): JetExpression { + val specialNegation = specialNegationText(expression) + if (specialNegation != null) return specialNegation - return when (baseExpression) { - is JetParenthesizedExpression -> baseExpression.getExpression()!! 
- else -> baseExpression - } + val negationExpr = JetPsiFactory(expression).createExpression("!a") as JetPrefixExpression + negationExpr.getBaseExpression()!!.replace(expression) + return negationExpr } - fun getFinalExpressionOfFunction(element: JetNamedFunction): JetExpression? { - val body = element.getBodyExpression() + private fun specialNegationText(expression: JetExpression): JetExpression? { + val factory = JetPsiFactory(expression) + when (expression) { + is JetPrefixExpression -> { + if (expression.getOperationReference().getReferencedName() == "!") { + val baseExpression = expression.getBaseExpression() + if (baseExpression != null) { + return JetPsiUtil.safeDeparenthesize(baseExpression) + } + } + } - return when (body) { - is JetBlockExpression -> body.getStatements().last() as JetExpression - else -> body - } - } + is JetBinaryExpression -> { + val operator = expression.getOperationToken() ?: return null + if (operator !in NEGATABLE_OPERATORS) return null + val left = expression.getLeft() ?: return null + val right = expression.getRight() ?: return null + return factory.createExpression(left.getText() + " " + getNegatedOperatorText(operator) + " " + right.getText()) + } - val condition = element.getCondition()!! 
- val replacementCondition = when (condition) { - is JetUnaryExpression -> { - when { - checkForNegation(condition) -> removeNegation(condition) - else -> getNegation(condition) + is JetConstantExpression -> { + return when (expression.getText()) { + "true" -> factory.createExpression("false") + "false" -> factory.createExpression("true") + else -> null + } } } - else -> getNegation(condition) + return null } - - val thenBranch = element.getThen() - val elseBranch = element.getElse() ?: psiFactory.createEmptyBody() - - element.replace(psiFactory.createIf(replacementCondition, when (elseBranch) { - is JetIfExpression -> psiFactory.wrapInABlock(elseBranch) - else -> elseBranch - }, if (thenBranch is JetBlockExpression && thenBranch.getStatements().isEmpty()) null else thenBranch)) } }
fcfbdf64406ac44b771a3c1b91b95d9d9a465391
hadoop
YARN-3181. FairScheduler: Fix up outdated findbugs- issues. (kasha)--(cherry picked from commit c2b185def846f5577a130003a533b9c377b58fab)-
p
https://github.com/apache/hadoop
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 6d27c85bf5be8..87524586bf320 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -243,6 +243,8 @@ Release 2.7.0 - UNRELEASED YARN-2079. Recover NonAggregatingLogHandler state upon nodemanager restart. (Jason Lowe via junping_du) + YARN-3181. FairScheduler: Fix up outdated findbugs issues. (kasha) + YARN-3124. Fixed CS LeafQueue/ParentQueue to use QueueCapacities to track capacities-by-label. (Wangda Tan via jianhe) diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index c45634e1be07b..70f1a71fbcb74 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -142,22 +142,12 @@ <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogAggregationService" /> <Bug pattern="IS2_INCONSISTENT_SYNC" /> </Match> - <Match> - <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationFileLoaderService" /> - <Field name="allocFile" /> - <Bug pattern="IS2_INCONSISTENT_SYNC" /> - </Match> <!-- Inconsistent sync warning - minimumAllocation is only initialized once and never changed --> <Match> <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler" /> <Field name="minimumAllocation" /> <Bug pattern="IS2_INCONSISTENT_SYNC" /> </Match> - <Match> - <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSSchedulerNode" /> - <Method name="reserveResource" /> - <Bug pattern="BC_UNCONFIRMED_CAST" /> - </Match> <!-- Inconsistent sync warning - reinitialize read from other queue does not need sync--> <Match> <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue" /> @@ -215,18 +205,6 @@ <Field name="scheduleAsynchronously" /> <Bug 
pattern="IS2_INCONSISTENT_SYNC" /> </Match> - <!-- Inconsistent sync warning - updateInterval is only initialized once and never changed --> - <Match> - <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler" /> - <Field name="updateInterval" /> - <Bug pattern="IS2_INCONSISTENT_SYNC" /> - </Match> - <!-- Inconsistent sync warning - callDurationMetrics is only initialized once and never changed --> - <Match> - <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler" /> - <Field name="fsOpDurations" /> - <Bug pattern="IS2_INCONSISTENT_SYNC" /> - </Match> <!-- Inconsistent sync warning - numRetries is only initialized once and never changed --> <Match> @@ -426,11 +404,6 @@ <Field name="queue" /> <Bug pattern="IS2_INCONSISTENT_SYNC" /> </Match> - <Match> - <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler" /> - <Field name="allocConf" /> - <Bug pattern="IS2_INCONSISTENT_SYNC" /> - </Match> <Match> <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode" /> <Field name="numContainers" /> diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java index 0ea731403029e..9cb767d38a5d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java @@ -33,6 +33,9 @@ import 
com.google.common.annotations.VisibleForTesting; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe public class AllocationConfiguration extends ReservationSchedulerConfiguration { private static final AccessControlList EVERYBODY_ACL = new AccessControlList("*"); private static final AccessControlList NOBODY_ACL = new AccessControlList(" "); @@ -204,12 +207,16 @@ public float getFairSharePreemptionThreshold(String queueName) { } public ResourceWeights getQueueWeight(String queue) { - ResourceWeights weight = queueWeights.get(queue); - return (weight == null) ? ResourceWeights.NEUTRAL : weight; + synchronized (queueWeights) { + ResourceWeights weight = queueWeights.get(queue); + return (weight == null) ? ResourceWeights.NEUTRAL : weight; + } } public void setQueueWeight(String queue, ResourceWeights weight) { - queueWeights.put(queue, weight); + synchronized (queueWeights) { + queueWeights.put(queue, weight); + } } public int getUserMaxApps(String user) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index 76fa588fc767f..c19aa513e1c1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -201,7 +201,7 @@ public synchronized void setReloadListener(Listener reloadListener) { * @throws ParserConfigurationException if XML parser is misconfigured. 
* @throws SAXException if config file is malformed. */ - public synchronized void reloadAllocations() throws IOException, + public void reloadAllocations() throws IOException, ParserConfigurationException, SAXException, AllocationConfigurationException { if (allocFile == null) { return; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java index c2282fdb736ca..c50f281cb6645 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java @@ -31,6 +31,8 @@ import static org.apache.hadoop.metrics2.lib.Interns.info; import org.apache.hadoop.metrics2.lib.MutableRate; +import javax.annotation.concurrent.ThreadSafe; + /** * Class to capture the performance metrics of FairScheduler. * This should be a singleton. @@ -38,6 +40,7 @@ @InterfaceAudience.Private @InterfaceStability.Unstable @Metrics(context="fairscheduler-op-durations") +@ThreadSafe public class FSOpDurations implements MetricsSource { @Metric("Duration for a continuous scheduling run")
dc9e9cb4cc87f132a32a00e6589d807350f0b8e0
elasticsearch
Aggregations: change to default shard_size in- terms aggregation--The default shard size in the terms aggregation now uses BucketUtils.suggestShardSideQueueSize() to set the shard size if the user does not specify it as a parameter.--Closes -6857-
p
https://github.com/elastic/elasticsearch
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java index c4b57064e80eb..c38f136dd9b29 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.bucket.BucketUtils; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.support.ValuesSourceParser; import org.elasticsearch.search.internal.SearchContext; @@ -32,7 +33,6 @@ */ public class TermsParser implements Aggregator.Parser { - @Override public String type() { return StringTerms.TYPE.name(); @@ -41,19 +41,22 @@ public String type() { @Override public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException { TermsParametersParser aggParser = new TermsParametersParser(); - ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, StringTerms.TYPE, context) - .scriptable(true) - .formattable(true) - .requiresSortedValues(true) - .requiresUniqueValues(true) - .build(); + ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, StringTerms.TYPE, context).scriptable(true).formattable(true) + .requiresSortedValues(true).requiresUniqueValues(true).build(); IncludeExclude.Parser incExcParser = new IncludeExclude.Parser(aggregationName, StringTerms.TYPE, context); aggParser.parse(aggregationName, parser, context, vsParser, incExcParser); + InternalOrder order = resolveOrder(aggParser.getOrderKey(), aggParser.isOrderAsc()); TermsAggregator.BucketCountThresholds bucketCountThresholds = 
aggParser.getBucketCountThresholds(); + if (!(order == InternalOrder.TERM_ASC || order == InternalOrder.TERM_DESC) + && bucketCountThresholds.getShardSize() == aggParser.getDefaultBucketCountThresholds().getShardSize()) { + // The user has not made a shardSize selection. Use default heuristic to avoid any wrong-ranking caused by distributed counting + bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize(), + context.numberOfShards())); + } bucketCountThresholds.ensureValidity(); - InternalOrder order = resolveOrder(aggParser.getOrderKey(), aggParser.isOrderAsc()); - return new TermsAggregatorFactory(aggregationName, vsParser.config(), order, bucketCountThresholds, aggParser.getIncludeExclude(), aggParser.getExecutionHint(), aggParser.getCollectionMode()); + return new TermsAggregatorFactory(aggregationName, vsParser.config(), order, bucketCountThresholds, aggParser.getIncludeExclude(), + aggParser.getExecutionHint(), aggParser.getCollectionMode()); } static InternalOrder resolveOrder(String key, boolean asc) { diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java index 7251617f374ee..4bdaecc646d0c 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java @@ -45,6 +45,31 @@ public void noShardSize_string() throws Exception { .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) .execute().actionGet(); + Terms terms = response.getAggregations().get("keys"); + Collection<Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map<String, Long> expected = ImmutableMap.<String, Long>builder() + .put("1", 8l) + .put("3", 8l) + .put("2", 5l) + .build(); + for (Terms.Bucket bucket : buckets) { + 
assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsText().string()))); + } + } + + @Test + public void shardSizeEqualsSize_string() throws Exception { + createIdx("type=string,index=not_analyzed"); + + indexData(); + + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .setQuery(matchAllQuery()) + .addAggregation(terms("keys").field("key").size(3).shardSize(3) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .execute().actionGet(); + Terms terms = response.getAggregations().get("keys"); Collection<Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); @@ -109,6 +134,31 @@ public void withShardSize_string_singleShard() throws Exception { assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKey()))); } } + + @Test + public void noShardSizeTermOrder_string() throws Exception { + createIdx("type=string,index=not_analyzed"); + + indexData(); + + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .setQuery(matchAllQuery()) + .addAggregation(terms("keys").field("key").size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))) + .execute().actionGet(); + + Terms terms = response.getAggregations().get("keys"); + Collection<Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map<String, Long> expected = ImmutableMap.<String, Long>builder() + .put("1", 8l) + .put("2", 5l) + .put("3", 8l) + .build(); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsText().string()))); + } + } @Test public void noShardSize_long() throws Exception { @@ -123,6 +173,32 @@ public void noShardSize_long() throws Exception { .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) .execute().actionGet(); + Terms terms = response.getAggregations().get("keys"); + Collection<Terms.Bucket> 
buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder() + .put(1, 8l) + .put(3, 8l) + .put(2, 5l) + .build(); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + + @Test + public void shardSizeEqualsSize_long() throws Exception { + + createIdx("type=long"); + + indexData(); + + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .setQuery(matchAllQuery()) + .addAggregation(terms("keys").field("key").size(3).shardSize(3) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .execute().actionGet(); + Terms terms = response.getAggregations().get("keys"); Collection<Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); @@ -188,6 +264,32 @@ public void withShardSize_long_singleShard() throws Exception { } } + @Test + public void noShardSizeTermOrder_long() throws Exception { + + createIdx("type=long"); + + indexData(); + + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .setQuery(matchAllQuery()) + .addAggregation(terms("keys").field("key").size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))) + .execute().actionGet(); + + Terms terms = response.getAggregations().get("keys"); + Collection<Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder() + .put(1, 8l) + .put(2, 5l) + .put(3, 8l) + .build(); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + @Test public void noShardSize_double() throws Exception { @@ -201,6 +303,32 @@ public void noShardSize_double() throws Exception { 
.collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) .execute().actionGet(); + Terms terms = response.getAggregations().get("keys"); + Collection<Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder() + .put(1, 8l) + .put(3, 8l) + .put(2, 5l) + .build(); + for (Terms.Bucket bucket : buckets) { + assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } + + @Test + public void shardSizeEqualsSize_double() throws Exception { + + createIdx("type=double"); + + indexData(); + + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .setQuery(matchAllQuery()) + .addAggregation(terms("keys").field("key").size(3).shardSize(3) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .execute().actionGet(); + Terms terms = response.getAggregations().get("keys"); Collection<Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); @@ -265,4 +393,30 @@ public void withShardSize_double_singleShard() throws Exception { assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); } } + + @Test + public void noShardSizeTermOrder_double() throws Exception { + + createIdx("type=double"); + + indexData(); + + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .setQuery(matchAllQuery()) + .addAggregation(terms("keys").field("key").size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))) + .execute().actionGet(); + + Terms terms = response.getAggregations().get("keys"); + Collection<Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder() + .put(1, 8l) + .put(2, 5l) + .put(3, 8l) + .build(); + for (Terms.Bucket bucket : buckets) { + 
assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + } + } }
9f923255878b7baefd89bc37af8fe3072f163322
elasticsearch
Allow additional settings for the node in- ESSingleNodeTestCase--This change adds a method that extending classes can override to provide additional settings-for the node used in a single node test case.-
a
https://github.com/elastic/elasticsearch
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 6e16d60eafc01..57dfc10684588 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -160,6 +159,11 @@ protected final Collection<Class<? extends Plugin>> pluginList(Class<? extends P return Arrays.asList(plugins); } + /** Additional settings to add when creating the node. Also allows overriding the default settings. */ + protected Settings nodeSettings() { + return Settings.EMPTY; + } + private Node newNode() { Settings settings = Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong())) @@ -177,6 +181,7 @@ private Node newNode() { .put(Node.NODE_LOCAL_SETTING.getKey(), true) .put(Node.NODE_DATA_SETTING.getKey(), true) .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true) // make sure we get what we set :) + .put(nodeSettings()) // allow test cases to provide their own settings or override these .build(); Node build = new MockNode(settings, getVersion(), getPlugins()); build.start();
b2107efb3355d921350058e3caaede813cc7795c
restlet-framework-java
Updated to Scripturian 1.0RC3--
p
https://github.com/restlet/restlet-framework-java
diff --git a/libraries/com.threecrickets.scripturian_1.0/com.threecrickets.scripturian.jar b/libraries/com.threecrickets.scripturian_1.0/com.threecrickets.scripturian.jar index f6e8ed4ad4..25d47aece3 100644 Binary files a/libraries/com.threecrickets.scripturian_1.0/com.threecrickets.scripturian.jar and b/libraries/com.threecrickets.scripturian_1.0/com.threecrickets.scripturian.jar differ diff --git a/modules/org.restlet.ext.script/src/org/restlet/ext/script/ScriptedTextRepresentation.java b/modules/org.restlet.ext.script/src/org/restlet/ext/script/ScriptedTextRepresentation.java index 2b6a8178aa..03b70ca8dc 100644 --- a/modules/org.restlet.ext.script/src/org/restlet/ext/script/ScriptedTextRepresentation.java +++ b/modules/org.restlet.ext.script/src/org/restlet/ext/script/ScriptedTextRepresentation.java @@ -33,10 +33,7 @@ import java.io.IOException; import java.io.StringWriter; import java.io.Writer; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import javax.script.ScriptEngine; import javax.script.ScriptEngineManager; import javax.script.ScriptException; @@ -47,6 +44,7 @@ import org.restlet.representation.WriterRepresentation; import com.threecrickets.scripturian.EmbeddedScript; +import com.threecrickets.scripturian.EmbeddedScriptContext; import com.threecrickets.scripturian.ScriptContextController; /** @@ -181,11 +179,11 @@ public void setScriptContextController( @Override public void write(Writer writer) throws IOException { try { - ConcurrentMap<String, ScriptEngine> scriptEngines = new ConcurrentHashMap<String, ScriptEngine>(); - this.embeddedScript.run(writer, this.errorWriter, false, - scriptEngines, + this.embeddedScript.run(false, writer, this.errorWriter, false, + new EmbeddedScriptContext(this.embeddedScript + .getScriptEngineManager()), new ExposedScriptedTextRepresentationContainer(this), - getScriptContextController(), false); + getScriptContextController()); } catch (ScriptException e) { IOException ioe = 
new IOException("Script exception"); ioe.initCause(e); diff --git a/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ExposedScriptedResourceContainer.java b/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ExposedScriptedResourceContainer.java index 91e416a5ff..93fd12a932 100644 --- a/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ExposedScriptedResourceContainer.java +++ b/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ExposedScriptedResourceContainer.java @@ -34,10 +34,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import javax.script.ScriptEngine; import javax.script.ScriptException; import org.restlet.data.CharacterSet; @@ -51,6 +48,7 @@ import org.restlet.resource.ResourceException; import com.threecrickets.scripturian.EmbeddedScript; +import com.threecrickets.scripturian.EmbeddedScriptContext; import com.threecrickets.scripturian.ScriptContextController; import com.threecrickets.scripturian.ScriptSource; @@ -104,9 +102,9 @@ public class ExposedScriptedResourceContainer { private Language language; /** - * A cache of script engines used by {@link EmbeddedScript}. + * The embedded script context. 
*/ - private final ConcurrentMap<String, ScriptEngine> scriptEngines = new ConcurrentHashMap<String, ScriptEngine>(); + private final EmbeddedScriptContext embeddedScriptContext; /** * Constructs a container with no variant or entity, plain text media type, @@ -125,6 +123,8 @@ public ExposedScriptedResourceContainer(ScriptedResource resource, this.entity = null; this.mediaType = MediaType.TEXT_PLAIN; this.characterSet = resource.getDefaultCharacterSet(); + this.embeddedScriptContext = new EmbeddedScriptContext(resource + .getScriptEngineManager()); } /** @@ -152,6 +152,8 @@ public ExposedScriptedResourceContainer(ScriptedResource resource, if (this.characterSet == null) { this.characterSet = resource.getDefaultCharacterSet(); } + this.embeddedScriptContext = new EmbeddedScriptContext(resource + .getScriptEngineManager()); } /** @@ -177,6 +179,8 @@ public ExposedScriptedResourceContainer(ScriptedResource resource, if (this.characterSet == null) { this.characterSet = resource.getDefaultCharacterSet(); } + this.embeddedScriptContext = new EmbeddedScriptContext(resource + .getScriptEngineManager()); } /** @@ -335,9 +339,9 @@ public void include(String name, String scriptEngineName) scriptDescriptor.setScriptIfAbsent(script); } - script.run(this.resource.getWriter(), this.resource.getErrorWriter(), - true, this.scriptEngines, this, this.resource - .getScriptContextController(), false); + script.run(false, this.resource.getWriter(), this.resource + .getErrorWriter(), true, this.embeddedScriptContext, this, + this.resource.getScriptContextController()); } /** @@ -369,9 +373,9 @@ public Object invoke(String entryPointName) throws ResourceException { if (existing != null) { script = existing; } - script.run(this.resource.getWriter(), this.resource - .getErrorWriter(), true, this.scriptEngines, this, - this.resource.getScriptContextController(), false); + script.run(false, this.resource.getWriter(), this.resource + .getErrorWriter(), true, this.embeddedScriptContext, + this, 
this.resource.getScriptContextController()); } return script.invoke(entryPointName, this, this.resource diff --git a/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ExposedScriptedTextResourceContainer.java b/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ExposedScriptedTextResourceContainer.java index 2afd553d1d..b9a0816d2e 100644 --- a/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ExposedScriptedTextResourceContainer.java +++ b/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ExposedScriptedTextResourceContainer.java @@ -34,10 +34,8 @@ import java.io.IOException; import java.io.StringWriter; import java.io.Writer; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import javax.script.ScriptEngine; import javax.script.ScriptException; import org.restlet.data.CharacterSet; @@ -51,6 +49,7 @@ import org.restlet.representation.Variant; import com.threecrickets.scripturian.EmbeddedScript; +import com.threecrickets.scripturian.EmbeddedScriptContext; import com.threecrickets.scripturian.ScriptSource; /** @@ -111,9 +110,9 @@ public class ExposedScriptedTextResourceContainer { private StringBuffer buffer; /** - * A cache of script engines used by {@link EmbeddedScript}. + * The embedded script context. 
*/ - private final ConcurrentMap<String, ScriptEngine> scriptEngines = new ConcurrentHashMap<String, ScriptEngine>(); + private final EmbeddedScriptContext embeddedScriptContext; /** * Constructs a container with media type and character set according to the @@ -137,6 +136,8 @@ public ExposedScriptedTextResourceContainer(ScriptedTextResource resource, if (this.characterSet == null) { this.characterSet = resource.getDefaultCharacterSet(); } + this.embeddedScriptContext = new EmbeddedScriptContext(resource + .getScriptEngineManager()); } /** @@ -322,9 +323,10 @@ public Representation include(String name, String scriptEngineName) try { // Do not allow caching in streaming mode - if (script.run(writer, this.resource.getErrorWriter(), false, - this.scriptEngines, this, this.resource - .getScriptContextController(), !isStreaming)) { + if (script.run(!isStreaming, writer, + this.resource.getErrorWriter(), false, + this.embeddedScriptContext, this, this.resource + .getScriptContextController())) { // Did the script ask us to start streaming? if (this.startStreaming) { @@ -332,7 +334,7 @@ public Representation include(String name, String scriptEngineName) // Note that this will cause the script to run again! return new ScriptedTextStreamingRepresentation( - this.resource, this, this.scriptEngines, + this.resource, this, this.embeddedScriptContext, this.resource.getScriptContextController(), script, this.flushLines); } @@ -379,7 +381,7 @@ public Representation include(String name, String scriptEngineName) // Note that this will cause the script to run again! 
return new ScriptedTextStreamingRepresentation(this.resource, - this, this.scriptEngines, this.resource + this, this.embeddedScriptContext, this.resource .getScriptContextController(), script, this.flushLines); diff --git a/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ScriptedTextStreamingRepresentation.java b/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ScriptedTextStreamingRepresentation.java index 2dd8dea288..5e105252ae 100644 --- a/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ScriptedTextStreamingRepresentation.java +++ b/modules/org.restlet.ext.script/src/org/restlet/ext/script/internal/ScriptedTextStreamingRepresentation.java @@ -33,9 +33,7 @@ import java.io.IOException; import java.io.Writer; import java.util.Arrays; -import java.util.concurrent.ConcurrentMap; -import javax.script.ScriptEngine; import javax.script.ScriptException; import org.restlet.data.Language; @@ -43,6 +41,7 @@ import org.restlet.representation.WriterRepresentation; import com.threecrickets.scripturian.EmbeddedScript; +import com.threecrickets.scripturian.EmbeddedScriptContext; import com.threecrickets.scripturian.ScriptContextController; /** @@ -74,9 +73,9 @@ class ScriptedTextStreamingRepresentation extends WriterRepresentation { private final ScriptContextController scriptContextController; /** - * A cache of script contexts used by {@link EmbeddedScript}. + * The embedded script context. */ - private final ConcurrentMap<String, ScriptEngine> scriptEngines; + private final EmbeddedScriptContext embeddedScriptContext; /** * Whether to flush the writers after every line. @@ -90,8 +89,8 @@ class ScriptedTextStreamingRepresentation extends WriterRepresentation { * The resource * @param container * The container - * @param scriptEngines - * A cache of script engines used by {@link EmbeddedScript}. 
+ * @param embeddedScriptContext + * The embedded script context * @param scriptContextController * The script context controller * @param script @@ -101,7 +100,7 @@ class ScriptedTextStreamingRepresentation extends WriterRepresentation { */ public ScriptedTextStreamingRepresentation(ScriptedTextResource resource, ExposedScriptedTextResourceContainer container, - ConcurrentMap<String, ScriptEngine> scriptEngines, + EmbeddedScriptContext embeddedScriptContext, ScriptContextController scriptContextController, EmbeddedScript script, boolean flushLines) { // Note that we are setting representation characteristics @@ -109,7 +108,7 @@ public ScriptedTextStreamingRepresentation(ScriptedTextResource resource, super(container.getMediaType()); this.resource = resource; this.container = container; - this.scriptEngines = scriptEngines; + this.embeddedScriptContext = embeddedScriptContext; this.scriptContextController = scriptContextController; this.flushLines = flushLines; setCharacterSet(container.getCharacterSet()); @@ -126,9 +125,9 @@ public void write(Writer writer) throws IOException { this.container.isStreaming = true; this.resource.setWriter(writer); try { - this.script.run(writer, this.resource.getErrorWriter(), - this.flushLines, this.scriptEngines, this.container, - this.scriptContextController, false); + this.script.run(false, writer, this.resource.getErrorWriter(), + this.flushLines, this.embeddedScriptContext, + this.container, this.scriptContextController); } catch (ScriptException e) { IOException ioe = new IOException("Script exception"); ioe.initCause(e);
ac26e42d1e85deac0b7bfa50c3ca3e5298493dd4
ReactiveX-RxJava
use Java Subject<T, R> as contravariant in T and- covariant in R--
p
https://github.com/ReactiveX/RxJava
diff --git a/rxjava-core/src/main/java/rx/Observable.java b/rxjava-core/src/main/java/rx/Observable.java index d5d8fb8297..a14e78329c 100644 --- a/rxjava-core/src/main/java/rx/Observable.java +++ b/rxjava-core/src/main/java/rx/Observable.java @@ -397,7 +397,7 @@ public Subscription subscribe(final Action1<? super T> onNext, final Action1<Thr * @return a {@link ConnectableObservable} that upon connection causes the source Observable to * push results into the specified {@link Subject} */ - public <R> ConnectableObservable<R> multicast(Subject<T, R> subject) { + public <R> ConnectableObservable<R> multicast(Subject<? super T, ? extends R> subject) { return OperationMulticast.multicast(this, subject); } diff --git a/rxjava-core/src/main/java/rx/operators/OperationMulticast.java b/rxjava-core/src/main/java/rx/operators/OperationMulticast.java index e83cfdaa03..e24c24a91a 100644 --- a/rxjava-core/src/main/java/rx/operators/OperationMulticast.java +++ b/rxjava-core/src/main/java/rx/operators/OperationMulticast.java @@ -27,7 +27,7 @@ import rx.subjects.Subject; public class OperationMulticast { - public static <T, R> ConnectableObservable<R> multicast(Observable<? extends T> source, final Subject<T, R> subject) { + public static <T, R> ConnectableObservable<R> multicast(Observable<? extends T> source, final Subject<? super T, ? extends R> subject) { return new MulticastConnectableObservable<T, R>(source, subject); } @@ -35,11 +35,11 @@ private static class MulticastConnectableObservable<T, R> extends ConnectableObs private final Object lock = new Object(); private final Observable<? extends T> source; - private final Subject<T, R> subject; + private final Subject<? super T, ? extends R> subject; private Subscription subscription; - public MulticastConnectableObservable(Observable<? extends T> source, final Subject<T, R> subject) { + public MulticastConnectableObservable(Observable<? extends T> source, final Subject<? super T, ? 
extends R> subject) { super(new OnSubscribeFunc<R>() { @Override public Subscription onSubscribe(Observer<? super R> observer) {
8689c572f29745487f2f19a50f3162f7053b8d2c
drools
JBRULES-1730: Add support for other data types when- writing processes to XML - pluggable data types support--git-svn-id: https://svn.jboss.org/repos/labs/labs/jbossrules/trunk@21496 c60d74c8-e8f6-0310-9e8f-d4a2fc68ab70-
a
https://github.com/kiegroup/drools
diff --git a/drools-compiler/src/main/java/org/drools/xml/BaseAbstractHandler.java b/drools-compiler/src/main/java/org/drools/xml/BaseAbstractHandler.java index c95ee7690c4..ba201d9c993 100644 --- a/drools-compiler/src/main/java/org/drools/xml/BaseAbstractHandler.java +++ b/drools-compiler/src/main/java/org/drools/xml/BaseAbstractHandler.java @@ -26,15 +26,15 @@ * */ public abstract class BaseAbstractHandler { - protected Set validPeers; - protected Set validParents; + protected Set<Class<?>> validPeers; + protected Set<Class<?>> validParents; protected boolean allowNesting; - public Set getValidParents() { + public Set<Class<?>> getValidParents() { return this.validParents; } - public Set getValidPeers() { + public Set<Class<?>> getValidPeers() { return this.validPeers; } diff --git a/drools-compiler/src/main/java/org/drools/xml/Handler.java b/drools-compiler/src/main/java/org/drools/xml/Handler.java index 4b56ca50443..78045b2701c 100644 --- a/drools-compiler/src/main/java/org/drools/xml/Handler.java +++ b/drools-compiler/src/main/java/org/drools/xml/Handler.java @@ -36,11 +36,11 @@ Object end(String uri, String localName, ExtensibleXmlParser xmlPackageReader) throws SAXException; - Set getValidParents(); + Set<Class<?>> getValidParents(); - Set getValidPeers(); + Set<Class<?>> getValidPeers(); boolean allowNesting(); - Class generateNodeFor(); + Class<?> generateNodeFor(); } \ No newline at end of file diff --git a/drools-compiler/src/main/java/org/drools/xml/XmlWorkflowProcessDumper.java b/drools-compiler/src/main/java/org/drools/xml/XmlWorkflowProcessDumper.java index ee57f068222..2c161e7c7a6 100644 --- a/drools-compiler/src/main/java/org/drools/xml/XmlWorkflowProcessDumper.java +++ b/drools-compiler/src/main/java/org/drools/xml/XmlWorkflowProcessDumper.java @@ -10,6 +10,7 @@ import org.drools.process.core.context.variable.Variable; import org.drools.process.core.context.variable.VariableScope; import org.drools.process.core.datatype.DataType; +import 
org.drools.process.core.datatype.impl.type.ObjectDataType; import org.drools.workflow.core.Connection; import org.drools.workflow.core.Node; import org.drools.workflow.core.WorkflowProcess; @@ -116,7 +117,7 @@ private void visitVariables(List<Variable> variables, StringBuffer xmlDump) { visitDataType(variable.getType(), xmlDump); Object value = variable.getValue(); if (value != null) { - visitValue(variable.getValue(), xmlDump); + visitValue(variable.getValue(), variable.getType(), xmlDump); } xmlDump.append(" </variable>" + EOL); } @@ -134,16 +135,17 @@ private void visitSwimlanes(Collection<Swimlane> swimlanes, StringBuffer xmlDump } } - private void visitDataType(DataType dataType, StringBuffer xmlDump) { - xmlDump.append(" <type name=\"" + dataType.getClass().getName() + "\" />" + EOL); + public static void visitDataType(DataType dataType, StringBuffer xmlDump) { + xmlDump.append(" <type name=\"" + dataType.getClass().getName() + "\" "); + // TODO make this pluggable so datatypes can write out other properties as well + if (dataType instanceof ObjectDataType) { + xmlDump.append("className=\"" + ((ObjectDataType) dataType).getClassName() + "\" "); + } + xmlDump.append("/>" + EOL); } - private void visitValue(Object value, StringBuffer xmlDump) { - if (value instanceof String) { - xmlDump.append(" <value>" + XmlDumper.replaceIllegalChars((String) value) + "</value>" + EOL); - } else { - throw new IllegalArgumentException("Unsupported value type: " + value); - } + public static void visitValue(Object value, DataType dataType, StringBuffer xmlDump) { + xmlDump.append(" <value>" + XmlDumper.replaceIllegalChars(dataType.writeValue(value)) + "</value>" + EOL); } private void visitNodes(WorkflowProcess process, StringBuffer xmlDump, boolean includeMeta) { diff --git a/drools-compiler/src/main/java/org/drools/xml/processes/ParameterHandler.java b/drools-compiler/src/main/java/org/drools/xml/processes/ParameterHandler.java index 6b97e927fb4..76b644bba54 100644 --- 
a/drools-compiler/src/main/java/org/drools/xml/processes/ParameterHandler.java +++ b/drools-compiler/src/main/java/org/drools/xml/processes/ParameterHandler.java @@ -1,111 +1,75 @@ package org.drools.xml.processes; -import java.io.Serializable; import java.util.HashSet; import org.drools.process.core.ParameterDefinition; +import org.drools.process.core.TypeObject; +import org.drools.process.core.ValueObject; import org.drools.process.core.Work; import org.drools.process.core.datatype.DataType; -import org.drools.process.core.datatype.impl.type.BooleanDataType; -import org.drools.process.core.datatype.impl.type.FloatDataType; -import org.drools.process.core.datatype.impl.type.IntegerDataType; -import org.drools.process.core.datatype.impl.type.StringDataType; import org.drools.process.core.impl.ParameterDefinitionImpl; import org.drools.xml.BaseAbstractHandler; import org.drools.xml.ExtensibleXmlParser; import org.drools.xml.Handler; -import org.w3c.dom.Element; -import org.w3c.dom.Text; import org.xml.sax.Attributes; import org.xml.sax.SAXException; -import org.xml.sax.SAXParseException; -public class ParameterHandler extends BaseAbstractHandler - implements - Handler { +public class ParameterHandler extends BaseAbstractHandler implements Handler { + public ParameterHandler() { - if ( (this.validParents == null) && (this.validPeers == null) ) { - this.validParents = new HashSet(); - this.validParents.add( Work.class ); - - this.validPeers = new HashSet(); - this.validPeers.add( null ); - + if ((this.validParents == null) && (this.validPeers == null)) { + this.validParents = new HashSet<Class<?>>(); + this.validParents.add(Work.class); + this.validPeers = new HashSet<Class<?>>(); + this.validPeers.add(null); this.allowNesting = false; } } - - public Object start(final String uri, final String localName, final Attributes attrs, final ExtensibleXmlParser parser) throws SAXException { - parser.startElementBuilder( localName, - attrs ); - return null; + 
parser.startElementBuilder(localName, attrs); + final String name = attrs.getValue("name"); + emptyAttributeCheck(localName, "name", name, parser); + Work work = (Work) parser.getParent(); + ParameterDefinition parameterDefinition = new ParameterDefinitionImpl(); + parameterDefinition.setName(name); + work.addParameterDefinition(parameterDefinition); + return new ParameterWrapper(parameterDefinition, work); } public Object end(final String uri, final String localName, final ExtensibleXmlParser parser) throws SAXException { - final Element element = parser.endElementBuilder(); - Work work = (Work) parser.getParent(); - final String name = element.getAttribute("name"); - emptyAttributeCheck(localName, "name", name, parser); - final String type = element.getAttribute("type"); - emptyAttributeCheck(localName, "type", type, parser); - DataType dataType = null; - try { - dataType = (DataType) Class.forName(type).newInstance(); - } catch (ClassNotFoundException e) { - throw new SAXParseException( - "Could not find datatype " + name, parser.getLocator()); - } catch (InstantiationException e) { - throw new SAXParseException( - "Could not instantiate datatype " + name, parser.getLocator()); - } catch (IllegalAccessException e) { - throw new SAXParseException( - "Could not access datatype " + name, parser.getLocator()); - } - String text = ((Text)element.getChildNodes().item( 0 )).getWholeText(); - if (text != null) { - text = text.trim(); - if ("".equals(text)) { - text = null; - } - } - Object value = restoreValue(text, dataType, parser); - ParameterDefinition parameterDefinition = new ParameterDefinitionImpl(name, dataType); - work.addParameterDefinition(parameterDefinition); - work.setParameter(name, value); + parser.endElementBuilder(); return null; } - private Serializable restoreValue(String text, DataType dataType, ExtensibleXmlParser parser) throws SAXException { - if (text == null || "".equals(text)) { - return null; - } - if (dataType == null) { - throw new 
SAXParseException( - "Null datatype", parser.getLocator()); - } - if (dataType instanceof StringDataType) { - return text; - } else if (dataType instanceof IntegerDataType) { - return new Integer(text); - } else if (dataType instanceof FloatDataType) { - return new Float(text); - } else if (dataType instanceof BooleanDataType) { - return new Boolean(text); - } else { - throw new SAXParseException( - "Unknown datatype " + dataType, parser.getLocator()); - } + public Class<?> generateNodeFor() { + return ParameterWrapper.class; } - - public Class generateNodeFor() { - return null; + + public class ParameterWrapper implements TypeObject, ValueObject { + private Work work; + private ParameterDefinition parameterDefinition; + public ParameterWrapper(ParameterDefinition parameterDefinition, Work work) { + this.work = work; + this.parameterDefinition = parameterDefinition; + } + public DataType getType() { + return parameterDefinition.getType(); + } + public void setType(DataType type) { + parameterDefinition.setType(type); + } + public Object getValue() { + return work.getParameter(parameterDefinition.getName()); + } + public void setValue(Object value) { + work.setParameter(parameterDefinition.getName(), value); + } } } diff --git a/drools-compiler/src/main/java/org/drools/xml/processes/TypeHandler.java b/drools-compiler/src/main/java/org/drools/xml/processes/TypeHandler.java index d3816891c01..ca816ed07b3 100644 --- a/drools-compiler/src/main/java/org/drools/xml/processes/TypeHandler.java +++ b/drools-compiler/src/main/java/org/drools/xml/processes/TypeHandler.java @@ -2,8 +2,9 @@ import java.util.HashSet; -import org.drools.process.core.context.variable.Variable; +import org.drools.process.core.TypeObject; import org.drools.process.core.datatype.DataType; +import org.drools.process.core.datatype.impl.type.ObjectDataType; import org.drools.xml.BaseAbstractHandler; import org.drools.xml.ExtensibleXmlParser; import org.drools.xml.Handler; @@ -16,10 +17,10 @@ public class 
TypeHandler extends BaseAbstractHandler Handler { public TypeHandler() { if ( (this.validParents == null) && (this.validPeers == null) ) { - this.validParents = new HashSet(); - this.validParents.add( Variable.class ); + this.validParents = new HashSet<Class<?>>(); + this.validParents.add( TypeObject.class ); - this.validPeers = new HashSet(); + this.validPeers = new HashSet<Class<?>>(); this.validPeers.add( null ); this.allowNesting = false; @@ -34,12 +35,18 @@ public Object start(final String uri, final ExtensibleXmlParser parser) throws SAXException { parser.startElementBuilder( localName, attrs ); - Variable variable = (Variable) parser.getParent(); + TypeObject typeable = (TypeObject) parser.getParent(); final String name = attrs.getValue("name"); emptyAttributeCheck(localName, "name", name, parser); DataType dataType = null; try { dataType = (DataType) Class.forName(name).newInstance(); + // TODO make this pluggable so datatypes can read in other properties as well + if (dataType instanceof ObjectDataType) { + final String className = attrs.getValue("className"); + emptyAttributeCheck(localName, "className", className, parser); + ((ObjectDataType) dataType).setClassName(className); + } } catch (ClassNotFoundException e) { throw new SAXParseException( "Could not find datatype " + name, parser.getLocator()); @@ -51,7 +58,7 @@ public Object start(final String uri, "Could not access datatype " + name, parser.getLocator()); } - variable.setType(dataType); + typeable.setType(dataType); return dataType; } @@ -62,7 +69,7 @@ public Object end(final String uri, return null; } - public Class generateNodeFor() { + public Class<?> generateNodeFor() { return DataType.class; } diff --git a/drools-compiler/src/main/java/org/drools/xml/processes/ValueHandler.java b/drools-compiler/src/main/java/org/drools/xml/processes/ValueHandler.java index e37e9df1de8..956d41d91f0 100644 --- a/drools-compiler/src/main/java/org/drools/xml/processes/ValueHandler.java +++ 
b/drools-compiler/src/main/java/org/drools/xml/processes/ValueHandler.java @@ -1,14 +1,9 @@ package org.drools.xml.processes; -import java.io.Serializable; import java.util.HashSet; -import org.drools.process.core.context.variable.Variable; +import org.drools.process.core.ValueObject; import org.drools.process.core.datatype.DataType; -import org.drools.process.core.datatype.impl.type.BooleanDataType; -import org.drools.process.core.datatype.impl.type.FloatDataType; -import org.drools.process.core.datatype.impl.type.IntegerDataType; -import org.drools.process.core.datatype.impl.type.StringDataType; import org.drools.xml.BaseAbstractHandler; import org.drools.xml.ExtensibleXmlParser; import org.drools.xml.Handler; @@ -18,15 +13,14 @@ import org.xml.sax.SAXException; import org.xml.sax.SAXParseException; -public class ValueHandler extends BaseAbstractHandler - implements - Handler { +public class ValueHandler extends BaseAbstractHandler implements Handler { + public ValueHandler() { if ( (this.validParents == null) && (this.validPeers == null) ) { - this.validParents = new HashSet(); - this.validParents.add( Variable.class ); + this.validParents = new HashSet<Class<?>>(); + this.validParents.add( ValueObject.class ); - this.validPeers = new HashSet(); + this.validPeers = new HashSet<Class<?>>(); this.validPeers.add( null ); this.allowNesting = false; @@ -48,7 +42,7 @@ public Object end(final String uri, final String localName, final ExtensibleXmlParser parser) throws SAXException { final Element element = parser.endElementBuilder(); - Variable variable = (Variable) parser.getParent(); + ValueObject valueObject = (ValueObject) parser.getParent(); String text = ((Text)element.getChildNodes().item( 0 )).getWholeText(); if (text != null) { text.trim(); @@ -56,12 +50,12 @@ public Object end(final String uri, text = null; } } - Serializable value = restoreValue(text, variable.getType(), parser); - variable.setValue(value); + Object value = restoreValue(text, 
valueObject.getType(), parser); + valueObject.setValue(value); return null; } - private Serializable restoreValue(String text, DataType dataType, ExtensibleXmlParser parser) throws SAXException { + private Object restoreValue(String text, DataType dataType, ExtensibleXmlParser parser) throws SAXException { if (text == null || "".equals(text)) { return null; } @@ -69,21 +63,10 @@ private Serializable restoreValue(String text, DataType dataType, ExtensibleXmlP throw new SAXParseException( "Null datatype", parser.getLocator()); } - if (dataType instanceof StringDataType) { - return text; - } else if (dataType instanceof IntegerDataType) { - return new Integer(text); - } else if (dataType instanceof FloatDataType) { - return new Float(text); - } else if (dataType instanceof BooleanDataType) { - return new Boolean(text); - } else { - throw new SAXParseException( - "Unknown datatype " + dataType, parser.getLocator()); - } + return dataType.readValue(text); } - public Class generateNodeFor() { + public Class<?> generateNodeFor() { return null; } diff --git a/drools-compiler/src/main/java/org/drools/xml/processes/VariableHandler.java b/drools-compiler/src/main/java/org/drools/xml/processes/VariableHandler.java index fb4462cfd37..906f991cec4 100644 --- a/drools-compiler/src/main/java/org/drools/xml/processes/VariableHandler.java +++ b/drools-compiler/src/main/java/org/drools/xml/processes/VariableHandler.java @@ -20,10 +20,10 @@ public class VariableHandler extends BaseAbstractHandler Handler { public VariableHandler() { if ( (this.validParents == null) && (this.validPeers == null) ) { - this.validParents = new HashSet(); + this.validParents = new HashSet<Class<?>>(); this.validParents.add( Process.class ); - this.validPeers = new HashSet(); + this.validPeers = new HashSet<Class<?>>(); this.validPeers.add( null ); this.allowNesting = false; @@ -68,7 +68,7 @@ public Object end(final String uri, return null; } - public Class generateNodeFor() { + public Class<?> 
generateNodeFor() { return Variable.class; } diff --git a/drools-compiler/src/main/java/org/drools/xml/processes/WorkItemNodeHandler.java b/drools-compiler/src/main/java/org/drools/xml/processes/WorkItemNodeHandler.java index c04e09ca5ef..9a853034b87 100644 --- a/drools-compiler/src/main/java/org/drools/xml/processes/WorkItemNodeHandler.java +++ b/drools-compiler/src/main/java/org/drools/xml/processes/WorkItemNodeHandler.java @@ -4,10 +4,11 @@ import org.drools.process.core.ParameterDefinition; import org.drools.process.core.Work; +import org.drools.process.core.datatype.DataType; import org.drools.workflow.core.Node; import org.drools.workflow.core.node.WorkItemNode; import org.drools.xml.ExtensibleXmlParser; -import org.drools.xml.XmlDumper; +import org.drools.xml.XmlWorkflowProcessDumper; import org.w3c.dom.Element; import org.xml.sax.SAXException; @@ -29,7 +30,7 @@ protected Node createNode() { return new WorkItemNode(); } - public Class generateNodeFor() { + public Class<?> generateNodeFor() { return WorkItemNode.class; } @@ -76,18 +77,15 @@ protected void visitWork(Work work, StringBuffer xmlDump, boolean includeMeta) { if (work != null) { xmlDump.append(" <work name=\"" + work.getName() + "\" >" + EOL); for (ParameterDefinition paramDefinition: work.getParameterDefinitions()) { - xmlDump.append(" <parameter name=\"" + paramDefinition.getName() + "\" " + - "type=\"" + paramDefinition.getType().getClass().getName() + "\" "); + DataType dataType = paramDefinition.getType(); + xmlDump.append(" <parameter name=\"" + paramDefinition.getName() + "\" >" + EOL + " "); + XmlWorkflowProcessDumper.visitDataType(dataType, xmlDump); Object value = work.getParameter(paramDefinition.getName()); - if (value == null) { - xmlDump.append("/>" + EOL); - } else { - if (value instanceof String) { - xmlDump.append(">" + XmlDumper.replaceIllegalChars((String) value) + "</parameter>" + EOL); - } else { - throw new IllegalArgumentException("Unsupported value type: " + value); - } + if 
(value != null) { + xmlDump.append(" "); + XmlWorkflowProcessDumper.visitValue(value, dataType, xmlDump); } + xmlDump.append(" </parameter>" + EOL); } xmlDump.append(" </work>" + EOL); } diff --git a/drools-compiler/src/main/resources/META-INF/drools-processes-4.0.xsd b/drools-compiler/src/main/resources/META-INF/drools-processes-4.0.xsd index 64208d28dae..566007adfdc 100644 --- a/drools-compiler/src/main/resources/META-INF/drools-processes-4.0.xsd +++ b/drools-compiler/src/main/resources/META-INF/drools-processes-4.0.xsd @@ -69,6 +69,7 @@ <xs:element name="type"> <xs:complexType> <xs:attribute name="name" type="xs:string" use="required"/> + <xs:attribute name="className" type="xs:string" /> </xs:complexType> </xs:element> <xs:element name="value"> @@ -269,12 +270,11 @@ </xs:element> <xs:element name="parameter"> <xs:complexType> - <xs:simpleContent> - <xs:extension base="xs:string"> - <xs:attribute name="name" type="xs:string" use="required"/> - <xs:attribute name="type" type="xs:string" use="required"/> - </xs:extension> - </xs:simpleContent> + <xs:choice minOccurs="0" maxOccurs="unbounded"> + <xs:element ref="drools:type"/> + <xs:element ref="drools:value"/> + </xs:choice> + <xs:attribute name="name" type="xs:string"/> </xs:complexType> </xs:element> <xs:element name="mapping"> diff --git a/drools-compiler/src/test/java/org/drools/xml/processes/XMLPersistenceTest.java b/drools-compiler/src/test/java/org/drools/xml/processes/XMLPersistenceTest.java index 3a1531af6a1..5e9998d0733 100644 --- a/drools-compiler/src/test/java/org/drools/xml/processes/XMLPersistenceTest.java +++ b/drools-compiler/src/test/java/org/drools/xml/processes/XMLPersistenceTest.java @@ -10,12 +10,14 @@ import junit.framework.TestCase; +import org.drools.Person; import org.drools.compiler.PackageBuilderConfiguration; import org.drools.process.core.ParameterDefinition; import org.drools.process.core.Work; import org.drools.process.core.context.swimlane.Swimlane; import 
org.drools.process.core.context.variable.Variable; import org.drools.process.core.datatype.impl.type.IntegerDataType; +import org.drools.process.core.datatype.impl.type.ListDataType; import org.drools.process.core.datatype.impl.type.ObjectDataType; import org.drools.process.core.datatype.impl.type.StringDataType; import org.drools.process.core.event.EventTypeFilter; @@ -131,6 +133,23 @@ public void addNode(Node node) { variable.setType(new IntegerDataType()); variable.setValue(2); variables.add(variable); + variable = new Variable(); + variable.setName("variable3"); + variable.setType(new ObjectDataType("org.drools.Person")); + Person person = new Person(); + person.setName("John"); + variable.setValue(person); + variables.add(variable); + variable = new Variable(); + variable.setName("variable3"); + ListDataType listDataType = new ListDataType(); + listDataType.setType(new ObjectDataType("java.lang.Integer")); + variable.setType(listDataType); + List<Integer> list = new ArrayList<Integer>(); + list.add(10); + list.add(20); + variable.setValue(list); + variables.add(variable); process.getVariableScope().setVariables(variables); Swimlane swimlane = new Swimlane(); diff --git a/drools-core/src/main/java/org/drools/process/core/ParameterDefinition.java b/drools-core/src/main/java/org/drools/process/core/ParameterDefinition.java index 1f62b474cf4..b2ac021711a 100644 --- a/drools-core/src/main/java/org/drools/process/core/ParameterDefinition.java +++ b/drools-core/src/main/java/org/drools/process/core/ParameterDefinition.java @@ -1,17 +1,13 @@ package org.drools.process.core; -import org.drools.process.core.datatype.DataType; /** * * @author <a href="mailto:[email protected]">Kris Verlaenen</a> */ -public interface ParameterDefinition { +public interface ParameterDefinition extends TypeObject { String getName(); void setName(String name); - DataType getType(); - void setType(DataType type); - } diff --git 
a/drools-core/src/main/java/org/drools/process/core/TypeObject.java b/drools-core/src/main/java/org/drools/process/core/TypeObject.java new file mode 100644 index 00000000000..690e4f2dae5 --- /dev/null +++ b/drools-core/src/main/java/org/drools/process/core/TypeObject.java @@ -0,0 +1,10 @@ +package org.drools.process.core; + +import org.drools.process.core.datatype.DataType; + +public interface TypeObject { + + DataType getType(); + void setType(DataType type); + +} diff --git a/drools-core/src/main/java/org/drools/process/core/ValueObject.java b/drools-core/src/main/java/org/drools/process/core/ValueObject.java new file mode 100644 index 00000000000..7f73b7de994 --- /dev/null +++ b/drools-core/src/main/java/org/drools/process/core/ValueObject.java @@ -0,0 +1,8 @@ +package org.drools.process.core; + +public interface ValueObject extends TypeObject { + + Object getValue(); + + void setValue(Object value); +} diff --git a/drools-core/src/main/java/org/drools/process/core/context/variable/Variable.java b/drools-core/src/main/java/org/drools/process/core/context/variable/Variable.java index 6ee0a2d0106..b9f061c8e67 100644 --- a/drools-core/src/main/java/org/drools/process/core/context/variable/Variable.java +++ b/drools-core/src/main/java/org/drools/process/core/context/variable/Variable.java @@ -18,7 +18,8 @@ import java.io.Serializable; -import org.drools.process.core.context.variable.Variable; +import org.drools.process.core.TypeObject; +import org.drools.process.core.ValueObject; import org.drools.process.core.datatype.DataType; import org.drools.process.core.datatype.impl.type.UndefinedDataType; @@ -27,13 +28,13 @@ * * @author <a href="mailto:[email protected]">Kris Verlaenen</a> */ -public class Variable implements Serializable { +public class Variable implements TypeObject, ValueObject, Serializable { private static final long serialVersionUID = 400L; - private String name; - private DataType type; - private Serializable value; + private String name; + private 
DataType type; + private Object value; public Variable() { this.type = UndefinedDataType.getInstance(); @@ -58,11 +59,11 @@ public void setType(final DataType type) { this.type = type; } - public Serializable getValue() { + public Object getValue() { return this.value; } - public void setValue(final Serializable value) { + public void setValue(final Object value) { if ( this.type.verifyDataType( value ) ) { this.value = value; } else { diff --git a/drools-core/src/main/java/org/drools/process/core/datatype/DataType.java b/drools-core/src/main/java/org/drools/process/core/datatype/DataType.java index 2e3717fcb7b..09336b9ffe6 100644 --- a/drools-core/src/main/java/org/drools/process/core/datatype/DataType.java +++ b/drools-core/src/main/java/org/drools/process/core/datatype/DataType.java @@ -29,5 +29,9 @@ public interface DataType extends Externalizable { * Returns true if the given value is a valid value of this data type. */ boolean verifyDataType(Object value); + + String writeValue(Object value); + + Object readValue(String value); } diff --git a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/BooleanDataType.java b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/BooleanDataType.java index c5e24a7e701..aa487e54a08 100644 --- a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/BooleanDataType.java +++ b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/BooleanDataType.java @@ -45,4 +45,12 @@ public boolean verifyDataType(final Object value) { } return false; } + + public Object readValue(String value) { + return new Boolean(value); + } + + public String writeValue(Object value) { + return (Boolean) value ? 
"true" : "false"; + } } diff --git a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/DateDataType.java b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/DateDataType.java deleted file mode 100644 index 8e9209d7d68..00000000000 --- a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/DateDataType.java +++ /dev/null @@ -1,47 +0,0 @@ -package org.drools.process.core.datatype.impl.type; - -/* - * Copyright 2005 JBoss Inc - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import java.util.Date; - -import org.drools.process.core.datatype.DataType; - -/** - * Representation of a date datatype. 
- * - * @author <a href="mailto:[email protected]">Kris Verlaenen</a> - */ -public final class DateDataType implements DataType { - - private static final long serialVersionUID = 400L; - - public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { - } - - public void writeExternal(ObjectOutput out) throws IOException { - } - - public boolean verifyDataType(final Object value) { - if ( value instanceof Date ) { - return true; - } - return false; - } -} diff --git a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/FloatDataType.java b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/FloatDataType.java index 7ab51d3a39e..7236b51dde9 100644 --- a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/FloatDataType.java +++ b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/FloatDataType.java @@ -48,4 +48,13 @@ public boolean verifyDataType(final Object value) { return false; } } + + public Object readValue(String value) { + return new Float(value); + } + + public String writeValue(Object value) { + Float f = (Float) value; + return f == null ? "" : f.toString(); + } } diff --git a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/IntegerDataType.java b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/IntegerDataType.java index b760b71d89c..45723150326 100644 --- a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/IntegerDataType.java +++ b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/IntegerDataType.java @@ -48,4 +48,14 @@ public boolean verifyDataType(final Object value) { return false; } } + + public Object readValue(String value) { + return new Integer(value); + } + + public String writeValue(Object value) { + Integer i = (Integer) value; + return i == null ? 
"" : i.toString(); + } + } diff --git a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/ListDataType.java b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/ListDataType.java index ee84402f9df..270d1aedd7c 100644 --- a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/ListDataType.java +++ b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/ListDataType.java @@ -16,25 +16,30 @@ * limitations under the License. */ -import org.drools.process.core.datatype.DataType; - import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; import java.util.Iterator; import java.util.List; +import org.drools.process.core.TypeObject; +import org.drools.process.core.datatype.DataType; + /** * Representation of a list datatype. * All elements in the list must have the same datatype. * * @author <a href="mailto:[email protected]">Kris Verlaenen</a> */ -public class ListDataType implements DataType { +public class ListDataType extends ObjectDataType implements TypeObject { private static final long serialVersionUID = 400L; private DataType dataType; + + public ListDataType() { + setClassName("java.util.List"); + } public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { dataType = (DataType)in.readObject(); @@ -44,18 +49,15 @@ public void writeExternal(ObjectOutput out) throws IOException { out.writeObject(dataType); } - public ListDataType() { - } - public ListDataType(DataType dataType) { - setDataType(dataType); + setType(dataType); } - public void setDataType(final DataType dataType) { + public void setType(final DataType dataType) { this.dataType = dataType; } - public DataType getDataType() { + public DataType getType() { return this.dataType; } @@ -64,8 +66,8 @@ public boolean verifyDataType(final Object value) { return true; } if (value instanceof List) { - for (final Iterator<?> it = ((List<?>) value).iterator(); it.hasNext();) { - if 
(!this.dataType.verifyDataType(it.next())) { + for (Object o: (List<?>) value) { + if (dataType != null && !dataType.verifyDataType(o)) { return false; } } diff --git a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/ObjectDataType.java b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/ObjectDataType.java index 650edbacfc4..7d81500b919 100644 --- a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/ObjectDataType.java +++ b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/ObjectDataType.java @@ -22,12 +22,14 @@ import org.drools.process.core.datatype.DataType; +import com.thoughtworks.xstream.XStream; + /** * Representation of an object datatype. * * @author <a href="mailto:[email protected]">Kris Verlaenen</a> */ -public final class ObjectDataType implements DataType { +public class ObjectDataType implements DataType { private static final long serialVersionUID = 4L; @@ -49,9 +51,11 @@ public void setClassName(String className) { } public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + className = in.readUTF(); } public void writeExternal(ObjectOutput out) throws IOException { + out.writeUTF(className); } public boolean verifyDataType(final Object value) { @@ -66,4 +70,14 @@ public boolean verifyDataType(final Object value) { } return false; } + + public Object readValue(String value) { + XStream xstream = new XStream(); + return xstream.fromXML(value); + } + + public String writeValue(Object value) { + XStream xstream = new XStream(); + return xstream.toXML(value); + } } diff --git a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/StringDataType.java b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/StringDataType.java index 8cfef116939..af2aa2b1e72 100644 --- a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/StringDataType.java +++ 
b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/StringDataType.java @@ -27,9 +27,7 @@ * * @author <a href="mailto:[email protected]">Kris Verlaenen</a> */ -public class StringDataType - implements - DataType { +public class StringDataType implements DataType { private static final long serialVersionUID = 400L; @@ -48,4 +46,13 @@ public boolean verifyDataType(final Object value) { return false; } } + + public Object readValue(String value) { + return value; + } + + public String writeValue(Object value) { + return (String) value; + } + } diff --git a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/UndefinedDataType.java b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/UndefinedDataType.java index bca9519d66a..a29954480a5 100644 --- a/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/UndefinedDataType.java +++ b/drools-core/src/main/java/org/drools/process/core/datatype/impl/type/UndefinedDataType.java @@ -51,4 +51,13 @@ public boolean verifyDataType(final Object value) { } return false; } + + public Object readValue(String value) { + throw new IllegalArgumentException("Undefined datatype"); + } + + public String writeValue(Object value) { + throw new IllegalArgumentException("Undefined datatype"); + } + } diff --git a/drools-core/src/main/java/org/drools/process/core/impl/ParameterDefinitionImpl.java b/drools-core/src/main/java/org/drools/process/core/impl/ParameterDefinitionImpl.java index 61acfc2ef6c..1d97db1d2b8 100644 --- a/drools-core/src/main/java/org/drools/process/core/impl/ParameterDefinitionImpl.java +++ b/drools-core/src/main/java/org/drools/process/core/impl/ParameterDefinitionImpl.java @@ -11,4 +11,9 @@ */ public class ParameterDefinitionImpl implements ParameterDefinition, Serializable { - private static final long serialVersionUID = 400L; private String name; private DataType type; public ParameterDefinitionImpl(String name, DataType type) { setName(name); 
setType(type); } public String getName() { return name; } public void setName(String name) { if (name == null) { throw new IllegalArgumentException("Name cannot be null"); } this.name = name; } public DataType getType() { return type; } public void setType(DataType type) { if (type == null) { throw new IllegalArgumentException("Data type cannot be null"); } this.type = type; } public String toString() { return name; } } \ No newline at end of file + private static final long serialVersionUID = 400L; private String name; private DataType type; + + public ParameterDefinitionImpl() { + } + + public ParameterDefinitionImpl(String name, DataType type) { setName(name); setType(type); } public String getName() { return name; } public void setName(String name) { if (name == null) { throw new IllegalArgumentException("Name cannot be null"); } this.name = name; } public DataType getType() { return type; } public void setType(DataType type) { if (type == null) { throw new IllegalArgumentException("Data type cannot be null"); } this.type = type; } public String toString() { return name; } } \ No newline at end of file diff --git a/drools-core/src/main/resources/META-INF/WorkDefinitions.conf b/drools-core/src/main/resources/META-INF/WorkDefinitions.conf index 36e3787162e..39ad4f3691d 100644 --- a/drools-core/src/main/resources/META-INF/WorkDefinitions.conf +++ b/drools-core/src/main/resources/META-INF/WorkDefinitions.conf @@ -3,7 +3,6 @@ // The allowed properties are name, parameters, displayName, icon and customEditor // The returned result should thus be of type List<Map<String, Object>> import org.drools.process.core.datatype.impl.type.StringDataType; -import org.drools.process.core.datatype.impl.type.DateDataType; [ diff --git a/drools-core/src/test/java/org/drools/process/ForEachTest.java b/drools-core/src/test/java/org/drools/process/ForEachTest.java index e7ad535a773..a93bd432e23 100644 --- a/drools-core/src/test/java/org/drools/process/ForEachTest.java +++ 
b/drools-core/src/test/java/org/drools/process/ForEachTest.java @@ -42,7 +42,7 @@ public void testForEach() { ListDataType listDataType = new ListDataType(); ObjectDataType personDataType = new ObjectDataType(); personDataType.setClassName("org.drools.Person"); - listDataType.setDataType(personDataType); + listDataType.setType(personDataType); variable.setType(listDataType); variables.add(variable); process.getVariableScope().setVariables(variables);
6ac389cf8a8d445d7689e672f1b9e8dd23f37419
kotlin
Extracted error messages from- DefaultDiagnosticRenderer to special DefaultErrorMessages class.--
p
https://github.com/JetBrains/kotlin
diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/Errors.java b/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/Errors.java index 2c74e53d6a3e8..628d07a338f15 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/Errors.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/Errors.java @@ -38,6 +38,8 @@ import static org.jetbrains.jet.lang.diagnostics.Severity.WARNING; /** + * For error messages, see DefaultErrorMessages and IdeErrorMessages. + * * @author abreslav */ public interface Errors { @@ -326,7 +328,8 @@ public List<TextRange> mark(@NotNull JetWhenConditionInRange condition) { @NotNull @Override public List<TextRange> mark(@NotNull JetNullableType element) { - return markNode(element.getQuestionMarkNode()); + return markNode( + element.getQuestionMarkNode()); } }); DiagnosticFactory1<PsiElement, JetType> UNSAFE_CALL = DiagnosticFactory1.create(ERROR); diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/rendering/DefaultDiagnosticRenderer.java b/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/rendering/DefaultDiagnosticRenderer.java index 746ce87d6182a..6453e0465e7ac 100644 --- a/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/rendering/DefaultDiagnosticRenderer.java +++ b/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/rendering/DefaultDiagnosticRenderer.java @@ -16,25 +16,8 @@ package org.jetbrains.jet.lang.diagnostics.rendering; -import com.intellij.psi.PsiElement; import org.jetbrains.annotations.NotNull; -import org.jetbrains.jet.lang.descriptors.CallableDescriptor; -import org.jetbrains.jet.lang.diagnostics.*; -import org.jetbrains.jet.lang.psi.JetExpression; -import org.jetbrains.jet.lang.psi.JetSimpleNameExpression; -import org.jetbrains.jet.lang.psi.JetTypeConstraint; -import org.jetbrains.jet.lang.resolve.calls.ResolvedCall; -import org.jetbrains.jet.lang.types.JetType; -import org.jetbrains.jet.lexer.JetKeywordToken; -import 
org.jetbrains.jet.resolve.DescriptorRenderer; - -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; - -import static org.jetbrains.jet.lang.diagnostics.Errors.*; -import static org.jetbrains.jet.lang.diagnostics.rendering.Renderers.*; +import org.jetbrains.jet.lang.diagnostics.Diagnostic; /** * @author Evgeny Gerashchenko @@ -42,393 +25,8 @@ */ public class DefaultDiagnosticRenderer implements DiagnosticRenderer<Diagnostic> { public static final DefaultDiagnosticRenderer INSTANCE = new DefaultDiagnosticRenderer(); - private static final Renderer<Collection<? extends ResolvedCall<? extends CallableDescriptor>>> AMBIGUOUS_CALLS = - new Renderer<Collection<? extends ResolvedCall<? extends CallableDescriptor>>>() { - @NotNull - @Override - public String render(@NotNull Collection<? extends ResolvedCall<? extends CallableDescriptor>> argument) { - StringBuilder stringBuilder = new StringBuilder("\n"); - for (ResolvedCall<? extends CallableDescriptor> call : argument) { - stringBuilder.append(DescriptorRenderer.TEXT.render(call.getResultingDescriptor())).append("\n"); - } - return stringBuilder.toString(); - } - }; - - private final Map<AbstractDiagnosticFactory, DiagnosticRenderer<?>> map = - new HashMap<AbstractDiagnosticFactory, DiagnosticRenderer<?>>(); - - protected final <E extends PsiElement> void put(SimpleDiagnosticFactory<E> factory, String message) { - map.put(factory, new SimpleDiagnosticRenderer(message)); - } - - protected final <E extends PsiElement, A> void put(DiagnosticFactory1<E, A> factory, String message, Renderer<? super A> rendererA) { - map.put(factory, new DiagnosticWithParameters1Renderer<A>(message, rendererA)); - } - - protected final <E extends PsiElement, A, B> void put(DiagnosticFactory2<E, A, B> factory, - String message, - Renderer<? super A> rendererA, - Renderer<? 
super B> rendererB) { - map.put(factory, new DiagnosticWithParameters2Renderer<A, B>(message, rendererA, rendererB)); - } - - protected final <E extends PsiElement, A, B, C> void put(DiagnosticFactory3<E, A, B, C> factory, - String message, - Renderer<? super A> rendererA, - Renderer<? super B> rendererB, - Renderer<? super C> rendererC) { - map.put(factory, new DiagnosticWithParameters3Renderer<A, B, C>(message, rendererA, rendererB, rendererC)); - } - - protected DefaultDiagnosticRenderer() { - put(EXCEPTION_WHILE_ANALYZING, "{0}", new Renderer<Throwable>() { - @NotNull - @Override - public String render(@NotNull Throwable e) { - return e.getClass().getSimpleName() + ": " + e.getMessage(); - } - }); - - put(UNRESOLVED_REFERENCE, "Unresolved reference: {0}", TO_STRING); - - put(INVISIBLE_REFERENCE, "Cannot access ''{0}'' in ''{1}''", NAME, NAME); - put(INVISIBLE_MEMBER, "Cannot access ''{0}'' in ''{1}''", NAME, NAME); - - put(REDECLARATION, "Redeclaration: {0}", NAME); - put(NAME_SHADOWING, "Name shadowed: {0}", NAME); - - put(TYPE_MISMATCH, "Type mismatch: inferred type is {1} but {0} was expected", RENDER_TYPE, RENDER_TYPE); - put(INCOMPATIBLE_MODIFIERS, "Incompatible modifiers: ''{0}''", new Renderer<Collection<JetKeywordToken>>() { - @NotNull - @Override - public String render(@NotNull Collection<JetKeywordToken> tokens) { - StringBuilder sb = new StringBuilder(); - for (Iterator<JetKeywordToken> iterator = tokens.iterator(); iterator.hasNext(); ) { - JetKeywordToken modifier = iterator.next(); - sb.append(modifier.getValue()); - if (iterator.hasNext()) { - sb.append(" "); - } - } - return sb.toString(); - } - }); - put(ILLEGAL_MODIFIER, "Illegal modifier ''{0}''", TO_STRING); - - put(REDUNDANT_MODIFIER, "Modifier {0} is redundant because {1} is present", TO_STRING, TO_STRING); - put(ABSTRACT_MODIFIER_IN_TRAIT, "Modifier ''abstract'' is redundant in trait"); - put(OPEN_MODIFIER_IN_TRAIT, "Modifier ''open'' is redundant in trait"); - 
put(REDUNDANT_MODIFIER_IN_GETTER, "Visibility modifiers are redundant in getter"); - put(TRAIT_CAN_NOT_BE_FINAL, "Trait can not be final"); - put(TYPECHECKER_HAS_RUN_INTO_RECURSIVE_PROBLEM, - "Type checking has run into a recursive problem. Easiest workaround: specify types of your declarations explicitly"); // TODO: message - put(RETURN_NOT_ALLOWED, "'return' is not allowed here"); - put(PROJECTION_IN_IMMEDIATE_ARGUMENT_TO_SUPERTYPE, "Projections are not allowed for immediate arguments of a supertype"); - put(LABEL_NAME_CLASH, "There is more than one label with such a name in this scope"); - put(EXPRESSION_EXPECTED_NAMESPACE_FOUND, "Expression expected, but a namespace name found"); - - put(CANNOT_IMPORT_FROM_ELEMENT, "Cannot import from ''{0}''", NAME); - put(CANNOT_BE_IMPORTED, "Cannot import ''{0}'', functions and properties can be imported only from packages", NAME); - put(USELESS_HIDDEN_IMPORT, "Useless import, it is hidden further"); - put(USELESS_SIMPLE_IMPORT, "Useless import, does nothing"); - - put(CANNOT_INFER_PARAMETER_TYPE, - "Cannot infer a type for this parameter. 
To specify it explicitly use the {(p : Type) => ...} notation"); - - put(NO_BACKING_FIELD_ABSTRACT_PROPERTY, "This property doesn't have a backing field, because it's abstract"); - put(NO_BACKING_FIELD_CUSTOM_ACCESSORS, - "This property doesn't have a backing field, because it has custom accessors without reference to the backing field"); - put(INACCESSIBLE_BACKING_FIELD, "The backing field is not accessible here"); - put(NOT_PROPERTY_BACKING_FIELD, "The referenced variable is not a property and doesn't have backing field"); - - put(MIXING_NAMED_AND_POSITIONED_ARGUMENTS, "Mixing named and positioned arguments in not allowed"); - put(ARGUMENT_PASSED_TWICE, "An argument is already passed for this parameter"); - put(NAMED_PARAMETER_NOT_FOUND, "Cannot find a parameter with this name: {0}", TO_STRING); - put(VARARG_OUTSIDE_PARENTHESES, "Passing value as a vararg is only allowed inside a parenthesized argument list"); - put(NON_VARARG_SPREAD, "The spread operator (*foo) may only be applied in a vararg position"); - - put(MANY_FUNCTION_LITERAL_ARGUMENTS, "Only one function literal is allowed outside a parenthesized argument list"); - put(PROPERTY_WITH_NO_TYPE_NO_INITIALIZER, "This property must either have a type annotation or be initialized"); - - put(ABSTRACT_PROPERTY_IN_PRIMARY_CONSTRUCTOR_PARAMETERS, "This property cannot be declared abstract"); - put(ABSTRACT_PROPERTY_NOT_IN_CLASS, "A property may be abstract only when defined in a class or trait"); - put(ABSTRACT_PROPERTY_WITH_INITIALIZER, "Property with initializer cannot be abstract"); - put(ABSTRACT_PROPERTY_WITH_GETTER, "Property with getter implementation cannot be abstract"); - put(ABSTRACT_PROPERTY_WITH_SETTER, "Property with setter implementation cannot be abstract"); - - put(PACKAGE_MEMBER_CANNOT_BE_PROTECTED, "Package member cannot be protected"); - - put(GETTER_VISIBILITY_DIFFERS_FROM_PROPERTY_VISIBILITY, "Getter visibility must be the same as property visibility"); - put(BACKING_FIELD_IN_TRAIT, "Property 
in a trait cannot have a backing field"); - put(MUST_BE_INITIALIZED, "Property must be initialized"); - put(MUST_BE_INITIALIZED_OR_BE_ABSTRACT, "Property must be initialized or be abstract"); - put(PROPERTY_INITIALIZER_IN_TRAIT, "Property initializers are not allowed in traits"); - put(PROPERTY_INITIALIZER_NO_BACKING_FIELD, "Initializer is not allowed here because this property has no backing field"); - put(ABSTRACT_PROPERTY_IN_NON_ABSTRACT_CLASS, "Abstract property {0} in non-abstract class {1}", TO_STRING, TO_STRING, TO_STRING); - put(ABSTRACT_FUNCTION_IN_NON_ABSTRACT_CLASS, "Abstract function {0} in non-abstract class {1}", TO_STRING, TO_STRING, TO_STRING); - put(ABSTRACT_FUNCTION_WITH_BODY, "A function {0} with body cannot be abstract", TO_STRING); - put(NON_ABSTRACT_FUNCTION_WITH_NO_BODY, "Method {0} without a body must be abstract", TO_STRING); - put(NON_MEMBER_ABSTRACT_FUNCTION, "Function {0} is not a class or trait member and cannot be abstract", TO_STRING); - - put(NON_MEMBER_FUNCTION_NO_BODY, "Function {0} must have a body", TO_STRING); - put(NON_FINAL_MEMBER_IN_FINAL_CLASS, "Non final member in a final class"); - - put(PUBLIC_MEMBER_SHOULD_SPECIFY_TYPE, "Public or protected member should specify a type"); - - put(PROJECTION_ON_NON_CLASS_TYPE_ARGUMENT, - "Projections are not allowed on type arguments of functions and properties"); // TODO : better positioning - put(SUPERTYPE_NOT_INITIALIZED, "This type has a constructor, and thus must be initialized here"); - put(SUPERTYPE_NOT_INITIALIZED_DEFAULT, "Constructor invocation should be explicitly specified"); - put(SECONDARY_CONSTRUCTOR_BUT_NO_PRIMARY, "A secondary constructor may appear only in a class that has a primary constructor"); - put(SECONDARY_CONSTRUCTOR_NO_INITIALIZER_LIST, "Secondary constructors must have an initializer list"); - put(BY_IN_SECONDARY_CONSTRUCTOR, "'by'-clause is only supported for primary constructors"); - put(INITIALIZER_WITH_NO_ARGUMENTS, "Constructor arguments required"); - 
put(MANY_CALLS_TO_THIS, "Only one call to 'this(...)' is allowed"); - put(NOTHING_TO_OVERRIDE, "{0} overrides nothing", DescriptorRenderer.TEXT); - put(VIRTUAL_MEMBER_HIDDEN, "''{0}'' hides ''{1}'' in class {2} and needs 'override' modifier", DescriptorRenderer.TEXT, - DescriptorRenderer.TEXT, DescriptorRenderer.TEXT); - - put(ENUM_ENTRY_SHOULD_BE_INITIALIZED, "Missing delegation specifier ''{0}''", NAME); - put(ENUM_ENTRY_ILLEGAL_TYPE, "The type constructor of enum entry should be ''{0}''", NAME); - - put(UNINITIALIZED_VARIABLE, "Variable ''{0}'' must be initialized", NAME); - put(UNINITIALIZED_PARAMETER, "Parameter ''{0}'' is uninitialized here", NAME); - put(UNUSED_VARIABLE, "Variable ''{0}'' is never used", NAME); - put(UNUSED_PARAMETER, "Parameter ''{0}'' is never used", NAME); - put(ASSIGNED_BUT_NEVER_ACCESSED_VARIABLE, "Variable ''{0}'' is assigned but never accessed", NAME); - put(VARIABLE_WITH_REDUNDANT_INITIALIZER, "Variable ''{0}'' initializer is redundant", NAME); - put(UNUSED_VALUE, "The value ''{0}'' assigned to ''{1}'' is never used", ELEMENT_TEXT, TO_STRING); - put(UNUSED_CHANGED_VALUE, "The value changed at ''{0}'' is never used", ELEMENT_TEXT); - put(UNUSED_EXPRESSION, "The expression is unused"); - put(UNUSED_FUNCTION_LITERAL, "The function literal is unused. If you mean block, you can use 'run { ... 
}'"); - - put(VAL_REASSIGNMENT, "Val can not be reassigned", NAME); - put(INITIALIZATION_BEFORE_DECLARATION, "Variable cannot be initialized before declaration", NAME); - put(VARIABLE_EXPECTED, "Variable expected"); - - put(INITIALIZATION_USING_BACKING_FIELD_CUSTOM_SETTER, - "This property has a custom setter, so initialization using backing field required", NAME); - put(INITIALIZATION_USING_BACKING_FIELD_OPEN_SETTER, - "Setter of this property can be overridden, so initialization using backing field required", NAME); - - put(FUNCTION_PARAMETERS_OF_INLINE_FUNCTION, "Function parameters of inline function can only be invoked", NAME); - - put(UNREACHABLE_CODE, "Unreachable code"); - put(MANY_CLASS_OBJECTS, "Only one class object is allowed per class"); - put(CLASS_OBJECT_NOT_ALLOWED, "A class object is not allowed here"); - put(DELEGATION_IN_TRAIT, "Traits cannot use delegation"); - put(DELEGATION_NOT_TO_TRAIT, "Only traits can be delegated to"); - put(NO_CONSTRUCTOR, "This class does not have a constructor"); - put(NOT_A_CLASS, "Not a class"); - put(ILLEGAL_ESCAPE_SEQUENCE, "Illegal escape sequence"); - - put(LOCAL_EXTENSION_PROPERTY, "Local extension properties are not allowed"); - put(LOCAL_VARIABLE_WITH_GETTER, "Local variables are not allowed to have getters"); - put(LOCAL_VARIABLE_WITH_SETTER, "Local variables are not allowed to have setters"); - put(VAL_WITH_SETTER, "A 'val'-property cannot have a setter"); - - put(NO_GET_METHOD, "No get method providing array access"); - put(NO_SET_METHOD, "No set method providing array access"); - - put(INC_DEC_SHOULD_NOT_RETURN_UNIT, "Functions inc(), dec() shouldn't return Unit to be used by operators ++, --"); - put(ASSIGNMENT_OPERATOR_SHOULD_RETURN_UNIT, "Function ''{0}'' should return Unit to be used by corresponding operator ''{1}''", - NAME, ELEMENT_TEXT); - put(ASSIGN_OPERATOR_AMBIGUITY, "Assignment operators ambiguity: {0}", AMBIGUOUS_CALLS); - - put(EQUALS_MISSING, "No method 'equals(Any?) 
: Boolean' available"); - put(ASSIGNMENT_IN_EXPRESSION_CONTEXT, "Assignments are not expressions, and only expressions are allowed in this context"); - put(NAMESPACE_IS_NOT_AN_EXPRESSION, "'namespace' is not an expression, it can only be used on the left-hand side of a dot ('.')"); - put(SUPER_IS_NOT_AN_EXPRESSION, "{0} is not an expression, it can only be used on the left-hand side of a dot ('.')", TO_STRING); - put(DECLARATION_IN_ILLEGAL_CONTEXT, "Declarations are not allowed in this position"); - put(SETTER_PARAMETER_WITH_DEFAULT_VALUE, "Setter parameters can not have default values"); - put(NO_THIS, "'this' is not defined in this context"); - put(SUPER_NOT_AVAILABLE, "No supertypes are accessible in this context"); - put(AMBIGUOUS_SUPER, "Many supertypes available, please specify the one you mean in angle brackets, e.g. 'super<Foo>'"); - put(ABSTRACT_SUPER_CALL, "Abstract member cannot be accessed directly"); - put(NOT_A_SUPERTYPE, "Not a supertype"); - put(TYPE_ARGUMENTS_REDUNDANT_IN_SUPER_QUALIFIER, "Type arguments do not need to be specified in a 'super' qualifier"); - put(USELESS_CAST_STATIC_ASSERT_IS_FINE, "No cast needed, use ':' instead"); - put(USELESS_CAST, "No cast needed"); - put(CAST_NEVER_SUCCEEDS, "This cast can never succeed"); - put(WRONG_SETTER_PARAMETER_TYPE, "Setter parameter type must be equal to the type of the property, i.e. {0}", RENDER_TYPE); - put(WRONG_GETTER_RETURN_TYPE, "Getter return type must be equal to the type of the property, i.e. 
{0}", RENDER_TYPE); - put(NO_CLASS_OBJECT, "Please specify constructor invocation; classifier {0} does not have a class object", NAME); - put(NO_GENERICS_IN_SUPERTYPE_SPECIFIER, "Generic arguments of the base type must be specified"); - - put(HAS_NEXT_PROPERTY_AND_FUNCTION_AMBIGUITY, - "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext' property"); - put(HAS_NEXT_MISSING, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property"); - put(HAS_NEXT_FUNCTION_AMBIGUITY, "Function 'iterator().hasNext()' is ambiguous for this expression"); - put(HAS_NEXT_MUST_BE_READABLE, "The 'iterator().hasNext' property of the loop range must be readable"); - put(HAS_NEXT_PROPERTY_TYPE_MISMATCH, "The 'iterator().hasNext' property of the loop range must return Boolean, but returns {0}", - RENDER_TYPE); - put(HAS_NEXT_FUNCTION_TYPE_MISMATCH, "The 'iterator().hasNext()' function of the loop range must return Boolean, but returns {0}", - RENDER_TYPE); - put(NEXT_AMBIGUITY, "Function 'iterator().next()' is ambiguous for this expression"); - put(NEXT_MISSING, "Loop range must have an 'iterator().next()' function"); - put(ITERATOR_MISSING, "For-loop range must have an iterator() method"); - put(ITERATOR_AMBIGUITY, "Method 'iterator()' is ambiguous for this expression: {0}", AMBIGUOUS_CALLS); - - put(COMPARE_TO_TYPE_MISMATCH, "compareTo() must return Int, but returns {0}", RENDER_TYPE); - put(CALLEE_NOT_A_FUNCTION, "Expecting a function type, but found {0}", RENDER_TYPE); - - put(RETURN_IN_FUNCTION_WITH_EXPRESSION_BODY, - "Returns are not allowed for functions with expression body. 
Use block body in '{...}'"); - put(NO_RETURN_IN_FUNCTION_WITH_BLOCK_BODY, "A 'return' expression required in a function with a block body ('{...}')"); - put(RETURN_TYPE_MISMATCH, "This function must return a value of type {0}", RENDER_TYPE); - put(EXPECTED_TYPE_MISMATCH, "Expected a value of type {0}", RENDER_TYPE); - put(ASSIGNMENT_TYPE_MISMATCH, - "Expected a value of type {0}. Assignment operation is not an expression, so it does not return any value", RENDER_TYPE); - put(IMPLICIT_CAST_TO_UNIT_OR_ANY, "Type was casted to ''{0}''. Please specify ''{0}'' as expected type, if you mean such cast", - RENDER_TYPE); - put(EXPRESSION_EXPECTED, "{0} is not an expression, and only expression are allowed here", new Renderer<JetExpression>() { - @NotNull - @Override - public String render(@NotNull JetExpression expression) { - String expressionType = expression.toString(); - return expressionType.substring(0, 1) + - expressionType.substring(1).toLowerCase(); - } - }); - - put(UPPER_BOUND_VIOLATED, "An upper bound {0} is violated", RENDER_TYPE); // TODO : Message - put(FINAL_CLASS_OBJECT_UPPER_BOUND, "{0} is a final type, and thus a class object cannot extend it", RENDER_TYPE); - put(FINAL_UPPER_BOUND, "{0} is a final type, and thus a value of the type parameter is predetermined", RENDER_TYPE); - put(USELESS_ELVIS, "Elvis operator (?:) always returns the left operand of non-nullable type {0}", RENDER_TYPE); - put(CONFLICTING_UPPER_BOUNDS, "Upper bounds of {0} have empty intersection", NAME); - put(CONFLICTING_CLASS_OBJECT_UPPER_BOUNDS, "Class object upper bounds of {0} have empty intersection", NAME); - - put(TOO_MANY_ARGUMENTS, "Too many arguments for {0}", TO_STRING); - put(ERROR_COMPILE_TIME_VALUE, "{0}", TO_STRING); - - put(ELSE_MISPLACED_IN_WHEN, "'else' entry must be the last one in a when-expression"); - - put(NO_ELSE_IN_WHEN, "'when' expression must contain 'else' branch"); - put(TYPE_MISMATCH_IN_RANGE, "Type mismatch: incompatible types of range and element checked 
in it"); - put(CYCLIC_INHERITANCE_HIERARCHY, "There's a cycle in the inheritance hierarchy for this type"); - - put(MANY_CLASSES_IN_SUPERTYPE_LIST, "Only one class may appear in a supertype list"); - put(SUPERTYPE_NOT_A_CLASS_OR_TRAIT, "Only classes and traits may serve as supertypes"); - put(SUPERTYPE_INITIALIZED_IN_TRAIT, "Traits cannot initialize supertypes"); - put(CONSTRUCTOR_IN_TRAIT, "A trait may not have a constructor"); - put(SECONDARY_CONSTRUCTORS_ARE_NOT_SUPPORTED, "Secondary constructors are not supported"); - put(SUPERTYPE_APPEARS_TWICE, "A supertype appears twice"); - put(FINAL_SUPERTYPE, "This type is final, so it cannot be inherited from"); - - put(ILLEGAL_SELECTOR, "Expression ''{0}'' cannot be a selector (occur after a dot)", TO_STRING); - - put(VALUE_PARAMETER_WITH_NO_TYPE_ANNOTATION, "A type annotation is required on a value parameter"); - put(BREAK_OR_CONTINUE_OUTSIDE_A_LOOP, "'break' and 'continue' are only allowed inside a loop"); - put(NOT_A_LOOP_LABEL, "The label ''{0}'' does not denote a loop", TO_STRING); - put(NOT_A_RETURN_LABEL, "The label ''{0}'' does not reference to a context from which we can return", TO_STRING); - - put(ANONYMOUS_INITIALIZER_WITHOUT_CONSTRUCTOR, "Anonymous initializers are only allowed in the presence of a primary constructor"); - put(NULLABLE_SUPERTYPE, "A supertype cannot be nullable"); - put(UNSAFE_CALL, "Only safe calls (?.) are allowed on a nullable receiver of type {0}", RENDER_TYPE); - put(AMBIGUOUS_LABEL, "Ambiguous label"); - put(UNSUPPORTED, "Unsupported [{0}]", TO_STRING); - put(UNNECESSARY_SAFE_CALL, "Unnecessary safe call on a non-null receiver of type {0}", RENDER_TYPE); - put(UNNECESSARY_NOT_NULL_ASSERTION, "Unnecessary non-null assertion (!!) 
on a non-null receiver of type {0}", RENDER_TYPE); - put(NAME_IN_CONSTRAINT_IS_NOT_A_TYPE_PARAMETER, "{0} does not refer to a type parameter of {1}", new Renderer<JetTypeConstraint>() { - @NotNull - @Override - public String render(@NotNull JetTypeConstraint typeConstraint) { - //noinspection ConstantConditions - return typeConstraint.getSubjectTypeParameterName().getReferencedName(); - } - }, NAME); - put(AUTOCAST_IMPOSSIBLE, "Automatic cast to {0} is impossible, because {1} could have changed since the is-check", RENDER_TYPE, - NAME); - - put(TYPE_MISMATCH_IN_FOR_LOOP, "The loop iterates over values of type {0} but the parameter is declared to be {1}", RENDER_TYPE, - RENDER_TYPE); - put(TYPE_MISMATCH_IN_CONDITION, "Condition must be of type Boolean, but was of type {0}", RENDER_TYPE); - put(TYPE_MISMATCH_IN_TUPLE_PATTERN, "Type mismatch: subject is of type {0} but the pattern is of type Tuple{1}", RENDER_TYPE, - TO_STRING); // TODO: message - put(TYPE_MISMATCH_IN_BINDING_PATTERN, "{0} must be a supertype of {1}. 
Use 'is' to match against {0}", RENDER_TYPE, RENDER_TYPE); - put(INCOMPATIBLE_TYPES, "Incompatible types: {0} and {1}", RENDER_TYPE, RENDER_TYPE); - put(EXPECTED_CONDITION, "Expected condition of Boolean type"); - - put(CANNOT_CHECK_FOR_ERASED, "Cannot check for instance of erased type: {0}", RENDER_TYPE); - put(UNCHECKED_CAST, "Unchecked cast: {0} to {1}", RENDER_TYPE, RENDER_TYPE); - - put(INCONSISTENT_TYPE_PARAMETER_VALUES, "Type parameter {0} of {1} has inconsistent values: {2}", NAME, DescriptorRenderer.TEXT, - new Renderer<Collection<JetType>>() { - @NotNull - @Override - public String render(@NotNull Collection<JetType> types) { - StringBuilder builder = new StringBuilder(); - for (Iterator<JetType> iterator = types.iterator(); iterator.hasNext(); ) { - JetType jetType = iterator.next(); - builder.append(jetType); - if (iterator.hasNext()) { - builder.append(", "); - } - } - return builder.toString(); - } - }); - - put(EQUALITY_NOT_APPLICABLE, "Operator {0} cannot be applied to {1} and {2}", new Renderer<JetSimpleNameExpression>() { - @NotNull - @Override - public String render(@NotNull JetSimpleNameExpression nameExpression) { - //noinspection ConstantConditions - return nameExpression.getReferencedName(); - } - }, TO_STRING, TO_STRING); - - put(OVERRIDING_FINAL_MEMBER, "''{0}'' in ''{1}'' is final and cannot be overridden", NAME, NAME); - put(CANNOT_WEAKEN_ACCESS_PRIVILEGE, "Cannot weaken access privilege ''{0}'' for ''{1}'' in ''{2}''", TO_STRING, NAME, NAME); - put(CANNOT_CHANGE_ACCESS_PRIVILEGE, "Cannot change access privilege ''{0}'' for ''{1}'' in ''{2}''", TO_STRING, NAME, NAME); - - put(RETURN_TYPE_MISMATCH_ON_OVERRIDE, "Return type of {0} is not a subtype of the return type overridden member {1}", - DescriptorRenderer.TEXT, DescriptorRenderer.TEXT); - - put(VAR_OVERRIDDEN_BY_VAL, "Var-property {0} cannot be overridden by val-property {1}", DescriptorRenderer.TEXT, - DescriptorRenderer.TEXT); - - put(ABSTRACT_MEMBER_NOT_IMPLEMENTED, "{0} must be 
declared abstract or implement abstract member {1}", RENDER_CLASS_OR_OBJECT, - DescriptorRenderer.TEXT); - - put(MANY_IMPL_MEMBER_NOT_IMPLEMENTED, "{0} must override {1} because it inherits many implementations of it", - RENDER_CLASS_OR_OBJECT, DescriptorRenderer.TEXT); - - put(CONFLICTING_OVERLOADS, "{1} is already defined in ''{0}''", DescriptorRenderer.TEXT, TO_STRING); - - - put(RESULT_TYPE_MISMATCH, "{0} must return {1} but returns {2}", TO_STRING, RENDER_TYPE, RENDER_TYPE); - put(UNSAFE_INFIX_CALL, - "Infix call corresponds to a dot-qualified call ''{0}.{1}({2})'' which is not allowed on a nullable receiver ''{0}''. " + - "Use '?.'-qualified call instead", - TO_STRING, TO_STRING, TO_STRING); - - put(OVERLOAD_RESOLUTION_AMBIGUITY, "Overload resolution ambiguity: {0}", AMBIGUOUS_CALLS); - put(NONE_APPLICABLE, "None of the following functions can be called with the arguments supplied: {0}", AMBIGUOUS_CALLS); - put(NO_VALUE_FOR_PARAMETER, "No value passed for parameter {0}", DescriptorRenderer.TEXT); - put(MISSING_RECEIVER, "A receiver of type {0} is required", RENDER_TYPE); - put(NO_RECEIVER_ADMITTED, "No receiver can be passed to this function or property"); - - put(CREATING_AN_INSTANCE_OF_ABSTRACT_CLASS, "Can not create an instance of an abstract class"); - put(TYPE_INFERENCE_FAILED, "Type inference failed: {0}", TO_STRING); - put(WRONG_NUMBER_OF_TYPE_ARGUMENTS, "{0} type arguments expected", new Renderer<Integer>() { - @NotNull - @Override - public String render(@NotNull Integer argument) { - return argument == 0 ? "No" : argument.toString(); - } - }); - - put(UNRESOLVED_IDE_TEMPLATE, "Unresolved IDE template: {0}", TO_STRING); - - put(DANGLING_FUNCTION_LITERAL_ARGUMENT_SUSPECTED, - "This expression is treated as an argument to the function call on the previous line. 
" + - "Separate it with a semicolon (;) if it is not intended to be an argument."); - - put(NOT_AN_ANNOTATION_CLASS, "{0} is not an annotation class", TO_STRING); - } + private final DiagnosticFactoryToRendererMap map = DefaultErrorMessages.MAP; @NotNull @Override diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/rendering/DefaultErrorMessages.java b/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/rendering/DefaultErrorMessages.java new file mode 100644 index 0000000000000..f9954ace34c5c --- /dev/null +++ b/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/rendering/DefaultErrorMessages.java @@ -0,0 +1,408 @@ +/* + * Copyright 2010-2012 JetBrains s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.jetbrains.jet.lang.diagnostics.rendering; + +import org.jetbrains.annotations.NotNull; +import org.jetbrains.jet.lang.descriptors.CallableDescriptor; +import org.jetbrains.jet.lang.psi.JetExpression; +import org.jetbrains.jet.lang.psi.JetSimpleNameExpression; +import org.jetbrains.jet.lang.psi.JetTypeConstraint; +import org.jetbrains.jet.lang.resolve.calls.ResolvedCall; +import org.jetbrains.jet.lang.types.JetType; +import org.jetbrains.jet.lexer.JetKeywordToken; +import org.jetbrains.jet.resolve.DescriptorRenderer; + +import java.util.Collection; +import java.util.Iterator; + +import static org.jetbrains.jet.lang.diagnostics.Errors.*; +import static org.jetbrains.jet.lang.diagnostics.rendering.Renderers.*; +import static org.jetbrains.jet.lang.diagnostics.rendering.Renderers.RENDER_TYPE; +import static org.jetbrains.jet.lang.diagnostics.rendering.Renderers.TO_STRING; + +/** + * @author Evgeny Gerashchenko + * @since 4/13/12 + */ +public class DefaultErrorMessages { + public static final DiagnosticFactoryToRendererMap MAP = new DiagnosticFactoryToRendererMap(); + + private static final Renderer<Collection<? extends ResolvedCall<? extends CallableDescriptor>>> AMBIGUOUS_CALLS = + new Renderer<Collection<? extends ResolvedCall<? extends CallableDescriptor>>>() { + @NotNull + @Override + public String render(@NotNull Collection<? extends ResolvedCall<? extends CallableDescriptor>> argument) { + StringBuilder stringBuilder = new StringBuilder("\n"); + for (ResolvedCall<? 
extends CallableDescriptor> call : argument) { + stringBuilder.append(DescriptorRenderer.TEXT.render(call.getResultingDescriptor())).append("\n"); + } + return stringBuilder.toString(); + } + }; + + static { + MAP.put(EXCEPTION_WHILE_ANALYZING, "{0}", new Renderer<Throwable>() { + @NotNull + @Override + public String render(@NotNull Throwable e) { + return e.getClass().getSimpleName() + ": " + e.getMessage(); + } + }); + + MAP.put(UNRESOLVED_REFERENCE, "Unresolved reference: {0}", TO_STRING); + + MAP.put(INVISIBLE_REFERENCE, "Cannot access ''{0}'' in ''{1}''", NAME, NAME); + MAP.put(INVISIBLE_MEMBER, "Cannot access ''{0}'' in ''{1}''", NAME, NAME); + + MAP.put(REDECLARATION, "Redeclaration: {0}", NAME); + MAP.put(NAME_SHADOWING, "Name shadowed: {0}", NAME); + + MAP.put(TYPE_MISMATCH, "Type mismatch: inferred type is {1} but {0} was expected", RENDER_TYPE, RENDER_TYPE); + MAP.put(INCOMPATIBLE_MODIFIERS, "Incompatible modifiers: ''{0}''", new Renderer<Collection<JetKeywordToken>>() { + @NotNull + @Override + public String render(@NotNull Collection<JetKeywordToken> tokens) { + StringBuilder sb = new StringBuilder(); + for (Iterator<JetKeywordToken> iterator = tokens.iterator(); iterator.hasNext(); ) { + JetKeywordToken modifier = iterator.next(); + sb.append(modifier.getValue()); + if (iterator.hasNext()) { + sb.append(" "); + } + } + return sb.toString(); + } + }); + MAP.put(ILLEGAL_MODIFIER, "Illegal modifier ''{0}''", TO_STRING); + + MAP.put(REDUNDANT_MODIFIER, "Modifier {0} is redundant because {1} is present", TO_STRING, TO_STRING); + MAP.put(ABSTRACT_MODIFIER_IN_TRAIT, "Modifier ''abstract'' is redundant in trait"); + MAP.put(OPEN_MODIFIER_IN_TRAIT, "Modifier ''open'' is redundant in trait"); + MAP.put(REDUNDANT_MODIFIER_IN_GETTER, "Visibility modifiers are redundant in getter"); + MAP.put(TRAIT_CAN_NOT_BE_FINAL, "Trait can not be final"); + MAP.put(TYPECHECKER_HAS_RUN_INTO_RECURSIVE_PROBLEM, + "Type checking has run into a recursive problem. 
Easiest workaround: specify types of your declarations explicitly"); // TODO: message + MAP.put(RETURN_NOT_ALLOWED, "'return' is not allowed here"); + MAP.put(PROJECTION_IN_IMMEDIATE_ARGUMENT_TO_SUPERTYPE, "Projections are not allowed for immediate arguments of a supertype"); + MAP.put(LABEL_NAME_CLASH, "There is more than one label with such a name in this scope"); + MAP.put(EXPRESSION_EXPECTED_NAMESPACE_FOUND, "Expression expected, but a namespace name found"); + + MAP.put(CANNOT_IMPORT_FROM_ELEMENT, "Cannot import from ''{0}''", NAME); + MAP.put(CANNOT_BE_IMPORTED, "Cannot import ''{0}'', functions and properties can be imported only from packages", NAME); + MAP.put(USELESS_HIDDEN_IMPORT, "Useless import, it is hidden further"); + MAP.put(USELESS_SIMPLE_IMPORT, "Useless import, does nothing"); + + MAP.put(CANNOT_INFER_PARAMETER_TYPE, + "Cannot infer a type for this parameter. To specify it explicitly use the {(p : Type) => ...} notation"); + + MAP.put(NO_BACKING_FIELD_ABSTRACT_PROPERTY, "This property doesn't have a backing field, because it's abstract"); + MAP.put(NO_BACKING_FIELD_CUSTOM_ACCESSORS, + "This property doesn't have a backing field, because it has custom accessors without reference to the backing field"); + MAP.put(INACCESSIBLE_BACKING_FIELD, "The backing field is not accessible here"); + MAP.put(NOT_PROPERTY_BACKING_FIELD, "The referenced variable is not a property and doesn't have backing field"); + + MAP.put(MIXING_NAMED_AND_POSITIONED_ARGUMENTS, "Mixing named and positioned arguments in not allowed"); + MAP.put(ARGUMENT_PASSED_TWICE, "An argument is already passed for this parameter"); + MAP.put(NAMED_PARAMETER_NOT_FOUND, "Cannot find a parameter with this name: {0}", TO_STRING); + MAP.put(VARARG_OUTSIDE_PARENTHESES, "Passing value as a vararg is only allowed inside a parenthesized argument list"); + MAP.put(NON_VARARG_SPREAD, "The spread operator (*foo) may only be applied in a vararg position"); + + MAP.put(MANY_FUNCTION_LITERAL_ARGUMENTS, 
"Only one function literal is allowed outside a parenthesized argument list"); + MAP.put(PROPERTY_WITH_NO_TYPE_NO_INITIALIZER, "This property must either have a type annotation or be initialized"); + + MAP.put(ABSTRACT_PROPERTY_IN_PRIMARY_CONSTRUCTOR_PARAMETERS, "This property cannot be declared abstract"); + MAP.put(ABSTRACT_PROPERTY_NOT_IN_CLASS, "A property may be abstract only when defined in a class or trait"); + MAP.put(ABSTRACT_PROPERTY_WITH_INITIALIZER, "Property with initializer cannot be abstract"); + MAP.put(ABSTRACT_PROPERTY_WITH_GETTER, "Property with getter implementation cannot be abstract"); + MAP.put(ABSTRACT_PROPERTY_WITH_SETTER, "Property with setter implementation cannot be abstract"); + + MAP.put(PACKAGE_MEMBER_CANNOT_BE_PROTECTED, "Package member cannot be protected"); + + MAP.put(GETTER_VISIBILITY_DIFFERS_FROM_PROPERTY_VISIBILITY, "Getter visibility must be the same as property visibility"); + MAP.put(BACKING_FIELD_IN_TRAIT, "Property in a trait cannot have a backing field"); + MAP.put(MUST_BE_INITIALIZED, "Property must be initialized"); + MAP.put(MUST_BE_INITIALIZED_OR_BE_ABSTRACT, "Property must be initialized or be abstract"); + MAP.put(PROPERTY_INITIALIZER_IN_TRAIT, "Property initializers are not allowed in traits"); + MAP.put(PROPERTY_INITIALIZER_NO_BACKING_FIELD, "Initializer is not allowed here because this property has no backing field"); + MAP.put(ABSTRACT_PROPERTY_IN_NON_ABSTRACT_CLASS, "Abstract property {0} in non-abstract class {1}", TO_STRING, TO_STRING, TO_STRING); + MAP.put(ABSTRACT_FUNCTION_IN_NON_ABSTRACT_CLASS, "Abstract function {0} in non-abstract class {1}", TO_STRING, TO_STRING, TO_STRING); + MAP.put(ABSTRACT_FUNCTION_WITH_BODY, "A function {0} with body cannot be abstract", TO_STRING); + MAP.put(NON_ABSTRACT_FUNCTION_WITH_NO_BODY, "Method {0} without a body must be abstract", TO_STRING); + MAP.put(NON_MEMBER_ABSTRACT_FUNCTION, "Function {0} is not a class or trait member and cannot be abstract", TO_STRING); + + 
MAP.put(NON_MEMBER_FUNCTION_NO_BODY, "Function {0} must have a body", TO_STRING); + MAP.put(NON_FINAL_MEMBER_IN_FINAL_CLASS, "Non final member in a final class"); + + MAP.put(PUBLIC_MEMBER_SHOULD_SPECIFY_TYPE, "Public or protected member should specify a type"); + + MAP.put(PROJECTION_ON_NON_CLASS_TYPE_ARGUMENT, + "Projections are not allowed on type arguments of functions and properties"); // TODO : better positioning + MAP.put(SUPERTYPE_NOT_INITIALIZED, "This type has a constructor, and thus must be initialized here"); + MAP.put(SUPERTYPE_NOT_INITIALIZED_DEFAULT, "Constructor invocation should be explicitly specified"); + MAP.put(SECONDARY_CONSTRUCTOR_BUT_NO_PRIMARY, "A secondary constructor may appear only in a class that has a primary constructor"); + MAP.put(SECONDARY_CONSTRUCTOR_NO_INITIALIZER_LIST, "Secondary constructors must have an initializer list"); + MAP.put(BY_IN_SECONDARY_CONSTRUCTOR, "'by'-clause is only supported for primary constructors"); + MAP.put(INITIALIZER_WITH_NO_ARGUMENTS, "Constructor arguments required"); + MAP.put(MANY_CALLS_TO_THIS, "Only one call to 'this(...)' is allowed"); + MAP.put(NOTHING_TO_OVERRIDE, "{0} overrides nothing", DescriptorRenderer.TEXT); + MAP.put(VIRTUAL_MEMBER_HIDDEN, "''{0}'' hides ''{1}'' in class {2} and needs 'override' modifier", DescriptorRenderer.TEXT, + DescriptorRenderer.TEXT, DescriptorRenderer.TEXT); + + MAP.put(ENUM_ENTRY_SHOULD_BE_INITIALIZED, "Missing delegation specifier ''{0}''", NAME); + MAP.put(ENUM_ENTRY_ILLEGAL_TYPE, "The type constructor of enum entry should be ''{0}''", NAME); + + MAP.put(UNINITIALIZED_VARIABLE, "Variable ''{0}'' must be initialized", NAME); + MAP.put(UNINITIALIZED_PARAMETER, "Parameter ''{0}'' is uninitialized here", NAME); + MAP.put(UNUSED_VARIABLE, "Variable ''{0}'' is never used", NAME); + MAP.put(UNUSED_PARAMETER, "Parameter ''{0}'' is never used", NAME); + MAP.put(ASSIGNED_BUT_NEVER_ACCESSED_VARIABLE, "Variable ''{0}'' is assigned but never accessed", NAME); + 
MAP.put(VARIABLE_WITH_REDUNDANT_INITIALIZER, "Variable ''{0}'' initializer is redundant", NAME); + MAP.put(UNUSED_VALUE, "The value ''{0}'' assigned to ''{1}'' is never used", ELEMENT_TEXT, TO_STRING); + MAP.put(UNUSED_CHANGED_VALUE, "The value changed at ''{0}'' is never used", ELEMENT_TEXT); + MAP.put(UNUSED_EXPRESSION, "The expression is unused"); + MAP.put(UNUSED_FUNCTION_LITERAL, "The function literal is unused. If you mean block, you can use 'run { ... }'"); + + MAP.put(VAL_REASSIGNMENT, "Val can not be reassigned", NAME); + MAP.put(INITIALIZATION_BEFORE_DECLARATION, "Variable cannot be initialized before declaration", NAME); + MAP.put(VARIABLE_EXPECTED, "Variable expected"); + + MAP.put(INITIALIZATION_USING_BACKING_FIELD_CUSTOM_SETTER, + "This property has a custom setter, so initialization using backing field required", NAME); + MAP.put(INITIALIZATION_USING_BACKING_FIELD_OPEN_SETTER, + "Setter of this property can be overridden, so initialization using backing field required", NAME); + + MAP.put(FUNCTION_PARAMETERS_OF_INLINE_FUNCTION, "Function parameters of inline function can only be invoked", NAME); + + MAP.put(UNREACHABLE_CODE, "Unreachable code"); + + MAP.put(MANY_CLASS_OBJECTS, "Only one class object is allowed per class"); + MAP.put(CLASS_OBJECT_NOT_ALLOWED, "A class object is not allowed here"); + MAP.put(DELEGATION_IN_TRAIT, "Traits cannot use delegation"); + MAP.put(DELEGATION_NOT_TO_TRAIT, "Only traits can be delegated to"); + MAP.put(NO_CONSTRUCTOR, "This class does not have a constructor"); + MAP.put(NOT_A_CLASS, "Not a class"); + MAP.put(ILLEGAL_ESCAPE_SEQUENCE, "Illegal escape sequence"); + + MAP.put(LOCAL_EXTENSION_PROPERTY, "Local extension properties are not allowed"); + MAP.put(LOCAL_VARIABLE_WITH_GETTER, "Local variables are not allowed to have getters"); + MAP.put(LOCAL_VARIABLE_WITH_SETTER, "Local variables are not allowed to have setters"); + MAP.put(VAL_WITH_SETTER, "A 'val'-property cannot have a setter"); + + MAP.put(NO_GET_METHOD, 
"No get method providing array access"); + MAP.put(NO_SET_METHOD, "No set method providing array access"); + + MAP.put(INC_DEC_SHOULD_NOT_RETURN_UNIT, "Functions inc(), dec() shouldn't return Unit to be used by operators ++, --"); + MAP.put(ASSIGNMENT_OPERATOR_SHOULD_RETURN_UNIT, "Function ''{0}'' should return Unit to be used by corresponding operator ''{1}''", + NAME, ELEMENT_TEXT); + MAP.put(ASSIGN_OPERATOR_AMBIGUITY, "Assignment operators ambiguity: {0}", AMBIGUOUS_CALLS); + + MAP.put(EQUALS_MISSING, "No method 'equals(Any?) : Boolean' available"); + MAP.put(ASSIGNMENT_IN_EXPRESSION_CONTEXT, "Assignments are not expressions, and only expressions are allowed in this context"); + MAP.put(NAMESPACE_IS_NOT_AN_EXPRESSION, "'namespace' is not an expression, it can only be used on the left-hand side of a dot ('.')"); + MAP.put(SUPER_IS_NOT_AN_EXPRESSION, "{0} is not an expression, it can only be used on the left-hand side of a dot ('.')", TO_STRING); + MAP.put(DECLARATION_IN_ILLEGAL_CONTEXT, "Declarations are not allowed in this position"); + MAP.put(SETTER_PARAMETER_WITH_DEFAULT_VALUE, "Setter parameters can not have default values"); + MAP.put(NO_THIS, "'this' is not defined in this context"); + MAP.put(SUPER_NOT_AVAILABLE, "No supertypes are accessible in this context"); + MAP.put(AMBIGUOUS_SUPER, "Many supertypes available, please specify the one you mean in angle brackets, e.g. 'super<Foo>'"); + MAP.put(ABSTRACT_SUPER_CALL, "Abstract member cannot be accessed directly"); + MAP.put(NOT_A_SUPERTYPE, "Not a supertype"); + MAP.put(TYPE_ARGUMENTS_REDUNDANT_IN_SUPER_QUALIFIER, "Type arguments do not need to be specified in a 'super' qualifier"); + MAP.put(USELESS_CAST_STATIC_ASSERT_IS_FINE, "No cast needed, use ':' instead"); + MAP.put(USELESS_CAST, "No cast needed"); + MAP.put(CAST_NEVER_SUCCEEDS, "This cast can never succeed"); + MAP.put(WRONG_SETTER_PARAMETER_TYPE, "Setter parameter type must be equal to the type of the property, i.e. 
{0}", RENDER_TYPE); + MAP.put(WRONG_GETTER_RETURN_TYPE, "Getter return type must be equal to the type of the property, i.e. {0}", RENDER_TYPE); + MAP.put(NO_CLASS_OBJECT, "Please specify constructor invocation; classifier {0} does not have a class object", NAME); + MAP.put(NO_GENERICS_IN_SUPERTYPE_SPECIFIER, "Generic arguments of the base type must be specified"); + + MAP.put(HAS_NEXT_PROPERTY_AND_FUNCTION_AMBIGUITY, + "An ambiguity between 'iterator().hasNext()' function and 'iterator().hasNext' property"); + MAP.put(HAS_NEXT_MISSING, "Loop range must have an 'iterator().hasNext()' function or an 'iterator().hasNext' property"); + MAP.put(HAS_NEXT_FUNCTION_AMBIGUITY, "Function 'iterator().hasNext()' is ambiguous for this expression"); + MAP.put(HAS_NEXT_MUST_BE_READABLE, "The 'iterator().hasNext' property of the loop range must be readable"); + MAP.put(HAS_NEXT_PROPERTY_TYPE_MISMATCH, "The 'iterator().hasNext' property of the loop range must return Boolean, but returns {0}", + RENDER_TYPE); + MAP.put(HAS_NEXT_FUNCTION_TYPE_MISMATCH, "The 'iterator().hasNext()' function of the loop range must return Boolean, but returns {0}", + RENDER_TYPE); + MAP.put(NEXT_AMBIGUITY, "Function 'iterator().next()' is ambiguous for this expression"); + MAP.put(NEXT_MISSING, "Loop range must have an 'iterator().next()' function"); + MAP.put(ITERATOR_MISSING, "For-loop range must have an iterator() method"); + MAP.put(ITERATOR_AMBIGUITY, "Method 'iterator()' is ambiguous for this expression: {0}", AMBIGUOUS_CALLS); + + MAP.put(COMPARE_TO_TYPE_MISMATCH, "compareTo() must return Int, but returns {0}", RENDER_TYPE); + MAP.put(CALLEE_NOT_A_FUNCTION, "Expecting a function type, but found {0}", RENDER_TYPE); + + MAP.put(RETURN_IN_FUNCTION_WITH_EXPRESSION_BODY, + "Returns are not allowed for functions with expression body. 
Use block body in '{...}'"); + MAP.put(NO_RETURN_IN_FUNCTION_WITH_BLOCK_BODY, "A 'return' expression required in a function with a block body ('{...}')"); + MAP.put(RETURN_TYPE_MISMATCH, "This function must return a value of type {0}", RENDER_TYPE); + MAP.put(EXPECTED_TYPE_MISMATCH, "Expected a value of type {0}", RENDER_TYPE); + MAP.put(ASSIGNMENT_TYPE_MISMATCH, + "Expected a value of type {0}. Assignment operation is not an expression, so it does not return any value", RENDER_TYPE); + MAP.put(IMPLICIT_CAST_TO_UNIT_OR_ANY, "Type was casted to ''{0}''. Please specify ''{0}'' as expected type, if you mean such cast", + RENDER_TYPE); + MAP.put(EXPRESSION_EXPECTED, "{0} is not an expression, and only expression are allowed here", new Renderer<JetExpression>() { + @NotNull + @Override + public String render(@NotNull JetExpression expression) { + String expressionType = expression.toString(); + return expressionType.substring(0, 1) + + expressionType.substring(1).toLowerCase(); + } + }); + + MAP.put(UPPER_BOUND_VIOLATED, "An upper bound {0} is violated", RENDER_TYPE); // TODO : Message + MAP.put(FINAL_CLASS_OBJECT_UPPER_BOUND, "{0} is a final type, and thus a class object cannot extend it", RENDER_TYPE); + MAP.put(FINAL_UPPER_BOUND, "{0} is a final type, and thus a value of the type parameter is predetermined", RENDER_TYPE); + MAP.put(USELESS_ELVIS, "Elvis operator (?:) always returns the left operand of non-nullable type {0}", RENDER_TYPE); + MAP.put(CONFLICTING_UPPER_BOUNDS, "Upper bounds of {0} have empty intersection", NAME); + MAP.put(CONFLICTING_CLASS_OBJECT_UPPER_BOUNDS, "Class object upper bounds of {0} have empty intersection", NAME); + + MAP.put(TOO_MANY_ARGUMENTS, "Too many arguments for {0}", TO_STRING); + MAP.put(ERROR_COMPILE_TIME_VALUE, "{0}", TO_STRING); + + MAP.put(ELSE_MISPLACED_IN_WHEN, "'else' entry must be the last one in a when-expression"); + + MAP.put(NO_ELSE_IN_WHEN, "'when' expression must contain 'else' branch"); + 
MAP.put(TYPE_MISMATCH_IN_RANGE, "Type mismatch: incompatible types of range and element checked in it"); + MAP.put(CYCLIC_INHERITANCE_HIERARCHY, "There's a cycle in the inheritance hierarchy for this type"); + + MAP.put(MANY_CLASSES_IN_SUPERTYPE_LIST, "Only one class may appear in a supertype list"); + MAP.put(SUPERTYPE_NOT_A_CLASS_OR_TRAIT, "Only classes and traits may serve as supertypes"); + MAP.put(SUPERTYPE_INITIALIZED_IN_TRAIT, "Traits cannot initialize supertypes"); + MAP.put(CONSTRUCTOR_IN_TRAIT, "A trait may not have a constructor"); + MAP.put(SECONDARY_CONSTRUCTORS_ARE_NOT_SUPPORTED, "Secondary constructors are not supported"); + MAP.put(SUPERTYPE_APPEARS_TWICE, "A supertype appears twice"); + MAP.put(FINAL_SUPERTYPE, "This type is final, so it cannot be inherited from"); + + MAP.put(ILLEGAL_SELECTOR, "Expression ''{0}'' cannot be a selector (occur after a dot)", TO_STRING); + + MAP.put(VALUE_PARAMETER_WITH_NO_TYPE_ANNOTATION, "A type annotation is required on a value parameter"); + MAP.put(BREAK_OR_CONTINUE_OUTSIDE_A_LOOP, "'break' and 'continue' are only allowed inside a loop"); + MAP.put(NOT_A_LOOP_LABEL, "The label ''{0}'' does not denote a loop", TO_STRING); + MAP.put(NOT_A_RETURN_LABEL, "The label ''{0}'' does not reference to a context from which we can return", TO_STRING); + + MAP.put(ANONYMOUS_INITIALIZER_WITHOUT_CONSTRUCTOR, "Anonymous initializers are only allowed in the presence of a primary constructor"); + MAP.put(NULLABLE_SUPERTYPE, "A supertype cannot be nullable"); + MAP.put(UNSAFE_CALL, "Only safe calls (?.) are allowed on a nullable receiver of type {0}", RENDER_TYPE); + MAP.put(AMBIGUOUS_LABEL, "Ambiguous label"); + MAP.put(UNSUPPORTED, "Unsupported [{0}]", TO_STRING); + MAP.put(UNNECESSARY_SAFE_CALL, "Unnecessary safe call on a non-null receiver of type {0}", RENDER_TYPE); + MAP.put(UNNECESSARY_NOT_NULL_ASSERTION, "Unnecessary non-null assertion (!!) 
on a non-null receiver of type {0}", RENDER_TYPE); + MAP.put(NAME_IN_CONSTRAINT_IS_NOT_A_TYPE_PARAMETER, "{0} does not refer to a type parameter of {1}", new Renderer<JetTypeConstraint>() { + @NotNull + @Override + public String render(@NotNull JetTypeConstraint typeConstraint) { + //noinspection ConstantConditions + return typeConstraint.getSubjectTypeParameterName().getReferencedName(); + } + }, NAME); + MAP.put(AUTOCAST_IMPOSSIBLE, "Automatic cast to {0} is impossible, because {1} could have changed since the is-check", RENDER_TYPE, + NAME); + + MAP.put(TYPE_MISMATCH_IN_FOR_LOOP, "The loop iterates over values of type {0} but the parameter is declared to be {1}", RENDER_TYPE, + RENDER_TYPE); + MAP.put(TYPE_MISMATCH_IN_CONDITION, "Condition must be of type Boolean, but was of type {0}", RENDER_TYPE); + MAP.put(TYPE_MISMATCH_IN_TUPLE_PATTERN, "Type mismatch: subject is of type {0} but the pattern is of type Tuple{1}", RENDER_TYPE, + TO_STRING); // TODO: message + MAP.put(TYPE_MISMATCH_IN_BINDING_PATTERN, "{0} must be a supertype of {1}. 
Use 'is' to match against {0}", RENDER_TYPE, RENDER_TYPE); + MAP.put(INCOMPATIBLE_TYPES, "Incompatible types: {0} and {1}", RENDER_TYPE, RENDER_TYPE); + MAP.put(EXPECTED_CONDITION, "Expected condition of Boolean type"); + + MAP.put(CANNOT_CHECK_FOR_ERASED, "Cannot check for instance of erased type: {0}", RENDER_TYPE); + MAP.put(UNCHECKED_CAST, "Unchecked cast: {0} to {1}", RENDER_TYPE, RENDER_TYPE); + + MAP.put(INCONSISTENT_TYPE_PARAMETER_VALUES, "Type parameter {0} of {1} has inconsistent values: {2}", NAME, DescriptorRenderer.TEXT, + new Renderer<Collection<JetType>>() { + @NotNull + @Override + public String render(@NotNull Collection<JetType> types) { + StringBuilder builder = new StringBuilder(); + for (Iterator<JetType> iterator = types.iterator(); iterator.hasNext(); ) { + JetType jetType = iterator.next(); + builder.append(jetType); + if (iterator.hasNext()) { + builder.append(", "); + } + } + return builder.toString(); + } + }); + + MAP.put(EQUALITY_NOT_APPLICABLE, "Operator {0} cannot be applied to {1} and {2}", new Renderer<JetSimpleNameExpression>() { + @NotNull + @Override + public String render(@NotNull JetSimpleNameExpression nameExpression) { + //noinspection ConstantConditions + return nameExpression.getReferencedName(); + } + }, TO_STRING, TO_STRING); + + MAP.put(OVERRIDING_FINAL_MEMBER, "''{0}'' in ''{1}'' is final and cannot be overridden", NAME, NAME); + MAP.put(CANNOT_WEAKEN_ACCESS_PRIVILEGE, "Cannot weaken access privilege ''{0}'' for ''{1}'' in ''{2}''", TO_STRING, NAME, NAME); + MAP.put(CANNOT_CHANGE_ACCESS_PRIVILEGE, "Cannot change access privilege ''{0}'' for ''{1}'' in ''{2}''", TO_STRING, NAME, NAME); + + MAP.put(RETURN_TYPE_MISMATCH_ON_OVERRIDE, "Return type of {0} is not a subtype of the return type overridden member {1}", + DescriptorRenderer.TEXT, DescriptorRenderer.TEXT); + + MAP.put(VAR_OVERRIDDEN_BY_VAL, "Var-property {0} cannot be overridden by val-property {1}", DescriptorRenderer.TEXT, + DescriptorRenderer.TEXT); + + 
MAP.put(ABSTRACT_MEMBER_NOT_IMPLEMENTED, "{0} must be declared abstract or implement abstract member {1}", RENDER_CLASS_OR_OBJECT, + DescriptorRenderer.TEXT); + + MAP.put(MANY_IMPL_MEMBER_NOT_IMPLEMENTED, "{0} must override {1} because it inherits many implementations of it", + RENDER_CLASS_OR_OBJECT, DescriptorRenderer.TEXT); + + MAP.put(CONFLICTING_OVERLOADS, "{1} is already defined in ''{0}''", DescriptorRenderer.TEXT, TO_STRING); + + + MAP.put(RESULT_TYPE_MISMATCH, "{0} must return {1} but returns {2}", TO_STRING, RENDER_TYPE, RENDER_TYPE); + MAP.put(UNSAFE_INFIX_CALL, + "Infix call corresponds to a dot-qualified call ''{0}.{1}({2})'' which is not allowed on a nullable receiver ''{0}''. " + + "Use '?.'-qualified call instead", + TO_STRING, TO_STRING, TO_STRING); + + MAP.put(OVERLOAD_RESOLUTION_AMBIGUITY, "Overload resolution ambiguity: {0}", AMBIGUOUS_CALLS); + MAP.put(NONE_APPLICABLE, "None of the following functions can be called with the arguments supplied: {0}", AMBIGUOUS_CALLS); + MAP.put(NO_VALUE_FOR_PARAMETER, "No value passed for parameter {0}", DescriptorRenderer.TEXT); + MAP.put(MISSING_RECEIVER, "A receiver of type {0} is required", RENDER_TYPE); + MAP.put(NO_RECEIVER_ADMITTED, "No receiver can be passed to this function or property"); + + MAP.put(CREATING_AN_INSTANCE_OF_ABSTRACT_CLASS, "Can not create an instance of an abstract class"); + MAP.put(TYPE_INFERENCE_FAILED, "Type inference failed: {0}", TO_STRING); + MAP.put(WRONG_NUMBER_OF_TYPE_ARGUMENTS, "{0} type arguments expected", new Renderer<Integer>() { + @NotNull + @Override + public String render(@NotNull Integer argument) { + return argument == 0 ? "No" : argument.toString(); + } + }); + + MAP.put(UNRESOLVED_IDE_TEMPLATE, "Unresolved IDE template: {0}", TO_STRING); + + MAP.put(DANGLING_FUNCTION_LITERAL_ARGUMENT_SUSPECTED, + "This expression is treated as an argument to the function call on the previous line. 
" + + "Separate it with a semicolon (;) if it is not intended to be an argument."); + + MAP.put(NOT_AN_ANNOTATION_CLASS, "{0} is not an annotation class", TO_STRING); + } + + private DefaultErrorMessages() { + } +} diff --git a/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/rendering/DiagnosticFactoryToRendererMap.java b/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/rendering/DiagnosticFactoryToRendererMap.java new file mode 100644 index 0000000000000..3172e3f914400 --- /dev/null +++ b/compiler/frontend/src/org/jetbrains/jet/lang/diagnostics/rendering/DiagnosticFactoryToRendererMap.java @@ -0,0 +1,62 @@ +/* + * Copyright 2010-2012 JetBrains s.r.o. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.jetbrains.jet.lang.diagnostics.rendering; + +import com.intellij.psi.PsiElement; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.jetbrains.jet.lang.diagnostics.*; + +import java.util.HashMap; +import java.util.Map; + +/** + * @author Evgeny Gerashchenko + * @since 4/13/12 + */ +public class DiagnosticFactoryToRendererMap { + private final Map<AbstractDiagnosticFactory, DiagnosticRenderer<?>> map = + new HashMap<AbstractDiagnosticFactory, DiagnosticRenderer<?>>(); + + public final <E extends PsiElement> void put(SimpleDiagnosticFactory<E> factory, String message) { + map.put(factory, new SimpleDiagnosticRenderer(message)); + } + + public final <E extends PsiElement, A> void put(DiagnosticFactory1<E, A> factory, String message, Renderer<? super A> rendererA) { + map.put(factory, new DiagnosticWithParameters1Renderer<A>(message, rendererA)); + } + + public final <E extends PsiElement, A, B> void put(DiagnosticFactory2<E, A, B> factory, + String message, + Renderer<? super A> rendererA, + Renderer<? super B> rendererB) { + map.put(factory, new DiagnosticWithParameters2Renderer<A, B>(message, rendererA, rendererB)); + } + + public final <E extends PsiElement, A, B, C> void put(DiagnosticFactory3<E, A, B, C> factory, + String message, + Renderer<? super A> rendererA, + Renderer<? super B> rendererB, + Renderer<? super C> rendererC) { + map.put(factory, new DiagnosticWithParameters3Renderer<A, B, C>(message, rendererA, rendererB, rendererC)); + } + + @Nullable + public final DiagnosticRenderer<?> get(@NotNull AbstractDiagnosticFactory factory) { + return map.get(factory); + } +}
d18dca5dbbceb9126f3a463332d7baab6dc034db
intellij-community
Renaming preview files--
p
https://github.com/JetBrains/intellij-community
diff --git a/source/com/intellij/application/options/CodeStyleHtmlPanel.java b/source/com/intellij/application/options/CodeStyleHtmlPanel.java index be9ab9ba4e740..81af10bde87be 100644 --- a/source/com/intellij/application/options/CodeStyleHtmlPanel.java +++ b/source/com/intellij/application/options/CodeStyleHtmlPanel.java @@ -199,7 +199,7 @@ public JComponent getPanel() { } protected String getPreviewText() { - return readFromFile("preview.html"); + return readFromFile("preview.html.template"); } diff --git a/source/com/intellij/application/options/CodeStyleXmlPanel.java b/source/com/intellij/application/options/CodeStyleXmlPanel.java index e32cf35d29985..a6585eac5696e 100644 --- a/source/com/intellij/application/options/CodeStyleXmlPanel.java +++ b/source/com/intellij/application/options/CodeStyleXmlPanel.java @@ -120,7 +120,7 @@ public JComponent getPanel() { } protected String getPreviewText() { - return readFromFile("preview.xml"); + return readFromFile("preview.xml.template"); } protected FileType getFileType() {
2b47c1528690b6413060401b254c79a9dc89ce85
hbase
HBASE-1192 LRU-style map for the block cache--git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@780527 13f79535-47bb-0310-9956-ffa450edef68-
p
https://github.com/apache/hbase
diff --git a/CHANGES.txt b/CHANGES.txt index e2229652313d..4ac7b2f70d94 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -293,6 +293,8 @@ Release 0.20.0 - Unreleased (Lars George and Alex Newman via Stack) HBASE-1455 Update DemoClient.py for thrift 1.0 (Tim Sell via Stack) HBASE-1464 Add hbase.regionserver.logroll.period to hbase-default + HBASE-1192 LRU-style map for the block cache (Jon Gray and Ryan Rawson + via Stack) OPTIMIZATIONS HBASE-1412 Change values for delete column and column family in KeyValue diff --git a/conf/hbase-default.xml b/conf/hbase-default.xml index a3a15b158535..f75d1d5d63c0 100644 --- a/conf/hbase-default.xml +++ b/conf/hbase-default.xml @@ -394,4 +394,11 @@ mode flag is stored at /hbase/safe-mode. </description> </property> + <property> + <name>hfile.block.cache.size</name> + <value>50000000</value> + <description> + The size of the block cache used by HFile/StoreFile. Set to 0 to disable. + </description> + </property> </configuration> diff --git a/src/java/org/apache/hadoop/hbase/io/HeapSize.java b/src/java/org/apache/hadoop/hbase/io/HeapSize.java index 13858e91ed44..e6f59e54bd23 100644 --- a/src/java/org/apache/hadoop/hbase/io/HeapSize.java +++ b/src/java/org/apache/hadoop/hbase/io/HeapSize.java @@ -55,7 +55,7 @@ public interface HeapSize { static final int BLOCK_SIZE_TAX = 8; - + static final int BYTE_BUFFER = 56; /** * @return Approximate 'exclusive deep size' of implementing object. 
Includes diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 05c8dfdf27a9..58f6cf9b6ee3 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -526,6 +526,10 @@ private void checkValue(final byte [] value, } } + public long getTotalBytes() { + return this.totalBytes; + } + public void close() throws IOException { if (this.outputStream == null) { return; diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 8b2342cdb324..d3fbb81e95c1 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -76,7 +76,7 @@ * * <p>We maintain multiple HStores for a single HRegion. * - * <p>An HStore is a set of rows with some column data; together, + * <p>An Store is a set of rows with some column data; together, * they make up all the data for the rows. * * <p>Each HRegion has a 'startKey' and 'endKey'. @@ -96,9 +96,9 @@ * * <p>An HRegion is defined by its table and its key extent. * - * <p>It consists of at least one HStore. The number of HStores should be + * <p>It consists of at least one Store. The number of Stores should be * configurable, so that data which is accessed together is stored in the same - * HStore. Right now, we approximate that by building a single HStore for + * Store. Right now, we approximate that by building a single Store for * each column family. (This config info will be communicated via the * tabledesc.) 
* diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index b08e56d14477..c77dd577bbda 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.io.HbaseMapWritable; import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.io.hfile.LruBlockCache; import org.apache.hadoop.hbase.ipc.HBaseRPC; import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler; import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion; @@ -1093,6 +1094,16 @@ protected void doMetrics() { this.metrics.storefiles.set(storefiles); this.metrics.memcacheSizeMB.set((int)(memcacheSize/(1024*1024))); this.metrics.storefileIndexSizeMB.set((int)(storefileIndexSize/(1024*1024))); + + LruBlockCache lruBlockCache = (LruBlockCache)StoreFile.getBlockCache(conf); + if (lruBlockCache != null) { + this.metrics.blockCacheCount.set(lruBlockCache.size()); + this.metrics.blockCacheFree.set(lruBlockCache.getMemFree()); + this.metrics.blockCacheSize.set(lruBlockCache.getMemUsed()); + double ratio = lruBlockCache.getHitRatio(); + int percent = (int) (ratio * 100); + this.metrics.blockCacheHitRatio.set(percent); + } } /** diff --git a/src/java/org/apache/hadoop/hbase/regionserver/Store.java b/src/java/org/apache/hadoop/hbase/regionserver/Store.java index 6d1ad7828cff..a9166861d0c9 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -358,7 +358,7 @@ private Map<Long, StoreFile> loadStoreFiles() } StoreFile curfile = null; try { - curfile = new StoreFile(fs, p); + curfile = new StoreFile(fs, p, this.conf); } catch (IOException ioe) { LOG.warn("Failed open of " + p + "; presumption is that file was " + "corrupted at flush and lost edits picked up by 
commit log replay. " + @@ -499,7 +499,7 @@ private StoreFile internalFlushCache(final ConcurrentSkipListSet<KeyValue> cache writer.close(); } } - StoreFile sf = new StoreFile(this.fs, writer.getPath()); + StoreFile sf = new StoreFile(this.fs, writer.getPath(), this.conf); this.storeSize += sf.getReader().length(); if(LOG.isDebugEnabled()) { LOG.debug("Added " + sf + ", entries=" + sf.getReader().getEntries() + @@ -962,7 +962,7 @@ private void completeCompaction(final List<StoreFile> compactedFiles, LOG.error("Failed move of compacted file " + compactedFile.getPath(), e); return; } - StoreFile finalCompactedFile = new StoreFile(this.fs, p); + StoreFile finalCompactedFile = new StoreFile(this.fs, p, this.conf); this.lock.writeLock().lock(); try { try { diff --git a/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 038bb7dda538..7940c0cc1a71 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -33,12 +33,17 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.io.HalfHFileReader; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.LruBlockCache; +import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Hash; +import org.apache.hadoop.io.RawComparator; /** * A Store data file. Stores usually have one or more of these files. 
They @@ -52,6 +57,10 @@ */ public class StoreFile implements HConstants { static final Log LOG = LogFactory.getLog(StoreFile.class.getName()); + + public static final String HFILE_CACHE_SIZE_KEY = "hfile.block.cache.size"; + + private static BlockCache hfileBlockCache = null; // Make default block size for StoreFiles 8k while testing. TODO: FIX! // Need to make it 8k for testing. @@ -88,16 +97,18 @@ public class StoreFile implements HConstants { // Used making file ids. private final static Random rand = new Random(); + private final HBaseConfiguration conf; /** - * Constructor. - * Loads up a Reader (and its indices, etc.). - * @param fs Filesystem. - * @param p qualified path + * Constructor, loads a reader and it's indices, etc. May allocate a substantial + * amount of ram depending on the underlying files (10-20MB?). + * @param fs + * @param p + * @param conf * @throws IOException */ - StoreFile(final FileSystem fs, final Path p) - throws IOException { + StoreFile(final FileSystem fs, final Path p, final HBaseConfiguration conf) throws IOException { + this.conf = conf; this.fs = fs; this.path = p; if (isReference(p)) { @@ -105,6 +116,7 @@ public class StoreFile implements HConstants { this.referencePath = getReferredToFile(this.path); } this.reader = open(); + } /** @@ -196,6 +208,23 @@ public long getMaxSequenceId() { return this.sequenceid; } + public static synchronized BlockCache getBlockCache(HBaseConfiguration conf) { + if (hfileBlockCache != null) + return hfileBlockCache; + + long cacheSize = conf.getLong(HFILE_CACHE_SIZE_KEY, 0L); + // There should be a better way to optimize this. But oh well. + if (cacheSize == 0L) + return null; + + hfileBlockCache = new LruBlockCache(cacheSize); + return hfileBlockCache; + } + + public BlockCache getBlockCache() { + return getBlockCache(conf); + } + /** * Opens reader on this store file. Called by Constructor. * @return Reader for the store file. 
@@ -208,10 +237,10 @@ protected HFile.Reader open() throw new IllegalAccessError("Already open"); } if (isReference()) { - this.reader = new HalfHFileReader(this.fs, this.referencePath, null, + this.reader = new HalfHFileReader(this.fs, this.referencePath, getBlockCache(), this.reference); } else { - this.reader = new StoreFileReader(this.fs, this.path, null); + this.reader = new StoreFileReader(this.fs, this.path, getBlockCache()); } // Load up indices and fileinfo. Map<byte [], byte []> map = this.reader.loadFileInfo(); @@ -368,13 +397,13 @@ public static HFile.Writer getWriter(final FileSystem fs, final Path dir) * @param blocksize * @param algorithm Pass null to get default. * @param c Pass null to get default. - * @param bloomfilter + * @param filter BloomFilter * @return HFile.Writer * @throws IOException */ public static HFile.Writer getWriter(final FileSystem fs, final Path dir, final int blocksize, final Compression.Algorithm algorithm, - final KeyValue.KeyComparator c, final boolean bloomfilter) + final KeyValue.KeyComparator c, final boolean filter) throws IOException { if (!fs.exists(dir)) { fs.mkdirs(dir); @@ -382,7 +411,7 @@ public static HFile.Writer getWriter(final FileSystem fs, final Path dir, Path path = getUniqueFile(fs, dir); return new HFile.Writer(fs, path, blocksize, algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm, - c == null? KeyValue.KEY_COMPARATOR: c, bloomfilter); + c == null? KeyValue.KEY_COMPARATOR: c, filter); } /** @@ -399,10 +428,9 @@ static Path getUniqueFile(final FileSystem fs, final Path p) } /** + * * @param fs * @param dir - * @param encodedRegionName - * @param family * @return Path to a file that doesn't exist at time of this invocation. 
* @throws IOException */ @@ -412,12 +440,12 @@ static Path getRandomFilename(final FileSystem fs, final Path dir) } /** + * * @param fs * @param dir - * @param encodedRegionName - * @param family * @param suffix * @return Path to a file that doesn't exist at time of this invocation. + * @return * @throws IOException */ static Path getRandomFilename(final FileSystem fs, final Path dir, @@ -437,8 +465,8 @@ static Path getRandomFilename(final FileSystem fs, final Path dir, * Write file metadata. * Call before you call close on the passed <code>w</code> since its written * as metadata to that file. - * - * @param filesystem file system + * + * @param w * @param maxSequenceId Maximum sequence id. * @throws IOException */ @@ -488,4 +516,4 @@ static Path split(final FileSystem fs, final Path splitDir, Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName); return r.write(fs, p); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java b/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java index 3c79ef169a07..2a723efb57f0 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java @@ -49,7 +49,7 @@ public class RegionServerMetrics implements Updater { private MetricsRegistry registry = new MetricsRegistry(); public final MetricsTimeVaryingRate atomicIncrementTime = - new MetricsTimeVaryingRate("atomicIncrementTime", registry); + new MetricsTimeVaryingRate("atomicIncrementTime", registry); /** * Count of regions carried by this regionserver @@ -57,10 +57,30 @@ public class RegionServerMetrics implements Updater { public final MetricsIntValue regions = new MetricsIntValue("regions", registry); + /** + * Block cache size. 
+ */ + public final MetricsLongValue blockCacheSize = new MetricsLongValue("blockCacheSize", registry); + + /** + * Block cache free size. + */ + public final MetricsLongValue blockCacheFree = new MetricsLongValue("blockCacheFree", registry); + + /** + * Block cache item count. + */ + public final MetricsLongValue blockCacheCount = new MetricsLongValue("blockCacheCount", registry); + + /** + * Block hit ratio. + */ + public final MetricsIntValue blockCacheHitRatio = new MetricsIntValue("blockCacheHitRatio", registry); + /* * Count of requests to the regionservers since last call to metrics update */ - private final MetricsRate requests = new MetricsRate("requests"); + private final MetricsRate requests = new MetricsRate("requests"); /** * Count of stores open on the regionserver. @@ -112,6 +132,11 @@ public void doUpdates(MetricsContext unused) { this.memcacheSizeMB.pushMetric(this.metricsRecord); this.regions.pushMetric(this.metricsRecord); this.requests.pushMetric(this.metricsRecord); + + this.blockCacheSize.pushMetric(this.metricsRecord); + this.blockCacheFree.pushMetric(this.metricsRecord); + this.blockCacheCount.pushMetric(this.metricsRecord); + this.blockCacheHitRatio.pushMetric(this.metricsRecord); } this.metricsRecord.update(); this.lastUpdate = System.currentTimeMillis(); @@ -162,6 +187,14 @@ public String toString() { Long.valueOf(memory.getUsed()/MB)); sb = Strings.appendKeyValue(sb, "maxHeap", Long.valueOf(memory.getMax()/MB)); + sb = Strings.appendKeyValue(sb, this.blockCacheSize.getName(), + Long.valueOf(this.blockCacheSize.get())); + sb = Strings.appendKeyValue(sb, this.blockCacheFree.getName(), + Long.valueOf(this.blockCacheFree.get())); + sb = Strings.appendKeyValue(sb, this.blockCacheCount.getName(), + Long.valueOf(this.blockCacheCount.get())); + sb = Strings.appendKeyValue(sb, this.blockCacheHitRatio.getName(), + Long.valueOf(this.blockCacheHitRatio.get())); return sb.toString(); } } diff --git 
a/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index 58b390e438cc..9f6601bcd3c5 100644 --- a/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -73,7 +73,7 @@ public void testBasicHalfMapFile() throws Exception { new Path(new Path(this.testDir, "regionname"), "familyname"), 2 * 1024, null, null, false); writeStoreFile(writer); - checkHalfHFile(new StoreFile(this.fs, writer.getPath())); + checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf)); } /* @@ -112,7 +112,7 @@ public void testReference() HFile.Writer writer = StoreFile.getWriter(this.fs, dir, 8 * 1024, null, null, false); writeStoreFile(writer); - StoreFile hsf = new StoreFile(this.fs, writer.getPath()); + StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf); HFile.Reader reader = hsf.getReader(); // Split on a row, not in middle of row. Midkey returned by reader // may be in middle of row. Create new one with empty column and @@ -123,7 +123,7 @@ public void testReference() byte [] finalKey = hsk.getRow(); // Make a reference Path refPath = StoreFile.split(fs, dir, hsf, reader.midkey(), Range.top); - StoreFile refHsf = new StoreFile(this.fs, refPath); + StoreFile refHsf = new StoreFile(this.fs, refPath, conf); // Now confirm that I can read from the reference and that it only gets // keys from top half of the file. HFileScanner s = refHsf.getReader().getScanner(); @@ -157,8 +157,8 @@ private void checkHalfHFile(final StoreFile f) Path bottomPath = StoreFile.split(this.fs, bottomDir, f, midkey, Range.bottom); // Make readers on top and bottom. 
- HFile.Reader top = new StoreFile(this.fs, topPath).getReader(); - HFile.Reader bottom = new StoreFile(this.fs, bottomPath).getReader(); + HFile.Reader top = new StoreFile(this.fs, topPath, conf).getReader(); + HFile.Reader bottom = new StoreFile(this.fs, bottomPath, conf).getReader(); ByteBuffer previous = null; LOG.info("Midkey: " + Bytes.toString(midkey)); byte [] midkeyBytes = new HStoreKey(midkey).getBytes(); @@ -211,8 +211,8 @@ private void checkHalfHFile(final StoreFile f) topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top); bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, Range.bottom); - top = new StoreFile(this.fs, topPath).getReader(); - bottom = new StoreFile(this.fs, bottomPath).getReader(); + top = new StoreFile(this.fs, topPath, conf).getReader(); + bottom = new StoreFile(this.fs, bottomPath, conf).getReader(); bottomScanner = bottom.getScanner(); int count = 0; while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || @@ -255,8 +255,8 @@ private void checkHalfHFile(final StoreFile f) topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top); bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, Range.bottom); - top = new StoreFile(this.fs, topPath).getReader(); - bottom = new StoreFile(this.fs, bottomPath).getReader(); + top = new StoreFile(this.fs, topPath, conf).getReader(); + bottom = new StoreFile(this.fs, bottomPath, conf).getReader(); first = true; bottomScanner = bottom.getScanner(); while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
1677de7862935244adfcd039bd1f0bd3a93e7009
restlet-framework-java
Fixed regression. Reported by Guido Schmidt--
c
https://github.com/restlet/restlet-framework-java
diff --git a/modules/org.restlet/src/org/restlet/engine/http/connector/ConnectedRequest.java b/modules/org.restlet/src/org/restlet/engine/http/connector/ConnectedRequest.java index 5b2807de35..25ab1bc4fc 100644 --- a/modules/org.restlet/src/org/restlet/engine/http/connector/ConnectedRequest.java +++ b/modules/org.restlet/src/org/restlet/engine/http/connector/ConnectedRequest.java @@ -664,11 +664,13 @@ public List<Range> getRanges() { public List<RecipientInfo> getRecipientsInfo() { List<RecipientInfo> result = super.getRecipientsInfo(); if (!recipientsInfoAdded) { - for (String header : getHeaders().getValuesArray( - HeaderConstants.HEADER_VIA)) { - new RecipientInfoReader(header).addValues(result); + if (getHeaders() != null) { + for (String header : getHeaders().getValuesArray( + HeaderConstants.HEADER_VIA)) { + new RecipientInfoReader(header).addValues(result); + } } - setRecipientsInfo(result); + this.recipientsInfoAdded = true; } return result; }
a7a3653b7006297958e79146aa46011d6060099f
hadoop
HADOOP-7341. Fix options parsing in CommandFormat.- Contributed by Daryn Sharp.--git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1132505 13f79535-47bb-0310-9956-ffa450edef68-
c
https://github.com/apache/hadoop
diff --git a/CHANGES.txt b/CHANGES.txt index 16152a6c9f688..cabe26cebf230 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -278,6 +278,8 @@ Trunk (unreleased changes) HADOOP-7284 Trash and shell's rm does not work for viewfs (Sanjay Radia) + HADOOP-7341. Fix options parsing in CommandFormat (Daryn Sharp via todd) + Release 0.22.0 - Unreleased INCOMPATIBLE CHANGES diff --git a/src/java/org/apache/hadoop/fs/FsShellPermissions.java b/src/java/org/apache/hadoop/fs/FsShellPermissions.java index 839f954f92dc2..41c9f4b367027 100644 --- a/src/java/org/apache/hadoop/fs/FsShellPermissions.java +++ b/src/java/org/apache/hadoop/fs/FsShellPermissions.java @@ -80,7 +80,7 @@ public static class Chmod extends FsShellPermissions { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 2, Integer.MAX_VALUE, "R", null); + CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "R", null); cf.parse(args); setRecursive(cf.getOpt("R")); @@ -143,7 +143,7 @@ public static class Chown extends FsShellPermissions { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 2, Integer.MAX_VALUE, "R"); + CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "R"); cf.parse(args); setRecursive(cf.getOpt("R")); parseOwnerGroup(args.removeFirst()); diff --git a/src/java/org/apache/hadoop/fs/shell/CommandFormat.java b/src/java/org/apache/hadoop/fs/shell/CommandFormat.java index e72e553d72be3..eea429a97e5f1 100644 --- a/src/java/org/apache/hadoop/fs/shell/CommandFormat.java +++ b/src/java/org/apache/hadoop/fs/shell/CommandFormat.java @@ -29,14 +29,30 @@ * Parse the args of a command and check the format of args. 
*/ public class CommandFormat { - final String name; final int minPar, maxPar; final Map<String, Boolean> options = new HashMap<String, Boolean>(); boolean ignoreUnknownOpts = false; - /** constructor */ + /** + * @deprecated use replacement since name is an unused parameter + * @param name of command, but never used + * @param min see replacement + * @param max see replacement + * @param possibleOpt see replacement + * @see #CommandFormat(int, int, String...) + */ + @Deprecated public CommandFormat(String n, int min, int max, String ... possibleOpt) { - name = n; + this(min, max, possibleOpt); + } + + /** + * Simple parsing of command line arguments + * @param min minimum arguments required + * @param max maximum arguments permitted + * @param possibleOpt list of the allowed switches + */ + public CommandFormat(int min, int max, String ... possibleOpt) { minPar = min; maxPar = max; for (String opt : possibleOpt) { @@ -71,16 +87,23 @@ public void parse(List<String> args) { int pos = 0; while (pos < args.size()) { String arg = args.get(pos); - if (arg.startsWith("-") && arg.length() > 1) { - String opt = arg.substring(1); - if (options.containsKey(opt)) { - args.remove(pos); - options.put(opt, Boolean.TRUE); - continue; - } - if (!ignoreUnknownOpts) throw new UnknownOptionException(arg); + // stop if not an opt, or the stdin arg "-" is found + if (!arg.startsWith("-") || arg.equals("-")) { + break; + } else if (arg.equals("--")) { // force end of option processing + args.remove(pos); + break; + } + + String opt = arg.substring(1); + if (options.containsKey(opt)) { + args.remove(pos); + options.put(opt, Boolean.TRUE); + } else if (ignoreUnknownOpts) { + pos++; + } else { + throw new UnknownOptionException(arg); } - pos++; } int psize = args.size(); if (psize < minPar) { diff --git a/src/java/org/apache/hadoop/fs/shell/CopyCommands.java b/src/java/org/apache/hadoop/fs/shell/CopyCommands.java index a10d303a8287a..32c71c32b1b67 100644 --- 
a/src/java/org/apache/hadoop/fs/shell/CopyCommands.java +++ b/src/java/org/apache/hadoop/fs/shell/CopyCommands.java @@ -64,7 +64,7 @@ public static class Merge extends FsCommand { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 2, 3); + CommandFormat cf = new CommandFormat(2, 3); cf.parse(args); // TODO: this really should be a -nl option @@ -94,7 +94,7 @@ static class Cp extends CommandWithDestination { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 2, Integer.MAX_VALUE); + CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE); cf.parse(args); getRemoteDestination(args); } @@ -137,7 +137,7 @@ protected void processOptions(LinkedList<String> args) throws IOException { localFs = FileSystem.getLocal(getConf()); CommandFormat cf = new CommandFormat( - null, 1, Integer.MAX_VALUE, "crc", "ignoreCrc"); + 1, Integer.MAX_VALUE, "crc", "ignoreCrc"); cf.parse(args); copyCrc = cf.getOpt("crc"); verifyChecksum = !cf.getOpt("ignoreCrc"); @@ -216,7 +216,7 @@ public static class Put extends CommandWithDestination { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 1, Integer.MAX_VALUE); + CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE); cf.parse(args); getRemoteDestination(args); } diff --git a/src/java/org/apache/hadoop/fs/shell/Count.java b/src/java/org/apache/hadoop/fs/shell/Count.java index 6d945196babf4..891e68a4bf928 100644 --- a/src/java/org/apache/hadoop/fs/shell/Count.java +++ b/src/java/org/apache/hadoop/fs/shell/Count.java @@ -73,7 +73,7 @@ public Count(String[] cmd, int pos, Configuration conf) { @Override protected void processOptions(LinkedList<String> args) { - CommandFormat cf = new CommandFormat(NAME, 1, Integer.MAX_VALUE, "q"); + CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "q"); 
cf.parse(args); if (args.isEmpty()) { // default path is the current working directory args.add("."); diff --git a/src/java/org/apache/hadoop/fs/shell/Delete.java b/src/java/org/apache/hadoop/fs/shell/Delete.java index 7f17619d220c8..8b34afecdf0ca 100644 --- a/src/java/org/apache/hadoop/fs/shell/Delete.java +++ b/src/java/org/apache/hadoop/fs/shell/Delete.java @@ -57,7 +57,7 @@ public static class Rm extends FsCommand { @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat( - null, 1, Integer.MAX_VALUE, "r", "R", "skipTrash"); + 1, Integer.MAX_VALUE, "r", "R", "skipTrash"); cf.parse(args); deleteDirs = cf.getOpt("r") || cf.getOpt("R"); skipTrash = cf.getOpt("skipTrash"); @@ -115,7 +115,7 @@ static class Expunge extends FsCommand { // TODO: should probably allow path arguments for the filesystems @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 0, 0); + CommandFormat cf = new CommandFormat(0, 0); cf.parse(args); } diff --git a/src/java/org/apache/hadoop/fs/shell/Display.java b/src/java/org/apache/hadoop/fs/shell/Display.java index 312527b98e3d0..e650b711bc108 100644 --- a/src/java/org/apache/hadoop/fs/shell/Display.java +++ b/src/java/org/apache/hadoop/fs/shell/Display.java @@ -66,7 +66,7 @@ public static class Cat extends Display { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 1, Integer.MAX_VALUE, "ignoreCrc"); + CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "ignoreCrc"); cf.parse(args); verifyChecksum = !cf.getOpt("ignoreCrc"); } diff --git a/src/java/org/apache/hadoop/fs/shell/FsUsage.java b/src/java/org/apache/hadoop/fs/shell/FsUsage.java index cf95f6a98a107..0d73f76e6669e 100644 --- a/src/java/org/apache/hadoop/fs/shell/FsUsage.java +++ b/src/java/org/apache/hadoop/fs/shell/FsUsage.java @@ -67,7 +67,7 @@ public 
static class Df extends FsUsage { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 0, Integer.MAX_VALUE, "h"); + CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "h"); cf.parse(args); humanReadable = cf.getOpt("h"); if (args.isEmpty()) args.add(Path.SEPARATOR); @@ -123,7 +123,7 @@ public static class Du extends FsUsage { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 0, Integer.MAX_VALUE, "h", "s"); + CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "h", "s"); cf.parse(args); humanReadable = cf.getOpt("h"); summary = cf.getOpt("s"); diff --git a/src/java/org/apache/hadoop/fs/shell/Ls.java b/src/java/org/apache/hadoop/fs/shell/Ls.java index 7bd8ba4b82ac0..36c01a63297dd 100644 --- a/src/java/org/apache/hadoop/fs/shell/Ls.java +++ b/src/java/org/apache/hadoop/fs/shell/Ls.java @@ -62,7 +62,7 @@ public static void registerCommands(CommandFactory factory) { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 0, Integer.MAX_VALUE, "R"); + CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "R"); cf.parse(args); setRecursive(cf.getOpt("R")); if (args.isEmpty()) args.add(Path.CUR_DIR); diff --git a/src/java/org/apache/hadoop/fs/shell/Mkdir.java b/src/java/org/apache/hadoop/fs/shell/Mkdir.java index effbbfebb0303..30ce5ed4dfddf 100644 --- a/src/java/org/apache/hadoop/fs/shell/Mkdir.java +++ b/src/java/org/apache/hadoop/fs/shell/Mkdir.java @@ -45,7 +45,7 @@ public static void registerCommands(CommandFactory factory) { @Override protected void processOptions(LinkedList<String> args) { - CommandFormat cf = new CommandFormat(null, 1, Integer.MAX_VALUE); + CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE); cf.parse(args); } diff --git a/src/java/org/apache/hadoop/fs/shell/MoveCommands.java 
b/src/java/org/apache/hadoop/fs/shell/MoveCommands.java index 32089303b9855..5f7d474fbc104 100644 --- a/src/java/org/apache/hadoop/fs/shell/MoveCommands.java +++ b/src/java/org/apache/hadoop/fs/shell/MoveCommands.java @@ -78,7 +78,7 @@ public static class Rename extends CommandWithDestination { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 2, Integer.MAX_VALUE); + CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE); cf.parse(args); getRemoteDestination(args); } diff --git a/src/java/org/apache/hadoop/fs/shell/SetReplication.java b/src/java/org/apache/hadoop/fs/shell/SetReplication.java index a7097170cae5d..3266bd2b4081e 100644 --- a/src/java/org/apache/hadoop/fs/shell/SetReplication.java +++ b/src/java/org/apache/hadoop/fs/shell/SetReplication.java @@ -33,7 +33,7 @@ @InterfaceAudience.Private @InterfaceStability.Unstable -public class SetReplication extends FsCommand { +class SetReplication extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(SetReplication.class, "-setrep"); } @@ -51,7 +51,7 @@ public static void registerCommands(CommandFactory factory) { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 2, Integer.MAX_VALUE, "R", "w"); + CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "R", "w"); cf.parse(args); waitOpt = cf.getOpt("w"); setRecursive(cf.getOpt("R")); diff --git a/src/java/org/apache/hadoop/fs/shell/Stat.java b/src/java/org/apache/hadoop/fs/shell/Stat.java index 6bd57cf5a134d..e8a731a3351ed 100644 --- a/src/java/org/apache/hadoop/fs/shell/Stat.java +++ b/src/java/org/apache/hadoop/fs/shell/Stat.java @@ -64,7 +64,7 @@ public static void registerCommands(CommandFactory factory) { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 1, 
Integer.MAX_VALUE, "R"); + CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "R"); cf.parse(args); setRecursive(cf.getOpt("R")); if (args.getFirst().contains("%")) format = args.removeFirst(); diff --git a/src/java/org/apache/hadoop/fs/shell/Tail.java b/src/java/org/apache/hadoop/fs/shell/Tail.java index fda7b99c52256..e95d4d1cec830 100644 --- a/src/java/org/apache/hadoop/fs/shell/Tail.java +++ b/src/java/org/apache/hadoop/fs/shell/Tail.java @@ -51,7 +51,7 @@ public static void registerCommands(CommandFactory factory) { @Override protected void processOptions(LinkedList<String> args) throws IOException { - CommandFormat cf = new CommandFormat(null, 1, 1, "f"); + CommandFormat cf = new CommandFormat(1, 1, "f"); cf.parse(args); follow = cf.getOpt("f"); } diff --git a/src/java/org/apache/hadoop/fs/shell/Test.java b/src/java/org/apache/hadoop/fs/shell/Test.java index 309ad053aa175..9780698b3a8be 100644 --- a/src/java/org/apache/hadoop/fs/shell/Test.java +++ b/src/java/org/apache/hadoop/fs/shell/Test.java @@ -46,7 +46,7 @@ public static void registerCommands(CommandFactory factory) { @Override protected void processOptions(LinkedList<String> args) { - CommandFormat cf = new CommandFormat(null, 1, 1, "e", "d", "z"); + CommandFormat cf = new CommandFormat(1, 1, "e", "d", "z"); cf.parse(args); String[] opts = cf.getOpts().toArray(new String[0]); diff --git a/src/java/org/apache/hadoop/fs/shell/Touchz.java b/src/java/org/apache/hadoop/fs/shell/Touchz.java index f384ab51df9a2..18c9aa74c29c9 100644 --- a/src/java/org/apache/hadoop/fs/shell/Touchz.java +++ b/src/java/org/apache/hadoop/fs/shell/Touchz.java @@ -52,7 +52,7 @@ public static class Touchz extends Touch { @Override protected void processOptions(LinkedList<String> args) { - CommandFormat cf = new CommandFormat(null, 1, Integer.MAX_VALUE); + CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE); cf.parse(args); } diff --git a/src/test/core/org/apache/hadoop/fs/TestCommandFormat.java 
b/src/test/core/org/apache/hadoop/fs/TestCommandFormat.java index 3fdc253c16484..4b855c4940440 100644 --- a/src/test/core/org/apache/hadoop/fs/TestCommandFormat.java +++ b/src/test/core/org/apache/hadoop/fs/TestCommandFormat.java @@ -119,63 +119,59 @@ public void testOptArg() { @Test public void testArgOpt() { args = listOf("b", "-a"); - expectedOpts = setOf("a"); - expectedArgs = listOf("b"); + expectedArgs = listOf("b", "-a"); - checkArgLimits(UnknownOptionException.class, 0, 0); checkArgLimits(TooManyArgumentsException.class, 0, 0, "a", "b"); - checkArgLimits(null, 0, 1, "a", "b"); - checkArgLimits(null, 1, 1, "a", "b"); checkArgLimits(null, 1, 2, "a", "b"); - checkArgLimits(NotEnoughArgumentsException.class, 2, 2, "a", "b"); + checkArgLimits(null, 2, 2, "a", "b"); + checkArgLimits(NotEnoughArgumentsException.class, 3, 4, "a", "b"); } @Test - public void testOptArgOpt() { - args = listOf("a", "-b", "c"); - expectedOpts = setOf("b"); - expectedArgs = listOf("a", "c"); + public void testOptStopOptArg() { + args = listOf("-a", "--", "-b", "c"); + expectedOpts = setOf("a"); + expectedArgs = listOf("-b", "c"); checkArgLimits(UnknownOptionException.class, 0, 0); - checkArgLimits(TooManyArgumentsException.class, 0, 0, "b"); - checkArgLimits(TooManyArgumentsException.class, 1, 1, "b"); - checkArgLimits(null, 0, 2, "b"); - checkArgLimits(NotEnoughArgumentsException.class, 3, 3, "b"); + checkArgLimits(TooManyArgumentsException.class, 0, 1, "a", "b"); + checkArgLimits(null, 2, 2, "a", "b"); + checkArgLimits(NotEnoughArgumentsException.class, 3, 4, "a", "b"); } @Test public void testOptDashArg() { - args = listOf("-b", "-", "c"); + args = listOf("-b", "-", "-c"); expectedOpts = setOf("b"); - expectedArgs = listOf("-", "c"); + expectedArgs = listOf("-", "-c"); checkArgLimits(UnknownOptionException.class, 0, 0); - checkArgLimits(TooManyArgumentsException.class, 0, 0, "b"); - checkArgLimits(TooManyArgumentsException.class, 1, 1, "b"); - checkArgLimits(null, 2, 2, "b"); - 
checkArgLimits(NotEnoughArgumentsException.class, 3, 4, "b"); + checkArgLimits(TooManyArgumentsException.class, 0, 0, "b", "c"); + checkArgLimits(TooManyArgumentsException.class, 1, 1, "b", "c"); + checkArgLimits(null, 2, 2, "b", "c"); + checkArgLimits(NotEnoughArgumentsException.class, 3, 4, "b", "c"); } @Test public void testOldArgsWithIndex() { String[] arrayArgs = new String[]{"ignore", "-a", "b", "-c"}; { - CommandFormat cf = new CommandFormat("", 0, 9, "a", "c"); + CommandFormat cf = new CommandFormat(0, 9, "a", "c"); List<String> parsedArgs = cf.parse(arrayArgs, 0); - assertEquals(setOf("a", "c"), cf.getOpts()); - assertEquals(listOf("ignore", "b"), parsedArgs); + assertEquals(setOf(), cf.getOpts()); + assertEquals(listOf("ignore", "-a", "b", "-c"), parsedArgs); } { - CommandFormat cf = new CommandFormat("", 0, 9, "a", "c"); + CommandFormat cf = new CommandFormat(0, 9, "a", "c"); List<String> parsedArgs = cf.parse(arrayArgs, 1); - assertEquals(setOf("a", "c"), cf.getOpts()); - assertEquals(listOf("b"), parsedArgs); + assertEquals(setOf("a"), cf.getOpts()); + assertEquals(listOf("b", "-c"), parsedArgs); } { - CommandFormat cf = new CommandFormat("", 0, 9, "a", "c"); + CommandFormat cf = new CommandFormat(0, 9, "a", "c"); List<String> parsedArgs = cf.parse(arrayArgs, 2); - assertEquals(setOf("c"), cf.getOpts()); - assertEquals(listOf("b"), parsedArgs); + assertEquals(setOf(), cf.getOpts()); + assertEquals(listOf("b", "-c"), parsedArgs); } } @@ -183,7 +179,7 @@ private static <T> CommandFormat checkArgLimits( Class<? extends IllegalArgumentException> expectedErr, int min, int max, String ... opts) { - CommandFormat cf = new CommandFormat("", min, max, opts); + CommandFormat cf = new CommandFormat(min, max, opts); List<String> parsedArgs = new ArrayList<String>(args); Class<?> cfError = null; @@ -202,11 +198,13 @@ private static <T> CommandFormat checkArgLimits( return cf; } - private static <T> List<T> listOf(T ... 
objects) { + // Don't use generics to avoid warning: + // unchecked generic array creation of type T[] for varargs parameter + private static List<String> listOf(String ... objects) { return Arrays.asList(objects); } - private static <T> Set<T> setOf(T ... objects) { - return new HashSet<T>(listOf(objects)); + private static Set<String> setOf(String ... objects) { + return new HashSet<String>(listOf(objects)); } }