[JBoss JIRA] (WFLY-3873) AJP-connector mangles SOAP-request
by xiaodong xie (JIRA)
[ https://issues.jboss.org/browse/WFLY-3873?page=com.atlassian.jira.plugin.... ]
xiaodong xie commented on WFLY-3873:
------------------------------------
We found something today, it should be triggered by the "Expect: 100-continue" HTTP header...
> AJP-connector mangles SOAP-request
> ----------------------------------
>
> Key: WFLY-3873
> URL: https://issues.jboss.org/browse/WFLY-3873
> Project: WildFly
> Issue Type: Bug
> Components: Web (Undertow)
> Affects Versions: 8.1.0.Final
> Environment: Linux
> reverse-proxy from Apache 2.2 via mod_proxy_ajp to AJP connector on WildFly 8.1.0-Final
> Reporter: Gerke Ephorus
> Assignee: Stuart Douglas
> Labels: .net, ajp, soap
>
> When connecting to WildFly 8.1.0-Final SOAP-service from a .NET application via a reverse-proxy (Apache 2.2 with mod_proxy_ajp to the AJP-connector) it looks like the payload SOAP package gets mangled:
> {noformat}
> 2014-09-19 08:45:05,206 WARNING [org.apache.cxf.phase.PhaseInterceptorChain] (default task-99) Interceptor for {http://ephorus.com/document-processor/ws/}DocumentProcessor has thrown exception, unwinding now: java.lang.RuntimeException:
> Couldn't parse stream.
> at org.apache.cxf.staxutils.StaxUtils.createXMLStreamReader(StaxUtils.java:1447)
> at org.apache.cxf.interceptor.StaxInInterceptor.handleMessage(StaxInInterceptor.java:123)
> at org.apache.cxf.phase.PhaseInterceptorChain.doIntercept(PhaseInterceptorChain.java:272)
> at org.apache.cxf.transport.ChainInitiationObserver.onMessage(ChainInitiationObserver.java:121)
> at org.apache.cxf.transport.http.AbstractHTTPDestination.invoke(AbstractHTTPDestination.java:241)
> at org.jboss.wsf.stack.cxf.RequestHandlerImpl.handleHttpRequest(RequestHandlerImpl.java:93)
> at org.jboss.wsf.stack.cxf.transport.ServletHelper.callRequestHandler(ServletHelper.java:133)
> at org.jboss.wsf.stack.cxf.CXFServletExt.invoke(CXFServletExt.java:88)
> at org.apache.cxf.transport.servlet.AbstractHTTPServlet.handleRequest(AbstractHTTPServlet.java:286)
> at org.apache.cxf.transport.servlet.AbstractHTTPServlet.doPost(AbstractHTTPServlet.java:206)
> at javax.servlet.http.HttpServlet.service(HttpServlet.java:707) [jboss-servlet-api_3.1_spec-1.0.0.Final.jar:1.0.0.Final]
> at org.jboss.wsf.stack.cxf.CXFServletExt.service(CXFServletExt.java:136)
> at org.jboss.wsf.spi.deployment.WSFServlet.service(WSFServlet.java:140) [jbossws-spi-2.2.2.Final.jar:2.2.2.Final]
> at javax.servlet.http.HttpServlet.service(HttpServlet.java:790) [jboss-servlet-api_3.1_spec-1.0.0.Final.jar:1.0.0.Final]
> at io.undertow.servlet.handlers.ServletHandler.handleRequest(ServletHandler.java:85) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.security.ServletSecurityRoleHandler.handleRequest(ServletSecurityRoleHandler.java:61) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.ServletDispatchingHandler.handleRequest(ServletDispatchingHandler.java:36) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at org.wildfly.extension.undertow.security.SecurityContextAssociationHandler.handleRequest(SecurityContextAssociationHandler.java:78)
> at io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:25) [undertow-core-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.security.SSLInformationAssociationHandler.handleRequest(SSLInformationAssociationHandler.java:113) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.security.ServletAuthenticationCallHandler.handleRequest(ServletAuthenticationCallHandler.java:56) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:25) [undertow-core-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.security.handlers.AbstractConfidentialityHandler.handleRequest(AbstractConfidentialityHandler.java:45) [undertow-core-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.security.ServletConfidentialityConstraintHandler.handleRequest(ServletConfidentialityConstraintHandler.java:61) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.security.handlers.AuthenticationMechanismsHandler.handleRequest(AuthenticationMechanismsHandler.java:58) [undertow-core-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.security.CachedAuthenticatedSessionHandler.handleRequest(CachedAuthenticatedSessionHandler.java:70) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.security.handlers.SecurityInitialHandler.handleRequest(SecurityInitialHandler.java:76) [undertow-core-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:25) [undertow-core-1.0.15.Final.jar:1.0.15.Final]
> at org.wildfly.extension.undertow.security.jacc.JACCContextIdHandler.handleRequest(JACCContextIdHandler.java:61)
> at io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:25) [undertow-core-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:25) [undertow-core-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.SessionRestoringHandler.handleRequest(SessionRestoringHandler.java:101) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.ServletInitialHandler.handleFirstRequest(ServletInitialHandler.java:240) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.ServletInitialHandler.dispatchRequest(ServletInitialHandler.java:227) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.ServletInitialHandler.access$000(ServletInitialHandler.java:73) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.servlet.handlers.ServletInitialHandler$1.handleRequest(ServletInitialHandler.java:146) [undertow-servlet-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.server.Connectors.executeRootHandler(Connectors.java:177) [undertow-core-1.0.15.Final.jar:1.0.15.Final]
> at io.undertow.server.HttpServerExchange$1.run(HttpServerExchange.java:727) [undertow-core-1.0.15.Final.jar:1.0.15.Final]
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [rt.jar:1.8.0_20]
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [rt.jar:1.8.0_20]
> at java.lang.Thread.run(Thread.java:745) [rt.jar:1.8.0_20]
> Caused by: com.ctc.wstx.exc.WstxIOException: Invalid UTF-8 start byte 0x95 (at char #4, byte #-1)
> at com.ctc.wstx.stax.WstxInputFactory.doCreateSR(WstxInputFactory.java:536)
> at com.ctc.wstx.stax.WstxInputFactory.createSR(WstxInputFactory.java:585)
> at com.ctc.wstx.stax.WstxInputFactory.createSR(WstxInputFactory.java:610)
> at com.ctc.wstx.stax.WstxInputFactory.createXMLStreamReader(WstxInputFactory.java:316)
> at __redirected.__XMLInputFactory.createXMLStreamReader(__XMLInputFactory.java:142) [jboss-modules.jar:1.3.3.Final]
> at org.apache.cxf.staxutils.StaxUtils.createXMLStreamReader(StaxUtils.java:1445)
> ... 40 more
> Caused by: java.io.CharConversionException: Invalid UTF-8 start byte 0x95 (at char #4, byte #-1)
> at com.ctc.wstx.io.UTF8Reader.reportInvalidInitial(UTF8Reader.java:303)
> at com.ctc.wstx.io.UTF8Reader.read(UTF8Reader.java:189)
> at com.ctc.wstx.io.ReaderBootstrapper.initialLoad(ReaderBootstrapper.java:250)
> at com.ctc.wstx.io.ReaderBootstrapper.bootstrapInput(ReaderBootstrapper.java:133)
> at com.ctc.wstx.stax.WstxInputFactory.doCreateSR(WstxInputFactory.java:531)
> ... 45 more
> {noformat}
> Connecting to the same SOAP-service via the reverse-proxy via the AJP from a different party/client does not show this problem.
> Connecting to the same SOAP-service directly to the http-connector on the same WildFly 8.1.0-Final server does not show this problem.
> Wild guess is that it depends somehow on the HTTP-headers of the .NET client. These are the headers captured via Fiddler-http-proxy on the client-side:
> {noformat}
> POST http://some.host.com/doce/Foo/Bar HTTP/1.1
> User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; MS Web Services Client Protocol 2.0.50727.3082)
> Content-Type: text/xml; charset=utf-8
> SOAPAction: "http://host/some/soap-action/ws/addDocument"
> Host: some.host.com
> Content-Length: 6592
> Expect: 100-continue
> Connection: Keep-Alive
> Accept: application/json, text/plain, */*
> {noformat}
> There is a small chance it's the same root-cause as [WFLY-2999]. Maybe I'll find the time to test this on 9.0.0-Beta1.
--
This message was sent by Atlassian JIRA
(v6.3.11#6341)
11 years, 3 months
[JBoss JIRA] (WFCORE-511) "Reload" support for embedded server
by Brian Stansberry (JIRA)
Brian Stansberry created WFCORE-511:
---------------------------------------
Summary: "Reload" support for embedded server
Key: WFCORE-511
URL: https://issues.jboss.org/browse/WFCORE-511
Project: WildFly Core
Issue Type: Feature Request
Components: Server
Reporter: Brian Stansberry
The current embedded server stuff doesn't handle a server reload. It creates a ModelControllerClient and then holds it forever. But the ModelController backed by that MCC will be invalid after a reload.
Key thing is to register a listener with the ControlledProcessStateService, and in callbacks deal with the reload and establish a new MCC. Nice to have is to make the MCC returned by StandaloneServer.getModelControllerClient() a facade that delegates to whatever the current "live" MCC is.
--
This message was sent by Atlassian JIRA
(v6.3.11#6341)
11 years, 3 months
[JBoss JIRA] (JGRP-1906) jdbc_ping doesn't delete crashed node
by rama rama (JIRA)
[ https://issues.jboss.org/browse/JGRP-1906?page=com.atlassian.jira.plugin.... ]
rama rama closed JGRP-1906.
---------------------------
Resolution: Rejected
> jdbc_ping doesn't delete crashed node
> -------------------------------------
>
> Key: JGRP-1906
> URL: https://issues.jboss.org/browse/JGRP-1906
> Project: JGroups
> Issue Type: Bug
> Affects Versions: 3.6.1
> Environment: n/a
> Reporter: rama rama
> Assignee: Bela Ban
> Priority: Critical
>
> Hi,
> i am having trouble with jgroups. If a node crash, it doesn't get removed from jdbc table.
> After 10 hours of work, the table contains 100 rows (that is not a problem) but, when the server starts up, GMS takes _AGES_ trying to connect to all these dead nodes.
> Since i am only a 'user' speaking about jgroups and i have no idea how it works internally, is this normal?
> I don't think so, master of the cluster, or a governor or something like that, should be able to detect if a node is present or not (via FD,VERIFY_SUSPECT) and delete it from JDBC table to avoid issue on startup for the next time, correct me if i am wrong.
> Here a copy/paste of my current config, configured in applicative way.
> ------------------------------------------------------------------------------------
> int min_cores = 1;
> int max_cores = 50;
> InetAddress bind_addr = org.jgroups.util.Util.getAddressByPatternMatch("match-address:" + Config.get("Cluster.bind_addr"));
> stack
> .addProtocol(new UDP()
> .setValue("bind_addr", bind_addr)
> .setValue("loopback", true)
> .setValue("thread_naming_pattern", "cl")
> .setValue("timer_type", "new3")
> .setValue("timer_min_threads", min_cores)
> .setValue("timer_max_threads", max_cores)
> .setValue("timer_keep_alive_time", Util.MIN * 10)
> .setValue("timer_queue_max_size", 500)
> .setValue("mcast_port", Config.get("Cluster.mcast_port", Constant.INT_NULL))
> .setValue("thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("thread_pool_min_threads", min_cores)
> .setValue("thread_pool_max_threads", max_cores)
> .setValue("thread_pool_queue_enabled", false)
> .setValue("thread_pool_queue_max_size", 500)
> .setValue("oob_thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("oob_thread_pool_min_threads", min_cores)
> .setValue("oob_thread_pool_max_threads", max_cores)
> .setValue("oob_thread_pool_queue_enabled", false)
> .setValue("oob_thread_pool_queue_max_size", 500)
> .setValue("internal_thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("internal_thread_pool_min_threads", min_cores)
> .setValue("internal_thread_pool_max_threads", max_cores)
> .setValue("internal_thread_pool_queue_enabled", false)
> .setValue("internal_thread_pool_queue_max_size", 600000)
> .setValue("ucast_recv_buf_size", org.jgroups.util.Util.readBytesInteger("5M"))
> .setValue("ucast_send_buf_size", org.jgroups.util.Util.readBytesInteger("640K"))
> .setValue("mcast_recv_buf_size", org.jgroups.util.Util.readBytesInteger("5M"))
> .setValue("mcast_send_buf_size", org.jgroups.util.Util.readBytesInteger("640K"))
> )
> .addProtocol(new JDBC_PING()
> .setValue("connection_url", Config.get("DB.dbdriver") + ':' + Config.get("DB.dburl") + '/' + Config.get("DB.dbname"))
> .setValue("connection_username", Config.get("DB.dbuser"))
> .setValue("connection_password", Config.get("DB.dbpwd"))
> .setValue("connection_driver", "org.postgresql.Driver")
> .setValue("initialize_sql", "CREATE TABLE IF NOT EXISTS JGROUPSPING ( own_addr varchar(200) NOT NULL, cluster_name varchar(200) NOT NULL, ping_data bytea DEFAULT NULL, PRIMARY KEY (own_addr, cluster_name) )")
> )
> .addProtocol(new MERGE3()
> .setValue("max_interval", 10000)
> .setValue("min_interval", 1000)
> )
> .addProtocol(new FD_SOCK()
> .setValue("bind_addr", bind_addr)
> )
> .addProtocol(new FD_ALL()
> )
> .addProtocol(new VERIFY_SUSPECT()
> .setValue("timeout", 1000)
> .setValue("bind_addr", bind_addr)
> )
> .addProtocol(new NAKACK2()
> .setValue("xmit_interval", 500)
> .setValue("xmit_table_num_rows", 100)
> .setValue("xmit_table_msgs_per_row", 2000)
> .setValue("xmit_table_max_compaction_time", 30000)
> .setValue("max_msg_batch_size", 500)
> .setValue("use_mcast_xmit", false)
> .setValue("discard_delivered_msgs", true)
> )
> .addProtocol(new UNICAST3()
> .setValue("xmit_table_num_rows", 100)
> .setValue("xmit_table_msgs_per_row", 1000)
> .setValue("xmit_table_max_compaction_time", 30000)
> .setValue("max_msg_batch_size", 500)
> )
> .addProtocol(new STABLE()
> .setValue("stability_delay", 2000)
> .setValue("desired_avg_gossip", 60000)
> .setValue("max_bytes", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("cap", 0.1)
> )
> .addProtocol(new GMS()
> .setValue("join_timeout", 3000)
> .setValue("view_bundling", false)
> .setValue("print_local_addr", false)
> .setValue("print_physical_addrs", false)
> )
> .addProtocol(new UFC()
> .setValue("max_credits", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("min_threshold", 0.4)
> )
> .addProtocol(new MFC()
> .setValue("max_credits", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("min_threshold", 0.4)
> )
> .addProtocol(new FRAG2()
> .setValue("frag_size", org.jgroups.util.Util.readBytesInteger("60K"))
> )
> .addProtocol(new RSVP()
> .setValue("resend_interval", 2000)
> .setValue("timeout", 10000)
> )
> ;
> stack.init();
> ----------------------------
--
This message was sent by Atlassian JIRA
(v6.3.11#6341)
11 years, 3 months
[JBoss JIRA] (JGRP-1906) jdbc_ping doesn't delete crashed node
by rama rama (JIRA)
[ https://issues.jboss.org/browse/JGRP-1906?page=com.atlassian.jira.plugin.... ]
rama rama commented on JGRP-1906:
---------------------------------
sure, issue closed, tnx
> jdbc_ping doesn't delete crashed node
> -------------------------------------
>
> Key: JGRP-1906
> URL: https://issues.jboss.org/browse/JGRP-1906
> Project: JGroups
> Issue Type: Bug
> Affects Versions: 3.6.1
> Environment: n/a
> Reporter: rama rama
> Assignee: Bela Ban
> Priority: Critical
>
> Hi,
> i am having trouble with jgroups. If a node crash, it doesn't get removed from jdbc table.
> After 10 hours of work, the table contains 100 rows (that is not a problem) but, when the server start_up, GMS take _AGES_ to try to connect to all this dead node.
> Since i am only a 'user' speaking about jgroup and i have no idea on how internally does it work, it this normal?
> I don't think so, master of the cluster, or a governor or something like that, should be able to detect if a node is present or not (via FD,VERIFY_SUSPECT) and delete it from JDBC table to avoid issue on startup for the next time, correct me if i am wrong.
> Here a copy/paste of my current config, configured in applicative way.
> ------------------------------------------------------------------------------------
> int min_cores = 1;
> int max_cores = 50;
> InetAddress bind_addr = org.jgroups.util.Util.getAddressByPatternMatch("match-address:" + Config.get("Cluster.bind_addr"));
> stack
> .addProtocol(new UDP()
> .setValue("bind_addr", bind_addr)
> .setValue("loopback", true)
> .setValue("thread_naming_pattern", "cl")
> .setValue("timer_type", "new3")
> .setValue("timer_min_threads", min_cores)
> .setValue("timer_max_threads", max_cores)
> .setValue("timer_keep_alive_time", Util.MIN * 10)
> .setValue("timer_queue_max_size", 500)
> .setValue("mcast_port", Config.get("Cluster.mcast_port", Constant.INT_NULL))
> .setValue("thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("thread_pool_min_threads", min_cores)
> .setValue("thread_pool_max_threads", max_cores)
> .setValue("thread_pool_queue_enabled", false)
> .setValue("thread_pool_queue_max_size", 500)
> .setValue("oob_thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("oob_thread_pool_min_threads", min_cores)
> .setValue("oob_thread_pool_max_threads", max_cores)
> .setValue("oob_thread_pool_queue_enabled", false)
> .setValue("oob_thread_pool_queue_max_size", 500)
> .setValue("internal_thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("internal_thread_pool_min_threads", min_cores)
> .setValue("internal_thread_pool_max_threads", max_cores)
> .setValue("internal_thread_pool_queue_enabled", false)
> .setValue("internal_thread_pool_queue_max_size", 600000)
> .setValue("ucast_recv_buf_size", org.jgroups.util.Util.readBytesInteger("5M"))
> .setValue("ucast_send_buf_size", org.jgroups.util.Util.readBytesInteger("640K"))
> .setValue("mcast_recv_buf_size", org.jgroups.util.Util.readBytesInteger("5M"))
> .setValue("mcast_send_buf_size", org.jgroups.util.Util.readBytesInteger("640K"))
> )
> .addProtocol(new JDBC_PING()
> .setValue("connection_url", Config.get("DB.dbdriver") + ':' + Config.get("DB.dburl") + '/' + Config.get("DB.dbname"))
> .setValue("connection_username", Config.get("DB.dbuser"))
> .setValue("connection_password", Config.get("DB.dbpwd"))
> .setValue("connection_driver", "org.postgresql.Driver")
> .setValue("initialize_sql", "CREATE TABLE IF NOT EXISTS JGROUPSPING ( own_addr varchar(200) NOT NULL, cluster_name varchar(200) NOT NULL, ping_data bytea DEFAULT NULL, PRIMARY KEY (own_addr, cluster_name) )")
> )
> .addProtocol(new MERGE3()
> .setValue("max_interval", 10000)
> .setValue("min_interval", 1000)
> )
> .addProtocol(new FD_SOCK()
> .setValue("bind_addr", bind_addr)
> )
> .addProtocol(new FD_ALL()
> )
> .addProtocol(new VERIFY_SUSPECT()
> .setValue("timeout", 1000)
> .setValue("bind_addr", bind_addr)
> )
> .addProtocol(new NAKACK2()
> .setValue("xmit_interval", 500)
> .setValue("xmit_table_num_rows", 100)
> .setValue("xmit_table_msgs_per_row", 2000)
> .setValue("xmit_table_max_compaction_time", 30000)
> .setValue("max_msg_batch_size", 500)
> .setValue("use_mcast_xmit", false)
> .setValue("discard_delivered_msgs", true)
> )
> .addProtocol(new UNICAST3()
> .setValue("xmit_table_num_rows", 100)
> .setValue("xmit_table_msgs_per_row", 1000)
> .setValue("xmit_table_max_compaction_time", 30000)
> .setValue("max_msg_batch_size", 500)
> )
> .addProtocol(new STABLE()
> .setValue("stability_delay", 2000)
> .setValue("desired_avg_gossip", 60000)
> .setValue("max_bytes", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("cap", 0.1)
> )
> .addProtocol(new GMS()
> .setValue("join_timeout", 3000)
> .setValue("view_bundling", false)
> .setValue("print_local_addr", false)
> .setValue("print_physical_addrs", false)
> )
> .addProtocol(new UFC()
> .setValue("max_credits", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("min_threshold", 0.4)
> )
> .addProtocol(new MFC()
> .setValue("max_credits", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("min_threshold", 0.4)
> )
> .addProtocol(new FRAG2()
> .setValue("frag_size", org.jgroups.util.Util.readBytesInteger("60K"))
> )
> .addProtocol(new RSVP()
> .setValue("resend_interval", 2000)
> .setValue("timeout", 10000)
> )
> ;
> stack.init();
> ----------------------------
--
This message was sent by Atlassian JIRA
(v6.3.11#6341)
11 years, 3 months
[JBoss JIRA] (JGRP-1906) jdbc_ping doesn't delete crashed node
by Bela Ban (JIRA)
[ https://issues.jboss.org/browse/JGRP-1906?page=com.atlassian.jira.plugin.... ]
Bela Ban commented on JGRP-1906:
--------------------------------
In production, you won't run in IntelliJ, so that shouldn't be an issue then ?
Cleaning the table certainly works. Can you close this issue then ?
> jdbc_ping doesn't delete crashed node
> -------------------------------------
>
> Key: JGRP-1906
> URL: https://issues.jboss.org/browse/JGRP-1906
> Project: JGroups
> Issue Type: Bug
> Affects Versions: 3.6.1
> Environment: n/a
> Reporter: rama rama
> Assignee: Bela Ban
> Priority: Critical
>
> Hi,
> i am having trouble with jgroups. If a node crash, it doesn't get removed from jdbc table.
> After 10 hours of work, the table contains 100 rows (that is not a problem) but, when the server start_up, GMS take _AGES_ to try to connect to all this dead node.
> Since i am only a 'user' speaking about jgroup and i have no idea on how internally does it work, it this normal?
> I don't think so, master of the cluster, or a governor or something like that, should be able to detect if a node is present or not (via FD,VERIFY_SUSPECT) and delete it from JDBC table to avoid issue on startup for the next time, correct me if i am wrong.
> Here a copy/paste of my current config, configured in applicative way.
> ------------------------------------------------------------------------------------
> int min_cores = 1;
> int max_cores = 50;
> InetAddress bind_addr = org.jgroups.util.Util.getAddressByPatternMatch("match-address:" + Config.get("Cluster.bind_addr"));
> stack
> .addProtocol(new UDP()
> .setValue("bind_addr", bind_addr)
> .setValue("loopback", true)
> .setValue("thread_naming_pattern", "cl")
> .setValue("timer_type", "new3")
> .setValue("timer_min_threads", min_cores)
> .setValue("timer_max_threads", max_cores)
> .setValue("timer_keep_alive_time", Util.MIN * 10)
> .setValue("timer_queue_max_size", 500)
> .setValue("mcast_port", Config.get("Cluster.mcast_port", Constant.INT_NULL))
> .setValue("thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("thread_pool_min_threads", min_cores)
> .setValue("thread_pool_max_threads", max_cores)
> .setValue("thread_pool_queue_enabled", false)
> .setValue("thread_pool_queue_max_size", 500)
> .setValue("oob_thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("oob_thread_pool_min_threads", min_cores)
> .setValue("oob_thread_pool_max_threads", max_cores)
> .setValue("oob_thread_pool_queue_enabled", false)
> .setValue("oob_thread_pool_queue_max_size", 500)
> .setValue("internal_thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("internal_thread_pool_min_threads", min_cores)
> .setValue("internal_thread_pool_max_threads", max_cores)
> .setValue("internal_thread_pool_queue_enabled", false)
> .setValue("internal_thread_pool_queue_max_size", 600000)
> .setValue("ucast_recv_buf_size", org.jgroups.util.Util.readBytesInteger("5M"))
> .setValue("ucast_send_buf_size", org.jgroups.util.Util.readBytesInteger("640K"))
> .setValue("mcast_recv_buf_size", org.jgroups.util.Util.readBytesInteger("5M"))
> .setValue("mcast_send_buf_size", org.jgroups.util.Util.readBytesInteger("640K"))
> )
> .addProtocol(new JDBC_PING()
> .setValue("connection_url", Config.get("DB.dbdriver") + ':' + Config.get("DB.dburl") + '/' + Config.get("DB.dbname"))
> .setValue("connection_username", Config.get("DB.dbuser"))
> .setValue("connection_password", Config.get("DB.dbpwd"))
> .setValue("connection_driver", "org.postgresql.Driver")
> .setValue("initialize_sql", "CREATE TABLE IF NOT EXISTS JGROUPSPING ( own_addr varchar(200) NOT NULL, cluster_name varchar(200) NOT NULL, ping_data bytea DEFAULT NULL, PRIMARY KEY (own_addr, cluster_name) )")
> )
> .addProtocol(new MERGE3()
> .setValue("max_interval", 10000)
> .setValue("min_interval", 1000)
> )
> .addProtocol(new FD_SOCK()
> .setValue("bind_addr", bind_addr)
> )
> .addProtocol(new FD_ALL()
> )
> .addProtocol(new VERIFY_SUSPECT()
> .setValue("timeout", 1000)
> .setValue("bind_addr", bind_addr)
> )
> .addProtocol(new NAKACK2()
> .setValue("xmit_interval", 500)
> .setValue("xmit_table_num_rows", 100)
> .setValue("xmit_table_msgs_per_row", 2000)
> .setValue("xmit_table_max_compaction_time", 30000)
> .setValue("max_msg_batch_size", 500)
> .setValue("use_mcast_xmit", false)
> .setValue("discard_delivered_msgs", true)
> )
> .addProtocol(new UNICAST3()
> .setValue("xmit_table_num_rows", 100)
> .setValue("xmit_table_msgs_per_row", 1000)
> .setValue("xmit_table_max_compaction_time", 30000)
> .setValue("max_msg_batch_size", 500)
> )
> .addProtocol(new STABLE()
> .setValue("stability_delay", 2000)
> .setValue("desired_avg_gossip", 60000)
> .setValue("max_bytes", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("cap", 0.1)
> )
> .addProtocol(new GMS()
> .setValue("join_timeout", 3000)
> .setValue("view_bundling", false)
> .setValue("print_local_addr", false)
> .setValue("print_physical_addrs", false)
> )
> .addProtocol(new UFC()
> .setValue("max_credits", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("min_threshold", 0.4)
> )
> .addProtocol(new MFC()
> .setValue("max_credits", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("min_threshold", 0.4)
> )
> .addProtocol(new FRAG2()
> .setValue("frag_size", org.jgroups.util.Util.readBytesInteger("60K"))
> )
> .addProtocol(new RSVP()
> .setValue("resend_interval", 2000)
> .setValue("timeout", 10000)
> )
> ;
> stack.init();
> ----------------------------
--
This message was sent by Atlassian JIRA
(v6.3.11#6341)
11 years, 3 months
[JBoss JIRA] (WFLY-4265) Add support for caching handler
by Tomaz Cerar (JIRA)
[ https://issues.jboss.org/browse/WFLY-4265?page=com.atlassian.jira.plugin.... ]
Tomaz Cerar reassigned WFLY-4265:
---------------------------------
Assignee: Tomaz Cerar (was: Stuart Douglas)
> Add support for caching handler
> -------------------------------
>
> Key: WFLY-4265
> URL: https://issues.jboss.org/browse/WFLY-4265
> Project: WildFly
> Issue Type: Feature Request
> Components: Web (Undertow)
> Reporter: Tomaz Cerar
> Assignee: Tomaz Cerar
> Fix For: 9.0.0.CR1
>
>
> Currently we cannot configure CacheHandler as it requires DirectCache as parameter in constructor.
> One option to fix this is to have few cache handler impls that would only have primitive parameters in constructor or getters / setters that would configure direct cache.
> Or add proper type cache configuration to undertow subsystem.
--
This message was sent by Atlassian JIRA
(v6.3.11#6341)
11 years, 3 months
[JBoss JIRA] (WFLY-4265) Add support for caching handler
by Tomaz Cerar (JIRA)
Tomaz Cerar created WFLY-4265:
---------------------------------
Summary: Add support for caching handler
Key: WFLY-4265
URL: https://issues.jboss.org/browse/WFLY-4265
Project: WildFly
Issue Type: Feature Request
Components: Web (Undertow)
Reporter: Tomaz Cerar
Assignee: Stuart Douglas
Fix For: 9.0.0.CR1
Currently we cannot configure CacheHandler as it requires DirectCache as parameter in constructor.
One option to fix this is to have few cache handler impls that would only have primitive parameters in constructor or getters / setters that would configure direct cache.
Or add proper type cache configuration to undertow subsystem.
--
This message was sent by Atlassian JIRA
(v6.3.11#6341)
11 years, 3 months
[JBoss JIRA] (JGRP-1906) jdbc_ping doesn't delete crashed node
by rama rama (JIRA)
[ https://issues.jboss.org/browse/JGRP-1906?page=com.atlassian.jira.plugin.... ]
rama rama commented on JGRP-1906:
---------------------------------
My app also register a shutdown hook, i have noticed that running it into intellij doesn't fire it :D
that's probably why even the JDBC_PING hook fails.... my only chance here is to wipe out the table from time to time probably
> jdbc_ping doesn't delete crashed node
> -------------------------------------
>
> Key: JGRP-1906
> URL: https://issues.jboss.org/browse/JGRP-1906
> Project: JGroups
> Issue Type: Bug
> Affects Versions: 3.6.1
> Environment: n/a
> Reporter: rama rama
> Assignee: Bela Ban
> Priority: Critical
>
> Hi,
> i am having trouble with jgroups. If a node crash, it doesn't get removed from jdbc table.
> After 10 hours of work, the table contains 100 rows (that is not a problem) but, when the server start_up, GMS take _AGES_ to try to connect to all this dead node.
> Since i am only a 'user' speaking about jgroup and i have no idea on how internally does it work, it this normal?
> I don't think so, master of the cluster, or a governor or something like that, should be able to detect if a node is present or not (via FD,VERIFY_SUSPECT) and delete it from JDBC table to avoid issue on startup for the next time, correct me if i am wrong.
> Here a copy/paste of my current config, configured in applicative way.
> ------------------------------------------------------------------------------------
> int min_cores = 1;
> int max_cores = 50;
> InetAddress bind_addr = org.jgroups.util.Util.getAddressByPatternMatch("match-address:" + Config.get("Cluster.bind_addr"));
> stack
> .addProtocol(new UDP()
> .setValue("bind_addr", bind_addr)
> .setValue("loopback", true)
> .setValue("thread_naming_pattern", "cl")
> .setValue("timer_type", "new3")
> .setValue("timer_min_threads", min_cores)
> .setValue("timer_max_threads", max_cores)
> .setValue("timer_keep_alive_time", Util.MIN * 10)
> .setValue("timer_queue_max_size", 500)
> .setValue("mcast_port", Config.get("Cluster.mcast_port", Constant.INT_NULL))
> .setValue("thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("thread_pool_min_threads", min_cores)
> .setValue("thread_pool_max_threads", max_cores)
> .setValue("thread_pool_queue_enabled", false)
> .setValue("thread_pool_queue_max_size", 500)
> .setValue("oob_thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("oob_thread_pool_min_threads", min_cores)
> .setValue("oob_thread_pool_max_threads", max_cores)
> .setValue("oob_thread_pool_queue_enabled", false)
> .setValue("oob_thread_pool_queue_max_size", 500)
> .setValue("internal_thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("internal_thread_pool_min_threads", min_cores)
> .setValue("internal_thread_pool_max_threads", max_cores)
> .setValue("internal_thread_pool_queue_enabled", false)
> .setValue("internal_thread_pool_queue_max_size", 600000)
> .setValue("ucast_recv_buf_size", org.jgroups.util.Util.readBytesInteger("5M"))
> .setValue("ucast_send_buf_size", org.jgroups.util.Util.readBytesInteger("640K"))
> .setValue("mcast_recv_buf_size", org.jgroups.util.Util.readBytesInteger("5M"))
> .setValue("mcast_send_buf_size", org.jgroups.util.Util.readBytesInteger("640K"))
> )
> .addProtocol(new JDBC_PING()
> .setValue("connection_url", Config.get("DB.dbdriver") + ':' + Config.get("DB.dburl") + '/' + Config.get("DB.dbname"))
> .setValue("connection_username", Config.get("DB.dbuser"))
> .setValue("connection_password", Config.get("DB.dbpwd"))
> .setValue("connection_driver", "org.postgresql.Driver")
> .setValue("initialize_sql", "CREATE TABLE IF NOT EXISTS JGROUPSPING ( own_addr varchar(200) NOT NULL, cluster_name varchar(200) NOT NULL, ping_data bytea DEFAULT NULL, PRIMARY KEY (own_addr, cluster_name) )")
> )
> .addProtocol(new MERGE3()
> .setValue("max_interval", 10000)
> .setValue("min_interval", 1000)
> )
> .addProtocol(new FD_SOCK()
> .setValue("bind_addr", bind_addr)
> )
> .addProtocol(new FD_ALL()
> )
> .addProtocol(new VERIFY_SUSPECT()
> .setValue("timeout", 1000)
> .setValue("bind_addr", bind_addr)
> )
> .addProtocol(new NAKACK2()
> .setValue("xmit_interval", 500)
> .setValue("xmit_table_num_rows", 100)
> .setValue("xmit_table_msgs_per_row", 2000)
> .setValue("xmit_table_max_compaction_time", 30000)
> .setValue("max_msg_batch_size", 500)
> .setValue("use_mcast_xmit", false)
> .setValue("discard_delivered_msgs", true)
> )
> .addProtocol(new UNICAST3()
> .setValue("xmit_table_num_rows", 100)
> .setValue("xmit_table_msgs_per_row", 1000)
> .setValue("xmit_table_max_compaction_time", 30000)
> .setValue("max_msg_batch_size", 500)
> )
> .addProtocol(new STABLE()
> .setValue("stability_delay", 2000)
> .setValue("desired_avg_gossip", 60000)
> .setValue("max_bytes", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("cap", 0.1)
> )
> .addProtocol(new GMS()
> .setValue("join_timeout", 3000)
> .setValue("view_bundling", false)
> .setValue("print_local_addr", false)
> .setValue("print_physical_addrs", false)
> )
> .addProtocol(new UFC()
> .setValue("max_credits", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("min_threshold", 0.4)
> )
> .addProtocol(new MFC()
> .setValue("max_credits", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("min_threshold", 0.4)
> )
> .addProtocol(new FRAG2()
> .setValue("frag_size", org.jgroups.util.Util.readBytesInteger("60K"))
> )
> .addProtocol(new RSVP()
> .setValue("resend_interval", 2000)
> .setValue("timeout", 10000)
> )
> ;
> stack.init();
> ----------------------------
--
This message was sent by Atlassian JIRA
(v6.3.11#6341)
11 years, 3 months
[JBoss JIRA] (JGRP-1906) jdbc_ping doesn't delete crashed node
by Bela Ban (JIRA)
[ https://issues.jboss.org/browse/JGRP-1906?page=com.atlassian.jira.plugin.... ]
Bela Ban commented on JGRP-1906:
--------------------------------
{{JDBC_PING}} registers a shutdown hook when starting. When the process is killed or stopped gracefully, that hook removes the process's information from the DB table. However, this does not happen when you kill a process with kill -9; in that case, you have to remove the information manually.
The reason failure detection doesn't do this is because of split brains, e.g. if we have A,B,C,D split into AB and CD. In this case, A cannot remove C and D, and C cannot remove A and B, because the members are still alive.
> jdbc_ping doesn't delete crashed node
> -------------------------------------
>
> Key: JGRP-1906
> URL: https://issues.jboss.org/browse/JGRP-1906
> Project: JGroups
> Issue Type: Bug
> Affects Versions: 3.6.1
> Environment: n/a
> Reporter: rama rama
> Assignee: Bela Ban
> Priority: Critical
>
> Hi,
> i am having trouble with jgroups. If a node crash, it doesn't get removed from jdbc table.
> After 10 hours of work, the table contains 100 rows (that is not a problem) but, when the server starts up, GMS takes _AGES_ trying to connect to all these dead nodes.
> Since i am only a 'user' speaking about jgroups and i have no idea how it works internally, is this normal?
> I don't think so; the master of the cluster, or a governor or something like that, should be able to detect whether a node is present or not (via FD, VERIFY_SUSPECT) and delete it from the JDBC table to avoid this issue on the next startup — correct me if i am wrong.
> Here a copy/paste of my current config, configured in applicative way.
> ------------------------------------------------------------------------------------
> int min_cores = 1;
> int max_cores = 50;
> InetAddress bind_addr = org.jgroups.util.Util.getAddressByPatternMatch("match-address:" + Config.get("Cluster.bind_addr"));
> stack
> .addProtocol(new UDP()
> .setValue("bind_addr", bind_addr)
> .setValue("loopback", true)
> .setValue("thread_naming_pattern", "cl")
> .setValue("timer_type", "new3")
> .setValue("timer_min_threads", min_cores)
> .setValue("timer_max_threads", max_cores)
> .setValue("timer_keep_alive_time", Util.MIN * 10)
> .setValue("timer_queue_max_size", 500)
> .setValue("mcast_port", Config.get("Cluster.mcast_port", Constant.INT_NULL))
> .setValue("thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("thread_pool_min_threads", min_cores)
> .setValue("thread_pool_max_threads", max_cores)
> .setValue("thread_pool_queue_enabled", false)
> .setValue("thread_pool_queue_max_size", 500)
> .setValue("oob_thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("oob_thread_pool_min_threads", min_cores)
> .setValue("oob_thread_pool_max_threads", max_cores)
> .setValue("oob_thread_pool_queue_enabled", false)
> .setValue("oob_thread_pool_queue_max_size", 500)
> .setValue("internal_thread_pool_keep_alive_time", Util.MIN * 10)
> .setValue("internal_thread_pool_min_threads", min_cores)
> .setValue("internal_thread_pool_max_threads", max_cores)
> .setValue("internal_thread_pool_queue_enabled", false)
> .setValue("internal_thread_pool_queue_max_size", 600000)
> .setValue("ucast_recv_buf_size", org.jgroups.util.Util.readBytesInteger("5M"))
> .setValue("ucast_send_buf_size", org.jgroups.util.Util.readBytesInteger("640K"))
> .setValue("mcast_recv_buf_size", org.jgroups.util.Util.readBytesInteger("5M"))
> .setValue("mcast_send_buf_size", org.jgroups.util.Util.readBytesInteger("640K"))
> )
> .addProtocol(new JDBC_PING()
> .setValue("connection_url", Config.get("DB.dbdriver") + ':' + Config.get("DB.dburl") + '/' + Config.get("DB.dbname"))
> .setValue("connection_username", Config.get("DB.dbuser"))
> .setValue("connection_password", Config.get("DB.dbpwd"))
> .setValue("connection_driver", "org.postgresql.Driver")
> .setValue("initialize_sql", "CREATE TABLE IF NOT EXISTS JGROUPSPING ( own_addr varchar(200) NOT NULL, cluster_name varchar(200) NOT NULL, ping_data bytea DEFAULT NULL, PRIMARY KEY (own_addr, cluster_name) )")
> )
> .addProtocol(new MERGE3()
> .setValue("max_interval", 10000)
> .setValue("min_interval", 1000)
> )
> .addProtocol(new FD_SOCK()
> .setValue("bind_addr", bind_addr)
> )
> .addProtocol(new FD_ALL()
> )
> .addProtocol(new VERIFY_SUSPECT()
> .setValue("timeout", 1000)
> .setValue("bind_addr", bind_addr)
> )
> .addProtocol(new NAKACK2()
> .setValue("xmit_interval", 500)
> .setValue("xmit_table_num_rows", 100)
> .setValue("xmit_table_msgs_per_row", 2000)
> .setValue("xmit_table_max_compaction_time", 30000)
> .setValue("max_msg_batch_size", 500)
> .setValue("use_mcast_xmit", false)
> .setValue("discard_delivered_msgs", true)
> )
> .addProtocol(new UNICAST3()
> .setValue("xmit_table_num_rows", 100)
> .setValue("xmit_table_msgs_per_row", 1000)
> .setValue("xmit_table_max_compaction_time", 30000)
> .setValue("max_msg_batch_size", 500)
> )
> .addProtocol(new STABLE()
> .setValue("stability_delay", 2000)
> .setValue("desired_avg_gossip", 60000)
> .setValue("max_bytes", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("cap", 0.1)
> )
> .addProtocol(new GMS()
> .setValue("join_timeout", 3000)
> .setValue("view_bundling", false)
> .setValue("print_local_addr", false)
> .setValue("print_physical_addrs", false)
> )
> .addProtocol(new UFC()
> .setValue("max_credits", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("min_threshold", 0.4)
> )
> .addProtocol(new MFC()
> .setValue("max_credits", org.jgroups.util.Util.readBytesInteger("4M"))
> .setValue("min_threshold", 0.4)
> )
> .addProtocol(new FRAG2()
> .setValue("frag_size", org.jgroups.util.Util.readBytesInteger("60K"))
> )
> .addProtocol(new RSVP()
> .setValue("resend_interval", 2000)
> .setValue("timeout", 10000)
> )
> ;
> stack.init();
> ----------------------------
--
This message was sent by Atlassian JIRA
(v6.3.11#6341)
11 years, 3 months