I deployed an nginx ingress controller in my k8s cluster to reach a backend application.
When testing my ingress resource configuration, I noticed that if I add a rewrite rule, I also need to declare a path for the rewritten URI pointing to the same service.
For example, the following ingress config doesn't work:
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: fieldprov-app
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /wetty
    nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
    kubernetes.io/ingress.class: "field-management"
spec:
  tls:
  - hosts:
    - ccfanhe09.sce-lab.com
  rules:
  - host: ccfanhe09.sce-lab.com
    http:
      paths:
      - path: /provisioning
        backend:
          serviceName: fieldprov-app
          servicePort: 3000
The controller redirects me to its default backend server associated with "/":
fd10::2:102 - [fd10::2:102] - - [01/Oct/2019:19:34:07 +0000] "GET /provisioning HTTP/2.0" 304 0 "-" "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0" 236 0.003 [default-fieldprov-app-3000] [] [fd10::1:10a]:3000 0 0.003 304 64895f25fd3fb9937f66ebbcee369c81
fd10::2:102 - [fd10::2:102] - - [01/Oct/2019:19:34:07 +0000] "GET /wetty/socket.io/socket.io.js HTTP/2.0" 404 159 "https://ccfanhe10.sce-lab.com:30000/provisioning" "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0" 71 0.002 [upstream-default-backend] [] 127.0.0.1:8181 159 0.003 404 8310972dd6b39f294bae7550305bd7c2
fd10::2:102 - [fd10::2:102] - - [01/Oct/2019:19:34:07 +0000] "GET /wetty/wetty.min.js HTTP/2.0" 404 159 "https://ccfanhe10.sce-lab.com:30000/provisioning" "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0" 24 0.001 [upstream-default-backend] [] 127.0.0.1:8181 159 0.002 404 47875d655cf4bcd02fc6212dc6142848
fd10::2:102 - [fd10::2:102] - - [01/Oct/2019:19:34:07 +0000] "GET /wetty/wetty.min.js HTTP/2.0" 404 159 "https://ccfanhe10.sce-lab.com:30000/provisioning" "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0" 24 0.001 [upstream-default-backend] [] 127.0.0.1:8181 159 0.000 404 08d1fd4cc72a5475d0fcfb4ca6a4cb2b
So it looks to me like the controller is doing a second lookup after applying the rewrite rule.
Below is the nginx.conf associated with the ingress config described above:
## start server ccfanhe10.sce-lab.com
server {
    server_name ccfanhe10.sce-lab.com ;
    listen 80 ;
    listen [::]:80 ;
    listen 443 ssl http2 ;
    listen [::]:443 ssl http2 ;
    set $proxy_upstream_name "-";
    # PEM sha: 692b563bafa154b7d28350ef01e7c4d53ec2afd1
    ssl_certificate /etc/ingress-controller/ssl/default-fake-certificate.pem;
    ssl_certificate_key /etc/ingress-controller/ssl/default-fake-certificate.pem;
    ssl_certificate_by_lua_block {
        certificate.call()
    }
    location ~* "^/provisioning" {
        set $namespace "default";
        set $ingress_name "fieldprov-app";
        set $service_name "fieldprov-app";
        set $service_port "{0 3000 }";
        set $location_path "/provisioning";
        rewrite_by_lua_block {
            lua_ingress.rewrite({
                force_ssl_redirect = true,
                use_port_in_redirects = false,
            })
            balancer.rewrite()
            plugins.run()
        }
        header_filter_by_lua_block {
            plugins.run()
        }
        body_filter_by_lua_block {
        }
        log_by_lua_block {
            balancer.log()
            monitor.call()
            plugins.run()
        }
        if ($scheme = https) {
            more_set_headers "Strict-Transport-Security: max-age=15724800; includeSubDomains";
        }
        rewrite_log on;
        port_in_redirect off;
        set $balancer_ewma_score -1;
        set $proxy_upstream_name "default-fieldprov-app-3000";
        set $proxy_host $proxy_upstream_name;
        set $pass_access_scheme $scheme;
        set $pass_server_port $server_port;
        set $best_http_host $http_host;
        set $pass_port $pass_server_port;
        set $proxy_alternative_upstream_name "";
        client_max_body_size 1m;
        proxy_set_header Host $best_http_host;
        # Pass the extracted client certificate to the backend
        # Allow websocket connections
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header X-Request-ID $req_id;
        proxy_set_header X-Real-IP $the_real_ip;
        proxy_set_header X-Forwarded-For $the_real_ip;
        proxy_set_header X-Forwarded-Host $best_http_host;
        proxy_set_header X-Forwarded-Port $pass_port;
        proxy_set_header X-Forwarded-Proto $pass_access_scheme;
        proxy_set_header X-Original-URI $request_uri;
        proxy_set_header X-Scheme $pass_access_scheme;
        # Pass the original X-Forwarded-For
        proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;
        # mitigate HTTPoxy Vulnerability
        # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
        proxy_set_header Proxy "";
        # Custom headers to proxied server
        proxy_connect_timeout 5s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 4k;
        proxy_request_buffering on;
        proxy_http_version 1.1;
        proxy_cookie_domain off;
        proxy_cookie_path off;
        # In case of errors try the next upstream server before returning an error
        proxy_next_upstream error timeout;
        proxy_next_upstream_timeout 0;
        proxy_next_upstream_tries 3;
        rewrite "(?i)/provisioning" /wetty break;
        proxy_pass http://upstream_balancer;
        proxy_redirect off;
    }
    location ~* "^/" {
        set $namespace "";
        set $ingress_name "";
        set $service_name "";
        set $service_port "{0 0 }";
        set $location_path "/";
        rewrite_by_lua_block {
            lua_ingress.rewrite({
                force_ssl_redirect = true,
                use_port_in_redirects = false,
            })
            balancer.rewrite()
            plugins.run()
        }
        header_filter_by_lua_block {
            plugins.run()
        }
        body_filter_by_lua_block {
        }
        log_by_lua_block {
            balancer.log()
            monitor.call()
            plugins.run()
        }
        if ($scheme = https) {
            more_set_headers "Strict-Transport-Security: max-age=15724800; includeSubDomains";
        }
        rewrite_log on;
        port_in_redirect off;
        set $balancer_ewma_score -1;
        set $proxy_upstream_name "upstream-default-backend";
        set $proxy_host $proxy_upstream_name;
        set $pass_access_scheme $scheme;
        set $pass_server_port $server_port;
        set $best_http_host $http_host;
        set $pass_port $pass_server_port;
        set $proxy_alternative_upstream_name "";
        client_max_body_size 1m;
        proxy_set_header Host $best_http_host;
        # Pass the extracted client certificate to the backend
        # Allow websocket connections
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header X-Request-ID $req_id;
        proxy_set_header X-Real-IP $the_real_ip;
        proxy_set_header X-Forwarded-For $the_real_ip;
        proxy_set_header X-Forwarded-Host $best_http_host;
        proxy_set_header X-Forwarded-Port $pass_port;
        proxy_set_header X-Forwarded-Proto $pass_access_scheme;
        proxy_set_header X-Original-URI $request_uri;
        proxy_set_header X-Scheme $pass_access_scheme;
        # Pass the original X-Forwarded-For
        proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;
        # mitigate HTTPoxy Vulnerability
        # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
        proxy_set_header Proxy "";
        # Custom headers to proxied server
        proxy_connect_timeout 5s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 4k;
        proxy_request_buffering on;
        proxy_http_version 1.1;
        proxy_cookie_domain off;
        proxy_cookie_path off;
        # In case of errors try the next upstream server before returning an error
        proxy_next_upstream error timeout;
        proxy_next_upstream_timeout 0;
        proxy_next_upstream_tries 3;
        rewrite "(?i)/" /wetty break;
        proxy_pass http://upstream_balancer;
        proxy_redirect off;
    }
}
## end server ccfanhe10.sce-lab.com
I don't understand the rewrite rule under the "^/" location.
Also, I thought that adding the break keyword to the rewrite rule would avoid any extra URI lookup, but that is not the behavior I'm seeing. If I don't create a location for "/wetty" pointing to the same service as "/provisioning", it doesn't work.
I'm looking for an explanation of the expected behavior in this situation.
Thanks for your support!
Try to use capture groups, as described in the rewrite example section of the ingress-nginx documentation:
Starting in Version 0.22.0, ingress definitions using the annotation nginx.ingress.kubernetes.io/rewrite-target are not backwards compatible with previous versions. In Version 0.22.0 and beyond, any substrings within the request URI that need to be passed to the rewritten path must explicitly be defined in a capture group.
Captured groups are saved in numbered placeholders, chronologically, in the form $1, $2 ... $n. These placeholders can be used as parameters in the rewrite-target annotation.
So your ingress config should look something like this:
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: fieldprov-app
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /wetty/$2
    nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
    kubernetes.io/ingress.class: "field-management"
spec:
  tls:
  - hosts:
    - ccfanhe09.sce-lab.com
  rules:
  - host: ccfanhe09.sce-lab.com
    http:
      paths:
      - path: /provisioning(/|$)(.*)
        backend:
          serviceName: fieldprov-app
          servicePort: 3000
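With this config, anything captured after /provisioning is appended to /wetty when the request is proxied. As a rough sketch of the mapping (assuming the host and service above):
$ curl -I -k https://ccfanhe09.sce-lab.com/provisioning               # rewritten to /wetty/
$ curl -I -k https://ccfanhe09.sce-lab.com/provisioning/wetty.min.js  # rewritten to /wetty/wetty.min.js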
As for the nginx.conf, it looks like that because its settings are injected by the ingress controller using lua-nginx-module.
Update:
If the rewrite rule is working correctly, there shouldn't be any extra lookups. I was not able to find anything like that in the nginx-ingress annotations.
You can check whether the rewrite rule is working with a curl -I -k <URI> command:
$ curl -I -k http://approot.bar.com/
HTTP/1.1 302 Moved Temporarily
Server: nginx/1.11.10
Date: Mon, 13 Mar 2017 14:57:15 GMT
Content-Type: text/html
Content-Length: 162
Location: http://stickyingress.example.com/app1
Connection: keep-alive
I have bitcoind running on Ubuntu. bitcoin-cli works fine, but I cannot get the JSON-RPC protocol working.
bitcoin.conf file:
testnet=0
rpcuser="bitcoinrpc"
rpcpassword="xxxxx"
rpcport=8332
rpcallowip="*"
server=1
An HTTP POST request with url='http://bitcoinrpc:xxxxx#127.0.0.1:8332/' fails with a 401 error.
request headers:
Accept:*/*
Accept-Encoding:gzip, deflate
Accept-Language:en-US,en;q=0.8,ru;q=0.6,de;q=0.4,sr;q=0.2
Authorization:Basic Yml0Y29pbnJwYzp4eHh4eA==
Cache-Control:no-cache
Connection:keep-alive
Content-Length:53
Content-Type:text/plain
DNT:1
Host:127.0.0.1:8332
Origin:chrome-extension://fhjcajmcbmldlhcimfajhfbgofnpcjmb
Pragma:no-cache
User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/51.0.2704.79 Chrome/51.0.2704.79 Safari/537.36
request post payload:
{jsonrpc: "2.0", method: "getinfo", params: []}
What is the correct way to authenticate with bitcoind's JSON-RPC?
For future googlers: a possible problem is that the password should not contain the pound sign (#) as this is treated as a comment!
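For reference, here is a minimal sketch of a JSON-RPC call over HTTP basic auth with curl, assuming the rpcuser/rpcpassword configured above (and a password without a # in it); curl builds the Authorization header from --user:
$ curl --user bitcoinrpc:xxxxx --data-binary '{"jsonrpc": "1.0", "id": "test", "method": "getinfo", "params": []}' -H 'content-type: text/plain;' http://127.0.0.1:8332/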
So I am trying to do some Tomcat access log analysis by getting the log loaded into MySQL. I have the majority of it working, but the last field in the combined access log is kind of a pain: it does not always have the same number of spaces, and the file is space delimited. I need the last string in each line to either have the spaces removed or replaced with a comma or some other placeholder.
I process the file through sed to remove all of the " characters from the file, so if I can add more to my sed command to do this that would be great; if I need to run it against something else after the sed command, that will work too.
Here is the file before the sed command
24.240.97.38 - - [09/Feb/2015:07:38:23 -0600] "GET /irep/client/Cart/Controller/TempController.js HTTP/1.1" 304 - "webpage" "Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4"
24.240.97.38 - - [09/Feb/2015:07:38:23 -0600] "GET /irep/client/Libraries/jquery.mobile.datebox.js HTTP/1.1" 304 - "webpage" "Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4"
Here is the sed command
sed 's/\"//g' filename > newfilename
Here is an example string from the file after that command is run against it. Since the file is space delimited, MySQL tries to create several more columns than it can handle, so if I can get all the spaces out of the last section that would be awesome.
24.240.97.38 - - [09/Feb/2015:07:38:23 -0600] GET /irep/client/Content/css/jquery.mobile.datebox.css HTTP/1.1 304 - webaddress Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4
24.240.97.38 - - [09/Feb/2015:07:38:23 -0600] GET /irep/client/Libraries/Bookmark.js HTTP/1.1 304 - webaddress Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4
Example of a string where Mozilla is not present.
24.240.97.38 - - [09/Feb/2015:07:38:21 -0600] GET /irep/images/integra.png HTTP/1.1 304 - - MobileSafari/600.1.4 CFNetwork/711.1.16 Darwin/14.0.0
Here is my expected output (sorry, I had several distractions this morning):
IPAddress, ClientUsername, AuthUserName, DateTime, Request/File, Protocol, Status, SizeBytes, ReferenceAddress, UserAgent/Browser
I would post a screenshot of the table in MySQL Workbench but I am not allowed to yet.
Basically, everything from "Mozilla" to the end of the row I want the spaces replaced or gone; I think a comma or : placeholder would be ideal. Any suggestions?
Ed, here is the error I am getting when running it today.
awk: irep-istor_access_log.2015-02-10.txt:4: 166.173.58.240 - - [10/Feb/2015:00:04:07 -0600] "GET /istore/js/cart.js HTTP/1.1" 200 7042 "https://istore.salonservicegroup.com/istore/loginpage.jsp" "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0"
awk: irep-istor_access_log.2015-02-10.txt:4: ^ syntax error
You can do the part you have left like this:
$ awk 'match($0,/Mozilla.*/){ tgt=substr($0,RSTART); gsub(/[[:space:]]+/,",",tgt); $0 = substr($0,1,RSTART-1) tgt } 1' file
24.240.97.38 - - [09/Feb/2015:07:38:23 -0600] GET /irep/client/Content/css/jquery.mobile.datebox.css HTTP/1.1 304 - webaddress Mozilla/5.0,(iPad;,CPU,OS,8_1_3,like,Mac,OS,X),AppleWebKit/600.1.4,(KHTML,,like,Gecko),Version/8.0,Mobile/12B466,Safari/600.1.4
24.240.97.38 - - [09/Feb/2015:07:38:23 -0600] GET /irep/client/Libraries/Bookmark.js HTTP/1.1 304 - webaddress Mozilla/5.0,(iPad;,CPU,OS,8_1_3,like,Mac,OS,X),AppleWebKit/600.1.4,(KHTML,,like,Gecko),Version/8.0,Mobile/12B466,Safari/600.1.4
but you should just be using one small, simple awk script for the whole thing, whatever that is.
I see you just added some pre-sed input (but still no expected output), so:
$ cat file
24.240.97.38 - - [09/Feb/2015:07:38:23 -0600] "GET /irep/client/Cart/Controller/TempController.js HTTP/1.1" 304 - "webpage" "Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4"
24.240.97.38 - - [09/Feb/2015:07:38:23 -0600] "GET /irep/client/Libraries/jquery.mobile.datebox.js HTTP/1.1" 304 - "webpage" "Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4"
$
$ awk '{gsub(/"/,"")} match($0,/Mozilla.*/){ tgt=substr($0,RSTART); gsub(/[[:space:]]+/,",",tgt); $0 = substr($0,1,RSTART-1) tgt } 1' file
24.240.97.38 - - [09/Feb/2015:07:38:23 -0600] GET /irep/client/Cart/Controller/TempController.js HTTP/1.1 304 - webpage Mozilla/5.0,(iPad;,CPU,OS,8_1_3,like,Mac,OS,X),AppleWebKit/600.1.4,(KHTML,,like,Gecko),Version/8.0,Mobile/12B466,Safari/600.1.4
24.240.97.38 - - [09/Feb/2015:07:38:23 -0600] GET /irep/client/Libraries/jquery.mobile.datebox.js HTTP/1.1 304 - webpage Mozilla/5.0,(iPad;,CPU,OS,8_1_3,like,Mac,OS,X),AppleWebKit/600.1.4,(KHTML,,like,Gecko),Version/8.0,Mobile/12B466,Safari/600.1.4
Different approach: here is how to convert your input file into a CSV file:
$ cat tst.awk
BEGIN{
OFS=","
print "ipAddr", "dash1", "dash2", "dateTime", "getCmd", "number", "info", "browser"
}
{
gsub(OFS,";")
ip = $1
dash1 = $2
dash2 = $3
match($0,/\[[^]]+\]/)
dt = substr($0,RSTART+1,RLENGTH-2)
match($0,/"[^"]+"/)
get = substr($0,RSTART+1,RLENGTH-2)
$0 = substr($0,RSTART+RLENGTH)
num = $1
dash3 = $2
match($0,/"[^"]+"/)
info = substr($0,RSTART+1,RLENGTH-2)
$0 = substr($0,RSTART+RLENGTH)
match($0,/"[^"]+"/)
browser = substr($0,RSTART+1,RLENGTH-2)
print ip, dash1, dash2, dt, get, num, info, browser
}
$ awk -f tst.awk file
ipAddr,dash1,dash2,dateTime,getCmd,number,info,browser
24.240.97.38,-,-,09/Feb/2015:07:38:23 -0600,GET /irep/client/Cart/Controller/TempController.js HTTP/1.1,304,webpage,Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML; like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4
24.240.97.38,-,-,09/Feb/2015:07:38:23 -0600,GET /irep/client/Libraries/jquery.mobile.datebox.js HTTP/1.1,304,webpage,Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML; like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4
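Once the log is in CSV form, loading it into MySQL is a separate one-liner. A minimal sketch, assuming a table named access_log with matching columns and local_infile enabled (the table, database, and file names here are only placeholders):
$ awk -f tst.awk file > access.csv
$ mysql --local-infile=1 -u youruser -p yourdb -e "LOAD DATA LOCAL INFILE 'access.csv' INTO TABLE access_log FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n' IGNORE 1 LINES;"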
With our new webservers, the access logs are in JSON and I'm not able to use typical awk commands to pull out traffic info. I've found jsawk, but I keep getting a parse error any time I try to pull anything out of the access logs. I have the feeling that the logs are not in a format that the parser likes.
Here is a sample entry from the logs:
{ "#timestamp": "2014-09-30T21:33:56+00:00", "webserver_remote_addr": "24.4.209.153", "webserver_remote_user": "-", "webserver_body_bytes_sent": 193, "webserver_request_time": 0.000, "webserver_status": "404", "webserver_request": "GET /favicon.ico HTTP/1.1", "webserver_request_method": "GET", "webserver_http_referrer": "-", "webserver_http_user_agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36" }
So for example if I want to pull the IP addresses out of the logs, I would use this:
cat access.log | jsawk 'return this.webserver_remote_addr'
However, this only results in 'jsawk: JSON parse error:' and the entire access log being printed.
Am I correct in assuming that the access logs are in a format the parser doesn't recognize? Each entry in the logs is on a single line. How can I get jsawk to parse properly?
I tried this:
$ echo '{ "#timestamp": "2014-09-30T21:33:56+00:00", "webserver_remote_addr": "24.4.209.153", "webserver_remote_user": "-", "webserver_body_bytes_sent": 193, "webserver_request_time": 0.000, "webserver_status": "404", "webserver_request": "GET /favicon.ico HTTP/1.1", "webserver_request_method": "GET", "webserver_http_referrer": "-", "webserver_http_user_agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36" }' | jsawk 'return this.webserver_remote_addr'
and got this:
24.4.209.153
Update:
I think the problem is that each line is a separate JSON object and there are multiple lines in access.log. There's a good workaround here: How to use jsawk if every line is a json object?
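As a rough sketch of that workaround (assuming the field name from your sample entry), you can either feed jsawk one line at a time, or use jq, a different tool that handles a stream of one-object-per-line JSON natively:
$ while read -r line; do echo "$line" | jsawk 'return this.webserver_remote_addr'; done < access.log
$ jq -r '.webserver_remote_addr' access.log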
I'm using a MySQL database in a Node.js application but can't connect to MySQL. I just changed the XAMPP server port from 80 to 600; what's the problem with the MySQL connection?
Node.js connection code:
var mysql = require('mysql');
var connection = mysql.createConnection({
  host     : 'localhost',
  port     : '3306',
  user     : 'root',
  password : '',
  database : 'appdb'
});
Request headers:
Accept application/json, text/javascript, */*; q=0.01
Accept-Encoding gzip, deflate
Accept-Language en-US,en;q=0.5
Host localhost:1337
Referer http://localhost:1337/
User-Agent Mozilla/5.0 (Windows NT 6.2; WOW64; rv:26.0) Gecko/20100101 Firefox/26.0
X-Requested-With XMLHttpRequest
The error message thrown:
module.js:340
    throw err;
          ^
Error: Cannot find module 'D:\project\hotel\app.js'
    at Function.Module._resolveFilename (module.js:338:15)
    at Function.Module._load (module.js:280:25)
    at Function.Module.runMain (module.js:497:10)
    at startup (node.js:119:16)
    at node.js:902:3
You shouldn't use the port of your XAMPP web server, but the port of your MySQL server (usually 3306).
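As a quick sanity check (assuming XAMPP's bundled MySQL/MariaDB is running with its default settings), you can first confirm that the server accepts connections on 3306 from the command line before debugging the Node.js side:
$ mysql -h 127.0.0.1 -P 3306 -u root appdb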