diff --git a/NGINX-API-Steering/README.md b/NGINX-API-Steering/README.md new file mode 100644 index 0000000..a8156b7 --- /dev/null +++ b/NGINX-API-Steering/README.md @@ -0,0 +1,954 @@ +# NGINX API Steering + +## Description + +This is a sample NGINX Plus API Gateway configuration to publish REST APIs and provide: + +- Authentication based on Java Web Tokens (JWT) +- Authorization based on HTTP method and JWT role match +- Reverse proxying with URL rewriting +- Template-based JSON payload manipulation (client-to-server and server-to-client) +- Template-based JSON payload format validation (mandatory parameters, parameters types) + +An external REST API-enabled backend is used to store JSON service definitions that include authorization and rewriting rules. +The service definition JSON is defined as: + +``` + { + "id": 2, <-- Unique ID + "enabled": true, <-- Flag to enable/disable the definition + "uri": "v1.0/api_post", <-- URI to match against client request + "matchRules": { + "method": "POST", <-- HTTP method to match against client request + "roles": "devops" <-- JWT role to match + }, + "operation": { + "url": "https://api-server-2:5000/echo_data" <-- URL to reverse proxy the client request + }, + "json": { <-- JSON payload manipulation (optional) + "to_server": { <-- client-to-server payload manipulation rules + "set": [ <-- key:value array to add/replace in request JSON payload + { + "field1": "value1" + }, + { + "field2": "value2" + } + ], + "del": [ <-- Array of keys to delete from request JSON payload + "group" + ] + }, + "to_client": { <-- server-to-client payload manipulation rules + "set": [ <-- key:value array to add/replace in response JSON payload + { + "new_response_field": "ADDED" + } + ], + "del": [ <-- Array of keys to delete from response JSON payload + "hostname" + ] + } + }, + "template": { <-- Template for JSON format validation + "name": "", <-- Mandatory parameter, type is string + "age": 0, <-- Mandatory parameter, type is integer + 
"address": { <-- Nested parameters + "street": "", + "city": "" + } + } + } +``` + +The provided sample backend can be queried using the request URI as the lookup key. +The sample backend provides the `jwks.json` endpoint to return the JWT secret. + +## Prerequisites + +- Linux VM with Docker-compose v2.20.3+ (tested on Ubuntu 20.04 and 22.04) or a running Kubernetes cluster +- NGINX Plus certificate and key to build the relevant docker image (tested with NGINX Plus R30-p1 and above) + +## High level architecture + +```mermaid +sequenceDiagram + Client->>NGINX Plus: REST request (with JWT token) + NGINX Plus->>NGINX Plus: JWT Authentication + NGINX Plus->>Source of truth: Service definition JSON request + Source of truth->>NGINX Plus: Service definition JSON reply + NGINX Plus->>NGINX Plus: Authorization + NGINX Plus->>NGINX Plus: Optional JSON request rewriting + NGINX Plus->>Backend: REST request + Backend->>NGINX Plus: REST response + NGINX Plus->>NGINX Plus: Optional JSON response rewriting + NGINX Plus->>Client: Response +``` + +## Deploying this repository + +1. Clone the repository + +``` +git clone https://github.com/fabriziofiorucci/NGINX-API-Steering +``` + +2. cd to the newly created directory + +``` +cd NGINX-API-Steering +``` + +## Running with docker-compose + +1. Run the startup script to build all docker images and spin up the docker-compose deployment. You will need to use a valid NGINX Plus certificate and key to fetch all software packages from the NGINX private registry + +``` +./nginx-api-steering.sh -o start -C /etc/ssl/nginx/nginx-repo.crt -K /etc/ssl/nginx/nginx-repo.key +``` + +2. 
Check running containers: + +``` +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +3f6f53f9786d nginx-api-steering "nginx -g 'daemon of…" 45 seconds ago Up 43 seconds 0.0.0.0:10080->80/tcp, :::10080->80/tcp, 0.0.0.0:20080->8080/tcp, :::20080->8080/tcp nginx +998ddb007210 api-server "python apiserver.py" 45 seconds ago Up 43 seconds 0.0.0.0:5001->5000/tcp, :::5001->5000/tcp api-server-1 +bd13b5e4ecf1 api-server "python apiserver.py" 45 seconds ago Up 43 seconds 0.0.0.0:5002->5000/tcp, :::5002->5000/tcp api-server-2 +46f25f772f63 backend "python backend.py" 45 seconds ago Up 43 seconds 0.0.0.0:10000->5000/tcp, :::10000->5000/tcp backend +``` + +## Running on Kubernetes + +1. Build the docker images: + +``` +./nginx-api-steering.sh -o build -C /etc/ssl/nginx/nginx-repo.crt -K /etc/ssl/nginx/nginx-repo.key +``` + +2. Make sure all images have been correctly built: + +``` +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +api-server latest bc502c140891 2 minutes ago 159MB +backend latest ac783909638e 2 minutes ago 145MB +nginx-api-steering latest 0752ea9e5400 2 minutes ago 33.1MB +``` + +3. Tag and push the docker images: + +``` +docker tag api-server:latest registry.k8s.ie.ff.lan:31005/nginx-api-steering:api-server +docker tag backend:latest registry.k8s.ie.ff.lan:31005/nginx-api-steering:backend +docker tag nginx-api-steering:latest registry.k8s.ie.ff.lan:31005/nginx-api-steering:nginx-api-steering +``` + +``` +docker push registry.k8s.ie.ff.lan:31005/nginx-api-steering:api-server +docker push registry.k8s.ie.ff.lan:31005/nginx-api-steering:backend +docker push registry.k8s.ie.ff.lan:31005/nginx-api-steering:nginx-api-steering +``` + +4. Deploy all manifests: + +``` +./nginx-api-steering.sh -o start-k8s +``` + +5. 
Make sure all relevant objects have been created: + +``` +$ kubectl get all -n nginx-api-steering +NAME READY STATUS RESTARTS AGE +pod/api-server-1-6596f78694-nfjvm 1/1 Running 0 20s +pod/api-server-2-847868565b-5lwk8 1/1 Running 0 20s +pod/backend-74b87464fd-cmc6h 1/1 Running 0 20s +pod/nginx-9cbcb8bcd-hz6xm 1/1 Running 0 20s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/api-server-1 ClusterIP 10.103.76.109 5000/TCP 20s +service/api-server-2 ClusterIP 10.111.202.8 5000/TCP 20s +service/backend ClusterIP 10.107.14.237 5000/TCP 20s +service/nginx ClusterIP 10.97.140.134 80/TCP 20s + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/api-server-1 1/1 1 1 20s +deployment.apps/api-server-2 1/1 1 1 20s +deployment.apps/backend 1/1 1 1 20s +deployment.apps/nginx 1/1 1 1 20s + +NAME DESIRED CURRENT READY AGE +replicaset.apps/api-server-1-6596f78694 1 1 1 20s +replicaset.apps/api-server-2-847868565b 1 1 1 20s +replicaset.apps/backend-74b87464fd 1 1 1 20s +replicaset.apps/nginx-9cbcb8bcd 1 1 1 20s +``` + + +## NGINX Plus dashboard (docker-compose only) + +Using your favourite browser open https://127.0.0.1:20080/dashboard.html + +## Creating JWT tokens + +(see https://www.nginx.com/blog/authenticating-api-clients-jwt-nginx-plus/) + +This repository's backend DB uses a JWT secret defined as + +``` +$ cat jwt/jwks.json +{ + "keys": [ + { + "k":"ZmFudGFzdGljand0", + "kty":"oct", + "kid":"0001" + } + ] +} +``` + +the k field is the generated symmetric key (base64url-encoded) basing on a secret (fantasticjwt in the example). 
The secret can be generated with the following command: + +``` +$ echo -n "fantasticjwt" | base64 | tr '+/' '-_' | tr -d '=' +ZmFudGFzdGljand0 +``` + +Create the test JWT tokens using: + +``` +$ cd jwt +$ ./jwtEncoder.sh +``` + +Two tokens are created: + +``` +jwt.devops - Token with "devops" role +jwt.guest - Token with "guest" role +``` + +The decoded tokens are: + +``` +$ cat jwt.guest | ./jwtDecoder.sh +Header +{ + "typ": "JWT", + "alg": "HS256", + "kid": "0001", + "iss": "Bash JWT Generator", + "iat": 1698145320, + "exp": 1698145321 +} +Payload +{ + "name": "Alice Guest", + "sub": "JWT sub claim", + "iss": "JWT iss claim", + "roles": [ + "guest" + ] +} +Signature is valid +``` + +``` +$ cat jwt.devops | ./jwtDecoder.sh +Header +{ + "typ": "JWT", + "alg": "HS256", + "kid": "0001", + "iss": "Bash JWT Generator", + "iat": 1698145320, + "exp": 1698145321 +} +Payload +{ + "name": "Bob DevOps", + "sub": "JWT sub claim", + "iss": "JWT iss claim", + "roles": [ + "devops" + ] +} +Signature is valid +``` + +## Backend DB test (docker-compose only) + +Backend DB, fetching the JWT secret: + +``` +$ curl -s 127.0.0.1:10000/jwks.json | jq +{ + "keys": [ + { + "k": "ZmFudGFzdGljand0", + "kid": "0001", + "kty": "oct" + } + ] +} +``` + +Backend DB, fetching all keys: + +``` +$ curl -s 127.0.0.1:10000/backend/fetchallkeys | jq +{ + "rules": [ + { + "enabled": true, + "id": 1, + "matchRules": { + "method": "GET", + "roles": "guest" + }, + "operation": { + "url": "https://api-server-1:5000/get_data" + }, + "uri": "v1.0/api_get" + }, + { + "enabled": true, + "id": 2, + "json": { + "to_client": { + "del": [ + "hostname" + ], + "set": [ + { + "new_response_field": "ADDED" + } + ] + }, + "to_server": { + "del": [ + "group" + ], + "set": [ + { + "field1": "value1" + }, + { + "field2": "value2" + } + ] + } + }, + "matchRules": { + "method": "POST", + "roles": "devops" + }, + "operation": { + "url": "https://api-server-2:5000/echo_data" + }, + "uri": "v1.0/api_post" + }, + { + 
"enabled": true, + "id": 3, + "matchRules": { + "method": "POST", + "roles": "devops" + }, + "operation": { + "url": "https://api-server-2:5000/echo_data" + }, + "uri": "v1.0/api_post_no_change" + } + ] +} +``` + +Backend DB, fetching a specific key: + +``` +$ curl -s http://127.0.0.1:10000/backend/fetchkey/v1.0/api_post | jq +{ + "rule": { + "enabled": true, + "id": 2, + "json": { + "to_client": { + "del": [ + "hostname" + ], + "set": [ + { + "new_response_field": "ADDED" + } + ] + }, + "to_server": { + "del": [ + "group" + ], + "set": [ + { + "field1": "value1" + }, + { + "field2": "value2" + } + ] + } + }, + "matchRules": { + "method": "POST", + "roles": "devops" + }, + "operation": { + "url": "https://api-server-2:5000/echo_data" + }, + "uri": "v1.0/api_post" + } +} +``` + +## Direct backend API access (docker-compose only): + +GET test: + +``` +curl -ks -X GET https://127.0.0.1:5001/get_data | jq +``` + +Output: + +``` +{ + "hostname": "be4e709e5957", + "timestamp": "2023-10-24 10:32:55" +} +``` + +POST test: the client payload is echoed back in the `payload` field + +``` +curl -ks -X POST https://127.0.0.1:5001/echo_data -d '{"var":123}' -H "Content-Type: application/json" | jq +``` + +Output: + +``` +{ + "hostname": "be4e709e5957", + "payload": { + "var": 123 + }, + "timestamp": "2023-10-24 10:32:18" +} +``` + +## REST API access test - for docker-compose + +Display NGINX Plus logs: + +``` +docker logs nginx -f +``` + +### Test with valid HTTP method with no JWT token + +``` +curl -X GET -ki http://127.0.0.1:10080/v1.0/api_get +``` + +Output: + +``` +HTTP/1.1 401 Unauthorized +Server: nginx/1.25.1 +Date: Tue, 24 Oct 2023 08:38:39 GMT +Content-Type: text/html +Content-Length: 179 +Connection: keep-alive +WWW-Authenticate: Bearer realm="authentication required" + + +401 Authorization Required + +

401 Authorization Required

+
nginx/1.25.1
+ + +``` + +### Test with valid JWT token, HTTP method and URI + +``` +curl -X GET -ki -H "Authorization: Bearer `cat jwt/jwt.guest`" http://127.0.0.1:10080/v1.0/api_get +``` + +Output: + +``` +HTTP/1.1 200 OK +Server: nginx/1.25.1 +Date: Tue, 24 Oct 2023 10:51:59 GMT +Content-Type: application/json +Connection: keep-alive +Content-Length: 62 + +{"hostname":"6f0e4de1fa9b","timestamp":"2023-10-24 10:51:59"} +``` + +### Test with valid JWT token and invalid HTTP method + +``` +curl -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.guest`" http://127.0.0.1:10080/v1.0/api_get +``` + +Output: + +``` +HTTP/1.1 403 Forbidden +Server: nginx/1.25.1 +Date: Tue, 24 Oct 2023 10:53:29 GMT +Content-Type: text/html +Content-Length: 153 +Connection: keep-alive + + +403 Forbidden + +

403 Forbidden

+
nginx/1.25.1
+ + +``` + +### Test with valid JWT token and invalid role (`guest` instead of `devops`) + +``` +curl -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.guest`" http://127.0.0.1:10080/v1.0/api_post -d '{"username": "john.doe@acme.com"}' -H "Content-Type: application/json" +``` + +Output: + +``` +HTTP/1.1 403 Forbidden +Server: nginx/1.25.1 +Date: Tue, 24 Oct 2023 10:55:17 GMT +Content-Type: text/html +Content-Length: 153 +Connection: keep-alive + + +403 Forbidden + +

403 Forbidden

+
nginx/1.25.1
+ + +``` + +### Same request with a valid JWT token and `devops` role + +``` +curl -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.devops`" http://127.0.0.1:10080/v1.0/api_post -d '{"username": "john.doe@acme.com", "group": "guest"}' -H "Content-Type: application/json" +``` + +Output: + +``` +HTTP/1.1 200 OK +Server: nginx/1.25.1 +Date: Wed, 01 Nov 2023 14:55:06 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive + +{"payload":{"field1":"value1","field2":"value2","username":"john.doe@acme.com"},"timestamp":"2023-11-01 14:59:06","new_response_field":"ADDED"} +``` + +The JSON service definition retrieved from the backend is: + +``` + { + "id": 2, + "enabled": true, + "uri": "v1.0/api_post", + "matchRules": { + "method": "POST", + "roles": "devops" + }, + "operation": { + "url": "https://api-server-2:5000/echo_data" + }, + "json": { + "to_server": { + "set": [ + { + "field1": "value1" + }, + { + "field2": "value2" + } + ], + "del": [ + "group" + ] + }, + "to_client": { + "set": [ + { + "new_response_field": "ADDED" + } + ], + "del": [ + "hostname" + ] + } + } + } +``` + +Client request payload is: + +``` +{"username": "john.doe@acme.com", "group": "guest"} +``` + +Based on the JSON service definition `to_server` section: + +- "field1": "value1" is added +- "field2": "value2" is added +- "group" is removed + +The updated payload: + +``` +{"field1":"value1","field2":"value2","username":"john.doe@acme.com"} +``` + +is sent to the upstream. 
Upstream response is: + +``` +{"hostname":"cf97af39bbd6","payload":{"field1":"value1","field2":"value2","username":"john.doe@acme.com"},"timestamp":"2023-11-01 14:59:06"} +``` + +The response payload is updated based on the JSON service definition `to_client` section: + +- "new_response_field": "ADDED" is added +- "hostname" is removed + +The resulting payload is returned to the client as: + +``` +{"payload":{"field1":"value1","field2":"value2","username":"john.doe@acme.com"},"timestamp":"2023-11-01 14:59:06","new_response_field":"ADDED"} +``` + +NGINX logs: + +``` +$ docker logs nginx -f +2023/11/01 14:59:06 [warn] 6#6: *9 js: --- CLIENT REQUEST --------------------------- +2023/11/01 14:59:06 [warn] 6#6: *9 js: Client[10.5.0.1] Method[POST] Host[127.0.0.1:10080] URI [/v1.0/api_post] Body[{"username": "john.doe@acme.com", "group": "guest"}] +2023/11/01 14:59:06 [warn] 6#6: *9 js: Subrequest [/dbQuery/backend/fetchkey/v1.0/api_post] +2023/11/01 14:59:06 [warn] 6#6: *9 js: Rule found: URI[/dbQuery/backend/fetchkey/v1.0/api_post] status[200] body[{"rule":{"enabled":true,"id":2,"json":{"to_client":{"del":["hostname"],"set":[{"new_response_field":"ADDED"}]},"to_server":{"del":["group"],"set":[{"field1":"value1"},{"field2":"value2"}]}},"matchRules":{"method":"POST","roles":"devops"},"operation":{"url":"https://api-server-2:5000/echo_data"},"uri":"v1.0/api_post"}} +] +2023/11/01 14:59:06 [warn] 6#6: *9 js: Rewriting request [127.0.0.1:10080/v1.0/api_post] -> [https://api-server-2:5000/echo_data] +2023/11/01 14:59:06 [warn] 6#6: *9 js: --- Checking authorization +2023/11/01 14:59:06 [warn] 6#6: *9 js: - HTTP method received [POST] -> needed [POST] +2023/11/01 14:59:06 [warn] 6#6: *9 js: - JWT roles received [devops] -> needed [devops] +2023/11/01 14:59:06 [warn] 6#6: *9 js: --- Authorization successful +2023/11/01 14:59:06 [warn] 6#6: *9 js: --- JSON payload client -> server : being updated +2023/11/01 14:59:06 [warn] 6#6: *9 js: Updating JSON payload 
[{"username":"john.doe@acme.com","group":"guest"}] with template [{"del":["group"],"set":[{"field1":"value1"},{"field2":"value2"}]}] +2023/11/01 14:59:06 [warn] 6#6: *9 js: - Updating [field1 = value1] +2023/11/01 14:59:06 [warn] 6#6: *9 js: - Updating [field2 = value2] +2023/11/01 14:59:06 [warn] 6#6: *9 js: - Deleting [group] +2023/11/01 14:59:06 [warn] 6#6: *9 js: Done updating JSON payload [{"username":"john.doe@acme.com","field1":"value1","field2":"value2"}] +2023/11/01 14:59:06 [warn] 6#6: *9 js: --- Proxying request to upstream +2023/11/01 14:59:06 [warn] 6#6: *9 js: --- Upstream returned HTTP [200] payload [{"hostname":"cf97af39bbd6","payload":{"field1":"value1","field2":"value2","username":"john.doe@acme.com"},"timestamp":"2023-11-01 14:59:06"} +] +2023/11/01 14:59:06 [warn] 6#6: *9 js: --- JSON payload server -> client : being updated +2023/11/01 14:59:06 [warn] 6#6: *9 js: Updating JSON payload [{"hostname":"cf97af39bbd6","payload":{"field1":"value1","field2":"value2","username":"john.doe@acme.com"},"timestamp":"2023-11-01 14:59:06"}] with template [{"del":["hostname"],"set":[{"new_response_field":"ADDED"}]}] +2023/11/01 14:59:06 [warn] 6#6: *9 js: - Updating [new_response_field = ADDED] +2023/11/01 14:59:06 [warn] 6#6: *9 js: - Deleting [hostname] +2023/11/01 14:59:06 [warn] 6#6: *9 js: Done updating JSON payload [{"payload":{"field1":"value1","field2":"value2","username":"john.doe@acme.com"},"timestamp":"2023-11-01 14:59:06","new_response_field":"ADDED"}] +``` + +### Test with no payload rewriting + +``` +curl -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.devops`" http://127.0.0.1:10080/v1.0/api_post_no_change -d '{"username": "john.doe@acme.com", "group": "guest"}' -H "Content-Type: application/json" +``` + +Output: + +``` +HTTP/1.1 200 OK +Server: nginx/1.25.1 +Date: Wed, 01 Nov 2023 14:22:47 GMT +Content-Type: application/octet-stream +Transfer-Encoding: chunked +Connection: keep-alive + 
+{"hostname":"b93ecf5e10c5","payload":{"group":"guest","username":"john.doe@acme.com"},"timestamp":"2023-11-01 14:22:47"} +``` + +NGINX logs: + +``` +$ docker logs nginx -f +2023/11/01 14:22:47 [warn] 7#7: *13 js: --- CLIENT REQUEST --------------------------- +2023/11/01 14:22:47 [warn] 7#7: *13 js: Client[10.5.0.1] Method[POST] Host[127.0.0.1:10080] URI [/v1.0/api_post_no_change] Body[{"username": "john.doe@acme.com", "group": "guest"}] +2023/11/01 14:22:47 [warn] 7#7: *13 js: Subrequest [/dbQuery/backend/fetchkey/v1.0/api_post_no_change] +2023/11/01 14:22:47 [warn] 7#7: *13 js: Rule found: URI[/dbQuery/backend/fetchkey/v1.0/api_post_no_change] status[200] body[{"rule":{"enabled":true,"id":3,"matchRules":{"method":"POST","roles":"devops"},"operation":{"url":"https://api-server-2:5000/echo_data"},"uri":"v1.0/api_post_no_change"}} +] +2023/11/01 14:22:47 [warn] 7#7: *13 js: Rewriting request [127.0.0.1:10080/v1.0/api_post_no_change] -> [https://api-server-2:5000/echo_data] +2023/11/01 14:22:47 [warn] 7#7: *13 js: --- Checking authorization +2023/11/01 14:22:47 [warn] 7#7: *13 js: - HTTP method received [POST] -> needed [POST] +2023/11/01 14:22:47 [warn] 7#7: *13 js: - JWT roles received [devops] -> needed [devops] +2023/11/01 14:22:47 [warn] 7#7: *13 js: --- Authorization successful +2023/11/01 14:22:47 [warn] 7#7: *13 js: --- JSON payload client -> server : no changes +2023/11/01 14:22:47 [warn] 7#7: *13 js: --- Proxying request to upstream +2023/11/01 14:22:47 [warn] 7#7: *13 js: --- Upstream returned HTTP [200] payload [{"hostname":"b93ecf5e10c5","payload":{"group":"guest","username":"john.doe@acme.com"},"timestamp":"2023-11-01 14:22:47"} +] +2023/11/01 14:22:47 [warn] 7#7: *13 js: --- JSON payload server -> client : no changes +10.5.0.1 - - [01/Nov/2023:14:22:47 +0000] "POST /v1.0/api_post_no_change HTTP/1.1" 200 132 "-" "curl/7.68.0" "-" +2023/11/01 14:22:47 [info] 7#7: *13 client 10.5.0.1 closed keepalive connection +``` + +### Test with JSON payload checked 
against template + +``` +curl -w '\n' -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.guest`" http://127.0.0.1:10080/v1.0/template_test -d ' +{ + "name": "John", + "age": 30, + "address": { + "street": "123 Main St", + "city": "New York" + } +} +' -H "Content-Type: application/json" +``` + +The JSON service definition retrieved from the backend is: + +``` + { + "id": 4, + "enabled": true, + "uri": "v1.0/template_test", + "matchRules": { + "method": "POST", + "roles": "guest" + }, + "operation": { + "url": "https://api-server-2:5000/echo_data" + }, + "template": { + "name": "", + "age": 0, + "address": { + "street": "", + "city": "" + } + } + } +``` + +Output: + +``` +HTTP/1.1 200 OK +Server: nginx/1.25.3 +Date: Thu, 28 Mar 2024 16:38:41 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive + +{"hostname":"6c8c15e6b178","payload":{"address":{"city":"New York","street":"123 Main St"},"age":30,"name":"John"},"timestamp":"2024-03-28 16:38:41"} +``` + +NGINX logs: + +``` +2024/03/28 17:13:33 [warn] 6#6: *45 js: --- CLIENT REQUEST --------------------------- +2024/03/28 17:13:33 [warn] 6#6: *45 js: Client[10.5.0.1] Method[POST] Host[127.0.0.1:10080] URI [/v1.0/template_test] Body[ +{ + "name": "John", + "age": 30, + "address": { + "street": "123 Main St", + "city": "New York" + } +} +] +2024/03/28 17:13:33 [warn] 6#6: *45 js: Subrequest [/dbQuery/backend/fetchkey/v1.0/template_test] +2024/03/28 17:13:33 [warn] 6#6: *45 js: Rule found: URI[/dbQuery/backend/fetchkey/v1.0/template_test] status[200] body[{"rule":{"enabled":true,"id":4,"matchRules":{"method":"POST","roles":"guest"},"operation":{"url":"https://api-server-2:5000/echo_data"},"template":{"address":{"city":"","street":""},"age":0,"name":""},"uri":"v1.0/template_test"}} +] +2024/03/28 17:13:33 [warn] 6#6: *45 js: Rewriting request [127.0.0.1:10080/v1.0/template_test] -> [https://api-server-2:5000/echo_data] +2024/03/28 17:13:33 [warn] 6#6: *45 js: --- Checking authorization 
+2024/03/28 17:13:33 [warn] 6#6: *45 js: - HTTP method received [POST] -> needed [POST] +2024/03/28 17:13:33 [warn] 6#6: *45 js: - JWT roles received [guest] -> needed [guest] +2024/03/28 17:13:33 [warn] 6#6: *45 js: --- Authorization successful +2024/03/28 17:13:33 [warn] 6#6: *45 js: +-- JSON template validation [{"address":{"city":"","street":""},"age":0,"name":""}] +2024/03/28 17:13:33 [warn] 6#6: *45 js: |-- Checking JSON payload [[object Object]] +2024/03/28 17:13:33 [warn] 6#6: *45 js: |-- Checking JSON payload [[object Object]] +2024/03/28 17:13:33 [warn] 6#6: *45 js: |---- Property [city] ok +2024/03/28 17:13:33 [warn] 6#6: *45 js: |---- Property [street] ok +2024/03/28 17:13:33 [warn] 6#6: *45 js: |---- Property [address] ok +2024/03/28 17:13:33 [warn] 6#6: *45 js: |---- Property [age] ok +2024/03/28 17:13:33 [warn] 6#6: *45 js: |---- Property [name] ok +2024/03/28 17:13:33 [warn] 6#6: *45 js: +-- JSON template validation successful +2024/03/28 17:13:33 [warn] 6#6: *45 js: --- JSON payload client -> server : no changes +2024/03/28 17:13:33 [warn] 6#6: *45 js: --- Proxying request to upstream +2024/03/28 17:13:33 [warn] 6#6: *45 js: --- Upstream returned HTTP [200] payload [{"hostname":"6c8c15e6b178","payload":{"address":{"city":"New York","street":"123 Main St"},"age":30,"name":"John"},"timestamp":"2024-03-28 17:13:33"} +] +2024/03/28 17:13:33 [warn] 6#6: *45 js: --- JSON payload server -> client : no changes +2024/03/28 17:13:33 [info] 6#6: *45 client 10.5.0.1 closed keepalive connection +``` + +### Test with invalid JSON payload check against template + +``` +curl -w '\n' -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.guest`" http://127.0.0.1:10080/v1.0/template_test -d ' +{ + "name": "John", + "address": { + "street": "123 Main St", + "city": "New York" + } +} +' -H "Content-Type: application/json" +``` + +Output: + +``` +HTTP/1.1 422 +Server: nginx/1.25.3 +Date: Thu, 28 Mar 2024 17:15:29 GMT +Content-Length: 0 +Connection: keep-alive +``` + +NGINX 
logs: + +``` +2024/03/28 17:15:29 [warn] 6#6: *49 js: --- CLIENT REQUEST --------------------------- +2024/03/28 17:15:29 [warn] 6#6: *49 js: Client[10.5.0.1] Method[POST] Host[127.0.0.1:10080] URI [/v1.0/template_test] Body[ +{ + "name": "John", + "address": { + "street": "123 Main St", + "city": "New York" + } +} +] +2024/03/28 17:15:29 [warn] 6#6: *49 js: Subrequest [/dbQuery/backend/fetchkey/v1.0/template_test] +2024/03/28 17:15:29 [warn] 6#6: *49 js: Rule found: URI[/dbQuery/backend/fetchkey/v1.0/template_test] status[200] body[{"rule":{"enabled":true,"id":4,"matchRules":{"method":"POST","roles":"guest"},"operation":{"url":"https://api-server-2:5000/echo_data"},"template":{"address":{"city":"","street":""},"age":0,"name":""},"uri":"v1.0/template_test"}} +] +2024/03/28 17:15:29 [warn] 6#6: *49 js: Rewriting request [127.0.0.1:10080/v1.0/template_test] -> [https://api-server-2:5000/echo_data] +2024/03/28 17:15:29 [warn] 6#6: *49 js: --- Checking authorization +2024/03/28 17:15:29 [warn] 6#6: *49 js: - HTTP method received [POST] -> needed [POST] +2024/03/28 17:15:29 [warn] 6#6: *49 js: - JWT roles received [guest] -> needed [guest] +2024/03/28 17:15:29 [warn] 6#6: *49 js: --- Authorization successful +2024/03/28 17:15:29 [warn] 6#6: *49 js: +-- JSON template validation [{"address":{"city":"","street":""},"age":0,"name":""}] +2024/03/28 17:15:29 [warn] 6#6: *49 js: |-- Checking JSON payload [[object Object]] +2024/03/28 17:15:29 [warn] 6#6: *49 js: |-- Checking JSON payload [[object Object]] +2024/03/28 17:15:29 [warn] 6#6: *49 js: |---- Property [city] ok +2024/03/28 17:15:29 [warn] 6#6: *49 js: |---- Property [street] ok +2024/03/28 17:15:29 [warn] 6#6: *49 js: |---- Property [address] ok +2024/03/28 17:15:29 [warn] 6#6: *49 js: |---- Property [age] missing +2024/03/28 17:15:29 [warn] 6#6: *49 js: +-- JSON template validation failed +2024/03/28 17:15:29 [info] 6#6: *49 client 10.5.0.1 closed keepalive connection +``` + +## REST API access test - for Kubernetes 
+ +Note: the example FQDN `nginx-api-steering.k8s.f5.ff.lan` is used. This is defined in the `kubernetes.yaml` file. + +Display NGINX Plus logs: + +``` +kubectl logs -l app=nginx -n nginx-api-steering -f +``` + +Test with valid HTTP method with no JWT token (returns 401): + +``` +curl -X GET -ki http://nginx-api-steering.k8s.f5.ff.lan/v1.0/api_get +``` + +Test with valid JWT token, HTTP method and URI (returns 200): + +``` +curl -X GET -ki -H "Authorization: Bearer `cat jwt/jwt.guest`" http://nginx-api-steering.k8s.f5.ff.lan/v1.0/api_get +``` + +Test with valid JWT token and invalid HTTP method (return 403): + +``` +curl -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.guest`" http://nginx-api-steering.k8s.f5.ff.lan/v1.0/api_get +``` + +Test with valid JWT token and invalid `guest` role (returns 403): + +``` +curl -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.guest`" http://nginx-api-steering.k8s.f5.ff.lan/v1.0/api_post -d '{"username": "john.doe@acme.com"}' -H "Content-Type: application/json" +``` + +Test with valid JWT token and valid `devops` role (returns 200): + +``` +curl -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.devops`" http://nginx-api-steering.k8s.f5.ff.lan/v1.0/api_post -d '{"username": "john.doe@acme.com", "group": "guest"}' -H "Content-Type: application/json" +``` + +Test with no payload rewriting (returns 200): + +``` +curl -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.devops`" http://nginx-api-steering.k8s.f5.ff.lan/v1.0/api_post_no_change -d '{"username": "john.doe@acme.com", "group": "guest"}' -H "Content-Type: application/json" +``` + +Test with JSON payload checked against template (returns 200): + +``` +curl -w '\n' -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.guest`" http://nginx-api-steering.k8s.f5.ff.lan/v1.0/template_test -d ' +{ + "name": "John", + "age": 30, + "address": { + "street": "123 Main St", + "city": "New York" + } +} +' -H "Content-Type: application/json" +``` + +Test with invalid JSON payload check 
against template (returns 422): + +``` +curl -w '\n' -X POST -ki -H "Authorization: Bearer `cat jwt/jwt.guest`" http://nginx-api-steering.k8s.f5.ff.lan/v1.0/template_test -d ' +{ + "name": "John", + "address": { + "street": "123 Main St", + "city": "New York" + } +} +' -H "Content-Type: application/json" +``` + +## Deployment removal + +For docker-compose: + +``` +./nginx-api-steering.sh -o stop +``` + +For Kubernetes: + +``` +./nginx-api-steering.sh -o stop-k8s +``` diff --git a/NGINX-API-Steering/apiserver/Dockerfile b/NGINX-API-Steering/apiserver/Dockerfile new file mode 100644 index 0000000..3e664d4 --- /dev/null +++ b/NGINX-API-Steering/apiserver/Dockerfile @@ -0,0 +1,9 @@ +FROM python:3.12-slim + +WORKDIR /app +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt +COPY apiserver.py . + +EXPOSE 5000 +CMD ["python", "apiserver.py"] diff --git a/NGINX-API-Steering/apiserver/apiserver.py b/NGINX-API-Steering/apiserver/apiserver.py new file mode 100755 index 0000000..3bd0c7d --- /dev/null +++ b/NGINX-API-Steering/apiserver/apiserver.py @@ -0,0 +1,32 @@ +#!/usr/bin/python3 + +from flask import Flask, request, jsonify +from datetime import datetime +import socket + +app = Flask(__name__) + +# curl -ks -X GET https://127.0.0.1:5000/echo_data | jq +@app.route("/get_data", methods=["GET"]) +def get_data(): + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + hostname = socket.gethostname() + + data = { + "timestamp": timestamp, + "hostname": hostname + } + + return jsonify(data) + +# curl -ks -X POST https://127.0.0.1:5000/echo_data -d '{"var":123}' -H "Content-Type: application/json" +@app.route("/echo_data", methods=["POST"]) +def echo_data(): + payload = request.get_json() if request.get_json() != None else '' + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + hostname = socket.gethostname() + + return jsonify({"payload": payload, "hostname": hostname, "timestamp": timestamp}) + +if __name__ == "__main__": + 
app.run(ssl_context="adhoc",host="0.0.0.0", port=5000) diff --git a/NGINX-API-Steering/apiserver/requirements.txt b/NGINX-API-Steering/apiserver/requirements.txt new file mode 100644 index 0000000..74a05ec --- /dev/null +++ b/NGINX-API-Steering/apiserver/requirements.txt @@ -0,0 +1,2 @@ +Flask +cryptography diff --git a/NGINX-API-Steering/backend/Dockerfile b/NGINX-API-Steering/backend/Dockerfile new file mode 100644 index 0000000..6b67bc7 --- /dev/null +++ b/NGINX-API-Steering/backend/Dockerfile @@ -0,0 +1,9 @@ +FROM python:3.12-slim + +WORKDIR /app +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt +COPY backend.py . + +EXPOSE 5000 +CMD ["python", "backend.py"] diff --git a/NGINX-API-Steering/backend/backend.py b/NGINX-API-Steering/backend/backend.py new file mode 100755 index 0000000..d60c73f --- /dev/null +++ b/NGINX-API-Steering/backend/backend.py @@ -0,0 +1,31 @@ +#!/usr/bin/python3 + +import json +from flask import Flask, jsonify, abort, make_response, request + +app = Flask(__name__) + +with open('db.json') as db: + rules = json.load(db) + +@app.route('/backend/fetchkey/', methods=['GET']) +def get_key(uri): + rule = [rule for rule in rules if rule['uri'] == uri] + if len(rule) == 0: + abort(404) + return jsonify({'rule': rule[0]}) + +@app.route('/backend/fetchallkeys', methods=['GET']) +def get_all_keys(): + return jsonify({'rules': rules}) + +@app.route('/jwks.json', methods=['GET']) +def get_jwks(): + return jsonify({"keys": [{ "k":"ZmFudGFzdGljand0", "kty":"oct", "kid":"0001" }]}) + +@app.errorhandler(404) +def not_found(error): + return make_response(jsonify({'error': 'Not found'}), 404) + +if __name__ == '__main__': + app.run(host='0.0.0.0') diff --git a/NGINX-API-Steering/backend/db-dockercompose.json b/NGINX-API-Steering/backend/db-dockercompose.json new file mode 100644 index 0000000..5774799 --- /dev/null +++ b/NGINX-API-Steering/backend/db-dockercompose.json @@ -0,0 +1,83 @@ +[ + { + "id": 1, + "enabled": true, + "uri": 
"v1.0/api_get", + "matchRules": { + "method": "GET", + "roles": "guest" + }, + "operation": { + "url": "https://api-server-1:5000/get_data" + } + }, + { + "id": 2, + "enabled": true, + "uri": "v1.0/api_post", + "matchRules": { + "method": "POST", + "roles": "devops" + }, + "operation": { + "url": "https://api-server-2:5000/echo_data" + }, + "json": { + "to_server": { + "set": [ + { + "field1": "value1" + }, + { + "field2": "value2" + } + ], + "del": [ + "group" + ] + }, + "to_client": { + "set": [ + { + "new_response_field": "ADDED" + } + ], + "del": [ + "hostname" + ] + } + } + }, + { + "id": 3, + "enabled": true, + "uri": "v1.0/api_post_no_change", + "matchRules": { + "method": "POST", + "roles": "devops" + }, + "operation": { + "url": "https://api-server-2:5000/echo_data" + } + }, + { + "id": 4, + "enabled": true, + "uri": "v1.0/template_test", + "matchRules": { + "method": "POST", + "roles": "guest" + }, + "operation": { + "url": "https://api-server-2:5000/echo_data" + }, + "template": { + "name": "", + "age": 0, + "address": { + "street": "", + "city": "" + } + } + } +] diff --git a/NGINX-API-Steering/backend/db-k8s.json b/NGINX-API-Steering/backend/db-k8s.json new file mode 100644 index 0000000..00f659c --- /dev/null +++ b/NGINX-API-Steering/backend/db-k8s.json @@ -0,0 +1,83 @@ +[ + { + "id": 1, + "enabled": true, + "uri": "v1.0/api_get", + "matchRules": { + "method": "GET", + "roles": "guest" + }, + "operation": { + "url": "https://api-server-1.nginx-api-steering.svc.cluster.local:5000/get_data" + } + }, + { + "id": 2, + "enabled": true, + "uri": "v1.0/api_post", + "matchRules": { + "method": "POST", + "roles": "devops" + }, + "operation": { + "url": "https://api-server-2.nginx-api-steering.svc.cluster.local:5000/echo_data" + }, + "json": { + "to_server": { + "set": [ + { + "field1": "value1" + }, + { + "field2": "value2" + } + ], + "del": [ + "group" + ] + }, + "to_client": { + "set": [ + { + "new_response_field": "ADDED" + } + ], + "del": [ + "hostname" + 
] + } + } + }, + { + "id": 3, + "enabled": true, + "uri": "v1.0/api_post_no_change", + "matchRules": { + "method": "POST", + "roles": "devops" + }, + "operation": { + "url": "https://api-server-2.nginx-api-steering.svc.cluster.local:5000/echo_data" + } + }, + { + "id": 4, + "enabled": true, + "uri": "v1.0/template_test", + "matchRules": { + "method": "POST", + "roles": "guest" + }, + "operation": { + "url": "https://api-server-2.nginx-api-steering.svc.cluster.local:5000/echo_data" + }, + "template": { + "name": "", + "age": 0, + "address": { + "street": "", + "city": "" + } + } + } +] diff --git a/NGINX-API-Steering/backend/requirements.txt b/NGINX-API-Steering/backend/requirements.txt new file mode 100644 index 0000000..e3e9a71 --- /dev/null +++ b/NGINX-API-Steering/backend/requirements.txt @@ -0,0 +1 @@ +Flask diff --git a/NGINX-API-Steering/docker-compose.yaml b/NGINX-API-Steering/docker-compose.yaml new file mode 100644 index 0000000..ba31d2b --- /dev/null +++ b/NGINX-API-Steering/docker-compose.yaml @@ -0,0 +1,80 @@ +version: "3.9" + +services: + backend: + container_name: backend + image: backend + build: + context: ./backend + dockerfile: Dockerfile + ports: + - 10000:5000 + networks: + lab-network: + ipv4_address: 10.5.0.10 + volumes: + - ./backend/db-dockercompose.json:/app/db.json:ro + + api-server-1: + container_name: api-server-1 + image: api-server + build: + context: ./apiserver + dockerfile: Dockerfile + ports: + - 5001:5000 + networks: + lab-network: + ipv4_address: 10.5.0.11 + + api-server-2: + container_name: api-server-2 + image: api-server + build: + context: ./apiserver + dockerfile: Dockerfile + ports: + - 5002:5000 + networks: + lab-network: + ipv4_address: 10.5.0.12 + + nginx: + container_name: nginx + image: nginx-api-steering + build: + context: ./nginx + dockerfile: Dockerfile + secrets: + - nginx-crt + - nginx-key + ports: + # Clients access to published REST API + - 10080:80 + # Admin access to NGINX Plus API and Dashboard + - 
20080:8080 + networks: + lab-network: + ipv4_address: 10.5.0.20 + volumes: + - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro + - ./nginx/api.conf:/etc/nginx/conf.d/api.conf:ro + - ./nginx/steering.js:/etc/nginx/conf.d/steering.js:ro + - ./nginx/steering.conf-dockercompose:/etc/nginx/conf.d/steering.conf:ro + - /dev/null:/etc/nginx/conf.d/default.conf:ro + +secrets: + nginx-crt: + name: nginx-crt + file: ${NGINX_CERT} + nginx-key: + name: nginx-key + file: ${NGINX_KEY} + +networks: + lab-network: + driver: bridge + ipam: + config: + - subnet: 10.5.0.0/24 + gateway: 10.5.0.1 diff --git a/NGINX-API-Steering/jwt/jwks.json b/NGINX-API-Steering/jwt/jwks.json new file mode 100644 index 0000000..68139b8 --- /dev/null +++ b/NGINX-API-Steering/jwt/jwks.json @@ -0,0 +1,9 @@ +{ + "keys": [ + { + "k":"ZmFudGFzdGljand0", + "kty":"oct", + "kid":"0001" + } + ] +} diff --git a/NGINX-API-Steering/jwt/jwtDecoder.sh b/NGINX-API-Steering/jwt/jwtDecoder.sh new file mode 100755 index 0000000..e48d4c6 --- /dev/null +++ b/NGINX-API-Steering/jwt/jwtDecoder.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +# +# JWT Decoder Bash Script +# + +secret='fantasticjwt' + +base64_encode() +{ + declare input=${1:-$(2& echo "fatal error. 
base64 string is unexepcted length" + elif [[ $remainder -eq 2 || $remainder -eq 3 ]]; + then + input="${input}$(for i in `seq $((4 - $remainder))`; do printf =; done)" + fi + printf '%s' "${input}" | base64 --decode +} + +verify_signature() +{ + declare header_and_payload=${1} + expected=$(echo "${header_and_payload}" | hmacsha256_encode | base64_encode) + actual=${2} + + if [ "${expected}" = "${actual}" ] + then + echo "Signature is valid" + else + echo "Signature is NOT valid" + fi +} + +hmacsha256_encode() +{ + declare input=${1:-$( jwt.guest +echo "${header_payload_devops}.${signature_devops}" > jwt.devops diff --git a/NGINX-API-Steering/kubernetes.yaml b/NGINX-API-Steering/kubernetes.yaml new file mode 100644 index 0000000..057e254 --- /dev/null +++ b/NGINX-API-Steering/kubernetes.yaml @@ -0,0 +1,212 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: backend + namespace: nginx-api-steering + labels: + app: backend +spec: + selector: + matchLabels: + app: backend + replicas: 1 + template: + metadata: + labels: + app: backend + spec: + containers: + - name: backend + image: registry.k8s.ie.ff.lan:31005/nginx-api-steering:backend + ports: + - name: http + containerPort: 5000 + volumeMounts: + - name: backend-db-volume + mountPath: /app/db.json + subPath: db.json + volumes: + - name: backend-db-volume + configMap: + name: backend-db + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: api-server-1 + namespace: nginx-api-steering + labels: + app: api-server-1 +spec: + selector: + matchLabels: + app: api-server-1 + replicas: 1 + template: + metadata: + labels: + app: api-server-1 + spec: + containers: + - name: api-server-1 + image: registry.k8s.ie.ff.lan:31005/nginx-api-steering:api-server + ports: + - name: http + containerPort: 5000 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: api-server-2 + namespace: nginx-api-steering + labels: + app: api-server-2 +spec: + selector: + matchLabels: + app: api-server-2 + replicas: 1 + 
template: + metadata: + labels: + app: api-server-2 + spec: + containers: + - name: api-server-2 + image: registry.k8s.ie.ff.lan:31005/nginx-api-steering:api-server + ports: + - name: http + containerPort: 5000 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + namespace: nginx-api-steering + labels: + app: nginx +spec: + selector: + matchLabels: + app: nginx + replicas: 1 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: backend + image: registry.k8s.ie.ff.lan:31005/nginx-api-steering:nginx-api-steering + ports: + - name: http + containerPort: 80 + volumeMounts: + - name: nginx-conf-volume + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: nginx-conf-volume + mountPath: /etc/nginx/conf.d/api.conf + subPath: api.conf + - name: nginx-conf-volume + mountPath: /etc/nginx/conf.d/steering.js + subPath: steering.js + - name: nginx-conf-volume + mountPath: /etc/nginx/conf.d/steering.conf + subPath: steering.conf + - name: nginx-conf-volume + mountPath: /etc/nginx/conf.d/default.conf + subPath: default.conf + volumes: + - name: nginx-conf-volume + configMap: + name: nginx-conf + +--- +apiVersion: v1 +kind: Service +metadata: + name: backend + namespace: nginx-api-steering + labels: + app: backend +spec: + ports: + - name: http + port: 5000 + selector: + app: backend + type: ClusterIP + +--- +apiVersion: v1 +kind: Service +metadata: + name: api-server-1 + namespace: nginx-api-steering + labels: + app: api-server-1 +spec: + ports: + - name: http + port: 5000 + selector: + app: api-server-1 + type: ClusterIP + +--- +apiVersion: v1 +kind: Service +metadata: + name: api-server-2 + namespace: nginx-api-steering + labels: + app: api-server-2 +spec: + ports: + - name: http + port: 5000 + selector: + app: api-server-2 + type: ClusterIP + +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx + namespace: nginx-api-steering + labels: + app: nginx +spec: + ports: + - name: http + port: 80 + selector: + app: 
nginx + type: ClusterIP + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: nginx-api-steering + namespace: nginx-api-steering + labels: + app: nginx-api-steering +spec: + ingressClassName: nginx + rules: + - host: nginx-api-steering.k8s.f5.ff.lan + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: nginx + port: + number: 80 diff --git a/NGINX-API-Steering/nginx-api-steering.sh b/NGINX-API-Steering/nginx-api-steering.sh new file mode 100755 index 0000000..135bf35 --- /dev/null +++ b/NGINX-API-Steering/nginx-api-steering.sh @@ -0,0 +1,109 @@ +#!/bin/bash + +PROJECT_NAME=nginx-api-steering +DOCKERCOMPOSE=docker-compose.yaml +NAMESPACE=nginx-api-steering + +BANNER="NGINX API Steering lab - https://github.com/fabriziofiorucci/NGINX-API-Steering\n\n +=== Usage:\n\n +$0 [options]\n\n +=== Options:\n\n +-h\t\t\t\t\t\t- This help\n +-o [start|stop|start-k8s|stop-k8s|build]\t- Action\n +-C [file.crt]\t\t\t\t\t- Certificate file to pull packages from the official NGINX repository\n +-K [file.key]\t\t\t\t\t- Key file to pull packages from the official NGINX repository\n\n +=== Examples:\n\n +Build docker images:\n +\t$0 -o build -C /etc/ssl/nginx/nginx-repo.crt -K /etc/ssl/nginx/nginx-repo.key\n\n +\tNote: Images are built as\n +\t- nginx-api-steering:latest\n +\t- api-server:latest\n +\t- backend:latest\n\n +docker-compose lab start (build images if needed):\n +\t$0 -o start -C /etc/ssl/nginx/nginx-repo.crt -K /etc/ssl/nginx/nginx-repo.key\n\n +docker-compose lab stop:\n +\t$0 -o stop\n\n +Kubernetes lab start:\n +\t$0 -o start-k8s\n\n +\tPrerequisites:\n\n +\t1. Docker images must be built using the \"build\" action\n +\t2. Docker images must be pushed to a local registry\n +\t3. 
Docker images in the local registry names must be referenced in the kubernetes.yaml file\n +\tNote: The lab is deployed in the \"$NAMESPACE\" namespace\n\n +Kubernetes lab stop:\n\n +\tNote: The \"$NAMESPACE\" namespace is deleted\n\n +\t$0 -o stop-k8s\n +" + + +while getopts 'ho:C:K:' OPTION +do + case "$OPTION" in + h) + echo -e $BANNER + exit + ;; + o) + MODE=$OPTARG + ;; + C) + export NGINX_CERT=$OPTARG + ;; + K) + export NGINX_KEY=$OPTARG + ;; + esac +done + +if [ -z "$1" ] || [ -z "${MODE}" ] +then + echo -e $BANNER + exit +fi + +case $MODE in + 'start'|'build') + if [ -z "${NGINX_CERT}" ] || [ -z "${NGINX_KEY}" ] + then + echo "Missing NGINX Plus certificate/key" + exit + fi + + if [ "$MODE" = "start" ] + then + DOCKER_BUILDKIT=1 docker-compose -p $PROJECT_NAME -f $DOCKERCOMPOSE up -d --remove-orphans + else + DOCKER_BUILDKIT=1 docker-compose -p $PROJECT_NAME -f $DOCKERCOMPOSE build + fi + ;; + 'stop') + export NGINX_CERT="x" + export NGINX_KEY="x" + docker-compose -p $PROJECT_NAME -f $DOCKERCOMPOSE down + ;; + 'start-k8s') + echo "Deploying on Kubernetes namespace $NAMESPACE" + kubectl create ns $NAMESPACE + + kubectl create configmap backend-db -n $NAMESPACE \ + --from-file=db.json=backend/db-k8s.json + + kubectl create configmap nginx-conf -n $NAMESPACE \ + --from-file=nginx.conf=nginx/nginx.conf \ + --from-file=api.conf=nginx/api.conf \ + --from-file=steering.js=nginx/steering.js \ + --from-file=steering.conf=nginx/steering.conf-k8s \ + --from-file=default.conf=/dev/null + + kubectl apply -n $NAMESPACE -f kubernetes.yaml + + ;; + 'stop-k8s') + echo "Removing Kubernetes namespace $NAMESPACE" + kubectl delete ns $NAMESPACE + ;; + *) + echo -e $BANNER + exit + ;; +esac diff --git a/NGINX-API-Steering/nginx/Dockerfile b/NGINX-API-Steering/nginx/Dockerfile new file mode 100644 index 0000000..457d36a --- /dev/null +++ b/NGINX-API-Steering/nginx/Dockerfile @@ -0,0 +1,66 @@ +FROM alpine:3.19 + +LABEL maintainer="NGINX Docker Maintainers " + +# Define NGINX 
versions for NGINX Plus and NGINX Plus modules +# Uncomment this block and the versioned nginxPackages in the main RUN +# instruction to install a specific release +# ENV NGINX_VERSION 29 +# ENV NJS_VERSION 0.7.12 +# ENV PKG_RELEASE 1 + +# Download certificate and key from the customer portal (https://account.f5.com) +# and copy to the build context +RUN --mount=type=secret,id=nginx-crt,dst=cert.pem \ + --mount=type=secret,id=nginx-key,dst=cert.key \ + set -x \ +# Create nginx user/group first, to be consistent throughout Docker variants + && addgroup -g 101 -S nginx \ + && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \ +# Install the latest release of NGINX Plus and/or NGINX Plus modules +# Uncomment individual modules if necessary +# Use versioned packages over defaults to specify a release + && nginxPackages=" \ + nginx-plus \ + # nginx-plus=${NGINX_VERSION}-r${PKG_RELEASE} \ + # nginx-plus-module-xslt \ + # nginx-plus-module-xslt=${NGINX_VERSION}-r${PKG_RELEASE} \ + # nginx-plus-module-geoip \ + # nginx-plus-module-geoip=${NGINX_VERSION}-r${PKG_RELEASE} \ + # nginx-plus-module-image-filter \ + # nginx-plus-module-image-filter=${NGINX_VERSION}-r${PKG_RELEASE} \ + # nginx-plus-module-perl \ + # nginx-plus-module-perl=${NGINX_VERSION}-r${PKG_RELEASE} \ + nginx-plus-module-njs \ + # nginx-plus-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${PKG_RELEASE} \ + " \ + KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \ + && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \ + && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \ + echo "key verification succeeded!"; \ + mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \ + else \ + echo "key verification failed!"; \ + exit 1; \ + fi \ + && cat cert.pem > /etc/apk/cert.pem \ + && cat cert.key > /etc/apk/cert.key \ + && apk add -X 
"https://pkgs.nginx.com/plus/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ + && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi \ + && if [ -f "/etc/apk/cert.key" ] && [ -f "/etc/apk/cert.pem" ]; then rm -f /etc/apk/cert.key /etc/apk/cert.pem; fi \ +# Bring in tzdata so users could set the timezones through the environment +# variables + && apk add --no-cache tzdata \ +# Bring in curl and ca-certificates to make registering on DNS SD easier + && apk add --no-cache curl ca-certificates \ +# Forward request and error logs to Docker log collector + && ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +EXPOSE 80 + +STOPSIGNAL SIGQUIT + +CMD ["nginx", "-g", "daemon off;"] + +# vim:syntax=Dockerfile diff --git a/NGINX-API-Steering/nginx/api.conf b/NGINX-API-Steering/nginx/api.conf new file mode 100644 index 0000000..f916c92 --- /dev/null +++ b/NGINX-API-Steering/nginx/api.conf @@ -0,0 +1,12 @@ +server { + listen 8080; + + location /api/ { + api write=on; + } + + location /dashboard.html { + root /usr/share/nginx/html; + } + access_log off; +} diff --git a/NGINX-API-Steering/nginx/nginx.conf b/NGINX-API-Steering/nginx/nginx.conf new file mode 100644 index 0000000..5d44eeb --- /dev/null +++ b/NGINX-API-Steering/nginx/nginx.conf @@ -0,0 +1,40 @@ +user nginx; +worker_processes auto; + +error_log /var/log/nginx/error.log notice; +pid /var/run/nginx.pid; + +load_module modules/ngx_http_js_module.so; +load_module modules/ngx_stream_js_module.so; + + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + #tcp_nopush on; + 
keepalive_timeout 65; + #gzip on; + include /etc/nginx/conf.d/*.conf; +} + + +# TCP/UDP proxy and load balancing block +stream { + log_format stream-main '$remote_addr [$time_local] ' + '$protocol $status $bytes_sent $bytes_received ' + '$session_time "$ssl_preread_server_name"'; + #access_log /dev/stdout stream-main; + include /etc/nginx/stream-conf.d/*.conf; +} diff --git a/NGINX-API-Steering/nginx/steering.conf-dockercompose b/NGINX-API-Steering/nginx/steering.conf-dockercompose new file mode 100644 index 0000000..7790105 --- /dev/null +++ b/NGINX-API-Steering/nginx/steering.conf-dockercompose @@ -0,0 +1,75 @@ +js_import steering from conf.d/steering.js; + +resolver 127.0.0.11; + +upstream backend-db +{ + zone backend-db 64k; + + # Steering DB REST API endpoint + server backend:5000; +} + +proxy_cache_path /var/cache/nginx/jwk levels=1 keys_zone=jwk:1m max_size=10m; +proxy_cache_path /var/tmp/cache levels=1:2 keys_zone=dbQueryCache:10m max_size=20m inactive=1m use_temp_path=off; +proxy_cache_key "$scheme://$host$request_uri$query_string"; + +log_format jwt '$remote_addr - $remote_user [$time_local] "$request" ' +'$status $body_bytes_sent "$http_referer" "$http_user_agent" ' +'$jwt_header_alg $jwt_claim_sub'; + +auth_jwt_claim_set $jwt_claim_roles roles; + +server +{ + listen 80; + server_name $host; + status_zone steering; + + location / + { + auth_jwt "authentication required"; + auth_jwt_key_request /_jwks_uri; + + access_log /var/log/nginx/api_steering_access.log jwt; + access_log /var/log/nginx/access.log main; + + error_log /var/log/nginx/api_steering_error.log debug; + error_log /var/log/nginx/error.log debug; + + js_content steering.dbQuery; + } + + location = /_jwks_uri + { + internal; + proxy_method GET; + proxy_cache jwk; # Cache responses + proxy_pass http://backend-db/jwks.json; # Obtain keys from here + } + + location ~ /dbQuery/(.*) + { + internal; + + proxy_cache dbQueryCache; + proxy_cache_bypass $http_pragma; + proxy_cache_lock on; + 
proxy_cache_valid 200 1m; + + proxy_pass http://backend-db/$1; + } + + location ~ /steeringMode/(.*) + { + internal; + + proxy_ssl_session_reuse off; + proxy_ssl_server_name on; + + proxy_pass_request_headers on; + proxy_pass_request_body on; + + proxy_pass $1; + } +} diff --git a/NGINX-API-Steering/nginx/steering.conf-k8s b/NGINX-API-Steering/nginx/steering.conf-k8s new file mode 100644 index 0000000..9886c39 --- /dev/null +++ b/NGINX-API-Steering/nginx/steering.conf-k8s @@ -0,0 +1,75 @@ +js_import steering from conf.d/steering.js; + +resolver kube-dns.kube-system.svc.cluster.local; + +upstream backend-db +{ + zone backend-db 64k; + + # Steering DB REST API endpoint + server backend:5000; +} + +proxy_cache_path /var/cache/nginx/jwk levels=1 keys_zone=jwk:1m max_size=10m; +proxy_cache_path /var/tmp/cache levels=1:2 keys_zone=dbQueryCache:10m max_size=20m inactive=1m use_temp_path=off; +proxy_cache_key "$scheme://$host$request_uri$query_string"; + +log_format jwt '$remote_addr - $remote_user [$time_local] "$request" ' +'$status $body_bytes_sent "$http_referer" "$http_user_agent" ' +'$jwt_header_alg $jwt_claim_sub'; + +auth_jwt_claim_set $jwt_claim_roles roles; + +server +{ + listen 80; + server_name $host; + status_zone steering; + + location / + { + auth_jwt "authentication required"; + auth_jwt_key_request /_jwks_uri; + + access_log /var/log/nginx/api_steering_access.log jwt; + access_log /var/log/nginx/access.log main; + + error_log /var/log/nginx/api_steering_error.log debug; + error_log /var/log/nginx/error.log debug; + + js_content steering.dbQuery; + } + + location = /_jwks_uri + { + internal; + proxy_method GET; + proxy_cache jwk; # Cache responses + proxy_pass http://backend-db/jwks.json; # Obtain keys from here + } + + location ~ /dbQuery/(.*) + { + internal; + + proxy_cache dbQueryCache; + proxy_cache_bypass $http_pragma; + proxy_cache_lock on; + proxy_cache_valid 200 1m; + + proxy_pass http://backend-db/$1; + } + + location ~ /steeringMode/(.*) + { + 
internal; + + proxy_ssl_session_reuse off; + proxy_ssl_server_name on; + + proxy_pass_request_headers on; + proxy_pass_request_body on; + + proxy_pass $1; + } +} diff --git a/NGINX-API-Steering/nginx/steering.js b/NGINX-API-Steering/nginx/steering.js new file mode 100644 index 0000000..2721d70 --- /dev/null +++ b/NGINX-API-Steering/nginx/steering.js @@ -0,0 +1,212 @@ +export default { + dbQuery +}; + +function dbQuery(r) { + r.warn('--- CLIENT REQUEST ---------------------------'); + r.warn('Client[' + r.remoteAddress + '] Method[' + r.method + '] Host[' + r.headersIn['host'] + '] URI [' + r.uri + '] Body[' + r.requestText + ']'); + + // Queries the backend db + r.warn('Subrequest [/dbQuery/backend/fetchkey' + r.uri + ']'); + r.subrequest('/dbQuery/backend/fetchkey' + r.uri, '', subReqCallback); + + function subReqCallback(reply) { + if (reply.status != 200) { + // Rule not found + + r.warn('Rule not found - returning 404'); + r.return(404); + } else { + r.warn('Rule found: URI[' + reply.uri + '] status[' + reply.status.toString() + '] body[' + reply.responseText + ']'); + + var body = JSON.parse(reply.responseText); + + if (body.rule.enabled == 'false') { + // Rule is disabled + + r.warn('Rule is disabled - returning 404'); + r.return(404); + } else { + r.warn('Rewriting request [' + r.headersIn['host'] + r.uri + '] -> [' + body.rule.operation.url + ']'); + + // Authorization checks + r.warn('--- Checking authorization'); + + r.warn('- HTTP method received [' + r.method + '] -> needed [' + body.rule.matchRules.method + ']'); + r.warn('- JWT roles received [' + r.variables.jwt_claim_roles + '] -> needed [' + body.rule.matchRules.roles + ']'); + + if (r.method == body.rule.matchRules.method && body.rule.matchRules.roles.indexOf(r.variables.jwt_claim_roles) >= 0) { + r.warn('--- Authorization successful'); + var requestOk = true; + + if (r.requestText) { + // Request JSON payload update + var requestBody=JSON.parse(r.requestText); + + // JSON payload validation 
against template + if ('template' in body.rule) { + r.warn('+-- JSON template validation [' + JSON.stringify(body.rule.template) + ']'); + + if (checkJSON(r,requestBody,body.rule.template)) { + r.warn('+-- JSON template validation successful'); + } else { + r.warn('+-- JSON template validation failed'); + requestOk = false; + r.return(422); + } + } + + if (requestOk == true) { + if ('json' in body.rule && 'to_server' in body.rule.json) { + r.warn('--- JSON payload client -> server : being updated') + requestBody = JSON.stringify( applyJSONChanges(r, requestBody, body.rule.json.to_server) ); + } else { + r.warn('--- JSON payload client -> server : no changes') + requestBody = r.requestText; + } + } + } + } else { + r.warn('--- Authorization failed'); + requestOk = false; + r.return(403); + } + + if (requestOk == true) { + r.warn('--- Proxying request to upstream'); + r.subrequest('/steeringMode/' + body.rule.operation.url, { + method: r.method, body: requestBody + }, steeringModeSubReqCallback); + } + } + } + + function steeringModeSubReqCallback(steeringReply) { + // Steering mode - returns the steered API response back to the client + + r.warn('--- Upstream returned HTTP [' + steeringReply.status + '] payload [' + steeringReply.responseText + ']'); + + var responseBody=''; + + if (steeringReply.responseText) { + // Response JSON payload update + + if ('json' in body.rule && 'to_client' in body.rule.json) { + r.warn('--- JSON payload server -> client : being updated') + responseBody = JSON.stringify( applyJSONChanges(r, responseBody=JSON.parse(steeringReply.responseText), body.rule.json.to_client) ); + } else { + r.warn('--- JSON payload server -> client : no changes') + responseBody = steeringReply.responseText; + } + } + + r.status = steeringReply.status; + + for (var header in steeringReply.headersOut) { + if (header.toLowerCase() != "content-length") { + r.headersOut[header] = steeringReply.headersOut[header]; + } + } + + r.sendHeader(); + r.send(responseBody); 
+ r.finish(); + } + } +} + +function manipulateJSON(jsonObject, key, value) { + // Check if the key already exists in the JSON object + if (jsonObject.hasOwnProperty(key)) { + // If the value is provided, update the existing key value + if (value !== undefined) { + jsonObject[key] = value; + } + // If the value is not provided, remove the existing key + else { + delete jsonObject[key]; + } + } + // If the key doesn't exist and a value is provided, add a new key value pair + else if (value !== undefined) { + jsonObject[key] = value; + } + + // Return the updated JSON object as a string + return jsonObject; +} + +// Applies JSON payload transformations based on the given template +// "set": [ +// { +// "": "" +// } +// ], +// "del": [ +// "" +// ] +// payload and template are JSON objects +function applyJSONChanges(r, payload, jsonTemplate) { + r.warn('Updating JSON payload [' + JSON.stringify(payload) + '] with template [' + JSON.stringify(jsonTemplate) + ']'); + + if ('set' in jsonTemplate) { + for (var i = 0; i < jsonTemplate.set.length; i++) { + var keyVal = jsonTemplate.set[i]; + + Object.keys(keyVal).forEach(function(key) { + var value = keyVal[key]; + + r.warn('- Updating [' + key + ' = ' + value + ']'); + payload = manipulateJSON(payload, key, value); + }); + } + } + if ('del' in jsonTemplate) { + for (var i = 0; i < jsonTemplate.del.length; i++) { + var key = jsonTemplate.del[i]; + + r.warn('- Deleting [' + key + ']'); + payload = manipulateJSON(payload, key); + } + } + + r.warn('Done updating JSON payload [' + JSON.stringify(payload) + ']'); + + return payload; +} + +// Check JSON payload conformity to the template +// JSON keys and key types are verified +function checkJSON(r, payload, template) { + const keys = Object.keys(template); + + r.warn('|-- Checking JSON payload [' + payload + ']'); + + for (let i = 0; i < keys.length; i++) { + // Property check + + if (!payload.hasOwnProperty(keys[i])) { + // JSON key missing in payload + r.warn('|---- Property 
[' + keys[i] + '] missing'); + return false; + } + + // Property type check + if (typeof payload[keys[i]] !== typeof template[keys[i]]) { + // JSON key with wrong type in payload + r.warn('|---- Property [' + keys[i] + '] type wrong'); + return false; + } + + // Nested properties + if (typeof template[keys[i]] === 'object') { + if(!checkJSON(r, payload[keys[i]], template[keys[i]])) { + return false; + } + } + + r.warn('|---- Property [' + keys[i] + '] ok'); + } + + return true; +} diff --git a/NGINX-Advanced-Healthcheck/README.md b/NGINX-Advanced-Healthcheck/README.md new file mode 100644 index 0000000..06239d4 --- /dev/null +++ b/NGINX-Advanced-Healthcheck/README.md @@ -0,0 +1,92 @@ +# NGINX Advanced Healthcheck + +## Description + +This repository provides a sample NGINX Plus configuration to support advanced active healthchecks. + +Upstream servers availability status is dynamically set based on evaluation of external REST API that can provide any type of JSON reply. +JSON responses can be evaluated by custom logic by adapting the sample [`healthcheck.js`](nginx/conf.d/healthcheck.js) configuration provided. + + +## Prerequisites + +To use this NGINX Plus configuration: + +- NGINX Plus R27 or later with njs support + +To set up the sample lab: + +- One "Load balancer" Linux host to run NGINX Plus and the configuration from this repo. 
See [here](https://docs.nginx.com/nginx/technical-specs/) for NGINX Plus supported distributions +- Two "Webserver" Linux hosts to run the [sample webservers](webserver) (upstream servers) +- One client host to access NGINX Plus dashboard and test load balancing using curl / a web browser +- Python 3.10+ on all Linux hosts + +## Setup description + + + +NGINX Plus: +- Runs healthcheck evaluation by [querying the external REST API](nginx/conf.d/healthcheck.js) service and applying the custom healthcheck logic +- Sets upstream servers up/down state [through its REST API](nginx/conf.d/healthcheck.js) +- Load balances client requests + - [For HTTP(S) services](nginx/conf.d/loadbalancer.conf) + - [For TCP/UDP services](nginx/stream-conf.d/stream-loadbalancer.conf) + + + +## Deploying this repository + +1. Setup Linux hosts (IP addresses reference the [sample NGINX configuration](nginx)) and install required packages + +| Hostname | IP Address | Distribution | Packages to be installed | Description | +| -------- | ---------- | ------------ | ----------------- | ----------- | +| `client` | 192.168.1.20 | Your favorite client OS | Your favorite web browser, [curl](https://curl.se) | This is used to access NGINX Plus dashboard and to test using curl | +| `nginx` | 192.168.1.21 | Any [NGINX Plus supported distribution](https://docs.nginx.com/nginx/technical-specs/) | NGINX Plus to be [installed](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-plus/) here | NGINX Plus load balancer with advanced healthchecks | +| `webserver1` | 192.168.1.30 | Your favorite Linux distro | Python 3.10+, [webserver](webserver), [health API](health-api) | This is an upstream server NGINX will load balance requests to | +| `webserver2` | 192.168.1.31 | Your favorite Linux distro | Python 3.10+, [webserver](webserver), [health API](health-api) | This is an upstream server NGINX will load balance requests to | + +2. 
Configure and start services + +| Hostname | Description | +| -------- | ----------- | +| `nginx` | Copy all files in [`nginx/conf.d`](nginx/conf.d) to `/etc/nginx/conf.d/` and all files in [`nginx/stream-conf.d`](nginx/stream-conf.d) to `/etc/nginx/stream-conf.d/`. Start NGINX Plus | +| `webserver1` | Run the sample [`webserver`](webserver) (listening on 8080/TCP) and the [`health API`](health-api) (listening on 5000/TCP) | +| `webserver2` | Run the sample [`webserver`](webserver) (listening on 8080/TCP) and the [`health API`](health-api) (listening on 5000/TCP) | + + +## Testing + +1. NGINX Plus dashboard can be accessed from `client` browsing to `http://192.168.1.20:8080/dashboard.html` + + + +2. Running `curl` from the `client` host check where requests are balanced: + +``` +$ curl -H "Host: app.test.lab" http://192.168.1.21 +This is the webserver running on webserver1 +``` + +3. The default configuration sets upstream servers as available when 1 minute CPU load is lower than 5, see [`healthcheck.js`](nginx/conf.d/healthcheck.js). The actual logic is fully customizable. 
+ +``` + // Evaluation logic goes here, this example checks the 1 minute CPU load + if(jsonReply.cpu.load.1minute < 5) { + // Set the upstream server up + r.warn('Healthcheck: server #[' + backend_server_entry + '] is up'); + r.subrequest("/upstream/up/" + backend_server_entry,postUpstreamUpdate); + } else { + // Set the upstream server down + r.warn('Healthcheck: server #[' + backend_server_entry + '] is down'); + r.subrequest("/upstream/down/" + backend_server_entry,postUpstreamUpdate); + } +``` + +## Customizing + +To customize the NGINX Plus configuration refer to: + +- [`nginx/conf.d/healthcheck.conf`](nginx/conf.d/healthcheck.conf) - Fully commented NGINX Plus configuration to define healtcheck REST API endpoints +- [`nginx/conf.d/healthcheck.js`](nginx/conf.d/healthcheck.js) - To customize the healthcheck evaluation logic +- [`nginx/conf.d/loadbalancer.conf`](nginx/conf.d/loadbalancer.conf) - HTTP(S) upstream and server {} configuration +- [`nginx/stream-conf.d/stream-loadbalancer.conf`](nginx/stream-conf.d/stream-loadbalancer.conf) - TCP/UDP upstream and server {} configuration diff --git a/NGINX-Advanced-Healthcheck/health-api/README.md b/NGINX-Advanced-Healthcheck/health-api/README.md new file mode 100644 index 0000000..a0e737d --- /dev/null +++ b/NGINX-Advanced-Healthcheck/health-api/README.md @@ -0,0 +1,33 @@ +## Health REST API + +This is a sample Python REST API that returns load and system information for the host it runs on. +It is used by this repository to read upstreams CPU usage and set the upstream server status based on the realtime CPU load. + +To run: + +``` +$ pip install -r requirements.txt +[...] +$ python serverstats.py + * Serving Flask app 'serverstats' + * Debug mode: off +WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. 
+ * Running on all addresses (0.0.0.0) + * Running on http://127.0.0.1:5000 + * Running on http://192.168.1.19:5000 +Press CTRL+C to quit +``` + +It responds to HTTP `GET /stats` requests on port 5000: + +``` +$ curl -i http://127.0.0.1:5000/stats +HTTP/1.1 200 OK +Server: Werkzeug/2.2.2 Python/3.10.9 +Date: Mon, 19 Dec 2022 16:47:17 GMT +Content-Type: application/json +Content-Length: 1836 +Connection: close + +{"cpu":{"arch":"X86_64","arch_string_raw":"x86_64","bits":64,"brand_raw":"Intel(R) Core(TM) i7-4900MQ CPU @ 2.80GHz","count":8,"cpuinfo_version":[9,0,0],"cpuinfo_version_string":"9.0.0","family":6,"flags":["abm","acpi","aes","aperfmperf","apic","arat","arch_perfmon","avx","avx2","bmi1","bmi2","bts","clflush","cmov","constant_tsc","cpuid","cpuid_fault","cx16","cx8","de","ds_cpl","dtes64","dtherm","dts","epb","ept","ept_ad","erms","est","f16c","flexpriority","fma","fpu","fsgsbase","fxsr","hle","ht","ida","invpcid","invpcid_single","lahf_lm","lm","mca","mce","mmx","monitor","movbe","msr","mtrr","nonstop_tsc","nopl","nx","osxsave","pae","pat","pbe","pcid","pclmulqdq","pdcm","pdpe1gb","pebs","pge","pln","pni","popcnt","pse","pse36","pti","pts","rdrand","rdrnd","rdtscp","rep_good","rtm","sdbg","sep","smep","smx","ss","sse","sse2","sse4_1","sse4_2","ssse3","syscall","tm","tm2","tpr_shadow","tsc","tsc_adjust","tscdeadline","vme","vmx","vnmi","vpid","x2apic","xsave","xsaveopt","xtopology","xtpr"],"hz_actual":[3192955000,0],"hz_actual_friendly":"3.1930 GHz","hz_advertised":[2800000000,0],"hz_advertised_friendly":"2.8000 GHz","l1_data_cache_size":131072,"l1_instruction_cache_size":131072,"l2_cache_associativity":6,"l2_cache_line_size":256,"l2_cache_size":1048576,"l3_cache_size":8388608,"load":{"15minute":37.14599609375,"1minute":25.927734375,"5minute":35.4248046875},"model":60,"python_version":"3.10.9.final.0 (64 
#
# Sample healthcheck REST API
# Query with:
# curl -s http://127.0.0.1:5000/stats
#

from flask import Flask, jsonify, abort, make_response, request
import os
import psutil
import cpuinfo

app = Flask(__name__)

@app.route('/stats', methods=['GET'])
def get_info():
    """Return a JSON document with CPU, memory, swap and disk statistics.

    The 1/5/15-minute load averages are normalized by the CPU count and
    exposed as percentages under ``cpu.load`` — this is the field the
    NGINX healthcheck javascript evaluates (``cpu.load["1minute"]``).
    """
    # Normalize load averages to a percentage of total CPU capacity
    load1, load5, load15 = psutil.getloadavg()
    cpu_count = os.cpu_count()
    cpu_usage1 = (load1 / cpu_count) * 100
    cpu_usage5 = (load5 / cpu_count) * 100
    cpu_usage15 = (load15 / cpu_count) * 100

    virtual = psutil.virtual_memory()
    swap = psutil.swap_memory()

    output = {}
    output['system'] = {'bootTime': psutil.boot_time()}

    # Full CPU description (model, flags, frequencies, ...) with the
    # normalized load figures layered on top
    output['cpu'] = cpuinfo.get_cpu_info()
    output['cpu']['load'] = {
        '1minute': cpu_usage1,
        '5minute': cpu_usage5,
        '15minute': cpu_usage15,
    }

    output['virtualMemory'] = {
        'total': virtual.total,
        'available': virtual.available,
        'used': virtual.used,
        'free': virtual.free,
        'percent': virtual.percent,
        'active': virtual.active,
        'inactive': virtual.inactive,
        'buffers': virtual.buffers,
        'cached': virtual.cached,
    }

    output['swapMemory'] = {'total': swap.total}

    # Per-mountpoint disk usage; mountpoints that cannot be queried
    # (e.g. CD-ROM drives, restricted filesystems) are skipped on a
    # best-effort basis, matching the original behavior
    output['disk'] = {}
    for disk in psutil.disk_partitions():
        try:
            diskstats = psutil.disk_usage(disk.mountpoint)
        except Exception:
            continue
        output['disk'][disk.mountpoint] = {
            'total': diskstats.total,
            'used': diskstats.used,
            'free': diskstats.free,
            'percent': diskstats.percent,
        }

    # Flask serializes the dict to a JSON response automatically
    return output

@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML error page."""
    return make_response(jsonify({'error': 'Not found'}), 404)

if __name__ == '__main__':
    # Development server, listening on all interfaces, default port 5000
    app.run(host='0.0.0.0')
b/NGINX-Advanced-Healthcheck/nginx/README.md @@ -0,0 +1,7 @@ +## NGINX Configuration for advanced healtcheck + +- [`conf.d`](conf.d) contains: + - The main configuration to run advanced healtcheck (`healtcheck.conf` and `healthcheck.js`) - this works for HTTP(S) and TCP/UDP upstreams + - A sample load balancer configuration (upstream & server) for HTTP(S) services (`loadbalancer.conf`) +- [`stream-conf.d`](stream-conf.d) contains: + - A sample load balancer configuration (upstream & server) for TCP/UDP services (`stream-loadbalancer.conf`) diff --git a/NGINX-Advanced-Healthcheck/nginx/conf.d/healthcheck.conf b/NGINX-Advanced-Healthcheck/nginx/conf.d/healthcheck.conf new file mode 100644 index 0000000..c47eb32 --- /dev/null +++ b/NGINX-Advanced-Healthcheck/nginx/conf.d/healthcheck.conf @@ -0,0 +1,133 @@ +# +# NGINX Advanced Healtcheck +# This configuration requires NGINX Plus +# + +# Import the javascript code +js_import healthcheck from conf.d/healthcheck.js; + +# Log format for healthcheck requests +log_format healthcheck_log '[$time_local] $remote_addr:$remote_port ' + '$server_addr:$server_port ' + '"$request" $status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + +# Internal upstream for time-based healthcheck +upstream server_check { + zone server_check 64k; + + # Servers here must be 127.0.0.1: + # is an arbitrary and unique port referenced in + # the "REST API healtcheck monitoring endpoints map" here below + # and in the backserver servers mapping "map" + server 127.0.0.1:10080; + server 127.0.0.1:10081; +} + +# +# REST API healtcheck monitoring endpoints map +# +# This map must be defined as: +# key -> all ports declared in the "server_check" upstream +# value -> the REST API endpoint to be called for healthcheck evaluation (see healtcheck.js) +# +map $server_port $healthcheck_endpoint { + 10080 http://192.168.1.30:5000/stats; + 10081 http://192.168.1.31:5000/stats; +} + +# +# Mapping between port number and backend 
server position +# in the http_balancing_upstream upstream for HTTP(S) traffic (declared in conf.d/loadbalancer.conf) +# and in the tcp_balancing_upstream for TCP/UDP traffic (declared in stream-conf.d/stream-loadbalancer.conf) +# +# This map must be defined as: +# key -> all ports declared in the "server_check" upstream +# value -> position of the upstream server used for load balancing as listed in the "http_balancing_upstream" upstream (for HTTP/S traffic) or the "tcp_balancing_upstream" (for TCP/UDP traffic) +# +# In this example 1 is 192.168.1.30:8080 and 2 is 192.168.1.31:8080 (see conf.d/loadbalancer.conf) +# +map $server_port $backend_server_entry { + 10080 1; + 10081 2; +} + +# +# Internal monitoring server - checks destination servers health every 3 seconds +# +server { + location /dummy { + internal; + + health_check interval=3; + proxy_pass http://server_check; + } +} + +# Healtcheck server - njs-based healthcheck evaluation and dynamic balancing_upstream configuration +# This server must listen on all ip:port defined as servers in the "server_check" upstream +server { + listen 127.0.0.1:10080; + listen 127.0.0.1:10081; + + access_log /var/log/nginx/healthcheck-access_log healthcheck_log; + error_log /var/log/nginx/healthcheck-error_log notice; + + # Main location: hooks the javascript code + location / { + js_content healthcheck.check; + } + + # Internal location called by the javascript code through a subrequest. 
+ # This location sends the actual REST API request to the monitoring endpoint + location ~ /healthCheck/(.*) { + internal; + proxy_pass $1$query_string; + } + + # Internal location to set an upstream server as "up" + # This gets called by the javascript code through a subrequest + location ~ /upstream/up/(.*) { + internal; + proxy_method PATCH; + proxy_set_body '{"down":"false"}'; + + # Use to dynamically update the HTTP (conf.d) upstream configured in conf.d/loadbalancer.conf + proxy_pass http://127.0.0.1:8080/api/8/http/upstreams/http_balancing_upstream/servers/$1; + + # Use to dynamically update the TCP/UDP (stream-conf.d) upstream configured in stream-conf.d/stream-loadbalancer.conf + #proxy_pass http://127.0.0.1:8080/api/8/stream/upstreams/tcp_balancing_upstream/servers/$1; + } + + # Internal location to set an upstream server as "down" + # This gets called by the javascript code through a subrequest + location ~ /upstream/down/(.*) { + internal; + proxy_method PATCH; + proxy_set_body '{"down":"true"}'; + + # Use to dynamically update the HTTP (conf.d) upstream configured in conf.d/loadbalancer.conf + proxy_pass http://127.0.0.1:8080/api/8/http/upstreams/http_balancing_upstream/servers/$1; + + # Use to dynamically update the TCP/UDP (stream-conf.d) upstream configured in stream-conf.d/stream-loadbalancer.conf + #proxy_pass http://127.0.0.1:8080/api/8/stream/upstreams/tcp_balancing_upstream/servers/$1; + } +} + +# This enables NGINX Plus REST API access, needed by the javascript code +server { + listen 0.0.0.0:8080; + + location /api { + api write=on; + # Allows full API access, restrict access for real-world usage + allow all; + } + + location / { + root /usr/share/nginx/html; + index dashboard.html; + # Allows full Dashboard access, restrict access for real-world usage + allow all; + } +} diff --git a/NGINX-Advanced-Healthcheck/nginx/conf.d/healthcheck.js b/NGINX-Advanced-Healthcheck/nginx/conf.d/healthcheck.js new file mode 100644 index 0000000..0f62fde --- 
//
// NGINX Smart Healthchecks
// This configuration requires NGINX Plus
//

export default {check};

// Entry point hooked by "js_content healthcheck.check".
// Queries the external healthcheck REST API mapped to the current port and
// flips the corresponding upstream server up/down through the NGINX Plus API.
function check(r) {
    var dest_url = r.variables.healthcheck_endpoint;
    var rUri = r.uri;

    // Debug logging
    r.warn('--> Running healthcheck');
    r.warn('Client[' + r.remoteAddress + '] Method[' + r.method + '] URI[' + rUri + '] QueryString[' + r.variables.query_string + '] Checking [' + encodeURI(dest_url) + ']');

    // Call the internal location to query the external healthcheck REST API
    var fullURL = '/healthCheck/' + encodeURI(dest_url);
    r.subrequest(fullURL, '', subReqCallback);

    // Callback function to evaluate the external healthcheck REST API response
    function subReqCallback(reply) {
        // Gets the backend upstream server's position as mapped in the backend_server_entry map
        var backend_server_entry = r.variables.backend_server_entry;

        // Debug logging
        r.warn('JSON reply from: URI[' + reply.uri + '] status[' + reply.status.toString() + '] body[' + reply.responseText + '] for server [' + backend_server_entry + ']');

        // Checks the external healthcheck REST API response code
        if (reply.status != 200) {
            // Healthcheck endpoint unreachable: set the upstream server as not available
            r.warn('Healthcheck: server #[' + backend_server_entry + '] is down');
            r.subrequest("/upstream/down/" + backend_server_entry, postUpstreamUpdate);
        } else {
            // HTTP response code is 200/OK, parse the JSON response
            var jsonReply = JSON.parse(reply.responseText);

            // Example check on the 1-minute CPU load.
            // BUGFIX: bracket notation is mandatory here — a property name
            // starting with a digit ("load.1minute") is a syntax error and
            // prevented njs from loading this module at all.
            r.warn('Got 1-minute CPU value: ' + jsonReply.cpu.load["1minute"]);

            // Evaluation logic goes here, this example checks the 1 minute CPU load
            if (jsonReply.cpu.load["1minute"] < 5) {
                // Set the upstream server up
                r.warn('Healthcheck: server #[' + backend_server_entry + '] is up');
                r.subrequest("/upstream/up/" + backend_server_entry, postUpstreamUpdate);
            } else {
                // Set the upstream server down
                r.warn('Healthcheck: server #[' + backend_server_entry + '] is down');
                r.subrequest("/upstream/down/" + backend_server_entry, postUpstreamUpdate);
            }

            return;
        }

        // Callback for the NGINX Plus API subrequest: logs the API response
        // and mirrors the healthcheck reply (headers and body) back to the
        // original client. "reply" is the enclosing subReqCallback's
        // parameter, captured by closure.
        function postUpstreamUpdate(upstreamUpdateRequest) {
            r.warn('NGINX API Response: ' + upstreamUpdateRequest.responseText);

            for (var header in reply.headersOut) {
                r.warn('header [' + header + '] = [' + reply.headersOut[header] + ']');
                r.headersOut[header] = reply.headersOut[header];
            }

            r.status = 200;
            r.sendHeader();
            if (reply.responseText)
                r.send(reply.responseText);
            r.finish();
        }
    }
}
-0,0 +1,18 @@ +# Actual servers used for load balancing - up/down status is automatically configured through REST API +# https://docs.nginx.com/nginx/admin-guide/load-balancer/dynamic-configuration-api/ +upstream tcp_balancing_upstream +{ + zone tcp_balancing_upstream 64k; + server 127.0.0.1:5432 down; + + server 192.168.1.30:5432; + server 192.168.1.31:5432; +} + +# The client-facing server to load balance the end-user application +server { + listen 5432; + status_zone stream_loadbalancer; + + proxy_pass tcp_balancing_upstream; +} diff --git a/NGINX-Advanced-Healthcheck/webserver/README.md b/NGINX-Advanced-Healthcheck/webserver/README.md new file mode 100644 index 0000000..a7e732f --- /dev/null +++ b/NGINX-Advanced-Healthcheck/webserver/README.md @@ -0,0 +1,32 @@ +## Sample webserver + +This is a simple Python webserver to test the NGINX advanced healthcheck lab. + +To run it: + +``` +$ pip install -r requirements.txt +[...] +$ python webserver.py + * Serving Flask app 'webserver' + * Debug mode: off +WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. 
#
# Minimal webserver
#
# To test:
# curl -s http://127.0.0.1:8080/
#

import socket
from flask import Flask

app = Flask(__name__)

@app.route('/')
def index():
    """Identify which backend answered by returning the local hostname."""
    return f'This is the webserver running on {socket.gethostname()}\n'

if __name__ == '__main__':
    # Development server on all interfaces, port 8080
    app.run(debug=False, port=8080, host='0.0.0.0')
groupadd -g 1001 nginx-agent \ + && usermod root -G nginx-agent \ + && usermod nginx -G nginx-agent \ +# NGINX Agent + && if [ "$NGINX_AGENT" = "true" ] ; then \ + apt-get -y install curl gnupg2 ca-certificates lsb-release debian-archive-keyring \ + && curl https://nginx.org/keys/nginx_signing.key | gpg --dearmor > /usr/share/keyrings/nginx-archive-keyring.gpg \ + && echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] http://packages.nginx.org/nginx-agent/debian/ `lsb_release -cs` agent" > /etc/apt/sources.list.d/nginx-agent.list \ + && apt-get -y update \ + && apt-get -y install nginx-agent; fi + +# Startup script +COPY ./container/start.sh /deployment/ +RUN chmod +x /deployment/start.sh && touch /.dockerenv + +EXPOSE 80 +STOPSIGNAL SIGTERM + +CMD /deployment/start.sh diff --git a/NGINX-Docker-Image-Builder/Dockerfile.plus b/NGINX-Docker-Image-Builder/Dockerfile.plus new file mode 100644 index 0000000..3da82d4 --- /dev/null +++ b/NGINX-Docker-Image-Builder/Dockerfile.plus @@ -0,0 +1,49 @@ +FROM debian:bullseye-slim + +ARG NAP_WAF=false +ARG NGINX_AGENT=false + +# Initial packages setup +RUN apt-get -y update \ + && apt-get -y install apt-transport-https lsb-release ca-certificates wget gnupg2 curl debian-archive-keyring iproute2 \ + && mkdir -p /deployment /etc/ssl/nginx \ + && addgroup --system --gid 20983 nginx \ + && adduser --system --disabled-login --ingroup nginx --no-create-home --home /nonexistent --gecos "nginx user" --shell /bin/false --uid 20983 nginx \ + && wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq \ + && chmod +x /usr/bin/yq + +# Use certificate and key from secret +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + set -x \ +# Install prerequisite packages: + && wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | gpg --dearmor > 
/usr/share/keyrings/nginx-archive-keyring.gpg \ + && printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" > /etc/apt/sources.list.d/nginx-plus.list \ + && wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx \ + && apt-get -y update \ + && apt-get -y install nginx-plus nginx-plus-module-njs nginx-plus-module-prometheus \ +# Optional NGINX App Protect WAF + && if [ "$NAP_WAF" = "true" ] ; then \ + wget -qO - https://cs.nginx.com/static/keys/app-protect-security-updates.key | gpg --dearmor > /usr/share/keyrings/app-protect-security-updates.gpg \ + && printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/app-protect/debian `lsb_release -cs` nginx-plus\n" > /etc/apt/sources.list.d/nginx-app-protect.list \ + && printf "deb [signed-by=/usr/share/keyrings/app-protect-security-updates.gpg] https://pkgs.nginx.com/app-protect-security-updates/debian `lsb_release -cs` nginx-plus\n" >> /etc/apt/sources.list.d/nginx-app-protect.list \ + && apt-get -y update \ + && apt-get -y install app-protect app-protect-attack-signatures; fi \ +# Forward request logs to Docker log collector + && ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log \ +# NGINX Agent + && if [ "$NGINX_AGENT" = "true" ] ; then \ + curl https://nginx.org/keys/nginx_signing.key | gpg --dearmor > /usr/share/keyrings/nginx-archive-keyring.gpg \ + && echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] http://packages.nginx.org/nginx-agent/debian/ `lsb_release -cs` agent" > /etc/apt/sources.list.d/nginx-agent.list \ + && apt-get -y update \ + && apt-get -y install nginx-agent; fi + +# Startup script +COPY ./container/start.sh /deployment/ +RUN chmod +x /deployment/start.sh && touch /.dockerenv + +EXPOSE 80 +STOPSIGNAL SIGTERM + +CMD /deployment/start.sh diff --git 
a/NGINX-Docker-Image-Builder/Dockerfile.plus.unprivileged b/NGINX-Docker-Image-Builder/Dockerfile.plus.unprivileged new file mode 100644 index 0000000..9d66595 --- /dev/null +++ b/NGINX-Docker-Image-Builder/Dockerfile.plus.unprivileged @@ -0,0 +1,82 @@ +FROM debian:bullseye-slim + +ARG NAP_WAF=false +ARG NGINX_AGENT=false + +ARG UID=101 +ARG GID=101 + +# Initial packages setup +RUN apt-get -y update \ + && apt-get -y install apt-transport-https lsb-release ca-certificates wget gnupg2 curl debian-archive-keyring iproute2 \ + && mkdir -p /deployment /etc/ssl/nginx /etc/nms \ + && addgroup --system --gid $GID nginx \ + && adduser --system --disabled-login --ingroup nginx --no-create-home --home /nonexistent --gecos "nginx user" --shell /bin/false --uid $UID nginx \ + && wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq \ + && chmod +x /usr/bin/yq + +# Use certificate and key from secret +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + set -x \ +# Install prerequisite packages: + && wget -qO - https://cs.nginx.com/static/keys/nginx_signing.key | gpg --dearmor > /usr/share/keyrings/nginx-archive-keyring.gpg \ + && printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" > /etc/apt/sources.list.d/nginx-plus.list \ + && wget -P /etc/apt/apt.conf.d https://cs.nginx.com/static/files/90pkgs-nginx \ + && apt-get -y update \ + && apt-get -y install nginx-plus nginx-plus-module-njs nginx-plus-module-prometheus \ +# Optional NGINX App Protect WAF + && if [ "$NAP_WAF" = "true" ] ; then \ + wget -qO - https://cs.nginx.com/static/keys/app-protect-security-updates.key | gpg --dearmor > /usr/share/keyrings/app-protect-security-updates.gpg \ + && printf "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] 
https://pkgs.nginx.com/app-protect/debian `lsb_release -cs` nginx-plus\n" > /etc/apt/sources.list.d/nginx-app-protect.list \ + && printf "deb [signed-by=/usr/share/keyrings/app-protect-security-updates.gpg] https://pkgs.nginx.com/app-protect-security-updates/debian `lsb_release -cs` nginx-plus\n" >> /etc/apt/sources.list.d/nginx-app-protect.list \ + && apt-get -y update \ + && apt-get -y install app-protect app-protect-attack-signatures \ + && chown $UID:0 /opt \ + && chmod g+w /opt \ + && chown -R $UID:0 /opt/app_protect \ + && chmod -R g+w /opt/app_protect; fi \ +# Forward request logs to Docker log collector + && ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log \ +# NGINX Agent + && if [ "$NGINX_AGENT" = "true" ] ; then \ + curl https://nginx.org/keys/nginx_signing.key | gpg --dearmor > /usr/share/keyrings/nginx-archive-keyring.gpg \ + && echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] http://packages.nginx.org/nginx-agent/debian/ `lsb_release -cs` agent" > /etc/apt/sources.list.d/nginx-agent.list \ + && apt-get -y update \ + && apt-get -y install nginx-agent \ +# implement changes required to run NGINX Agent as an unprivileged user + && chown -R $UID:0 /etc/nginx-agent \ + && chmod -R g+w /etc/nginx-agent \ + && chown -R $UID:0 /var/lib/nginx-agent \ + && chmod -R g+w /var/lib/nginx-agent \ + && chown -R $UID:0 /var/log/nginx-agent \ + && chmod -R g+w /var/log/nginx-agent \ + && chown -R $UID:0 /var/run/nginx-agent \ + && chmod -R g+w /var/run/nginx-agent; fi + +# implement changes required to run NGINX as an unprivileged user +RUN rm /etc/nginx/conf.d/default.conf \ + && sed -i '/user nginx;/d' /etc/nginx/nginx.conf \ + && sed -i 's,/var/run/nginx.pid,/tmp/nginx.pid,' /etc/nginx/nginx.conf \ + && sed -i "/^http {/a \ proxy_temp_path /tmp/proxy_temp;\n client_body_temp_path /tmp/client_temp;\n fastcgi_temp_path /tmp/fastcgi_temp;\n uwsgi_temp_path /tmp/uwsgi_temp;\n scgi_temp_path 
/tmp/scgi_temp;\n" /etc/nginx/nginx.conf \ +# nginx user must own the cache and etc directory to write cache and tweak the nginx config + && chown -R $UID:0 /var/cache/nginx \ + && chmod -R g+w /var/cache/nginx \ + && chown -R $UID:0 /etc/nginx \ + && chmod -R g+w /etc/nginx \ + && chown -R $UID:0 /usr/lib/nginx/modules \ + && chmod -R g+w /usr/lib/nginx/modules \ + && chown -R $UID:0 /etc/nms \ + && chmod -R g+w /etc/nms + +# Startup script +COPY ./container/start.sh /deployment/ +RUN chmod +x /deployment/start.sh && touch /.dockerenv + +EXPOSE 80 +STOPSIGNAL SIGTERM + +USER $UID + +CMD ["/deployment/start.sh"] diff --git a/NGINX-Docker-Image-Builder/README.md b/NGINX-Docker-Image-Builder/README.md new file mode 100644 index 0000000..490ec50 --- /dev/null +++ b/NGINX-Docker-Image-Builder/README.md @@ -0,0 +1,110 @@ +# NGINX Docker image builder + +## Description + +This repository can be used to build a docker image that includes: + +- [NGINX Plus](https://docs.nginx.com/nginx) in privileged or unprivileged/non-root mode +- [NGINX Open Source](https://nginx.org/) +- [NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect-waf) +- [NGINX Agent](https://docs.nginx.com/nginx-agent) + +## Tested releases + +This repository has been tested with: + +- [NGINX Plus](https://docs.nginx.com/nginx) R29+ +- [NGINX Open Source](https://nginx.org) 1.24.0+ +- [NGINX Agent](https://docs.nginx.com/nginx-agent) 2.14+ +- [NGINX Instance Manager](https://docs.nginx.com/nginx-instance-manager) 2.15+ +- [NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect-waf) 4.100.1+ +- [NGINX One Console](https://docs.nginx.com/nginx-app-protect-waf) + +## Prerequisites + +- Linux host running Docker to build the image +- NGINX Plus license +- Access to either control plane: + - [NGINX Instance Manager](https://docs.nginx.com/nginx-instance-manager/) + - [NGINX One Cloud Console](https://docs.nginx.com/nginx-one/) +- Docker/Docker-compose or Openshift/Kubernetes cluster + +## 
Building the docker image + +The `./scripts/build.sh` install script can be used to build the Docker image: + +``` +NGINX Docker Image builder + + This tool builds a Docker image to run NGINX Plus/Open Source, NGINX App Protect WAF and NGINX Agent + + === Usage: + + ./scripts/build.sh [options] + + === Options: + + -h - This help + -t [target image] - The Docker image to be created + -C [file.crt] - Certificate to pull packages from the official NGINX repository + -K [file.key] - Key to pull packages from the official NGINX repository + -w - Add NGINX App Protect WAF (requires NGINX Plus) + -O - Use NGINX Open Source instead of NGINX Plus + -u - Build unprivileged image (only for NGINX Plus) + -a - Add NGINX Agent + + === Examples: + + NGINX Plus and NGINX Agent image: + ./scripts/build.sh -C nginx-repo.crt -K nginx-repo.key -t registry.ff.lan:31005/nginx-docker:plus-agent-root -a + + NGINX Plus, NGINX App Protect WAF and NGINX Agent image: + ./scripts/build.sh -C nginx-repo.crt -K nginx-repo.key -t registry.ff.lan:31005/nginx-docker:plus-nap-agent-root -w -a + + NGINX Plus, NGINX App Protect WAF and NGINX Agent unprivileged image: + ./scripts/build.sh -C nginx-repo.crt -K nginx-repo.key -t registry.ff.lan:31005/nginx-docker:plus-nap-agent-nonroot -w -u -a + + NGINX Opensource and NGINX Agent image: + ./scripts/build.sh -O -t registry.ff.lan:31005/nginx-docker:oss-root -a +``` + +1. Clone this repository +2. For NGINX Plus only: get your license certificate and key +3. Build the Docker image using `./scripts/build.sh` + +### Running the docker image on Kubernetes + +1. 
Edit `manifests/nginx-manifest.yaml` and specify the correct image by modifying the `image:` line, and set the following environment variables
+ - `NGINX_LICENSE` - NGINX R33+ JWT license token
+ - `NGINX_AGENT_SERVER_HOST` - NGINX Instance Manager / NGINX One Console hostname/IP address
+ - `NGINX_AGENT_SERVER_GRPCPORT` - NGINX Instance Manager / NGINX One Console gRPC port
+ - `NGINX_AGENT_SERVER_TOKEN` - NGINX Instance Manager / NGINX One Console authentication token
+ - `NGINX_AGENT_INSTANCE_GROUP` - instance group (NGINX Instance Manager) / config sync group (NGINX One Console) for the NGINX instance
+ - `NGINX_AGENT_TAGS` - comma separated list of tags for the NGINX instance
+ - `NAP_WAF` - set to `"true"` to enable NGINX App Protect WAF (docker image built using `-w`) - NGINX Plus only
+ - `NAP_WAF_PRECOMPILED_POLICIES` - set to `"true"` to enable NGINX App Protect WAF precompiled policies (docker image built using `-w`) - NGINX Plus only
+ - `NGINX_AGENT_LOG_LEVEL` - NGINX Agent loglevel, optional. If not specified defaults to `info`
+
+2. Deploy on Kubernetes using the example manifest `manifests/nginx-manifest.yaml`
+
+3. After startup the NGINX instance will register to NGINX Instance Manager / NGINX One Console and will be displayed on the "instances" dashboard if the NGINX Agent has been built into the docker image
+
+### Running the docker image on Docker
+
+1. Start using
+
+```
+docker run --rm --name nginx -p [PORT_TO_EXPOSE] \
+ -e "NGINX_LICENSE=" \
+ -e "NGINX_AGENT_SERVER_HOST=" \
+ -e "NGINX_AGENT_SERVER_GRPCPORT=" \
+ -e "NGINX_AGENT_SERVER_TOKEN=" \
+ -e "NGINX_AGENT_INSTANCE_GROUP=" \
+ -e "NGINX_AGENT_TAGS=" \
+ -e "NAP_WAF=[true|false]" \
+ -e "NAP_WAF_PRECOMPILED_POLICIES=[true|false]" \
+ -e "NGINX_AGENT_LOG_LEVEL=[panic|fatal|error|info|debug|trace]" \
+ 
+``` +
+2. 
#!/bin/bash
#
# Container entrypoint: starts NGINX, then optionally configures and starts
# NGINX Agent and the NGINX App Protect WAF support processes, all driven by
# environment variables (see the repository README for the full list).

# Detect unprivileged mode: the unprivileged image runs as user "nginx"
if [[ `whoami` == "nginx" ]]; then
  IS_UNPRIVILEGED="true"
else
  IS_UNPRIVILEGED=
fi

# NGINX Plus R33+ JWT license token, written where NGINX Plus expects it
if [[ ! -z "$NGINX_LICENSE" ]]; then
  echo ${NGINX_LICENSE} > /etc/nginx/license.jwt
fi

# Start NGINX and give it a moment to initialize
nginx
sleep 2

if [[ "$NGINX_AGENT_ENABLED" == "true" ]]; then

  # NGINX Agent version detection, change in behaviour in v2.24.0+
  AGENT_VERSION=`nginx-agent -v|awk '{print $3}'`
  AGENT_VERSION_MAJOR=`echo $AGENT_VERSION | awk -F\. '{print $1}' | sed 's/v//'`
  AGENT_VERSION_MINOR=`echo $AGENT_VERSION | awk -F\. '{print $2}'`

  echo "=> NGINX Agent version $AGENT_VERSION"

  # Extra command-line parameters accumulated for nginx-agent
  PARM=""

  # Control-plane connection settings; yq's strenv() reads the values
  # straight from the environment and edits the config in place
  yq -i '
    .server.host=strenv(NGINX_AGENT_SERVER_HOST) |
    .server.grpcPort=strenv(NGINX_AGENT_SERVER_GRPCPORT) |
    .tls.enable=true |
    .tls.skip_verify=true |
    .tls.cert="" |
    .tls.key=""
  ' /etc/nginx-agent/nginx-agent.conf

  # Optional: instance group (NIM) / config sync group (NGINX One)
  if [[ ! -z "$NGINX_AGENT_INSTANCE_GROUP" ]]; then
    PARM="${PARM} --instance-group $NGINX_AGENT_INSTANCE_GROUP"
  fi

  # Optional: comma-separated instance tags
  if [[ ! -z "$NGINX_AGENT_TAGS" ]]; then
    PARM="${PARM} --tags $NGINX_AGENT_TAGS"
  fi

  # Optional: control-plane authentication token
  if [[ ! -z "$NGINX_AGENT_SERVER_TOKEN" ]]; then
    yq -i '
      .server.token=strenv(NGINX_AGENT_SERVER_TOKEN)
    ' /etc/nginx-agent/nginx-agent.conf
  fi

  # Optional: agent log level (agent defaults to "info" when unset)
  if [[ ! -z "$NGINX_AGENT_LOG_LEVEL" ]]; then
    yq -i '
      .log.level=strenv(NGINX_AGENT_LOG_LEVEL)
    ' /etc/nginx-agent/nginx-agent.conf
  fi
fi

# NGINX App Protect WAF support daemons (only if baked into the image)
if [[ "$NAP_WAF" == "true" ]]; then
  export FQDN=127.0.0.1

  if [[ "$IS_UNPRIVILEGED" ]]; then
    # Already running as "nginx": start the WAF daemons directly
    /opt/app_protect/bin/bd_agent &
    /usr/share/ts/bin/bd-socket-plugin tmm_count 4 proc_cpuinfo_cpu_mhz 2000000 total_xml_memory 471859200 total_umu_max_size 3129344 sys_max_account_id 1024 no_static_config &

    # Unprivileged mode uses a non-privileged syslog port (10514)
    yq -i '
      .nap_monitoring.collector_buffer_size=50000 |
      .nap_monitoring.processor_buffer_size=50000 |
      .nap_monitoring.syslog_ip=strenv(FQDN) |
      .nap_monitoring.syslog_port=10514 |
      .extensions += ["nginx-app-protect","nap-monitoring"]
    ' /etc/nginx-agent/nginx-agent.conf

  else
    # Privileged mode can bind the standard syslog port (514)
    yq -i '
      .nap_monitoring.collector_buffer_size=50000 |
      .nap_monitoring.processor_buffer_size=50000 |
      .nap_monitoring.syslog_ip=strenv(FQDN) |
      .nap_monitoring.syslog_port=514 |
      .extensions += ["nginx-app-protect","nap-monitoring"]
    ' /etc/nginx-agent/nginx-agent.conf

    # Drop privileges: run the WAF daemons as the "nginx" user
    su - nginx -s /bin/bash -c "/opt/app_protect/bin/bd_agent &"
    su - nginx -s /bin/bash -c "/usr/share/ts/bin/bd-socket-plugin tmm_count 4 proc_cpuinfo_cpu_mhz 2000000 total_xml_memory 471859200 total_umu_max_size 3129344 sys_max_account_id 1024 no_static_config &"
  fi

  # Block until both WAF IPC pipes exist before handing them to nginx
  while ([ ! -e /opt/app_protect/pipe/app_protect_plugin_socket ] || [ ! -e /opt/app_protect/pipe/ts_agent_pipe ])
  do
    sleep 1
  done

  chown nginx:nginx /opt/app_protect/pipe/*

# Optional: let the agent publish precompiled WAF policies
if [[ "$NAP_WAF_PRECOMPILED_POLICIES" == "true" ]]; then
  yq -i '
    .nginx_app_protect.precompiled_publication=true
  ' /etc/nginx-agent/nginx-agent.conf
fi

fi

# Foreground process: either the agent, or an idle loop to keep the
# container alive when the agent is disabled
if [[ "$NGINX_AGENT_ENABLED" == "true" ]]; then
  if [[ "$IS_UNPRIVILEGED" ]]; then
    /usr/bin/nginx-agent $PARM
  else
    # Run the agent with supplementary group "nginx-agent"
    sg nginx-agent "/usr/bin/nginx-agent $PARM"
  fi
else
  while [ true ]; do sleep 3600; done
fi
NGINX_AGENT_INSTANCE_GROUP + # The Instance Group (NGINX Instance Manager) / Config Sync Group (NGINX One Console) + value: "lab" + - name: NGINX_AGENT_TAGS + # Comma-separated list of tags for the NGINX instance + value: "preprod,devops" + - name: NGINX_AGENT_LOG_LEVEL + # NGINX Agent loglevel - default is "info" + value: "info" + + # Optional if NGINX App Protect WAF is available in the docker image - set to "true" to enable + #- name: NAP_WAF + # value: "true" + #- name: NAP_WAF_PRECOMPILED_POLICIES + # value: "true" + +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - name: http + port: 80 + - name: api + port: 8080 + selector: + app: nginx + type: ClusterIP + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: nginx + annotations: + nginx.org/proxy-connect-timeout: "30s" + nginx.org/proxy-read-timeout: "20s" + nginx.org/client-max-body-size: "4m" + nginx.com/health-checks: "true" + labels: + app: nginx +spec: + ingressClassName: nginx + rules: + - host: nginx.yourdomain.tld + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: nginx + port: + number: 80 diff --git a/NGINX-Docker-Image-Builder/scripts/build.sh b/NGINX-Docker-Image-Builder/scripts/build.sh new file mode 100755 index 0000000..bc109e1 --- /dev/null +++ b/NGINX-Docker-Image-Builder/scripts/build.sh @@ -0,0 +1,114 @@ +#!/bin/bash + +# https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-docker/#docker_plus + +BANNER="NGINX Docker Image builder\n\n +This tool builds a Docker image to run NGINX Plus/Open Source, NGINX App Protect WAF and NGINX Agent\n\n +=== Usage:\n\n +$0 [options]\n\n +=== Options:\n\n +-h\t\t\t- This help\n +-t [target image]\t- The Docker image to be created\n +-C [file.crt]\t\t- Certificate to pull packages from the official NGINX repository\n +-K [file.key]\t\t- Key to pull packages from the official NGINX repository\n +-w\t\t\t- Add NGINX App Protect WAF 
(requires NGINX Plus)\n +-O\t\t\t- Use NGINX Open Source instead of NGINX Plus\n +-u\t\t\t- Build unprivileged image (only for NGINX Plus)\n +-a\t\t\t- Add NGINX Agent\n\n +=== Examples:\n\n +NGINX Plus and NGINX Agent image:\n + $0 -C nginx-repo.crt -K nginx-repo.key -t registry.ff.lan:31005/nginx-docker:plus-agent-root -a\n\n + +NGINX Plus, NGINX App Protect WAF and NGINX Agent image:\n + $0 -C nginx-repo.crt -K nginx-repo.key -t registry.ff.lan:31005/nginx-docker:plus-nap-agent-root -w -a\n\n + +NGINX Plus, NGINX App Protect WAF and NGINX Agent unprivileged image:\n + $0 -C nginx-repo.crt -K nginx-repo.key -t registry.ff.lan:31005/nginx-docker:plus-nap-agent-nonroot -w -u -a\n\n + +NGINX Opensource and NGINX Agent image:\n + $0 -O -t registry.ff.lan:31005/nginx-docker:oss-root -a\n" + +while getopts 'ht:C:K:awOu' OPTION +do + case "$OPTION" in + h) + echo -e $BANNER + exit + ;; + t) + IMAGENAME=$OPTARG + ;; + C) + NGINX_CERT=$OPTARG + ;; + K) + NGINX_KEY=$OPTARG + ;; + a) + NGINX_AGENT=true + ;; + w) + NAP_WAF=true + ;; + O) + NGINX_OSS=true + ;; + u) + UNPRIVILEGED=true + ;; + esac +done + +if [ -z "$1" ] +then + echo -e $BANNER + exit +fi + +if [ -z "${IMAGENAME}" ] +then + echo "Docker image name is required" + exit +fi + +if ([ -z "${NGINX_OSS}" ] && ([ -z "${NGINX_CERT}" ] || [ -z "${NGINX_KEY}" ]) ) +then + echo "NGINX certificate and key are required for automated installation" + exit +fi + +echo "=> Target docker image is $IMAGENAME" + +if [ "${NGINX_AGENT}" ] +then + echo "=> Building with NGINX Agent" +fi + +if ([ ! 
-z "${NAP_WAF}" ] && [ -z "${NGINX_OSS}" ]) +then + echo "=> Building with NGINX App Protect WAF" +fi + +if [ -z "${NGINX_OSS}" ] +then + if [ -z "${UNPRIVILEGED}" ] + then + DOCKERFILE_NAME=Dockerfile.plus + echo "=> Building with NGINX Plus" + else + DOCKERFILE_NAME=Dockerfile.plus.unprivileged + echo "=> Building with NGINX Plus unprivileged" + fi + + DOCKER_BUILDKIT=1 docker build --no-cache -f $DOCKERFILE_NAME \ + --secret id=nginx-key,src=$NGINX_KEY --secret id=nginx-crt,src=$NGINX_CERT \ + --build-arg NAP_WAF=$NAP_WAF --build-arg NGINX_AGENT=$NGINX_AGENT \ + -t $IMAGENAME . +else + echo "=> Building with NGINX Open Source" + DOCKER_BUILDKIT=1 docker build --no-cache -f Dockerfile.oss \ + --build-arg NGINX_AGENT=$NGINX_AGENT \ + -t $IMAGENAME . +fi + +echo "=> Build complete for $IMAGENAME" diff --git a/NGINX-Multicloud-Gateway/README.md b/NGINX-Multicloud-Gateway/README.md new file mode 100644 index 0000000..89deb88 --- /dev/null +++ b/NGINX-Multicloud-Gateway/README.md @@ -0,0 +1,279 @@ +# NGINX Multicloud Gateway + +## Description + +This NGINX "Multicloud Gateway" configuration allows dynamic proxying/redirection of REST API calls regardless of where the actual destination REST API is located (on premise datacenter / off-prem cloud provider). + +NGINX acts as a global gateway for REST API access, REST API mapping (what clients request vs where the API runs) is dynamically configured through the backend database. + +This repo provides a very simple example, in a production deployment it can be any backend/repository (SQL, noSQL, key/value, ...) that can be queried through REST API calls. + +The two supported modes of operation are rewrite and steering. 
+ +## Rewrite mode + + + +## Steering mode + + + +## Prerequisites + +- Kubernetes or Openshift cluster +- Linux VM with Docker to build all images +- Private registry to push the NGINX Plus and backend DB images +- The NGINX Plus image must include javascript (nginx-plus-module-njs) support + + +## Building the NGINX Plus image + +Refer to the official documentation at + +``` +https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-docker/#docker_plus +``` + +## Deploying this repository + +Build the backend DB: + +``` +cd test-backend-db +docker build --no-cache -t YOUR_PRIVATE_REGISTRY/mcgw-test-backend-db:1.0 . +docker push YOUR_PRIVATE_REGISTRY/mcgw-test-backend-db:1.0 +``` + +Spin up NGINX Plus: + +1. Update the backend DB "image" line referenced in 1.mcgw-test-backend-db.yaml +2. Update the NGINX Plus "image" line referenced in 5.nginx-mcgw.yaml +3. Run the following commands + +``` +cd mcgw +kubectl apply -f 0.mcgw.ns.yaml +cd certs +./cert-install.sh install +cd .. 
+kubectl apply -f 1.mcgw-test-backend-db.yaml +kubectl apply -f 2.nginx.conf.yaml +kubectl apply -f 3.mcgw.js.yaml +kubectl apply -f 4.mcgw.conf.yaml +kubectl apply -f 5.nginx-mcgw.yaml +``` + +## Cleaning up this repository + +``` +cd mcgw +kubectl delete -f 0.mcgw.ns.yaml +``` + +## Namespace check + +``` +$ kubectl get all -n nginx-mcgw +NAME READY STATUS RESTARTS AGE +pod/mcgw-test-backend-db-5495877b88-hfq95 1/1 Running 0 56s +pod/nginx-mcgw-765c664-z2wqb 1/1 Running 0 25s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/mcgw-test-backend-db ClusterIP 10.100.198.14 5000/TCP 56s +service/nginx-mcgw ClusterIP 10.101.80.253 80/TCP,8080/TCP 25s + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/mcgw-test-backend-db 1/1 1 1 56s +deployment.apps/nginx-mcgw 1/1 1 1 25s + +NAME DESIRED CURRENT READY AGE +replicaset.apps/mcgw-test-backend-db-5495877b88 1 1 1 56s +replicaset.apps/nginx-mcgw-765c664 1 1 1 25s +``` + + +## Backend DB test + +``` +$ curl -ks "https://db.mcgw.ff.lan/mcgw?fqdn=api.ff.lan&uri=/getmyip" | jq +[ + { + "enabled": "true", + "fqdn": "api.ff.lan", + "rewrite": "http://api.ipify.org", + "title": "Gets my IP in plaintext", + "uri": "/getmyip" + } +] +``` + +``` +$ curl -ks "https://db.mcgw.ff.lan/mcgw?fqdn=api.ff.lan&uri=/getmyip/json" | jq +[ + { + "enabled": "true", + "fqdn": "api.ff.lan", + "rewrite": "http://api.ipify.org?format=json", + "title": "Gets my IP in json", + "uri": "/getmyip/json" + } +] +``` + +``` +$ curl -ks "https://db.mcgw.ff.lan/mcgw?fqdn=api.ff.lan&uri=/getmyip/json/callback" | jq +[ + { + "enabled": "true", + "fqdn": "api.ff.lan", + "rewrite": "http://api.ipify.org?format=jsonp", + "title": "Gets my IP in json with callback", + "uri": "/getmyip/json/callback" + } +] +``` + +## Log level configuration + +Log level can optionally be configured at runtime for each FQDN and URI accessed by clients. 
The current version supports either "full" or "basic", as defined in 4.mcgw.conf.yaml + +``` +$ curl -ki -X POST https://api.mcgw.ff.lan/api/6/http/keyvals/dynamic_loglevel -d '{"api.ff.lan:/getmyip":"full"}' +HTTP/1.1 201 Created +Server: nginx/1.19.5 +Date: Sun, 22 Aug 2021 22:33:04 GMT +Content-Length: 0 +Connection: keep-alive +Location: http://api.mcgw.ff.lan:8080/api/6/http/keyvals/dynamic_loglevel/ +``` + +``` +$ curl -ki -X POST https://api.mcgw.ff.lan/api/6/http/keyvals/dynamic_loglevel -d '{"api.ff.lan:/getmyip/json":"basic"}' +HTTP/1.1 201 Created +Server: nginx/1.19.5 +Date: Sun, 22 Aug 2021 22:33:28 GMT +Content-Length: 0 +Connection: keep-alive +Location: http://api.mcgw.ff.lan:8080/api/6/http/keyvals/dynamic_loglevel/ +``` + +Retrieve currently configured log levels: + +``` +$ curl -ks -X GET https://api.mcgw.ff.lan/api/6/http/keyvals/dynamic_loglevel | jq +{ + "api.ff.lan:/getmyip": "full", + "api.ff.lan:/getmyip/json": "basic" +} +``` + +## Multicloud Gateway testing + +1. Rewrite mode + +Client supporting HTTP/302 calling a REST API: + +``` +$ curl -ki -H "X-REDIRECT-SUPPORT: true" https://api.ff.lan/getmyip +HTTP/1.1 302 Moved Temporarily +Server: nginx/1.19.5 +Date: Sun, 22 Aug 2021 22:55:50 GMT +Content-Type: text/html +Content-Length: 146 +Connection: keep-alive +Location: http://api.ipify.org + + +302 Found + +

+<center><h1>302 Found</h1></center>
+<hr><center>nginx/1.19.10</center>
+</body>
+</html>
+ + +``` + +NGINX mcgw log shows the following. The last line is the access_log in "basic" format, as configured above. + +``` +$ kubectl logs -l app=nginx-mcgw -n nginx-mcgw -f +2021/08/22 23:07:53 [warn] 7#7: *7 js: ------------------------------ +2021/08/22 23:07:53 [warn] 7#7: *7 js: Client[10.244.2.229] Method[GET] Host[api.ff.lan] URI[/getmyip] QueryString[undefined] Body[undefined] +2021/08/22 23:07:53 [warn] 7#7: *7 js: subReqCallback got 200 for [/getmyip] +2021/08/22 23:07:53 [warn] 7#7: *7 js: JSON reply: URI[/dbQuery/mcgw?fqdn=api.ff.lan&uri=/getmyip] status[200] body[[{"enabled":"true","fqdn":"api.ff.lan","rewrite":"http://api.ipify.org","title":"Gets my IP in plaintext","uri":"/getmyip"}] +] +2021/08/22 23:07:53 [warn] 7#7: *7 js: Found 1 DB records for [/getmyip] +2021/08/22 23:07:53 [warn] 7#7: *7 js: Rewrite rule [api.ff.lan/getmyip] -> [http://api.ipify.org] X-REDIRECT-SUPPORT [true] +2021/08/22 23:07:53 [warn] 7#7: *7 js: Redirect mode 302 to [http://api.ipify.org] +[MCGW full] 10.244.2.229 [22/Aug/2021:23:07:53 +0000] api.ff.lan "GET /getmyip HTTP/1.1" "-" 302 146 "-" "curl/7.74.0" +``` + +2. Steering mode + +Client with no HTTP/302 support calling a REST API: + +``` +$ curl -ki https://api.ff.lan/getmyip +HTTP/1.1 200 OK +Server: nginx/1.19.5 +Date: Sun, 22 Aug 2021 22:58:39 GMT +Content-Type: text/plain +Content-Length: 12 +Connection: keep-alive +Vary: Origin +Via: 1.1 vegur + +192.168.1.18 +``` + +NGINX mcgw log shows the following. The last line is the access_log in "basic" format, as configured above. 
+ +``` +$ kubectl logs -l app=nginx-mcgw -n nginx-mcgw -f +2021/08/22 23:10:22 [warn] 7#7: *15 js: ------------------------------ +2021/08/22 23:10:22 [warn] 7#7: *15 js: Client[10.244.1.179] Method[GET] Host[api.ff.lan] URI[/getmyip] QueryString[undefined] Body[undefined] +2021/08/22 23:10:22 [warn] 7#7: *15 js: subReqCallback got 200 for [/getmyip] +2021/08/22 23:10:22 [warn] 7#7: *15 js: JSON reply: URI[/dbQuery/mcgw?fqdn=api.ff.lan&uri=/getmyip] status[200] body[[{"enabled":"true","fqdn":"api.ff.lan","rewrite":"http://api.ipify.org","title":"Gets my IP in plaintext","uri":"/getmyip"}] +] +2021/08/22 23:10:22 [warn] 7#7: *15 js: Found 1 DB records for [/getmyip] +2021/08/22 23:10:22 [warn] 7#7: *15 js: Rewrite rule [api.ff.lan/getmyip] -> [http://api.ipify.org] X-REDIRECT-SUPPORT [undefined] +2021/08/22 23:10:22 [warn] 7#7: *15 js: Steering mode to [http://api.ipify.org] +2021/08/22 23:10:22 [warn] 7#7: *15 js: POSTSCHEME[api.ipify.org] QSTRING[undefined]/[undefined] FULLURI[api.ipify.org,] FQDNURI[api.ipify.org/] REWRITEURI[] -- [] +2021/08/22 23:10:22 [warn] 7#7: *15 js: Rewrite [http://api.ipify.org] -> scheme[http] fqdn[api.ipify.org] URI[/] queryString[] -> [http://api.ipify.org/] +2021/08/22 23:10:23 [warn] 7#7: *15 js: steeringModeSubReqCallback got [200] [192.168.1.18] +[MCGW full] 10.244.1.179 [22/Aug/2021:23:10:23 +0000] api.ff.lan "GET /getmyip HTTP/1.1" "-" 200 12 "-" "curl/7.74.0" +``` + +3. Steering mode + +Client with no HTTP/302 support calling a REST API: + +``` +$ curl -ki https://api.ff.lan/getmyip/json +HTTP/1.1 200 OK +Server: nginx/1.19.5 +Date: Sun, 22 Aug 2021 23:10:58 GMT +Content-Type: text/plain +Content-Length: 12 +Connection: keep-alive +Vary: Origin +Via: 1.1 vegur + +192.168.1.18 +``` + +NGINX mcgw log shows the following. The last line is the access_log in "basic" format, as configured above. 
+ +``` +2021/08/22 23:10:58 [warn] 7#7: *17 js: ------------------------------ +2021/08/22 23:10:58 [warn] 7#7: *17 js: Client[10.244.1.179] Method[GET] Host[api.ff.lan] URI[/getmyip/json] QueryString[undefined] Body[undefined] +2021/08/22 23:10:58 [warn] 7#7: *17 js: subReqCallback got 200 for [/getmyip/json] +2021/08/22 23:10:58 [warn] 7#7: *17 js: JSON reply: URI[/dbQuery/mcgw?fqdn=api.ff.lan&uri=/getmyip/json] status[200] body[[{"enabled":"true","fqdn":"api.ff.lan","rewrite":"http://api.ipify.org?format=json","title":"Gets my IP in json","uri":"/getmyip/json"}] +] +2021/08/22 23:10:58 [warn] 7#7: *17 js: Found 1 DB records for [/getmyip/json] +2021/08/22 23:10:58 [warn] 7#7: *17 js: Rewrite rule [api.ff.lan/getmyip/json] -> [http://api.ipify.org?format=json] X-REDIRECT-SUPPORT [undefined] +2021/08/22 23:10:58 [warn] 7#7: *17 js: Steering mode to [http://api.ipify.org?format=json] +2021/08/22 23:10:58 [warn] 7#7: *17 js: POSTSCHEME[api.ipify.org?format=json] QSTRING[format=json]/[undefined] FULLURI[api.ipify.org,] FQDNURI[api.ipify.org/] REWRITEURI[] -- [] +2021/08/22 23:10:58 [warn] 7#7: *17 js: Rewrite [http://api.ipify.org?format=json] -> scheme[http] fqdn[api.ipify.org] URI[/] queryString[] -> [http://api.ipify.org/] +2021/08/22 23:10:58 [warn] 7#7: *17 js: steeringModeSubReqCallback got [200] [192.168.1.18] +[MCGW basic] 10.244.1.179 [22/Aug/2021:23:10:58 +0000] api.ff.lan "GET /getmyip/json HTTP/1.1" 200 +``` diff --git a/NGINX-Multicloud-Gateway/images/mcgw-rewrite.jpg b/NGINX-Multicloud-Gateway/images/mcgw-rewrite.jpg new file mode 100644 index 0000000..43a518e Binary files /dev/null and b/NGINX-Multicloud-Gateway/images/mcgw-rewrite.jpg differ diff --git a/NGINX-Multicloud-Gateway/images/mcgw-steering.jpg b/NGINX-Multicloud-Gateway/images/mcgw-steering.jpg new file mode 100644 index 0000000..f8b2ea2 Binary files /dev/null and b/NGINX-Multicloud-Gateway/images/mcgw-steering.jpg differ diff --git a/NGINX-Multicloud-Gateway/mcgw/mcgw/0.mcgw.ns.yaml 
b/NGINX-Multicloud-Gateway/mcgw/mcgw/0.mcgw.ns.yaml new file mode 100644 index 0000000..28e2bcd --- /dev/null +++ b/NGINX-Multicloud-Gateway/mcgw/mcgw/0.mcgw.ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: nginx-mcgw diff --git a/NGINX-Multicloud-Gateway/mcgw/mcgw/1.mcgw-test-backend-db.yaml b/NGINX-Multicloud-Gateway/mcgw/mcgw/1.mcgw-test-backend-db.yaml new file mode 100644 index 0000000..e0a679b --- /dev/null +++ b/NGINX-Multicloud-Gateway/mcgw/mcgw/1.mcgw-test-backend-db.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mcgw-test-backend-db + namespace: nginx-mcgw + labels: + app: mcgw-test-backend-db +spec: + selector: + matchLabels: + app: mcgw-test-backend-db + replicas: 1 + template: + metadata: + labels: + app: mcgw-test-backend-db + spec: + containers: + - name: mcgw-test-backend-db + image: YOUR_PRIVATE_REGISTRY/mcgw-test-backend-db:1.0 + ports: + - containerPort: 5000 + +--- +apiVersion: v1 +kind: Service +metadata: + name: mcgw-test-backend-db + namespace: nginx-mcgw + labels: + app: mcgw-test-backend-db +spec: + ports: + - port: 5000 + selector: + app: mcgw-test-backend-db + type: ClusterIP + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: mcgw-test-backend-db + namespace: nginx-mcgw + annotations: + nginx.org/proxy-connect-timeout: "30s" + nginx.org/proxy-read-timeout: "20s" + nginx.org/client-max-body-size: "4m" + nginx.com/health-checks: "true" + labels: + app: nginx-mcgw +spec: + ingressClassName: nginx + tls: + - hosts: + - db.mcgw.ff.lan + secretName: mcgw.ff.lan + rules: + - host: db.mcgw.ff.lan + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: mcgw-test-backend-db + port: + number: 5000 diff --git a/NGINX-Multicloud-Gateway/mcgw/mcgw/2.nginx.conf.yaml b/NGINX-Multicloud-Gateway/mcgw/mcgw/2.nginx.conf.yaml new file mode 100644 index 0000000..42d05da --- /dev/null +++ b/NGINX-Multicloud-Gateway/mcgw/mcgw/2.nginx.conf.yaml @@ -0,0 +1,69 
@@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-conf + namespace: nginx-mcgw +data: + nginx.conf: |- + user nginx; + worker_processes auto; + + load_module modules/ngx_http_js_module.so; + load_module modules/ndk_http_module.so; + + error_log /var/log/nginx/error.log notice; + pid /var/run/nginx.pid; + + events { + worker_connections 1024; + } + + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + js_import mcgw from conf.d/mcgw.js; + + include /etc/nginx/conf.d/*.conf; + } + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-api-conf + namespace: nginx-mcgw +data: + api.conf: |- + server { + listen 8080; + server_name api.mcgw.ff.lan; + + location /api/ { + api write=on; + } + + location /dashboard.html { + root /usr/share/nginx/html; + } + access_log off; + } + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-default-conf + namespace: nginx-mcgw +data: + default.conf: |- diff --git a/NGINX-Multicloud-Gateway/mcgw/mcgw/3.mcgw.js.yaml b/NGINX-Multicloud-Gateway/mcgw/mcgw/3.mcgw.js.yaml new file mode 100644 index 0000000..bef53f7 --- /dev/null +++ b/NGINX-Multicloud-Gateway/mcgw/mcgw/3.mcgw.js.yaml @@ -0,0 +1,130 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: mcgw-js + namespace: nginx-mcgw +data: + mcgw.js: |- + export default {dbQuery}; + + function dbQuery(r) { + var dbBucket="mcgw"; + + var rHost=r.headersIn['host']; + var rUri=r.uri; + + r.warn('------------------------------'); + r.warn('Client['+r.remoteAddress+'] Method['+r.method+'] Host['+rHost+'] URI['+rUri+'] QueryString['+r.variables.query_string+'] Body['+r.requestText+']'); + + // Queries the backend db + 
r.subrequest('/dbQuery/'+dbBucket+'?fqdn='+rHost+'&uri='+rUri,'',subReqCallback); + + function subReqCallback(reply) { + if(reply.status!=200) { + // Rule not found + + if(rUri.length==0) { + r.warn('Rule not found for ['+rUri+'] - returning 404'); + r.return(404); + } else { + var lastSlash=rUri.lastIndexOf("/"); + + if(lastSlash==-1) { + r.warn('Rule not found for ['+rUri+'] - returning 404'); + r.return(404); + } else { + rUri=rUri.substring(0,lastSlash); + r.warn('Rule not found, trying ['+rUri+']'); + + r.subrequest('/dbQuery/'+dbBucket+'?fqdn='+rHost+'&uri='+rUri,'',subReqCallback); + } + } + } else { + r.warn('subReqCallback got 200 for ['+rUri+']'); + r.warn('JSON reply: URI['+reply.uri+'] status['+reply.status.toString()+'] body['+reply.responseText+']'); + + // REST API backend returns an array [{json}], the first element shall be parsed + var body = JSON.parse(reply.responseText) + r.warn('Found '+body.length+' DB records for ['+rUri+']'); + var rule=body[0]; + + if (rule.enabled=='false') { + // Rule is disabled + r.warn('Rule is disabled - returning 404'); + r.return(404); + } else { + r.warn('Rewrite rule ['+r.headersIn['host']+r.uri+'] -> ['+rule.rewrite+'] X-REDIRECT-SUPPORT ['+r.headersIn['X-REDIRECT-SUPPORT']+']'); + + if(r.headersIn['X-REDIRECT-SUPPORT']=='true') { + // Client supports HTTP 302 - Redirect mode + r.warn('Redirect mode 302 to ['+rule.rewrite+']'); + r.return(302,rule.rewrite); + r.mcgwLogging='REDIRECT_MODE'; + } else { + // Client does not support HTTP 302 - Steering mode + + r.warn('Steering mode to ['+rule.rewrite+']'); + + // Parses the rewrite URI into scheme, fqdn and URI + // [http(s)]://fqdn[/uri][?query_string] + + // https://fqdn/a/b/c?var=value + var rewriteTokens = rule.rewrite.split('://'); + var rewriteScheme = rewriteTokens[0]; + + // rewritePostScheme = fqdn/a/b/c?var=value + var rewritePostScheme = rewriteTokens[1]; + var rewriteQueryString = rewritePostScheme.split('?')[1]; + var rewriteFQDNURI = 
rewritePostScheme.split('?')[0]+'/'; + + var rewriteFQDN = rewriteFQDNURI.split('/')[0]; + var fullURI = rewriteFQDNURI.split('/'); + + //var rewriteURI = rewriteFQDNURI.split('/')[1]; + var rewriteURI = fullURI.slice(1,-1); + var steeredURI = rewriteURI.join('/'); + + r.warn('POSTSCHEME['+rewritePostScheme+'] QSTRING['+rewriteQueryString+']/['+r.variables.query_string+'] FULLURI['+fullURI+'] FQDNURI['+rewriteFQDNURI+'] REWRITEURI['+rewriteURI+'] -- ['+steeredURI+']'); + + if (steeredURI == undefined) { + steeredURI=''; + } else { + steeredURI='/'+steeredURI; + } + if (rewriteQueryString == undefined) { + rewriteQueryString=''; + } else { + rewriteQueryString='?'+rewriteQueryString; + } + if (r.variables.query_string == undefined) { + rewriteQueryString=''; + } else { + rewriteQueryString='?'+rewriteQueryString+'&'+r.variables.query_string; + } + + var targetRewrite=rewriteScheme+'://'+rewriteFQDN+steeredURI+rewriteQueryString; + + r.warn('Rewrite ['+rule.rewrite+'] -> scheme['+rewriteScheme+'] fqdn['+rewriteFQDN+'] URI['+steeredURI+'] queryString['+rewriteQueryString+'] -> ['+targetRewrite+']'); + + // Proxies the client request + r.subrequest('/steeringMode/'+targetRewrite,{method: r.method, args: ''},steeringModeSubReqCallback); + } + } + } + + function steeringModeSubReqCallback(steeringReply) { + // Steering mode - returns the steered API response back to the client + + r.warn('steeringModeSubReqCallback got ['+steeringReply.status+'] ['+steeringReply.responseText+']'); + r.status=steeringReply.status; + + for (var header in steeringReply.headersOut) { + r.headersOut[header] = steeringReply.headersOut[header]; + } + + r.sendHeader(); + r.send(steeringReply.responseText); + r.finish(); + } + } + } diff --git a/NGINX-Multicloud-Gateway/mcgw/mcgw/4.mcgw.conf.yaml b/NGINX-Multicloud-Gateway/mcgw/mcgw/4.mcgw.conf.yaml new file mode 100644 index 0000000..6b23a59 --- /dev/null +++ b/NGINX-Multicloud-Gateway/mcgw/mcgw/4.mcgw.conf.yaml @@ -0,0 +1,71 @@ +apiVersion: v1 
+kind: ConfigMap +metadata: + name: mcgw-conf + namespace: nginx-mcgw +data: + mcgw.conf: |- + upstream upstream-db { + zone upstream-db 64k; + + # Upstream DB REST API endpoint + server mcgw-test-backend-db.nginx-mcgw.svc.cluster.local:5000; + } + + proxy_cache_path /var/tmp/cache levels=1:2 keys_zone=dbQueryCache:10m max_size=20m inactive=1m use_temp_path=off; + proxy_cache_key "$scheme://$host$request_uri$query_string"; + + log_format mcgw-basic '[MCGW basic] $remote_addr [$time_local] $http_host ' + '"$request" $status'; + + log_format mcgw-full '[MCGW full] $remote_addr [$time_local] $http_host ' + '"$request" "$request_body" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent"'; + + # Keyval zone for dynamic log level configuration + # key: [request FQDN]:[request URI] - value: [none|basic|full] + keyval_zone zone=dynamic_loglevel:1M state=/var/tmp/dynamic_loglevel.json type=prefix; + keyval $host:$uri $dynamic_loglevel zone=dynamic_loglevel; + + server { + server_name $host; + resolver 8.8.8.8; + listen 80; + + location / { + js_content mcgw.dbQuery; + + if ( $dynamic_loglevel = basic ) { + access_log /var/log/nginx/access.log mcgw-basic; + } + if ( $dynamic_loglevel = full ) { + access_log /var/log/nginx/access.log mcgw-full; + } + } + + location ~ /dbQuery/(.*) { + internal; + + proxy_cache dbQueryCache; + proxy_cache_lock on; + proxy_cache_valid 200 1m; + proxy_ignore_headers Set-Cookie Cache-Control; + proxy_cache_methods GET; + + proxy_set_header Host db.mcgw.ff.lan; + proxy_pass http://upstream-db/$1$query_string; + } + + location ~ /steeringMode/(.*) { + internal; + + proxy_buffer_size 256k; + proxy_buffers 4 256k; + proxy_busy_buffers_size 256k; + subrequest_output_buffer_size 256k; + + proxy_ssl_session_reuse off; + proxy_ssl_server_name on; + proxy_pass $1$query_string; + } + } diff --git a/NGINX-Multicloud-Gateway/mcgw/mcgw/5.nginx-mcgw.yaml b/NGINX-Multicloud-Gateway/mcgw/mcgw/5.nginx-mcgw.yaml new file mode 100644 index 
0000000..e7669d6 --- /dev/null +++ b/NGINX-Multicloud-Gateway/mcgw/mcgw/5.nginx-mcgw.yaml @@ -0,0 +1,126 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-mcgw + namespace: nginx-mcgw + labels: + app: nginx-mcgw +spec: + selector: + matchLabels: + app: nginx-mcgw + replicas: 1 + template: + metadata: + labels: + app: nginx-mcgw + spec: + containers: + - name: nginx-mcgw + image: YOUR_PRIVATE_REGISTRY/nginxplus-js + ports: + - containerPort: 80 + - containerPort: 8080 + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: nginx-default-conf + mountPath: /etc/nginx/conf.d/default.conf + subPath: default.conf + - name: nginx-api-conf + mountPath: /etc/nginx/conf.d/api.conf + subPath: api.conf + - name: mcgw-js + mountPath: /etc/nginx/conf.d/mcgw.js + subPath: mcgw.js + - name: mcgw-conf + mountPath: /etc/nginx/conf.d/mcgw.conf + subPath: mcgw.conf + volumes: + - name: nginx-conf + configMap: + name: nginx-conf + - name: nginx-default-conf + configMap: + name: nginx-default-conf + - name: nginx-api-conf + configMap: + name: nginx-api-conf + - name: mcgw-conf + configMap: + name: mcgw-conf + - name: mcgw-js + configMap: + name: mcgw-js + +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-mcgw + namespace: nginx-mcgw + labels: + app: nginx-mcgw +spec: + ports: + - name: http + port: 80 + - name: api + port: 8080 + selector: + app: nginx-mcgw + type: ClusterIP + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: nginx-mcgw + namespace: nginx-mcgw + annotations: + nginx.org/proxy-connect-timeout: "30s" + nginx.org/proxy-read-timeout: "20s" + nginx.org/client-max-body-size: "4m" + nginx.com/health-checks: "true" + labels: + app: nginx-mcgw +spec: + ingressClassName: nginx + tls: + - hosts: + - mcgw.ff.lan + - api.mcgw.ff.lan + - api.ff.lan + secretName: mcgw.ff.lan + rules: + - host: mcgw.ff.lan + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: 
nginx-mcgw + port: + number: 80 + - host: api.mcgw.ff.lan + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: nginx-mcgw + port: + number: 8080 + - host: api.ff.lan + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: nginx-mcgw + port: + number: 80 diff --git a/NGINX-Multicloud-Gateway/mcgw/mcgw/certs/cert-install.sh b/NGINX-Multicloud-Gateway/mcgw/mcgw/certs/cert-install.sh new file mode 100755 index 0000000..91aa4b7 --- /dev/null +++ b/NGINX-Multicloud-Gateway/mcgw/mcgw/certs/cert-install.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +case $1 in + 'clean') + kubectl delete secret mcgw.ff.lan -n nginx-mcgw + rm mcgw.ff.lan.key mcgw.ff.lan.crt + ;; + 'install') + openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout mcgw.ff.lan.key -out mcgw.ff.lan.crt -config mcgw.ff.lan.cnf + kubectl create secret tls mcgw.ff.lan --key mcgw.ff.lan.key --cert mcgw.ff.lan.crt -n nginx-mcgw + ;; + *) + echo "$0 [clean|install]" + exit + ;; +esac diff --git a/NGINX-Multicloud-Gateway/mcgw/mcgw/certs/mcgw.ff.lan.cnf b/NGINX-Multicloud-Gateway/mcgw/mcgw/certs/mcgw.ff.lan.cnf new file mode 100644 index 0000000..06adf9e --- /dev/null +++ b/NGINX-Multicloud-Gateway/mcgw/mcgw/certs/mcgw.ff.lan.cnf @@ -0,0 +1,19 @@ +[req] +default_bits = 2048 +prompt = no +default_md = sha256 +x509_extensions = v3_req +distinguished_name = dn + +[dn] +emailAddress = my@emailaddress.com +CN = mcgw.ff.lan + +[v3_req] +subjectAltName = @alt_names + +[alt_names] +DNS.1 = mcgw.ff.lan +DNS.2 = db.mcgw.ff.lan +DNS.3 = api.mcgw.ff.lan +DNS.4 = api.ff.lan diff --git a/NGINX-Multicloud-Gateway/test-backend-db/Dockerfile b/NGINX-Multicloud-Gateway/test-backend-db/Dockerfile new file mode 100644 index 0000000..fffd7ed --- /dev/null +++ b/NGINX-Multicloud-Gateway/test-backend-db/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:latest +RUN apt-get update + +RUN apt-get install -y -q build-essential python3-pip python3-dev python3-simplejson git +RUN pip3 install --upgrade pip +RUN 
pip3 install --upgrade virtualenv + +RUN mkdir deployment +RUN virtualenv /deployment/env/ +RUN /deployment/env/bin/python -m pip install --upgrade pip +RUN /deployment/env/bin/pip3 install flask +WORKDIR /deployment +COPY app.py . + +CMD env/bin/python3 app.py diff --git a/NGINX-Multicloud-Gateway/test-backend-db/app.py b/NGINX-Multicloud-Gateway/test-backend-db/app.py new file mode 100755 index 0000000..53794c3 --- /dev/null +++ b/NGINX-Multicloud-Gateway/test-backend-db/app.py @@ -0,0 +1,55 @@ +#!flask/bin/python +from flask import Flask, jsonify, abort, make_response, request + +app = Flask(__name__) + +rules = [ + { + 'fqdn': u'api.ff.lan', + 'uri': u'/getmyip', + 'title': u'Gets my IP in plaintext', + 'rewrite': u'http://api.ipify.org', + 'enabled': u'true' + }, + { + 'fqdn': u'api.ff.lan', + 'uri': u'/getmyip/json', + 'title': u'Gets my IP in json', + 'rewrite': u'http://api.ipify.org?format=json', + 'enabled': u'true' + }, + { + 'fqdn': u'api.ff.lan', + 'uri': u'/getmyip/json/callback', + 'title': u'Gets my IP in json with callback', + 'rewrite': u'http://api.ipify.org?format=jsonp', + 'enabled': u'true' + } +] + +@app.route('/mcgw', methods=['GET']) +def get_key_query_string(): + fqdn = request.args.get('fqdn') + uri = request.args.get('uri') + rule = [rule for rule in rules if rule['fqdn'] == fqdn and rule['uri'] == uri] + if len(rule) == 0: + abort(404) + return jsonify([rule[0]]) + +@app.route('/mcgw/keys//', methods=['GET']) +def get_key(fqdn,uri): + rule = [rule for rule in rules if rule['fqdn'] == fqdn and rule['uri'] == uri] + if len(rule) == 0: + abort(404) + return jsonify({'rule': rule[0]}) + +@app.route('/mcgw/keys', methods=['GET']) +def get_all_keys(): + return jsonify({'rules': rules}) + +@app.errorhandler(404) +def not_found(error): + return make_response(jsonify({'error': 'Not found'}), 404) + +if __name__ == '__main__': + app.run(host='0.0.0.0') diff --git a/NGINX-NIM-Docker/Dockerfile.automated b/NGINX-NIM-Docker/Dockerfile.automated new 
file mode 100644 index 0000000..d5daba1 --- /dev/null +++ b/NGINX-NIM-Docker/Dockerfile.automated @@ -0,0 +1,33 @@ +FROM ubuntu:20.04 + +ARG ADD_PUM + +# Initial setup +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y -q build-essential git nano curl jq wget gawk \ + nginx lsb-release rsyslog systemd apt-transport-https ca-certificates netcat sudo && \ + mkdir -p /etc/ssl/nginx /deployment + +COPY ./container/startNIM.sh /deployment/ + +RUN --mount=type=secret,id=nginx-crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ + --mount=type=secret,id=nginx-key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ + set -x \ + && chmod +x /deployment/startNIM.sh \ + && printf "deb https://pkgs.nginx.com/nms/ubuntu `lsb_release -cs` nginx-plus\n" > /etc/apt/sources.list.d/nms.list \ + && wget -q -O /etc/apt/apt.conf.d/90pkgs-nginx https://cs.nginx.com/static/files/90pkgs-nginx \ + && wget -O /tmp/nginx_signing.key https://cs.nginx.com/static/keys/nginx_signing.key \ + && apt-key add /tmp/nginx_signing.key \ + && apt-get update \ + && apt-get install -y nms-instance-manager \ + && curl -s http://hg.nginx.org/nginx.org/raw-file/tip/xml/en/security_advisories.xml > /usr/share/nms/cve.xml \ + # Optional WAF Policy Compiler + && if [ ! 
-z "${ADD_PUM}" ] ; then \ + apt-get -y install nms-nap-compiler-$ADD_PUM; fi \ + # Set permissions + && chmod +x /etc/nms/scripts/*.sh \ + && wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_`dpkg --print-architecture` -O /usr/bin/yq \ + && chmod +x /usr/bin/yq + +WORKDIR /deployment +CMD /deployment/startNIM.sh diff --git a/NGINX-NIM-Docker/Dockerfile.manual b/NGINX-NIM-Docker/Dockerfile.manual new file mode 100644 index 0000000..b09700a --- /dev/null +++ b/NGINX-NIM-Docker/Dockerfile.manual @@ -0,0 +1,40 @@ +FROM ubuntu:20.04 + +ARG NIM_DEBFILE +ARG SM_IMAGE=nim-files/.placeholder +ARG PUM_IMAGE=nim-files/.placeholder + +# Initial setup +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y -q build-essential git nano curl jq wget gawk \ + nginx lsb-release rsyslog systemd apt-transport-https ca-certificates netcat && \ + mkdir -p /deployment/setup + +# NGINX Instance Manager 2.4.0+ +COPY $NIM_DEBFILE /deployment/setup/nim.deb +COPY $SM_IMAGE /deployment/setup/sm.deb +COPY $PUM_IMAGE /deployment/setup/pum.deb + +COPY ./container/startNIM.sh /deployment/ +RUN chmod +x /deployment/startNIM.sh + +WORKDIR /deployment/setup + +COPY $NIM_DEBFILE /deployment/setup/nim.deb + +RUN apt-get -y install /deployment/setup/nim.deb && \ + curl -s http://hg.nginx.org/nginx.org/raw-file/tip/xml/en/security_advisories.xml > /usr/share/nms/cve.xml \ + # Optional Security Monitoring + && if [ "$SM_IMAGE" != "nim-files/.placeholder" ] ; then \ + apt-get -y install /deployment/setup/sm.deb; fi \ + # Optional WAF Policy Compiler + && if [ "$PUM_IMAGE" != "nim-files/.placeholder" ] ; then \ + apt-get -y install /deployment/setup/pum.deb; fi \ + # Set permissions + && chmod +x /etc/nms/scripts/*.sh \ + && wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_`dpkg --print-architecture` -O /usr/bin/yq \ + && chmod +x /usr/bin/yq \ + && rm -r /deployment/setup + +WORKDIR /deployment +CMD /deployment/startNIM.sh diff --git 
a/NGINX-NIM-Docker/README.md b/NGINX-NIM-Docker/README.md new file mode 100644 index 0000000..8b148dc --- /dev/null +++ b/NGINX-NIM-Docker/README.md @@ -0,0 +1,295 @@ +# NGINX Instance Manager for Docker + +This repository helps deploying NGINX Instance Manager on containerized clusters by creating a docker image. + +It is also available as part of [official NGINX Demos](https://github.com/nginxinc/NGINX-Demos/tree/master/nginx-nim-docker) + +## Docker image creation + +Docker image creation is supported for: + +- [NGINX Instance Manager](https://docs.nginx.com/nginx-instance-manager/) 2.4.0+ +- [Security Monitoring](https://docs.nginx.com/nginx-management-suite/security/) 1.0.0+ +- [NGINX App Protect WAF compiler](https://docs.nginx.com/nginx-management-suite/nim/how-to/app-protect/setup-waf-config-management) + +The image can optionally be built with [Second Sight](https://github.com/F5Networks/SecondSight) support + +## Tested releases + +This repository has been tested on `amd64` and `arm64` architectures with: + +- NGINX Instance Manager 2.4.0+ +- Security Monitoring 1.0.0+ +- NGINX App Protect WAF compiler v3.1088.2+ + +## Prerequisites + +This repository has been tested with: + +- Docker 20.10+ to build the image +- Private registry to push the target Docker image +- Kubernetes cluster with dynamic storage provisioner enabled: see the [example](contrib/pvc-provisioner) +- NGINX Ingress Controller with `VirtualServer` CRD support (see https://docs.nginx.com/nginx-ingress-controller/configuration/virtualserver-and-virtualserverroute-resources/) +- Access to F5/NGINX downloads to fetch NGINX Instance Manager 2.4.0+ installation .deb file (when running in manual mode) +- Valid NGINX license certificate and key to fetch NGINX Instance Manager packages (when running in automated mode) +- Linux host running Docker to build the image + +## How to build + +The install script can be used to build the Docker image using automated or manual install: + +``` +$ 
./scripts/buildNIM.sh +NGINX Instance Manager Docker image builder + + This tool builds a Docker image to run NGINX Instance Manager + + === Usage: + + ./scripts/buildNIM.sh [options] + + === Options: + + -h - This help + -t [target image] - Docker image name to be created + -s - Enable Second Sight (https://github.com/F5Networks/SecondSight/) - optional + + Manual build: + + -n [filename] - NGINX Instance Manager .deb package filename + -w [filename] - Security Monitoring .deb package filename - optional + -p [filename] - WAF policy compiler .deb package filename - optional + + Automated build: + + -i - Automated build - requires cert & key + -C [file.crt] - Certificate file to pull packages from the official NGINX repository + -K [file.key] - Key file to pull packages from the official NGINX repository + -W - Enable Security Monitoring - optional + -P [version] - Enable WAF policy compiler, version can be any [v3.1088.2|v4.100.1|v4.2.0|v4.218.0|v4.279.0|v4.402.0|v4.457.0|v4.583.0] - optional + + === Examples: + + Manual build: + ./scripts/buildNIM.sh -n nim-files/nms-instance-manager_2.6.0-698150575~focal_amd64.deb \ + -w nim-files/nms-sm_1.0.0-697204659~focal_amd64.deb \ + -p nim-files/nms-nap-compiler-v4.2.0.deb \ + -t my.registry.tld/nginx-nms:2.6.0 + + Automated build: + ./scripts/buildNIM.sh -i -C nginx-repo.crt -K nginx-repo.key + -W -P v4.583.0 -t my.registry.tld/nginx-nms:latest +``` + +### Automated build + +1. Clone this repo +2. Get your license certificate and key to fetch NGINX Instance Manager packages from NGINX repository +3. 
Build NGINX Instance Manager Docker image using: + +NGINX Instance Manager + +``` +./scripts/buildNIM.sh -t YOUR_DOCKER_REGISTRY/nginx-nim2:automated -i -C certs/nginx-repo.crt -K certs/nginx-repo.key +``` + +NGINX Instance Manager, Security Monitoring and WAF Policy Compiler + +``` +./scripts/buildNIM.sh -t YOUR_DOCKER_REGISTRY/nginx-nim2:automated -i -C certs/nginx-repo.crt -K certs/nginx-repo.key -W -P v4.457.0 +``` + +### Manual build + +1. Clone this repository +2. Download NGINX Instance Manager 2.4.0+ .deb installation file for Ubuntu 20.04 and copy it into `nim-files/` +3. Optional: download Security Monitoring .deb installation file for Ubuntu 20.04 and copy it into `nim-files/` +4. Optional: download WAF Policy Compiler .deb installation file for Ubuntu 20.04 and copy it into `nim-files/` +5. Build NGINX Instance Manager Docker image using the provided script + +Example: + +``` +cd nim-files + +apt-cache madison nms-instance-manager +apt-get download nms-instance-manager=2.15.1-1175574316~focal + +apt-cache madison nms-sm +apt-get download nms-sm=1.7.1-1046510610~focal + +apt-cache search nms-nap-compiler +apt-get download nms-nap-compiler-v4.815.0 + +cd .. + +./scripts/buildNIM.sh \ + -t my-private-registry/nginx-instance-manager:2.15.1-nap-v4.815.0-manualbuild \ + -n nim-files/nms-instance-manager_2.15.1-1175574316~focal_amd64.deb \ + -w nim-files/nms-sm_1.7.1-1046510610~focal_amd64.deb \ + -p nim-files/nms-nap-compiler-v4.815.0_4.815.0-1~focal_amd64.deb +``` + +### Configuring and running + +1. Edit `manifests/1.nginx-nim.yaml` and specify the correct image by modifying the "image" line and configure NGINX Instance Manager username, password and the base64-encoded license file for automated license activation. + +``` +image: your.registry.tld/nginx-nim2:tag +[...] 
+env: + ### NGINX Instance Manager environment + - name: NIM_USERNAME + value: admin + - name: NIM_PASSWORD + value: nimadmin + - name: NIM_LICENSE + value: "" +``` + +To base64-encode the license file the following command can be used: + +``` +base64 -w0 NIM_LICENSE_FILENAME.lic +``` + +Additionally, parameters used by NGINX Instance Manager to connect to ClickHouse can be configured: + +``` +env: + [...] + - name: NIM_CLICKHOUSE_ADDRESS + value: clickhouse + - name: NIM_CLICKHOUSE_PORT + value: "9000" + ### If username is not set to "default", the clickhouse-users ConfigMap in 0.clickhouse.yaml shall be updated accordingly + - name: NIM_CLICKHOUSE_USERNAME + value: "default" + ### If password is not set to "NGINXr0cks", the clickhouse-users ConfigMap in 0.clickhouse.yaml shall be updated accordingly + - name: NIM_CLICKHOUSE_PASSWORD + value: "NGINXr0cks" +``` + +2. If Second Sight was built in the image, configure the relevant environment variables. See the documentation at https://github.com/F5Networks/SecondSight/#on-kubernetesopenshift + +``` +env: + ### Second Sight Push mode + - name: STATS_PUSH_ENABLE + #value: "true" + value: "false" + - name: STATS_PUSH_MODE + value: CUSTOM + #value: PUSHGATEWAY + - name: STATS_PUSH_URL + value: "http://192.168.1.5/callHome" + #value: "http://pushgateway.nginx.ff.lan" + ### Push interval in seconds + - name: STATS_PUSH_INTERVAL + value: "10" +``` + +3. Check / modify files in `/manifests/certs` to customize the TLS certificate and key used for TLS offload + +4. Start and stop using + +``` +./scripts/nimDockerStart.sh start +./scripts/nimDockerStart.sh stop +``` + +5. 
After starting NGINX Instance Manager it will be accessible from outside the cluster at: + +NGINX Instance Manager GUI: `https://nim2.f5.ff.lan` +NGINX Instance Manager gRPC port: `nim2.f5.ff.lan:30443` + +and from inside the cluster at: + +NGINX Instance Manager GUI: `https://nginx-nim2.nginx-nim2` +NGINX Instance Manager gRPC port: `nginx-nim2.nginx-nim2:443` + + +Second Sight REST API (if enabled at build time - see the documentation at `https://github.com/F5Networks/SecondSight`): +- `https://nim2.f5.ff.lan/f5tt/instances` +- `https://nim2.f5.ff.lan/f5tt/metrics` +- Push mode (configured through env variables in `manifests/1.nginx-nim.yaml`) + +Grafana dashboard: `https://grafana.nim2.f5.ff.lan` - see [configuration details](contrib/grafana) + +Running pods are: + +``` +$ kubectl get pods -n nginx-nim2 -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +clickhouse-7bc96d6d56-jthtf 1/1 Running 0 5m8s 10.244.1.65 f5-node1 +grafana-6f58d455c7-8lk64 1/1 Running 0 5m8s 10.244.2.80 f5-node2 +nginx-nim2-679987c54d-7rl6b 1/1 Running 0 5m8s 10.244.1.64 f5-node1 +``` + +6. 
For NGINX Instances running on VM/bare metal only: after installing the nginx-agent on NGINX Instances to be managed with NGINX Instance Manager 2, update the file `/etc/nginx-agent/nginx-agent.conf` and modify the line: + +``` +grpcPort: 443 +``` + +into: + +``` +grpcPort: 30443 +``` + +and then restart nginx-agent + + +## Additional tools + +- [Grafana dashboard for telemetry](contrib/grafana) +- [Docker compose](contrib/docker-compose) + + +# Starting NGINX Instance Manager + +## On Kubernetes + +``` +$ ./scripts/nimDockerStart.sh start +namespace/nginx-nim2 created +Generating a RSA private key +...................+++++ +...............................+++++ +writing new private key to 'nim2.f5.ff.lan.key' +----- +secret/nim2.f5.ff.lan created +deployment.apps/nginx-nim2 created +service/nginx-nim2 created +service/nginx-nim2-grpc created +virtualserver.k8s.nginx.org/vs-nim2 created + +$ kubectl get pods -n nginx-nim2 -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +clickhouse-7bc96d6d56-jthtf 1/1 Running 0 5m8s 10.244.1.65 f5-node1 +grafana-6f58d455c7-8lk64 1/1 Running 0 5m8s 10.244.2.80 f5-node2 +nginx-nim2-679987c54d-7rl6b 1/1 Running 0 5m8s 10.244.1.64 f5-node1 +``` + +NGINX Instance Manager GUI is now reachable from outside the cluster at: +- Web GUI: `https://nim2.f5.ff.lan` +- gRPC: `nim2.f5.ff.lan:30443` +- Second Sight: see [usage](https://github.com/F5Networks/SecondSight/blob/main/USAGE.md) + +## On docker-compose + +See [docker-compose](contrib/docker-compose) + +# Stopping NGINX Instance Manager + +## On Kubernetes + +``` +$ ./scripts/nimDockerStart.sh stop +namespace "nginx-nim2" deleted +``` + +## On docker-compose + +See [docker-compose](contrib/docker-compose) diff --git a/NGINX-NIM-Docker/container/startNIM.sh b/NGINX-NIM-Docker/container/startNIM.sh new file mode 100755 index 0000000..2205592 --- /dev/null +++ b/NGINX-NIM-Docker/container/startNIM.sh @@ -0,0 +1,149 @@ +#!/bin/bash + +# Makes sure that Clickhouse 
is up and running - dedicated pod + +RETCODE=-1 +while [ ! $RETCODE = 0 ] +do + nc -z $NIM_CLICKHOUSE_ADDRESS $NIM_CLICKHOUSE_PORT + RETCODE=$? + echo "Waiting for ClickHouse..." + sleep 3 +done + +if [ -f "/deployment/counter.enabled" ] +then + export DATAPLANE_TYPE=NGINX_MANAGEMENT_SYSTEM + export DATAPLANE_FQDN="https://127.0.0.1:443" + export DATAPLANE_USERNAME=$NIM_USERNAME + export DATAPLANE_PASSWORD=$NIM_PASSWORD + export NMS_CH_HOST=$NIM_CLICKHOUSE_ADDRESS + export NMS_CH_PORT=$NIM_CLICKHOUSE_PORT + export NMS_CH_USER=$NIM_CLICKHOUSE_USERNAME + export NMS_CH_PASS=$NIM_CLICKHOUSE_PASSWORD + + python3 /deployment/app.py & +fi + +mkdir /nonexistent + +/etc/nms/scripts/basic_passwords.sh $NIM_USERNAME $NIM_PASSWORD + +# NGINX Management Suite version detection +# NMS >= 2.7.0 configuration is yaml +VERSION=`nms-core -v` +A=${VERSION%\/*} +B=${A##*\ } +RELEASE=`echo $B | awk -F- '{print $2"."$3"."$4}'` + +echo -n "Detected NMS $RELEASE... " + +case $RELEASE in + 2.4.0|2.5.0|2.5.1|2.6.0) + echo "legacy nms.conf" +# Clickhouse configuration - dedicated pod +echo -e " + +# Clickhouse config +clickhouse_address = $NIM_CLICKHOUSE_ADDRESS:$NIM_CLICKHOUSE_PORT +clickhouse_username = '$NIM_CLICKHOUSE_USERNAME' +clickhouse_password = '$NIM_CLICKHOUSE_PASSWORD' +" >> /etc/nms/nms.conf + ;; + 2.7.0|2.8.0|2.9.0|2.9.1|2.10.0|2.10.1|2.11.0|2.12.0) + echo "YAML nms.conf <= 2.12" +# Clickhouse configuration - dedicated pod +echo -e " + +# Clickhouse config +clickhouse: + address: $NIM_CLICKHOUSE_ADDRESS:$NIM_CLICKHOUSE_PORT + username: '$NIM_CLICKHOUSE_USERNAME' + password: '$NIM_CLICKHOUSE_PASSWORD' +" >> /etc/nms/nms.conf + ;; + *) + echo "YAML nms.conf >= 2.13" +# Clickhouse configuration - dedicated pod +export NIM_CLICKHOUSE_ADDRESSPORT=$NIM_CLICKHOUSE_ADDRESS:$NIM_CLICKHOUSE_PORT +yq '.clickhouse.address=strenv(NIM_CLICKHOUSE_ADDRESSPORT)|.clickhouse.username=strenv(NIM_CLICKHOUSE_USERNAME)|.clickhouse.password=strenv(NIM_CLICKHOUSE_PASSWORD)' /etc/nms/nms.conf > 
/etc/nms/nms.conf-updated +mv /etc/nms/nms.conf-updated /etc/nms/nms.conf +chown nms:nms /etc/nms/nms.conf +chmod 644 /etc/nms/nms.conf + +yq '.clickhouse.address="tcp://"+strenv(NIM_CLICKHOUSE_ADDRESSPORT)|.clickhouse.username=strenv(NIM_CLICKHOUSE_USERNAME)|.clickhouse.password=strenv(NIM_CLICKHOUSE_PASSWORD)' /etc/nms/nms-sm-conf.yaml > /etc/nms/nms-sm-conf.yaml-updated +mv /etc/nms/nms-sm-conf.yaml-updated /etc/nms/nms-sm-conf.yaml +chown nms:nms /etc/nms/nms-sm-conf.yaml +chmod 644 /etc/nms/nms-sm-conf.yaml + ;; +esac + +# Start nms core - from /lib/systemd/system/nms-core.service +/bin/bash -c '`which mkdir` -p /var/lib/nms/dqlite/' +/bin/bash -c '`which mkdir` -p /var/lib/nms/secrets/' +/bin/bash -c '`which mkdir` -p /var/run/nms/' +/bin/bash -c '`which mkdir` -p /var/log/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/log/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/run/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/lib/nms/' +/bin/bash -c '`which chmod` 0775 /var/log/nms/' +/bin/bash -c '`which chown` -R nms:nms /etc/nms/certs/services/core' +/bin/bash -c '`which chown` nms:nms /etc/nms/certs/services/ca.crt' +/bin/bash -c '`which chmod` 0700 /etc/nms/certs/services/core' +/bin/bash -c '`which chmod` 0600 /etc/nms/certs/services/core/*' +su - nms -c 'function repeat { while [ 1 ] ; do "$@" ; sleep 1 ; done; };repeat /usr/bin/nms-core &' -s /bin/bash + +# Start nms dpm - from /lib/systemd/system/nms-dpm.service +/bin/bash -c '`which mkdir` -p /var/lib/nms/streaming/' +/bin/bash -c '`which mkdir` -p /var/lib/nms/dqlite/' +/bin/bash -c '`which mkdir` -p /var/run/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/log/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/lib/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/run/nms/' +/bin/bash -c '`which chown` -R nms:nms /etc/nms/certs/services/dataplane-manager' +/bin/bash -c '`which chown` nms:nms /etc/nms/certs/services/ca.crt' +/bin/bash -c '`which chmod` 0700 
/etc/nms/certs/services/dataplane-manager' +/bin/bash -c '`which chmod` 0600 /etc/nms/certs/services/dataplane-manager/*' +su - nms -c 'function repeat { while [ 1 ] ; do "$@" ; sleep 1 ; done; };repeat /usr/bin/nms-dpm &' -s /bin/bash + +# Start nms ingestion - from /lib/systemd/system/nms-ingestion.service +/bin/bash -c '`which mkdir` -p /var/run/nms/' +/bin/bash -c '`which mkdir` -p /var/log/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/log/nms/' +/bin/bash -c '`which chmod` 0775 /var/log/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/run/nms/' +su - nms -c 'function repeat { while [ 1 ] ; do "$@" ; sleep 1 ; done; };repeat /usr/bin/nms-ingestion &' -s /bin/bash + +# Start nms integrations - from /lib/systemd/system/nms-integrations.service +/bin/bash -c '`which mkdir` -p /var/lib/nms/dqlite/' +/bin/bash -c '`which mkdir` -p /var/run/nms/' +/bin/bash -c '`which mkdir` -p /var/log/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/lib/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/run/nms/' +/bin/bash -c '`which chown` -R nms:nms /var/log/nms/' +/bin/bash -c '`which chmod` 0775 /var/log/nms/' +/bin/bash -c '`which chown` nms:nms /etc/nms/certs/services/ca.crt' +su - nms -c 'function repeat { while [ 1 ] ; do "$@" ; sleep 1 ; done; };repeat /usr/bin/nms-integrations &' -s /bin/bash + +sleep 5 + +# Start Security Monitoring +if [ -f /usr/bin/nms-sm ] +then + su - nms -c 'function repeat { while [ 1 ] ; do "$@" ; sleep 1 ; done; };repeat /usr/bin/nms-sm start &' -s /bin/bash +fi + +chmod 666 /var/run/nms/*.sock + +/etc/init.d/nginx start + +# License activation +if ((${#NIM_LICENSE[@]})) +then + curl -s -X PUT -k https://127.0.0.1/api/platform/v1/license -u "$NIM_USERNAME:$NIM_PASSWORD" -d '{ "desiredState": { "content": "'$NIM_LICENSE'" }, "metadata": { "name": "license" } }' -H "Content-Type: application/json" +fi + +while [ 1 ] +do + sleep 60 +done diff --git a/NGINX-NIM-Docker/contrib/docker-compose/.env 
b/NGINX-NIM-Docker/contrib/docker-compose/.env new file mode 100644 index 0000000..bf6c41d --- /dev/null +++ b/NGINX-NIM-Docker/contrib/docker-compose/.env @@ -0,0 +1,10 @@ +# NGINX Management Suite settings +NIM_IMAGE= +NIM_LICENSE= +NIM_USERNAME=admin +NIM_PASSWORD=nimadmin + +# Clickhouse settings +NIM_CLICKHOUSE_ADDRESS=clickhouse +NIM_CLICKHOUSE_USERNAME=default +NIM_CLICKHOUSE_PASSWORD=NGINXr0cks diff --git a/NGINX-NIM-Docker/contrib/docker-compose/README.md b/NGINX-NIM-Docker/contrib/docker-compose/README.md new file mode 100644 index 0000000..368db99 --- /dev/null +++ b/NGINX-NIM-Docker/contrib/docker-compose/README.md @@ -0,0 +1,14 @@ +# Docker-compose for NGINX Instance Manager + +1. Edit the `.env` file configuring the NGINX Management Suite docker image name and the base64-encoded license +2. Start NGINX Management Suite using + +``` +docker-compose -f docker-compose.yaml up -d +``` + +3. Stop NGINX Management Suite using + +``` +docker-compose -f docker-compose.yaml down +``` diff --git a/NGINX-NIM-Docker/contrib/docker-compose/docker-compose.yaml b/NGINX-NIM-Docker/contrib/docker-compose/docker-compose.yaml new file mode 100644 index 0000000..fc0ffcc --- /dev/null +++ b/NGINX-NIM-Docker/contrib/docker-compose/docker-compose.yaml @@ -0,0 +1,36 @@ +version: "3" +services: + clickhouse: + image: clickhouse/clickhouse-server:23.1.2.9 + ports: + - "9000:9000" + environment: + - CLICKHOUSE_USER=${NIM_CLICKHOUSE_USERNAME} + - CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 + - CLICKHOUSE_PASSWORD=${NIM_CLICKHOUSE_PASSWORD} + volumes: + - db-data:/var/lib/clickhouse + ulimits: + nproc: 65535 + nofile: + soft: 262144 + hard: 262144 + + nms: + image: ${NIM_IMAGE} + ports: + - "443:443" + environment: + - NIM_LICENSE=${NIM_LICENSE} + - NIM_USERNAME=${NIM_USERNAME} + - NIM_PASSWORD=${NIM_PASSWORD} + - NIM_CLICKHOUSE_ADDRESS=${NIM_CLICKHOUSE_ADDRESS} + - NIM_CLICKHOUSE_PORT=9000 + - NIM_CLICKHOUSE_USERNAME=${NIM_CLICKHOUSE_USERNAME} + - 
NIM_CLICKHOUSE_PASSWORD=${NIM_CLICKHOUSE_PASSWORD} + volumes: + - nms-data:/var/lib/nms + +volumes: + db-data: + nms-data: diff --git a/NGINX-NIM-Docker/contrib/grafana/NGINX_NIM2_Telemetry_Grafana_Dashboard.json b/NGINX-NIM-Docker/contrib/grafana/NGINX_NIM2_Telemetry_Grafana_Dashboard.json new file mode 100644 index 0000000..ee3f315 --- /dev/null +++ b/NGINX-NIM-Docker/contrib/grafana/NGINX_NIM2_Telemetry_Grafana_Dashboard.json @@ -0,0 +1,2370 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 5, + "iteration": 1648720093378, + "links": [], + "liveNow": false, + "panels": [ + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "pattern": "nginx-agent (.*) started on (.*)", + "result": { + "color": "green", + "index": 0, + "text": "$1" + } + }, + "type": "regex" + }, + { + "options": { + "pattern": "nginx-agent (.*) stopped on (.*)", + "result": { + "color": "dark-red", + "index": 1, + "text": "$1" + } + }, + "type": "regex" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "yellow", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 0 + }, + "id": 31, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "orientation": "auto", + "reduceOptions": { + "calcs": [], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.4.4", + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "fZlVdGynz" + }, + 
"dateColDataType": "partition_key", + "dateLoading": false, + "dateTimeColDataType": "creation_time", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n message\nFROM $table\n\nWHERE alias = '$nginx_instance_name'\nORDER BY db_timestamp DESC\nLIMIT 1", + "rawQuery": "SELECT\n (intDiv(toUInt32(creation_time), 1) * 1) * 1000 as t,\n message\nFROM nms.events\n\nWHERE alias = 'ubuntu'\nORDER BY db_timestamp DESC\nLIMIT 1", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "events", + "tableLoading": false + } + ], + "title": "Agent Version", + "type": "stat" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "pattern": "Agent Start", + "result": { + "color": "green", + "index": 0, + "text": "Online" + } + }, + "type": "regex" + }, + { + "options": { + "pattern": "Agent Stop", + "result": { + "color": "dark-red", + "index": 1, + "text": "Offline" + } + }, + "type": "regex" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "yellow", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 4, + "y": 0 + }, + "id": 32, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "orientation": "auto", + "reduceOptions": { + "calcs": [], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.4.4", + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "fZlVdGynz" + }, + "dateColDataType": "partition_key", + "dateLoading": false, + "dateTimeColDataType": "creation_time", + "dateTimeType": "DATETIME", + "datetimeLoading": 
false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n status\nFROM $table\n\nWHERE alias = '$nginx_instance_name'\nORDER BY db_timestamp DESC\nLIMIT 1", + "rawQuery": "SELECT\n (intDiv(toUInt32(creation_time), 2) * 2) * 1000 as t,\n status\nFROM nms.events\n\nWHERE alias = 'ubuntu'\nORDER BY db_timestamp DESC\nLIMIT 1", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "events", + "tableLoading": false + } + ], + "title": "Agent Status", + "type": "stat" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 8, + "x": 8, + "y": 0 + }, + "id": 33, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "orientation": "auto", + "reduceOptions": { + "calcs": [], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.4.4", + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "fZlVdGynz" + }, + "dateColDataType": "partition_key", + "dateLoading": false, + "dateTimeColDataType": "creation_time", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n db_timestamp\nFROM $table\n\nWHERE alias = '$nginx_instance_name'\nORDER BY db_timestamp DESC\nLIMIT 1", + "rawQuery": "SELECT\n (intDiv(toUInt32(creation_time), 1) * 1) * 1000 as t,\n db_timestamp\nFROM 
nms.events\n\nWHERE alias = 'ubuntu'\nORDER BY db_timestamp DESC\nLIMIT 1", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "events", + "tableLoading": false + } + ], + "title": "Last change", + "type": "stat" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 8, + "panels": [ + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 2, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM 
$table\n\nWHERE $timeFilter\n and name like 'system.cpu.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 2) * 2) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648250847) AND timestamp <= toDateTime(1648254447)\n and name like 'system.cpu.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "CPU usage", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 5, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + 
"format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'system.load.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 2) * 2) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648250842) AND timestamp <= toDateTime(1648254442)\n and name like 'system.load.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "Load Average", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + 
"datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'system.mem.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248449) AND timestamp <= toDateTime(1648248749)\n and name like 'system.mem.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "Memory usage", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "system.disk.in_use" + }, + 
"properties": [ + { + "id": "unit", + "value": "percent" + }, + { + "id": "custom.axisSoftMin", + "value": 0 + }, + { + "id": "custom.axisSoftMax", + "value": 100 + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 13 + }, + "id": 4, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'system.disk.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248464) AND timestamp <= toDateTime(1648248764)\n and name like 'system.disk.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "Disk", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + 
"type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "KBs" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "system.io.iops_r" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "none" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "system.io.iops_w" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "none" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 21 + }, + "id": 3, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and (name like 'system.io.iops_%' or name like 'system.io.kbs_%') and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248921) AND timestamp <= toDateTime(1648249221)\n and (name like 
'system.io.iops_%' or name like 'system.io.kbs_%') and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "Disk I/O", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 29, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'system.io.wait_%' and display_name = 
'$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 2) * 2) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648250834) AND timestamp <= toDateTime(1648254434)\n and name like 'system.io.wait_%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "Disk Latency", + "type": "timeseries" + } + ], + "repeat": "nginx_instance", + "title": "System", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 5 + }, + "id": 26, + "panels": [ + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "system.disk.in_use" + }, + "properties": [ + { + "id": "unit", + "value": "percent" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 2 + }, + "id": 11, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": 
"vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'system.net.bytes_%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248475) AND timestamp <= toDateTime(1648248775)\n and name like 'system.net.bytes_%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "Network Traffic", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "system.disk.in_use" + }, + "properties": [ + { + "id": 
"unit", + "value": "percent" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 2 + }, + "id": 24, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'system.net.packets_%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248486) AND timestamp <= toDateTime(1648248786)\n and name like 'system.net.packets_%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "Network Packet Statistics", + "type": "timeseries" + } + ], + "title": "Network", + "type": "row" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 6 + }, + "id": 28, + "panels": [], + "title": "SSL", + "type": "row" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 7 + }, + "id": 14, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'plus.ssl.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248527) AND timestamp <= toDateTime(1648248827)\n and name like 'plus.ssl.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "SSL", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 15 + }, + "id": 10, + 
"panels": [], + "repeat": "nginx_instance", + "title": "HTTP & SSL", + "type": "row" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 13, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'nginx.http.conn.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE 
timestamp >= toDateTime(1648248511) AND timestamp <= toDateTime(1648248811)\n and name like 'nginx.http.conn.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "HTTP Connections", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "max": 1, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "nginx.http.request.body_bytes_sent" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#73BF69", + "mode": "fixed" + } + }, + { + "id": "unit", + "value": "decbytes" + } + ] + }, + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "nginx.http.request.body_bytes_sent" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 15, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean", + "max", + "min" + ], + "displayMode": "table", + "placement": 
"right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'nginx.http.request.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248516) AND timestamp <= toDateTime(1648248816)\n and name like 'nginx.http.request.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "HTTP Requests", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + 
] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 16, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'plus.http.status.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248532) AND timestamp <= toDateTime(1648248832)\n and name like 'plus.http.status.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "HTTP Status", + "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 22, + "panels": [ + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + 
"scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 17, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'plus.http.upstream.peers.health_checks.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248592) AND timestamp <= toDateTime(1648248892)\n and name like 'plus.http.upstream.peers.health_checks.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "Upstream healthchecks", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + 
"custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 19, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'plus.http.upstream.peers.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248598) AND timestamp <= toDateTime(1648248898)\n and name like 'plus.http.upstream.peers.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + 
"round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "HTTP upstream peers", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 18, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'plus.http.upstream.peers.status.%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 
1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248604) AND timestamp <= toDateTime(1648248904)\n and name like 'plus.http.upstream.peers.status.%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "Upstream response status", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "plus.http.upstream.response.time" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8AB8FF", + "mode": "fixed" + } + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ms" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 20, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": 
false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and (name like 'plus.http.upstream.peers.request.%' or name like 'plus.http.upstream.peers.response.%') and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248611) AND timestamp <= toDateTime(1648248911)\n and (name like 'plus.http.upstream.peers.request.%' or name like 'plus.http.upstream.peers.response.%') and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "HTTP upstream peers", + "type": "timeseries" + }, + { + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": 
"plus.http.upstream.response.time" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8AB8FF", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 25 + }, + "id": 23, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "min", + "mean", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "database": "nms", + "datasource": { + "type": "vertamedia-clickhouse-datasource", + "uid": "kXXNDeonk" + }, + "dateColDataType": "", + "dateLoading": false, + "dateTimeColDataType": "timestamp", + "dateTimeType": "DATETIME", + "datetimeLoading": false, + "extrapolate": true, + "format": "time_series", + "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t", + "interval": "", + "intervalFactor": 1, + "query": "SELECT\n $timeSeries as t,\n name,avg(value)\nFROM $table\n\nWHERE $timeFilter\n and name like 'plus.http.upstream.peers.bytes_%' and display_name = '$nginx_instance_name'\nGROUP BY name,t\n\nORDER BY name,t\n", + "rawQuery": "SELECT\n (intDiv(toUInt32(timestamp), 1) * 1) * 1000 as t,\n name,avg(value)\nFROM nms.metrics\n\nWHERE timestamp >= toDateTime(1648248618) AND timestamp <= toDateTime(1648248918)\n and name like 'plus.http.upstream.peers.bytes_%' and display_name = 'devel'\nGROUP BY name,t\n\nORDER BY name,t", + "refId": "A", + "round": "0s", + "skip_comments": true, + "table": "metrics", + "tableLoading": false + } + ], + "title": "HTTP upstream bytes", + "type": "timeseries" + } + ], + "title": "HTTP Upstreams", + "type": "row" + } + ], + "refresh": false, + "schemaVersion": 35, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "nginx-nim-96cfdbd97-rrlss", + "value": "nginx-nim-96cfdbd97-rrlss" + }, + "definition": "select distinct display_name from nms.metrics", + "hide": 0, + 
"includeAll": false, + "label": "NGINX Instance Name", + "multi": false, + "name": "nginx_instance_name", + "options": [], + "query": "select distinct display_name from nms.metrics", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "hidden": false, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ], + "type": "timepicker" + }, + "timezone": "browser", + "title": "NGINX Telemetry", + "uid": "yFZSpgJ7q", + "version": 3, + "weekStart": "" +} \ No newline at end of file diff --git a/NGINX-NIM-Docker/contrib/grafana/README.md b/NGINX-NIM-Docker/contrib/grafana/README.md new file mode 100644 index 0000000..5d39b2f --- /dev/null +++ b/NGINX-NIM-Docker/contrib/grafana/README.md @@ -0,0 +1,18 @@ +# Grafana telemetry dashboard + +To configure the bundled Grafana container follow these steps: + +1. Browse to https://grafana.nim2.f5.ff.lan/ (the actual FQDN depends on your cluster/network settings, see YAML files under /manifests) +2. Login as user "admin", password "admin" and set a new password +3. Go to configuration / data sources +4. Add a new data source. In the search box type "clickhouse" and click "select". Configure the datasource as displayed here below. Set the password to "NGINXr0cks": + + + +5. Click "Save and test": "Datasource is working" should be displayed +6. Go to Dashboards / browse and click "Import" +7. Click "Upload JSON file" and select the file `contrib/NGINX_NIM2_Telemetry_Grafana_Dashboard.json` +8. Click import +9. 
The NGINX Instance Manager 2 telemetry dashboard is up and running + + diff --git a/NGINX-NIM-Docker/contrib/grafana/clickhouse-datasource.png b/NGINX-NIM-Docker/contrib/grafana/clickhouse-datasource.png new file mode 100644 index 0000000..876b61d Binary files /dev/null and b/NGINX-NIM-Docker/contrib/grafana/clickhouse-datasource.png differ diff --git a/NGINX-NIM-Docker/contrib/grafana/grafana-dashboard.png b/NGINX-NIM-Docker/contrib/grafana/grafana-dashboard.png new file mode 100644 index 0000000..a11e6f7 Binary files /dev/null and b/NGINX-NIM-Docker/contrib/grafana/grafana-dashboard.png differ diff --git a/NGINX-NIM-Docker/contrib/helm-installer/NGINX-NMS-Installer.sh b/NGINX-NIM-Docker/contrib/helm-installer/NGINX-NMS-Installer.sh new file mode 100755 index 0000000..9bea047 --- /dev/null +++ b/NGINX-NIM-Docker/contrib/helm-installer/NGINX-NMS-Installer.sh @@ -0,0 +1,152 @@ +#!/bin/bash + +HEADER="NGINX Management Suite Helm Chart installation script" +BANNER="$HEADER\n\n +Usage:\n\n +$0 [options]\n\n +Options:\n\n +-h\t\t\t- This help\n\n +-i [filename]\t\t- NGINX Management Suite Helm installation file (mandatory)\n +-r [registry FQDN]\t- Private registry FQDN (mandatory)\n\n +-s [pull secret]\t- Private registry pull secret (optional)\n +-p [admin password]\t- NGINX Management Suite admin password (optional, default is 'admin')\n +-n [namespace]\t\t- Destination namespace to install to (optional, default is the current namespace)\n +-P [true|false]\t- Set persistent volumes usage (optional, default is 'true')\n\n +Example:\n\n +$0 -i nms-helm-2.5.1.tar.gz -r myregistry.k8s.local:31005 -s MyPullSecret -p adminP4ssw0rd -n nms-namespace\n +" + +while getopts 'hi:r:s:p:n:P:' OPTION +do + case "$OPTION" in + h) + echo -e $BANNER + exit + ;; + i) + HELMFILE=$OPTARG + ;; + r) + REGISTRY=$OPTARG + ;; + s) + PULLSECRET=$OPTARG + ;; + p) + ADMINPASS=$OPTARG + ;; + n) + NAMESPACE=$OPTARG + ;; + P) + PERSISTENTVOLUMES=$OPTARG + ;; + *) + exit + ;; + esac +done + +if [ $# 
== 0 ] || [ "$HELMFILE" = "" ] || [ "$REGISTRY" = "" ] +then + echo -e $BANNER + exit +fi + +echo -e "$HEADER\n\n-- Running preflight checks" +REQUIRED_COMMANDS="tar helm openssl" + +for RC in $REQUIRED_COMMANDS +do + echo -n "$RC... " + type $RC >/dev/null 2>&1 + if [ ! $? = 0 ] + then + echo -e "Not found, aborting" + exit + else + echo -e "OK" + fi +done + +RECAP=" +Release file:\t\t\t$HELMFILE\n +Private registry:\t\t$REGISTRY\n +Private registry pull secret:\t$PULLSECRET\n +Destination namespace:\t\t${NAMESPACE:=nms}\n +Persistent volumes:\t\t${PERSISTENTVOLUMES:=true}\n +Admin password:\t\t${ADMINPASS:=admin}\n" + +DRYRUN="\n-- Installing using:\n\n$RECAP" + +echo -e $DRYRUN + +read -p "Do you want to proceed (YES/no)? " PROCEED + +if [ ! "$PROCEED" = "YES" ] +then + echo "Aborting installation" + exit +fi + +echo + +if [ ! -f $HELMFILE ] +then + echo "$HELMFILE not found, aborting" + exit +fi + +NMSRELEASE=`basename $HELMFILE | sed "s/nms-helm-//g"|sed "s/.tar.gz//g"` +echo "-- Processing NMS Helm Chart for release $NMSRELEASE" + +DSTDIR=`mktemp -d` + +echo "-- Decompressing $HELMFILE" +tar -xf $HELMFILE -C $DSTDIR +pushd $DSTDIR > /dev/null + +IMAGES=`ls *.tar.gz` + +for I in $IMAGES +do + IMGNAME=`echo $I:$NMSRELEASE | sed "s/-$NMSRELEASE.tar.gz//g"` + + echo ".. Importing docker image for $I" + PUSHEDIMG=`docker load -i $I | tail -n1 | awk '{print $3}'` + + #echo ".. Tagging $PUSHEDIMG as $REGISTRY/$IMGNAME" + #docker tag $PUSHEDIMG $REGISTRY/$IMGNAME + + echo ".. 
Pushing $REGISTRY/$IMGNAME to private registry" + docker push $REGISTRY/$IMGNAME > /dev/null +done + +echo "-- Decompressing helm chart" +tar zxmf nms-hybrid-$NMSRELEASE.tgz + +echo "-- Running helm install" +helm install \ +--set core.persistence.enable=${PERSISTENTVOLUMES:-true} \ +--set dpm.persistence.enable=${PERSISTENTVOLUMES:-true} \ +--set integrations.persistence.enable=${PERSISTENTVOLUMES:-true} \ +--set imagePullSecrets[0].name=${PULLSECRET:-""} \ +--set adminPasswordHash=`openssl passwd -1 ${ADMINPASS:=admin}` \ +--set namespace=$NAMESPACE \ +--set apigw.image.repository=$REGISTRY/nms-apigw \ +--set apigw.image.tag=$NMSRELEASE \ +--set core.image.repository=$REGISTRY/nms-core \ +--set core.image.tag=$NMSRELEASE \ +--set dpm.image.repository=$REGISTRY/nms-dpm \ +--set dpm.image.tag=$NMSRELEASE \ +--set ingestion.image.repository=$REGISTRY/nms-ingestion \ +--set ingestion.image.tag=$NMSRELEASE \ +--set integrations.image.repository=$REGISTRY/nms-integrations \ +--set integrations.image.tag=$NMSRELEASE \ +nim ./nms-hybrid +popd > /dev/null + +rm -r $DSTDIR + +echo -e "\n-- Installation complete\n" +echo -e $RECAP diff --git a/NGINX-NIM-Docker/contrib/helm-installer/README.md b/NGINX-NIM-Docker/contrib/helm-installer/README.md new file mode 100644 index 0000000..0925d71 --- /dev/null +++ b/NGINX-NIM-Docker/contrib/helm-installer/README.md @@ -0,0 +1,92 @@ +# NGINX Management Suite Helm Installer + +This is a bash script to simplify NGINX Management Suite installation through its helm chart. +NGINX Management Suite installation official docs are available at https://docs.nginx.com/nginx-management-suite/admin-guides/installation/helm-chart/ + +## Usage + +Follow these steps: + +1. Browse to NGINX Management Suite [Helm Installation Guide](https://docs.nginx.com/nginx-management-suite/admin-guides/installation/helm-chart/) +2. Check the [overview](https://docs.nginx.com/nginx-management-suite/admin-guides/installation/helm-chart/#overview) paragraph +3. 
Make sure all [prerequisites](https://docs.nginx.com/nginx-management-suite/admin-guides/installation/helm-chart/#before-you-begin) are met +4. Download the [helm bundle](https://docs.nginx.com/nginx-management-suite/admin-guides/installation/helm-chart/#download-helm-bundle) +5. Run the `NGINX-NMS-Installer.sh` script + +``` +$ ./NGINX-NMS-Installer.sh +NGINX Management Suite Helm Chart installation script + + Usage: + + ./NGINX-NMS-Installer.sh [options] + + Options: + + -h - This help + + -i [filename] - NGINX Management Suite Helm installation file (mandatory) + -r [registry FQDN] - Private registry FQDN (mandatory) + + -s [pull secret] - Private registry pull secret (optional) + -p [admin password] - NGINX Management Suite admin password (optional, default is 'admin') + -n [namespace] - Destination namespace to install to (optional, default is the current namespace) + -P [true|false] - Set persistent volumes usage (optional, default is 'true') + + Example: + + ./NGINX-NMS-Installer.sh -i nms-helm-2.5.1.tar.gz -r myregistry.k8s.local:31005 -s MyPullSecret -p adminP4ssw0rd -n nms-namespace +``` + +## How to run + +``` +$ ./NGINX-NMS-Installer.sh -i nms-helm-2.5.1.tar.gz -r registry.ff.lan:31005 -s myPullSecret -p nmsAdminPass -n nms-namespace +NGINX Management Suite Helm Chart installation script + +-- Running preflight checks +tar... OK +helm... OK +openssl... OK + +-- Installing using: + + Release file: nms-helm-2.5.1.tar.gz + Private registry: registry.ff.lan:31005 + Private registry pull secret: myPullSecret + Destination namespace: nms-namespace + Persistent volumes: true + Admin password: nmsAdminPass + +Do you want to proceed (YES/no)? YES + +-- Processing NMS Helm Chart for release 2.5.1 +-- Decompressing nms-helm-2.5.1.tar.gz +.. Importing docker image for nms-apigw-2.5.1.tar.gz +.. Pushing registry.ff.lan:31005/nms-apigw:2.5.1 to private registry +.. Importing docker image for nms-core-2.5.1.tar.gz +.. 
Pushing registry.ff.lan:31005/nms-core:2.5.1 to private registry +.. Importing docker image for nms-dpm-2.5.1.tar.gz +.. Pushing registry.ff.lan:31005/nms-dpm:2.5.1 to private registry +.. Importing docker image for nms-ingestion-2.5.1.tar.gz +.. Pushing registry.ff.lan:31005/nms-ingestion:2.5.1 to private registry +.. Importing docker image for nms-integrations-2.5.1.tar.gz +.. Pushing registry.ff.lan:31005/nms-integrations:2.5.1 to private registry +-- Decompressing helm chart +-- Running helm install +NAME: nim +LAST DEPLOYED: Tue Oct 25 17:47:23 2022 +NAMESPACE: default +STATUS: deployed +REVISION: 1 +TEST SUITE: None + +-- Installation complete + +Release file: nms-helm-2.5.1.tar.gz + Private registry: registry.ff.lan:31005 + Private registry pull secret: myPullSecret + Destination namespace: nms-namespace + Persistent volumes: true + Admin password: nmsAdminPass +``` diff --git a/NGINX-NIM-Docker/contrib/pvc-provisioner/README.md b/NGINX-NIM-Docker/contrib/pvc-provisioner/README.md new file mode 100644 index 0000000..022860c --- /dev/null +++ b/NGINX-NIM-Docker/contrib/pvc-provisioner/README.md @@ -0,0 +1,42 @@ +# Sample persistent volume dynamic provisioner + +`dynamic-nfs-storage.yaml` can be used to spin up a dynamic persistent volumes provisioner relying on a NFS server. To deploy it: + +1. Edit the Deployment in `dynamic-nfs-storage.yaml` setting the IP address and base path of an available NFS server: + +``` +[...] + env: + [...] + + ### CONFIGURE HERE NFS SERVER IP ADDRESS AND BASE PATH + - name: NFS_SERVER + value: + - name: NFS_PATH + value: + ### + volumes: + - name: nfs-client-root + nfs: + ### CONFIGURE HERE NFS SERVER IP ADDRESS AND BASE PATH + server: + path: + ### +``` + +2. Deploy it: + +``` +kubectl apply -f dynamic-nfs-storage.yaml +``` + +3. 
Two storage classes are created for persistent and disposable PVCs: + +``` +$ kubectl get storageclass +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +managed-nfs-storage-delete k8s-sigs.io/nfs-subdir-external-provisioner Delete Immediate false 82d +managed-nfs-storage-retain (default) k8s-sigs.io/nfs-subdir-external-provisioner Delete Immediate false 82d +``` + +4. `test-retain.yaml` and `test-delete.yaml` can be used for testing purposes diff --git a/NGINX-NIM-Docker/contrib/pvc-provisioner/dynamic-nfs-storage.yaml b/NGINX-NIM-Docker/contrib/pvc-provisioner/dynamic-nfs-storage.yaml new file mode 100644 index 0000000..4f7af57 --- /dev/null +++ b/NGINX-NIM-Docker/contrib/pvc-provisioner/dynamic-nfs-storage.yaml @@ -0,0 +1,143 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: storage + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: storage + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nfs-client-provisioner-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: storage +roleRef: + kind: ClusterRole + name: nfs-client-provisioner-runner + apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: 
leader-locking-nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: storage +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: storage +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: storage +roleRef: + kind: Role + name: leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: managed-nfs-storage-retain + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # or choose another name, must match deployment's env PROVISIONER_NAME' +parameters: + pathPattern: "${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path}" # waits for nfs.io/storage-path annotation, if not specified will accept as empty string. + onDelete: retain + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: managed-nfs-storage-delete +provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # or choose another name, must match deployment's env PROVISIONER_NAME' +parameters: + pathPattern: "${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path}" # waits for nfs.io/storage-path annotation, if not specified will accept as empty string. 
+ onDelete: delete + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfs-client-provisioner + labels: + app: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: storage +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: nfs-client-provisioner + template: + metadata: + labels: + app: nfs-client-provisioner + spec: + serviceAccountName: nfs-client-provisioner + containers: + - name: nfs-client-provisioner + image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2 + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: k8s-sigs.io/nfs-subdir-external-provisioner + + ### CONFIGURE HERE NFS SERVER IP ADDRESS AND BASE PATH + - name: NFS_SERVER + value: + - name: NFS_PATH + value: + ### + volumes: + - name: nfs-client-root + nfs: + ### CONFIGURE HERE NFS SERVER IP ADDRESS AND BASE PATH + server: + path: + ### diff --git a/NGINX-NIM-Docker/contrib/pvc-provisioner/test-delete.yaml b/NGINX-NIM-Docker/contrib/pvc-provisioner/test-delete.yaml new file mode 100644 index 0000000..1576ef2 --- /dev/null +++ b/NGINX-NIM-Docker/contrib/pvc-provisioner/test-delete.yaml @@ -0,0 +1,36 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: test-claim-delete + annotations: + nfs.io/storage-path: test-claim-delete +spec: + storageClassName: managed-nfs-storage-delete + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Mi + +--- +kind: Pod +apiVersion: v1 +metadata: + name: test-pod-delete +spec: + containers: + - name: test-pod + image: gcr.io/google_containers/busybox:1.24 + command: + - "/bin/sh" + args: + - "-c" + - "echo ok >> /mnt/SUCCESS && exit 0 || exit 1" + volumeMounts: + - name: nfs-pvc + mountPath: "/mnt" + restartPolicy: "Never" + volumes: + - name: nfs-pvc + persistentVolumeClaim: + claimName: test-claim-delete diff --git a/NGINX-NIM-Docker/contrib/pvc-provisioner/test-retain.yaml 
b/NGINX-NIM-Docker/contrib/pvc-provisioner/test-retain.yaml new file mode 100644 index 0000000..17140bb --- /dev/null +++ b/NGINX-NIM-Docker/contrib/pvc-provisioner/test-retain.yaml @@ -0,0 +1,36 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: test-claim-retain + annotations: + nfs.io/storage-path: test-claim-retain +spec: + storageClassName: managed-nfs-storage-retain + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Mi + +--- +kind: Pod +apiVersion: v1 +metadata: + name: test-pod-retain +spec: + containers: + - name: test-pod + image: gcr.io/google_containers/busybox:1.24 + command: + - "/bin/sh" + args: + - "-c" + - "echo ok >> /mnt/SUCCESS && exit 0 || exit 1" + volumeMounts: + - name: nfs-pvc + mountPath: "/mnt" + restartPolicy: "Never" + volumes: + - name: nfs-pvc + persistentVolumeClaim: + claimName: test-claim-retain diff --git a/NGINX-NIM-Docker/manifests/0.clickhouse.yaml b/NGINX-NIM-Docker/manifests/0.clickhouse.yaml new file mode 100644 index 0000000..b02b29d --- /dev/null +++ b/NGINX-NIM-Docker/manifests/0.clickhouse.yaml @@ -0,0 +1,79 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-clickhouse + annotations: + nfs.io/storage-path: clickhouse +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: clickhouse + labels: + app: clickhouse +spec: + selector: + matchLabels: + app: clickhouse + replicas: 1 + template: + metadata: + labels: + app: clickhouse + spec: + containers: + - name: clickhouse + # NGINX Instance Manager up to 2.8.0 + #image: clickhouse/clickhouse-server:21.12.4.1 + # NGINX Instance Manager 2.9.0+ + #image: clickhouse/clickhouse-server:23.1.2.9 + # NGINX Instance Manager 2.14.0+ + image: clickhouse/clickhouse-server:23.8.3.48 + ports: + - name: binary + containerPort: 9000 + - name: http + containerPort: 8123 + volumeMounts: + - name: clickhouse-users + mountPath: 
/etc/clickhouse-server/users.xml + subPath: users.xml + - name: clickhouse-conf + mountPath: /etc/clickhouse-server/config.xml + subPath: config.xml + - name: clickhouse-storage + mountPath: /var/lib/clickhouse + volumes: + - name: clickhouse-users + configMap: + name: clickhouse-users + - name: clickhouse-conf + configMap: + name: clickhouse-conf + - name: clickhouse-storage + persistentVolumeClaim: + claimName: pvc-clickhouse + +--- +apiVersion: v1 +kind: Service +metadata: + name: clickhouse + labels: + app: clickhouse +spec: + ports: + - name: binary + port: 9000 + - name: http + port: 8123 + selector: + app: clickhouse + type: ClusterIP diff --git a/NGINX-NIM-Docker/manifests/1.nginx-nim.yaml b/NGINX-NIM-Docker/manifests/1.nginx-nim.yaml new file mode 100644 index 0000000..52057e7 --- /dev/null +++ b/NGINX-NIM-Docker/manifests/1.nginx-nim.yaml @@ -0,0 +1,88 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-nim2 + labels: + app: nginx-nim2 +spec: + selector: + matchLabels: + app: nginx-nim2 + replicas: 1 + template: + metadata: + labels: + app: nginx-nim2 + spec: + containers: + - name: nginx-nim2 + image: your.registry.tld/nginx-nim2:tag + ports: + - name: https + containerPort: 443 + - name: f5tt + containerPort: 5000 + env: + ### NGINX Instance Manager environment + - name: NIM_USERNAME + value: admin + - name: NIM_PASSWORD + value: nimadmin + - name: NIM_LICENSE + value: "" + + - name: NIM_CLICKHOUSE_ADDRESS + value: clickhouse + - name: NIM_CLICKHOUSE_PORT + value: "9000" + ### If username is not set to "default", the clickhouse-users ConfigMap in 0.clickhouse.yaml shall be updated accordingly + - name: NIM_CLICKHOUSE_USERNAME + value: "default" + ### If password is not set to "NGINXr0cks", the clickhouse-users ConfigMap in 0.clickhouse.yaml shall be updated accordingly + - name: NIM_CLICKHOUSE_PASSWORD + value: "NGINXr0cks" + + ### Second Sight Push mode + - name: STATS_PUSH_ENABLE + value: "false" + - name: STATS_PUSH_MODE + value: 
CUSTOM + #value: NGINX_PUSH + - name: STATS_PUSH_URL + value: "http://192.168.1.5/callHome" + ### Push interval in seconds + - name: STATS_PUSH_INTERVAL + value: "1800" + +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-nim2 + labels: + app: nginx-nim2 +spec: + ports: + - name: https + port: 443 + - name: f5tt + port: 5000 + selector: + app: nginx-nim2 + type: ClusterIP + +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-nim2-grpc + labels: + app: nginx-nim2 +spec: + ports: + - name: grpc + port: 443 + nodePort: 30443 + selector: + app: nginx-nim2 + type: NodePort diff --git a/NGINX-NIM-Docker/manifests/2.grafana.yaml b/NGINX-NIM-Docker/manifests/2.grafana.yaml new file mode 100644 index 0000000..d6f66e9 --- /dev/null +++ b/NGINX-NIM-Docker/manifests/2.grafana.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-grafana-data + annotations: + nfs.io/storage-path: grafana/data +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-grafana-log + annotations: + nfs.io/storage-path: grafana/log +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: grafana + labels: + app: grafana +spec: + selector: + matchLabels: + app: grafana + replicas: 1 + template: + metadata: + labels: + app: grafana + spec: + containers: + - name: grafana + image: grafana/grafana + ports: + - containerPort: 3000 + volumeMounts: + - name: grafana-data + mountPath: /var/lib/grafana + - name: grafana-log + mountPath: /var/log/grafana + env: + # Clickhouse datasource plugin installation + # https://grafana.com/grafana/plugins/vertamedia-clickhouse-datasource/?tab=installation + # grafana-cli plugins install vertamedia-clickhouse-datasource + - name: GF_INSTALL_PLUGINS + value: vertamedia-clickhouse-datasource + volumes: + - name: grafana-data + 
persistentVolumeClaim: + claimName: pvc-grafana-data + - name: grafana-log + persistentVolumeClaim: + claimName: pvc-grafana-log + +--- +apiVersion: v1 +kind: Service +metadata: + name: grafana + labels: + app: grafana +spec: + ports: + - port: 3000 + selector: + app: grafana + type: ClusterIP diff --git a/NGINX-NIM-Docker/manifests/3.vs.yaml b/NGINX-NIM-Docker/manifests/3.vs.yaml new file mode 100644 index 0000000..aaedf05 --- /dev/null +++ b/NGINX-NIM-Docker/manifests/3.vs.yaml @@ -0,0 +1,43 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: nim2 +spec: + host: nim2.f5.ff.lan + tls: + secret: nim2.f5.ff.lan + upstreams: + - name: nginx-nim2 + service: nginx-nim2 + port: 443 + tls: + enable: true + client-max-body-size: 10m + - name: nginx-nim2-f5tt + service: nginx-nim2 + port: 5000 + routes: + - path: / + action: + pass: nginx-nim2 + - path: /f5tt + action: + pass: nginx-nim2-f5tt + +--- +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: grafana +spec: + host: grafana.nim2.f5.ff.lan + tls: + secret: nim2.f5.ff.lan + upstreams: + - name: grafana + service: grafana + port: 3000 + routes: + - path: / + action: + pass: grafana diff --git a/NGINX-NIM-Docker/manifests/certs/cert-install.sh b/NGINX-NIM-Docker/manifests/certs/cert-install.sh new file mode 100755 index 0000000..b423412 --- /dev/null +++ b/NGINX-NIM-Docker/manifests/certs/cert-install.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +case $1 in + 'clean') + kubectl delete secret nim2.f5.ff.lan -n nginx-nim2 + rm nim2.f5.ff.lan.key nim2.f5.ff.lan.crt + ;; + 'install') + openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout nim2.f5.ff.lan.key -out nim2.f5.ff.lan.crt -config nim2.f5.ff.lan.cnf + kubectl create secret tls nim2.f5.ff.lan --key nim2.f5.ff.lan.key --cert nim2.f5.ff.lan.crt -n nginx-nim2 + ;; + *) + echo "$0 [clean|install]" + exit + ;; +esac diff --git a/NGINX-NIM-Docker/manifests/certs/nim2.f5.ff.lan.cnf b/NGINX-NIM-Docker/manifests/certs/nim2.f5.ff.lan.cnf new 
file mode 100644 index 0000000..23d5f43 --- /dev/null +++ b/NGINX-NIM-Docker/manifests/certs/nim2.f5.ff.lan.cnf @@ -0,0 +1,16 @@ +[req] +default_bits = 2048 +prompt = no +default_md = sha256 +x509_extensions = v3_req +distinguished_name = dn + +[dn] +emailAddress = my@emailaddress.com +CN = nim2.f5.ff.lan + +[v3_req] +subjectAltName = @alt_names + +[alt_names] +DNS.1 = nim2.f5.ff.lan diff --git a/NGINX-NIM-Docker/manifests/configmaps/config.xml b/NGINX-NIM-Docker/manifests/configmaps/config.xml new file mode 100644 index 0000000..28d542a --- /dev/null +++ b/NGINX-NIM-Docker/manifests/configmaps/config.xml @@ -0,0 +1,1038 @@ + + + + + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + + 1000M + 10 + + + + + + + + + + + + + + 8123 + + + 9000 + + + 9004 + + + + + + + + + + + + + + + 9009 + + + + + + + + + + + + + + + 0.0.0.0 + + + + + + + + + + + + + 4096 + + + 3 + + + + + false + + + /path/to/ssl_cert_file + /path/to/ssl_key_file + + + false + + + /path/to/ssl_ca_cert_file + + + deflate + + + medium + + + -1 + -1 + + + false + + + + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + + + + + + + 100 + + + 0 + + + + 10000 + + + 0.9 + + + 4194304 + + + 0 + + + + + + 8589934592 + + + 5368709120 + + + + /var/lib/clickhouse/ + + + /var/lib/clickhouse/tmp/ + + + + + + /var/lib/clickhouse/user_files/ + + + + + + + + + + + users.xml + + + + /var/lib/clickhouse/access/ + + + + + + + default + + + + + + + + + + + + default + + + + + + + + + true + + + false + + + + + + + + + + + + + + + localhost + 9000 + + + + + + + + + localhost + 9000 + + + + + localhost + 9000 + + + + + + + 127.0.0.1 + 9000 + + + + + 127.0.0.2 + 9000 + + + + + + true + + 127.0.0.1 + 9000 + + + + true + + 127.0.0.2 + 9000 + + + + + + + localhost + 9440 + 1 + + + 
+ + + + localhost + 9000 + + + + + localhost + 1 + + + + + + + + + + + + + + + + + + + + + + + + 3600 + + + + 3600 + + + 60 + + + + + + + + + + + + + system + query_log
+ + toYYYYMM(event_date) + + + + + + 7500 +
+ + + + system + trace_log
+ + toYYYYMM(event_date) + 7500 +
+ + + + system + query_thread_log
+ toYYYYMM(event_date) + 7500 +
+ + + + + + + + system + metric_log
+ 7500 + 1000 +
+ + + + system + asynchronous_metric_log
+ + 60000 +
+ + + + + + engine MergeTree + partition by toYYYYMM(finish_date) + order by (finish_date, finish_time_us, trace_id) + + system + opentelemetry_span_log
+ 7500 +
+ + + + + system + crash_log
+ + + 1000 +
+ + + + + + + + + + + + + + + + + + *_dictionary.xml + + + + + + + + /clickhouse/task_queue/ddl + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + click_cost + any + + 0 + 3600 + + + 86400 + 60 + + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + + + + /var/lib/clickhouse/format_schemas/ + + + + + hide encrypt/decrypt arguments + ((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\) + + \1(???) + + + + + + + + + + false + + false + + + https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277 + + + + +
diff --git a/NGINX-NIM-Docker/manifests/configmaps/users.xml b/NGINX-NIM-Docker/manifests/configmaps/users.xml new file mode 100644 index 0000000..33bf337 --- /dev/null +++ b/NGINX-NIM-Docker/manifests/configmaps/users.xml @@ -0,0 +1,124 @@ + + + + + + + + + + 10000000000 + + + random + + + + + 0 + + + + + + + + + b056ff6ca466a8974d175ef0dd980b6c155c9399 + + + + ::/0 + + + + default + + + default + + + + + + + + + + + + + + 3600 + + + 0 + 0 + 0 + 0 + 0 + + + + diff --git a/NGINX-NIM-Docker/nim-files/.placeholder b/NGINX-NIM-Docker/nim-files/.placeholder new file mode 100644 index 0000000..e69de29 diff --git a/NGINX-NIM-Docker/scripts/buildNIM.sh b/NGINX-NIM-Docker/scripts/buildNIM.sh new file mode 100755 index 0000000..080a1e2 --- /dev/null +++ b/NGINX-NIM-Docker/scripts/buildNIM.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +BANNER="NGINX Instance Manager Docker image builder\n\n +This tool builds a Docker image to run NGINX Instance Manager\n\n +=== Usage:\n\n +$0 [options]\n\n +=== Options:\n\n +-h\t\t\t- This help\n +-t [target image]\t- Docker image name to be created\n\n +Manual build:\n\n +-n [filename]\t\t- NGINX Instance Manager .deb package filename\n +-w [filename]\t\t- Security Monitoring .deb package filename - optional\n +-p [filename]\t\t- WAF policy compiler .deb package filename - optional\n\n +Automated build:\n\n +-i\t\t\t- Automated build - requires cert & key\n +-C [file.crt]\t\t- Certificate file to pull packages from the official NGINX repository\n +-K [file.key]\t\t- Key file to pull packages from the official NGINX repository\n +-P [version]\t\t- Enable WAF policy compiler, version can be any [v3.1088.2|v4.100.1|v4.2.0|v4.218.0|v4.279.0|v4.402.0|v4.457.0|v4.583.0|v4.641|v4.762|v4.815.0|v5.17.0|v5.48.0|v5.144.0] - optional\n\n +=== Examples:\n\n +Manual build:\n +\t$0 -t my-private-registry/nginx-instance-manager:2.15.1-nap-v4.815.0-manualbuild \\\\\\n +\t\t-n nim-files/nms-instance-manager_2.15.1-1175574316~focal_amd64.deb \\\\\n +\t\t-w 
nim-files/nms-sm_1.7.1-1046510610~focal_amd64.deb \\\\\n +\t\t-p nim-files/nms-nap-compiler-v4.815.0_4.815.0-1~focal_amd64.deb\n\n +Automated build:\n +\t$0 -i -C nginx-repo.crt -K nginx-repo.key \\\\\n +\t\t-P v5.144.0 -t my.registry.tld/nginx-nms:latest\n +" + +while getopts 'hn:w:p:t:siC:K:AP:' OPTION +do + case "$OPTION" in + h) + echo -e $BANNER + exit + ;; + n) + DEBFILE=$OPTARG + ;; + w) + SM_IMAGE=$OPTARG + ;; + p) + PUM_IMAGE=$OPTARG + ;; + t) + IMGNAME=$OPTARG + ;; + i) + AUTOMATED_INSTALL=true + ;; + C) + NGINX_CERT=$OPTARG + ;; + K) + NGINX_KEY=$OPTARG + ;; + P) + ADD_PUM=$OPTARG + ;; + esac +done + +if [ -z "$1" ] +then + echo -e $BANNER + exit +fi + +if [ -z "${IMGNAME}" ] +then + echo "Docker image name is required" + exit +fi + +if ([ -z "${AUTOMATED_INSTALL}" ] && [ -z "${DEBFILE}" ]) +then + echo "NGINX Instance Manager package is required for manual installation" + exit +fi + +if ([ ! -z "${AUTOMATED_INSTALL}" ] && ([ -z "${NGINX_CERT}" ] || [ -z "${NGINX_KEY}" ])) +then + echo "NGINX certificate and key are required for automated installation" + exit +fi + +echo "==> Building NGINX Management Suite docker image" + +if [ -z "${AUTOMATED_INSTALL}" ] +then + docker build --no-cache -f Dockerfile.manual --build-arg NIM_DEBFILE=$DEBFILE \ + --build-arg SM_IMAGE=$SM_IMAGE --build-arg PUM_IMAGE=$PUM_IMAGE -t $IMGNAME . +else + DOCKER_BUILDKIT=1 docker build --no-cache -f Dockerfile.automated --secret id=nginx-key,src=$NGINX_KEY --secret id=nginx-crt,src=$NGINX_CERT \ + --build-arg ADD_PUM=$ADD_PUM \ + -t $IMGNAME . +fi + +docker push $IMGNAME diff --git a/NGINX-NIM-Docker/scripts/nimDockerStart.sh b/NGINX-NIM-Docker/scripts/nimDockerStart.sh new file mode 100755 index 0000000..22016ff --- /dev/null +++ b/NGINX-NIM-Docker/scripts/nimDockerStart.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +NAMESPACE=nginx-nim2 + +case $1 in + 'start') + kubectl create namespace $NAMESPACE + + pushd manifests/certs + ./cert-install.sh install + cd .. 
+ + kubectl create configmap clickhouse-conf -n $NAMESPACE --from-file=configmaps/config.xml + kubectl create configmap clickhouse-users -n $NAMESPACE --from-file=configmaps/users.xml + kubectl apply -n $NAMESPACE -f . + popd + ;; + 'stop') + kubectl delete namespace $NAMESPACE + ;; + *) + echo "$0 [start|stop]" + exit + ;; +esac diff --git a/NGINX-SOAP-REST/README.md b/NGINX-SOAP-REST/README.md new file mode 100644 index 0000000..ac73cb8 --- /dev/null +++ b/NGINX-SOAP-REST/README.md @@ -0,0 +1,312 @@ +# NGINX SOAP/REST gateway + +## Description + +This is a sample NGINX Plus SOAP/REST gateway configuration to translate between SOAP webservices and REST APIs. + +NGINX Plus manages client requests based on service definitions provided by an external REST-enabled backend, and translates between XML and JSON based on custom templates. + +## Prerequisites + +This repo has been tested with: + +- Linux VM +- Docker-compose v2.20.2+ +- NGINX Plus R29+ license (`nginx-repo.crt` and `nginx-repo.key`) + +## Current status / work in progress + +XML/JSON and JSON/XML translation are performed based on templates or in automatic mode + +| JSON to XML | XML to JSON | Request | Response | Template-based | Automatic | +|-------------|--------------|---------|----------|----------------|-----------| +| X | | X | | X | | +| X | | X | X | | X | + +## High level architecture + +Sequence diagram + +```mermaid +sequenceDiagram + Client->>NGINX Plus: REST or SOAP request (with Basic Auth) + NGINX Plus->>LDAP Connector: REST API Call (REST request) + LDAP Connector-->>LDAP Server: LDAP Authentication request + LDAP Server-->>LDAP Connector: LDAP Authentication reply + LDAP Connector->>NGINX Plus: Authentication response (REST reply) + NGINX Plus->>Source of truth: Service definition JSON request + Source of truth->>NGINX Plus: Service definition JSON reply + NGINX Plus->>NGINX Plus: Payload XML/JSON translation + NGINX Plus->>Backend: SOAP/REST request +``` + +Flowchart + +```mermaid 
+flowchart LR + C[Client] -->|1. SOAP/REST request| N(NGINX Plus) + N --> |2. REST API request| LDAPC[LDAP Connector] + LDAPC --> |3. LDAP auth request| LDAP[[LDAP Server]] + LDAP --> |4. LDAP auth response| LDAPC + LDAPC --> |5. REST API response| N + N --> |6. Service definition request| SOT[[Source of Truth]] + SOT --> |7. Service definition response| N + N --> |8. XML/JSON request translation| N + N --> |9. SOAP/REST request| B[Backend] +``` + +## How to deploy + +A docker-compose YAML file is provided to spin up all components. `nginx/Dockerfile` has been created starting from https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-docker/ + +To start the environment: + +1. Run the startup script: during its first run it will build all Docker images + +``` +$ ./nginx-soap-rest.sh -o start -C /etc/ssl/nginx/nginx-repo.crt -K /etc/ssl/nginx/nginx-repo.key +[+] Running 6/6 +[...] + ✔ Network nginx-soap-rest_lab-network Created + ✔ Container nginx Started + ✔ Container ldap-connector Started + ✔ Container source-of-truth Started + ✔ Container echo-server Started + ✔ Container openldap Started +``` + +2. Check running containers: + +``` +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +738fbd26a28e osixia/openldap:1.5.0 "/container/tool/run" 8 minutes ago Up 6 minutes (healthy) 0.0.0.0:389->389/tcp, 0.0.0.0:636->636/tcp openldap +9052b76a4c34 source-of-truth "/bin/sh -c /deploym…" 8 minutes ago Up 6 minutes 0.0.0.0:10080->10080/tcp source-of-truth +bb8ce93e9b26 echo-server "/bin/sh -c /deploym…" 8 minutes ago Up 6 minutes 0.0.0.0:8000->8000/tcp echo-server +50be30c44fb9 ldap-connector "/bin/sh -c '/deploy…" 8 minutes ago Up 6 minutes 0.0.0.0:5389->5389/tcp ldap-connector +b659e2dde454 nginx-soap-rest "nginx -g 'daemon of…" 8 minutes ago Up 6 minutes 0.0.0.0:80->80/tcp nginx +``` + +3. Test LDAP authentication through the LDAP connector. 
The LDAP connector provides NGINX with a REST API and acts as an LDAP client + +``` +$ curl -iX POST 127.0.0.1:5389/ldap/auth -u "alice:testpassword" -w '\n' +HTTP/1.1 200 OK +date: Wed, 09 Aug 2023 13:36:39 GMT +server: uvicorn +content-length: 38 +content-type: application/json + +{"detail":"Authentication successful"} +``` + +4. Test the source of truth: + +``` +$ curl -s 127.0.0.1:10080/fetchallkeys +{"rules":[{"enabled":"true","request_translation":{"to_xml":"$JSON.username$$JSON.email$$JSON.userid$$JSON.phone$"},"ruleid":1,"upstream":"10.5.0.13:8000","upstream_content":"xml","uri":"test.xml"},{"enabled":"true","ruleid":2,"upstream":"10.5.0.13:8000","upstream_content":"xml","uri":"auto.xml"},{"enabled":"true","ruleid":3,"upstream":"10.5.0.13:8000","upstream_content":"json","uri":"auto.json"}]} +``` + +5. Test the echo server: + +``` +$ curl -X POST http://127.0.0.1:8000 -d '{"parm":123}' -w '\n' -i +HTTP/1.0 200 OK +Server: BaseHTTP/0.6 Python/3.10.12 +Date: Thu, 10 Aug 2023 14:53:28 GMT +Content-type: text/plain + +{"parm":123} +``` + +6. Test NGINX: + +``` +$ curl -i 127.0.0.1 +HTTP/1.1 401 Unauthorized +Server: nginx/1.23.4 +Date: Thu, 10 Aug 2023 11:06:15 GMT +Content-Type: text/html +Content-Length: 179 +Connection: keep-alive +www-authenticate: Basic +X-Custom-Header: testing123 + + +401 Authorization Required + +

+<center><h1>401 Authorization Required</h1></center>

+
+<hr><center>nginx/1.23.4</center>
+ + +``` + +## Test: JSON to XML translation (template-based) + +The client POSTs a JSON payload and the backend service `echo-server` expects an XML payload. + +The source of truth holds service definition JSON files. Variables `$JSON.$` are rendered with values taken from input JSON fields. + +Example template: + +``` + + + + + $JSON.username$ + $JSON.email$ + + $JSON.userid$ + $JSON.phone$ + + + + +``` + +Service definition entries in the source of truth are looked up based on the `uri` requested by the client. +- `upstream` defines the backend service NGINX will `proxy_pass` the client request to +- `upstream_content` defines the backend service expected payload format `xml` or `json` +- `request_translation` defines translation templates `to_xml` or `to_json` + +``` + { + "ruleid": 1, + "enabled": "true", + "uri": "test.xml", + "upstream": "10.5.0.13:8000", + "upstream_content": "xml", + "request_translation": { + "to_xml": "$JSON.username$$JSON.email$$JSON.userid$JSON.phone" + } + } +``` + +Test client request: + +``` +curl -isH "X-Wanted-Content: xml" -X POST http://127.0.0.1/test.xml -u "alice:testpassword" -d ' +{ + "username":"John Doe", + "email":"john@doe.org", + "userid":"U-12345", + "phone":"(012) 345-6789" +}' +``` + +Response is: + +``` +HTTP/1.1 200 OK +Server: nginx/1.23.4 +Date: Thu, 10 Aug 2023 16:05:52 GMT +Content-Type: text/xml +Transfer-Encoding: chunked +Connection: keep-alive +X-Custom-Header: testing123 + + + + + + John Doe + john@doe.org + + U-12345 + (012) 345-6789 + + + + +``` + +NGINX logs report: + +``` +$ docker logs nginx -f +2023/08/10 16:05:53 [warn] 8#8: *5 js: Request: Client[10.5.0.1] Scheme[http] Method[POST] Host[127.0.0.1] URI [/test.xml] Body[ +{ + "username":"John Doe", + "email":"john@doe.org", + "userid":"U-12345", + "phone":"(012) 345-6789" +}] +2023/08/10 16:05:53 [warn] 8#8: *5 js: Subrequest [/sourceOfTruth/fetchkey/test.xml] +2023/08/10 16:05:53 [warn] 8#8: *5 js: subReqCallback got 200 +2023/08/10 16:05:53 [warn] 
8#8: *5 js: Rewrite rule [127.0.0.1/test.xml] -> upstream content [xml] +2023/08/10 16:05:53 [warn] 8#8: *5 js: Upstream wants XML payload +2023/08/10 16:05:53 [warn] 8#8: *5 js: Payload translation JSON -> XML +2023/08/10 16:05:53 [warn] 8#8: *5 js: ===> Template-based JSON to XML translation +2023/08/10 16:05:53 [warn] 8#8: *5 js: Request body: [[object Object]] +2023/08/10 16:05:53 [warn] 8#8: *5 js: Template : [$JSON.username$$JSON.email$$JSON.userid$$JSON.phone$] +2023/08/10 16:05:53 [warn] 8#8: *5 js: => JSON -> XML Translation token [JSON.username] +2023/08/10 16:05:53 [warn] 8#8: *5 js: username found in JSON payload +2023/08/10 16:05:53 [warn] 8#8: *5 js: => JSON -> XML Translation token [JSON.email] +2023/08/10 16:05:53 [warn] 8#8: *5 js: email found in JSON payload +2023/08/10 16:05:53 [warn] 8#8: *5 js: => JSON -> XML Translation token [JSON.userid] +2023/08/10 16:05:53 [warn] 8#8: *5 js: userid found in JSON payload +2023/08/10 16:05:53 [warn] 8#8: *5 js: => JSON -> XML Translation token [JSON.phone] +2023/08/10 16:05:53 [warn] 8#8: *5 js: phone found in JSON payload +2023/08/10 16:05:53 [warn] 8#8: *5 js: Request body sent to upstream: [John Doejohn@doe.orgU-12345(012) 345-6789] +2023/08/10 16:05:53 [warn] 8#8: *5 js: Proxying to [http://10.5.0.13:8000] +10.5.0.1 - alice [10/Aug/2023:16:05:53 +0000] "POST /test.xml HTTP/1.1" 200 401 "-" "curl/8.0.1" "-" +``` + +## Test: JSON to XML translation (automatic mode) + +Client request: + +``` +curl -isH "X-Wanted-Content: xml" -X POST http://127.0.0.1/auto.xml -u "alice:testpassword" -d ' +{ + "username":"John Doe", + "email":"john@doe.org", + "userid":"U-12345", + "phone":"(012) 345-6789" +}' +``` + +Response: + +``` +HTTP/1.1 200 OK +Server: nginx/1.23.4 +Date: Thu, 10 Aug 2023 16:24:44 GMT +Content-Type: text/xml +Transfer-Encoding: chunked +Connection: keep-alive +X-Custom-Header: testing123 + + + + + + John Doe + john@doe.org + U-12345 + (012) 345-6789 + + + +``` + +## How to undeploy + +The environment 
can be stopped using: + +``` +$ ./nginx-soap-rest.sh -o stop +[+] Running 6/6 + ✔ Container echo-server Removed + ✔ Container ldap-connector Removed + ✔ Container openldap Removed + ✔ Container source-of-truth Removed + ✔ Container nginx Removed + ✔ Network nginx-soap-rest_lab-network Removed +``` diff --git a/NGINX-SOAP-REST/docker-compose.yaml b/NGINX-SOAP-REST/docker-compose.yaml new file mode 100644 index 0000000..0800508 --- /dev/null +++ b/NGINX-SOAP-REST/docker-compose.yaml @@ -0,0 +1,90 @@ +version: "3.9" + +services: + openldap: + container_name: openldap + image: osixia/openldap:1.5.0 + restart: always + ports: + - 389:389 + - 636:636 + networks: + lab-network: + ipv4_address: 10.5.0.10 + volumes: + - ./ldap/ldif:/ldif:ro + healthcheck: + test: ["CMD-SHELL", "ldapadd -x -D cn=admin,dc=example,dc=org -w admin -H ldap://localhost -ZZ -f /ldif/setup.ldif || exit 0"] + interval: 10s + timeout: 5s + retries: 2 + + ldap-connector: + container_name: ldap-connector + image: ldap-connector + build: + dockerfile: ldap/connector/Dockerfile + ports: + - 5389:5389 + networks: + lab-network: + ipv4_address: 10.5.0.11 + environment: + - LDAP_SERVER=openldap:389 + + source-of-truth: + container_name: source-of-truth + image: source-of-truth + build: + dockerfile: source-of-truth/Dockerfile + ports: + - 10080:10080 + networks: + lab-network: + ipv4_address: 10.5.0.12 + + echo-server: + container_name: echo-server + image: echo-server + build: + dockerfile: echo-server/Dockerfile + ports: + - 8000:8000 + networks: + lab-network: + ipv4_address: 10.5.0.13 + + nginx: + container_name: nginx + image: nginx-soap-rest + build: + dockerfile: nginx/Dockerfile + secrets: + - nginx-crt + - nginx-key + ports: + - 80:80 + networks: + lab-network: + ipv4_address: 10.5.0.14 + volumes: + - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro + - ./nginx/soaprest.js:/etc/nginx/conf.d/soaprest.js:ro + - ./nginx/soaprest.conf:/etc/nginx/conf.d/soaprest.conf:ro + - 
/dev/null:/etc/nginx/conf.d/default.conf:ro + +secrets: + nginx-crt: + name: nginx-crt + file: ${NGINX_CERT} + nginx-key: + name: nginx-key + file: ${NGINX_KEY} + +networks: + lab-network: + driver: bridge + ipam: + config: + - subnet: 10.5.0.0/24 + gateway: 10.5.0.1 diff --git a/NGINX-SOAP-REST/echo-server/Dockerfile b/NGINX-SOAP-REST/echo-server/Dockerfile new file mode 100644 index 0000000..ff8e315 --- /dev/null +++ b/NGINX-SOAP-REST/echo-server/Dockerfile @@ -0,0 +1,10 @@ +From ubuntu:22.04 + +RUN apt-get update \ + && apt-get -y install python3 \ + && mkdir /deployment + +COPY ./echo-server/echo-server.py /deployment + +WORKDIR /deployment +CMD /deployment/echo-server.py diff --git a/NGINX-SOAP-REST/echo-server/echo-server.py b/NGINX-SOAP-REST/echo-server/echo-server.py new file mode 100755 index 0000000..bd601b6 --- /dev/null +++ b/NGINX-SOAP-REST/echo-server/echo-server.py @@ -0,0 +1,26 @@ +#!/usr/bin/python3 + +import http.server +import socketserver + +class EchoRequestHandler(http.server.BaseHTTPRequestHandler): + def _set_response(self): + self.send_response(200) + self.send_header('Content-type', 'text/plain') + self.end_headers() + + def do_POST(self): + content_length = int(self.headers['Content-Length']) + post_data = self.rfile.read(content_length).decode('utf-8') + + self._set_response() + self.wfile.write(post_data.encode('utf-8')) + +def run(server_class=http.server.HTTPServer, handler_class=EchoRequestHandler, port=8000): + server_address = ('', port) + httpd = server_class(server_address, handler_class) + print(f"Starting httpd on {server_address[0]}:{server_address[1]}...") + httpd.serve_forever() + +if __name__ == '__main__': + run() diff --git a/NGINX-SOAP-REST/ldap/connector/Dockerfile b/NGINX-SOAP-REST/ldap/connector/Dockerfile new file mode 100644 index 0000000..1a3d869 --- /dev/null +++ b/NGINX-SOAP-REST/ldap/connector/Dockerfile @@ -0,0 +1,16 @@ +From ubuntu:22.04 + +ARG LDAP_SERVER + +RUN apt-get update \ +# && apt-get -y install 
python3 python3-venv python3-pip python3-openssl libsasl2-dev libldap2-dev libssl-dev \ + && apt-get -y install python3 python3-pip python3-openssl libsasl2-dev libldap2-dev libssl-dev \ + && mkdir /deployment + +COPY ./ldap/connector/ldap-connector.py /deployment +COPY ./ldap/connector/requirements.txt /deployment + +RUN pip install --no-cache -r /deployment/requirements.txt + +WORKDIR /deployment +CMD /deployment/ldap-connector.py $LDAP_SERVER diff --git a/NGINX-SOAP-REST/ldap/connector/ldap-connector.py b/NGINX-SOAP-REST/ldap/connector/ldap-connector.py new file mode 100755 index 0000000..1d203e5 --- /dev/null +++ b/NGINX-SOAP-REST/ldap/connector/ldap-connector.py @@ -0,0 +1,37 @@ +#!/usr/bin/python3 + +import os +import secrets +import uvicorn +import sys + +from fastapi import FastAPI, Depends, HTTPException, status, Response +from fastapi.security import HTTPBasic, HTTPBasicCredentials +from typing_extensions import Annotated + +from ldap3 import Server, Connection, SAFE_SYNC + + +app = FastAPI(openapi_url="/ldap/access/openapi.json", docs_url="/ldap/access/docs") +security = HTTPBasic() + +def authorize(credentials: HTTPBasicCredentials = Depends(security)): + + #server = Server('ubuntu.ff.lan:389') + server = Server(sys.argv[1]) + try: + conn = Connection(server, f'uid={credentials.username},ou=users,dc=example,dc=org', credentials.password, client_strategy=SAFE_SYNC, auto_bind=True) + except Exception: + raise HTTPException(status_code=401, detail='Authentication failed') + + +@app.post('/ldap/auth', dependencies=[Depends(authorize)], status_code=status.HTTP_200_OK) +def auth(response: Response): + return {'detail': 'Authentication successful'}; + + +if __name__ == '__main__': + if len(sys.argv) != 2: + print('LDAP server missing') + else: + uvicorn.run("ldap-connector:app", host='0.0.0.0', port=5389) diff --git a/NGINX-SOAP-REST/ldap/connector/requirements.txt b/NGINX-SOAP-REST/ldap/connector/requirements.txt new file mode 100644 index 0000000..40e08aa 
--- /dev/null +++ b/NGINX-SOAP-REST/ldap/connector/requirements.txt @@ -0,0 +1,6 @@ +cryptography +secrets +uvicorn +fastapi +typing_extensions +ldap3 diff --git a/NGINX-SOAP-REST/ldap/ldif/setup.ldif b/NGINX-SOAP-REST/ldap/ldif/setup.ldif new file mode 100644 index 0000000..010b7c8 --- /dev/null +++ b/NGINX-SOAP-REST/ldap/ldif/setup.ldif @@ -0,0 +1,20 @@ +dn: ou=users,dc=example,dc=org +objectClass: organizationalUnit +ou: users + +dn: uid=alice,ou=users,dc=example,dc=org +objectClass: top +objectClass: account +objectClass: posixAccount +objectClass: shadowAccount +cn: alice +uid: alice +uidNumber: 16861 +gidNumber: 100 +homeDirectory: /home/alice +loginShell: /bin/bash +gecos: alice +userPassword: testpassword +shadowLastChange: 0 +shadowMax: 0 +shadowWarning: 0 diff --git a/NGINX-SOAP-REST/nginx-soap-rest.sh b/NGINX-SOAP-REST/nginx-soap-rest.sh new file mode 100755 index 0000000..c891b83 --- /dev/null +++ b/NGINX-SOAP-REST/nginx-soap-rest.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +PROJECT_NAME=nginx-soap-rest +DOCKERCOMPOSE=docker-compose.yaml + +BANNER="NGINX SOAP REST lab - https://github.com/fabriziofiorucci/NGINX-SOAP-REST\n\n +=== Usage:\n\n +$0 [options]\n\n +== Options:\n\n +-h\t\t\t- This help\n +-o [start|stop]\t- Start/stop the lab\n +-C [file.crt]\t\t- Certificate file to pull packages from the official NGINX repository\n +-K [file.key]\t\t- Key file to pull packages from the official NGINX repository\n\n +=== Examples:\n\n +Lab start:\n +\t$0 -o start -C /etc/ssl/nginx/nginx-repo.crt -K /etc/ssl/nginx/nginx-repo.key\n\n +Lab stop:\n +\t$0 -o stop\n\n +" + + +while getopts 'ho:C:K:' OPTION +do + case "$OPTION" in + h) + echo -e $BANNER + exit + ;; + o) + MODE=$OPTARG + ;; + C) + export NGINX_CERT=$OPTARG + ;; + K) + export NGINX_KEY=$OPTARG + ;; + esac +done + +if [ -z "$1" ] || [ -z "${MODE}" ] +then + echo -e $BANNER + exit +fi + +case $MODE in + 'start') + if [ -z "${NGINX_CERT}" ] || [ -z "${NGINX_KEY}" ] + then + echo "Missing NGINX Plus 
certificate/key" + exit + fi + + DOCKER_BUILDKIT=1 docker-compose -p $PROJECT_NAME -f $DOCKERCOMPOSE up -d --remove-orphans + ;; + 'stop') + export NGINX_CERT="x" + export NGINX_KEY="x" + docker-compose -p $PROJECT_NAME -f $DOCKERCOMPOSE down + ;; + *) + echo "$0 [start|stop]" + exit + ;; +esac diff --git a/NGINX-SOAP-REST/nginx/Dockerfile b/NGINX-SOAP-REST/nginx/Dockerfile new file mode 100644 index 0000000..8ed3647 --- /dev/null +++ b/NGINX-SOAP-REST/nginx/Dockerfile @@ -0,0 +1,66 @@ +FROM alpine:3.17 + +LABEL maintainer="NGINX Docker Maintainers " + +# Define NGINX versions for NGINX Plus and NGINX Plus modules +# Uncomment this block and the versioned nginxPackages in the main RUN +# instruction to install a specific release +# ENV NGINX_VERSION 29 +# ENV NJS_VERSION 0.7.12 +# ENV PKG_RELEASE 1 + +# Download certificate and key from the customer portal (https://account.f5.com) +# and copy to the build context +RUN --mount=type=secret,id=nginx-crt,dst=cert.pem \ + --mount=type=secret,id=nginx-key,dst=cert.key \ + set -x \ +# Create nginx user/group first, to be consistent throughout Docker variants + && addgroup -g 101 -S nginx \ + && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \ +# Install the latest release of NGINX Plus and/or NGINX Plus modules +# Uncomment individual modules if necessary +# Use versioned packages over defaults to specify a release + && nginxPackages=" \ + nginx-plus \ + # nginx-plus=${NGINX_VERSION}-r${PKG_RELEASE} \ + # nginx-plus-module-xslt \ + # nginx-plus-module-xslt=${NGINX_VERSION}-r${PKG_RELEASE} \ + # nginx-plus-module-geoip \ + # nginx-plus-module-geoip=${NGINX_VERSION}-r${PKG_RELEASE} \ + # nginx-plus-module-image-filter \ + # nginx-plus-module-image-filter=${NGINX_VERSION}-r${PKG_RELEASE} \ + # nginx-plus-module-perl \ + # nginx-plus-module-perl=${NGINX_VERSION}-r${PKG_RELEASE} \ + nginx-plus-module-njs \ + # nginx-plus-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${PKG_RELEASE} \ + " \ + 
KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \ + && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \ + && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \ + echo "key verification succeeded!"; \ + mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \ + else \ + echo "key verification failed!"; \ + exit 1; \ + fi \ + && cat cert.pem > /etc/apk/cert.pem \ + && cat cert.key > /etc/apk/cert.key \ + && apk add -X "https://pkgs.nginx.com/plus/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ + && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi \ + && if [ -f "/etc/apk/cert.key" ] && [ -f "/etc/apk/cert.pem" ]; then rm -f /etc/apk/cert.key /etc/apk/cert.pem; fi \ +# Bring in tzdata so users could set the timezones through the environment +# variables + && apk add --no-cache tzdata \ +# Bring in curl and ca-certificates to make registering on DNS SD easier + && apk add --no-cache curl ca-certificates \ +# Forward request and error logs to Docker log collector + && ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +EXPOSE 80 + +STOPSIGNAL SIGQUIT + +CMD ["nginx", "-g", "daemon off;"] + +# vim:syntax=Dockerfile diff --git a/NGINX-SOAP-REST/nginx/nginx.conf b/NGINX-SOAP-REST/nginx/nginx.conf new file mode 100644 index 0000000..921a49b --- /dev/null +++ b/NGINX-SOAP-REST/nginx/nginx.conf @@ -0,0 +1,39 @@ +user nginx; +worker_processes auto; + +error_log /var/log/nginx/error.log notice; +pid /var/run/nginx.pid; + +load_module modules/ngx_http_js_module.so; +load_module modules/ngx_stream_js_module.so; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] 
"$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + #tcp_nopush on; + keepalive_timeout 65; + #gzip on; + include /etc/nginx/conf.d/*.conf; +} + + +# TCP/UDP proxy and load balancing block +stream { + log_format stream-main '$remote_addr [$time_local] ' + '$protocol $status $bytes_sent $bytes_received ' + '$session_time "$ssl_preread_server_name"'; + #access_log /dev/stdout stream-main; + include /etc/nginx/stream-conf.d/*.conf; +} diff --git a/NGINX-SOAP-REST/nginx/soaprest.conf b/NGINX-SOAP-REST/nginx/soaprest.conf new file mode 100644 index 0000000..1c713a7 --- /dev/null +++ b/NGINX-SOAP-REST/nginx/soaprest.conf @@ -0,0 +1,73 @@ +js_import soaprest from conf.d/soaprest.js; + +proxy_cache_path /var/cache/nginx/jwk levels=1 keys_zone=jwk:1m max_size=10m; +proxy_cache_path /var/tmp/cache levels=1:2 keys_zone=dbQueryCache:10m max_size=20m inactive=1m use_temp_path=off; +proxy_cache_key "$scheme://$host$request_uri$query_string"; + +log_format jwt '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" "$http_user_agent" ' + '$jwt_header_alg $jwt_claim_sub'; + +# LDAP connector +upstream upstream_auth_server { + server ldap-connector:5389; +} + +# Source of truth +upstream upstream_source_of_truth { + server source-of-truth:10080; +} + +server { + server_name $host; + #resolver 8.8.8.8; + + listen 80; + + location / { + access_log /var/log/nginx/access.log main; + error_log /var/log/nginx/error.log warn; + + auth_request /auth; + + js_content soaprest.requestHandler; + #js_header_filter soaprest.headerFilter; + } + + location ~ /proxyToUpstream/(.*) { + internal; + + proxy_buffer_size 256k; + proxy_buffers 4 256k; + proxy_busy_buffers_size 256k; + subrequest_output_buffer_size 256k; + + proxy_ssl_session_reuse off; + proxy_ssl_server_name on; + + # Request header injection + proxy_set_header 
X-Injected-Request-Header "test value"; + + # Proxy request to upstream + proxy_pass http://$1; + } + + location = /auth { + internal; + + proxy_method POST; + proxy_set_header Authorization $http_authorization; + + proxy_pass http://upstream_auth_server/ldap/auth; + } + + location ~ /sourceOfTruth/(.*) { + internal; + + proxy_cache dbQueryCache; + proxy_cache_bypass $http_pragma; + proxy_cache_lock on; + proxy_cache_valid 200 1m; + proxy_pass http://upstream_source_of_truth/$1; + } +} diff --git a/NGINX-SOAP-REST/nginx/soaprest.js b/NGINX-SOAP-REST/nginx/soaprest.js new file mode 100644 index 0000000..f89426d --- /dev/null +++ b/NGINX-SOAP-REST/nginx/soaprest.js @@ -0,0 +1,263 @@ +export default {requestHandler,headerFilter}; + +const xml = require("xml"); +const querystring = require("querystring"); +const fs = require("fs"); + +function requestHandler(r) { + r.warn('Request: Client['+r.remoteAddress+'] Scheme['+r.variables.scheme+'] Method['+r.method+'] Host['+r.headersIn['host']+'] URI ['+r.uri+'] Body['+r.requestText+']'); + + // Queries the source of truth + r.warn('Subrequest [/sourceOfTruth/fetchkey'+r.uri+']'); + r.subrequest('/sourceOfTruth/fetchkey'+r.uri,'',sourceOfTruthCallback); + + function sourceOfTruthCallback(reply) { + if(reply.status!=200) { + // Rule not found + r.warn('Rule not found - returning 404'); + r.return(404); + } else { + r.warn('subReqCallback got 200'); + + var body = JSON.parse(reply.responseText); + + if (body.rule.enabled=='false') { + // Rule is disabled + r.warn('Rule is disabled - returning 404'); + r.return(404); + } else { + // Rule is enabled + r.warn('Rewrite rule ['+r.headersIn['host']+r.uri+'] -> upstream content ['+body.rule.upstream_content+']'); + + // Request body translation + let requestBody = ''; + let convertedRequestBody = ''; + + if(r.requestText != null) { + convertedRequestBody = r.requestText; + } + + //let requestContentType = String(r.headersIn['Content-Type']); + + // Service definition JSON 
"upstream_content" field describes the content format expected by the upstream. It can be set to either "json" or "xml" and it enables request body translation when needed + if(r.requestText != null) { + if (body.rule.upstream_content == 'json') { + r.warn('Upstream requires JSON payload'); + + if(isXml(r.requestText)) { + requestBody = xml.parse(r.requestText); + + if ('request_translation' in body.rule) { + r.warn('Request payload translation XML -> JSON - template-based'); + convertedRequestBody = templateSoapToRest(r,requestBody,body.rule.request_translation.to_json) + } else { + r.warn('Request payload translation XML -> JSON - automatic mode'); + convertedRequestBody = soapToRest(r,requestBody); + } + } + } else if (body.rule.upstream_content == 'xml') { + r.warn('Upstream requires XML payload'); + + if(isJson(r.requestText)) { + requestBody = JSON.parse(r.requestText); + + if ('request_translation' in body.rule) { + r.warn('Request payload translation JSON -> XML - template-based'); + convertedRequestBody = templateRestToSoap(r,requestBody,body.rule.request_translation.to_xml) + } else { + r.warn('Request payload translation JSON -> XML - automatic mode'); + convertedRequestBody = restToSoap(r,requestBody); + } + } + } else { + convertedRequestBody = r.requestText; + } + } + + r.warn('Request body sent to upstream: [' + convertedRequestBody + ']'); + + // Proxy the request to upstream + r.warn('Proxying to [http://'+body.rule.upstream+']'); + r.subrequest('/proxyToUpstream/'+body.rule.upstream,{method: r.method, body: convertedRequestBody},proxyCallback); + + function proxyCallback(upstreamReply) { + // Collect upstream reply + //r.warn('Upstream reply status ['+upstreamReply.status+'] body ['+upstreamReply.responseText+']'); + r.status=upstreamReply.status; + + let responseBody = ''; + let convertedBody = ''; + + let replyContentType = upstreamReply.headersOut['Content-Type']; + + // XML to JSON and JSON to XML response payload translation + // 
X-Wanted-Content HTTP request header can be set to either "json" or "xml" to enable response body translation + if (r.headersIn['X-Wanted-Content'] == 'json' && replyContentType.includes('application/xml')) { + responseBody = xml.parse(upstreamReply.responseText); + convertedBody = soapToRest(r,responseBody); + + delete r.headersOut["Content-Length"]; + delete r.headersOut["Content-Type"]; + r.headersOut['Content-Type'] = 'application/json; charset=utf-8'; + } else if (r.headersIn['X-Wanted-Content'] == 'xml' && replyContentType.includes('application/json')) { + responseBody = JSON.parse(upstreamReply.responseText); + convertedBody = restToSoap(r,responseBody); + + delete r.headersOut["Content-Length"]; + delete r.headersOut["Content-Type"]; + r.headersOut['Content-Type'] = 'application/soap+xml; charset=utf-8'; + } else { + convertedBody = upstreamReply.responseText; + } + + //r.warn('Reply type: ' + upstreamReply.headersOut['Content-Type']); + //r.warn('Converted body: ' + convertedBody); + + // Returns upstream reply headers to client + for (var header in upstreamReply.headersOut) { + switch (header) { + case 'Content-Type': + case 'Content-Length': + break; + default: + r.headersOut[header] = upstreamReply.headersOut[header]; + } + } + + r.sendHeader(); + r.send(convertedBody); + r.finish(); + } + } + } +}} + +function headerFilter(r) { + // Sample response headers removal + //delete r.headersOut["Content-Length"]; + //delete r.headersOut["Content-Type"]; + + // Sample response headers injection + //r.headersOut["Content-Type"] = "application/json"; + //r.headersOut["Content-Type"] = "application/soap+xml; charset=utf-8"; + //r.headersOut["X-Custom-Header"] = "testing123"; +} + +// REST to SOAP payload translation +function restToSoap(r,obj) { + let soap = ''+ + '' + + '' + + '' + + jsonToXml(r,obj) + + '' + + '' + + '' + + return soap; +} + +function jsonToXml(r,json) { + let xml = ''; + + Object.keys(json).forEach(k => { + if (typeof json[k] == 'object') { + 
//r.warn('==> JSON object ['+k+']'); + xml += '<'+k+'>'+jsonToXml(r,json[k])+''; + } else { + //r.warn('JSON parameter ['+k+'] => ['+json[k]+']'); + xml += '<'+k+'>'+json[k]+''; + } + }) + + return xml; +} + +// template-based JSON to XML payload translation +function templateRestToSoap(r,jsonRequestBody,translationTemplate) { + r.warn('===> Template-based JSON to XML translation'); + r.warn('Request body: [' + jsonRequestBody + ']'); + r.warn('Template : [' + translationTemplate + ']'); + + let outputXml = ''; + let tokenFound = false; + let tokenName = ''; + + Object.keys(translationTemplate).forEach(c => { + let char = translationTemplate[c]; + + if (char != '$') { + if (tokenFound == false) { + outputXml += char; + } else { + tokenName += char; + } + } else { + // JSON translation '$' token found + if (tokenFound == true) { + let jsonField = tokenName.substring(5); + r.warn('=> JSON -> XML Translation token ['+tokenName+']'); + + // Removes 'JSON.' at the start of the token name + if (tokenName.substring(5) in jsonRequestBody) { + r.warn(' '+jsonField+' found in JSON payload'); + outputXml += jsonRequestBody[tokenName.substring(5)]; + } else { + r.warn(' '+jsonField+' missing in JSON payload'); + } + tokenName = ''; + tokenFound = false; + } else { + tokenFound = true; + } + } + }); + + return outputXml; +} + +// SOAP to REST payload translation +function soapToRest(r,obj) { + //let json = { test: 123, code: 456 }; + + let json = {}; + json = xmlToJson(r,obj); + + return JSON.stringify(json); +} + +function xmlToJson(r,xml) { + let json = {}; + + Object.keys(xml).forEach(k => { + if (typeof xml[k][Symbol.toStringTag] == 'XMLNode') { + //r.warn('==> XML node ['+k+'] => ['+xml[k]+']'); + json[k] = xmlToJson(r,xml[k]); + } else { + //r.warn('XML parameter ['+k+'] => ['+xml[k]+']'); + json[k] = xml[k]; + } + }) + + return json; +} + +// JSON format check +function isJson(str) { + try { + JSON.parse(str); + } catch (e) { + return false; + } + return true; +} + +// 
XML format check +function isXml(str) { + try { + xml.parse(str); + } catch (e) { + return false; + } + return true; +} diff --git a/NGINX-SOAP-REST/source-of-truth/Dockerfile b/NGINX-SOAP-REST/source-of-truth/Dockerfile new file mode 100644 index 0000000..5eff9f0 --- /dev/null +++ b/NGINX-SOAP-REST/source-of-truth/Dockerfile @@ -0,0 +1,13 @@ +From ubuntu:22.04 + +RUN apt-get update \ + && apt-get -y install python3 python3-venv python3-pip python3-openssl libsasl2-dev libldap2-dev libssl-dev \ + && mkdir /deployment + +COPY ./source-of-truth/source-of-truth.py /deployment +COPY ./source-of-truth/requirements.txt /deployment + +RUN pip install --no-cache -r /deployment/requirements.txt + +WORKDIR /deployment +CMD /deployment/source-of-truth.py diff --git a/NGINX-SOAP-REST/source-of-truth/requirements.txt b/NGINX-SOAP-REST/source-of-truth/requirements.txt new file mode 100644 index 0000000..7e10602 --- /dev/null +++ b/NGINX-SOAP-REST/source-of-truth/requirements.txt @@ -0,0 +1 @@ +flask diff --git a/NGINX-SOAP-REST/source-of-truth/source-of-truth.py b/NGINX-SOAP-REST/source-of-truth/source-of-truth.py new file mode 100755 index 0000000..1c19126 --- /dev/null +++ b/NGINX-SOAP-REST/source-of-truth/source-of-truth.py @@ -0,0 +1,49 @@ +#!/usr/bin/python3 +from flask import Flask, jsonify, abort, make_response, request + +app = Flask(__name__) + +rules = [ + { + 'ruleid': 1, + 'enabled': u'true', + 'uri': u'test.xml', + 'upstream_content': u'xml', + 'upstream': u'10.5.0.13:8000', + 'request_translation': { + 'to_xml': u'$JSON.username$$JSON.email$$JSON.userid$$JSON.phone$' + } + }, + { + 'ruleid': 2, + 'enabled': u'true', + 'uri': u'auto.xml', + 'upstream_content': u'xml', + 'upstream': u'10.5.0.13:8000' + }, + { + 'ruleid': 3, + 'enabled': u'true', + 'uri': u'auto.json', + 'upstream_content': u'json', + 'upstream': u'10.5.0.13:8000' + } +] + +@app.route('/fetchkey/', methods=['GET']) +def get_key(uri): + rule = [rule for rule in rules if rule['uri'] == uri] + if 
len(rule) == 0:
+        abort(404)
+    return jsonify({'rule': rule[0]})
+
+# Returns every configured steering rule (debug/inspection endpoint).
+@app.route('/fetchallkeys', methods=['GET'])
+def get_all_keys():
+    return jsonify({'rules': rules})
+
+# JSON 404 body instead of Flask's default HTML error page, so the
+# gateway always receives machine-readable replies.
+@app.errorhandler(404)
+def not_found(error):
+    return make_response(jsonify({'error': 'Not found'}), 404)
+
+if __name__ == '__main__':
+    # Listen on all interfaces so the container port can be published.
+    app.run(host='0.0.0.0', port=10080)
diff --git a/README.md b/README.md
index 6bcfa8b..d338f39 100644
--- a/README.md
+++ b/README.md
@@ -19,10 +19,16 @@ Each demo might have unique deployment requirements. Please refer to each indivi
 |Title|Description|
 |-----|-----------|
+|NGINX Advanced Healthcheck|Advanced active healthchecks for NGINX Plus|
 |NGINX API gateway|Configure NGINX as an API gateway|
+|NGINX API Steering|NGINX as an API gateway using an external data source for authentication, authorization and steering|
+|NGINX Docker Image Builder|Tool to build several docker images for NGINX Plus, NGINX App Protect, NGINX Agent|
 |NGINX Gateway Fabric|Simple overview of configuring NGINX Gateway Fabric to route traffic within Kubernetes|
 |NGINX Ingress Controller|Simple overview of deploying and configuring NGINX Ingress Controller|
+|NGINX Multicloud Gateway|NGINX setup for URI-based kubernetes traffic routing|
+|NGINX NIM Docker|Tool to build docker images for NGINX Instance Manager|
 |NGINX One|Simple overview of NGINX One and its capabilities|
+|NGINX SOAP REST|Example NGINX configuration to translate between SOAP and REST|
 
 ## Contributing