Authored by chunhua.zhang

Merge branch 'ufo-nginx' into 'master'

Ufo nginx-upstream

Ufo nginx-upstream up/down changes

See merge request !31
-- conf/lua/init_config_worker.lua
local upstream_cache = ngx.shared.upstream
local upstream = require "ngx.upstream"
local cjson = require "cjson"


-->>begin: timer at fixed rate call function.
-- Re-arms itself every t seconds to run f. When id is given, f runs only on
-- the worker with that id; otherwise every worker runs f (required here,
-- because peer up/down state is tracked per worker process).
local timer_handler
timer_handler = function(premature, t, f, id)
    if premature then
        -- nginx is shutting this worker down; do not re-arm
        return
    end
    if id == nil or ngx.worker.id() == id then
        local ok, errinfo = pcall(f)
        if not ok then
            ngx.log(ngx.ERR, "task request error:" .. tostring(errinfo))
        end
    end
    -- pass id along so the worker filter survives re-arming
    ngx.timer.at(t, timer_handler, t, f, id)
end
--<<end: timer at fixed rate call function.


-- last applied version per "v:" key, local to this worker
local stream_ctx = {}

local function updownstream()
    local keys = upstream_cache:get_keys()

    for _, k in ipairs(keys) do
        -- "d:..." entries carry serialized up/down requests
        if string.sub(k, 1, 1) == "d" then

            local vKey = "v" .. string.sub(k, 2)
            local version = upstream_cache:get(vKey)

            local value = upstream_cache:get(k)
            local v = cjson.decode(value)

            -- apply only when this worker has not yet seen this version
            if (not stream_ctx[vKey]) or stream_ctx[vKey] < version then
                local ok, err = upstream.set_peer_down(v["upstream"], v["backup"], v["id"], v["value"])
                if not ok then
                    ngx.log(ngx.ERR, "up or down stream err:", ngx.worker.id(), " ", value, " ", err)
                else
                    stream_ctx[vKey] = version
                end
            end

        end
    end
end

timer_handler(false, 2, updownstream)
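
For illustration, a minimal sketch (not part of the files in this merge request) of the write that this timer loop consumes; the key scheme and field names mirror `upordown()` in `conf/lua/upstream.lua`:

```lua
-- Hypothetical producer write: bump the "v:" version counter, then store the
-- serialized request under the matching "d:" key. Each worker's timer compares
-- the version against its stream_ctx and re-applies set_peer_down only when
-- the version is newer, so repeated ticks stay idempotent.
local cjson = require "cjson"
local cache = ngx.shared.upstream

local rKey = "ufogateway:0:false"       -- <upstream>:<peer_id>:<is_backup>
cache:add("v:" .. rKey, 0)              -- create the counter on first use
cache:incr("v:" .. rKey, 1)             -- newer version => every worker re-applies
cache:set("d:" .. rKey, cjson.encode({
    upstream = "ufogateway",            -- upstream block name
    backup   = false,                   -- primary peer list
    id       = 0,                       -- peer index within that list
    value    = true,                    -- true = down, false = up
}))
```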
-- conf/lua/init_lua.lua: runs once in the master process on startup/reload;
-- clear stale up/down state so peers start from the nginx.conf defaults.
ngx.shared.upstream:flush_all()
ngx.shared.upstream:flush_expired()
-- conf/lua/upstream.lua: HTTP interface (list / up / down) for upstream peers
local upstream = require "ngx.upstream"
local json = require "cjson"

local get_servers = upstream.get_servers
local get_upstreams = upstream.get_upstreams
local cache = ngx.shared.upstream

-- list all peers (primary and backup) of upstream u as JSON
local function list(u)

    local d = {}
    d["name"] = u
    d["value"] = {}

    -- get primary peers
    local peers, err = upstream.get_primary_peers(u)
    if err then
        ngx.say("failed to get primary servers in upstream ", u)
        return
    end
    for _, p in ipairs(peers) do
        local s = {}
        s["id"] = p.id
        s["down"] = p.down or false
        s["name"] = p.name
        s["backup"] = false
        table.insert(d["value"], s)
    end

    -- get backup peers
    peers, err = upstream.get_backup_peers(u)
    if err then
        ngx.say("failed to get backup servers in upstream ", u)
        return
    end
    for _, p in ipairs(peers) do
        local s = {}
        s["id"] = p.id
        s["down"] = p.down or false
        s["name"] = p.name
        s["backup"] = true
        table.insert(d["value"], s)
    end

    ngx.header["Content-type"] = "application/json;charset=utf-8"
    ngx.say(json.encode(d))
end


-- record an up/down request in the shared dict; the per-worker timer in
-- init_config_worker.lua picks it up and calls set_peer_down
local function upordown(upstream_name, is_backup, peer_id, down_value)
    local t = {}
    t["upstream"] = upstream_name
    t["backup"] = is_backup
    t["id"] = peer_id
    t["value"] = down_value
    local rKey = upstream_name .. ":" .. tostring(peer_id) .. ":" .. tostring(is_backup)
    local key = "d:" .. rKey
    local vKey = "v:" .. rKey
    cache:add(vKey, 0)                 -- create the version counter if absent
    local v, err = cache:incr(vKey, 1) -- a newer version makes workers re-apply
    if not v then
        return false
    end
    local suc = cache:set(key, json.encode(t))
    return suc
end

local args = ngx.req.get_uri_args()
local method = args["method"]
if method == "list" then
    local u_name = args["upstream"]
    list(u_name)
elseif method == "down" or method == "up" then
    local u_name = args["upstream"]
    local backup = args["backup"] == "true"
    local id = tonumber(args["id"])
    local down = (method == "down")
    local suc = upordown(u_name, backup, id, down)
    local t = {}
    t["suc"] = suc
    ngx.header["Content-type"] = "application/json;charset=utf-8"
    ngx.say(json.encode(t))
end
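
For reference, the `down`/`up` branch above replies with a small status object: assuming the shared-dict write succeeds, the response body is `{"suc":true}`; on a failed write it is `{"suc":false}`.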
# Interface for the YOHO ERP system
server {

    listen 80;
    server_name erp.yoho.yohoops.org;

    proxy_http_version 1.1;
    proxy_set_header Connection "";


    access_log /Data/logs/nginx/erp.yoho.yohoops.org_access.log fenxi;
    error_log /Data/logs/nginx/erp.yoho.yohoops.org_error.log;

    if ( $request_method = HEAD ) {
        return 200;
    }

    # only allow internal IPs
    allow 10.66.0.0/16;
    allow 10.67.0.0/16;
    allow 192.168.0.0/16;
    allow 172.31.0.0/16;
    allow 127.0.0.1;
    deny all;

    location = /upstreams {
        content_by_lua_file "conf/lua/upstream.lua";
    }

}
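
This vhost is the management entry point: requests to `/upstreams` are handled by `conf/lua/upstream.lua`, and the `allow`/`deny` rules restrict access to internal address ranges.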
@@ -18,4 +18,4 @@ server {
     proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
     proxy_set_header Accept-Encoding "gzip";
 }
-}
+}
# Java Nginx configuration files

## Notes

> These files need to be copied to the `openresty` home directory.


## Dynamic `upstream` support

Set an upstream peer `up` or `down` at runtime, based on [lua-upstream-nginx-module](https://github.com/openresty/lua-upstream-nginx-module).

> 1. List all peers of a given `upstream`:
>
> `curl -i -H "Host: erp.yoho.yohoops.org" "http://127.0.0.1/upstreams?method=list&upstream=ufogateway"`
>
> 2. Mark peer 0 of the upstream `ufogateway` as `down`:
>
> `curl -i -H "Host: erp.yoho.yohoops.org" "http://127.0.0.1/upstreams?method=down&upstream=ufogateway&id=0"`
>
> 3. Mark peer 0 of the upstream `ufogateway` as `up`:
>
> `curl -i -H "Host: erp.yoho.yohoops.org" "http://127.0.0.1/upstreams?method=up&upstream=ufogateway&id=0"`
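>
> 4. The handler also reads an optional `backup` parameter; `backup=true` targets the backup peer list instead of the primary one (hypothetical example, assuming `ufogateway` defines backup peers):
>
> `curl -i -H "Host: erp.yoho.yohoops.org" "http://127.0.0.1/upstreams?method=down&upstream=ufogateway&id=0&backup=true"`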

### `list` response:

```json
{
    "name": "ufogateway",
    "value": [
        {
            "backup": false,
            "name": "172.31.70.77:8080",
            "down": false,
            "id": 0
        },
        {
            "backup": false,
            "name": "172.31.70.113:8080",
            "down": false,
            "id": 1
        },
        {
            "backup": false,
            "name": "172.31.70.8:8080",
            "down": false,
            "id": 2
        },
        {
            "backup": false,
            "name": "172.31.70.104:8080",
            "down": false,
            "id": 3
        }
    ]
}
```
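
In this response, `id` is the peer index that the `down`/`up` calls take via the `id` query parameter, `name` is the peer address from the upstream block, `down` is the peer's current state, and `backup` distinguishes primary peers from backup peers.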
@@ -44,18 +44,10 @@
   - java-ufo-fore
   - java-ufo-platform

-
-- name: copy ufo api conf files to openresty
-  template:
-    src: vhosts/api.ufo.conf
-    dest: "{{ path }}/nginx/conf/vhosts/api.ufo.conf"
-  notify:
-    - reload nginx
-
-- name: copy ufo admin conf files to openresty
-  template:
-    src: vhosts/platform.ufo.conf.j2
-    dest: "{{ path }}/nginx/conf/vhosts/platform.ufo.conf"
+- name: copy yoho conf files to openresty
+  copy:
+    src: java-nginx-config/nginx/conf/
+    dest: "{{ path }}/nginx/conf/"
   notify:
     - reload nginx

@@ -1,7 +1,3 @@
-#worker_processes 1;
-
-
-
 user www www;

 # setup worker process and worker cpu affinity
@@ -65,6 +61,11 @@ http {
     proxy_buffers 32 32k;
     proxy_busy_buffers_size 128k;

+    lua_package_path "/Data/local/openresty-1.9.15.1/nginx/conf/lua/?.lua;;";
+    init_by_lua_file "conf/lua/init_lua.lua";
+    lua_shared_dict upstream 20m;
+    init_worker_by_lua_file "conf/lua/init_config_worker.lua";
+
     server {
         listen 80;
         server_name localhost;
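
Taken together, these directives wire the pieces up: `init_by_lua_file` runs `init_lua.lua` once at startup to clear stale entries, `lua_shared_dict upstream 20m` is the cross-worker store for pending up/down requests, and `init_worker_by_lua_file` starts the 2-second timer in each worker (`init_config_worker.lua`) that applies those requests via `set_peer_down`.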