Authored by chunhua.zhang

Merge branch 'ufo-nginx' into 'master'

Ufo nginx-upstream

Ufo nginx-upstream up down 修改

See merge request !31
local upstream_cache=ngx.shared.upstream
local upstream = require "ngx.upstream"
local cjson = require "cjson"
-->>begin: timer at fix rate call function.
-- Self-rearming fixed-rate timer.
-- premature: true when called from a shutting-down timer (also true on the
--            initial direct call at the bottom of this file, which is why
--            we do not early-return on it).
-- t:  interval in seconds between runs.
-- f:  function to execute; errors are caught and logged, never propagated.
-- id: optional worker id — when given, only that worker executes f; every
--     worker still keeps the timer alive.
local timer_handler
timer_handler = function(premature, t, f, id)
-- `not id` (rather than `id == nil`) preserves the original semantics:
-- a falsy id means "run in every worker".
if not id or ngx.worker.id() == id then
local b, errinfo = pcall(f)
if not b then
ngx.log(ngx.ERR, "task request error:" .. tostring(errinfo))
end
end
-- BUGFIX: forward `id` when rescheduling. The original omitted it, so the
-- worker-id restriction silently disappeared after the first tick.
-- Also log a failed reschedule instead of ignoring it (ngx.timer.at
-- returns nil, err e.g. during nginx shutdown).
local ok, err = ngx.timer.at(t, timer_handler, t, f, id)
if not ok then
ngx.log(ngx.ERR, "failed to reschedule timer: ", tostring(err))
end
end
--<<end: timer at fix rate call function.
-- Per-worker memory of the last version applied for each "v:" key, so each
-- state change is applied at most once per worker.
local stream_ctx = {}

-- Scan the shared dict for pending peer up/down commands ("d:" keys) and
-- apply any whose version counter ("v:" key) is newer than what this worker
-- last applied. Runs in every worker via the timer below.
function updownstream()
local keys = upstream_cache:get_keys()
for _, k in ipairs(keys) do
if string.sub(k, 1, 1) == "d" then
local vKey = "v" .. string.sub(k, 2)
local version = upstream_cache:get(vKey)
local value = upstream_cache:get(k)
-- ROBUSTNESS: the entry can vanish between get_keys() and get()
-- (flush_all / expiry elsewhere); cjson.decode(nil) would raise and
-- abort the whole scan.
if value and version then
local v = cjson.decode(value)
if (not stream_ctx[vKey]) or stream_ctx[vKey] < version then
local ok, err = upstream.set_peer_down(v["upstream"], v["backup"], v["id"], v["value"])
if not ok then
ngx.log(ngx.ERR, "up or down stream err:", ngx.worker.id(), value, err)
else
-- Remember the applied version only on success so a failed
-- set_peer_down is retried on the next tick.
stream_ctx[vKey] = version
end
end
end
end
end
end

-- Kick off: run once immediately in every worker, then every 2 seconds.
timer_handler(true, 2, updownstream)
\ No newline at end of file
... ...
-- Reset the shared `upstream` dict at (re)load time so stale up/down
-- commands from a previous configuration are not re-applied by the
-- worker timers. flush_all marks every entry expired; flush_expired
-- then actually frees the memory.
ngx.shared.upstream:flush_all()
ngx.shared.upstream:flush_expired()
... ...
local upstream = require "ngx.upstream"
local json=require "cjson"
local get_servers = upstream.get_servers
local get_upstreams = upstream.get_upstreams
local cache=ngx.shared.upstream
-- get all peers for upstram: u
-- Emit the status of every peer (primary then backup) of upstream `u`
-- as a JSON document: { name = u, value = { {id, name, down, backup}, ... } }.
-- On lookup failure a plain-text error line is written instead.
function list(u)
local result = { name = u, value = {} }

-- Append each peer of `peers` to result.value, tagged with `is_backup`.
local function collect(peers, is_backup)
for _, peer in ipairs(peers) do
result.value[#result.value + 1] = {
id = peer.id,
down = peer.down and peer.down or false,
name = peer.name,
backup = is_backup,
}
end
end

local primary, perr = upstream.get_primary_peers(u)
if perr then
ngx.say("failed to get primary servers in upstream ", u)
return
end
collect(primary, false)

local backups, berr = upstream.get_backup_peers(u)
if berr then
ngx.say("failed to get backup servers in upstream ", u)
return
end
collect(backups, true)

ngx.header["Content-type"] = "application/json;charset=utf-8"
ngx.say(json.encode(result))
end
-- Publish an up/down command for one peer into the shared dict and bump its
-- version counter, so every worker's timer will pick it up and apply it.
-- upstream_name: name of the upstream block.
-- is_backup:     true when the peer is in the backup list.
-- peer_id:       peer index within the (primary | backup) list.
-- down_value:    true = mark down, false = mark up.
-- Returns the shared-dict set() result (truthy on success), or false when
-- the version counter could not be incremented.
function upordown(upstream_name, is_backup, peer_id, down_value)
local t = {
upstream = upstream_name,
backup = is_backup,
id = peer_id,
value = down_value,
}
-- BUGFIX: the original built the key with tostring(id), where `id` is an
-- undefined global (always nil here — the `local id` further down this
-- file is declared after this function and is not in its scope). All
-- peers of one upstream therefore collided on a single "d:...:nil:..."
-- key and overwrote each other's commands.
local rKey = upstream_name .. ":" .. tostring(peer_id) .. ":" .. tostring(is_backup)
local key = "d:" .. rKey
local vKey = "v:" .. rKey
-- add() is a no-op when the counter already exists; incr() then bumps it.
cache:add(vKey, 0)
local v, err = cache:incr(vKey, 1)
if not v then
return false
end
local suc = cache:set(key, json.encode(t))
return suc
end
-- Request router: dispatch on the `method` query argument.
-- method=list             -> dump peer status of ?upstream=NAME
-- method=down | method=up -> mark peer ?id=N of ?upstream=NAME down/up
local args = ngx.req.get_uri_args()
local method = args["method"]
if method == "list" then
list(args["upstream"])
elseif method == "down" or method == "up" then
-- Renamed from `upstream` to avoid shadowing the required ngx.upstream
-- module bound at the top of this file.
local upstream_name = args["upstream"]
local is_backup = (args["backup"] == "true")
local peer_id = tonumber(args["id"])
local mark_down = (method == "down")
local resp = { suc = upordown(upstream_name, is_backup, peer_id, mark_down) }
ngx.header["Content-type"] = "application/json;charset=utf-8"
ngx.say(json.encode(resp))
end
... ...
# Interface FOR YOHO ERP SYSTEM
server {
listen 80;
server_name erp.yoho.yohoops.org;
# Keep-alive to upstreams requires HTTP/1.1 and a cleared Connection header.
proxy_http_version 1.1;
proxy_set_header Connection "";
access_log /Data/logs/nginx/erp.yoho.yohoops.org_access.log fenxi;
error_log /Data/logs/nginx/erp.yoho.yohoops.org_error.log;
# Answer health-check style HEAD probes directly without touching Lua.
if ( $request_method = HEAD ) {
return 200;
}
# only allow local ip
allow 10.66.0.0/16;
allow 10.67.0.0/16;
allow 192.168.0.0/16;
allow 172.31.0.0/16;
allow 127.0.0.1;
deny all;
# Dynamic upstream control endpoint (list / up / down), backed by the
# content_by_lua handler below.
location = /upstreams {
content_by_lua_file "conf/lua/upstream.lua";
}
}
... ...
... ... @@ -18,4 +18,4 @@ server {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Accept-Encoding "gzip";
}
}
\ No newline at end of file
}
... ...
# java nginx 配置文件
## 说明
> 这些文件需要`copy`到`openresty` 的 `home` 目录
>
## 动态`upstream`的支持
基于 [lua-upstream-nginx-module](https://github.com/openresty/lua-upstream-nginx-module) 动态设置upstream的 `up`, `down`
> 1. 查询所有的`upstreams`信息:
>
>`curl -i -H "Host: erp.yoho.yohoops.org" "http://127.0.0.1/upstreams?method=list&upstream=ufogateway"`
>
> 2. 设置 `upstream` 名称为 `ufogateway`的`upstream` 第0个 `backend server` 状态为`down`:
>
> `curl -i -H "Host: erp.yoho.yohoops.org" "http://127.0.0.1/upstreams?method=down&upstream=ufogateway&id=0"`
>
> 3. 设置`upstream` 名称为 `ufogateway` 的`upstream` 第0个 `backend server` 状态为`up`:
>
> `curl -i -H "Host: erp.yoho.yohoops.org" "http://127.0.0.1/upstreams?method=up&upstream=ufogateway&id=0"`
### list 接口响应:
```json
{
"name": "ufogateway",
"value": [
{
"backup": false,
"name": "172.31.70.77:8080",
"down": false,
"id": 0
},
{
"backup": false,
"name": "172.31.70.113:8080",
"down": false,
"id": 1
},
{
"backup": false,
"name": "172.31.70.8:8080",
"down": false,
"id": 2
},
{
"backup": false,
"name": "172.31.70.104:8080",
"down": false,
"id": 3
}
]
}
```
\ No newline at end of file
... ...
... ... @@ -44,18 +44,10 @@
- java-ufo-fore
- java-ufo-platform
- name: copy ufo api conf files to openresty
template:
src: vhosts/api.ufo.conf
dest: "{{ path }}/nginx/conf/vhosts/api.ufo.conf"
notify:
- reload nginx
- name: copy ufo admin conf files to openresty
template:
src: vhosts/platform.ufo.conf.j2
dest: "{{ path }}/nginx/conf/vhosts/platform.ufo.conf"
- name: copy yoho conf files to openresty
copy:
src: java-nginx-config/nginx/conf/
dest: "{{ path }}/nginx/conf/"
notify:
- reload nginx
... ...
#worker_processes 1;
user www www;
# setup worker proccess and worker cpu affinity
... ... @@ -65,6 +61,11 @@ http {
proxy_buffers 32 32k;
proxy_busy_buffers_size 128k;
lua_package_path "/Data/local/openresty-1.9.15.1/nginx/conf/lua/?.lua;;";
init_by_lua_file "conf/lua/init_lua.lua";
lua_shared_dict upstream 20m;
init_worker_by_lua_file "conf/lua/init_config_worker.lua";
server {
listen 80;
server_name localhost;
... ...