-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmain.go
113 lines (103 loc) · 3.27 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
package main
import (
"context"
"flag"
"k8s-leader-election/pkg/cleanup"
config2 "k8s-leader-election/pkg/configs"
"k8s-leader-election/pkg/leaselock"
"k8s-leader-election/pkg/server"
"k8s-leader-election/pkg/signals"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/klog"
"os"
"time"
)
var (
client *clientset.Clientset
)
/*
Starts a gin HTTP server with a /test endpoint that returns the pod name.
Scenario: simulates a stateful service where, after the serving pod dies,
another pod takes over and continues to accept requests.
*/
// main starts an HTTP test server that returns the pod name, optionally
// guarded by Kubernetes leader election on a Lease lock. It simulates a
// stateful service: when the leader pod dies, another pod acquires the
// lease and takes over serving requests.
func main() {
	var (
		leaseLockName      string // name of the lease lock resource
		leaseLockNamespace string // namespace the lease lock lives in
		leaseLockMode      bool   // whether to run with leader election
		debugMode          bool   // load a local kubeconfig instead of in-cluster config
		port               int
		healthPort         int
		podName            = os.Getenv("POD_NAME") // lock holder identity; must be injected by the pod spec
	)
	flag.StringVar(&leaseLockName, "lease-name", "lease-default-name", "election lease leaselock name")
	flag.BoolVar(&leaseLockMode, "lease-mode", true, "Whether to use election mode")
	flag.BoolVar(&debugMode, "debug-mode", true, "Whether to use debug mode")
	flag.StringVar(&leaseLockNamespace, "lease-namespace", "default", "election lease leaselock namespace")
	flag.IntVar(&port, "server-port", 8888, "port the test HTTP server listens on")
	flag.IntVar(&healthPort, "health-check-port", 29999, "port the health-check endpoint listens on")
	flag.Parse()

	// Server options assembled from the parsed flags.
	opt := &server.ServerOptions{
		Port:               port,
		HealthPort:         healthPort,
		LeaderElectionMode: leaseLockMode,
		DebugMode:          debugMode,
	}

	// Build the Kubernetes client configuration.
	var config *rest.Config
	if opt.DebugMode {
		// Local debugging: expects a copy of the kubeconfig (.kube/config)
		// in the project root.
		c := config2.K8sConfig{}
		config = c.K8sRestConfig()
	} else {
		var err error
		config, err = rest.InClusterConfig()
		if err != nil {
			// Fail fast with a clear message instead of passing a nil
			// config to NewForConfigOrDie (which would panic later).
			klog.Fatalf("failed to load in-cluster config: %v", err)
		}
	}
	client = clientset.NewForConfigOrDie(config)

	// Cancel the root context on shutdown signals so leader election
	// releases the lease (ReleaseOnCancel) and the server stops cleanly.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		<-signals.SetupSignalHandler()
		cancel()
	}()

	if opt.LeaderElectionMode {
		// Leader-election mode: only the elected leader runs the server.
		lock := leaselock.GetNewLock(leaseLockName, podName, leaseLockNamespace, client)
		leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
			Lock:            lock,
			ReleaseOnCancel: true,
			LeaseDuration:   15 * time.Second, // how long followers wait before treating the lock as expired
			RenewDeadline:   10 * time.Second, // how long the leader has to renew the lock
			RetryPeriod:     2 * time.Second,  // interval between lock-acquisition retries
			// Callbacks fired on leader-election state transitions.
			Callbacks: leaderelection.LeaderCallbacks{
				// Invoked once this pod becomes the leader: run the server.
				OnStartedLeading: func(c context.Context) {
					klog.Info("leader election server running...")
					server.Run(c, opt)
				},
				// Invoked when this pod loses (or gives up) leadership.
				OnStoppedLeading: func() {
					klog.Info("no longer a leader...")
					klog.Info("clean up server...")
					// Place any extra teardown logic here.
					cleanup.CleanUp()
				},
				// Invoked whenever a new leader is observed (including ourselves).
				OnNewLeader: func(currentId string) {
					if currentId == podName {
						klog.Info("still the leader!")
						return
					}
					klog.Infof("new leader is %v", currentId)
				},
			},
		})
	} else {
		// Plain mode: run the server unconditionally.
		klog.Info("server running...")
		server.Run(ctx, opt)
	}
}