
Commit 2682661

zechen0 authored and alexellis committed

Add Linode Provisioner

Adds a Linode provisioner to the operator by integrating inletsctl, which supports Linode from version 0.5.5 onwards.

Signed-off-by: Ze Chen <zechenbit@gmail.com>

1 parent e0438af · commit 2682661

12 files changed: 275 additions & 119 deletions

README.md

Lines changed: 56 additions & 0 deletions
@@ -49,6 +49,7 @@ Operator cloud host provisioning:
- [x] Provision to Scaleway
- [x] Provision to GCP
- [x] Provision to AWS EC2
+- [x] Provision to Linode
- [x] Publish stand-alone [Go provisioning library/SDK](https://github.com/inlets/inletsctl/tree/master/pkg/provision)

With [`inlets-pro`](https://github.com/inlets/inlets-pro) configured, you get the following additional benefits:

@@ -226,6 +227,61 @@ helm upgrade inlets-operator --install inlets/inlets-operator \
  --set provider=gce,zone=us-central1-a,projectID=$PROJECTID
```

+## Running in-cluster, using Linode for the exit node
+
+Install using helm:
+```bash
+kubectl apply -f ./artifacts/crds/
+
+# Create a secret to store the Linode API access key
+kubectl create secret generic inlets-access-key --from-literal inlets-access-key=<Linode API Access Key>
+
+# Add and update the inlets-operator helm repo
+helm repo add inlets https://inlets.github.io/inlets-operator/
+
+helm repo update
+
+# Install inlets-operator with the required fields
+helm upgrade inlets-operator --install inlets/inlets-operator \
+  --set provider=linode,region=us-east
+```
+
+You can also install the inlets-operator with a single command using [arkade](https://get-arkade.dev/); arkade runs against any Kubernetes cluster.
+
+Install with inlets PRO:
+
+```bash
+arkade install inlets-operator \
+ --provider linode \
+ --region us-east \
+ --access-key <Linode API Access Key> \
+ --license $(cat $HOME/inlets-pro-license.txt)
+```
+
+Install with inlets OSS:
+
+```bash
+arkade install inlets-operator \
+ --provider linode \
+ --region us-east \
+ --access-key <Linode API Access Key>
+```
+
+You can also install using kubectl without helm (change `-provider` and `-region` in `./artifacts/operator.yaml`):
+
+```bash
+# Create a secret to store the access token
+
+kubectl create secret generic inlets-access-key \
+  --from-literal inlets-access-key=<Linode API Access Key>
+
+kubectl apply -f ./artifacts/crds/
+
+# Apply the operator deployment and RBAC role
+kubectl apply -f ./artifacts/operator-rbac.yaml
+kubectl apply -f ./artifacts/operator.yaml
+```
+
## Expose a service with a LoadBalancer

The LoadBalancer type is usually provided by a cloud controller, but when that is not available, you can use the inlets-operator to get a public IP and ingress. The free OSS version of inlets provides an HTTP tunnel; inlets PRO provides TCP tunnels and full functionality for an IngressController.
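
For orientation, the trigger described above is simply a Service of type `LoadBalancer`: the operator watches for these and provisions an exit node to supply the public IP. A minimal client-go sketch of creating such a Service (the name `nginx-1`, the namespace, the port, and out-of-cluster kubeconfig loading are placeholders, not part of this commit; the README itself uses kubectl/YAML):

```go
package main

import (
	"context"
	"log"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: running outside the cluster with the default kubeconfig.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	// A Service of type LoadBalancer; the inlets-operator reacts to these and
	// creates a tunnel plus an exit node (with this commit, a Linode instance).
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "nginx-1", Namespace: "default"}, // placeholder names
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{"app": "nginx-1"},
			Type:     corev1.ServiceTypeLoadBalancer,
			Ports: []corev1.ServicePort{
				{Port: 80, TargetPort: intstr.FromInt(80)},
			},
		},
	}

	if _, err := clientset.CoreV1().Services("default").Create(context.Background(), svc, metav1.CreateOptions{}); err != nil {
		log.Fatal(err)
	}
}
```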

chart/inlets-operator/README.md

Lines changed: 15 additions & 1 deletion
@@ -70,7 +70,7 @@ helm upgrade inlets-operator --install inlets/inlets-operator \

```sh
helm upgrade inlets-operator --install inlets/inlets-operator \
-  --set provider=packet,region=ams1,projectID=PROJECTID,inletsProLicense=WT_GOES_HERE
+  --set provider=packet,region=ams1,projectID=PROJECTID,inletsProLicense=JWT_GOES_HERE
```

### Scaleway with inlets OSS

@@ -80,6 +80,20 @@ helm upgrade inlets-operator --install inlets/inlets-operator \
  --set provider=scaleway,region=ams1,organizationID=ORGANIZATIONID
```

+### Linode with inlets OSS
+
+```sh
+helm upgrade inlets-operator --install inlets/inlets-operator \
+  --set provider=linode,region=us-east
+```
+
+### Linode with inlets-pro
+
+```sh
+helm upgrade inlets-operator --install inlets/inlets-operator \
+  --set provider=linode,region=us-east,inletsProLicense=JWT_GOES_HERE
+```
+

## Chart parameters

controller.go

Lines changed: 61 additions & 10 deletions
@@ -4,6 +4,7 @@
package main

import (
+	"context"
	"encoding/base64"
	"fmt"
	"log"

@@ -152,6 +153,9 @@ func NewController(
	case "civo":
		provisioner, _ = provision.NewCivoProvisioner(controller.infraConfig.GetAccessKey())
		break
+	case "linode":
+		provisioner, _ = provision.NewLinodeProvisioner(controller.infraConfig.GetAccessKey())
+		break
	}

	if provisioner != nil {

@@ -362,7 +366,7 @@ func (c *Controller) syncHandler(key string) error {

	ops := metav1.GetOptions{}
	name := service.Name + "-tunnel"
-	found, err := tunnels.Get(name, ops)
+	found, err := tunnels.Get(context.Background(), name, ops)

	if errors.IsNotFound(err) {
		if manageService(*c, *service) {

@@ -390,7 +394,8 @@ func (c *Controller) syncHandler(key string) error {
		},
	}

-	_, err := tunnels.Create(tunnel)
+	ops := metav1.CreateOptions{}
+	_, err := tunnels.Create(context.Background(), tunnel, ops)

	if err != nil {
		log.Printf("Error creating tunnel: %s", err.Error())

@@ -402,7 +407,7 @@ func (c *Controller) syncHandler(key string) error {
	if manageService(*c, *service) == false {
		log.Printf("Removing tunnel: %s\n", found.Name)

-		err := tunnels.Delete(found.Name, &metav1.DeleteOptions{})
+		err := tunnels.Delete(context.Background(), found.Name, metav1.DeleteOptions{})

		if err != nil {
			log.Printf("Error deleting tunnel: %s", err.Error())

@@ -430,6 +435,7 @@ func (c *Controller) syncHandler(key string) error {

	var id string

+	log.Printf("Provisioning started with provider:%s host:%s\n", c.infraConfig.Provider, tunnel.Name)
	start := time.Now()
	if c.infraConfig.Provider == "packet" {

@@ -566,7 +572,27 @@ func (c *Controller) syncHandler(key string) error {
			return err
		}
		id = res.ID
+	} else if c.infraConfig.Provider == "linode" {
+		provisioner, _ := provision.NewLinodeProvisioner(c.infraConfig.GetAccessKey())
+
+		userData := makeUserdata(tunnel.Spec.AuthToken, c.infraConfig.UsePro(), tunnel.Spec.ServiceName)
+
+		res, err := provisioner.Provision(provision.BasicHost{
+			Name:       tunnel.Name,
+			OS:         "linode/ubuntu16.04lts", // https://api.linode.com/v4/images
+			Plan:       "g6-nanode-1",           // https://api.linode.com/v4/linode/types
+			Region:     c.infraConfig.Region,
+			UserData:   userData,
+			Additional: map[string]string{},
+		})

+		if err != nil {
+			return err
+		}
+		id = res.ID
+
+	} else {
+		return fmt.Errorf("unsupported provider: %s", c.infraConfig.Provider)
	}

	log.Printf("Provisioning call took: %fs\n", time.Since(start).Seconds())

@@ -727,12 +753,37 @@ func (c *Controller) syncHandler(key string) error {
				}
			}
		}
+	} else if c.infraConfig.Provider == "linode" {
+		provisioner, _ := provision.NewLinodeProvisioner(c.infraConfig.GetAccessKey())
+
+		host, err := provisioner.Status(tunnel.Status.HostID)
+
+		if err != nil {
+			return err
+		}
+
+		if host.Status == provision.ActiveStatus {
+			if host.IP != "" {
+				err := c.updateTunnelProvisioningStatus(tunnel, provision.ActiveStatus, host.ID, host.IP)
+				if err != nil {
+					return err
+				}
+
+				err = c.updateService(tunnel, host.IP)
+				if err != nil {
+					log.Printf("Error updating service: %s, %s", tunnel.Spec.ServiceName, err.Error())
+					return fmt.Errorf("tunnel update error %s", err)
+				}
+			}
+		}
+	} else {
+		return fmt.Errorf("unsupported provider: %s", c.infraConfig.Provider)
	}
	break
case provision.ActiveStatus:
	if tunnel.Spec.ClientDeploymentRef == nil {
		get := metav1.GetOptions{}
-		service, getServiceErr := c.kubeclientset.CoreV1().Services(tunnel.Namespace).Get(tunnel.Spec.ServiceName, get)
+		service, getServiceErr := c.kubeclientset.CoreV1().Services(tunnel.Namespace).Get(context.Background(), tunnel.Spec.ServiceName, get)

		if getServiceErr != nil {
			return getServiceErr

@@ -758,7 +809,7 @@ func (c *Controller) syncHandler(key string) error {

		deployment, createDeployErr := c.kubeclientset.AppsV1().
			Deployments(tunnel.Namespace).
-			Create(client)
+			Create(context.Background(), client, metav1.CreateOptions{})

		if createDeployErr != nil {
			log.Println(createDeployErr)

@@ -771,7 +822,7 @@ func (c *Controller) syncHandler(key string) error {

		_, updateErr := c.operatorclientset.InletsV1alpha1().
			Tunnels(tunnel.Namespace).
-			Update(tunnel)
+			Update(context.Background(), tunnel, metav1.UpdateOptions{})

		if updateErr != nil {
			log.Println(updateErr)

@@ -880,7 +931,7 @@ func makeClient(tunnel *inletsv1alpha1.Tunnel, targetPort int32, clientImage str

func (c *Controller) updateService(tunnel *inletsv1alpha1.Tunnel, ip string) error {
	get := metav1.GetOptions{}
-	res, err := c.kubeclientset.CoreV1().Services(tunnel.Namespace).Get(tunnel.Spec.ServiceName, get)
+	res, err := c.kubeclientset.CoreV1().Services(tunnel.Namespace).Get(context.Background(), tunnel.Spec.ServiceName, get)
	if err != nil {
		return err
	}

@@ -899,7 +950,7 @@ func (c *Controller) updateService(tunnel *inletsv1alpha1.Tunnel, ip string) err
		copy.Spec.ExternalIPs = append(copy.Spec.ExternalIPs, ip)
	}

-	res, err = c.kubeclientset.CoreV1().Services(tunnel.Namespace).Update(copy)
+	res, err = c.kubeclientset.CoreV1().Services(tunnel.Namespace).Update(context.Background(), copy, metav1.UpdateOptions{})
	if err != nil {
		return err
	}

@@ -911,7 +962,7 @@ func (c *Controller) updateService(tunnel *inletsv1alpha1.Tunnel, ip string) err
		copy.Status.LoadBalancer.Ingress[i] = corev1.LoadBalancerIngress{IP: ip}
	}

-	_, err = c.kubeclientset.CoreV1().Services(tunnel.Namespace).UpdateStatus(copy)
+	_, err = c.kubeclientset.CoreV1().Services(tunnel.Namespace).UpdateStatus(context.Background(), copy, metav1.UpdateOptions{})
	return err
}

@@ -923,7 +974,7 @@ func (c *Controller) updateTunnelProvisioningStatus(tunnel *inletsv1alpha1.Tunne
	tunnelCopy.Status.HostID = id
	tunnelCopy.Status.HostIP = ip

-	_, err := c.operatorclientset.InletsV1alpha1().Tunnels(tunnel.Namespace).UpdateStatus(tunnelCopy)
+	_, err := c.operatorclientset.InletsV1alpha1().Tunnels(tunnel.Namespace).UpdateStatus(context.Background(), tunnelCopy, metav1.UpdateOptions{})
	return err
}

go.mod

Lines changed: 5 additions & 6 deletions
@@ -6,14 +6,13 @@ go 1.13

require (
	github.com/aws/aws-sdk-go v1.27.3 // indirect
-	github.com/inlets/inletsctl v0.0.0-20200211114314-aab68519494e
+	github.com/inlets/inletsctl v0.0.0-20200630123138-2af07d807845
	github.com/sethvargo/go-password v0.1.3

-	k8s.io/api v0.17.0
-	k8s.io/apimachinery v0.17.1-beta.0
-	k8s.io/client-go v0.17.0
-	k8s.io/code-generator v0.17.0
+	k8s.io/api v0.18.3
+	k8s.io/apimachinery v0.18.3
+	k8s.io/client-go v0.18.3
+	k8s.io/code-generator v0.18.5
	k8s.io/gengo v0.0.0-20200127102705-1e9b17e831be // indirect
	k8s.io/klog v1.0.0
-	k8s.io/kube-openapi v0.0.0-20200130172213-cdac1c71ff9f // indirect
)
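
The bump from the k8s.io 0.17 modules to 0.18 is what drives the mechanical edits in controller.go above: in client-go 0.18 every typed client call takes a `context.Context` plus an explicit options value. A small sketch of the new call shape (kubeconfig loading and the service name `my-svc` are placeholders):

```go
package main

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	// client-go 0.18 style: a context and an options value on every typed call
	// (previously: Get("my-svc", metav1.GetOptions{}) with no context argument).
	svc, err := clientset.CoreV1().Services("default").Get(context.Background(), "my-svc", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("found service %s (resourceVersion %s)", svc.Name, svc.ResourceVersion)
}
```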
