Add new FortiGate test deployment for KSD

This commit is contained in:
maximilian.bartz 2025-07-22 10:47:20 +02:00
parent 9980d50b07
commit f76153ff5d
10 changed files with 97 additions and 195 deletions

View file

@ -1,18 +0,0 @@
# Flux Kustomization that deploys the dev FortiGate VM manifests from the
# tenant Git repository (path ./firewall-dev/vm).
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  # &app anchor: reused below so the app label always matches the name
  name: &app fortigate-dev
  namespace: ${TENANT_NAMESPACE}
spec:
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  path: ./firewall-dev/vm
  prune: true  # delete cluster objects when they are removed from the repo
  sourceRef:
    kind: GitRepository
    name: tenant-repos
  wait: false  # do not block reconciliation on resource readiness
  interval: 30m
  retryInterval: 1m
  timeout: 5m

View file

@ -1,64 +0,0 @@
# KubeVirt VirtualMachine running a FortiGate (FortiOS 7.6.3) appliance
# for the dev environment, attached only to the pod network.
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: fortigate-dev
  namespace: ${TENANT_NAMESPACE}
spec:
  dataVolumeTemplates:
    # Root disk image is imported over HTTP from the in-cluster nginx server.
    - metadata:
        name: fortigate-rootdisk-dev
      spec:
        source:
          http:
            url: http://nginx.demo.svc.cluster.local:80/fortios_v7_6_3.qcow2
        storage:
          resources:
            requests:
              storage: 30Gi
  runStrategy: Always  # keep the VM running; restart it if it stops
  template:
    metadata:
      labels:
        kubevirt.io/domain: fortigate-dev
    spec:
      domain:
        cpu:
          cores: 2
        memory:
          guest: 4Gi
        features:
          acpi: {}
          smm:
            enabled: true  # SMM is required when EFI Secure Boot is enabled
        firmware:
          bootloader:
            efi:
              secureBoot: true
        devices:
          rng: {}
          networkInterfaceMultiqueue: true
          interfaces:
            # Single NATed (masquerade) interface on the pod network.
            - name: default
              masquerade: {}
              ports:
                - port: 443  # HTTPS admin UI
                - port: 22   # SSH
          disks:
            - disk:
                bus: sata
              name: rootdisk
        resources:
          # requests == limits → Guaranteed QoS for the virt-launcher pod
          requests:
            memory: 4Gi
            cpu: 2
          limits:
            memory: 4Gi
            cpu: 2
      networks:
        - name: default
          pod: {}
      terminationGracePeriodSeconds: 180
      volumes:
        - name: rootdisk
          dataVolume:
            name: fortigate-rootdisk-dev

View file

@@ -7,7 +7,7 @@ spec:
   commonMetadata:
     labels:
       app.kubernetes.io/name: *app
-  path: ./firewall-s3/vm
+  path: ./firewall-s3/vm/ksd
   prune: true
   sourceRef:
     kind: GitRepository

View file

@ -0,0 +1,30 @@
---
# LoadBalancer Service exposing the fortigate-ksd VM: IPsec (IKE + NAT-T)
# over UDP and the HTTPS admin UI over TCP.
apiVersion: v1
kind: Service
metadata:
  name: fortigate-lb
  namespace: ${TENANT_NAMESPACE}
  labels:
    app.kubernetes.io/component: fortigate-lb
spec:
  type: LoadBalancer
  # Local policy preserves the client source IP (needed for IPsec peers).
  externalTrafficPolicy: Local
  ports:
    - port: 4500
      name: ipsec-nat  # IPsec NAT traversal (NAT-T)
      targetPort: 4500
      protocol: UDP
    - port: 500
      name: key-management  # IKE
      targetPort: 500
      protocol: UDP
    # SSH intentionally left disabled; uncomment to expose it externally.
    #- port: 22
    #  name: ssh
    #  targetPort: 22
    #  protocol: TCP
    - port: 443
      name: https
      targetPort: 443
      protocol: TCP
  selector:
    # Matches the kubevirt.io/domain label on the fortigate-ksd VMI pod.
    kubevirt.io/domain: fortigate-ksd

View file

@ -0,0 +1,20 @@
# Multus NetworkAttachmentDefinition: bridge network (br-lan) with a static
# address, used for the FortiGate VM's LAN interface.
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: lan-net
  namespace: ${TENANT_NAMESPACE}
spec:
  # NOTE(review): 172.168.100.0/24 is PUBLIC address space — the RFC 1918
  # private range is 172.16.0.0/12 (172.16.x.x–172.31.x.x). Confirm this is
  # not a typo for 172.16.100.2/24.
  config: '{
    "cniVersion": "0.3.1",
    "type": "bridge",
    "bridge": "br-lan",
    "ipam": {
      "type": "static",
      "addresses": [
        {
          "address": "172.168.100.2/24",
          "gateway": "172.168.100.1"
        }
      ]
    }
  }'

View file

@ -0,0 +1,14 @@
# Multus NetworkAttachmentDefinition: bridge network (br-mgmt) with
# DHCP-assigned addressing, used for the FortiGate VM's management interface.
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: mgmt-net
  namespace: ${TENANT_NAMESPACE}
spec:
  # NOTE(review): the CNI "dhcp" IPAM type requires the CNI DHCP daemon to be
  # running on every node — confirm it is deployed in this cluster.
  config: '{
    "cniVersion": "0.3.1",
    "type": "bridge",
    "bridge": "br-mgmt",
    "ipam": {
      "type": "dhcp"
    }
  }'

View file

@ -1,14 +1,12 @@
apiVersion: kubevirt.io/v1 apiVersion: kubevirt.io/v1
kind: VirtualMachine kind: VirtualMachine
metadata: metadata:
name: fortigate-s3 name: fortigate-ksd
namespace: ${TENANT_NAMESPACE} namespace: ${TENANT_NAMESPACE}
annotations:
#kubevirt.io/allow-pod-bridge-network-live-migration:
spec: spec:
dataVolumeTemplates: dataVolumeTemplates:
- metadata: - metadata:
name: fortigate-rootdisk-s3 name: fortigate-rootdisk-ksd
spec: spec:
source: source:
http: http:
@ -22,16 +20,16 @@ spec:
template: template:
metadata: metadata:
labels: labels:
kubevirt.io/domain: fortigate-s3 kubevirt.io/domain: fortigate-ksd
spec: spec:
domain: domain:
cpu: cpu:
cores: 2 cores: 1
memory: memory:
guest: 4Gi guest: 2Gi
features: features:
acpi: {} acpi: {}
smm: smm:
enabled: true enabled: true
firmware: firmware:
bootloader: bootloader:
@ -41,29 +39,39 @@ spec:
rng: {} rng: {}
networkInterfaceMultiqueue: true networkInterfaceMultiqueue: true
interfaces: interfaces:
- name: external - name: wan
masquerade: {} masquerade: {}
ports: ports:
- port: 4500 - port: 4500
- port: 443 - port: 443
- port: 22 - port: 22
- port: 500 - port: 500
- name: mgmt
bridge: {}
- name: lan
bridge: {}
disks: disks:
- disk: - disk:
bus: sata bus: sata
name: rootdisk name: rootdisk
resources: resources:
requests: requests:
memory: 4Gi memory: 2Gi
cpu: 2 cpu: 1
limits: limits:
memory: 4Gi memory: 2Gi
cpu: 2 cpu: 1
networks: networks:
- name: external - name: wan
pod: {} pod: {}
- name: mgmt
multus:
networkName: ${TENANT_NAMESPACE}/mgmt-net
- name: lan
multus:
networkName: ${TENANT_NAMESPACE}/lan-net
terminationGracePeriodSeconds: 180 terminationGracePeriodSeconds: 180
volumes: volumes:
- name: rootdisk - name: rootdisk
dataVolume: dataVolume:
name: fortigate-rootdisk-s3 name: fortigate-rootdisk-ksd

View file

@ -1,18 +0,0 @@
# Flux Kustomization that deploys the FortiGate VM manifests from the
# tenant Git repository (path ./firewall/vm).
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  # &app anchor: reused below so the app label always matches the name
  name: &app fortigate
  namespace: ${TENANT_NAMESPACE}
spec:
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  path: ./firewall/vm
  prune: true  # delete cluster objects when they are removed from the repo
  sourceRef:
    kind: GitRepository
    name: tenant-repos
  wait: false  # do not block reconciliation on resource readiness
  interval: 30m
  retryInterval: 1m
  timeout: 5m

View file

@ -1,72 +0,0 @@
# KubeVirt VirtualMachine running a FortiGate (FortiOS 7.6.3) appliance,
# attached only to the pod network with masquerade NAT.
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: fortigate
  namespace: ${TENANT_NAMESPACE}
spec:
  dataVolumeTemplates:
    # Root disk image is imported over HTTP from the in-cluster nginx server.
    - metadata:
        name: fortigate-rootdisk
      spec:
        source:
          http:
            url: http://nginx.demo.svc.cluster.local:80/fortios_v7_6_3.qcow2
        storage:
          resources:
            requests:
              storage: 30Gi
  runStrategy: Always  # keep the VM running; restart it if it stops
  template:
    metadata:
      labels:
        kubevirt.io/domain: fortigate
    spec:
      domain:
        cpu:
          cores: 2
        memory:
          guest: 4Gi
        devices:
          rng: {}
          networkInterfaceMultiqueue: true
          interfaces:
            # Single NATed (masquerade) interface on the pod network.
            - name: default
              masquerade: {}
              ports:
                - port: 80
                - port: 443
                - port: 22
                - port: 2222
                - port: 5050
          disks:
            - disk:
                bus: sata
              name: rootdisk
            # NOTE(review): the commented-out disks/volumes below reference
            # gitlab-* resources — presumably copied from a GitLab VM template;
            # verify names before re-enabling.
            # - disk:
            #     bus: scsi
            #   name: datadisk
            # - disk:
            #     bus: scsi
            #   name: cloudinitdisk
        resources:
          # requests == limits → Guaranteed QoS for the virt-launcher pod
          requests:
            memory: 4Gi
            cpu: 2
          limits:
            memory: 4Gi
            cpu: 2
      networks:
        - name: default
          pod: {}
      terminationGracePeriodSeconds: 180
      volumes:
        - name: rootdisk
          dataVolume:
            name: fortigate-rootdisk
        # - name: datadisk
        #   persistentVolumeClaim:
        #     claimName: gitlab-datadisk
        # - name: cloudinitdisk
        #   cloudInitNoCloud:
        #     secretRef:
        #       name: gitlab-cloud-init

View file

@@ -38,11 +38,13 @@ spec:
       cloudInitNoCloud:
         userData: |
           #cloud-config
-          hostname: ubuntu-vm-1
-          ssh_pwauth: True
           users:
-            - name: ubuntu
-              ssh-authorized-keys:
-                - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPqlhZW/pPLK8zENt3o6tgl0QVinhGAF1sHvajqq3UvI ubuntu
-              sudo: ['ALL=(ALL) NOPASSWD:ALL']
-              shell: /bin/bash
+            - name: testuser
+              groups: [sudo]
+              sudo: "ALL=(ALL) NOPASSWD:ALL"
+              lock_passwd: false
+              passwd: "$6$oMZf5uou7t0.oAJ1$825Te06yt7JZwHSSj4MGQMjpd87LflANQpajCwIVPASkKZdOJo4L2bAEDDuK.jtu.fsRNc9bZAsYefmoqdN8O1"
+          chpasswd:
+            expire: false
+          ssh_pwauth: true