Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 6 additions & 2 deletions docs/en/apis/advanced_apis/event/search.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -7,5 +7,9 @@ i18n:

# Search

<OpenAPIPath path="/platform/events.alauda.io/v1/events"/>
<OpenAPIPath path="/platform/events.alauda.io/v1/projects/{project}/clusters/{cluster}/namespaces/{namespace}/events"/>
<OpenAPIPath
path={[
'/platform/events.alauda.io/v1/events',
'/platform/events.alauda.io/v1/projects/{project}/clusters/{cluster}/namespaces/{namespace}/events',
]}
/>
10 changes: 7 additions & 3 deletions docs/en/apis/advanced_apis/log/aggregation.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@ i18n:

# Aggregation

<OpenAPIPath path="/platform/logging.alauda.io/v2/logs/aggregation"/>
<OpenAPIPath path="/platform/logging.alauda.io/v2/clusters/{cluster}/logs/aggregation"/>
<OpenAPIPath path="/platform/logging.alauda.io/v2/projects/{project}/clusters/{cluster}/namespaces/{namespace}/logs/aggregation"/>
<OpenAPIPath
path={[
'/platform/logging.alauda.io/v2/logs/aggregation',
'/platform/logging.alauda.io/v2/clusters/{cluster}/logs/aggregation',
'/platform/logging.alauda.io/v2/projects/{project}/clusters/{cluster}/namespaces/{namespace}/logs/aggregation',
]}
/>
10 changes: 7 additions & 3 deletions docs/en/apis/advanced_apis/log/archive.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@ i18n:

# Archive

<OpenAPIPath path="/platform/logging.alauda.io/v2/logs/archive"/>
<OpenAPIPath path="/platform/logging.alauda.io/v2/clusters/{cluster}/logs/archive"/>
<OpenAPIPath path="/platform/logging.alauda.io/v2/projects/{project}/clusters/{cluster}/namespaces/{namespace}/logs/archive"/>
<OpenAPIPath
path={[
'/platform/logging.alauda.io/v2/logs/archive',
'/platform/logging.alauda.io/v2/clusters/{cluster}/logs/archive',
'/platform/logging.alauda.io/v2/projects/{project}/clusters/{cluster}/namespaces/{namespace}/logs/archive',
]}
/>
10 changes: 7 additions & 3 deletions docs/en/apis/advanced_apis/log/context.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@ i18n:

# Context

<OpenAPIPath path="/platform/logging.alauda.io/v2/logs/context"/>
<OpenAPIPath path="/platform/logging.alauda.io/v2/clusters/{cluster}/logs/context"/>
<OpenAPIPath path="/platform/logging.alauda.io/v2/projects/{project}/clusters/{cluster}/namespaces/{namespace}/logs/context"/>
<OpenAPIPath
path={[
'/platform/logging.alauda.io/v2/logs/context',
'/platform/logging.alauda.io/v2/clusters/{cluster}/logs/context',
'/platform/logging.alauda.io/v2/projects/{project}/clusters/{cluster}/namespaces/{namespace}/logs/context',
]}
/>
10 changes: 7 additions & 3 deletions docs/en/apis/advanced_apis/log/search.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@ i18n:

# Search

<OpenAPIPath path="/platform/logging.alauda.io/v2/logs/search"/>
<OpenAPIPath path="/platform/logging.alauda.io/v2/clusters/{cluster}/logs/search"/>
<OpenAPIPath path="/platform/logging.alauda.io/v2/projects/{project}/clusters/{cluster}/namespaces/{namespace}/logs/search"/>
<OpenAPIPath
path={[
'/platform/logging.alauda.io/v2/logs/search',
'/platform/logging.alauda.io/v2/clusters/{cluster}/logs/search',
'/platform/logging.alauda.io/v2/projects/{project}/clusters/{cluster}/namespaces/{namespace}/logs/search',
]}
/>
12 changes: 7 additions & 5 deletions docs/en/apis/advanced_apis/monitor/indicators.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,10 @@ sourceSHA: 16d2d3fa23c37585a14f34f86ceb05542f035409d29c5e91b4f3e6da53167fa1

# Indicators [monitoring.alauda.io/v1beta1]

<OpenAPIPath path="/platform/monitoring.alauda.io/v1beta1/clusters/{cluster}/indicators" />

<OpenAPIPath path="/platform/monitoring.alauda.io/v1beta1/projects/{project}/indicators" />

<OpenAPIPath path="/platform/monitoring.alauda.io/v1beta1/projects/{project}/clusters/{cluster}/namespaces/{namespace}/indicators" />
<OpenAPIPath
path={[
'/platform/monitoring.alauda.io/v1beta1/clusters/{cluster}/indicators',
'/platform/monitoring.alauda.io/v1beta1/projects/{project}/indicators',
'/platform/monitoring.alauda.io/v1beta1/projects/{project}/clusters/{cluster}/namespaces/{namespace}/indicators',
]}
/>
12 changes: 7 additions & 5 deletions docs/en/apis/advanced_apis/monitor/metrics.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,10 @@ sourceSHA: 8fda8bb72100def30b6a497567611e9df184d1785a4fa920383613ef10b811dc

# Metrics [monitoring.alauda.io/v1beta1]

<OpenAPIPath path="/platform/monitoring.alauda.io/v1beta1/clusters/{cluster}/metrics" />

<OpenAPIPath path="/platform/monitoring.alauda.io/v1beta1/projects/{project}/metrics" />

<OpenAPIPath path="/platform/monitoring.alauda.io/v1beta1/projects/{project}/clusters/{cluster}/namespaces/{namespace}/metrics" />
<OpenAPIPath
path={[
'/platform/monitoring.alauda.io/v1beta1/clusters/{cluster}/metrics',
'/platform/monitoring.alauda.io/v1beta1/projects/{project}/metrics',
'/platform/monitoring.alauda.io/v1beta1/projects/{project}/clusters/{cluster}/namespaces/{namespace}/metrics',
]}
/>
12 changes: 7 additions & 5 deletions docs/en/apis/advanced_apis/monitor/variables.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,10 @@ sourceSHA: 3aa1c2708eed20fd5a33043f3ee378d1895625619b1af54b04d8f7a5225cf6ef

# Variables [monitoring.alauda.io/v1beta1]

<OpenAPIPath path="/platform/monitoring.alauda.io/v1beta1/clusters/{cluster}/variables" />

<OpenAPIPath path="/platform/monitoring.alauda.io/v1beta1/projects/{project}/variables" />

<OpenAPIPath path="/platform/monitoring.alauda.io/v1beta1/projects/{project}/clusters/{cluster}/namespaces/{namespace}/variables" />
<OpenAPIPath
path={[
'/platform/monitoring.alauda.io/v1beta1/clusters/{cluster}/variables',
'/platform/monitoring.alauda.io/v1beta1/projects/{project}/variables',
'/platform/monitoring.alauda.io/v1beta1/projects/{project}/clusters/{cluster}/namespaces/{namespace}/variables',
]}
/>
28 changes: 7 additions & 21 deletions docs/en/apis/kubernetes_apis/workload/pod.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -3,26 +3,12 @@
<K8sAPI name="io.k8s.api.core.v1.Pod" pathPrefix="/kubernetes/{cluster}" />

<OpenAPIPath
path="/api/v1/namespaces/{namespace}/pods/{name}/eviction"
pathPrefix="/kubernetes/{cluster}"
/>

<OpenAPIPath
path="/api/v1/namespaces/{namespace}/pods/{name}/log"
pathPrefix="/kubernetes/{cluster}"
/>

<OpenAPIPath
path="/api/v1/namespaces/{namespace}/pods/{name}/exec"
pathPrefix="/kubernetes/{cluster}"
/>

<OpenAPIPath
path="/api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers"
pathPrefix="/kubernetes/{cluster}"
/>

<OpenAPIPath
path="/api/v1/namespaces/{namespace}/pods/{name}/attach"
path={[
'/api/v1/namespaces/{namespace}/pods/{name}/eviction',
'/api/v1/namespaces/{namespace}/pods/{name}/log',
'/api/v1/namespaces/{namespace}/pods/{name}/exec',
'/api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers',
'/api/v1/namespaces/{namespace}/pods/{name}/attach',
]}
pathPrefix="/kubernetes/{cluster}"
/>
2 changes: 1 addition & 1 deletion docs/en/configure/backup/backups/hooks.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ title: Hooks

Custom hooks are extension commands that run in containers within the cluster. By configuring hooks, you can perform tailored operations during backup and restore. For special configuration requirements, contact technical support.

## Backup Hook
## Backup Hook \{#backup-hook}

During backup execution, when a Pod is being backed up, one or more commands can be executed in containers within the Pod. These commands can run before (`pre`) or after (`post`) the Pod backup operation completes.

Expand Down
2 changes: 1 addition & 1 deletion docs/en/configure/backup/recovery/app.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ You can quickly restore the application to the target namespace by performing an
| **Recovery Target Configuration** | **Namespaces**: The namespace where data recovery is performed and the source namespace of the backup data. The optional range is the namespace set in the backup policy. The system restores backup data to the same namespace based on your selection. **Tip**: To restore to other namespaces in the cluster, configure **Advanced Recovery Target Settings**. |
| **Advanced Recovery Target Settings** | Restore backup data originally scheduled for the source namespace to any namespace in the cluster (existing or newly created). **Source Namespace**: The selected namespace. **Target Namespace**: The namespace where data recovery is performed; either an existing namespace or a new one created by entering a non-existent name. **Tip**: If **Backup Kubernetes Resources and Persistent Volume Claims** was selected as the application backup resource type, ensure that the target cluster **StorageClass** name matches the source. If not, configure the source and target storage class names in the advanced options; the platform stores data using the new storage class. |

6. Click **YAML** in the upper-right corner to switch to YAML editing mode. Refer to [Configuring Hooks](../backups/app.mdx#hooks) to configure commands that run during recovery.
6. Click **YAML** in the upper-right corner to switch to YAML editing mode. Refer to [Configuring Hooks](../backups/hooks.mdx) to configure commands that run during recovery.

**Caution**: By default, the backup file is compared with resources in the target namespace. Only data that exists in the backup file but is missing in the namespace is restored. Resources with the same name or incremental resources (existing in the namespace but missing in the backup file) are not overwritten.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,16 +29,16 @@ When creating load balancers, it's recommended to manually configure service ann
kubectl get pod -n kube-system | grep aws-load
```

2. Create a load balancer; for detailed creation steps and parameters, see the Load Balancer creation section in [AWS EKS Service Annotation Instructions](/configure/clusters/managed/cloud-init/network/aws-eks.mdx#deploy-aws-load-balancer-controller).
2. Create a load balancer; for detailed creation steps and parameters, see the Load Balancer creation section in [AWS EKS Service Annotation Instructions](./aws-eks.mdx#deploy-aws-load-balancer-controller).

* If the above command returns no related Pods, it means the cluster does not have AWS Load Balancer Controller installed. No service annotations are needed; create the load balancer directly.

* If the above command returns related Pods, it means the cluster has AWS Load Balancer Controller installed. When creating a load balancer in the corresponding cluster, add the following service annotations. For annotation details, see [AWS EKS Service Annotation Instructions](/configure/networking/how_to/alb/deploy_alb.mdx#14):
* If the above command returns related Pods, it means the cluster has AWS Load Balancer Controller installed. When creating a load balancer in the corresponding cluster, add the following service annotations. For annotation details, see [AWS EKS Service Annotation Instructions](/configure/networking/how_to/alb/deploy_alb.mdx#alb_networking_configuration):

* `service.beta.kubernetes.io/aws-load-balancer-type: external //Required`

* `service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip //Required`

* `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing // Optional. Add this annotation if public network support is needed.`

### Access Address Acquisition Method
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ weight: 10

* Prepare two subnets with the **kubernetes.io/role/elb** tag. For shared subnets, add the **kubernetes.io/cluster/\<cluster-name\>: shared** tag. See [Adding Tags to Subnets](#add-tags-to-subnets).

* If you have created an EKS cluster, [import the Amazon EKS cluster](/configure/clusters/managed/import/aws-eks.mdx).
* If you have created an EKS cluster, [import the Amazon EKS cluster](/configure/clusters/managed/import/aws-eks.mdx).
{/* if not, [create an AWS EKS cluster](/configure/clusters/managed/create/aws-eks.mdx). */}

* Ensure kubectl, Helm, AWS CLI, and eksctl tools are available before deploying AWS Load Balancer Controller.
Expand All @@ -30,7 +30,7 @@ weight: 10

## Configuration Steps

### Deploy AWS Load Balancer Controller
### Deploy AWS Load Balancer Controller \{#deploy-aws-load-balancer-controller}

**Note**: For detailed information on deploying AWS Load Balancer Controller, see [official documentation](https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/aws-load-balancer-controller.html).

Expand Down Expand Up @@ -120,13 +120,13 @@ You can create ingress and LoadBalancer services simultaneously or choose one ba

2. Click **Create Service** and select **LoadBalancer** for **External Access**.

3. Expand **annotations** and fill in [LoadBalancer service annotations](/configure/networking/how_to/alb/deploy_alb.mdx#14) as needed.
3. Expand **annotations** and fill in [LoadBalancer service annotations](/configure/networking/how_to/alb/deploy_alb.mdx#alb_networking_configuration) as needed.

4. Click **Create**.

## Related Operations

### Test AWS CLI and eksctl Installation
### Test AWS CLI and eksctl Installation \{#test-aws-cli-and-eksctl-installation}

* Execute the following command. If it returns a cluster list, AWS CLI is correctly installed:

Expand All @@ -140,7 +140,7 @@ You can create ingress and LoadBalancer services simultaneously or choose one ba
eksctl get clusters
```

### Get ACCOUNT_ID
### Get ACCOUNT_ID \{#get-account_id}

Execute `aws sts get-caller-identity` to get **ACCOUNT_ID**. The `651168850570` in the response is the **ACCOUNT_ID**:

Expand All @@ -150,7 +150,7 @@ Execute `aws sts get-caller-identity` to get **ACCOUNT_ID**. The `651168850570`
}
```

### Kubeconfig Configuration File
### Kubeconfig Configuration File \{#kubeconfig-configuration-file}

1. Execute the following command to update the Kubeconfig file for the specified region:

Expand All @@ -164,14 +164,14 @@ Execute `aws sts get-caller-identity` to get **ACCOUNT_ID**. The `651168850570`
kubectl get svc -n cpaas-system
```

### Add Tags to Subnets
### Add Tags to Subnets \{#add-tags-to-subnets}

1. Execute the following command to get cluster subnets:

```bash
eksctl get cluster --name <CLUSTER_NAME>
```

2. Execute the following command to get subnet details:

```bash
Expand All @@ -192,7 +192,7 @@ Execute `aws sts get-caller-identity` to get **ACCOUNT_ID**. The `651168850570`
aws ec2 create-tags --resources <subnet-id> --tags Key=kubernetes.io/cluster/<CLUSTER_NAME>,Value="shared"
```

### Create Certificate
### Create Certificate \{#create-certificate}

When using HTTPS protocol, save HTTPS certificate credentials as a Secret (TLS type) in advance.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ If you have created an AKS cluster, [import the Azure AKS cluster](/configure/cl

## Configuration Steps

### Deploy Ingress Controller
### Deploy Ingress Controller \{#deploy-ingress-controller}

AKS uses **container network mode** and leverages **Nginx Ingress Controller** to manage load balancers, while providing external access addresses for virtual IP addresses (VIPs) in the container internal network through **LoadBalancer** type **Services**.

Expand Down Expand Up @@ -56,7 +56,7 @@ You can create ingress and LoadBalancer services simultaneously or choose one ba

## Related Operations

### Create Certificate
### Create Certificate \{#create-certificate}

When using HTTPS protocol, save HTTPS certificate credentials as a Secret (TLS type) in advance.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ You can create ingress and LoadBalancer services simultaneously or choose one ba

3. View information about corresponding Ingress resources in the list.

### Create Certificate
### Create Certificate \{#create-certificate}

When using HTTPS protocol, save HTTPS certificate credentials as a Secret (TLS type) in advance.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ There are two methods to create ingress. **Method 1: Manual Ingress Class Select

## Related Operations

### Create Certificate
### Create Certificate \{#create-certificate}

When using HTTPS protocol, save HTTPS certificate credentials as a Secret (TLS type) in advance.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ Platform integration with AWS EKS and storage initialization configuration.

## Related Operations

### Configure Available Storage Class Parameters
### Configure Available Storage Class Parameters \{#configure-available-storage-class-parameters}

* EFS File Storage Available Parameters

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ The default azurefile file storage class may not support permission modification

## Related Information

### Default Storage Class Description
### Default Storage Class Description \{#default-storage-class-description}

| Storage Class Name | Storage Class Type | Description |
|---|---|---|
Expand All @@ -43,7 +43,7 @@ The default azurefile file storage class may not support permission modification
| managed-csi-premium | Block Storage | Creates managed disks using Azure premium locally redundant storage (LRS). |
| managed-premium | Block Storage | Creates managed disks using Azure premium storage. |

### Available Storage Class Parameters
### Available Storage Class Parameters \{#available-storage-class-parameters}

* For block storage optional parameters and meanings, see [Create and use volumes with Azure disks in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/zh-cn/azure/aks/azure-csi-disk-storage-provision).

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ Platform integration with Google GKE and storage initialization configuration.

* Default file storage creation and deletion operations take considerable time. If it remains in creating status for a long time, please be patient.

## Prerequisites
## Prerequisites \{#prerequisites}

* When creating clusters, on the Google Cloud Platform **Cluster > Features** page under the **Other** section, check **Enable Compute Engine Persistent Disk CSI Driver** and **Enable Filestore CSI Driver** options.

Expand All @@ -38,7 +38,7 @@ Platform integration with Google GKE and storage initialization configuration.

## Related Information

### Default Storage Class Description
### Default Storage Class Description \{#default-storage-class-description}

| Storage Class Name | Storage Class Type | Description |
|---|---|---|
Expand All @@ -49,7 +49,7 @@ Platform integration with Google GKE and storage initialization configuration.
| enterprise-rwx | File Storage | Uses [Enterprise Filestore tier](https://cloud.google.com/filestore/docs/service-tiers?hl=zh-cn#enterprise_tier). |
| enterprise-multishare-rwx | File Storage | Uses [Enterprise Filestore tier](https://cloud.google.com/filestore/docs/service-tiers?hl=zh-cn#enterprise_tier). See [Filestore multishares for Google Kubernetes Engine](https://cloud.google.com/filestore/docs/multishares?hl=zh-cn). |

### Available Storage Class Parameters
### Available Storage Class Parameters \{#available-storage-class-parameters}

* For block storage optional parameters and meanings, see [Storage options](https://cloud.google.com/compute/docs/disks?hl=zh-cn#disk-types).

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ Cluster PVC quantity has limits, and account storage capacity has quotas. You ca

3. Select the **Project Assignment** method as needed and click **Update** to assign **csi-nas** or **csi-disk** storage classes to projects.

## Default Storage Class Description
## Default Storage Class Description \{#default-storage-class-description}

| Storage Class Name | Storage Class Type | Description |
|---|---|---|
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ Obtain the configuration required to connect to the import cluster so that the p

## Get cluster information

### Get cluster token <span id="fetchtoken"></span>
### Get cluster token \{#fetchtoken}

1. Run the following commands:

Expand Down
2 changes: 1 addition & 1 deletion docs/en/configure/clusters/managed/import/alibaba-ack.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ For product information about ACK managed clusters (Managed Kubernetes) or Aliba

## Prerequisites

* The Kubernetes version and parameters on the cluster meet the [component version and parameter requirements for importing standard Kubernetes clusters](/configure/clusters/overview.mdx#version-compatibility).
* The Kubernetes version and parameters on the cluster meet the [component version and parameter requirements for importing standard Kubernetes clusters](/overview/kubernetes-support-matrix.mdx#version-support-matrix).

## Get Image Registry Address

Expand Down
2 changes: 1 addition & 1 deletion docs/en/configure/clusters/managed/import/aws-eks.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ Connect an existing Amazon EKS (Elastic Kubernetes Service) cluster to the platf

## Prerequisites

* The cluster's Kubernetes version and settings meet the requirements in [Version compatibility for importing standard Kubernetes clusters](/configure/clusters/overview.mdx#version-compatibility).
* The cluster's Kubernetes version and settings meet the requirements in [Version compatibility for importing standard Kubernetes clusters](/overview/kubernetes-support-matrix.mdx#version-support-matrix).

* The image registry must support HTTPS and provide a valid TLS certificate issued by a public CA.

Expand Down
Loading