Skip to content
Merged

Dev #1533

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion applications/Unity.AutoUI/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -33,66 +33,70 @@ public DataHealthCheckWorker(ICurrentTenant currentTenant,
_emailNotificationService = emailNotificationService;
_paymentRequestsRepository = paymentRequestsRepository;

string? envInfo = Environment.GetEnvironmentVariable("ASPNETCORE_ENVIRONMENT");

if (string.Equals(envInfo, "Production", StringComparison.OrdinalIgnoreCase))
{
string cronExpression = SettingDefinitions.GetSettingsValue(settingManager, SettingsConstants.BackgroundJobs.DataHealthCheckMonitor_Expression);

JobDetail = JobBuilder
.Create<DataHealthCheckWorker>()
.WithIdentity(nameof(DataHealthCheckWorker))
.Build();

Trigger = TriggerBuilder
.Create()
.WithIdentity(nameof(DataHealthCheckWorker))
.WithSchedule(CronScheduleBuilder.CronSchedule(cronExpression)
.WithMisfireHandlingInstructionIgnoreMisfires())
.Build();
}
string cronExpression = SettingDefinitions.GetSettingsValue(settingManager, SettingsConstants.BackgroundJobs.DataHealthCheckMonitor_Expression);

JobDetail = JobBuilder
.Create<DataHealthCheckWorker>()
.WithIdentity(nameof(DataHealthCheckWorker))
.Build();

Trigger = TriggerBuilder
.Create()
.WithIdentity(nameof(DataHealthCheckWorker))
.WithSchedule(CronScheduleBuilder.CronSchedule(cronExpression)
.WithMisfireHandlingInstructionIgnoreMisfires())
.Build();
}

/// <summary>
/// Quartz job entry point. In the Production environment only, iterates every tenant and
/// checks for (a) emails with a status of Initialized/Sent but no CHES response and
/// (b) payments in Submitted status with no CAS response. Findings across all tenants
/// are aggregated into a single alert email. Outside Production the job logs that it
/// ran and exits without doing any work.
/// </summary>
/// <param name="context">Quartz execution context (not used directly).</param>
public override async Task Execute(IJobExecutionContext context)
{
    Logger.LogInformation("Executing DataHealthCheckWorker...");

    string? envInfo = Environment.GetEnvironmentVariable("ASPNETCORE_ENVIRONMENT");

    // Health checks (and the resulting alert email) are intentionally gated to Production.
    if (string.Equals(envInfo, "Production", StringComparison.OrdinalIgnoreCase))
    {
        var tenants = await _tenantRepository.GetListAsync();
        bool sendEmail = false;
        var emailBodyBuilder = new System.Text.StringBuilder();

        foreach (var tenant in tenants)
        {
            // Scope repository/service calls below to the tenant being inspected.
            using (_currentTenant.Change(tenant.Id, tenant.Name))
            {
                // Lookup the missing emails (sent but no CHES response recorded).
                var missingEmailsCount = await _emailNotificationService.GetEmailsChesWithNoResponseCountAsync();
                if (missingEmailsCount > 0)
                {
                    Logger.LogWarning("Tenant {TenantName} has {MissingEmailsCount} missing email(s) with a status of Initialized or Sent but no CHES Response.", tenant.Name, missingEmailsCount);
                    string missingEmailBody = $"Unity tenant {tenant.Name} has {missingEmailsCount} email(s) that were sent but have no CHES Response.";
                    sendEmail = true;
                    emailBodyBuilder.AppendLine($"{missingEmailBody}<br />");
                }

                // Lookup the missing payments (submitted but no CAS response recorded).
                var missingPayments = await GetPaymentsSentWithoutResponseCountAsync();
                if (missingPayments > 0)
                {
                    Logger.LogWarning("Tenant {TenantName} has {MissingPaymentsCount} payments sent without a response.", tenant.Name, missingPayments);
                    string missingPaymentBody = $"Unity tenant {tenant.Name} has {missingPayments} payment(s) that are in Submitted status but have no CAS Response.";
                    sendEmail = true;
                    emailBodyBuilder.AppendLine($"{missingPaymentBody}<br />");
                }
            }
        }

        // One aggregated alert covering every tenant with findings.
        if (sendEmail)
        {
            string emailBody = emailBodyBuilder.ToString();
            await SendEmailAlert(emailBody, "Data Health Check Alert - Emails/Payments Missing Responses");
        }

        Logger.LogInformation("DataHealthCheckWorker Executed...");
    }
}

private async Task<int> GetPaymentsSentWithoutResponseCountAsync()
Expand All @@ -118,7 +122,7 @@ await _emailNotificationService.SendEmailNotification(
htmlBody,
subject,
"NoReply@gov.bc.ca", "html",
"");
"");

Logger.LogInformation("Missing Alerts Sent...");

Expand Down
2 changes: 1 addition & 1 deletion database/crunchy-postgres/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.2
version: 0.1.4

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
Expand Down
68 changes: 68 additions & 0 deletions database/crunchy-postgres/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -132,3 +132,71 @@ A [Prometheus](https://prometheus.io/) exporter for PostgreSQL
| `pgmonitor.exporter.limits.memory` | Memory limits | `128Mi` |

---

## Data Restore CronJob

This feature allows you to set up a daily CronJob that restores data from a source S3 repository (e.g., from another database instance) into the current PostgreSQL cluster. This is useful for change data capture scenarios where you need to regularly sync data from a source database. The configuration reuses the same structure as `dataSource` and `pgBackRest.s3` for consistency.

### Configuration

| Parameter | Description | Default |
| ---------------------------------------------- | ----------------------------------------------------- | ---------------------- |
| `dataRestore.enabled` | Enable the data restore CronJob | `false` |
| `dataRestore.schedule` | Cron schedule for the restore job | `"0 2 * * *"` |
| `dataRestore.image` | pgBackRest image to use for restore | `crunchy-pgbackrest` |
| `dataRestore.secretName` | K8s secret containing S3 credentials (reuse existing) | `s3-pgbackrest` |
| `dataRestore.repo.name` | Repository name (repo1, repo2, etc.) | `repo2` |
| `dataRestore.repo.path` | S3 path prefix | `/habackup` |
| `dataRestore.repo.s3.bucket` | Source S3 bucket name | `bucketName` |
| `dataRestore.repo.s3.endpoint` | S3 endpoint URL | Object store endpoint |
| `dataRestore.repo.s3.region` | S3 region | `not-used` |
| `dataRestore.repo.s3.uriStyle` | S3 URI style (path or host) | `path` |
| `dataRestore.stanza` | pgBackRest stanza name | `db` |
| `dataRestore.target.clusterName` | Target cluster name (defaults to current cluster) | `""` |
| `dataRestore.target.database` | Target database name | `postgres` |
| `dataRestore.resources.requests.cpu` | CPU requests for restore job | `100m` |
| `dataRestore.resources.requests.memory` | Memory requests for restore job | `256Mi` |
| `dataRestore.resources.limits.cpu` | CPU limits for restore job | `500m` |
| `dataRestore.resources.limits.memory` | Memory limits for restore job | `512Mi` |
| `dataRestore.successfulJobsHistoryLimit` | Number of successful jobs to keep in history | `3` |
| `dataRestore.failedJobsHistoryLimit` | Number of failed jobs to keep in history | `1` |
| `dataRestore.restartPolicy` | Pod restart policy for failed jobs | `OnFailure` |
| `dataRestore.additionalArgs` | Additional pgbackrest arguments | `[]` |

### Usage Example

The configuration reuses existing S3 secrets and follows the same patterns as `dataSource`:

```yaml
dataRestore:
enabled: true
schedule: "0 2 * * *" # Daily at 2 AM
# Reuse existing S3 secret from dataSource or pgBackRest.s3
secretName: "dev-s3-pgbackrest"
repo:
name: repo2
path: "/habackup-source-database"
s3:
bucket: "source-database-backups"
endpoint: "https://sector.objectstore.gov.bc.ca"
region: "not-used"
uriStyle: "path"
stanza: db
target:
database: "myapp"
additionalArgs:
- "--log-level-console=debug"
- "--process-max=2"
```

### Important Notes

- The restore uses `--delta` mode, which only restores changed files for efficiency
- Reuses existing S3 secrets from `dataSource` or `pgBackRest.s3` configuration
- The job runs with the specified S3 repository as the source
- Ensure the source S3 repository contains valid pgBackRest backups
- The target cluster must be accessible and have proper credentials
- Monitor CronJob logs for restore status and any errors
- Configuration follows the same patterns as `dataSource` for consistency

---
35 changes: 34 additions & 1 deletion database/crunchy-postgres/custom-values-example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -40,4 +40,37 @@ pgBackRest:
keySecret: "s3SecretValue"
# set the default schedule to avoid conflicts
fullSchedule: 30 11 * * *
incrementalSchedule: 30 3,15,19,23 * * *
incrementalSchedule: 30 3,15,19,23 * * *

# Data restore cronjob configuration example
# Uncomment and configure to enable daily restore from source database
# Reuses the same structure as dataSource for consistency
# dataRestore:
# enabled: true
# schedule: "0 2 * * *" # Daily at 2 AM
# image: "artifacts.developer.gov.bc.ca/bcgov-docker-local/crunchy-pgbackrest:ubi8-2.47-1"
# # Reuse existing S3 secret (same as dataSource or pgBackRest.s3)
# secretName: "new-s3-pgbackrest"
# repo:
# name: repo2
# path: "/habackup-source"
# s3:
# bucket: "source-database-backups"
# endpoint: "https://sector.objectstore.gov.bc.ca"
# region: "not-used"
# uriStyle: "path"
# stanza: db
# target:
# # Leave empty to use current cluster name
# clusterName: ""
# database: "myapp"
# resources:
# requests:
# cpu: 200m
# memory: 512Mi
# limits:
# cpu: 1000m
# memory: 1Gi
# additionalArgs:
# - "--log-level-console=debug"
# - "--process-max=2"
27 changes: 25 additions & 2 deletions database/crunchy-postgres/templates/PostgresCluster.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,33 @@ apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
name: {{ template "crunchy-postgres.fullname" . }}
labels: {{ include "crunchy-postgres.labels" . | nindent 4 }}
labels:
helm.sh/chart: {{ include "crunchy-postgres.chart" . }}
app.kubernetes.io/name: {{ include "crunchy-postgres.name" . }}
app.kubernetes.io/instance: {{ include "crunchy-postgres.fullname" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $key, $value := .Values.labels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
app.kubernetes.io/component: "database"
spec:
openshift: {{ .Values.openshift | default false }}
metadata:
labels: {{ include "crunchy-postgres.labels" . | nindent 6 }}
labels:
helm.sh/chart: {{ include "crunchy-postgres.chart" . }}
app.kubernetes.io/name: {{ include "crunchy-postgres.name" . }}
app.kubernetes.io/instance: {{ include "crunchy-postgres.fullname" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $key, $value := .Values.labels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
app.kubernetes.io/component: "database"
{{ if .Values.crunchyImage }}
image: {{ .Values.crunchyImage }}
{{ end }}
Expand Down Expand Up @@ -87,6 +109,7 @@ spec:
- secret:
name: {{ .Values.dataSource.secretName }}
global:
repo2-s3-uri-style: {{ .Values.dataSource.repo.s3.uriStyle | quote }}
repo2-path: {{ .Values.dataSource.repo.path }}
repo:
name: {{ .Values.dataSource.repo.name }}
Expand Down
4 changes: 3 additions & 1 deletion database/crunchy-postgres/templates/_helpers.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,9 @@ helm.sh/chart: {{ include "crunchy-postgres.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: {{ index .Values.labels "app.kubernetes.io/part-of" | default "crunchydb" }}
{{- range $key, $value := .Values.labels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
app.kubernetes.io/component: "database"
{{- end }}

Expand Down
35 changes: 35 additions & 0 deletions database/crunchy-postgres/templates/data-restore-configmap.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
{{- if .Values.dataRestore.enabled }}
{{- /* Numeric repo index derived from the repo name once, e.g. "repo2" -> "2". */}}
{{- $repoIdx := .Values.dataRestore.repo.name | replace "repo" "" }}
# pgBackRest configuration consumed by the data-restore CronJob.
# FIX: bucket and endpoint are read from .Values.dataRestore.repo.s3.* to match the
# documented values structure (README and custom-values-example nest them under repo.s3);
# the previous .Values.dataRestore.repo.bucket/.endpoint paths rendered empty strings.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "crunchy-postgres.fullname" . }}-data-restore-config
  labels:
    helm.sh/chart: {{ include "crunchy-postgres.chart" . }}
    app.kubernetes.io/name: {{ include "crunchy-postgres.name" . }}
    app.kubernetes.io/instance: {{ include "crunchy-postgres.fullname" . }}
    {{- if .Chart.AppVersion }}
    app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
    {{- end }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    {{- range $key, $value := .Values.labels }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
    app.kubernetes.io/component: "data-restore-config"
data:
  pgbackrest.conf: |
    [global]
    repo{{ $repoIdx }}-type=s3
    repo{{ $repoIdx }}-s3-bucket={{ .Values.dataRestore.repo.s3.bucket }}
    repo{{ $repoIdx }}-s3-endpoint={{ .Values.dataRestore.repo.s3.endpoint }}
    repo{{ $repoIdx }}-s3-region={{ .Values.dataRestore.repo.s3.region | default "not-used" }}
    repo{{ $repoIdx }}-path={{ .Values.dataRestore.repo.path }}
    repo{{ $repoIdx }}-s3-uri-style={{ .Values.dataRestore.repo.s3.uriStyle | default "path" }}
    log-level-console=info
    log-level-file=debug

    [{{ .Values.dataRestore.stanza }}]
    pg1-host={{ if .Values.dataRestore.target.clusterName }}{{ .Values.dataRestore.target.clusterName }}{{ else }}{{ include "crunchy-postgres.fullname" . }}{{ end }}-primary.{{ .Release.Namespace }}.svc.cluster.local
    pg1-port=5432
    pg1-user=postgres
    pg1-database={{ .Values.dataRestore.target.database }}
{{- end }}
Loading