splitting out background tasks
This commit is contained in:
22
src/clj/auto_ap/jobs/current_balance_cache.clj
Normal file
22
src/clj/auto_ap/jobs/current_balance_cache.clj
Normal file
@@ -0,0 +1,22 @@
|
||||
(ns auto-ap.jobs.current-balance-cache
|
||||
(:gen-class)
|
||||
(:require [auto-ap.graphql.clients :as clients]
|
||||
[auto-ap.utils :refer [heartbeat]]
|
||||
[mount.core :as mount]
|
||||
[auto-ap.datomic :refer [conn]]
|
||||
[clojure.tools.logging :as log]
|
||||
[auto-ap.background.metrics :refer [metrics-setup container-tags container-data logging-context]]
|
||||
[unilog.context :as lc]))
|
||||
|
||||
(defn -main
  "Entry point for the current-balance-cache background job.

  Starts only the mount states this job needs, runs the current-balance
  refresh under a heartbeat, then stops mount and exits. Exits 0 on
  success and 1 on failure so the scheduler can detect failed runs."
  [& _]
  (try
    (lc/with-context {:background-job "current-balance-cache"}
      ;; Start just the states this job depends on, not the whole app.
      (mount/start (mount/only #{#'conn #'metrics-setup #'container-tags #'logging-context #'container-data}))
      ;; heartbeat returns a wrapped fn; the outer parens invoke it.
      ((heartbeat clients/refresh-current-balance "current-balance-cache"))
      (mount/stop)
      (log/info "Stopping current-balance-cache"))
    ;; Explicit exit so lingering non-daemon threads don't keep the container alive.
    (System/exit 0)
    (catch Throwable t
      ;; Previously any failure exited 0 silently; log it and signal failure.
      (log/error t "current-balance-cache job failed")
      (System/exit 1))))
|
||||
|
||||
|
||||
21
src/clj/auto_ap/jobs/ledger_reconcile.clj
Normal file
21
src/clj/auto_ap/jobs/ledger_reconcile.clj
Normal file
@@ -0,0 +1,21 @@
|
||||
(ns auto-ap.jobs.ledger-reconcile
|
||||
(:gen-class)
|
||||
(:require [auto-ap.ledger :as ledger]
|
||||
[auto-ap.utils :refer [heartbeat]]
|
||||
[mount.core :as mount]
|
||||
[auto-ap.datomic :refer [conn]]
|
||||
[clojure.tools.logging :as log]
|
||||
[auto-ap.background.metrics :refer [metrics-setup container-tags container-data logging-context]]
|
||||
[unilog.context :as lc]))
|
||||
|
||||
(defn -main
  "Entry point for the ledger reconciliation background job.

  Starts only the mount states this job needs, runs the ledger reconcile
  and broken-ledger touch passes under heartbeats, then stops mount and
  exits. Exits 0 on success and 1 on failure so the scheduler can detect
  failed runs."
  [& _]
  (try
    (lc/with-context {:background-job "reconcile-ledger"}
      ;; Start just the states this job depends on, not the whole app.
      (mount/start (mount/only #{#'conn #'metrics-setup #'container-tags #'logging-context #'container-data}))
      ;; heartbeat returns a wrapped fn; the outer parens invoke it.
      ((heartbeat ledger/reconcile-ledger "reconcile-ledger"))
      ((heartbeat ledger/touch-broken-ledger "touch-broken-ledger"))
      (mount/stop)
      (log/info "Stopping Ledger reconciliation"))
    ;; Explicit exit so lingering non-daemon threads don't keep the container alive.
    (System/exit 0)
    (catch Throwable t
      ;; Previously any failure exited 0 silently; log it and signal failure.
      (log/error t "reconcile-ledger job failed")
      (System/exit 1))))
|
||||
|
||||
47
terraform/background-job/main.tf
Normal file
47
terraform/background-job/main.tf
Normal file
@@ -0,0 +1,47 @@
|
||||
# ARN of the ECS cluster used as the EventBridge target for the scheduled task.
variable "ecs_cluster" {}
# IAM role assumed by the task's containers at runtime.
variable "task_role_arn" {}
# IAM role ECS uses to launch the task (pull image, write logs).
variable "execution_role_arn" {}
# Deployment stage; substituted for "STAGE" in the container definition JSON.
variable "stage" {}
# Logical job name; substituted for "JOBX" in the container definition JSON
# and used in resource/family names.
variable "job_name" {}
# CloudWatch Events schedule expression, e.g. "rate(5 minutes)" or "cron(...)".
variable "schedule" {}
|
||||
|
||||
|
||||
|
||||
# Fargate task definition for one background job. The shared container
# definition template lives one directory above this module; STAGE and JOBX
# placeholders are substituted per job.
resource "aws_ecs_task_definition" "background_taskdef" {

  # Task definition family names may not contain "-".
  family = replace("${var.job_name}_${var.stage}", "-", "_")

  # Resolve the template relative to the module, not the working directory,
  # so plans work no matter where terraform is invoked from.
  container_definitions = replace(replace(file("${path.module}/../background-taskdef.json"), "STAGE", var.stage), "JOBX", var.job_name)

  # NOTE(review): 8 GiB / 2 vCPU for every job — confirm all jobs need this much.
  memory                   = 8192
  cpu                      = 2048
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  execution_role_arn       = var.execution_role_arn
  task_role_arn            = var.task_role_arn
}
|
||||
|
||||
# CloudWatch Events rule that fires the job on the caller-supplied schedule.
# NOTE(review): the rule name has no stage suffix — if this module is
# instantiated once per stage in the same account the names will collide;
# confirm before adding var.stage (renaming forces recreation).
resource "aws_cloudwatch_event_rule" "schedule" {
  name = "${var.job_name}-schedule"
  # Plain expression instead of the deprecated "${...}"-only interpolation.
  schedule_expression = var.schedule
}
|
||||
|
||||
|
||||
|
||||
# EventBridge target that launches the job's task on the ECS cluster each
# time the schedule rule fires.
resource "aws_cloudwatch_event_target" "job_target" {
  target_id = var.job_name
  arn       = var.ecs_cluster
  rule      = aws_cloudwatch_event_rule.schedule.name
  # NOTE(review): hardcoded account-specific invoke role — consider passing
  # this in as a variable like the other role ARNs.
  role_arn = "arn:aws:iam::679918342773:role/service-role/Amazon_EventBridge_Invoke_ECS_1758992733"

  ecs_target {
    task_count = 1
    # The task definition requires FARGATE compatibility; an empty launch
    # type would fall back to EC2 and the run would fail to place.
    launch_type = "FARGATE"

    # capacity_provider_strategy = [{capacity_provider = "FARGATE_SPOT"}]
    task_definition_arn = aws_ecs_task_definition.background_taskdef.arn

    network_configuration {
      # Public IP needed to pull the image without a NAT gateway —
      # TODO confirm these are public subnets.
      assign_public_ip = true
      security_groups  = ["sg-004e5855310c453a3", "sg-02d167406b1082698"]
      subnets          = ["subnet-5e675761", "subnet-8519fde2", "subnet-89bab8d4"]
    }
  }
}
|
||||
66
terraform/background-taskdef.json
Normal file
66
terraform/background-taskdef.json
Normal file
@@ -0,0 +1,66 @@
|
||||
[
|
||||
{
|
||||
"environment": [
|
||||
{
|
||||
"name": "config",
|
||||
"value": "/usr/local/config/STAGE-background-worker.edn"
|
||||
},
|
||||
{
|
||||
"name": "DD_ENV",
|
||||
"value": "STAGE"
|
||||
},
|
||||
{
|
||||
"name": "DD_SERVICE",
|
||||
"value": "JOBX"
|
||||
},
|
||||
{"name": "INTEGREAT_JOB",
|
||||
"value": "JOBX"},
|
||||
{"name": "DD_CONTAINER_ENV_AS_TAGS",
|
||||
"value":"{\"INTEGREAT_JOB\":\"background_job\"}"}
|
||||
],
|
||||
"essential": true,
|
||||
"image": "679918342773.dkr.ecr.us-east-1.amazonaws.com/integreat:STAGE",
|
||||
"logConfiguration": {
|
||||
"logDriver": "awslogs",
|
||||
"options": {
|
||||
"awslogs-group": "/ecs/integreat-background-worker-STAGE",
|
||||
"awslogs-region": "us-east-1",
|
||||
"awslogs-stream-prefix": "ecs"
|
||||
}
|
||||
},
|
||||
"dockerLabels": {
|
||||
"com.datadoghq.tags.env": "STAGE",
|
||||
"com.datadoghq.tags.service": "JOBX"
|
||||
},
|
||||
"mountPoints": [],
|
||||
"name": "integreat-app",
|
||||
"portMappings": [
|
||||
{
|
||||
"containerPort": 9000,
|
||||
"hostPort": 9000,
|
||||
"protocol": "tcp"
|
||||
},
|
||||
{
|
||||
"containerPort": 9090,
|
||||
"hostPort": 9090,
|
||||
"protocol": "tcp"
|
||||
}
|
||||
],
|
||||
"volumesFrom": []
|
||||
},
|
||||
{
|
||||
"environment": [
|
||||
{
|
||||
"name": "DD_API_KEY",
|
||||
"value": "ce10d932c47b358e81081ae67bd8c112"
|
||||
},
|
||||
{
|
||||
"name": "ECS_FARGATE",
|
||||
"value": "true"
|
||||
}
|
||||
],
|
||||
"essential": true,
|
||||
"image": "public.ecr.aws/datadog/agent:latest",
|
||||
"name": "datadog-agent"
|
||||
}
|
||||
]
|
||||
5
terraform/list.txt
Normal file
5
terraform/list.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
water bladder
|
||||
meal stuff
|
||||
toothbrush
|
||||
license
|
||||
camera
|
||||
Reference in New Issue
Block a user