feat(.gitea/workflows): Add GitHub Actions workflows for Docker build and test

This commit is contained in:
2024-12-29 14:14:46 +01:00
parent bec47150a3
commit 9de86bd382
17 changed files with 396 additions and 156 deletions
+40 -34
View File
@@ -1,4 +1,5 @@
# @serve.zone/coreflow
A comprehensive solution for managing Docker and scaling applications across servers, handling tasks from service provisioning to network traffic management.
## Install
@@ -42,6 +43,7 @@ await coreflowInstance.stop();
```
In the above example:
- The Coreflow instance is initialized.
- Coreflow is started, which internally initializes various managers and connectors.
- The method `handleDockerEvents` is used to handle Docker events.
@@ -54,8 +56,8 @@ Coreflow manages applications and services, often requiring direct interactions
```typescript
// Assuming coreflowInstance is already started as per previous examples
const serviceConnection = coreflowInstance.createServiceConnection({
serviceName: "myDatabaseService",
servicePort: 3306
serviceName: 'myDatabaseService',
servicePort: 3306,
});
serviceConnection.connect().then(() => {
@@ -69,7 +71,7 @@ Coreflow excels in scaling applications across multiple servers. This involves n
```typescript
const scalingPolicy = {
serviceName: "apiService",
serviceName: 'apiService',
replicaCount: 5, // Target number of replicas
maxReplicaCount: 10, // Maximum number of replicas
minReplicaCount: 2, // Minimum number of replicas
@@ -81,6 +83,7 @@ coreflowInstance.applyScalingPolicy(scalingPolicy).then(() => {
```
In the above example:
- A scaling policy is defined with target, maximum, and minimum replica counts for the `apiService`.
- The `applyScalingPolicy` method of the Coreflow instance is used to apply this scaling policy.
@@ -92,10 +95,10 @@ One of Coreflow's key features is its ability to manage network traffic, ensurin
import { TrafficRule } from '@serve.zone/coreflow';
const rule: TrafficRule = {
serviceName: "webService",
serviceName: 'webService',
externalPort: 80,
internalPort: 3000,
protocol: "http",
protocol: 'http',
};
coreflowInstance.applyTrafficRule(rule).then(() => {
@@ -104,6 +107,7 @@ coreflowInstance.applyTrafficRule(rule).then(() => {
```
In the above example:
- A traffic rule is defined for the `webService`, redirecting external traffic from port 80 to the service's internal port 3000.
- The `applyTrafficRule` method is used to enforce this rule.
@@ -113,9 +117,9 @@ Coreflow integrates continuous integration and deployment processes, allowing se
```typescript
const deploymentConfig = {
serviceName: "userAuthService",
image: "myregistry.com/userauthservice:latest",
updatePolicy: "rolling" // or "recreate"
serviceName: 'userAuthService',
image: 'myregistry.com/userauthservice:latest',
updatePolicy: 'rolling', // or "recreate"
};
coreflowInstance.deployService(deploymentConfig).then(() => {
@@ -124,6 +128,7 @@ coreflowInstance.deployService(deploymentConfig).then(() => {
```
In the above example:
- A deployment configuration is created for the `userAuthService` using the latest image from the specified registry.
- The `deployService` method is then used to deploy the service using the specified update policy (e.g., rolling updates or recreating the service).
@@ -132,12 +137,13 @@ In the above example:
To keep track of your applications' health and performance, Coreflow provides tools for logging, monitoring, and alerting.
```typescript
coreflowInstance.monitorService("webService").on('serviceHealthUpdate', (healthStatus) => {
coreflowInstance.monitorService('webService').on('serviceHealthUpdate', (healthStatus) => {
console.log(`Received health update for webService: ${healthStatus}`);
});
```
In the above example:
- The `monitorService` method is used to monitor the health status of the `webService`.
- When a health update event is received, it is logged to the console.
@@ -175,8 +181,8 @@ coreflowInstance.handleDockerEvents().then(() => {
```typescript
const serviceConnection = coreflowInstance.createServiceConnection({
serviceName: "databaseService",
servicePort: 5432
serviceName: 'databaseService',
servicePort: 5432,
});
serviceConnection.connect().then(() => {
@@ -188,7 +194,7 @@ serviceConnection.connect().then(() => {
```typescript
const scalingPolicy = {
serviceName: "microserviceA",
serviceName: 'microserviceA',
replicaCount: 3, // Starting with 3 replicas
maxReplicaCount: 10, // Allowing up to 10 replicas
minReplicaCount: 2, // Ensuring at least 2 replicas
@@ -206,17 +212,17 @@ import { TrafficRule } from '@serve.zone/coreflow';
const trafficRules: TrafficRule[] = [
{
serviceName: "frontendService",
serviceName: 'frontendService',
externalPort: 80,
internalPort: 3000,
protocol: "http",
protocol: 'http',
},
{
serviceName: "apiService",
serviceName: 'apiService',
externalPort: 443,
internalPort: 4000,
protocol: "https",
}
protocol: 'https',
},
];
Promise.all(trafficRules.map((rule) => coreflowInstance.applyTrafficRule(rule))).then(() => {
@@ -228,9 +234,9 @@ Promise.all(trafficRules.map((rule) => coreflowInstance.applyTrafficRule(rule)))
```typescript
const deploymentConfig = {
serviceName: "authService",
image: "myregistry.com/authservice:latest",
updatePolicy: "rolling", // Performing rolling updates
serviceName: 'authService',
image: 'myregistry.com/authservice:latest',
updatePolicy: 'rolling', // Performing rolling updates
};
coreflowInstance.deployService(deploymentConfig).then(() => {
@@ -241,7 +247,7 @@ coreflowInstance.deployService(deploymentConfig).then(() => {
#### Step 7: Monitoring a Service
```typescript
coreflowInstance.monitorService("frontendService").on('serviceHealthUpdate', (healthStatus) => {
coreflowInstance.monitorService('frontendService').on('serviceHealthUpdate', (healthStatus) => {
console.log(`Health update for frontendService: ${healthStatus}`);
});
```
@@ -262,7 +268,7 @@ const checkinTask = new Task({
buffered: true,
taskFunction: async () => {
console.log('Running checkin task...');
}
},
});
const taskManager = coreflowInstance.taskManager;
@@ -284,18 +290,18 @@ const coretrafficConnector = new CoretrafficConnector(coreflowInstance);
const reverseProxyConfigs = [
{
hostName: "example.com",
destinationIp: "192.168.1.100",
destinationPort: "3000",
privateKey: "<your-private-key>",
publicKey: "<your-public-key>",
hostName: 'example.com',
destinationIp: '192.168.1.100',
destinationPort: '3000',
privateKey: '<your-private-key>',
publicKey: '<your-public-key>',
},
{
hostName: "api.example.com",
destinationIp: "192.168.1.101",
destinationPort: "4000",
privateKey: "<your-private-key>",
publicKey: "<your-public-key>",
hostName: 'api.example.com',
destinationIp: '192.168.1.101',
destinationPort: '4000',
privateKey: '<your-private-key>',
publicKey: '<your-public-key>',
},
];
@@ -323,7 +329,7 @@ cloudlyConnector.start().then(() => {
```typescript
cloudlyConnector.getConfigFromCloudly().then((config) => {
console.log('Received configuration from Cloudly:', config);
coreflowInstance.clusterManager.provisionWorkloadServices(config).then(() => {
console.log('Workload services provisioned based on Cloudly config.');
});
@@ -335,4 +341,4 @@ cloudlyConnector.getConfigFromCloudly().then((config) => {
Coreflow is a powerful and flexible tool for managing Docker-based applications, scaling services, configuring network traffic, handling continuous deployments, and ensuring observability of your infrastructure. The examples provided aim to give a comprehensive understanding of how to use Coreflow in various scenarios, ensuring it meets your DevOps and CI/CD needs.
By leveraging Coreflow's rich feature set, you can optimize your infrastructure for high availability, scalability, and efficient operation across multiple servers and environments.