main
Kye 2 years ago
parent e7cd99a3d4
commit 9d18f0968e

@@ -1,3 +1,16 @@
# This is a basic Dockerfile and may need to be adjusted to your application's specific needs. Replace the environment-variable placeholders with your actual keys, and never commit sensitive data such as API keys to the Dockerfile or to version control.
# When building and running this container on a machine with an NVIDIA GPU, allocate enough resources (especially GPU memory) for your chosen visual foundation model.
# Note that Docker does not expose GPUs to containers by default: running GPU-accelerated code requires the NVIDIA Container Toolkit (nvidia-docker) or Docker's --gpus option. For more complex orchestration, a platform like Kubernetes can be more effective.
# Lastly, since the application uses Redis (CELERY_BROKER_URL), you will also need a separate Redis service. This can be set up as a multi-container application using Docker Compose or Kubernetes.
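# Example GPU-enabled build and run (a sketch; assumes the image is tagged "swarms:latest"
# and that an .env file holds the API keys listed below):
#   docker build -t swarms:latest .
#   docker run --gpus all --env-file .env -p 8000:8000 swarms:latest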
# Use an official Python runtime as a parent image
FROM python:3.9-slim-buster

@@ -0,0 +1,32 @@
version: '3.8'

services:
  swarms:
    build: .
    ports:
      - "${EVAL_PORT}:${EVAL_PORT}"
    environment:
      EVAL_PORT: 8000
      MODEL_NAME: gpt-4
      CELERY_BROKER_URL: redis://redis:6379
      SERVER: http://localhost:${EVAL_PORT}
      USE_GPU: "False"
      PLAYGROUND_DIR: playground
      LOG_LEVEL: INFO
      BOT_NAME: Orca
      # You will need to set these environment variables to your actual keys in production
      OPENAI_API_KEY: your_openai_api_key
      WINEDB_HOST: your_winedb_host
      WINEDB_PASSWORD: your_winedb_password
      BING_SEARCH_URL: your_bing_search_url
      BING_SUBSCRIPTION_KEY: your_bing_subscription_key
      SERPAPI_API_KEY: your_serpapi_api_key
    depends_on:
      - redis
    volumes:
      - .:/usr/src/app

  redis:
    image: redis:alpine
    ports:
      - "6379:6379"
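# To build and start the stack (a sketch; assumes Docker Compose v2 and that EVAL_PORT is
# provided via the shell environment or an .env file for the port mapping above):
#   docker compose up --build -d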

@@ -0,0 +1,42 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: swarms-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: swarms
  template:
    metadata:
      labels:
        app: swarms
    spec:
      containers:
        - name: swarms
          image: your_dockerhub_username/swarms:latest
          ports:
            - containerPort: 8000
          env:
            - name: EVAL_PORT
              value: "8000"
            - name: MODEL_NAME
              value: "gpt-4"
            - name: CELERY_BROKER_URL
              value: "redis://redis:6379"
            - name: SERVER
              value: "http://localhost:8000"
            - name: USE_GPU
              value: "False"
            - name: PLAYGROUND_DIR
              value: "playground"
            - name: LOG_LEVEL
              value: "INFO"
            - name: BOT_NAME
              value: "Orca"
            - name: OPENAI_API_KEY
              valueFrom:
                secretKeyRef:
                  name: openai-secret
                  key: OPENAI_API_KEY
            # Other environment variables
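            # Note: this assumes a Secret named "openai-secret" already exists, created for example with:
            #   kubectl create secret generic openai-secret --from-literal=OPENAI_API_KEY=<your key>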

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: swarms-service
spec:
  selector:
    app: swarms
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8000
  type: LoadBalancer
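# To deploy (a sketch; assumes the Deployment and Service above are saved as
# deployment.yaml and service.yaml respectively):
#   kubectl apply -f deployment.yaml -f service.yaml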

@@ -0,0 +1,208 @@
To create a Terraform configuration for deploying the Swarm application on an AWS EC2 instance with a T4 GPU, you would typically need the following resources:
1. **AWS Provider:** This is needed to configure the AWS resources.
2. **AWS Key Pair:** This is required for SSH access to the EC2 instances.
3. **Security Group:** This defines the firewall rules for your instances.
4. **EC2 Instance:** This is where you deploy your application. Be sure to choose an instance type with a T4 GPU (such as `g4dn.xlarge`).
5. **IAM Role and Policy:** These are optional but recommended for managing permissions.
The Terraform configuration file(s) should be written in HashiCorp Configuration Language (HCL). The conventional file extension is `.tf`.
Here's an example of what the Terraform configuration might look like:
```hcl
provider "aws" {
  region = "us-west-2"
}

resource "aws_key_pair" "deployer" {
  key_name   = "deployer-key"
  public_key = file("~/.ssh/id_rsa.pub")
}

resource "aws_security_group" "swarm-sg" {
  name        = "swarm-sg"
  description = "Security group for Swarm app"

  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 8000
    to_port     = 8000
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_instance" "swarm" {
  ami           = "ami-0c94855ba95c574c8" # Update this with the correct AMI ID
  instance_type = "g4dn.xlarge"
  key_name      = aws_key_pair.deployer.key_name

  vpc_security_group_ids = [aws_security_group.swarm-sg.id]

  tags = {
    Name = "SwarmInstance"
  }

  user_data = <<-EOF
              #!/bin/bash
              sudo apt-get update
              sudo apt-get install -y docker.io
              sudo docker pull your_docker_image_name
              sudo docker run -d -p 8000:8000 your_docker_image_name
              EOF
}
```
Please replace `"ami-0c94855ba95c574c8"` with the correct AMI ID for your desired operating system, and `"your_docker_image_name"` with the name of your Docker image.
This is a simple configuration and may not cover all your requirements. You might need to modify this to fit your needs, such as adding persistent storage (EBS volumes), load balancers, auto scaling groups, etc.
Remember to install Terraform and initialize it in your working directory using `terraform init` before running `terraform apply` to create the resources. Also, ensure your AWS credentials are correctly set up in your environment.
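As a quick reference, the end-to-end workflow might look like the following (a sketch; it assumes the configuration above is saved as `main.tf` in the current directory and that AWS credentials are available in your environment):
```bash
# Download the AWS provider and initialize the working directory
terraform init

# Preview the resources that will be created
terraform plan

# Create the key pair, security group, and EC2 instance
terraform apply
```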
Incorporating persistent storage, load balancers, and auto scaling makes the Terraform configuration significantly more complex. Below is a skeleton of what it might look like:
```hcl
provider "aws" {
  region = "us-west-2"
}

data "aws_ami" "ubuntu" {
  most_recent = true

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  owners = ["099720109477"]
}

resource "aws_key_pair" "deployer" {
  key_name   = "deployer-key"
  public_key = file("~/.ssh/id_rsa.pub")
}

resource "aws_security_group" "swarm-sg" {
  name        = "swarm-sg"
  description = "Security group for Swarm app"

  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 8000
    to_port     = 8000
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_launch_configuration" "swarm" {
  name            = "swarm-configuration"
  image_id        = data.aws_ami.ubuntu.id
  instance_type   = "g4dn.xlarge"
  key_name        = aws_key_pair.deployer.key_name
  security_groups = [aws_security_group.swarm-sg.id]

  user_data = <<-EOF
              #!/bin/bash
              sudo apt-get update
              sudo apt-get install -y docker.io
              sudo docker pull your_docker_image_name
              sudo docker run -d -p 8000:8000 your_docker_image_name
              EOF

  root_block_device {
    volume_type = "gp2"
    volume_size = 30 # size in GB
  }

  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_autoscaling_group" "swarm" {
  name_prefix          = "swarm-asg"
  max_size             = 5
  min_size             = 1
  desired_capacity     = 1
  launch_configuration = aws_launch_configuration.swarm.id

  # Replace with the IDs of the subnets the instances should run in
  vpc_zone_identifier = ["subnet-id1", "subnet-id2"]

  # Register instances launched by this group with the classic load balancer below
  load_balancers = [aws_elb.swarm.name]

  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_elb" "swarm" {
  name    = "swarm-elb"
  subnets = ["subnet-id1", "subnet-id2"]

  listener {
    instance_port     = 8000
    instance_protocol = "http"
    lb_port           = 80
    lb_protocol       = "http"
  }

  health_check {
    healthy_threshold   = 2
    unhealthy_threshold = 2
    timeout             = 3
    target              = "HTTP:8000/"
    interval            = 30
  }

  cross_zone_load_balancing   = true
  idle_timeout                = 400
  connection_draining         = true
  connection_draining_timeout = 400
}
```
In this example, the `aws_launch_configuration` sets up the details for launching new instances, including an EBS root volume for persistent storage. The `aws_autoscaling_group` uses this configuration to scale instances up and down as required, and registers the instances it launches with the load balancer via the `load_balancers` argument.
The `aws_elb` resource creates a load balancer that distributes incoming traffic across all the instances in the autoscaling group. The `health_check` block inside `aws_elb` is used to check the health of the instances; if an instance fails the health check, it is replaced by the autoscaling group.
Please replace `"subnet-id1"` and `"subnet-id2"` with your actual subnet IDs and `"your_docker_image_name"` with the name of your Docker image.
Again, note that this is a simplified example and may need to be adjusted to suit your particular use case. For instance, this configuration assumes that you are using a single security group for all instances, which might not be the best setup for a real-world scenario.
Before running this Terraform configuration, make sure to initialize Terraform in your working directory using `terraform init`, and ensure that your AWS credentials are correctly set up in your environment.
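To see where the application is reachable after `terraform apply`, you could also expose the load balancer's address as an output (a small sketch; the output name is arbitrary and assumes the resource names used above):
```hcl
# DNS name of the classic load balancer fronting the autoscaling group
output "swarm_elb_dns_name" {
  value = aws_elb.swarm.dns_name
}
```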

@@ -0,0 +1,115 @@
provider "aws" {
  region = "us-west-2"
}

data "aws_ami" "ubuntu" {
  most_recent = true

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  owners = ["099720109477"]
}

resource "aws_key_pair" "deployer" {
  key_name   = "deployer-key"
  public_key = file("~/.ssh/id_rsa.pub")
}

resource "aws_security_group" "swarm-sg" {
  name        = "swarm-sg"
  description = "Security group for Swarm app"

  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 8000
    to_port     = 8000
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_launch_configuration" "swarm" {
  name            = "swarm-configuration"
  image_id        = data.aws_ami.ubuntu.id
  instance_type   = "g4dn.xlarge"
  key_name        = aws_key_pair.deployer.key_name
  security_groups = [aws_security_group.swarm-sg.id]

  user_data = <<-EOF
              #!/bin/bash
              sudo apt-get update
              sudo apt-get install -y docker.io
              sudo docker pull your_docker_image_name
              sudo docker run -d -p 8000:8000 your_docker_image_name
              EOF

  root_block_device {
    volume_type = "gp2"
    volume_size = 30 # size in GB
  }

  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_autoscaling_group" "swarm" {
  name_prefix          = "swarm-asg"
  max_size             = 5
  min_size             = 1
  desired_capacity     = 1
  launch_configuration = aws_launch_configuration.swarm.id

  # Replace with the IDs of the subnets the instances should run in
  vpc_zone_identifier = ["subnet-id1", "subnet-id2"]

  # Register instances launched by this group with the classic load balancer below
  load_balancers = [aws_elb.swarm.name]

  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_elb" "swarm" {
  name    = "swarm-elb"
  subnets = ["subnet-id1", "subnet-id2"]

  listener {
    instance_port     = 8000
    instance_protocol = "http"
    lb_port           = 80
    lb_protocol       = "http"
  }

  health_check {
    healthy_threshold   = 2
    unhealthy_threshold = 2
    timeout             = 3
    target              = "HTTP:8000/"
    interval            = 30
  }

  cross_zone_load_balancing   = true
  idle_timeout                = 400
  connection_draining         = true
  connection_draining_timeout = 400
}