# This file shows all the possible values that can be defined to configure the functions and their linked services
# Most of these values are already defined in the SCAR default configuration file
# The values defined in a configuration file are applied only to the function or functions being deployed
# To permanently override some of these values and apply them to all the deployed functions, edit the SCAR default configuration file in ~/.scar/scar.cfg
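# A configuration file like this one is typically passed to the SCAR CLI when deploying,
# e.g. 'scar init -f fdl-example.yaml' (illustrative invocation; run 'scar --help' to check the options of your SCAR version)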
# ----------------------------------------------------------------------------------------------------------------
functions:
  # Define the different providers under this property. Supported values are 'aws' and 'oscar'
  aws:
  # Define a list of functions under this property.
  # Each function can define its own properties and all its related services
  # Possible values are 'lambda', 'iam', 'api_gateway', 'cloudwatch', 'batch'
  # REQUIRED 'lambda'
  - lambda:
      # Boto profile used for the lambda client
      # Default 'default'
      # Must match one of the profiles in the file ~/.aws/credentials
      boto_profile: default
      # Region of the function, can be any region supported by AWS
      # Default 'us-east-1'
      region: us-east-1
      # Function's name
      # REQUIRED
      name: function1
      # Memory of the function, in MB, min 128, max 3008. Default '512'
      memory: 1024
      # Maximum execution time in seconds, max 900. Default '300'
      timeout: 300
      # Set the job delegation mode
      # Possible values 'lambda', 'lambda-batch', 'batch'
      # Default 'lambda'
      execution_mode: lambda
      # Supervisor log level
      # Can be INFO, DEBUG, ERROR, WARNING
      # Default 'INFO'
      log_level: INFO
      # ARNs of the Lambda function's layers (max 4).
      # SCAR adds the supervisor layer automatically
      layers:
      - arn:....
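      # Layer ARNs have the form 'arn:aws:lambda:<region>:<account-id>:layer:<name>:<version>',
      # e.g. 'arn:aws:lambda:us-east-1:123456789012:layer:my-layer:1' (illustrative values)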
      # Environment variables of the function
      # These variables are set in the lambda's environment, not in the container's environment
      environment:
        Variables:
          KEY1: val1
          KEY2: val2
      # Script executed inside the function's container
      init_script: ffmpeg-script.sh
      # Define udocker container properties
      container:
        # Container image to use. REQUIRED
        image: jrottenberg/ffmpeg:4.1-ubuntu
        # Time reserved to post-process the data generated by the container
        # This time is subtracted from the total time set for the function
        # If there are a lot of files to upload as output, this value may have to be increased
        # Default '10' seconds
        timeout_threshold: 10
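        # For example, with 'timeout: 300' and 'timeout_threshold: 10', the container
        # workload gets roughly 290 seconds and the remaining ~10 are reserved for uploading the output files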
        # Environment variables of the container
        # These variables are passed to the container environment, that is, they can be accessed from the user's script
        environment:
          Variables:
            KEY1: val1
            KEY2: val2
      # Define input storage providers linked with the function.
      # In AWS environments only S3 providers can be configured as inputs.
      # In OSCAR clusters only MinIO providers can be configured as inputs.
      input:
      # Storage provider identifier. Must have the form <PROVIDER_TYPE>.<IDENTIFIER>
      # Possible 'PROVIDER_TYPE' values: 'minio', 's3', 'onedata'
      - storage_provider: minio.my_minio
        # Complete path of the bucket, including folders (if any)
        path: my-bucket/test
      # Define output storage providers linked with the function
      output:
      - storage_provider: s3.my_s3
        path: my-bucket/test-output
        # Define optional filters to upload the output files based on prefix or suffix
        # Possible values 'prefix', 'suffix'
        suffix:
        # List of suffixes to filter (can be any string)
        - wav
        - srt
        prefix:
        # List of prefixes to filter (can be any string)
        - result-
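        # With the filters above, an output file such as 'result-audio.wav' matches both
        # the 'result-' prefix and the 'wav' suffix (illustrative file name)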
      # Properties for the faas-supervisor used inside the lambda function
      supervisor:
        # Must be a GitHub tag or "latest". Default 'latest'
        version: latest
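        # e.g. 'version: 1.2.0' to pin a concrete release (illustrative tag; see the
        # releases of the grycap/faas-supervisor GitHub repository for valid values)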
    # Set IAM properties
    iam:
      boto_profile: default
      # The Amazon Resource Name (ARN) of the function's execution role.
      # This value is usually set for all the functions in SCAR's default configuration file
      # REQUIRED
      role: ""
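      # Execution role ARNs have the form 'arn:aws:iam::<account-id>:role/<role-name>',
      # e.g. 'arn:aws:iam::123456789012:role/my-lambda-execution-role' (illustrative values)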
    # Set API Gateway properties
    # All these values are set by default
    api_gateway:
      boto_profile: default
      region: us-east-1
    # Set CloudWatch properties
    # All these values are set by default
    cloudwatch:
      boto_profile: default
      region: us-east-1
      # Number of days that the function's logs are stored
      log_retention_policy_in_days: 30
    # Set AWS Batch properties.
    # Only used when the 'lambda' execution_mode is set to 'lambda-batch' or 'batch'
    batch:
      boto_profile: default
      region: us-east-1
      # The number of vCPUs reserved for the container
      # Used in the job definition
      # Default 1
      vcpus: 1
      # The hard limit (in MiB) of memory to present to the container
      # Used in the job definition
      # Default 1024
      memory: 1024
      # Request GPU resources for the launched container
      # Default 'False'. Values 'False', 'True'
      enable_gpu: False
      # The full ARN of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf.
      service_role: "arn:..."
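      # e.g. 'arn:aws:iam::123456789012:role/service-role/AWSBatchServiceRole'
      # (illustrative values; 'AWSBatchServiceRole' is the AWS-managed role commonly used here)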
      # Environment variables passed to the batch container
      environment:
        Variables:
          KEY1: val1
          KEY2: val2
      compute_resources:
        # List of the Amazon EC2 security groups associated with instances launched in the compute environment
        # REQUIRED when using batch
        security_group_ids:
        - sg-12345678
        # The desired number of Amazon EC2 vCPUs in the compute environment
        # Default 0
        desired_v_cpus: 0
        # The minimum number of Amazon EC2 vCPUs that an environment should maintain
        # Default 0
        min_v_cpus: 0
        # The maximum number of Amazon EC2 vCPUs that an environment can reach
        # Default 2
        max_v_cpus: 2
        # List of the VPC subnets into which the compute resources are launched.
        # REQUIRED when using batch
        subnets:
        - subnet-12345
        - subnet-67891
        # The instance types that may be launched.
        # You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge).
        # You can also choose 'optimal' to pick instance types (from the C, M, and R instance families) on the fly that match the demand of your job queues.
        # Default 'm3.medium'
        instance_types:
        - "m3.medium"
        # The Amazon ECS instance profile applied to Amazon EC2 instances in the compute environment
        instance_role: "arn:..."
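        # e.g. 'arn:aws:iam::123456789012:instance-profile/ecsInstanceRole'
        # (illustrative values; 'ecsInstanceRole' is the profile name commonly used with ECS instances)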
  oscar:
  # User-specified identifier for an OSCAR cluster.
  - my_oscar:
      # Service's name
      # REQUIRED
      name: service1
      # Memory limit for the service following the Kubernetes format
      # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory
      # Optional. (default: 256Mi)
      memory: 1Gi
      # CPU limit for the service following the Kubernetes format
      # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
      # Optional. (default: 0.2)
      cpu: '1.0'
      # Log level for the FaaS Supervisor
      # Can be INFO, DEBUG, ERROR, WARNING
      # Default 'INFO'
      log_level: INFO
      # Container image to use. REQUIRED
      image: grycap/darknet
      # Script executed inside the service's container
      script: my-script.sh
      # Environment variables of the service
      environment:
        Variables:
          KEY1: val1
          KEY2: val2
      # Define input storage providers linked with the service.
      # In AWS environments only S3 providers can be configured as inputs.
      # In OSCAR clusters only MinIO providers can be configured as inputs.
      input:
      # Storage provider identifier. Must have the form <PROVIDER_TYPE>.<IDENTIFIER>
      # Possible 'PROVIDER_TYPE' values: 'minio', 's3', 'onedata'
      - storage_provider: minio.my_minio
        # Complete path of the bucket, including folders (if any)
        path: my-bucket/test
      # Define output storage providers linked with the service
      output:
      - storage_provider: s3.my_s3
        path: my-bucket/test-output
        # Define optional filters to upload the output files based on prefix or suffix
        # Possible values 'prefix', 'suffix'
        suffix:
        # List of suffixes to filter (can be any string)
        - wav
        - srt
        prefix:
        # List of prefixes to filter (can be any string)
        - result-
# Define the connections to the different storage providers. Supported 's3', 'minio', 'onedata'
# If you use a default S3 storage with the default boto configuration, these properties are not needed in an AWS environment.
# If you use a default MinIO storage, these properties are not needed in an OSCAR cluster.
storage_providers:
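  # Each identifier defined here is what the functions above reference as
  # '<PROVIDER_TYPE>.<IDENTIFIER>'; e.g. the 'my_minio' server below is the one
  # referenced as 'minio.my_minio' in the 'input' sections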
  # S3 properties
  s3:
    # User-provided identifier for an AWS account used to access the S3 service. Several accounts can be specified.
    my_s3:
      # Define S3 (AWS) account properties
      # If used, REQUIRED properties are 'access_key', 'secret_key'
      # The supervisor will try to create the boto3 client using the function permissions (in an AWS Lambda environment)
      access_key: awsuser
      secret_key: awskey
      region: us-east-1
  # MinIO properties
  minio:
    # User-provided identifier for a MinIO server. Several servers can be specified.
    my_minio:
      # Define MinIO server properties
      # If used, REQUIRED properties are 'endpoint', 'access_key', 'secret_key'
      endpoint: minio-endpoint
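      # e.g. 'https://minio.mycluster.example:9000' (illustrative URL; use the endpoint of your MinIO server)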
      # Verify the SSL certificate of the endpoint
      verify: True
      region: us-east-1
      access_key: muser
      secret_key: mpass
  # Onedata properties
  onedata:
    # User-provided identifier for a Onedata account. Several accounts can be specified.
    my_onedata:
      # Define Onedata account properties
      # If used, REQUIRED properties are 'oneprovider_host', 'token', 'space'
      oneprovider_host: op-host
      token: mytoken
      space: onedata_space