# Make sure to increase resource requests and limits before using this example in production.
# For examples with more realistic resource configuration, see
# ray-cluster.complete.large.yaml and
# ray-cluster.autoscaler.large.yaml.
apiVersion: ray.io/v1
kind: RayService
metadata:
  name: rayservice-sample
spec:
  serviceUnhealthySecondThreshold: 900 # Config for the health check threshold for Ray Serve applications. Default value is 900.
  deploymentUnhealthySecondThreshold: 300 # Config for the health check threshold for Ray dashboard agent. Default value is 300.
  # The workload consists of two applications. The first application checks on an event in the second application.
  # If the event isn't set, the first application will block on requests until the event is set. So, to test upscaling
  # we can first send a bunch of requests to the first application, which will trigger Serve autoscaling to bring up
  # more replicas since the existing replicas are blocked on requests. Worker pods should scale up. To test downscaling,
  # set the event in the second application, releasing all blocked requests, and worker pods should scale down.
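  # A rough sketch of how this test could be driven from outside the cluster (the Serve service
  # name and the exact /signal behavior are assumptions, not something this manifest defines):
  #   kubectl port-forward svc/rayservice-sample-serve-svc 8000:8000 &
  #   for i in $(seq 1 100); do curl -s http://localhost:8000/ & done   # blocked requests, Serve scales up
  #   curl -s http://localhost:8000/signal                              # sets the event, requests drain, workers scale down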
  serveConfigV2: |
    applications:
      - name: app1
        import_path: autoscaling.blocked_new:app
        route_prefix: /
        runtime_env:
          working_dir: "https://github.com/ray-project/test_dag/archive/23eac0533522579dff9a8a1ecb88931874d987ea.zip"
        deployments:
          - name: Blocked
            autoscaling_config:
              metrics_interval_s: 0.2
              min_replicas: 1
              max_replicas: 14
              look_back_period_s: 2
              downscale_delay_s: 5
              upscale_delay_s: 2
            graceful_shutdown_timeout_s: 5
            max_concurrent_queries: 1000
            ray_actor_options:
              num_cpus: 0.5
      - name: signal
        import_path: autoscaling.signaling:app
        route_prefix: /signal
        runtime_env:
          working_dir: "https://github.com/ray-project/test_dag/archive/23eac0533522579dff9a8a1ecb88931874d987ea.zip"
  rayClusterConfig:
    rayVersion: '2.7.0' # should match the Ray version in the image of the containers
    ## raycluster autoscaling config
    enableInTreeAutoscaling: true
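    # With enableInTreeAutoscaling set to true, the KubeRay operator injects a Ray Autoscaler
    # sidecar container into the head pod, which scales each worker group between its
    # minReplicas and maxReplicas.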
    autoscalerOptions:
      upscalingMode: Default
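      # Accepted values are Default, Aggressive, and Conservative; Conservative rate-limits
      # upscaling so that the number of pending worker pods stays bounded.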
      resources:
        limits:
          cpu: "1"
          memory: "1000Mi"
        requests:
          cpu: "1"
          memory: "1000Mi"
    ######################headGroupSpecs#################################
    headGroupSpec:
      # The `rayStartParams` are used to configure the `ray start` command.
      # See https://github.com/ray-project/kuberay/blob/master/docs/guidance/rayStartParams.md for the default settings of `rayStartParams` in KubeRay.
      # See https://docs.ray.io/en/latest/cluster/cli.html#ray-start for all available options in `rayStartParams`.
      rayStartParams:
        dashboard-host: '0.0.0.0'
      #pod template
      template:
        spec:
          containers:
            - name: ray-head
              image: rayproject/ray:2.7.0
              resources:
                # limits:
                #   cpu: 2
                #   memory: 2Gi
                requests:
                  cpu: 2
                  memory: 2Gi
              ports:
                - containerPort: 6379
                  name: gcs-server
                - containerPort: 8265 # Ray dashboard
                  name: dashboard
                - containerPort: 10001
                  name: client
                - containerPort: 8000
                  name: serve
    workerGroupSpecs:
      # the number of pod replicas in this worker group
      - replicas: 1
        minReplicas: 1
        maxReplicas: 5
        # logical group name; here it's called small-group, but it can also be a functional name
        groupName: small-group
        # The `rayStartParams` are used to configure the `ray start` command.
        # See https://github.com/ray-project/kuberay/blob/master/docs/guidance/rayStartParams.md for the default settings of `rayStartParams` in KubeRay.
        # See https://docs.ray.io/en/latest/cluster/cli.html#ray-start for all available options in `rayStartParams`.
        rayStartParams: {}
        #pod template
        template:
          spec:
            containers:
              - name: ray-worker # must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name' or '123-abc')
                image: rayproject/ray:2.7.0
                lifecycle:
                  preStop:
                    exec:
                      command: ["/bin/sh","-c","ray stop"]
                resources:
                  # limits:
                  #   cpu: "1"
                  #   memory: "2Gi"
                  requests:
                    cpu: "1"
                    memory: "2Gi"
Search before asking
KubeRay Component
ray-operator
What happened + What you expected to happen
When resource limits are not set, the head node always crashes.
Reproduction script
deploy yaml: see the manifest above.
Anything else
kuberay version: v1.0.0
Are you willing to submit a PR?

Thank you for reporting the issue! This behavior is expected for the Ray Autoscaler. I have already opened a PR to explicitly document this limitation: https://github.com/ray-project/ray/pull/43832.
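A minimal sketch of the workaround this thread points at (an assumption on my part, not text from the issue): keep explicit limits on the Ray containers, matching their requests, rather than leaving the limits commented out as in the manifest above. For the head group, the resources fragment would look like:

          containers:
            - name: ray-head
              image: rayproject/ray:2.7.0
              resources:
                limits:
                  cpu: 2
                  memory: 2Gi
                requests:
                  cpu: 2
                  memory: 2Gi

The ray-worker container in workerGroupSpecs would get the same treatment with its own cpu and memory values.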