
Default ramalama serve to only listen on localhost
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
rhatdan committed Feb 24, 2025
1 parent 00839ee commit 573ff39
Showing 4 changed files with 5 additions and 5 deletions.
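The change itself is a one-line default swap repeated in four places: the sample config, its man page, the CLI parser, and the system tests. As background (not part of the commit), here is a minimal Python sketch of why the default matters: a socket bound to 0.0.0.0 accepts connections on every network interface, while one bound inside the 127.0.0.0/8 loopback block is reachable only from the local machine. Note the commit writes 127.0.0.0, the network address of that block; 127.0.0.1 is the conventional localhost address, and the sketch uses it.

    import socket

    # Bound to all interfaces: reachable from any host that can route to this machine.
    exposed = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    exposed.bind(("0.0.0.0", 0))  # port 0: let the OS pick a free port
    print("all interfaces:", exposed.getsockname())

    # Bound to loopback: reachable only from processes on this machine.
    local = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    local.bind(("127.0.0.1", 0))
    print("loopback only:", local.getsockname())

    exposed.close()
    local.close()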
docs/ramalama.conf: 1 addition & 1 deletion
@@ -42,7 +42,7 @@

# IP address for llama.cpp to listen on.
#
#host = "0.0.0.0"
#host = "127.0.0.0"

# Pass `--group-add keep-groups` to podman, when using podman.
# In some cases this is needed to access the gpu from a rootless container
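With loopback as the shipped default, exposing the server to other hosts becomes an explicit opt-in: uncomment the key in ramalama.conf and set it back to the old value. A sketch of that override, using the key documented above:

    # In ramalama.conf: deliberately listen on all interfaces again.
    host = "0.0.0.0"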
docs/ramalama.conf.5.md: 1 addition & 1 deletion
@@ -78,7 +78,7 @@ Run RamaLama using the specified container engine.
Valid options are: Podman and Docker
This field can be overridden by the RAMALAMA_CONTAINER_ENGINE environment variable.

**host**="0.0.0.0"
**host**="127.0.0.0"

IP address for llama.cpp to listen on.

ramalama/cli.py: 1 addition & 1 deletion
@@ -856,7 +856,7 @@ def serve_parser(subparsers):
run_serve_perplexity_args(parser)
add_network_argument(parser, "")
parser.add_argument("-d", "--detach", action="store_true", dest="detach", help="run the container in detached mode")
parser.add_argument("--host", default=config.get('host', "0.0.0.0"), help="IP address to listen")
parser.add_argument("--host", default=config.get('host', "127.0.0.0"), help="IP address to listen")
parser.add_argument(
"--generate",
choices=["quadlet", "kube", "quadlet/kube"],
Expand Down
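The parser resolves the listen address in three steps: an explicit --host flag wins, otherwise the host key from the loaded config applies, and only then does the new 127.0.0.0 literal serve as the built-in fallback. A standalone sketch of that precedence pattern (the empty config dict is a stand-in, not ramalama's real config loader):

    import argparse

    config = {}  # stand-in for the parsed ramalama.conf table

    parser = argparse.ArgumentParser()
    parser.add_argument("--host", default=config.get("host", "127.0.0.0"), help="IP address to listen")

    print(parser.parse_args([]).host)                     # no flag: 127.0.0.0
    print(parser.parse_args(["--host", "0.0.0.0"]).host)  # explicit flag wins: 0.0.0.0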
test/system/040-serve.bats: 2 additions & 2 deletions
@@ -17,7 +17,7 @@ verify_begin=".*run --rm -i --label ai.ramalama --name"
run_ramalama --dryrun serve --name foobar ${model}
is "$output" "${verify_begin} foobar .*" "dryrun correct with --name"
assert "$output" !~ ".*--network" "--network is not part of the output"
assert "$output" =~ ".*--host 0.0.0.0" "verify host 0.0.0.0 is added when run within container"
assert "$output" =~ ".*--host 127.0.0.0" "verify host 127.0.0.0 is added when run within container"
is "$output" ".*${model}" "verify model name"
assert "$output" !~ ".*--seed" "assert seed does not show by default"

@@ -53,7 +53,7 @@ verify_begin=".*run --rm -i --label ai.ramalama --name"
run_ramalama stop --all
else
run_ramalama --dryrun serve ${model}
assert "$output" =~ ".*--host 0.0.0.0" "Outside container sets host to 0.0.0.0"
assert "$output" =~ ".*--host 127.0.0.0" "Outside container sets host to 127.0.0.0"
run_ramalama --dryrun serve --seed abcd --host 127.0.0.1 ${model}
assert "$output" =~ ".*--host 127.0.0.1" "Outside container overrides host to 127.0.0.1"
assert "$output" =~ ".*--seed abcd" "Verify seed is set"
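The updated assertions pin the new default in both the in-container and outside-container paths and confirm that an explicit flag still wins. The same checks can be reproduced by hand with a dry run (${model} is a placeholder, as in the tests):

    ramalama --dryrun serve ${model}                    # output now contains --host 127.0.0.0
    ramalama --dryrun serve --host 127.0.0.1 ${model}   # explicit --host overrides the default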
