Commit bc320ef

Merge branch 'master' into compilade/refactor-kv-cache

2 parents: 9b38f8b + a47667c

395 files changed: +58,780 / -171,025 lines


.devops/full-cuda.Dockerfile

Lines changed: 12 additions & 15 deletions
@@ -1,18 +1,16 @@
 ARG UBUNTU_VERSION=22.04
-
 # This needs to generally match the container host's environment.
-ARG CUDA_VERSION=11.7.1
-
+ARG CUDA_VERSION=12.6.0
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}

-FROM ${BASE_CUDA_DEV_CONTAINER} as build
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build

-# Unless otherwise specified, we make a fat build.
-ARG CUDA_DOCKER_ARCH=all
+# CUDA architecture to build for (defaults to all supported archs)
+ARG CUDA_DOCKER_ARCH=default

 RUN apt-get update && \
-    apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1
+    apt-get install -y build-essential cmake python3 python3-pip git libcurl4-openssl-dev libgomp1

 COPY requirements.txt requirements.txt
 COPY requirements requirements
@@ -24,13 +22,12 @@ WORKDIR /app

 COPY . .

-# Set nvcc architecture
-ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
-# Enable CUDA
-ENV GGML_CUDA=1
-# Enable cURL
-ENV LLAMA_CURL=1
-
-RUN make -j$(nproc)
+# Use the default CUDA archs if not specified
+RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
+        export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
+    fi && \
+    cmake -B build -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
+    cmake --build build --config Release --target llama-cli -j$(nproc) && \
+    cp build/bin/* .

 ENTRYPOINT ["/app/.devops/tools.sh"]
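For reference, a minimal sketch of how the new CUDA_DOCKER_ARCH build argument might be used when building this image from the repository root; the image tag and the compute capability value 86 are illustrative, not part of this commit:

# Build the full CUDA image, pinning a single CUDA architecture (hypothetical tag)
docker build -t local/llama.cpp:full-cuda \
    --build-arg CUDA_DOCKER_ARCH=86 \
    -f .devops/full-cuda.Dockerfile .

Leaving CUDA_DOCKER_ARCH at its "default" value builds for all supported architectures, as the Dockerfile comment above indicates.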

.devops/full-rocm.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6
 # Target the CUDA build image
 ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete

-FROM ${BASE_ROCM_DEV_CONTAINER} as build
+FROM ${BASE_ROCM_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878

.devops/full.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04

-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build

 RUN apt-get update && \
     apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1

.devops/llama-cli-cann.Dockerfile

Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
+ARG ASCEND_VERSION=8.0.rc2.alpha003-910b-openeuler22.03-py3.8
+
+FROM cosdt/cann:$ASCEND_VERSION AS build
+
+WORKDIR /app
+
+COPY . .
+
+RUN yum install -y gcc g++ cmake make
+ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
+ENV LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:$LIBRARY_PATH
+ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/lib64/plugin/opskernel:${ASCEND_TOOLKIT_HOME}/lib64/plugin/nnengine:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe/op_tiling:${LD_LIBRARY_PATH}
+ENV PYTHONPATH=${ASCEND_TOOLKIT_HOME}/python/site-packages:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe:${PYTHONPATH}
+ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${ASCEND_TOOLKIT_HOME}/compiler/ccec_compiler/bin:${PATH}
+ENV ASCEND_AICPU_PATH=${ASCEND_TOOLKIT_HOME}
+ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
+ENV TOOLCHAIN_HOME=${ASCEND_TOOLKIT_HOME}/toolkit
+ENV ASCEND_HOME_PATH=${ASCEND_TOOLKIT_HOME}
+
+# find libascend_hal.so, because the drive hasn`t been mounted.
+ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/runtime/lib64/stub:$LD_LIBRARY_PATH
+
+RUN echo "Building with static libs" && \
+    source /usr/local/Ascend/ascend-toolkit/set_env.sh --force && \
+    cmake -B build -DGGML_CANN=ON -DBUILD_SHARED_LIBS=OFF && \
+    cmake --build build --config Release --target llama-cli
+
+# TODO: use image with NNRT
+FROM cosdt/cann:$ASCEND_VERSION AS runtime
+COPY --from=build /app/build/bin/llama-cli /llama-cli
+
+ENV LC_ALL=C.utf8
+
+ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
+ENV LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:$LIBRARY_PATH
+ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/lib64/plugin/opskernel:${ASCEND_TOOLKIT_HOME}/lib64/plugin/nnengine:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe/op_tiling:${LD_LIBRARY_PATH}
+ENV PYTHONPATH=${ASCEND_TOOLKIT_HOME}/python/site-packages:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe:${PYTHONPATH}
+ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${ASCEND_TOOLKIT_HOME}/compiler/ccec_compiler/bin:${PATH}
+ENV ASCEND_AICPU_PATH=${ASCEND_TOOLKIT_HOME}
+ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
+ENV TOOLCHAIN_HOME=${ASCEND_TOOLKIT_HOME}/toolkit
+ENV ASCEND_HOME_PATH=${ASCEND_TOOLKIT_HOME}
+
+ENTRYPOINT ["/llama-cli" ]
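Since this Dockerfile is new in the commit, a quick hedged example of building it; the tag is a placeholder, and actually running the resulting image requires Ascend NPU drivers and devices from the host, which is outside the scope of this diff:

# Build the CANN-enabled llama-cli image from the repository root (hypothetical tag)
docker build -t local/llama.cpp:light-cann -f .devops/llama-cli-cann.Dockerfile .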

.devops/llama-cli-cuda.Dockerfile

Lines changed: 15 additions & 13 deletions
@@ -1,35 +1,37 @@
 ARG UBUNTU_VERSION=22.04
 # This needs to generally match the container host's environment.
-ARG CUDA_VERSION=11.7.1
+ARG CUDA_VERSION=12.6.0
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
 # Target the CUDA runtime image
 ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

-FROM ${BASE_CUDA_DEV_CONTAINER} as build
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build

-# Unless otherwise specified, we make a fat build.
-ARG CUDA_DOCKER_ARCH=all
+# CUDA architecture to build for (defaults to all supported archs)
+ARG CUDA_DOCKER_ARCH=default

 RUN apt-get update && \
-    apt-get install -y build-essential git
+    apt-get install -y build-essential git cmake

 WORKDIR /app

 COPY . .

-# Set nvcc architecture
-ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
-# Enable CUDA
-ENV GGML_CUDA=1
+# Use the default CUDA archs if not specified
+RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
+        export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
+    fi && \
+    cmake -B build -DGGML_CUDA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
+    cmake --build build --config Release --target llama-cli -j$(nproc)

-RUN make -j$(nproc) llama-cli
-
-FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
+FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

 RUN apt-get update && \
     apt-get install -y libgomp1

-COPY --from=build /app/llama-cli /llama-cli
+COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
+COPY --from=build /app/build/src/libllama.so /libllama.so
+COPY --from=build /app/build/bin/llama-cli /llama-cli

 ENTRYPOINT [ "/llama-cli" ]
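Because the CUDA build is no longer static, the runtime stage above now copies libggml.so and libllama.so next to the binary. A rough usage sketch, assuming the NVIDIA Container Toolkit is installed; the image tag and model path are placeholders:

# Run the CUDA llama-cli image with GPU access and a mounted model directory
docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda \
    -m /models/model.gguf -p "Hello" -n 32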

.devops/llama-cli-intel.Dockerfile

Lines changed: 5 additions & 3 deletions
@@ -1,6 +1,6 @@
 ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04

-FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build

 ARG GGML_SYCL_F16=OFF
 RUN apt-get update && \
@@ -14,10 +14,12 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
     echo "GGML_SYCL_F16 is set" && \
     export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
-    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
+    echo "Building with static libs" && \
+    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx \
+    ${OPT_SYCL_F16} -DBUILD_SHARED_LIBS=OFF && \
     cmake --build build --config Release --target llama-cli

-FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime

 COPY --from=build /app/build/bin/llama-cli /llama-cli

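The SYCL image keeps its GGML_SYCL_F16 build argument, so FP16 kernels can still be toggled at build time. An illustrative invocation; the image tag is a placeholder:

# Build the Intel SYCL llama-cli image with FP16 support enabled
docker build -t local/llama.cpp:light-intel \
    --build-arg GGML_SYCL_F16=ON \
    -f .devops/llama-cli-intel.Dockerfile .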

.devops/llama-cli-rocm.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6
 # Target the CUDA build image
 ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete

-FROM ${BASE_ROCM_DEV_CONTAINER} as build
+FROM ${BASE_ROCM_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878

.devops/llama-cli-vulkan.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=jammy

-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build

 # Install build tools
 RUN apt update && apt install -y git build-essential cmake wget libgomp1

.devops/llama-cli.Dockerfile

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04

-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build

 RUN apt-get update && \
     apt-get install -y build-essential git
@@ -11,7 +11,7 @@ COPY . .

 RUN make -j$(nproc) llama-cli

-FROM ubuntu:$UBUNTU_VERSION as runtime
+FROM ubuntu:$UBUNTU_VERSION AS runtime

 RUN apt-get update && \
     apt-get install -y libgomp1

.devops/llama-server-cuda.Dockerfile

Lines changed: 18 additions & 15 deletions
@@ -1,38 +1,41 @@
 ARG UBUNTU_VERSION=22.04
 # This needs to generally match the container host's environment.
-ARG CUDA_VERSION=11.7.1
+ARG CUDA_VERSION=12.6.0
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
 # Target the CUDA runtime image
 ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

-FROM ${BASE_CUDA_DEV_CONTAINER} as build
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build

-# Unless otherwise specified, we make a fat build.
-ARG CUDA_DOCKER_ARCH=all
+# CUDA architecture to build for (defaults to all supported archs)
+ARG CUDA_DOCKER_ARCH=default

 RUN apt-get update && \
-    apt-get install -y build-essential git libcurl4-openssl-dev
+    apt-get install -y build-essential git cmake libcurl4-openssl-dev

 WORKDIR /app

 COPY . .

-# Set nvcc architecture
-ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
-# Enable CUDA
-ENV GGML_CUDA=1
-# Enable cURL
-ENV LLAMA_CURL=1
+# Use the default CUDA archs if not specified
+RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
+        export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
+    fi && \
+    cmake -B build -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
+    cmake --build build --config Release --target llama-server -j$(nproc)

-RUN make -j$(nproc) llama-server
-
-FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
+FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

 RUN apt-get update && \
     apt-get install -y libcurl4-openssl-dev libgomp1 curl

-COPY --from=build /app/llama-server /llama-server
+COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
+COPY --from=build /app/build/src/libllama.so /libllama.so
+COPY --from=build /app/build/bin/llama-server /llama-server
+
+# Must be set to 0.0.0.0 so it can listen to requests from host machine
+ENV LLAMA_ARG_HOST=0.0.0.0

 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
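The new LLAMA_ARG_HOST=0.0.0.0 default is what lets the HEALTHCHECK above, and clients on the host, reach the server through a published port. A hedged run sketch; the image tag and model path are placeholders:

# Run the CUDA llama-server image and expose its default port 8080
docker run --gpus all -p 8080:8080 -v /path/to/models:/models \
    local/llama.cpp:server-cuda -m /models/model.gguf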

.devops/llama-server-intel.Dockerfile

Lines changed: 5 additions & 2 deletions
@@ -1,6 +1,6 @@
 ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04

-FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build

 ARG GGML_SYCL_F16=OFF
 RUN apt-get update && \
@@ -14,17 +14,20 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
     echo "GGML_SYCL_F16 is set" && \
     export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
+    echo "Building with dynamic libs" && \
     cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
     cmake --build build --config Release --target llama-server

-FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime

 RUN apt-get update && \
     apt-get install -y libcurl4-openssl-dev curl

 COPY --from=build /app/build/bin/llama-server /llama-server

 ENV LC_ALL=C.utf8
+# Must be set to 0.0.0.0 so it can listen to requests from host machine
+ENV LLAMA_ARG_HOST=0.0.0.0

 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]

.devops/llama-server-rocm.Dockerfile

Lines changed: 3 additions & 1 deletion
@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6
 # Target the CUDA build image
 ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete

-FROM ${BASE_ROCM_DEV_CONTAINER} as build
+FROM ${BASE_ROCM_DEV_CONTAINER} AS build

 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
@@ -39,6 +39,8 @@ ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++
+# Must be set to 0.0.0.0 so it can listen to requests from host machine
+ENV LLAMA_ARG_HOST=0.0.0.0

 # Enable cURL
 ENV LLAMA_CURL=1

.devops/llama-server-vulkan.Dockerfile

Lines changed: 3 additions & 1 deletion
@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=jammy

-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build

 # Install build tools
 RUN apt update && apt install -y git build-essential cmake wget
@@ -23,6 +23,8 @@ RUN cp /app/build/bin/llama-server /llama-server && \
     rm -rf /app

 ENV LC_ALL=C.utf8
+# Must be set to 0.0.0.0 so it can listen to requests from host machine
+ENV LLAMA_ARG_HOST=0.0.0.0

 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]

.devops/llama-server.Dockerfile

Lines changed: 6 additions & 4 deletions
@@ -1,9 +1,9 @@
 ARG UBUNTU_VERSION=22.04

-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build

 RUN apt-get update && \
-    apt-get install -y build-essential git libcurl4-openssl-dev curl
+    apt-get install -y build-essential git libcurl4-openssl-dev

 WORKDIR /app

@@ -13,14 +13,16 @@ ENV LLAMA_CURL=1

 RUN make -j$(nproc) llama-server

-FROM ubuntu:$UBUNTU_VERSION as runtime
+FROM ubuntu:$UBUNTU_VERSION AS runtime

 RUN apt-get update && \
-    apt-get install -y libcurl4-openssl-dev libgomp1
+    apt-get install -y libcurl4-openssl-dev libgomp1 curl

 COPY --from=build /app/llama-server /llama-server

 ENV LC_ALL=C.utf8
+# Must be set to 0.0.0.0 so it can listen to requests from host machine
+ENV LLAMA_ARG_HOST=0.0.0.0

 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
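The same LLAMA_ARG_HOST default applies to this CPU server image, so a published port can be probed with the same endpoint the HEALTHCHECK uses. A sketch with placeholder image tag and model path:

# Run the CPU llama-server image and check the health endpoint from the host
docker run -p 8080:8080 -v /path/to/models:/models local/llama.cpp:server -m /models/model.gguf
curl -f http://localhost:8080/health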

.devops/nix/apps.nix

Lines changed: 0 additions & 1 deletion
@@ -10,7 +10,6 @@
       "llama-embedding"
       "llama-server"
       "llama-quantize"
-      "llama-train-text-from-scratch"
     ];
     mkApp = name: {
       type = "app";
