File tree Expand file tree Collapse file tree 3 files changed +44
-2
lines changed
ggml/src/ggml-remotingfrontend Expand file tree Collapse file tree 3 files changed +44
-2
lines changed Original file line number Diff line number Diff line change
# Build helper for the remoting frontend.
# Drops READY or FAILED marker files so an external watcher can poll the outcome.

# force isatty-->true, so that $0 |& head -50 has colors ...
rm -f READY FAILED

echo "int isatty(int fd) { return 1; }" | gcc -O2 -fpic -shared -ldl -o /tmp/isatty.so -xc -
export LD_PRELOAD=/tmp/isatty.so

# Build-target list. BUILD_TARGET defaults to the remoting frontend library.
# (was: an unconditional TARGETS="ggml-remotingfrontend" that was immediately
#  overwritten by the next assignment -- dead code folded into a default here)
TARGETS="${BUILD_TARGET:-ggml-remotingfrontend} llama-run"
set -x
if [[ "${BENCH_MODE:-}" == "bench" ]]; then
    TARGETS="$TARGETS llama-bench"
elif [[ "${BENCH_MODE:-}" == "server" ]]; then
    TARGETS="$TARGETS llama-server"
elif [[ "${BENCH_MODE:-}" == "perf" ]]; then
    TARGETS="$TARGETS test-backend-ops"
fi

# Test the build command directly instead of inspecting $? afterwards.
if cmake --build ../build.remoting-frontend$FLAVOR --parallel 8 --target $TARGETS "$@"; then
    touch READY
else
    touch FAILED
fi
Original file line number Diff line number Diff line change @@ -62,7 +62,10 @@ create_virtgpu() {
62
62
1024 );
63
63
64
64
virt_gpu_result_t result = virtgpu_open (gpu);
65
- assert (result == APIR_SUCCESS);
65
+ if (result != APIR_SUCCESS) {
66
+ FATAL (" %s: failed to open the virtgpu device :/" , __func__);
67
+ return NULL ;
68
+ }
66
69
67
70
result = virtgpu_init_params (gpu);
68
71
assert (result == APIR_SUCCESS);
@@ -126,7 +129,7 @@ virtgpu_open(struct virtgpu *gpu)
126
129
drmDevicePtr devs[8 ];
127
130
int count = drmGetDevices2 (0 , devs, ARRAY_SIZE (devs));
128
131
if (count < 0 ) {
129
- ERROR (" failed to enumerate DRM devices" );
132
+ ERROR (" %s: failed to enumerate DRM devices" , __func__ );
130
133
return APIR_ERROR_INITIALIZATION_FAILED;
131
134
}
132
135
Original file line number Diff line number Diff line change @@ -18,6 +18,20 @@ set -x
18
18
if [[ " ${BENCH_MODE:- } " == " server" ]]; then
19
19
cat << EOF
20
20
###
21
+ ### Running llama-server
22
+ ###
23
+
24
+ EOF
25
+ $prefix \
26
+ $LLAMA_BUILD_DIR /bin/llama-server \
27
+ --host 0.0.0.0 \
28
+ --port 8080 \
29
+ --model " $MODEL_HOME /$MODEL " \
30
+ --n-gpu-layers 99 \
31
+ --threads 1
32
+ elif [[ " ${BENCH_MODE:- } " == " bench" ]]; then
33
+ cat << EOF
34
+ ###
21
35
### Running llama-bench
22
36
###
23
37
You can’t perform that action at this time.
0 commit comments