From 0337f608107f2ce3ba403135e832cf7237db3f1a Mon Sep 17 00:00:00 2001
From: wangguochun <wangguochun@kylinos.cn>
Date: Fri, 16 Aug 2024 03:33:01 +0000
Subject: [PATCH] Merge commit from fork
cherry pick: https://github.com/ggerganov/llama.cpp/commit/b72942fac998672a79a1ae3c03b340f7e629980b

This hardens the RPC backend: `rpc-server` now defaults to 127.0.0.1 and warns when bound to any other host, the host address is validated before binding, tensor data received over RPC is bounds-checked against its backing buffer, and the tensor type is validated in `ggml_new_tensor_impl`.
---
examples/rpc/README.md | 4 ++++
examples/rpc/rpc-server.cpp | 13 ++++++++++++-
ggml-rpc.cpp | 36 +++++++++++++++++++++++++++++++++++-
ggml.c | 3 ++-
4 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/examples/rpc/README.md b/examples/rpc/README.md
index eeec71a..9941547 100644
--- a/examples/rpc/README.md
+++ b/examples/rpc/README.md
@@ -1,5 +1,9 @@
## Overview
+> [!IMPORTANT]
+> This example and the RPC backend are currently in a proof-of-concept development stage. As such, the functionality is fragile and
+> insecure. **Never run the RPC server on an open network or in a sensitive environment!**
+
The `rpc-server` allows running `ggml` backend on a remote host.
The RPC backend communicates with one or several instances of `rpc-server` and offloads computations to them.
This can be used for distributed LLM inference with `llama.cpp` in the following way:
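For context on the client side of this protocol, here is a minimal sketch, assuming the `ggml_backend_rpc_init(const char * endpoint)` entry point declared in this tree's `ggml-rpc.h` (the endpoint string is an illustrative example):

```cpp
// Minimal client-side sketch (assumes ggml_backend_rpc_init() from ggml-rpc.h):
// connect to one rpc-server instance and release the backend when done.
#include "ggml-rpc.h"
#include <cstdio>

int main() {
    // endpoint format is "<host>:<port>"; keep it on a trusted, private network
    ggml_backend_t backend = ggml_backend_rpc_init("127.0.0.1:50052");
    if (backend == nullptr) {
        fprintf(stderr, "failed to connect to rpc-server\n");
        return 1;
    }
    // ... allocate buffers and offload graph computation as with any backend ...
    ggml_backend_free(backend);
    return 0;
}
```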
diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp
index 7c15d2a..6342e64 100644
--- a/examples/rpc/rpc-server.cpp
+++ b/examples/rpc/rpc-server.cpp
@@ -16,7 +16,7 @@
#include <stdio.h>
struct rpc_server_params {
- std::string host = "0.0.0.0";
+ std::string host = "127.0.0.1";
int port = 50052;
size_t backend_mem = 0;
};
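The default matters because `0.0.0.0` is the IPv4 wildcard: binding to it accepts connections on every interface of the machine, while `127.0.0.1` restricts the server to loopback traffic from the same host. With this change an operator must opt in explicitly before the unauthenticated RPC port becomes reachable from the network.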
@@ -114,6 +114,17 @@ int main(int argc, char * argv[]) {
fprintf(stderr, "Invalid parameters\n");
return 1;
}
+
+ if (params.host != "127.0.0.1") {
+ fprintf(stderr, "\n");
+ fprintf(stderr, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
+ fprintf(stderr, "WARNING: Host ('%s') is != '127.0.0.1'\n", params.host.c_str());
+ fprintf(stderr, " Never expose the RPC server to an open network!\n");
+ fprintf(stderr, " This is an experimental feature and is not secure!\n");
+ fprintf(stderr, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
+ fprintf(stderr, "\n");
+ }
+
ggml_backend_t backend = create_backend();
if (!backend) {
fprintf(stderr, "Failed to create backend\n");
diff --git a/ggml-rpc.cpp b/ggml-rpc.cpp
index 49a20df..dc90e17 100644
--- a/ggml-rpc.cpp
+++ b/ggml-rpc.cpp
@@ -193,6 +193,10 @@ static std::shared_ptr<socket_t> create_server_socket(const char * host, int por
fprintf(stderr, "Failed to set SO_REUSEADDR\n");
return nullptr;
}
+ if (inet_addr(host) == INADDR_NONE) {
+ fprintf(stderr, "Invalid host address: %s\n", host);
+ return nullptr;
+ }
struct sockaddr_in serv_addr;
serv_addr.sin_family = AF_INET;
serv_addr.sin_addr.s_addr = inet_addr(host);
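One caveat worth noting about the added validation: `inet_addr()` signals a parse failure by returning `INADDR_NONE`, but that value is numerically identical to the valid limited-broadcast address `255.255.255.255`, so the check above also rejects that address. A self-contained sketch (POSIX headers assumed) of the unambiguous alternative, `inet_pton()`:

```cpp
// Illustrative sketch: inet_pton() separates "parse failed" from
// "parsed to 255.255.255.255", which inet_addr() cannot distinguish.
#include <arpa/inet.h>
#include <cstdio>

static bool is_valid_ipv4(const char * host) {
    struct in_addr addr;
    return inet_pton(AF_INET, host, &addr) == 1; // 1 means successfully parsed
}

int main() {
    printf("%d\n", is_valid_ipv4("127.0.0.1"));       // 1
    printf("%d\n", is_valid_ipv4("not-an-address"));  // 0
    printf("%d\n", is_valid_ipv4("255.255.255.255")); // 1; inet_addr() flags this as INADDR_NONE
    return 0;
}
```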
@@ -875,6 +879,14 @@ ggml_tensor * rpc_server::deserialize_tensor(struct ggml_context * ctx, const rp
if (result->buffer && buffers.find(result->buffer) == buffers.end()) {
return nullptr;
}
+
+ // require that the tensor data does not go beyond the buffer end
+ uint64_t tensor_size = (uint64_t) ggml_nbytes(result);
+ uint64_t buffer_start = (uint64_t) ggml_backend_buffer_get_base(result->buffer);
+ uint64_t buffer_size = (uint64_t) ggml_backend_buffer_get_size(result->buffer);
+ GGML_ASSERT(tensor->data + tensor_size >= tensor->data); // check for overflow
+ GGML_ASSERT(tensor->data >= buffer_start && tensor->data + tensor_size <= buffer_start + buffer_size);
+
result->op = (ggml_op) tensor->op;
for (uint32_t i = 0; i < GGML_MAX_OP_PARAMS / sizeof(int32_t); i++) {
result->op_params[i] = tensor->op_params[i];
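The first assertion is the standard unsigned-overflow idiom: `uint64_t` arithmetic wraps modulo 2^64, so `a + b` overflows exactly when the sum lands below `a`. A self-contained sketch of the idiom:

```cpp
// Sketch of the wraparound check used above: unsigned addition wraps modulo
// 2^64, so the sum drops below either operand exactly when it overflows.
#include <cassert>
#include <cstdint>

static bool add_overflows_u64(uint64_t a, uint64_t b) {
    return a + b < a;
}

int main() {
    assert(!add_overflows_u64(100, 200));
    assert(add_overflows_u64(UINT64_MAX, 1));      // wraps to 0
    assert(add_overflows_u64(UINT64_MAX - 4, 10)); // wraps to 5
    return 0;
}
```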
@@ -894,7 +906,7 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
const rpc_tensor * in_tensor = (const rpc_tensor *)input.data();
uint64_t offset;
memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset));
- size_t size = input.size() - sizeof(rpc_tensor) - sizeof(offset);
+ const size_t size = input.size() - sizeof(rpc_tensor) - sizeof(offset);
struct ggml_init_params params {
/*.mem_size =*/ ggml_tensor_overhead(),
@@ -909,6 +921,17 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
return false;
}
GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu\n", __func__, (void*)tensor->buffer, tensor->data, offset, size);
+
+ // sanitize tensor->data
+ {
+ const size_t p0 = (size_t) ggml_backend_buffer_get_base(tensor->buffer);
+ const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer);
+
+ if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size > (p1 - in_tensor->data - offset)) {
+ GGML_ABORT("[%s] tensor->data out of bounds\n", __func__);
+ }
+ }
+
const void * data = input.data() + sizeof(rpc_tensor) + sizeof(offset);
ggml_backend_tensor_set(tensor, data, offset, size);
ggml_free(ctx);
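The three-clause condition encodes a single invariant: the client-supplied region `[data + offset, data + offset + size)` must lie entirely inside the backing buffer `[p0, p1)`. A hypothetical helper (`region_in_buffer` is not in the patch) expressing the same check:

```cpp
// Hypothetical helper mirroring the patch's bounds check: the region
// [start, start + size) must sit entirely inside the buffer [p0, p1).
#include <cstdint>

static bool region_in_buffer(uint64_t start, uint64_t size, uint64_t p0, uint64_t p1) {
    if (start < p0 || start >= p1) {
        return false;          // region begins outside the buffer
    }
    return size <= p1 - start; // subtraction is safe once start < p1 holds
}
```

For example, with `p0 = 0x1000` and `p1 = 0x2000`, a request with `start = 0x1ff0` and `size = 0x100` passes the first two clauses but fails the size check (only 0x10 bytes remain), which is exactly the out-of-bounds write the unpatched code would have performed.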
@@ -939,6 +962,17 @@ bool rpc_server::get_tensor(const std::vector<uint8_t> & input, std::vector<uint
return false;
}
GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, offset, size);
+
+ // sanitize tensor->data
+ {
+ const size_t p0 = (size_t) ggml_backend_buffer_get_base(tensor->buffer);
+ const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer);
+
+ if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size > (p1 - in_tensor->data - offset)) {
+ GGML_ABORT("[%s] tensor->data out of bounds\n", __func__);
+ }
+ }
+
// output serialization format: | data (size bytes) |
output.resize(size, 0);
ggml_backend_tensor_get(tensor, output.data(), offset, size);
diff --git a/ggml.c b/ggml.c
index 7680363..e70d075 100644
--- a/ggml.c
+++ b/ggml.c
@@ -3577,7 +3577,8 @@ static struct ggml_tensor * ggml_new_tensor_impl(
struct ggml_tensor * view_src,
size_t view_offs) {
- assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
+ GGML_ASSERT(type >= 0 && type < GGML_TYPE_COUNT);
+ GGML_ASSERT(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
// find the base tensor and absolute offset
if (view_src != NULL && view_src->view_src != NULL) {
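The ggml.c change follows the same theme: `tensor->type` arrives over the wire as a raw integer in `deserialize_tensor` and is later used to index per-type lookup tables, so an unchecked cast lets a malicious value read far out of bounds. A generic sketch of the checked-cast pattern (the enum here is illustrative, not ggml's):

```cpp
// Illustrative checked-cast pattern: range-check untrusted integers before
// using them as enum values or lookup-table indices.
#include <cstdint>

enum class wire_type : int32_t { F32, F16, COUNT };

static bool parse_wire_type(int32_t raw, wire_type & out) {
    if (raw < 0 || raw >= static_cast<int32_t>(wire_type::COUNT)) {
        return false; // reject instead of indexing a per-type table out of bounds
    }
    out = static_cast<wire_type>(raw);
    return true;
}
```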
--
2.43.0