error LNK2019 #10735
lightrattle asked this question in Q&A
When I try to build a test project for llama.cpp in VS2022, the solution that includes ggml-cpu does not build properly; the build output is as follows:
1>ggml-cpu-aarch64.obj : error LNK2019: unresolved external symbol "struct ggml_backend_reg * __cdecl ggml_backend_cpu_reg(void)" (?ggml_backend_cpu_reg@@YAPEAUggml_backend_reg@@XZ) referenced in function "struct ggml_backend_buffer_type * __cdecl ggml_backend_cpu_aarch64_buffer_type(void)" (?ggml_backend_cpu_aarch64_buffer_type@@YAPEAUggml_backend_buffer_type@@XZ)
1>ggml-cpu-traits.obj : error LNK2019: unresolved external symbol "class std::vector<struct ggml_backend_buffer_type *,class std::allocator<struct ggml_backend_buffer_type *> > & __cdecl ggml_backend_cpu_get_extra_buffers_type(void)" (?ggml_backend_cpu_get_extra_buffers_type@@YAAEAV?$vector@PEAUggml_backend_buffer_type@@V?$allocator@PEAUggml_backend_buffer_type@@@std@@@std@@XZ) referenced in function ggml_cpu_extra_compute_forward
1>C:\Users\35558\source\WinGPT\x64\Debug\WinGPT.exe : fatal error LNK1120: 2 unresolved externals
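For reference, here is a minimal, self-contained file that repeats the declaration exactly as it is printed in the first error, so it should hit the same unresolved symbol unless the library or object file that defines ggml_backend_cpu_reg is part of the link. This is only a sketch for isolating the failure, not part of the project:

#include <cstdio>

// Opaque forward declaration, matching the type named in the error message.
struct ggml_backend_reg;

// The same C++-linkage declaration the linker is trying to resolve for ggml-cpu-aarch64.obj.
struct ggml_backend_reg * ggml_backend_cpu_reg(void);

int main() {
    // If this links and runs, the symbol is being provided by the link inputs.
    std::printf("cpu backend reg: %p\n", static_cast<void *>(ggml_backend_cpu_reg()));
    return 0;
}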
The main.cpp file that uses the llama and ggml APIs contains the following:
#pragma warning(disable: 4996)
#if defined(_WIN32)
#include <windows.h>
#else
#include <unistd.h>
#endif
#include <cstdio>
#include <cstring>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include "llama/llama-cpp.h"
typedef std::unique_ptr<char[]> char_array_ptr;
struct Argument {
std::string flag;
std::string help_text;
};
struct Options {
std::string model_path, prompt_non_interactive;
int ngl = 99;
int n_ctx = 2048;
};
class ArgumentParser {
public:
ArgumentParser(const char* program_name) : program_name(program_name) {}
private:
const char* program_name;
std::unordered_map<std::string, std::string*> string_args;
std::unordered_map<std::string, int*> int_args;
std::vector<Argument> arguments;
};
class LlamaData {
public:
llama_model_ptr model;
llama_sampler_ptr sampler;
llama_context_ptr context;
std::vector<llama_chat_message> messages;
private:
// Initializes the model and returns a unique pointer to it
llama_model_ptr initialize_model(const std::string& model_path, const int ngl) {
llama_model_params model_params = llama_model_default_params();
model_params.n_gpu_layers = ngl;
// load the model from disk; an empty pointer signals failure to the caller
llama_model_ptr model(llama_load_model_from_file(model_path.c_str(), model_params));
if (!model) {
fprintf(stderr, "%s: error: unable to load model\n", __func__);
}
return model;
}
};
// Add a message to `messages` and store its content in `owned_content`
static void add_message(const char* role, const std::string& text, LlamaData& llama_data,
std::vector<char_array_ptr>& owned_content) {
char_array_ptr content(new char[text.size() + 1]);
std::strcpy(content.get(), text.c_str());
llama_data.messages.push_back({ role, content.get() });
owned_content.push_back(std::move(content));
}
// Function to apply the chat template and resize `formatted` if needed
static int apply_chat_template(const LlamaData& llama_data, std::vector<char>& formatted, const bool append) {
int result = llama_chat_apply_template(llama_data.model.get(), nullptr, llama_data.messages.data(),
llama_data.messages.size(), append, formatted.data(), formatted.size());
if (result > static_cast<int>(formatted.size())) {
formatted.resize(result);
result = llama_chat_apply_template(llama_data.model.get(), nullptr, llama_data.messages.data(),
llama_data.messages.size(), append, formatted.data(), formatted.size());
}
return result;
}
// Function to tokenize the prompt
static int tokenize_prompt(const llama_model_ptr& model, const std::string& prompt,
std::vector<llama_token>& prompt_tokens) {
const int n_prompt_tokens = -llama_tokenize(model.get(), prompt.c_str(), prompt.size(), NULL, 0, true, true); // with a NULL output buffer, llama_tokenize returns the negative of the required token count
prompt_tokens.resize(n_prompt_tokens);
if (llama_tokenize(model.get(), prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true,
true) < 0) {
GGML_ABORT("failed to tokenize the prompt\n");
}
return n_prompt_tokens;
}
// Check if we have enough space in the context to evaluate this batch
static int check_context_size(const llama_context_ptr& ctx, const llama_batch& batch) {
const int n_ctx = llama_n_ctx(ctx.get());
const int n_ctx_used = llama_get_kv_cache_used_cells(ctx.get());
if (n_ctx_used + batch.n_tokens > n_ctx) {
printf("\033[0m\n");
fprintf(stderr, "context size exceeded\n");
return 1;
}
return 0;
}
// convert the token to a string
static int convert_token_to_string(const llama_model_ptr& model, const llama_token token_id, std::string& piece) {
char buf[256];
int n = llama_token_to_piece(model.get(), token_id, buf, sizeof(buf), 0, true);
if (n < 0) {
GGML_ABORT("failed to convert token to piece\n");
}
piece = std::string(buf, n);
return 0;
}
static void print_word_and_concatenate_to_response(const std::string& piece, std::string& response) {
printf("%s", piece.c_str());
fflush(stdout);
response += piece;
}
// helper function to evaluate a prompt and generate a response
static int generate(LlamaData& llama_data, const std::string& prompt, std::string& response) {
std::vector<llama_token> prompt_tokens;
const int n_prompt_tokens = tokenize_prompt(llama_data.model, prompt, prompt_tokens);
if (n_prompt_tokens < 0) {
return 1;
}
}
static int parse_arguments(const int argc, const char** argv, Options& opt) {
// Pass the arguments to the custom ArgumentParser class
ArgumentParser parser(argv[0]);
}
static int read_user_input(std::string& user) {
std::getline(std::cin, user);
return user.empty(); // Indicate an error or empty input
}
// Function to generate a response based on the prompt
static int generate_response(LlamaData& llama_data, const std::string& prompt, std::string& response) {
// Set response color
printf("\033[33m");
if (generate(llama_data, prompt, response)) {
fprintf(stderr, "failed to generate response\n");
return 1;
}
// reset the response color and end the line
printf("\n\033[0m");
return 0;
}
// Helper function to apply the chat template and handle errors
static int apply_chat_template_with_error_handling(const LlamaData& llama_data, std::vector<char>& formatted,
const bool is_user_input, int& output_length) {
const int new_len = apply_chat_template(llama_data, formatted, is_user_input);
if (new_len < 0) {
fprintf(stderr, "failed to apply the chat template\n");
return -1;
}
output_length = new_len;
return 0;
}
// Helper function to handle user input
static bool handle_user_input(std::string& user_input, const std::string& prompt_non_interactive) {
if (!prompt_non_interactive.empty()) {
user_input = prompt_non_interactive;
return true; // No need for interactive input
}
// otherwise read the input interactively
return !read_user_input(user_input);
}
// Main chat loop: read user input and generate responses
static int chat_loop(LlamaData& llama_data, std::string& prompt_non_interactive) {
std::vector<char_array_ptr> owned_content;
std::vector<char> fmtted(llama_n_ctx(llama_data.context.get()));
int prev_len = 0;
}
static void log_callback(const enum ggml_log_level level, const char* text, void*) {
if (level == GGML_LOG_LEVEL_ERROR) {
fprintf(stderr, "%s", text);
}
}
static bool is_stdin_a_terminal() {
#if defined(_WIN32)
HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
DWORD mode;
return GetConsoleMode(hStdin, &mode);
#else
return isatty(STDIN_FILENO);
#endif
}
static std::string read_pipe_data() {
std::ostringstream result;
result << std::cin.rdbuf(); // Read all data from std::cin
return result.str();
}
int main() {
Options opt;
// Preset values for the model file and the prompt are set here:
// number of arguments
const int paramCount = 5;
// create an array of character pointers
const char* parameters[paramCount] = {
"./main.cpp",
"-m",
"model/model-llama3-8b.gguf",//模型文件相对路径
"-p",
""//提示语文本
};
}
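The part of main that actually consumes parameters is not shown above. Assuming it is meant to go through parse_arguments like the other helpers, the continuation inside main would look roughly like this (a sketch of the intent, not the code that was built):

// Hypothetical continuation of main(): hand the argv-style array to the parser
// and fall back to piped input when stdin is not an interactive terminal.
if (parse_arguments(paramCount, parameters, opt)) {
    return 1;
}
if (!is_stdin_a_terminal()) {
    opt.prompt_non_interactive = read_pipe_data();
}
return 0;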