mirror of https://github.com/huggingface/text-generation-inference.git
synced 2025-04-21 14:52:20 +00:00
misc(offline): link correctly
This commit is contained in:
parent 0c1dd0ed2b
commit dbc5b7a0f7
@@ -2,7 +2,6 @@
 // Created by mfuntowicz on 10/3/24.
 //
 
 #include <string_view>
 #include <fmt/color.h>
 #include <fmt/format.h>
 #include <fmt/std.h>
@@ -23,13 +22,14 @@ int main(int argc, char** argv) {
     const auto prompt = "My name is Morgan";
 
     const auto modelPath = absolute(std::filesystem::path(argv[1]));
-    if (auto maybeBackend = CreateLlamaCppBackend(modelPath); maybeBackend.has_value()) {
+    if (auto maybeBackend = TgiLlamaCppBackend::FromGGUF(modelPath); maybeBackend.has_value()) {
         // Retrieve the backend
-        const auto& backend = *maybeBackend;
+        auto [model, context] = *maybeBackend;
+        auto backend = TgiLlamaCppBackend(model, context);
 
         // Generate
-        const auto promptTokens = backend->Tokenize(prompt);
-        const auto out = backend->Generate(promptTokens, 30, 1.0, 2.0, 0.0, 32);
+        const auto promptTokens = backend.Tokenize(prompt);
+        const auto out = backend.Generate(promptTokens, 30, 1.0, 2.0, 0.0, 32);
 
         if (out.has_value())
             fmt::print(FMT_STRING("Generated: {}"), *out);
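For readability, here is a rough sketch of the offline example as it reads after this commit, assembled from the new ("+") side of the hunks above. The include for the backend header and the argc guard are assumptions added to make the sketch self-contained; everything else mirrors the diff.

#include <filesystem>
#include <string_view>
#include <fmt/color.h>
#include <fmt/format.h>
#include <fmt/std.h>
#include "../csrc/backend.hpp" // assumed location of TgiLlamaCppBackend

int main(int argc, char** argv) {
    if (argc < 2) return 1; // assumed guard: expects the GGUF model path as argv[1]

    const auto prompt = "My name is Morgan";
    const auto modelPath = absolute(std::filesystem::path(argv[1]));

    // FromGGUF replaces the former CreateLlamaCppBackend free function and
    // returns the (model, context) pair the backend is now built from.
    if (auto maybeBackend = TgiLlamaCppBackend::FromGGUF(modelPath); maybeBackend.has_value()) {
        auto [model, context] = *maybeBackend;
        auto backend = TgiLlamaCppBackend(model, context);

        // backend is now a value rather than a pointer, hence '.' instead of '->'.
        const auto promptTokens = backend.Tokenize(prompt);
        const auto out = backend.Generate(promptTokens, 30, 1.0, 2.0, 0.0, 32);

        if (out.has_value())
            fmt::print(FMT_STRING("Generated: {}"), *out);
    }
    return 0;
}

Constructing the backend by value at the call site, from the (model, context) pair that FromGGUF returns, is what turns the previous pointer-style backend->... calls into the backend.... calls seen in the hunk.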