# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
    ckpt_dir: str,
    tokenizer_path: str,
    temperature: float = 0.6,
    top_p: float = 0.9,
    max_seq_len: int = 512,
    max_batch_size: int = 4,
    max_gen_len: Optional[int] = None,
):
    """Run an interactive, single-turn chat loop against a Llama 2 checkpoint.

    Each user question is sent as an independent one-message dialog; no
    conversation history is carried between turns.

    Args:
        ckpt_dir: Directory containing the model checkpoint files.
        tokenizer_path: Path to the tokenizer model file.
        temperature: Sampling temperature passed to generation.
        top_p: Nucleus-sampling probability threshold.
        max_seq_len: Maximum sequence length used when building the model.
        max_batch_size: Maximum batch size used when building the model.
        max_gen_len: Optional cap on generated tokens; None lets the model decide.
    """
    generator = Llama.build(
        ckpt_dir=ckpt_dir,
        tokenizer_path=tokenizer_path,
        max_seq_len=max_seq_len,
        max_batch_size=max_batch_size,
    )
    # Single user message that is overwritten each turn; previous answers are
    # NOT fed back to the model.
    dialogs = [
        [{"role": "user", "content": "Are you ready to answer question?"}],
    ]
    # Hard cap of 100 questions per session; "bye" exits early.
    for _ in range(100):
        answer = input("Input Question\n")
        print(f"Your Question {answer}")
        if answer == "bye":
            print("LLAMA 2: So long!\n")
            break
        dialogs[0][0]["content"] = answer
        results = generator.chat_completion(
            dialogs,  # type: ignore
            max_gen_len=max_gen_len,
            temperature=temperature,
            top_p=top_p,
        )
        for dialog, result in zip(dialogs, results):
            for msg in dialog:
                print(f"{msg['role'].capitalize()}: {msg['content']}\n")
            print(
                f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
            )
            print("\n==================================\n")
# Entry point: python-fire exposes main()'s keyword arguments as CLI flags.
if __name__ == "__main__":
    fire.Fire(main)
接著我發現電腦的 ChatGPT 窗口還沒關, 順便就問它, “Can you resolve a sudoku?”, 然後奇妙的事情發生了. 我知道 ChatGPT 會寫程式, 但我只是隨“口”問問, 它就把程式寫好了. 看來不只是可以幫我們交訓練課程的心得報告而已. 連我們以後上機考試要更嚴格防弊了….哈!
Yes, a Sudoku puzzle can be solved using algorithms such as backtracking, constraint propagation, and search. Here’s an example of a simple backtracking solution in Python:
# Example one-shot request against the legacy OpenAI Completion endpoint.
import os
import openai

# Read the API key from the environment instead of hard-coding it.
openai.api_key = os.getenv("OPENAI_API_KEY")

# Conversation prefix that primes the model to respond as a friendly assistant.
_prompt = "The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.\n\nHuman: Hello, who are you?\nAI: I am an AI created by OpenAI. How can I help you today?\nHuman: I'd like to cancel my subscription.\nAI:"

response = openai.Completion.create(
    model="text-davinci-003",
    prompt=_prompt,
    temperature=0.9,
    max_tokens=150,
    top_p=1,
    frequency_penalty=0.0,
    presence_penalty=0.6,
    # Stop once the model begins the next conversational turn.
    stop=[" Human:", " AI:"],
)