Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-04-20 06:12:07 +00:00
* Attempt at automatic max batch prefill.
* Taking into account the number of shards.
* Adding more cards.
* Adding A100 + H100.
* Adding a few more cards.
* Logprobs cost too much.
* Better name for h100, and keep the factor of 2.
* Damn inflated sparse tflops.
* Fix typo in h100.
* Updated the flops calculation (checked with fvcore; see the sketch below).
* Chunking by default.
* Fix prefix caching for chat completion since we removed logprobs.
* More tests.
* Dropping all the prefill logprobs.
* Add a flag that enables users to get logprobs back.
* Repairing prompt token counting.
* Fixing a few tests.
* Remove some scaffolding.
* Attempting to reduce the issues (workarounds for now).
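One bullet above mentions that the flops calculation was checked against fvcore. As a minimal sketch of what such a cross-check can look like (the linear layer, shapes, and sizes below are illustrative assumptions, not the actual text-generation-inference model): fvcore's FlopCountAnalysis counts one flop per multiply-accumulate, so an analytic 2 * m * n * k matmul estimate should land at roughly twice fvcore's total, which is where the "factor of 2" in the commit message comes from.

# Sketch: cross-check an analytic flops estimate against fvcore.
# The layer and shapes are illustrative assumptions, not TGI's model.
import torch
from fvcore.nn import FlopCountAnalysis

batch, seq, hidden = 1, 128, 4096
layer = torch.nn.Linear(hidden, hidden, bias=False)
x = torch.randn(batch, seq, hidden)

# A dense matmul costs about 2 * m * n * k flops (one multiply plus one
# add per weight). fvcore counts multiply-accumulates, i.e. half of that.
analytic_flops = 2 * batch * seq * hidden * hidden
macs = FlopCountAnalysis(layer, x).total()

print(f"analytic flops: {analytic_flops:,}")
print(f"fvcore MACs:    {macs:,} (x2 = {2 * macs:,} flops)")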
27 lines · 728 B · Python
import json


def main():
    with open("./ShareGPT_V3_unfiltered_cleaned_split.json", "r") as f:
        data = json.load(f)

    # Select only the first 2000 conversations that start with a human turn.
    max_conversations = 2000
    conversations = []
    for conversation in data:
        conv = conversation.get("conversations")
        if conv and conv[0]["from"] == "human":
            # Keep only the opening human prompt; drop the remaining turns.
            conversation["conversations"] = conv[:1]
            conversations.append(conversation)

        # Stop once enough conversations have been collected.
        if len(conversations) >= max_conversations:
            break

    with open("./small.json", "w") as f:
        json.dump(conversations, f, indent=4)


if __name__ == "__main__":
    main()
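Run the script from the directory containing ShareGPT_V3_unfiltered_cleaned_split.json (the unfiltered ShareGPT dump); it writes the trimmed single-prompt conversations to ./small.json in the same directory.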