Mirror of https://github.com/huggingface/text-generation-inference.git
Synced 2025-04-21 06:42:10 +00:00
Adding Idefics multi modal model. (#842)
Co-Authored-By: Victor Sanh <victorsanh@gmail.com>

# What does this PR do?

Fixes # (issue)

## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?

## Who can review?

Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.

Potential reviewers: @OlivierDehaene or @Narsil

---------

Co-authored-by: Victor Sanh <victorsanh@gmail.com>
This commit is contained in:
parent b9e33c4953
commit bce5e22444
@ -0,0 +1,168 @@
{
  "details": {
    "best_of_sequences": null,
    "finish_reason": "length",
    "generated_tokens": 10,
    "prefill": [
      { "id": 1, "logprob": null, "text": "<s>" },
      { "id": 4911, "logprob": -5.3632812, "text": "User" },
      { "id": 29901, "logprob": -0.00762558, "text": ":" },
      { "id": 32000, "logprob": -0.7739258, "text": "<fake_token_around_image>" },
      { "id": 32001, "logprob": -9.775162e-05, "text": "<image>" },
      { "id": 32000, "logprob": -1.1920929e-07, "text": "<fake_token_around_image>" },
      { "id": 1815, "logprob": -4.4140625, "text": "Can" },
      { "id": 366, "logprob": -0.01436615, "text": "you" },
      { "id": 2649, "logprob": -4.9414062, "text": "tell" },
      { "id": 592, "logprob": -0.3005371, "text": "me" },
      { "id": 263, "logprob": -3.5703125, "text": "a" },
      { "id": 1407, "logprob": -9.4296875, "text": "very" },
      { "id": 3273, "logprob": -1.9111328, "text": "short" },
      { "id": 5828, "logprob": -0.28881836, "text": "story" },
      { "id": 2729, "logprob": -3.4179688, "text": "based" },
      { "id": 373, "logprob": -0.00056886673, "text": "on" },
      { "id": 278, "logprob": -0.14123535, "text": "the" },
      { "id": 1967, "logprob": -0.053985596, "text": "image" },
      { "id": 29973, "logprob": -0.15771484, "text": "?" }
    ],
    "seed": null,
    "tokens": [
      { "id": 32002, "logprob": -0.004295349, "special": true, "text": "<end_of_utterance>" },
      { "id": 29871, "logprob": -7.43866e-05, "special": false, "text": " " },
      { "id": 13, "logprob": -2.3126602e-05, "special": false, "text": "\n" },
      { "id": 7900, "logprob": -3.9339066e-06, "special": false, "text": "Ass" },
      { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" },
      { "id": 29901, "logprob": -2.6226044e-06, "special": false, "text": ":" },
      { "id": 319, "logprob": -0.87841797, "special": false, "text": " A" },
      { "id": 521, "logprob": -1.3837891, "special": false, "text": " ch" },
      { "id": 21475, "logprob": -0.00051641464, "special": false, "text": "icken" },
      { "id": 338, "logprob": -1.1435547, "special": false, "text": " is" }
    ]
  },
  "generated_text": "\nAssistant: A chicken is"
}
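The snapshot above is a serialized `generate` response: `details.prefill` holds the prompt tokens with their logprobs (the `<fake_token_around_image>` / `<image>` specials mark where Idefics splices in the image features), and `details.tokens` holds the ten generated tokens. As a rough illustration of the request shape behind such a response, here is a minimal sketch assuming a locally running TGI server and the `text_generation` Python client; the endpoint and image URLs are placeholders, and with Idefics the image is normally referenced inline in the prompt as a markdown-style image link:

```python
# Minimal sketch (not the integration-test harness): query a local TGI server
# serving HuggingFaceM4/idefics-9b-instruct. Endpoint and image URL are placeholders.
import asyncio

from text_generation import AsyncClient


async def main() -> None:
    client = AsyncClient("http://127.0.0.1:8080")  # assumed local endpoint
    image_url = "https://example.com/chicken.png"  # placeholder image link
    response = await client.generate(
        f"User:![]({image_url})Can you tell me a very short story based on the image?",
        max_new_tokens=10,           # matches "generated_tokens": 10 above
        decoder_input_details=True,  # asks the server to return the "prefill" list
    )
    print(response.generated_text)   # e.g. "\nAssistant: A chicken is"


asyncio.run(main())
```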
@ -0,0 +1,674 @@
[
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 1, "logprob": null, "text": "<s>" },
        { "id": 4911, "logprob": -5.3476562, "text": "User" },
        { "id": 29901, "logprob": -0.0075531006, "text": ":" },
        { "id": 32000, "logprob": -0.7729492, "text": "<fake_token_around_image>" },
        { "id": 32001, "logprob": -9.787083e-05, "text": "<image>" },
        { "id": 32000, "logprob": -1.1920929e-07, "text": "<fake_token_around_image>" },
        { "id": 1815, "logprob": -4.4296875, "text": "Can" },
        { "id": 366, "logprob": -0.01424408, "text": "you" },
        { "id": 2649, "logprob": -4.9335938, "text": "tell" },
        { "id": 592, "logprob": -0.2993164, "text": "me" },
        { "id": 263, "logprob": -3.5664062, "text": "a" },
        { "id": 1407, "logprob": -9.4453125, "text": "very" },
        { "id": 3273, "logprob": -1.9306641, "text": "short" },
        { "id": 5828, "logprob": -0.2836914, "text": "story" },
        { "id": 2729, "logprob": -3.4179688, "text": "based" },
        { "id": 373, "logprob": -0.00056934357, "text": "on" },
        { "id": 278, "logprob": -0.13928223, "text": "the" },
        { "id": 1967, "logprob": -0.05355835, "text": "image" },
        { "id": 29973, "logprob": -0.15771484, "text": "?" }
      ],
      "seed": null,
      "tokens": [
        { "id": 32002, "logprob": -0.004333496, "special": true, "text": "<end_of_utterance>" },
        { "id": 29871, "logprob": -7.426739e-05, "special": false, "text": " " },
        { "id": 13, "logprob": -2.348423e-05, "special": false, "text": "\n" },
        { "id": 7900, "logprob": -3.9339066e-06, "special": false, "text": "Ass" },
        { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" },
        { "id": 29901, "logprob": -2.861023e-06, "special": false, "text": ":" },
        { "id": 319, "logprob": -0.8828125, "special": false, "text": " A" },
        { "id": 521, "logprob": -1.3759766, "special": false, "text": " ch" },
        { "id": 21475, "logprob": -0.0005083084, "special": false, "text": "icken" },
        { "id": 338, "logprob": -1.1367188, "special": false, "text": " is" }
      ]
    },
    "generated_text": "\nAssistant: A chicken is"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 1, "logprob": null, "text": "<s>" },
        { "id": 4911, "logprob": -5.3476562, "text": "User" },
        { "id": 29901, "logprob": -0.0075531006, "text": ":" },
        { "id": 32000, "logprob": -0.7729492, "text": "<fake_token_around_image>" },
        { "id": 32001, "logprob": -9.787083e-05, "text": "<image>" },
        { "id": 32000, "logprob": -1.1920929e-07, "text": "<fake_token_around_image>" },
        { "id": 1815, "logprob": -4.4296875, "text": "Can" },
        { "id": 366, "logprob": -0.01423645, "text": "you" },
        { "id": 2649, "logprob": -4.9335938, "text": "tell" },
        { "id": 592, "logprob": -0.2993164, "text": "me" },
        { "id": 263, "logprob": -3.5664062, "text": "a" },
        { "id": 1407, "logprob": -9.4453125, "text": "very" },
        { "id": 3273, "logprob": -1.9306641, "text": "short" },
        { "id": 5828, "logprob": -0.2836914, "text": "story" },
        { "id": 2729, "logprob": -3.4179688, "text": "based" },
        { "id": 373, "logprob": -0.00056934357, "text": "on" },
        { "id": 278, "logprob": -0.13928223, "text": "the" },
        { "id": 1967, "logprob": -0.05355835, "text": "image" },
        { "id": 29973, "logprob": -0.15771484, "text": "?" }
      ],
      "seed": null,
      "tokens": [
        { "id": 32002, "logprob": -0.004333496, "special": true, "text": "<end_of_utterance>" },
        { "id": 29871, "logprob": -7.4505806e-05, "special": false, "text": " " },
        { "id": 13, "logprob": -2.3722649e-05, "special": false, "text": "\n" },
        { "id": 7900, "logprob": -3.9339066e-06, "special": false, "text": "Ass" },
        { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" },
        { "id": 29901, "logprob": -2.861023e-06, "special": false, "text": ":" },
        { "id": 319, "logprob": -0.8828125, "special": false, "text": " A" },
        { "id": 521, "logprob": -1.3759766, "special": false, "text": " ch" },
        { "id": 21475, "logprob": -0.00050878525, "special": false, "text": "icken" },
        { "id": 338, "logprob": -1.1367188, "special": false, "text": " is" }
      ]
    },
    "generated_text": "\nAssistant: A chicken is"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 1, "logprob": null, "text": "<s>" },
        { "id": 4911, "logprob": -5.3476562, "text": "User" },
        { "id": 29901, "logprob": -0.0075531006, "text": ":" },
        { "id": 32000, "logprob": -0.7729492, "text": "<fake_token_around_image>" },
        { "id": 32001, "logprob": -9.775162e-05, "text": "<image>" },
        { "id": 32000, "logprob": -1.1920929e-07, "text": "<fake_token_around_image>" },
        { "id": 1815, "logprob": -4.4296875, "text": "Can" },
        { "id": 366, "logprob": -0.01424408, "text": "you" },
        { "id": 2649, "logprob": -4.9335938, "text": "tell" },
        { "id": 592, "logprob": -0.2993164, "text": "me" },
        { "id": 263, "logprob": -3.5664062, "text": "a" },
        { "id": 1407, "logprob": -9.4453125, "text": "very" },
        { "id": 3273, "logprob": -1.9306641, "text": "short" },
        { "id": 5828, "logprob": -0.2836914, "text": "story" },
        { "id": 2729, "logprob": -3.4179688, "text": "based" },
        { "id": 373, "logprob": -0.00056934357, "text": "on" },
        { "id": 278, "logprob": -0.13928223, "text": "the" },
        { "id": 1967, "logprob": -0.05355835, "text": "image" },
        { "id": 29973, "logprob": -0.15771484, "text": "?" }
      ],
      "seed": null,
      "tokens": [
        { "id": 32002, "logprob": -0.004333496, "special": true, "text": "<end_of_utterance>" },
        { "id": 29871, "logprob": -7.43866e-05, "special": false, "text": " " },
        { "id": 13, "logprob": -2.360344e-05, "special": false, "text": "\n" },
        { "id": 7900, "logprob": -3.9339066e-06, "special": false, "text": "Ass" },
        { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" },
        { "id": 29901, "logprob": -2.7418137e-06, "special": false, "text": ":" },
        { "id": 319, "logprob": -0.8828125, "special": false, "text": " A" },
        { "id": 521, "logprob": -1.3759766, "special": false, "text": " ch" },
        { "id": 21475, "logprob": -0.00050878525, "special": false, "text": "icken" },
        { "id": 338, "logprob": -1.1367188, "special": false, "text": " is" }
      ]
    },
    "generated_text": "\nAssistant: A chicken is"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 1, "logprob": null, "text": "<s>" },
        { "id": 4911, "logprob": -5.3632812, "text": "User" },
        { "id": 29901, "logprob": -0.00762558, "text": ":" },
        { "id": 32000, "logprob": -0.7739258, "text": "<fake_token_around_image>" },
        { "id": 32001, "logprob": -9.775162e-05, "text": "<image>" },
        { "id": 32000, "logprob": -1.1920929e-07, "text": "<fake_token_around_image>" },
        { "id": 1815, "logprob": -4.4140625, "text": "Can" },
        { "id": 366, "logprob": -0.01436615, "text": "you" },
        { "id": 2649, "logprob": -4.9414062, "text": "tell" },
        { "id": 592, "logprob": -0.3005371, "text": "me" },
        { "id": 263, "logprob": -3.5703125, "text": "a" },
        { "id": 1407, "logprob": -9.4296875, "text": "very" },
        { "id": 3273, "logprob": -1.9111328, "text": "short" },
        { "id": 5828, "logprob": -0.28881836, "text": "story" },
        { "id": 2729, "logprob": -3.4179688, "text": "based" },
        { "id": 373, "logprob": -0.00056886673, "text": "on" },
        { "id": 278, "logprob": -0.14123535, "text": "the" },
        { "id": 1967, "logprob": -0.053985596, "text": "image" },
        { "id": 29973, "logprob": -0.15771484, "text": "?" }
      ],
      "seed": null,
      "tokens": [
        { "id": 32002, "logprob": -0.004295349, "special": true, "text": "<end_of_utterance>" },
        { "id": 29871, "logprob": -7.43866e-05, "special": false, "text": " " },
        { "id": 13, "logprob": -2.3126602e-05, "special": false, "text": "\n" },
        { "id": 7900, "logprob": -3.9339066e-06, "special": false, "text": "Ass" },
        { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" },
        { "id": 29901, "logprob": -2.6226044e-06, "special": false, "text": ":" },
        { "id": 319, "logprob": -0.87841797, "special": false, "text": " A" },
        { "id": 521, "logprob": -1.3837891, "special": false, "text": " ch" },
        { "id": 21475, "logprob": -0.00051641464, "special": false, "text": "icken" },
        { "id": 338, "logprob": -1.1435547, "special": false, "text": " is" }
      ]
    },
    "generated_text": "\nAssistant: A chicken is"
  }
]
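This second snapshot holds four near-identical responses, which is what the load test below produces: the same prompt is sent four times concurrently, every response is expected to decode to the same text, and only the logprobs drift slightly due to batching. A hedged sketch of issuing such concurrent requests directly with `asyncio.gather`, again assuming the `text_generation` client and a placeholder endpoint:

```python
# Sketch of a load-style check (not the repo's generate_load helper): send the
# same request four times concurrently and verify the generations agree.
import asyncio

from text_generation import AsyncClient


async def main() -> None:
    client = AsyncClient("http://127.0.0.1:8080")  # placeholder endpoint
    prompt = "User:Can you tell me a very short story based on the image?"
    responses = await asyncio.gather(
        *(client.generate(prompt, max_new_tokens=10) for _ in range(4))
    )
    texts = [r.generated_text for r in responses]
    # Logprobs may differ slightly between batched runs; the decoded text should not.
    assert all(text == texts[0] for text in texts)


asyncio.run(main())
```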
46 integration-tests/models/test_idefics.py (new file)
@ -0,0 +1,46 @@
import pytest


@pytest.fixture(scope="module")
def idefics_handle(launcher):
    with launcher(
        "HuggingFaceM4/idefics-9b-instruct", num_shard=2
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def idefics(idefics_handle):
    await idefics_handle.health(300)
    return idefics_handle.client


@pytest.mark.asyncio
async def test_idefics(idefics, response_snapshot):
    response = await idefics.generate(
        "User:Can you tell me a very short story based on the image?",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_idefics_load(idefics, generate_load, response_snapshot):
    responses = await generate_load(
        idefics,
        "User:Can you tell me a very short story based on the image?",
        max_new_tokens=10,
        n=4,
    )

    generated_texts = [r.generated_text for r in responses]

    assert len(generated_texts) == 4
    # All four concurrent generations should decode to the same text.
    assert all(text == generated_texts[0] for text in generated_texts)

    assert responses == response_snapshot
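The `launcher`, `generate_load`, and `response_snapshot` fixtures come from the integration-test harness (its conftest), so the test file itself stays short: `launcher(...)` brings up a text-generation-inference server for the given model and shard count, and `health(300)` waits up to 300 seconds for the shards to load. Below is a hedged, simplified sketch of what a launcher-style helper does conceptually; the real fixture is more involved (it can also run the server in Docker), and only the standard `text-generation-launcher` flags and `/health` route are assumed here:

```python
# Simplified, hypothetical launcher-style helper; not the actual conftest fixture.
import contextlib
import subprocess
import time

import requests  # assumed available in the test environment


@contextlib.contextmanager
def local_launcher(model_id: str, num_shard: int = 1, port: int = 8080):
    """Start text-generation-launcher for `model_id` and yield once it is up."""
    process = subprocess.Popen(
        [
            "text-generation-launcher",
            "--model-id", model_id,
            "--num-shard", str(num_shard),
            "--port", str(port),
        ]
    )
    try:
        # Poll /health until the shards have loaded (cf. `health(300)` above).
        for _ in range(300):
            with contextlib.suppress(requests.ConnectionError):
                if requests.get(f"http://127.0.0.1:{port}/health").status_code == 200:
                    break
            time.sleep(1)
        yield f"http://127.0.0.1:{port}"
    finally:
        process.terminate()
        process.wait()
```

With a helper like this, the test above reduces to opening the context manager, pointing a client at the yielded URL, and comparing the responses against the stored snapshots.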
639 server/poetry.lock (generated)

The diff regenerates server/poetry.lock; each version bump below also updates the corresponding sdist/wheel sha256 hashes in the package's `files` list:

@ -152,13 +152,13 @@
- async-timeout: 4.0.2 → 4.0.3 (python-versions ">=3.6" → ">=3.7")

@ -310,35 +310,6 @@
- cmake 3.27.0: package entry removed entirely (including its wheel list and "test" extras)

@ -352,13 +323,13 @@
- datasets: 2.14.3 → 2.14.4

@ -436,13 +407,13 @@
- exceptiongroup: 1.1.2 → 1.1.3

@ -608,148 +579,148 @@
- grpcio: 1.56.2 → 1.57.0 (the "protobuf" extra now requires "grpcio-tools (>=1.57.0)")
- grpcio-reflection: 1.56.2 → 1.57.0 (dependency bumped to grpcio >=1.57.0)
- grpcio-status: 1.56.2 → 1.57.0 (dependency bumped to grpcio >=1.57.0)
- grpcio-tools: 1.56.2 → 1.57.0 (wheel list updated; the hunk is truncated at this point in the page)
|
{file = "grpcio_tools-1.57.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2b417c97936d94874a3ce7ed8deab910f2233e3612134507cfee4af8735c38a6"},
|
||||||
{file = "grpcio_tools-1.56.2-cp39-cp39-win32.whl", hash = "sha256:506d00a86950adf4017395551a4547c0b7fcefa90e4c220135fc3e34e31be81b"},
|
{file = "grpcio_tools-1.57.0-cp39-cp39-win32.whl", hash = "sha256:f717cce5093e6b6049d9ea6d12fdf3658efdb1a80772f7737db1f8510b876df6"},
|
||||||
{file = "grpcio_tools-1.56.2-cp39-cp39-win_amd64.whl", hash = "sha256:8da04f033b8f4c597e8fc990e2f626bad2b269227bdd554592ea618f624f1aa9"},
|
{file = "grpcio_tools-1.57.0-cp39-cp39-win_amd64.whl", hash = "sha256:1c0e8a1a32973a5d59fbcc19232f925e5c48116e9411f788033a31c5ca5130b4"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
grpcio = ">=1.56.2"
|
grpcio = ">=1.57.0"
|
||||||
protobuf = ">=4.21.6,<5.0dev"
|
protobuf = ">=4.21.6,<5.0dev"
|
||||||
setuptools = "*"
|
setuptools = "*"
|
||||||
|
|
||||||
@@ -851,16 +822,6 @@ MarkupSafe = ">=2.0"
 [package.extras]
 i18n = ["Babel (>=2.7)"]
 
-[[package]]
-name = "lit"
-version = "16.0.6"
-description = "A Software Testing Tool"
-optional = false
-python-versions = "*"
-files = [
-    {file = "lit-16.0.6.tar.gz", hash = "sha256:84623c9c23b6b14763d637f4e63e6b721b3446ada40bf7001d8fee70b8e77a9a"},
-]
-
 [[package]]
 name = "loguru"
 version = "0.6.0"
@@ -1377,6 +1338,75 @@ docs-specific = ["hf-doc-builder"]
quality = ["black (>=22.0,<23.0)", "ruff (>=0.0.241)", "urllib3 (<=2.0.0)"]
|
quality = ["black (>=22.0,<23.0)", "ruff (>=0.0.241)", "urllib3 (<=2.0.0)"]
|
||||||
test = ["black (>=22.0,<23.0)", "datasets", "diffusers", "hf-doc-builder", "parameterized", "pytest", "pytest-cov", "pytest-xdist", "ruff (>=0.0.241)", "urllib3 (<=2.0.0)"]
|
test = ["black (>=22.0,<23.0)", "datasets", "diffusers", "hf-doc-builder", "parameterized", "pytest", "pytest-cov", "pytest-xdist", "ruff (>=0.0.241)", "urllib3 (<=2.0.0)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pillow"
|
||||||
|
version = "10.0.0"
|
||||||
|
description = "Python Imaging Library (Fork)"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.8"
|
||||||
|
files = [
|
||||||
|
{file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"},
|
||||||
|
{file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"},
|
||||||
|
{file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"},
|
||||||
|
{file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"},
|
||||||
|
{file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"},
|
||||||
|
{file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"},
|
||||||
|
{file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"},
|
||||||
|
{file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"},
|
||||||
|
{file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"},
|
||||||
|
{file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"},
|
||||||
|
{file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"},
|
||||||
|
{file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"},
|
||||||
|
{file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"},
|
||||||
|
{file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"},
|
||||||
|
{file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"},
|
||||||
|
{file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"},
|
||||||
|
{file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"},
|
||||||
|
{file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"},
|
||||||
|
{file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"},
|
||||||
|
{file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"},
|
||||||
|
{file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"},
|
||||||
|
{file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"},
|
||||||
|
{file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"},
|
||||||
|
{file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"},
|
||||||
|
{file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"},
|
||||||
|
{file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"},
|
||||||
|
{file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"},
|
||||||
|
{file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"},
|
||||||
|
{file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"},
|
||||||
|
{file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"},
|
||||||
|
{file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"},
|
||||||
|
{file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"},
|
||||||
|
{file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"},
|
||||||
|
{file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"},
|
||||||
|
{file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"},
|
||||||
|
{file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"},
|
||||||
|
{file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"},
|
||||||
|
{file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"},
|
||||||
|
{file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"},
|
||||||
|
{file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"},
|
||||||
|
{file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"},
|
||||||
|
{file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"},
|
||||||
|
{file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"},
|
||||||
|
{file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"},
|
||||||
|
{file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"},
|
||||||
|
{file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"},
|
||||||
|
{file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"},
|
||||||
|
{file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"},
|
||||||
|
{file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"},
|
||||||
|
{file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"},
|
||||||
|
{file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"},
|
||||||
|
{file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"},
|
||||||
|
{file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"},
|
||||||
|
{file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"},
|
||||||
|
{file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"},
|
||||||
|
{file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
|
||||||
|
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "pluggy"
|
name = "pluggy"
|
||||||
version = "1.2.0"
|
version = "1.2.0"
|
||||||
@@ -1394,24 +1424,24 @@ testing = ["pytest", "pytest-benchmark"]
 
 [[package]]
 name = "protobuf"
-version = "4.23.4"
+version = "4.24.0"
 description = ""
 optional = false
 python-versions = ">=3.7"
 files = [
[protobuf wheel and sdist hash entries updated from 4.23.4 to 4.24.0]
 ]
 
 [[package]]
@@ -1575,99 +1605,99 @@ files = [
 
 [[package]]
 name = "regex"
-version = "2023.6.3"
+version = "2023.8.8"
 description = "Alternative regular expression module, to replace re."
 optional = false
 python-versions = ">=3.6"
 files = [
[regex wheel and sdist hash entries updated from 2023.6.3 to 2023.8.8 for cp310–cp39]
 ]
 
 [[package]]
@@ -1700,24 +1730,42 @@ python-versions = "*"
 files = [
{file = "safetensors-0.3.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b6a66989075c2891d743153e8ba9ca84ee7232c8539704488f454199b8b8f84d"},
|
{file = "safetensors-0.3.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b6a66989075c2891d743153e8ba9ca84ee7232c8539704488f454199b8b8f84d"},
|
||||||
{file = "safetensors-0.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:670d6bc3a3b377278ce2971fa7c36ebc0a35041c4ea23b9df750a39380800195"},
|
{file = "safetensors-0.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:670d6bc3a3b377278ce2971fa7c36ebc0a35041c4ea23b9df750a39380800195"},
|
||||||
|
{file = "safetensors-0.3.2-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:564f42838721925b5313ae864ba6caa6f4c80a9fbe63cf24310c3be98ab013cd"},
|
||||||
|
{file = "safetensors-0.3.2-cp310-cp310-macosx_13_0_x86_64.whl", hash = "sha256:7f80af7e4ab3188daaff12d43d078da3017a90d732d38d7af4eb08b6ca2198a5"},
|
||||||
|
{file = "safetensors-0.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec30d78f20f1235b252d59cbb9755beb35a1fde8c24c89b3c98e6a1804cfd432"},
|
||||||
|
{file = "safetensors-0.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16063d94d8f600768d3c331b1e97964b1bf3772e19710105fe24ec5a6af63770"},
|
||||||
{file = "safetensors-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb44e140bf2aeda98d9dde669dbec15f7b77f96a9274469b91a6cf4bcc5ec3b"},
|
{file = "safetensors-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb44e140bf2aeda98d9dde669dbec15f7b77f96a9274469b91a6cf4bcc5ec3b"},
|
||||||
{file = "safetensors-0.3.2-cp310-cp310-win32.whl", hash = "sha256:2961c1243fd0da46aa6a1c835305cc4595486f8ac64632a604d0eb5f2de76175"},
|
{file = "safetensors-0.3.2-cp310-cp310-win32.whl", hash = "sha256:2961c1243fd0da46aa6a1c835305cc4595486f8ac64632a604d0eb5f2de76175"},
|
||||||
{file = "safetensors-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c813920482c337d1424d306e1b05824a38e3ef94303748a0a287dea7a8c4f805"},
|
{file = "safetensors-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c813920482c337d1424d306e1b05824a38e3ef94303748a0a287dea7a8c4f805"},
|
||||||
{file = "safetensors-0.3.2-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:707df34bd9b9047e97332136ad98e57028faeccdb9cfe1c3b52aba5964cc24bf"},
|
{file = "safetensors-0.3.2-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:707df34bd9b9047e97332136ad98e57028faeccdb9cfe1c3b52aba5964cc24bf"},
|
||||||
{file = "safetensors-0.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:becc5bb85b2947eae20ed23b407ebfd5277d9a560f90381fe2c42e6c043677ba"},
|
{file = "safetensors-0.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:becc5bb85b2947eae20ed23b407ebfd5277d9a560f90381fe2c42e6c043677ba"},
|
||||||
|
{file = "safetensors-0.3.2-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:30a75707be5cc9686490bde14b9a371cede4af53244ea72b340cfbabfffdf58a"},
|
||||||
|
{file = "safetensors-0.3.2-cp311-cp311-macosx_13_0_universal2.whl", hash = "sha256:54ad6af663e15e2b99e2ea3280981b7514485df72ba6d014dc22dae7ba6a5e6c"},
|
||||||
|
{file = "safetensors-0.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37764b3197656ef507a266c453e909a3477dabc795962b38e3ad28226f53153b"},
|
||||||
|
{file = "safetensors-0.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4939067736783acd8391d83cd97d6c202f94181951ce697d519f9746381b6a39"},
|
||||||
{file = "safetensors-0.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada0fac127ff8fb04834da5c6d85a8077e6a1c9180a11251d96f8068db922a17"},
|
{file = "safetensors-0.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada0fac127ff8fb04834da5c6d85a8077e6a1c9180a11251d96f8068db922a17"},
|
||||||
{file = "safetensors-0.3.2-cp311-cp311-win32.whl", hash = "sha256:155b82dbe2b0ebff18cde3f76b42b6d9470296e92561ef1a282004d449fa2b4c"},
|
{file = "safetensors-0.3.2-cp311-cp311-win32.whl", hash = "sha256:155b82dbe2b0ebff18cde3f76b42b6d9470296e92561ef1a282004d449fa2b4c"},
|
||||||
{file = "safetensors-0.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:a86428d196959619ce90197731be9391b5098b35100a7228ef4643957648f7f5"},
|
{file = "safetensors-0.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:a86428d196959619ce90197731be9391b5098b35100a7228ef4643957648f7f5"},
|
||||||
{file = "safetensors-0.3.2-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:c1f8ab41ed735c5b581f451fd15d9602ff51aa88044bfa933c5fa4b1d0c644d1"},
|
{file = "safetensors-0.3.2-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:c1f8ab41ed735c5b581f451fd15d9602ff51aa88044bfa933c5fa4b1d0c644d1"},
|
||||||
|
{file = "safetensors-0.3.2-cp37-cp37m-macosx_13_0_x86_64.whl", hash = "sha256:bc9cfb3c9ea2aec89685b4d656f9f2296f0f0d67ecf2bebf950870e3be89b3db"},
|
||||||
|
{file = "safetensors-0.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ace5d471e3d78e0d93f952707d808b5ab5eac77ddb034ceb702e602e9acf2be9"},
|
||||||
|
{file = "safetensors-0.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de3e20a388b444381bcda1a3193cce51825ddca277e4cf3ed1fe8d9b2d5722cd"},
|
||||||
{file = "safetensors-0.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d7d70d48585fe8df00725aa788f2e64fd24a4c9ae07cd6be34f6859d0f89a9c"},
|
{file = "safetensors-0.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d7d70d48585fe8df00725aa788f2e64fd24a4c9ae07cd6be34f6859d0f89a9c"},
|
||||||
{file = "safetensors-0.3.2-cp37-cp37m-win32.whl", hash = "sha256:6ff59bc90cdc857f68b1023be9085fda6202bbe7f2fd67d06af8f976d6adcc10"},
|
{file = "safetensors-0.3.2-cp37-cp37m-win32.whl", hash = "sha256:6ff59bc90cdc857f68b1023be9085fda6202bbe7f2fd67d06af8f976d6adcc10"},
|
||||||
{file = "safetensors-0.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8b05c93da15fa911763a89281906ca333ed800ab0ef1c7ce53317aa1a2322f19"},
|
{file = "safetensors-0.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8b05c93da15fa911763a89281906ca333ed800ab0ef1c7ce53317aa1a2322f19"},
|
||||||
{file = "safetensors-0.3.2-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:8969cfd9e8d904e8d3c67c989e1bd9a95e3cc8980d4f95e4dcd43c299bb94253"},
|
{file = "safetensors-0.3.2-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:8969cfd9e8d904e8d3c67c989e1bd9a95e3cc8980d4f95e4dcd43c299bb94253"},
|
||||||
|
{file = "safetensors-0.3.2-cp38-cp38-macosx_13_0_x86_64.whl", hash = "sha256:f54148ac027556eb02187e9bc1556c4d916c99ca3cb34ca36a7d304d675035c1"},
|
||||||
|
{file = "safetensors-0.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caec25fedbcf73f66c9261984f07885680f71417fc173f52279276c7f8a5edd3"},
|
||||||
|
{file = "safetensors-0.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:50224a1d99927ccf3b75e27c3d412f7043280431ab100b4f08aad470c37cf99a"},
|
||||||
{file = "safetensors-0.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa98f49e95f02eb750d32c4947e7d5aa43883149ebd0414920866446525b70f0"},
|
{file = "safetensors-0.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa98f49e95f02eb750d32c4947e7d5aa43883149ebd0414920866446525b70f0"},
|
||||||
{file = "safetensors-0.3.2-cp38-cp38-win32.whl", hash = "sha256:33409df5e28a83dc5cc5547a3ac17c0f1b13a1847b1eb3bc4b3be0df9915171e"},
|
{file = "safetensors-0.3.2-cp38-cp38-win32.whl", hash = "sha256:33409df5e28a83dc5cc5547a3ac17c0f1b13a1847b1eb3bc4b3be0df9915171e"},
|
||||||
{file = "safetensors-0.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:e04a7cbbb3856159ab99e3adb14521544f65fcb8548cce773a1435a0f8d78d27"},
|
{file = "safetensors-0.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:e04a7cbbb3856159ab99e3adb14521544f65fcb8548cce773a1435a0f8d78d27"},
|
||||||
{file = "safetensors-0.3.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:7c864cf5dcbfb608c5378f83319c60cc9c97263343b57c02756b7613cd5ab4dd"},
|
{file = "safetensors-0.3.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:7c864cf5dcbfb608c5378f83319c60cc9c97263343b57c02756b7613cd5ab4dd"},
|
||||||
{file = "safetensors-0.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:14e8c19d6dc51d4f70ee33c46aff04c8ba3f95812e74daf8036c24bc86e75cae"},
|
{file = "safetensors-0.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:14e8c19d6dc51d4f70ee33c46aff04c8ba3f95812e74daf8036c24bc86e75cae"},
|
||||||
|
{file = "safetensors-0.3.2-cp39-cp39-macosx_13_0_arm64.whl", hash = "sha256:042a60f633c3c7009fdf6a7c182b165cb7283649d2a1e9c7a4a1c23454bd9a5b"},
|
||||||
|
{file = "safetensors-0.3.2-cp39-cp39-macosx_13_0_x86_64.whl", hash = "sha256:fafd95e5ef41e8f312e2a32b7031f7b9b2a621b255f867b221f94bb2e9f51ae8"},
|
||||||
|
{file = "safetensors-0.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ed77cf358abce2307f03634694e0b2a29822e322a1623e0b1aa4b41e871bf8b"},
|
||||||
|
{file = "safetensors-0.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d344e8b2681a33aafc197c90b0def3229b3317d749531c72fa6259d0caa5c8c"},
|
||||||
{file = "safetensors-0.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87ff0024ef2e5722a79af24688ce4a430f70601d0cf712a744105ed4b8f67ba5"},
|
{file = "safetensors-0.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87ff0024ef2e5722a79af24688ce4a430f70601d0cf712a744105ed4b8f67ba5"},
|
||||||
{file = "safetensors-0.3.2-cp39-cp39-win32.whl", hash = "sha256:827af9478b78977248ba93e2fd97ea307fb63f463f80cef4824460f8c2542a52"},
|
{file = "safetensors-0.3.2-cp39-cp39-win32.whl", hash = "sha256:827af9478b78977248ba93e2fd97ea307fb63f463f80cef4824460f8c2542a52"},
|
||||||
{file = "safetensors-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:9b09f27c456efa301f98681ea14b12f81f2637889f6336223ccab71e42c34541"},
|
{file = "safetensors-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:9b09f27c456efa301f98681ea14b12f81f2637889f6336223ccab71e42c34541"},
|
||||||
@ -1945,19 +1993,31 @@ files = [
[[package]]
name = "torch"
-version = "2.0.1+cu118"
+version = "2.0.1"
description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
optional = false
python-versions = ">=3.8.0"
files = [
-    {file = "torch-2.0.1+cu118-cp310-cp310-linux_x86_64.whl", hash = "sha256:a7a49d459bf4862f64f7bc1a68beccf8881c2fa9f3e0569608e16ba6f85ebf7b"},
-    {file = "torch-2.0.1+cu118-cp310-cp310-win_amd64.whl", hash = "sha256:f58d75619bc96e4322343c030b893613701caa2d6db8017155da226c14171335"},
-    {file = "torch-2.0.1+cu118-cp311-cp311-linux_x86_64.whl", hash = "sha256:143b6c658c17d43376e2dfbaa2c106d35639d615e5e8dec4429cf1e510dd8d61"},
-    {file = "torch-2.0.1+cu118-cp311-cp311-win_amd64.whl", hash = "sha256:b663a4ee744d574095dbd612644de345944247c0605692309fd9f6c7ccdea022"},
-    {file = "torch-2.0.1+cu118-cp38-cp38-linux_x86_64.whl", hash = "sha256:2ce38a6e4ea7c4b7f5baa51e65243a5f687f6e19ab7915ba5b2a431105f50bbe"},
-    {file = "torch-2.0.1+cu118-cp38-cp38-win_amd64.whl", hash = "sha256:e58d26a11bd57ac19761c018c3151c15bc71d068afc8ec409bfd9b4cfcc63a52"},
-    {file = "torch-2.0.1+cu118-cp39-cp39-linux_x86_64.whl", hash = "sha256:eb55f29db5744eda8a96f5594e637daed0d52278273005de759970e67cfa6a5a"},
-    {file = "torch-2.0.1+cu118-cp39-cp39-win_amd64.whl", hash = "sha256:fa225b6f941ee0e78978ac85ed7744d3c19fff462473821f8060c14faa60043e"},
+    {file = "torch-2.0.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8ced00b3ba471856b993822508f77c98f48a458623596a4c43136158781e306a"},
+    {file = "torch-2.0.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:359bfaad94d1cda02ab775dc1cc386d585712329bb47b8741607ef6ef4950747"},
+    {file = "torch-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:7c84e44d9002182edd859f3400deaa7410f5ec948a519cc7ef512c2f9b34d2c4"},
+    {file = "torch-2.0.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:567f84d657edc5582d716900543e6e62353dbe275e61cdc36eda4929e46df9e7"},
+    {file = "torch-2.0.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:787b5a78aa7917465e9b96399b883920c88a08f4eb63b5a5d2d1a16e27d2f89b"},
+    {file = "torch-2.0.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e617b1d0abaf6ced02dbb9486803abfef0d581609b09641b34fa315c9c40766d"},
+    {file = "torch-2.0.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b6019b1de4978e96daa21d6a3ebb41e88a0b474898fe251fd96189587408873e"},
+    {file = "torch-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:dbd68cbd1cd9da32fe5d294dd3411509b3d841baecb780b38b3b7b06c7754434"},
+    {file = "torch-2.0.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:ef654427d91600129864644e35deea761fb1fe131710180b952a6f2e2207075e"},
+    {file = "torch-2.0.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:25aa43ca80dcdf32f13da04c503ec7afdf8e77e3a0183dd85cd3e53b2842e527"},
+    {file = "torch-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5ef3ea3d25441d3957348f7e99c7824d33798258a2bf5f0f0277cbcadad2e20d"},
+    {file = "torch-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:0882243755ff28895e8e6dc6bc26ebcf5aa0911ed81b2a12f241fc4b09075b13"},
+    {file = "torch-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:f66aa6b9580a22b04d0af54fcd042f52406a8479e2b6a550e3d9f95963e168c8"},
+    {file = "torch-2.0.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:1adb60d369f2650cac8e9a95b1d5758e25d526a34808f7448d0bd599e4ae9072"},
+    {file = "torch-2.0.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:1bcffc16b89e296826b33b98db5166f990e3b72654a2b90673e817b16c50e32b"},
+    {file = "torch-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e10e1597f2175365285db1b24019eb6f04d53dcd626c735fc502f1e8b6be9875"},
+    {file = "torch-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:423e0ae257b756bb45a4b49072046772d1ad0c592265c5080070e0767da4e490"},
+    {file = "torch-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8742bdc62946c93f75ff92da00e3803216c6cce9b132fbca69664ca38cfb3e18"},
+    {file = "torch-2.0.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:c62df99352bd6ee5a5a8d1832452110435d178b5164de450831a3a8cc14dc680"},
+    {file = "torch-2.0.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:671a2565e3f63b8fe8e42ae3e36ad249fe5e567435ea27b94edaa672a7d0c416"},
]

[package.dependencies]
@ -1965,33 +2025,27 @@ filelock = "*"
jinja2 = "*"
|
jinja2 = "*"
|
||||||
networkx = "*"
|
networkx = "*"
|
||||||
sympy = "*"
|
sympy = "*"
|
||||||
triton = {version = "2.0.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
|
|
||||||
typing-extensions = "*"
|
typing-extensions = "*"
|
||||||
|
|
||||||
[package.extras]
|
[package.extras]
|
||||||
opt-einsum = ["opt-einsum (>=3.3)"]
|
opt-einsum = ["opt-einsum (>=3.3)"]
|
||||||
|
|
||||||
[package.source]
|
|
||||||
type = "legacy"
|
|
||||||
url = "https://download.pytorch.org/whl/cu118"
|
|
||||||
reference = "pytorch-gpu-src"
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tqdm"
|
name = "tqdm"
|
||||||
version = "4.65.0"
|
version = "4.66.1"
|
||||||
description = "Fast, Extensible Progress Meter"
|
description = "Fast, Extensible Progress Meter"
|
||||||
optional = false
|
optional = false
|
||||||
python-versions = ">=3.7"
|
python-versions = ">=3.7"
|
||||||
files = [
|
files = [
|
||||||
{file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"},
|
{file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"},
|
||||||
{file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"},
|
{file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"},
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.dependencies]
|
[package.dependencies]
|
||||||
colorama = {version = "*", markers = "platform_system == \"Windows\""}
|
colorama = {version = "*", markers = "platform_system == \"Windows\""}
|
||||||
|
|
||||||
[package.extras]
|
[package.extras]
|
||||||
dev = ["py-make (>=0.1.0)", "twine", "wheel"]
|
dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
|
||||||
notebook = ["ipywidgets (>=6)"]
|
notebook = ["ipywidgets (>=6)"]
|
||||||
slack = ["slack-sdk"]
|
slack = ["slack-sdk"]
|
||||||
telegram = ["requests"]
|
telegram = ["requests"]
|
||||||
@ -2065,43 +2119,6 @@ torchhub = ["filelock", "huggingface-hub (>=0.14.1,<1.0)", "importlib-metadata",
video = ["av (==9.2.0)", "decord (==0.6.0)"]
|
video = ["av (==9.2.0)", "decord (==0.6.0)"]
|
||||||
vision = ["Pillow (<10.0.0)"]
|
vision = ["Pillow (<10.0.0)"]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "triton"
|
|
||||||
version = "2.0.0"
|
|
||||||
description = "A language and compiler for custom Deep Learning operations"
|
|
||||||
optional = false
|
|
||||||
python-versions = "*"
|
|
||||||
files = [
|
|
||||||
{file = "triton-2.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38806ee9663f4b0f7cd64790e96c579374089e58f49aac4a6608121aa55e2505"},
|
|
||||||
{file = "triton-2.0.0-1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:226941c7b8595219ddef59a1fdb821e8c744289a132415ddd584facedeb475b1"},
|
|
||||||
{file = "triton-2.0.0-1-cp36-cp36m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4c9fc8c89874bc48eb7e7b2107a9b8d2c0bf139778637be5bfccb09191685cfd"},
|
|
||||||
{file = "triton-2.0.0-1-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d2684b6a60b9f174f447f36f933e9a45f31db96cb723723ecd2dcfd1c57b778b"},
|
|
||||||
{file = "triton-2.0.0-1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9d4978298b74fcf59a75fe71e535c092b023088933b2f1df933ec32615e4beef"},
|
|
||||||
{file = "triton-2.0.0-1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:74f118c12b437fb2ca25e1a04759173b517582fcf4c7be11913316c764213656"},
|
|
||||||
{file = "triton-2.0.0-1-pp37-pypy37_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9618815a8da1d9157514f08f855d9e9ff92e329cd81c0305003eb9ec25cc5add"},
|
|
||||||
{file = "triton-2.0.0-1-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1aca3303629cd3136375b82cb9921727f804e47ebee27b2677fef23005c3851a"},
|
|
||||||
{file = "triton-2.0.0-1-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e3e13aa8b527c9b642e3a9defcc0fbd8ffbe1c80d8ac8c15a01692478dc64d8a"},
|
|
||||||
{file = "triton-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f05a7e64e4ca0565535e3d5d3405d7e49f9d308505bb7773d21fb26a4c008c2"},
|
|
||||||
{file = "triton-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb4b99ca3c6844066e516658541d876c28a5f6e3a852286bbc97ad57134827fd"},
|
|
||||||
{file = "triton-2.0.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47b4d70dc92fb40af553b4460492c31dc7d3a114a979ffb7a5cdedb7eb546c08"},
|
|
||||||
{file = "triton-2.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fedce6a381901b1547e0e7e1f2546e4f65dca6d91e2d8a7305a2d1f5551895be"},
|
|
||||||
{file = "triton-2.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75834f27926eab6c7f00ce73aaf1ab5bfb9bec6eb57ab7c0bfc0a23fac803b4c"},
|
|
||||||
{file = "triton-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0117722f8c2b579cd429e0bee80f7731ae05f63fe8e9414acd9a679885fcbf42"},
|
|
||||||
{file = "triton-2.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcd9be5d0c2e45d2b7e6ddc6da20112b6862d69741576f9c3dbaf941d745ecae"},
|
|
||||||
{file = "triton-2.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42a0d2c3fc2eab4ba71384f2e785fbfd47aa41ae05fa58bf12cb31dcbd0aeceb"},
|
|
||||||
{file = "triton-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c47b72c72693198163ece9d90a721299e4fb3b8e24fd13141e384ad952724f"},
|
|
||||||
]
|
|
||||||
|
|
||||||
[package.dependencies]
|
|
||||||
cmake = "*"
|
|
||||||
filelock = "*"
|
|
||||||
lit = "*"
|
|
||||||
torch = "*"
|
|
||||||
|
|
||||||
[package.extras]
|
|
||||||
tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)"]
|
|
||||||
tutorials = ["matplotlib", "pandas", "tabulate"]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "typer"
|
name = "typer"
|
||||||
version = "0.6.1"
|
version = "0.6.1"
|
||||||
@ -2448,4 +2465,4 @@ quantize = ["accelerate", "datasets", "texttable"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.13"
-content-hash = "6b6dbd9e11bc2f71cd88e2ab50adce249a722fea02e7cdcf5fe1b94c7b997a57"
+content-hash = "91a848038c08a44c67acfb4257781440ccc1e74a4b82f09513e75588fa33f72b"
@ -31,8 +31,9 @@ einops = "^0.6.1"
texttable = { version = "^1.6.7", optional = true }
datasets = { version = "^2.14.0", optional = true }
peft = "^0.4.0"
-torch = {version = "^2.0.1+cu118", source = "pytorch-gpu-src"}
+torch = { version = "^2.0.1" }
scipy = "^1.11.1"
+pillow = "^10.0.0"

[tool.poetry.extras]
accelerate = ["accelerate"]
@ -1,11 +1,8 @@
---extra-index-url https://download.pytorch.org/whl/cu118
-
accelerate==0.20.3 ; python_version >= "3.9" and python_version < "3.13"
backoff==2.2.1 ; python_version >= "3.9" and python_version < "3.13"
certifi==2023.7.22 ; python_version >= "3.9" and python_version < "3.13"
charset-normalizer==3.2.0 ; python_version >= "3.9" and python_version < "3.13"
click==8.1.6 ; python_version >= "3.9" and python_version < "3.13"
-cmake==3.27.0 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.9" and python_version < "3.13"
colorama==0.4.6 ; python_version >= "3.9" and python_version < "3.13" and (sys_platform == "win32" or platform_system == "Windows")
deprecated==1.2.14 ; python_version >= "3.9" and python_version < "3.13"
einops==0.6.1 ; python_version >= "3.9" and python_version < "3.13"
@ -13,14 +10,13 @@ filelock==3.12.2 ; python_version >= "3.9" and python_version < "3.13"
fsspec==2023.6.0 ; python_version >= "3.9" and python_version < "3.13"
googleapis-common-protos==1.60.0 ; python_version >= "3.9" and python_version < "3.13"
grpc-interceptor==0.15.2 ; python_version >= "3.9" and python_version < "3.13"
-grpcio-reflection==1.56.2 ; python_version >= "3.9" and python_version < "3.13"
+grpcio-reflection==1.57.0 ; python_version >= "3.9" and python_version < "3.13"
-grpcio-status==1.56.2 ; python_version >= "3.9" and python_version < "3.13"
+grpcio-status==1.57.0 ; python_version >= "3.9" and python_version < "3.13"
-grpcio==1.56.2 ; python_version >= "3.9" and python_version < "3.13"
+grpcio==1.57.0 ; python_version >= "3.9" and python_version < "3.13"
hf-transfer==0.1.3 ; python_version >= "3.9" and python_version < "3.13"
huggingface-hub==0.14.1 ; python_version >= "3.9" and python_version < "3.13"
idna==3.4 ; python_version >= "3.9" and python_version < "3.13"
jinja2==3.1.2 ; python_version >= "3.9" and python_version < "3.13"
-lit==16.0.6 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.9" and python_version < "3.13"
loguru==0.6.0 ; python_version >= "3.9" and python_version < "3.13"
markupsafe==2.1.3 ; python_version >= "3.9" and python_version < "3.13"
mpmath==1.3.0 ; python_version >= "3.9" and python_version < "3.13"
@ -37,10 +33,11 @@ opentelemetry-sdk==1.15.0 ; python_version >= "3.9" and python_version < "3.13"
opentelemetry-semantic-conventions==0.36b0 ; python_version >= "3.9" and python_version < "3.13"
packaging==23.1 ; python_version >= "3.9" and python_version < "3.13"
peft==0.4.0 ; python_version >= "3.9" and python_version < "3.13"
-protobuf==4.23.4 ; python_version >= "3.9" and python_version < "3.13"
+pillow==10.0.0 ; python_version >= "3.9" and python_version < "3.13"
+protobuf==4.24.0 ; python_version >= "3.9" and python_version < "3.13"
psutil==5.9.5 ; python_version >= "3.9" and python_version < "3.13"
pyyaml==6.0.1 ; python_version >= "3.9" and python_version < "3.13"
-regex==2023.6.3 ; python_version >= "3.9" and python_version < "3.13"
+regex==2023.8.8 ; python_version >= "3.9" and python_version < "3.13"
requests==2.31.0 ; python_version >= "3.9" and python_version < "3.13"
safetensors==0.3.2 ; python_version >= "3.9" and python_version < "3.13"
scipy==1.11.1 ; python_version >= "3.9" and python_version < "3.13"
@ -48,10 +45,9 @@ sentencepiece==0.1.99 ; python_version >= "3.9" and python_version < "3.13"
setuptools==68.0.0 ; python_version >= "3.9" and python_version < "3.13"
sympy==1.12 ; python_version >= "3.9" and python_version < "3.13"
tokenizers==0.13.3 ; python_version >= "3.9" and python_version < "3.13"
-torch==2.0.1+cu118 ; python_version >= "3.9" and python_version < "3.13"
+torch==2.0.1 ; python_version >= "3.9" and python_version < "3.13"
-tqdm==4.65.0 ; python_version >= "3.9" and python_version < "3.13"
+tqdm==4.66.1 ; python_version >= "3.9" and python_version < "3.13"
transformers==4.31.0 ; python_version >= "3.9" and python_version < "3.13"
-triton==2.0.0 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.9" and python_version < "3.13"
typer==0.6.1 ; python_version >= "3.9" and python_version < "3.13"
typing-extensions==4.7.1 ; python_version >= "3.9" and python_version < "3.13"
urllib3==2.0.4 ; python_version >= "3.9" and python_version < "3.13"
@ -54,6 +54,7 @@ try:
    from text_generation_server.models.flash_santacoder import (
        FlashSantacoderSharded,
    )
+    from text_generation_server.models.idefics import IDEFICSSharded

except ImportError as e:
    logger.warning(f"Could not import Flash Attention enabled models: {e}")
@ -64,6 +65,7 @@ if FLASH_ATTENTION:
    __all__.append(FlashRWSharded)
    __all__.append(FlashSantacoderSharded)
    __all__.append(FlashLlama)
+    __all__.append(IDEFICSSharded)


def get_model(
@ -248,6 +250,17 @@ def get_model(
            dtype=dtype,
            trust_remote_code=trust_remote_code,
        )
+    elif model_type == "idefics":
+        if FLASH_ATTENTION:
+            return IDEFICSSharded(
+                model_id,
+                revision,
+                quantize=quantize,
+                dtype=dtype,
+                trust_remote_code=trust_remote_code,
+            )
+        else:
+            raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics"))

    if sharded:
        raise ValueError("sharded is not supported for AutoModel")
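For orientation, here is a rough sketch of how the new branch above is exercised; it is not part of the diff. The checkpoint name is only an example from this PR, and the exact `get_model` signature is assumed from the surrounding code, so treat the call as illustrative:

# Illustrative only: routing an Idefics checkpoint through the dispatcher above.
# Assumes FLASH_ATTENTION resolved to True at import time; otherwise the new
# branch raises NotImplementedError via FLASH_ATT_ERROR_MESSAGE.
from text_generation_server.models import get_model

model = get_model(
    model_id="HuggingFaceM4/idefics-9b",  # example model id mentioned in this PR
    revision=None,
    sharded=False,
    quantize=None,
    dtype=None,
    trust_remote_code=False,
)
print(type(model).__name__)  # expected: IDEFICSSharded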
@ -0,0 +1,323 @@
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Idefics model configuration"""
import copy

from transformers import PretrainedConfig

IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "HuggingFaceM4/idefics-9b": "https://huggingface.co/HuggingFaceM4/idefics-9b/blob/main/config.json",
    "HuggingFaceM4/idefics-80b": "https://huggingface.co/HuggingFaceM4/idefics-80b/blob/main/config.json",
}


class IdeficsVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
    Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Idefics-9B.
    e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer. (elsewhere referred to as `hidden_size`)
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        image_num_channels (`int`, *optional*, defaults to `3`):
            Number of image channels.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
            testing).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """
    model_type = "idefics"
    attribute_map = {
        "hidden_size": "embed_dim",
    }

    def __init__(
        self,
        embed_dim=768,
        image_size=224,
        intermediate_size=5120,
        patch_size=14,
        num_hidden_layers=32,
        num_attention_heads=16,
        num_channels=3,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        self.embed_dim = embed_dim
        self.image_size = image_size
        self.intermediate_size = intermediate_size
        self.patch_size = patch_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.hidden_act = hidden_act

        super().__init__(**kwargs)

class IdeficsPerceiverConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
    Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Idefics-9B.
    e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        use_resampler (`bool`, *optional*, defaults to `False`):
            Whether or not to use the resampler
        resampler_n_latents (`int`, *optional*, defaults to 64):
            Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
        resampler_depth (`int`, *optional*, defaults to 6):
            Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
        resampler_n_heads (`int`, *optional*, defaults to 16):
            Number of heads in each Transformer block (for multi-headed self-attention).
        resampler_head_dim (`int`, *optional*, defaults to 96):
            Dimensionality of each head projection in the Transformer block.
        qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`):
            Whether or not to use qk layer norms in perceiver
    """
    model_type = "idefics"

    def __init__(
        self,
        use_resampler=False,
        resampler_n_latents=64,
        resampler_depth=6,
        resampler_n_heads=16,
        resampler_head_dim=96,
        qk_layer_norms_perceiver=False,
        **kwargs,
    ):
        self.use_resampler = use_resampler
        self.resampler_n_latents = resampler_n_latents
        self.resampler_depth = resampler_depth
        self.resampler_n_heads = resampler_n_heads
        self.resampler_head_dim = resampler_head_dim
        self.qk_layer_norms_perceiver = qk_layer_norms_perceiver

        super().__init__(**kwargs)

class IdeficsConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
    Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Idefics-9B.
    e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        additional_vocab_size (`int`, *optional*, defaults to 0):
            Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
            are always trainable whereas regular vocab tokens can be frozen or not.
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Idefics model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`~IdeficsModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        alpha_initializer (`str`, *optional*, defaults to `"zeros"`):
            Initialization type for the alphas.
        alphas_initializer_range (`float`, *optional*, defaults to 0.0):
            The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross
            Attention.
        alpha_type (`str`, *optional*, defaults to `"float"`):
            Whether the gating alphas should be vectors or single floats.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0)
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1)
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2)
            End of stream token id.
        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        cross_layer_interval (`int`, *optional*, default to 1)
            Interval for cross attention (from text to image) layers.
        qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k
        freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers
        freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`):
            Exceptions to freezing text layers when `freeze_text_layers` is `True`
        freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head
        freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers
        freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`):
            Exceptions to freezing vision layers when `freeze_vision_layers` is `True`
        use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler
        vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict
        perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict

    Example:
    ```python
    >>> from transformers import IdeficsModel, IdeficsConfig
    >>> # Initializing a Idefics idefics-9b style configuration
    >>> configuration = IdeficsConfig()
    >>> # Initializing a model from the idefics-9b style configuration
    >>> model = IdeficsModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "idefics"
    is_composition = True

    def __init__(
        self,
        vocab_size=32000,
        additional_vocab_size=0,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        dropout=0.0,
        hidden_act="silu",
        initializer_range=0.02,
        alpha_initializer="zeros",
        alphas_initializer_range=0.0,
        alpha_type="float",
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        cross_layer_interval=1,
        qk_layer_norms=False,
        freeze_text_layers=True,
        freeze_text_module_exceptions=[],
        freeze_lm_head=False,
        freeze_vision_layers=True,
        freeze_vision_module_exceptions=[],
        use_resampler=False,
        vision_config=None,
        perceiver_config=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.additional_vocab_size = additional_vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.alpha_initializer = alpha_initializer
        self.alphas_initializer_range = alphas_initializer_range
        self.alpha_type = alpha_type
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache

        self.cross_layer_interval = cross_layer_interval
        self.qk_layer_norms = qk_layer_norms
        self.freeze_vision_layers = freeze_vision_layers

        self.freeze_text_layers = freeze_text_layers
        self.freeze_text_module_exceptions = freeze_text_module_exceptions
        self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
        self.freeze_lm_head = freeze_lm_head

        self.use_resampler = use_resampler

        if perceiver_config is None:
            self.perceiver_config = IdeficsPerceiverConfig()
        elif isinstance(perceiver_config, dict):
            self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config)
        elif isinstance(perceiver_config, IdeficsPerceiverConfig):
            self.perceiver_config = perceiver_config

        if vision_config is None:
            self.vision_config = IdeficsVisionConfig()
        elif isinstance(vision_config, dict):
            self.vision_config = IdeficsVisionConfig(**vision_config)
        elif isinstance(vision_config, IdeficsVisionConfig):
            self.vision_config = vision_config

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

        # IMPORTANT: Do not do any __init__ args-based checks in the constructor, since
        # PretrainedConfig.from_dict first instantiates the class with the config dict and only then
        # updates the config object with `kwargs` from from_pretrained, so during the instantiation
        # of this object many attributes have default values and haven't yet been overridden.
        # Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run.

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        output = copy.deepcopy(self.__dict__)

        output["vision_config"] = self.vision_config.to_dict()
        output["perceiver_config"] = self.perceiver_config.to_dict()
        output["model_type"] = self.__class__.model_type

        return output
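As a quick check of the plumbing above (not part of the diff), nested configs can be passed either as config objects or as plain dicts and are normalized in `__init__`, while `to_dict` re-serializes them. A minimal sketch, assuming the three classes above are in scope:

# Sketch only: exercising the dict/object coercion and the custom to_dict().
config = IdeficsConfig(
    vision_config={"embed_dim": 1280, "patch_size": 14},  # dict is coerced to IdeficsVisionConfig
    perceiver_config=IdeficsPerceiverConfig(use_resampler=True, resampler_n_latents=64),
)
assert isinstance(config.vision_config, IdeficsVisionConfig)
serialized = config.to_dict()
assert serialized["model_type"] == "idefics"
assert serialized["perceiver_config"]["resampler_n_latents"] == 64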
@ -0,0 +1,264 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Idefics."""

from typing import Callable, Dict, List, Optional, Union, Iterable
import numpy as np

from PIL import Image

from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
from transformers.image_transforms import resize, to_channel_dimension_format, rescale, normalize
from transformers.image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from io import BytesIO
import requests
from transformers import TensorType, is_torch_available


IDEFICS_STANDARD_MEAN = [0.48145466, 0.4578275, 0.40821073]
IDEFICS_STANDARD_STD = [0.26862954, 0.26130258, 0.27577711]


def convert_to_rgb(image):
    # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
    # for transparent images. The call to `alpha_composite` handles this case
    if image.mode == "RGB":
        return image

    image_rgba = image.convert("RGBA")
    background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
    alpha_composite = Image.alpha_composite(background, image_rgba)
    alpha_composite = alpha_composite.convert("RGB")
    return alpha_composite
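A tiny sanity check (illustrative, not part of the diff) of why the helper composites onto a white background instead of calling `image.convert("RGB")` directly: a fully transparent pixel should flatten to white rather than to the hidden color. It assumes `convert_to_rgb` from above is in scope; the image itself is made up.

# Sketch only: a transparent RGBA image flattened onto white, as convert_to_rgb does.
from PIL import Image

rgba = Image.new("RGBA", (8, 8), (255, 0, 0, 0))  # "red", but fully transparent
flattened = convert_to_rgb(rgba)
print(flattened.mode)              # RGB
print(flattened.getpixel((0, 0)))  # (255, 255, 255): transparent areas become white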

class IdeficsImageProcessor(BaseImageProcessor):
    r"""
    Constructs an Idefics image processor.

    Args:
        image_size (`int`, *optional*, defaults to `224`):
            Resize to image size
        image_num_channels (`int`, *optional*, defaults to `3`):
            Number of image channels.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
            overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
            Can be overridden by the `image_std` parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        image_size: int = 224,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        image_num_channels: Optional[int] = 3,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.image_num_channels = image_num_channels
        self.image_mean = image_mean
        self.image_std = image_std

    def preprocess(
        self,
        images: ImageInput,
        image_num_channels: Optional[int] = 3,
        image_size: Optional[Dict[str, int]] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        transform: Callable = None,
        **kwargs,
    ) -> TensorType.PYTORCH:
        """
        Preprocess a batch of images.

        Args:
            images (`ImageInput`):
                A list of images to preprocess.
            image_size (`int`, *optional*, defaults to `self.image_size`):
                Resize to image size
            image_num_channels (`int`, *optional*, defaults to `self.image_num_channels`):
                Number of image channels.
            image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
                Mean to use if normalizing the image. This is a float or list of floats the length of the number of
                channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can
                be overridden by the `image_mean` parameter in the `preprocess` method.
            image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
                Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
                number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
                method. Can be overridden by the `image_std` parameter in the `preprocess` method.
            transform (`Callable`, *optional*, defaults to `None`):
                A custom transform function that accepts a single image can be passed for training. For example,
                `torchvision.Compose` can be used to compose multiple transforms. If `None` - an inference mode is
                assumed - and then a preset of inference-specific transforms will be applied to the images

        Returns:
            a PyTorch tensor of the processed images
        """
        image_size = image_size if image_size is not None else self.image_size
        image_num_channels = image_num_channels if image_num_channels is not None else self.image_num_channels
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = (image_size, image_size)

        if len(images) == 0:
            return []

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # For training a user needs to pass their own set of transforms as a Callable.
        # For reference this is what was used in the original IDEFICS training:
        # transform = transforms.Compose([
        #     convert_to_rgb,
        #     transforms.RandomResizedCrop((size, size), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
        #     transforms.ToTensor(),
        #     transforms.Normalize(mean=image_mean, std=image_std),
        # ])
        if transform is not None:
            if not is_torch_available():
                raise ImportError("To pass in `transform` torch must be installed")
            import torch

            images = [transform(x) for x in images]
            return torch.stack(images)

        # for inference we do the exact transforms that were used to train IDEFICS
        images = [convert_to_rgb(x) for x in images]
        # further transforms expect numpy arrays
        images = [to_numpy_array(x) for x in images]
        images = [resize(x, size, resample=PILImageResampling.BICUBIC) for x in images]
        images = [self.rescale(image=image, scale=1 / 255) for image in images]
        images = [self.normalize(x, mean=image_mean, std=image_std) for x in images]
        images = [to_channel_dimension_format(x, ChannelDimension.FIRST) for x in images]
        # TODO: this converts to torch tensors - switch to convert_to_tensors once it becomes available
        images = BatchFeature(data={"pixel_values": images}, tensor_type=TensorType.PYTORCH)["pixel_values"]

        return images

    def fetch_images(self, image_url_or_urls: Union[str, List[str]]):
        """
        Convert a single or a list of urls into the corresponding `PIL.Image` objects.
        If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
        returned.
        """
        headers = {
            "User-Agent": (
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0"
                " Safari/537.36"
            )
        }
        if isinstance(image_url_or_urls, list):
            return [self.fetch_images(x) for x in image_url_or_urls]
        elif isinstance(image_url_or_urls, str):
            response = requests.get(image_url_or_urls, stream=True, headers=headers)
            response.raise_for_status()
            return Image.open(BytesIO(response.content))
        else:
            raise ValueError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}")

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Rescale an image by a scale factor. image = image * scale.

        Args:
            image (`np.ndarray`):
                Image to rescale.
            scale (`float`):
                The scaling factor to rescale pixel values by.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.

        Returns:
            `np.ndarray`: The rescaled image.
        """
        # return rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs)
        # requires 4.32
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Normalize an image. image = (image - image_mean) / image_std.

        Args:
            image (`np.ndarray`):
                Image to normalize.
            mean (`float` or `Iterable[float]`):
                Image mean to use for normalization.
            std (`float` or `Iterable[float]`):
                Image standard deviation to use for normalization.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.

        Returns:
            `np.ndarray`: The normalized image.
        """
        # TODO 4.32
        return normalize(
            image, mean=mean, std=std, data_format=data_format, **kwargs
        )


import transformers
transformers.IdeficsImageProcessor = IdeficsImageProcessor
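End-to-end, the processor is meant to be used roughly as follows. This is a hedged sketch rather than part of the diff: the URL is a placeholder and the class is assumed to be in scope (e.g. run in the same module).

# Sketch only: fetch an image by URL and turn it into a pixel_values tensor.
processor = IdeficsImageProcessor(image_size=224)
image = processor.fetch_images("https://example.com/cat.png")  # placeholder URL
pixel_values = processor.preprocess([image])
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224]) with the inference transforms above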
File diff suppressed because it is too large
@ -0,0 +1,246 @@
# This code was adapted from https://github.com/lucidrains/flamingo-pytorch licensed under the MIT License.
#
# MIT License
#
# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


"""
Generic interface to various configurations of the Perceiver Resampler, that simply takes in a series of (potentially
time-indexed) contextual embeddings, and "resamples" (compresses) them down to a pre-specified number of latents! Note
that the Perceiver in general resamples based solely off the *long-range* context; there's a nice opportunity here to
prime the Perceiver Resampler with say a single layer's worth of language embeddings (the target domain), and use that
to softly "retrieve & compress" what we need --> this would be a novel contribution we should explore.

References:
    - DeepMind's Flamingo: https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model
    - Code borrowed w/ love from: https://github.com/lucidrains/flamingo-pytorch

"""
from typing import Optional, Tuple

import torch
import torch.nn as nn

from text_generation_server.utils.layers import (
    TensorParallelColumnLinear,
    TensorParallelRowLinear,
)

EPS = 1e-5

class IdeficsPerceiverResampler(nn.Module):
    def __init__(
        self,
        prefix,
        config,
        embed_dim: int,
        depth: int,
        n_heads: int,
        head_dim: int,
        n_latents: int,
        weights,
    ) -> None:
        """
        Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or
        MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then
        returns a Tensor of shape [bsz, n_latents, embed_dim]. :param embed_dim: Dimensionality of embeddings being fed
        to the Perceiver Resampler (also dimensionality of latent embeddings *returned* by the Perceiver Resampler.
        Could be e.g., VIT embed_dim, ResNet pool dim, and so on.

        Args:
            config (`IdeficsConfig`): config object
            embed_dim (`int`): The size of each embedding vector
            depth (`int`): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
            n_heads (`int`): Number of heads in each Transformer block (for multi-headed self-attention).
            head_dim (`int`): Dimensionality of each head projection in the Transformer block.
            n_latents (`int`):
                Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).

        """
        super().__init__()
        self.embed_dim, self.n_heads, self.head_dim, self.n_latents = embed_dim, n_heads, head_dim, n_latents
        self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver

        # Create Latents for Perceiver
        self.latents = nn.Parameter(weights.get_tensor(f"{prefix}.latents"))

        self.intermediate_dim = (
            self.embed_dim * 4
            if not hasattr(config.vision_config, "embed_dim")
            else config.vision_config.embed_dim * 4
        )
        # Create Transformer Blocks
        self.blocks = nn.ModuleList(
            [
                nn.ModuleList(
                    [
                        IdeficsPerceiverAttention(
                            prefix=f"{prefix}.blocks.{layer_id}.0",
                            config=config,
                            embed_dim=self.embed_dim,
                            n_heads=self.n_heads,
                            head_dim=self.head_dim,
                            qk_layer_norms=self.qk_layer_norms,
                            weights=weights,
                        ),
                        IdeficsMLP(
                            prefix=f"{prefix}.blocks.{layer_id}.1",
                            intermediate_size=self.intermediate_dim,
                            config=config,
                            weights=weights
                        ),
                    ]
                )
                for layer_id in range(depth)
            ]
        )
        self.layer_norm = nn.LayerNorm.load(prefix=f"{prefix}.layer_norm", weights=weights, eps=EPS)

    def forward(self, context: torch.Tensor) -> torch.Tensor:
        """Resample arbitrary length context & *compress* down to self.n_latents latent embeddings"""
        # einsum.repeat(self.latents, "seq embed -> bsz seq embed", bsz=context.shape[0])
        latents = self.latents.repeat(context.shape[0], 1, 1)

        # Feed through Perceiver Attention blocks...
        for attn, ff in self.blocks:
            latents = attn(context, latents) + latents
            latents = ff(latents) + latents

        return self.layer_norm(latents)

|
class IdeficsPerceiverAttention(nn.Module):
|
||||||
|
def __init__(self,
|
||||||
|
prefix,
|
||||||
|
config,
|
||||||
|
embed_dim: int,
|
||||||
|
n_heads: int,
|
||||||
|
head_dim: int,
|
||||||
|
qk_layer_norms: bool,
|
||||||
|
weights
|
||||||
|
) -> None:
|
||||||
|
"""Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
|
||||||
|
super().__init__()
|
||||||
|
self.embed_dim, self.n_heads, self.head_dim = embed_dim, n_heads, head_dim
|
||||||
|
self.qk_layer_norms = qk_layer_norms
|
||||||
|
# Normalization & Scaling
|
||||||
|
self.context_layer_norm = nn.LayerNorm.load(prefix=f"{prefix}.context_layer_norm", weights=weights, eps=EPS)
|
||||||
|
self.latents_layer_norm = nn.LayerNorm.load(prefix=f"{prefix}.latents_layer_norm", weights=weights, eps=EPS)
|
||||||
|
if self.qk_layer_norms:
|
||||||
|
self.q_layer_norm = nn.LayerNorm.load(prefix=f"{prefix}.q_layer_norm", weights=weights, eps=EPS)
|
||||||
|
self.k_layer_norm = nn.LayerNorm.load(prefix=f"{prefix}.k_layer_norm", weights=weights, eps=EPS)
|
||||||
|
|
||||||
|
self.qk_scale = self.head_dim**-0.5
|
||||||
|
|
||||||
|
process_group = weights.process_group
|
||||||
|
if n_heads % weights.process_group.size() != 0:
|
||||||
|
raise ValueError(
|
||||||
|
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {n_heads} "
|
||||||
|
f"and `num_shards`: {weights.process_group.size()}"
|
||||||
|
)
|
||||||
|
self.n_heads //= weights.process_group.size()
|
||||||
|
|
||||||
|
# Q, K, V Projection (no bias -- detail from Perceiver/Flamingo Papers).
|
||||||
|
self.q_proj = TensorParallelColumnLinear.load(
|
||||||
|
config=config, prefix=f"{prefix}.q_proj", weights=weights, bias=False
|
||||||
|
)
|
||||||
|
self.k_proj = TensorParallelColumnLinear.load(
|
||||||
|
config=config, prefix=f"{prefix}.k_proj", weights=weights, bias=False
|
||||||
|
)
|
||||||
|
self.v_proj = TensorParallelColumnLinear.load(
|
||||||
|
config=config, prefix=f"{prefix}.v_proj", weights=weights, bias=False
|
||||||
|
)
|
||||||
|
|
||||||
|
self.output_proj = TensorParallelRowLinear.load(
|
||||||
|
config=config, prefix=f"{prefix}.output_proj", weights=weights, bias=False
|
||||||
|
)
|
||||||
|
|
||||||
|
def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor:
|
||||||
|
"""
|
||||||
|
Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!
|
||||||
|
|
||||||
|
Args:
|
||||||
|
context (`torch.Tensor`):
|
||||||
|
Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample.
|
||||||
|
latents (`torch.Tensor`):
|
||||||
|
Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
`torch.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross
|
||||||
|
from context.
|
||||||
|
"""
|
||||||
|
context = self.context_layer_norm(context)
|
||||||
|
latents = self.latents_layer_norm(latents)
|
||||||
|
batch_size, seq_length, embed_dim = context.shape[:3]
|
||||||
|
|
||||||
|
# Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn!
|
||||||
|
# Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents`
|
||||||
|
q = self.q_proj(latents)
|
||||||
|
k = self.k_proj(torch.cat([context, latents], dim=-2))
|
||||||
|
v = self.v_proj(torch.cat([context, latents], dim=-2))
|
||||||
|
|
||||||
|
# Multiheaded Self-Attention w/ stable softmax (subtract per-row max -- `amax` -- before softmax call)
|
||||||
|
# =>> `attn` should be a 2D matrix of shape [n_latents x (context + n_latents)]
|
||||||
|
# einsum.rearrange(x, "bsz seq (heads embed) -> bsz heads seq embed", heads=self.n_heads)
|
||||||
|
q, k, v = [x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(1, 2) for x in (q, k, v)]
|
||||||
|
|
||||||
|
if self.qk_layer_norms:
|
||||||
|
q = self.q_layer_norm(q)
|
||||||
|
k = self.k_layer_norm(k)
|
||||||
|
|
||||||
|
scores = torch.einsum("... i d, ... j d -> ... i j", q * self.qk_scale, k)
|
||||||
|
stabilized_scores = scores - (scores.amax(dim=-1, keepdim=True).detach())
|
||||||
|
attn = stabilized_scores.softmax(dim=-1)
|
||||||
|
|
||||||
|
# Attend & project back to output...
|
||||||
|
resampled = torch.einsum("... i j, ... j d -> ... i d", attn, v)
|
||||||
|
# einsum.rearrange(resampled, "bsz heads seq embed -> bsz seq (heads embed)", heads=self.n_heads)
|
||||||
|
return self.output_proj(resampled.transpose(1, 2).flatten(-2))
|
||||||
|
|
||||||
|
|
||||||
|
class IdeficsMLP(nn.Module):
|
||||||
|
def __init__(self,
|
||||||
|
prefix,
|
||||||
|
intermediate_size,
|
||||||
|
config,
|
||||||
|
weights,
|
||||||
|
):
|
||||||
|
"""Simple MLP block with intermediate_size and embedding size"""
|
||||||
|
super().__init__()
|
||||||
|
self.embed_dim = config.vision_config.embed_dim
|
||||||
|
self.ln = nn.LayerNorm.load(prefix=f"{prefix}.ln", weights=weights, eps=EPS)
|
||||||
|
self.fc = TensorParallelColumnLinear.load(
|
||||||
|
config=config, prefix=f"{prefix}.fc", weights=weights, bias=False,
|
||||||
|
)
|
||||||
|
self.act = nn.ReLU()
|
||||||
|
self.c_proj = TensorParallelRowLinear.load(
|
||||||
|
config=config, prefix=f"{prefix}.c_proj", weights=weights, bias=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
|
||||||
|
hidden_states = self.ln(hidden_states)
|
||||||
|
hidden_states = self.fc(hidden_states)
|
||||||
|
hidden_states = self.act(hidden_states)
|
||||||
|
hidden_states = self.c_proj(hidden_states)
|
||||||
|
|
||||||
|
return hidden_states
|
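For orientation, the resampler's effect is easiest to read as a shape transformation: `[bsz, seq, embed_dim] -> [bsz, n_latents, embed_dim]`. Below is a minimal, hedged sketch of the same cross-attention resampling idea in plain PyTorch (toy dimensions, standard `nn.MultiheadAttention`, no tensor parallelism or checkpoint loading); it is illustrative only, not the TGI module above.

```python
# Illustrative sketch only: a plain-PyTorch analogue of the resampling idea, with made-up sizes.
import torch
import torch.nn as nn


class ToyPerceiverResampler(nn.Module):
    def __init__(self, embed_dim=64, n_heads=4, n_latents=8, depth=2):
        super().__init__()
        self.latents = nn.Parameter(torch.randn(n_latents, embed_dim))
        self.blocks = nn.ModuleList(
            [nn.MultiheadAttention(embed_dim, n_heads, batch_first=True) for _ in range(depth)]
        )
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, context):  # context: [bsz, seq, embed_dim]
        latents = self.latents.expand(context.shape[0], -1, -1)
        for attn in self.blocks:
            # queries are the latents; keys/values are context + latents, as in Flamingo
            kv = torch.cat([context, latents], dim=1)
            out, _ = attn(latents, kv, kv)
            latents = latents + out
        return self.norm(latents)  # [bsz, n_latents, embed_dim]


resampler = ToyPerceiverResampler()
print(resampler(torch.randn(2, 257, 64)).shape)  # torch.Size([2, 8, 64])
```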
@ -0,0 +1,413 @@
|
|||||||
|
# coding=utf-8
|
||||||
|
# Copyright 2022 The HuggingFace Inc. team.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
"""
|
||||||
|
Processor class for IDEFICS.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import Callable, List, Optional, Union
|
||||||
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
|
from transformers.feature_extraction_utils import BatchFeature
|
||||||
|
from transformers.processing_utils import ProcessorMixin
|
||||||
|
from transformers.tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy
|
||||||
|
from transformers.utils import TensorType, is_torch_available
|
||||||
|
from text_generation_server.models.custom_modeling.idefics_image_processing import IdeficsImageProcessor
|
||||||
|
|
||||||
|
|
||||||
|
if is_torch_available():
|
||||||
|
import torch
|
||||||
|
|
||||||
|
|
||||||
|
IMAGE_TOKEN = "<image>"
|
||||||
|
|
||||||
|
|
||||||
|
# copied from m4.training.packing
|
||||||
|
def incremental_to_binary_attention_mask(incremental_mask, num_classes=-1):
|
||||||
|
# This function converts: [-1, 0, 1] => [[0, 0], [1, 0], [0, 1]]
|
||||||
|
|
||||||
|
# If any of images index are more than num_classes, set them to -1.
|
||||||
|
# Words after the max number of images allowed have been seen don't attend on anything
|
||||||
|
if num_classes != -1:
|
||||||
|
incremental_mask[incremental_mask >= num_classes] = -1
|
||||||
|
|
||||||
|
negatives = incremental_mask == -1
|
||||||
|
incremental_mask[negatives] = 0
|
||||||
|
attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes)
|
||||||
|
attn_mask[negatives, :] = 0
|
||||||
|
return attn_mask
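As a quick check of the conversion described in the comment above (the example tensor values here are made up):

```python
import torch

incremental = torch.tensor([[-1, -1, 0, 0, 1, 1]])  # -1 means "no image seen yet"
binary = incremental_to_binary_attention_mask(incremental.clone(), num_classes=2)
print(binary[0].tolist())
# [[0, 0], [0, 0], [1, 0], [1, 0], [0, 1], [0, 1]]
```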
|
||||||
|
|
||||||
|
|
||||||
|
# copied from m4.training.packing
|
||||||
|
def image_attention_mask_for_packed_input_ids(input_ids, tokenizer):
|
||||||
|
image_attention_mask = torch.full_like(input_ids, fill_value=-1)
|
||||||
|
next_image_attention_mask = torch.full_like(input_ids, fill_value=-1)
|
||||||
|
image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
|
||||||
|
eod_token_id = tokenizer.eos_token_id
|
||||||
|
for batch_idx in range(input_ids.size(0)):
|
||||||
|
count = -1
|
||||||
|
seen_eod = False
|
||||||
|
for idx, token_id in enumerate(input_ids[batch_idx]):
|
||||||
|
if token_id == image_token_id:
|
||||||
|
count += 1
|
||||||
|
image_attention_mask[batch_idx][idx] = count
|
||||||
|
seen_eod = False
|
||||||
|
else:
|
||||||
|
image_attention_mask[batch_idx][idx] = count
|
||||||
|
|
||||||
|
if seen_eod:
|
||||||
|
image_attention_mask[batch_idx][idx] = -1
|
||||||
|
|
||||||
|
if token_id == eod_token_id:
|
||||||
|
seen_eod = True
|
||||||
|
|
||||||
|
for batch_idx in range(input_ids.size(0)):
|
||||||
|
count = -1
|
||||||
|
seen_eod = False
|
||||||
|
for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1):
|
||||||
|
token_id = input_ids[batch_idx][idx]
|
||||||
|
if token_id == image_token_id:
|
||||||
|
count += 1
|
||||||
|
next_image_attention_mask[batch_idx][idx] = count
|
||||||
|
seen_eod = False
|
||||||
|
else:
|
||||||
|
next_image_attention_mask[batch_idx][idx] = count
|
||||||
|
|
||||||
|
if token_id == eod_token_id:
|
||||||
|
seen_eod = True
|
||||||
|
|
||||||
|
if seen_eod:
|
||||||
|
next_image_attention_mask[batch_idx][idx] = -1
|
||||||
|
|
||||||
|
non_negative_indices = next_image_attention_mask[batch_idx] != -1
|
||||||
|
next_image_attention_mask[batch_idx][non_negative_indices] -= count
|
||||||
|
next_image_attention_mask[batch_idx][non_negative_indices] *= -1
|
||||||
|
|
||||||
|
return image_attention_mask, next_image_attention_mask
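A toy walk-through of the two masks, using a stub tokenizer (the token ids below are invented for illustration):

```python
import torch


class _StubTokenizer:
    eos_token_id = 2

    def convert_tokens_to_ids(self, token):
        return 32001  # pretend this is the id of "<image>"


input_ids = torch.tensor([[32001, 5, 6, 32001, 7, 2]])
fwd, bwd = image_attention_mask_for_packed_input_ids(input_ids, _StubTokenizer())
print(fwd.tolist())  # [[0, 0, 0, 1, 1, 1]]  -> index of the most recent image at each position
print(bwd.tolist())  # [[0, 1, 1, 1, -1, -1]]
```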
|
||||||
|
|
||||||
|
|
||||||
|
def is_url(string):
|
||||||
|
"""Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately
|
||||||
|
invalidated the url"""
|
||||||
|
if " " in string:
|
||||||
|
return False
|
||||||
|
result = urlparse(string)
|
||||||
|
return all([result.scheme, result.netloc])
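For example:

```python
assert is_url("https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg")
assert not is_url("describe this image")   # contains spaces
assert not is_url("images/dog-puns.jpg")   # no scheme/netloc
```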
|
||||||
|
|
||||||
|
|
||||||
|
class IdeficsProcessor(ProcessorMixin):
|
||||||
|
r"""
|
||||||
|
Constructs an IDEFICS processor which wraps a Llama tokenizer and an IDEFICS image processor into a single processor.
|
||||||
|
|
||||||
|
[`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See
|
||||||
|
the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
image_processor (`IdeficsImageProcessor`):
|
||||||
|
An instance of [`IdeficsImageProcessor`]. The image processor is a required input.
|
||||||
|
tokenizer (`LlamaTokenizerFast`):
|
||||||
|
An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input.
|
||||||
|
image_size (`int`, *optional*, defaults to 224): Image size (assuming a square image)
|
||||||
|
"""
|
||||||
|
attributes = ["image_processor", "tokenizer"]
|
||||||
|
image_processor_class = "IdeficsImageProcessor"
|
||||||
|
tokenizer_class = "LlamaTokenizerFast"
|
||||||
|
|
||||||
|
def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs):
|
||||||
|
if image_processor is None:
|
||||||
|
raise ValueError("You need to specify an `image_processor`.")
|
||||||
|
if tokenizer is None:
|
||||||
|
raise ValueError("You need to specify a `tokenizer`.")
|
||||||
|
|
||||||
|
super().__init__(image_processor, tokenizer)
|
||||||
|
self.current_processor = self.image_processor
|
||||||
|
self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
|
||||||
|
|
||||||
|
self.default_image_dims = (
|
||||||
|
self.image_processor.image_num_channels,
|
||||||
|
self.image_processor.image_size,
|
||||||
|
self.image_processor.image_size,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.tokenizer_was_trained_with_end_of_utterance_token = (
|
||||||
|
True
|
||||||
|
if "<end_of_utterance>" in self.tokenizer.special_tokens_map.get("additional_special_tokens", [])
|
||||||
|
else False
|
||||||
|
)
|
||||||
|
|
||||||
|
def __call__(
|
||||||
|
self,
|
||||||
|
prompts: Union[List[TextInput], List[List[TextInput]]],
|
||||||
|
padding: Union[bool, str, PaddingStrategy] = False,
|
||||||
|
truncation: Union[bool, str, TruncationStrategy] = None,
|
||||||
|
max_length: Optional[int] = None,
|
||||||
|
transform: Callable = None,
|
||||||
|
add_eos_token=False,
|
||||||
|
add_end_of_utterance_token=None,
|
||||||
|
debug=False,
|
||||||
|
return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
|
||||||
|
) -> BatchEncoding:
|
||||||
|
"""This method takes batched or non-batched prompts made of text and images and converts them into prompts that
|
||||||
|
the model was trained on and prepares the image pixel values for the model to process.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
prompts (`Union[List[TextInput], [List[List[TextInput]]]]`):
|
||||||
|
either a single prompt or a batched list of prompts - see the detailed description immediately after
|
||||||
|
the end of the arguments doc section.
|
||||||
|
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
|
||||||
|
Select a strategy to pad the returned sequences (according to the model's padding side and padding
|
||||||
|
index) among:
|
||||||
|
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
|
||||||
|
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
|
||||||
|
acceptable input length for the model if that argument is not provided.
|
||||||
|
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
|
||||||
|
lengths).
|
||||||
|
max_length (`int`, *optional*):
|
||||||
|
Maximum length of the returned list and optionally padding length (see above).
|
||||||
|
truncation (`bool`, *optional*):
|
||||||
|
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
|
||||||
|
transform (`Callable`, *optional*):
|
||||||
|
A custom transform function that accepts a single image can be passed for training. For example,
|
||||||
|
`torchvision.Compose` can be used to compose multiple functions. If `None`, a preset inference-specific
set of transforms will be applied to the images.
|
||||||
|
add_eos_token (`bool`, *optional*, defaults to `False`):
|
||||||
|
Adds `eos_token` at the end of the final prompt if `True`.
|
||||||
|
add_end_of_utterance_token (`bool`, *optional*):
|
||||||
|
Whether to automatically add `<end_of_utterance>` after each prompt's text input (unless followed by an
|
||||||
|
image). If `None` the tokenizer will be checked instead and if this token is found in
|
||||||
|
`additional_special_tokens` then the value will be `True`.
|
||||||
|
debug (`bool`, *optional*, defaults to `False`):
|
||||||
|
If `True`, helps debug prompt generation by dumping useful information.
|
||||||
|
return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`):
|
||||||
|
The type of tensors to return. Can be one of:
|
||||||
|
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be
|
||||||
|
directly passed to `model.generate`
|
||||||
|
|
||||||
|
Detailed explanation:
|
||||||
|
|
||||||
|
Each entry in `prompts` is either a text to be passed as is or an image that will be processed.
|
||||||
|
|
||||||
|
An image can be either an image object (`PIL.Image`) or a url from which the image can be retrieved.
|
||||||
|
|
||||||
|
When the processor encounters an image it'll inject `<fake_token_around_image><image><fake_token_around_image>`
|
||||||
|
entry into the prompt.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
checkpoint = "HuggingFaceM4/idefics-9b"
|
||||||
|
processor = AutoProcessor.from_pretrained(checkpoint)
|
||||||
|
url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
|
||||||
|
img = processor.image_processor.fetch_images([url])[0]
|
||||||
|
|
||||||
|
prompts = [
|
||||||
|
"User:",
|
||||||
|
img,
|
||||||
|
"Describe this image.\nAssistant: An image of two kittens in grass.\n",
|
||||||
|
"User:",
|
||||||
|
"https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg",
|
||||||
|
"Describe this image.\nAssistant:",
|
||||||
|
]
|
||||||
|
|
||||||
|
inputs = processor(prompts, return_tensors="pt")
|
||||||
|
generated_ids = model.generate(**inputs, max_length=100)
|
||||||
|
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
||||||
|
```
|
||||||
|
|
||||||
|
In this example the `prompts` will be converted into:
|
||||||
|
|
||||||
|
```
|
||||||
|
<s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
|
||||||
|
Assistant: An image of two kittens in grass.
|
||||||
|
User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
|
||||||
|
Assistant:'
|
||||||
|
```
|
||||||
|
|
||||||
|
and the two images will be massaged using [`IdeficsImageProcessor.__call__`] method and placed inside the
|
||||||
|
`pixel_values` dict entry of the return value.
|
||||||
|
|
||||||
|
This example also shows that images can be passed either as objects or as text urls: the
first image is passed as an object and the second one as a url.
|
||||||
|
|
||||||
|
For training, do:
|
||||||
|
|
||||||
|
```python
|
||||||
|
image_transform = transforms.Compose(
|
||||||
|
[
|
||||||
|
transforms.RandomResizedCrop(
|
||||||
|
(w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC
|
||||||
|
),
|
||||||
|
transforms.ToTensor(),
|
||||||
|
transforms.Normalize(mean=self.image_mean, std=self.image_std),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
inputs = processor(prompts, transform=image_transform, return_tensors="pt")
|
||||||
|
```
|
||||||
|
|
||||||
|
To help debug prompt generation, enable `debug=True`, which will show you what's happening.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
# if the value isn't overridden by the user, check if the tokenizer was trained with this token and then use it
|
||||||
|
if add_end_of_utterance_token is None:
|
||||||
|
add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token
|
||||||
|
|
||||||
|
# turn non-batched prompts into batched
|
||||||
|
if not any(isinstance(i, list) for i in prompts):
|
||||||
|
prompts = [prompts]
|
||||||
|
|
||||||
|
fake_token = "<fake_token_around_image>"
|
||||||
|
image_token = "<image>"
|
||||||
|
end_of_utterance_token = "<end_of_utterance>"
|
||||||
|
|
||||||
|
def image_tokens(last_was_image):
|
||||||
|
if last_was_image:
|
||||||
|
return image_token + fake_token
|
||||||
|
else:
|
||||||
|
return fake_token + image_token + fake_token
|
||||||
|
|
||||||
|
all_texts = []
|
||||||
|
all_images = []
|
||||||
|
for sample in prompts:
|
||||||
|
# the model was trained on samples starting with <s>
|
||||||
|
full_text = f"{self.tokenizer.bos_token}"
|
||||||
|
|
||||||
|
# an image can either be an image object in the item or the url, everything else is a verbatim prompt text
|
||||||
|
image_objects = []
|
||||||
|
last_was_image = False
|
||||||
|
last_was_text = False
|
||||||
|
for i, item in enumerate(sample):
|
||||||
|
if i > 0:
|
||||||
|
last_was_text = not last_was_image
|
||||||
|
|
||||||
|
if isinstance(item, str):
|
||||||
|
item = item.strip(" ")
|
||||||
|
if is_url(item):
|
||||||
|
image = self.image_processor.fetch_images(item)
|
||||||
|
full_text += image_tokens(last_was_image)
|
||||||
|
image_objects.append(image)
|
||||||
|
last_was_image = True
|
||||||
|
else:
|
||||||
|
# we add end_of_utterance_token between subsequent text prompts (but not after the last one!)
|
||||||
|
if add_end_of_utterance_token and last_was_text:
|
||||||
|
full_text += end_of_utterance_token
|
||||||
|
full_text += item
|
||||||
|
last_was_image = False
|
||||||
|
else:
|
||||||
|
# must be an image obj
|
||||||
|
full_text += image_tokens(last_was_image)
|
||||||
|
image_objects.append(item)
|
||||||
|
last_was_image = True
|
||||||
|
|
||||||
|
if add_eos_token:
|
||||||
|
full_text += self.tokenizer.eos_token
|
||||||
|
|
||||||
|
if debug is True:
|
||||||
|
print(f"{full_text=}")
|
||||||
|
|
||||||
|
image_objects = self.image_processor(image_objects, transform=transform)
|
||||||
|
|
||||||
|
text_encoding = self.tokenizer(
|
||||||
|
text=full_text,
|
||||||
|
add_special_tokens=False,
|
||||||
|
padding=padding,
|
||||||
|
truncation=truncation,
|
||||||
|
max_length=max_length,
|
||||||
|
)
|
||||||
|
|
||||||
|
all_texts.append(text_encoding["input_ids"])
|
||||||
|
all_images.append(image_objects)
|
||||||
|
|
||||||
|
max_seq_len = max(len(x) for x in all_texts)
|
||||||
|
|
||||||
|
# max_num_images has to be at least 1 even when there are no images
|
||||||
|
max_num_images = max(len(x) for x in all_images)
|
||||||
|
max_num_images = max(1, max_num_images)
|
||||||
|
|
||||||
|
at_least_one_image = sum(len(x) for x in all_images) > 0
|
||||||
|
output_input_ids = []
|
||||||
|
output_images = []
|
||||||
|
output_attention_masks = []
|
||||||
|
for text, images in zip(all_texts, all_images):
|
||||||
|
padded_input_ids = [self.tokenizer.pad_token_id] * max_seq_len
|
||||||
|
unpadded_seq_len = len(text)
|
||||||
|
start = max_seq_len - unpadded_seq_len
|
||||||
|
padded_input_ids[start:] = text[:max_seq_len]
|
||||||
|
|
||||||
|
attention_mask = torch.zeros((max_seq_len,), dtype=torch.long)
|
||||||
|
attention_mask[start:] = 1
|
||||||
|
|
||||||
|
image_count = padded_input_ids.count(self.image_token_id)
|
||||||
|
local_max_num_images = min(image_count, max_num_images)
|
||||||
|
|
||||||
|
current_images = images[:local_max_num_images]
|
||||||
|
|
||||||
|
if len(current_images) > 0:
|
||||||
|
padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:])
|
||||||
|
padded_image_tensor[: current_images.size(0)] = current_images
|
||||||
|
else:
|
||||||
|
padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims)
|
||||||
|
|
||||||
|
output_images.append(padded_image_tensor)
|
||||||
|
output_input_ids.append(torch.tensor(padded_input_ids))
|
||||||
|
|
||||||
|
output_attention_masks.append(attention_mask)
|
||||||
|
|
||||||
|
output_input_ids = torch.stack(output_input_ids)
|
||||||
|
output_images = torch.stack(output_images)
|
||||||
|
output_attention_masks = torch.stack(output_attention_masks)
|
||||||
|
|
||||||
|
if at_least_one_image:
|
||||||
|
image_attention_mask, _ = image_attention_mask_for_packed_input_ids(output_input_ids, self.tokenizer)
|
||||||
|
image_attention_mask = incremental_to_binary_attention_mask(
|
||||||
|
image_attention_mask, num_classes=max_num_images
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# in full language mode we set the image mask to all-0s
|
||||||
|
image_attention_mask = torch.zeros(
|
||||||
|
output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool
|
||||||
|
)
|
||||||
|
|
||||||
|
return BatchFeature(
|
||||||
|
data={
|
||||||
|
"input_ids": output_input_ids,
|
||||||
|
"attention_mask": output_attention_masks,
|
||||||
|
"pixel_values": output_images,
|
||||||
|
"image_attention_mask": image_attention_mask,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
def batch_decode(self, *args, **kwargs):
|
||||||
|
"""
|
||||||
|
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
|
||||||
|
refer to the docstring of this method for more information.
|
||||||
|
"""
|
||||||
|
return self.tokenizer.batch_decode(*args, **kwargs)
|
||||||
|
|
||||||
|
def decode(self, *args, **kwargs):
|
||||||
|
"""
|
||||||
|
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
|
||||||
|
the docstring of this method for more information.
|
||||||
|
"""
|
||||||
|
return self.tokenizer.decode(*args, **kwargs)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def model_input_names(self):
|
||||||
|
tokenizer_input_names = self.tokenizer.model_input_names
|
||||||
|
image_processor_input_names = self.image_processor.model_input_names
|
||||||
|
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
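For reference, the token-injection rule used by `__call__` above reduces to the following standalone snippet (same strings and logic as in the method body):

```python
fake_token, image_token = "<fake_token_around_image>", "<image>"


def image_tokens(last_was_image):
    # consecutive images share a single surrounding fake token
    if last_was_image:
        return image_token + fake_token
    return fake_token + image_token + fake_token


print(image_tokens(False))  # <fake_token_around_image><image><fake_token_around_image>
print(image_tokens(True))   # <image><fake_token_around_image>
```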
|
@ -0,0 +1,476 @@
|
|||||||
|
# coding=utf-8
|
||||||
|
# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
""" PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object"""
|
||||||
|
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Optional, Tuple, Union
|
||||||
|
|
||||||
|
import torch
|
||||||
|
import torch.utils.checkpoint
|
||||||
|
from torch import nn
|
||||||
|
|
||||||
|
from transformers.activations import ACT2FN
|
||||||
|
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
|
||||||
|
from transformers.utils import (
|
||||||
|
ModelOutput,
|
||||||
|
logging,
|
||||||
|
)
|
||||||
|
from text_generation_server.utils.layers import (
|
||||||
|
TensorParallelColumnLinear,
|
||||||
|
TensorParallelRowLinear,
|
||||||
|
TensorParallelEmbedding,
|
||||||
|
)
|
||||||
|
|
||||||
|
logger = logging.get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class IdeficsVisionModelOutput(ModelOutput):
|
||||||
|
"""
|
||||||
|
Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
|
||||||
|
The image embeddings obtained by applying the projection layer to the pooler_output.
|
||||||
|
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
||||||
|
Sequence of hidden-states at the output of the last layer of the model.
|
||||||
|
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
||||||
|
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
||||||
|
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
||||||
|
|
||||||
|
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
||||||
|
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
||||||
|
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
||||||
|
sequence_length)`.
|
||||||
|
|
||||||
|
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
||||||
|
heads.
|
||||||
|
"""
|
||||||
|
|
||||||
|
image_embeds: Optional[torch.FloatTensor] = None
|
||||||
|
last_hidden_state: torch.FloatTensor = None
|
||||||
|
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
||||||
|
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
||||||
|
|
||||||
|
|
||||||
|
# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Idefics
|
||||||
|
class IdeficsVisionEmbeddings(nn.Module):
|
||||||
|
def __init__(self, prefix, config, weights):
|
||||||
|
super().__init__()
|
||||||
|
self.config = config
|
||||||
|
self.embed_dim = config.hidden_size
|
||||||
|
self.image_size = config.image_size
|
||||||
|
self.patch_size = config.patch_size
|
||||||
|
|
||||||
|
self.class_embedding = nn.Parameter(weights.get_tensor(f"{prefix}.class_embedding"))
|
||||||
|
|
||||||
|
self.patch_embedding = nn.Conv2d.load_no_bias(
|
||||||
|
prefix=f"{prefix}.patch_embedding",
|
||||||
|
weights=weights,
|
||||||
|
in_channels=config.num_channels,
|
||||||
|
out_channels=self.embed_dim,
|
||||||
|
kernel_size=self.patch_size,
|
||||||
|
stride=self.patch_size,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.num_patches = (self.image_size // self.patch_size) ** 2
|
||||||
|
self.num_positions = self.num_patches + 1
|
||||||
|
# self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
|
||||||
|
self.position_embedding = TensorParallelEmbedding(
|
||||||
|
prefix="model.vision_model.embeddings.position_embedding", weights=weights
|
||||||
|
)
|
||||||
|
# self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
|
||||||
|
self.position_ids = weights.get_tensor(f"{prefix}.position_ids")
|
||||||
|
|
||||||
|
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
|
||||||
|
batch_size = pixel_values.shape[0]
|
||||||
|
target_dtype = self.patch_embedding.weight.dtype
|
||||||
|
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
|
||||||
|
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
|
||||||
|
|
||||||
|
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
|
||||||
|
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
|
||||||
|
embeddings = embeddings + self.position_embedding(self.position_ids)
|
||||||
|
return embeddings
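The shape bookkeeping in this forward pass is the usual ViT patchification; the concrete numbers below assume a ViT-H/14-style tower with 224x224 inputs (an assumption for illustration, not read from the config here):

```python
image_size, patch_size, hidden_size = 224, 14, 1280  # assumed example values
num_patches = (image_size // patch_size) ** 2         # 16 * 16 = 256 patch tokens
num_positions = num_patches + 1                       # + class token -> 257
# pixel_values [bsz, 3, 224, 224]
#   -> patch_embeds [bsz, 256, hidden_size]
#   -> embeddings  [bsz, 257, hidden_size] after prepending the class embedding
print(num_patches, num_positions)  # 256 257
```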
|
||||||
|
|
||||||
|
|
||||||
|
# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->IdeficsVision
|
||||||
|
class IdeficsVisionAttention(nn.Module):
|
||||||
|
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
||||||
|
|
||||||
|
def __init__(self, prefix, config, weights):
|
||||||
|
super().__init__()
|
||||||
|
self.config = config
|
||||||
|
self.embed_dim = config.hidden_size
|
||||||
|
self.num_heads = config.num_attention_heads
|
||||||
|
self.head_dim = self.embed_dim // self.num_heads
|
||||||
|
if self.head_dim * self.num_heads != self.embed_dim:
|
||||||
|
raise ValueError(
|
||||||
|
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
|
||||||
|
f" {self.num_heads})."
|
||||||
|
)
|
||||||
|
self.scale = self.head_dim**-0.5
|
||||||
|
self.dropout = config.attention_dropout
|
||||||
|
|
||||||
|
process_group = weights.process_group
|
||||||
|
if self.num_heads % weights.process_group.size() != 0:
|
||||||
|
raise ValueError(
|
||||||
|
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
|
||||||
|
f"and `num_shards`: {weights.process_group.size()}"
|
||||||
|
)
|
||||||
|
self.num_heads = self.num_heads // weights.process_group.size()
|
||||||
|
self.embed_dim = self.embed_dim // weights.process_group.size()
|
||||||
|
|
||||||
|
|
||||||
|
self.k_proj = TensorParallelColumnLinear.load(
|
||||||
|
config, prefix=f"{prefix}.k_proj", weights=weights, bias=True
|
||||||
|
)
|
||||||
|
self.v_proj = TensorParallelColumnLinear.load(
|
||||||
|
config, prefix=f"{prefix}.v_proj", weights=weights, bias=True
|
||||||
|
)
|
||||||
|
self.q_proj = TensorParallelColumnLinear.load(
|
||||||
|
config, prefix=f"{prefix}.q_proj", weights=weights, bias=True
|
||||||
|
)
|
||||||
|
self.out_proj = TensorParallelRowLinear.load(
|
||||||
|
config, prefix=f"{prefix}.out_proj", weights=weights, bias=True
|
||||||
|
)
|
||||||
|
|
||||||
|
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
||||||
|
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
|
||||||
|
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
hidden_states: torch.Tensor,
|
||||||
|
attention_mask: Optional[torch.Tensor] = None,
|
||||||
|
causal_attention_mask: Optional[torch.Tensor] = None,
|
||||||
|
output_attentions: Optional[bool] = False,
|
||||||
|
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
||||||
|
"""Input shape: Batch x Time x Channel"""
|
||||||
|
|
||||||
|
bsz, tgt_len, _ = hidden_states.size()
|
||||||
|
|
||||||
|
# get query proj
|
||||||
|
query_states = self.q_proj(hidden_states) * self.scale
|
||||||
|
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
|
||||||
|
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
|
||||||
|
|
||||||
|
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
|
||||||
|
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
|
||||||
|
key_states = key_states.view(*proj_shape)
|
||||||
|
value_states = value_states.view(*proj_shape)
|
||||||
|
|
||||||
|
src_len = key_states.size(1)
|
||||||
|
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
|
||||||
|
|
||||||
|
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
|
||||||
|
raise ValueError(
|
||||||
|
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
|
||||||
|
f" {attn_weights.size()}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# apply the causal_attention_mask first
|
||||||
|
if causal_attention_mask is not None:
|
||||||
|
if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
|
||||||
|
raise ValueError(
|
||||||
|
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
|
||||||
|
f" {causal_attention_mask.size()}"
|
||||||
|
)
|
||||||
|
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
|
||||||
|
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
||||||
|
|
||||||
|
if attention_mask is not None:
|
||||||
|
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
|
||||||
|
raise ValueError(
|
||||||
|
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
|
||||||
|
)
|
||||||
|
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
|
||||||
|
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
||||||
|
|
||||||
|
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
|
||||||
|
|
||||||
|
if output_attentions:
|
||||||
|
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and then reused in the following
|
||||||
|
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
||||||
|
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
|
||||||
|
else:
|
||||||
|
attn_weights_reshaped = None
|
||||||
|
|
||||||
|
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
|
||||||
|
|
||||||
|
attn_output = torch.bmm(attn_probs, value_states)
|
||||||
|
|
||||||
|
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
|
||||||
|
raise ValueError(
|
||||||
|
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
|
||||||
|
f" {attn_output.size()}"
|
||||||
|
)
|
||||||
|
|
||||||
|
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
|
||||||
|
attn_output = attn_output.transpose(1, 2)
|
||||||
|
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
|
||||||
|
|
||||||
|
attn_output = self.out_proj(attn_output)
|
||||||
|
|
||||||
|
return attn_output, attn_weights_reshaped
|
||||||
|
|
||||||
|
|
||||||
|
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->IdeficsVision
|
||||||
|
class IdeficsVisionMLP(nn.Module):
|
||||||
|
def __init__(self, prefix, config, weights):
|
||||||
|
super().__init__()
|
||||||
|
self.config = config
|
||||||
|
self.activation_fn = ACT2FN[config.hidden_act]
|
||||||
|
self.fc1 = TensorParallelColumnLinear.load(
|
||||||
|
config, prefix=f"{prefix}.fc1", weights=weights, bias=True
|
||||||
|
)
|
||||||
|
self.fc2 = TensorParallelRowLinear.load(
|
||||||
|
config, prefix=f"{prefix}.fc2", weights=weights, bias=True
|
||||||
|
)
|
||||||
|
|
||||||
|
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
||||||
|
hidden_states = self.fc1(hidden_states)
|
||||||
|
hidden_states = self.activation_fn(hidden_states)
|
||||||
|
hidden_states = self.fc2(hidden_states)
|
||||||
|
return hidden_states
|
||||||
|
|
||||||
|
|
||||||
|
# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->IdeficsVision
|
||||||
|
class IdeficsVisionEncoderLayer(nn.Module):
|
||||||
|
def __init__(self, prefix, config, weights):
|
||||||
|
super().__init__()
|
||||||
|
self.embed_dim = config.hidden_size
|
||||||
|
self.self_attn = IdeficsVisionAttention(prefix=f"{prefix}.self_attn", config=config, weights=weights)
|
||||||
|
self.layer_norm1 = nn.LayerNorm.load(
|
||||||
|
prefix=f"{prefix}.layer_norm1", weights=weights, eps=config.layer_norm_eps
|
||||||
|
)
|
||||||
|
self.mlp = IdeficsVisionMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
|
||||||
|
self.layer_norm2 = nn.LayerNorm.load(
|
||||||
|
prefix=f"{prefix}.layer_norm2", weights=weights, eps=config.layer_norm_eps
|
||||||
|
)
|
||||||
|
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
hidden_states: torch.Tensor,
|
||||||
|
attention_mask: torch.Tensor,
|
||||||
|
causal_attention_mask: torch.Tensor,
|
||||||
|
output_attentions: Optional[bool] = False,
|
||||||
|
) -> Tuple[torch.FloatTensor]:
|
||||||
|
"""
|
||||||
|
Args:
|
||||||
|
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
||||||
|
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
||||||
|
output_attentions (`bool`, *optional*):
|
||||||
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
||||||
|
returned tensors for more detail.
|
||||||
|
"""
|
||||||
|
residual = hidden_states
|
||||||
|
|
||||||
|
hidden_states = self.layer_norm1(hidden_states)
|
||||||
|
hidden_states, attn_weights = self.self_attn(
|
||||||
|
hidden_states=hidden_states,
|
||||||
|
attention_mask=attention_mask,
|
||||||
|
causal_attention_mask=causal_attention_mask,
|
||||||
|
output_attentions=output_attentions,
|
||||||
|
)
|
||||||
|
hidden_states = residual + hidden_states
|
||||||
|
|
||||||
|
residual = hidden_states
|
||||||
|
hidden_states = self.layer_norm2(hidden_states)
|
||||||
|
hidden_states = self.mlp(hidden_states)
|
||||||
|
hidden_states = residual + hidden_states
|
||||||
|
|
||||||
|
outputs = (hidden_states,)
|
||||||
|
|
||||||
|
if output_attentions:
|
||||||
|
outputs += (attn_weights,)
|
||||||
|
|
||||||
|
return outputs
|
||||||
|
|
||||||
|
|
||||||
|
# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->IdeficsVision
|
||||||
|
class IdeficsVisionEncoder(nn.Module):
|
||||||
|
"""
|
||||||
|
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
|
||||||
|
[`IdeficsVisionEncoderLayer`].
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config: IdeficsVisionConfig
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, prefix, config, weights):
|
||||||
|
super().__init__()
|
||||||
|
self.config = config
|
||||||
|
self.layers = nn.ModuleList(
|
||||||
|
[
|
||||||
|
IdeficsVisionEncoderLayer(prefix=f"{prefix}.encoder.layers.{layer_id}", config=config, weights=weights)
|
||||||
|
for layer_id in range(config.num_hidden_layers)
|
||||||
|
]
|
||||||
|
)
|
||||||
|
# self.gradient_checkpointing = False
|
||||||
|
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
inputs_embeds,
|
||||||
|
attention_mask: Optional[torch.Tensor] = None,
|
||||||
|
causal_attention_mask: Optional[torch.Tensor] = None,
|
||||||
|
output_attentions: Optional[bool] = None,
|
||||||
|
output_hidden_states: Optional[bool] = None,
|
||||||
|
return_dict: Optional[bool] = None,
|
||||||
|
) -> Union[Tuple, BaseModelOutput]:
|
||||||
|
r"""
|
||||||
|
Args:
|
||||||
|
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
||||||
|
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
|
||||||
|
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
|
||||||
|
than the model's internal embedding lookup matrix.
|
||||||
|
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
||||||
|
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
||||||
|
|
||||||
|
- 1 for tokens that are **not masked**,
|
||||||
|
- 0 for tokens that are **masked**.
|
||||||
|
|
||||||
|
[What are attention masks?](../glossary#attention-mask)
|
||||||
|
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
||||||
|
Causal mask for the text model. Mask values selected in `[0, 1]`:
|
||||||
|
|
||||||
|
- 1 for tokens that are **not masked**,
|
||||||
|
- 0 for tokens that are **masked**.
|
||||||
|
|
||||||
|
[What are attention masks?](../glossary#attention-mask)
|
||||||
|
output_attentions (`bool`, *optional*):
|
||||||
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
||||||
|
returned tensors for more detail.
|
||||||
|
output_hidden_states (`bool`, *optional*):
|
||||||
|
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
|
||||||
|
for more detail.
|
||||||
|
return_dict (`bool`, *optional*):
|
||||||
|
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
||||||
|
"""
|
||||||
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
||||||
|
output_hidden_states = (
|
||||||
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
||||||
|
)
|
||||||
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
||||||
|
|
||||||
|
encoder_states = () if output_hidden_states else None
|
||||||
|
all_attentions = () if output_attentions else None
|
||||||
|
|
||||||
|
hidden_states = inputs_embeds
|
||||||
|
for idx, encoder_layer in enumerate(self.layers):
|
||||||
|
if output_hidden_states:
|
||||||
|
encoder_states = encoder_states + (hidden_states,)
|
||||||
|
# if self.gradient_checkpointing and self.training:
|
||||||
|
|
||||||
|
# def create_custom_forward(module):
|
||||||
|
# def custom_forward(*inputs):
|
||||||
|
# return module(*inputs, output_attentions)
|
||||||
|
|
||||||
|
# return custom_forward
|
||||||
|
|
||||||
|
# layer_outputs = torch.utils.checkpoint.checkpoint(
|
||||||
|
# create_custom_forward(encoder_layer),
|
||||||
|
# hidden_states,
|
||||||
|
# attention_mask,
|
||||||
|
# causal_attention_mask,
|
||||||
|
# )
|
||||||
|
# else:
|
||||||
|
layer_outputs = encoder_layer(
|
||||||
|
hidden_states,
|
||||||
|
attention_mask,
|
||||||
|
causal_attention_mask,
|
||||||
|
output_attentions=output_attentions,
|
||||||
|
)
|
||||||
|
|
||||||
|
hidden_states = layer_outputs[0]
|
||||||
|
|
||||||
|
if output_attentions:
|
||||||
|
all_attentions = all_attentions + (layer_outputs[1],)
|
||||||
|
|
||||||
|
if output_hidden_states:
|
||||||
|
encoder_states = encoder_states + (hidden_states,)
|
||||||
|
|
||||||
|
if not return_dict:
|
||||||
|
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
|
||||||
|
return BaseModelOutput(
|
||||||
|
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer
|
||||||
|
class IdeficsVisionTransformer(nn.Module):
|
||||||
|
def __init__(self, prefix, config, weights):
|
||||||
|
super().__init__()
|
||||||
|
self.config = config
|
||||||
|
embed_dim = config.hidden_size
|
||||||
|
|
||||||
|
self.embeddings = IdeficsVisionEmbeddings(prefix=f"{prefix}.embeddings", config=config, weights=weights)
|
||||||
|
self.pre_layrnorm = nn.LayerNorm.load(
|
||||||
|
prefix=f"{prefix}.pre_layrnorm", weights=weights, eps=config.layer_norm_eps
|
||||||
|
)
|
||||||
|
self.encoder = IdeficsVisionEncoder(prefix=prefix, config=config, weights=weights)
|
||||||
|
self.post_layernorm = nn.LayerNorm.load(
|
||||||
|
prefix=f"{prefix}.post_layernorm", weights=weights, eps=config.layer_norm_eps
|
||||||
|
)
|
||||||
|
|
||||||
|
# copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
pixel_values: Optional[torch.FloatTensor] = None,
|
||||||
|
output_attentions: Optional[bool] = None,
|
||||||
|
output_hidden_states: Optional[bool] = None,
|
||||||
|
return_dict: Optional[bool] = None,
|
||||||
|
) -> Union[Tuple, BaseModelOutputWithPooling]:
|
||||||
|
r"""
|
||||||
|
Returns:
|
||||||
|
|
||||||
|
"""
|
||||||
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
||||||
|
output_hidden_states = (
|
||||||
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
||||||
|
)
|
||||||
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
||||||
|
|
||||||
|
if pixel_values is None:
|
||||||
|
raise ValueError("You have to specify pixel_values")
|
||||||
|
|
||||||
|
hidden_states = self.embeddings(pixel_values)
|
||||||
|
hidden_states = self.pre_layrnorm(hidden_states)
|
||||||
|
|
||||||
|
encoder_outputs = self.encoder(
|
||||||
|
inputs_embeds=hidden_states,
|
||||||
|
output_attentions=output_attentions,
|
||||||
|
output_hidden_states=output_hidden_states,
|
||||||
|
return_dict=return_dict,
|
||||||
|
)
|
||||||
|
|
||||||
|
last_hidden_state = encoder_outputs[0]
|
||||||
|
pooled_output = last_hidden_state[:, 0, :]
|
||||||
|
pooled_output = self.post_layernorm(pooled_output)
|
||||||
|
|
||||||
|
if not return_dict:
|
||||||
|
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
|
||||||
|
|
||||||
|
return BaseModelOutputWithPooling(
|
||||||
|
last_hidden_state=last_hidden_state,
|
||||||
|
pooler_output=pooled_output,
|
||||||
|
hidden_states=encoder_outputs.hidden_states,
|
||||||
|
attentions=encoder_outputs.attentions,
|
||||||
|
)
|
server/text_generation_server/models/idefics.py (new file, 91 lines)
@ -0,0 +1,91 @@
import torch
import torch.distributed

from typing import List, Optional, Tuple

from transformers import (
    AutoTokenizer,
    AutoConfig,
    AutoProcessor,
)

from text_generation_server.models.custom_modeling.idefics_config import IdeficsConfig
from text_generation_server.models.custom_modeling.idefics_processing import (
    IdeficsProcessor,
)
from transformers import LlamaTokenizerFast
from text_generation_server.models.custom_modeling.idefics_modeling import (
    IdeficsForVisionText2Text,
)
from text_generation_server.models.idefics_causal_lm import IdeficsCausalLM
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)


class IDEFICSSharded(IdeficsCausalLM):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
        self.process_group, rank, world_size = initialize_torch_distributed()
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            # 9b seems to work correctly enough in float16, but 80b seems
            # to be really saturating for f16.
            dtype = torch.bfloat16 if dtype is None else dtype
        else:
            device = torch.device("cpu")
            dtype = torch.float32
        self.device, self.dtype = device, dtype

        config = IdeficsConfig.from_pretrained(
            model_id,
            revision=revision,
            trust_remote_code=trust_remote_code,
        )
        config.quantize = quantize
        config.vision_config.quantize = quantize

        tokenizer = LlamaTokenizerFast.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )
        self.processor = IdeficsProcessor.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )

        torch.distributed.barrier(group=self.process_group)
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(
            filenames,
            device=device,
            dtype=dtype,
            process_group=self.process_group,
        )

        model = IdeficsForVisionText2Text(config, weights)

        torch.distributed.barrier(group=self.process_group)
        super(IdeficsCausalLM, self).__init__(
            model=model,
            tokenizer=tokenizer,
            requires_padding=True,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
        )
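A hedged sketch of how this class is meant to be constructed. It assumes the usual sharded TGI launch environment (e.g. `text-generation-launcher --num-shard ...`), since `initialize_torch_distributed()` needs a process group; the model id is just an example:

```python
def load_idefics_sharded():
    # Not runnable standalone: requires torch.distributed to be initialized by the launcher.
    return IDEFICSSharded(
        model_id="HuggingFaceM4/idefics-9b",
        revision=None,
        quantize=None,
        dtype=None,  # None -> bfloat16 on GPU, float32 on CPU (see __init__ above)
        trust_remote_code=False,
    )
```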
server/text_generation_server/models/idefics_causal_lm.py (new file, 806 lines)
@ -0,0 +1,806 @@
|
|||||||
|
import torch
|
||||||
|
import inspect
|
||||||
|
import re
|
||||||
|
from io import BytesIO
|
||||||
|
import base64
|
||||||
|
from PIL import Image
|
||||||
|
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from opentelemetry import trace
|
||||||
|
from transformers import AutoProcessor, AutoTokenizer, AutoModelForCausalLM, PreTrainedTokenizerBase, ProcessorMixin
|
||||||
|
from typing import Optional, Tuple, List, Type, Dict
|
||||||
|
|
||||||
|
from text_generation_server.models import Model
|
||||||
|
from text_generation_server.models.types import (
|
||||||
|
Batch,
|
||||||
|
PrefillTokens,
|
||||||
|
Generation,
|
||||||
|
GeneratedText,
|
||||||
|
)
|
||||||
|
from text_generation_server.pb import generate_pb2
|
||||||
|
from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
IMAGES = re.compile(r'!\[[^\]]*\]\((.*?)\s*(\"(?:.*[^\"])\")?\s*\)')
|
||||||
|
|
||||||
|
def split(string):
|
||||||
|
parts = []
|
||||||
|
cursor = 0
|
||||||
|
for pattern in IMAGES.finditer(string):
|
||||||
|
start = pattern.start()
|
||||||
|
if start != cursor:
|
||||||
|
parts.append(string[cursor:start])
|
||||||
|
|
||||||
|
parts.append(pattern.group(1))
|
||||||
|
cursor = pattern.end()
|
||||||
|
|
||||||
|
if cursor != len(string):
|
||||||
|
parts.append(string[cursor:])
|
||||||
|
|
||||||
|
return parts
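For example, a request whose text embeds an image in markdown syntax gets split into alternating text and URL parts:

```python
prompt = "User:![](https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg)Describe this image.\nAssistant:"
print(split(prompt))
# ['User:', 'https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg', 'Describe this image.\nAssistant:']
```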
|
||||||
|
|
||||||
|
tracer = trace.get_tracer(__name__)


@dataclass
class IdeficsCausalLMBatch(Batch):
    batch_id: int
    requests: List[generate_pb2.Request]
    requests_idx_mapping: Dict[int, int]

    # Decoder values
    input_ids: torch.Tensor
    attention_mask: torch.Tensor
    position_ids: torch.Tensor
    pixel_values: Optional[torch.Tensor]
    image_hidden_states: Optional[torch.Tensor]
    image_attention_mask: Optional[torch.Tensor]
    past_key_values: Optional[List[Tuple]]

    # All tokens
    all_input_ids: List[torch.Tensor]

    # Lengths of all generations present in the batch
    input_lengths: List[int]
    prefix_offsets: List[int]
    read_offsets: List[int]

    # Generation helpers
    next_token_choosers: List[NextTokenChooser]
    stopping_criterias: List[StoppingCriteria]

    # Metadata used for padding
    max_input_length: int
    padding_right_offset: int

    # Maximum number of tokens this batch will grow to
    max_tokens: int

    # Past metadata
    keys_head_dim_last: bool = True

    def to_pb(self) -> generate_pb2.CachedBatch:
        return generate_pb2.CachedBatch(
            id=self.batch_id,
            request_ids=[r.id for r in self.requests],
            size=len(self),
            max_tokens=self.max_tokens,
        )

    @classmethod
    def from_pb(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        processor: ProcessorMixin,  # Hack
        dtype: torch.dtype,
        device: torch.device,
    ) -> "IdeficsCausalLMBatch":
        inputs = []
        next_token_choosers = []
        stopping_criterias = []
        prefix_offsets = []
        read_offsets = []
        requests_idx_mapping = {}

        # Parse batch
        max_truncation = 0
        padding_right_offset = 0
        max_decode_tokens = 0
        for i, r in enumerate(pb.requests):
            requests_idx_mapping[r.id] = i
            inputs.append(r.inputs)
            next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device))
            stopping_criteria = StoppingCriteria.from_pb(
                r.stopping_parameters, tokenizer
            )
            stopping_criterias.append(stopping_criteria)
            max_truncation = max(max_truncation, r.truncate)
            max_decode_tokens += stopping_criteria.max_new_tokens
            padding_right_offset = max(
                padding_right_offset, stopping_criteria.max_new_tokens
            )

        prompts = []
        for inp in inputs:
            # Each input is encoded into a list, where each element of this input list is either a string or a URL
            prompts.append(split(inp))

        # The processor replaces the call to the tokenizer, and
        # a/ takes care of fetching images from the URLs
        # b/ generates the correct input_ids, attention_mask, pixel_values, image_attention_mask to feed to the model
        tokenized_inputs = processor(
            prompts,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=max_truncation,
            add_end_of_utterance_token=False,  # Already taken care of inside the prompts, so bypassing the processor's handling of this token
        ).to(device)
        for _ in pb.requests:
            input_len = tokenized_inputs["input_ids"].shape[1]
            prefix_offsets.append(input_len - 5)  # To decode without potential fallback errors
            read_offsets.append(input_len)  # To decode without potential fallback errors

        input_lengths = tokenized_inputs["attention_mask"].sum(1)
        max_input_length = input_lengths.max()

        input_ids = tokenized_inputs["input_ids"]
        pixel_values = tokenized_inputs["pixel_values"]
        image_hidden_states = None
        # Allocate maximum attention_mask
        attention_mask = input_ids.new_zeros(
            (pb.size, max_input_length + padding_right_offset)
        )
        # Copy tokenizer attention_mask into fully allocated attention_mask
        attention_mask[:, :max_input_length] = tokenized_inputs["attention_mask"]
        # Do the same for image_attention_mask
        image_attention_mask = input_ids.new_zeros(
            (pb.size, max_input_length + padding_right_offset, tokenized_inputs["pixel_values"].size(1))
        )
        image_attention_mask[:, :max_input_length, :] = tokenized_inputs["image_attention_mask"]

        position_ids = tokenized_inputs["attention_mask"].long().cumsum(-1) - 1
        position_ids.masked_fill_(tokenized_inputs["attention_mask"] == 0, 1)
        all_input_ids = tokenized_inputs["input_ids"].T.split(1, dim=1)  # It's input_ids but split into a tuple of tensors where each tensor is (seq_len, 1) size. It is then transformed into a list

        max_tokens = len(inputs) * (max_input_length + max_decode_tokens)

        return cls(
            batch_id=pb.id,
            requests=pb.requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            pixel_values=pixel_values,
            image_hidden_states=image_hidden_states,
            image_attention_mask=image_attention_mask,
            past_key_values=None,
            all_input_ids=list(all_input_ids),
            input_lengths=input_lengths.tolist(),
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            next_token_choosers=next_token_choosers,
            stopping_criterias=stopping_criterias,
            max_input_length=max_input_length.item(),
            padding_right_offset=padding_right_offset,
            max_tokens=max_tokens,
        )

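    # For orientation (shapes are indicative, not asserted by this file): the processor call
    # above returns "input_ids" and "attention_mask" of shape (batch, seq_len), "pixel_values"
    # of shape (batch, num_images, 3, height, width) and "image_attention_mask" of shape
    # (batch, seq_len, num_images); the batch then right-pads the two masks with
    # `padding_right_offset` extra positions so that tokens generated later can be written
    # into the same tensors in place.
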
    @tracer.start_as_current_span("filter")
    def filter(self, request_ids: List[int]) -> Optional["IdeficsCausalLMBatch"]:
        # It deletes requests from the batch. For instance when a client loses its connection
        if len(request_ids) == 0:
            raise ValueError("Batch must have at least one request")
        if len(request_ids) == len(self):
            return self

        keep_indices = []

        # New values after filtering
        requests_idx_mapping = {}
        requests = []
        input_lengths = []
        prefix_offsets = []
        read_offsets = []
        all_input_ids = []
        max_input_length = 0

        next_token_choosers = []
        stopping_criterias = []

        total_remaining_decode_tokens = 0
        new_padding_right_offset = 0

        for i, request_id in enumerate(request_ids):
            idx = self.requests_idx_mapping[request_id]
            requests_idx_mapping[request_id] = i
            keep_indices.append(idx)

            requests.append(self.requests[idx])
            prefix_offsets.append(self.prefix_offsets[idx])
            read_offsets.append(self.read_offsets[idx])
            all_input_ids.append(self.all_input_ids[idx])

            request_input_length = self.input_lengths[idx]
            input_lengths.append(request_input_length)
            max_input_length = max(max_input_length, request_input_length)

            next_token_choosers.append(self.next_token_choosers[idx])
            stopping_criteria = self.stopping_criterias[idx]
            stopping_criterias.append(stopping_criteria)
            remaining_decode_tokens = (
                stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
            )
            total_remaining_decode_tokens += remaining_decode_tokens
            new_padding_right_offset = max(
                new_padding_right_offset, remaining_decode_tokens
            )

        # Apply indices to input_ids, attention mask, past key values and other items that need to be cached
        input_ids = self.input_ids[keep_indices]
        position_ids = self.position_ids[keep_indices]
        self.attention_mask = self.attention_mask[
            keep_indices,
            -(self.padding_right_offset + max_input_length) : (
                self.attention_mask.shape[1] - self.padding_right_offset
            )
            + new_padding_right_offset,
        ]
        # Do the same for pixel_values and image_attention_mask
        pixel_values = self.pixel_values[keep_indices]
        self.image_attention_mask = self.image_attention_mask[
            keep_indices,
            -(self.padding_right_offset + max_input_length) : (
                self.image_attention_mask.shape[1] - self.padding_right_offset
            )
            + new_padding_right_offset,
            :,
        ]
        if self.image_hidden_states is None:
            image_hidden_states = None
        else:
            image_hidden_states = self.image_hidden_states[keep_indices]

        # Ensure that past_key_values tensors can be updated in-place
        if type(self.past_key_values[0]) == tuple:
            self.past_key_values = [list(layer) for layer in self.past_key_values]

        # Update tensors in-place to allow incremental garbage collection
        past_kv_length = max_input_length - 1
        for layer in self.past_key_values:
            past_keys, past_values = layer
            if len(past_keys.shape) == 3:
                # Force past to be of dim [self_size, num_heads, ...] for easy indexing
                past_keys = past_keys.view(len(self), -1, *past_keys.shape[-2:])
                past_values = past_values.view(len(self), -1, *past_values.shape[-2:])
            if self.keys_head_dim_last:
                layer[0] = past_keys[keep_indices, :, -past_kv_length:, :]
            else:
                layer[0] = past_keys[keep_indices, :, :, -past_kv_length:]
            del past_keys
            layer[1] = past_values[keep_indices, :, -past_kv_length:, :]
            del past_values

        max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens

        self.requests = requests
        self.requests_idx_mapping = requests_idx_mapping
        self.input_ids = input_ids
        self.pixel_values = pixel_values
        self.image_hidden_states = image_hidden_states
        self.position_ids = position_ids
        self.all_input_ids = all_input_ids
        self.input_lengths = input_lengths
        self.prefix_offsets = prefix_offsets
        self.read_offsets = read_offsets
        self.next_token_choosers = next_token_choosers
        self.stopping_criterias = stopping_criterias
        self.max_input_length = max_input_length
        self.padding_right_offset = new_padding_right_offset
        self.max_tokens = max_tokens

        return self

    @classmethod
    @tracer.start_as_current_span("concatenate")
    def concatenate(cls, batches: List["IdeficsCausalLMBatch"]) -> "IdeficsCausalLMBatch":
        # It adds new requests to the batch
        # Used for padding
        total_batch_size = 0
        max_input_length = 0
        max_num_images = 0
        padding_right_offset = 0
        for batch in batches:
            total_batch_size += len(batch)
            max_input_length = max(max_input_length, batch.max_input_length)
            max_num_images = max(max_num_images, batch.pixel_values.size(1))
            padding_right_offset = max(padding_right_offset, batch.padding_right_offset)

        # Batch attributes
        requests = []
        requests_idx_mapping = {}
        input_lengths = []
        prefix_offsets = []
        read_offsets = []
        all_input_ids = []
        next_token_choosers = []
        stopping_criterias = []
        max_tokens = 0

        # Batch tensors
        input_ids = None
        attention_mask = None
        position_ids = None
        pixel_values = None
        image_hidden_states = None
        image_attention_mask = None
        past_key_values = []

        # Used for slicing correctly inside the tensors
        # Equivalent to a cumsum on batch sizes
        start_index = 0
        for i, batch in enumerate(batches):
            requests.extend(batch.requests)
            input_lengths.extend(batch.input_lengths)
            prefix_offsets.extend(batch.prefix_offsets)
            read_offsets.extend(batch.read_offsets)
            all_input_ids.extend(batch.all_input_ids)
            next_token_choosers.extend(batch.next_token_choosers)
            stopping_criterias.extend(batch.stopping_criterias)

            if i == 0:
                requests_idx_mapping = batch.requests_idx_mapping
            else:
                # We need to offset the mapping for each batch by the cumulative batch size
                for k, v in batch.requests_idx_mapping.items():
                    requests_idx_mapping[k] = v + start_index

            # Slicing end index for this batch
            end_index = start_index + len(batch)

            # We only concatenate batches that did at least one step
            if batch.past_key_values is None:
                raise ValueError("only concatenate prefilled batches")

            # Create empty tensor
            # input_ids is always of shape [batch_size, 1]
            # We do not need to pad it
            if input_ids is None:
                input_ids = batch.input_ids.new_empty((total_batch_size, 1))
            # Copy to correct indices
            input_ids[start_index:end_index] = batch.input_ids

            # Create padded tensor
            if attention_mask is None:
                attention_mask = batch.attention_mask.new_zeros(
                    (total_batch_size, max_input_length + padding_right_offset),
                )

            curr_batch_max_num_images = batch.pixel_values.size(1)
            if pixel_values is None:
                pixel_values = batch.pixel_values.new_zeros((total_batch_size, max_num_images, 3, 224, 224))
            pixel_values[start_index:end_index, :curr_batch_max_num_images] = batch.pixel_values

            if image_attention_mask is None:
                image_attention_mask = batch.image_attention_mask.new_zeros(
                    (total_batch_size, max_input_length + padding_right_offset, max_num_images)
                )

            # We need to slice the attention mask to remove padding from previous steps
            # and to remove unused allocated space
            left_offset = max_input_length - batch.max_input_length
            batch_left_offset = (
                batch.attention_mask.shape[1]
                - batch.max_input_length
                - batch.padding_right_offset
            )
            attention_mask[
                start_index:end_index,
                left_offset:-padding_right_offset,
            ] = batch.attention_mask[
                :,
                batch_left_offset : -batch.padding_right_offset,
            ]
            image_attention_mask[
                start_index:end_index,
                left_offset:-padding_right_offset,
                :curr_batch_max_num_images,
            ] = batch.image_attention_mask[
                :,
                batch_left_offset : -batch.padding_right_offset,
                :,
            ]

            # Create empty tensor
            # position_ids is always of shape [batch_size, 1]
            if position_ids is None:
                position_ids = batch.position_ids.new_empty((total_batch_size, 1))
            position_ids[start_index:end_index] = batch.position_ids

            # Shenanigans to get dimensions because BLOOM outputs a past with a different shape
            # BLOOM Keys:   [batch_size * num_heads, head_dim, seq_length]
            # BLOOM Values: [batch_size * num_heads, seq_length, head_dim]
            # And ensure that we can update tensors in-place
            if type(batch.past_key_values[0]) == tuple:
                batch.past_key_values = [
                    [t.view(len(batch), -1, *t.shape[-2:]) for t in layer]
                    for layer in batch.past_key_values
                ]
            elif len(batch.past_key_values[0][0].shape) == 3:
                for layer in batch.past_key_values:
                    for k, t in enumerate(layer):
                        layer[k] = t.view(len(batch), -1, *t.shape[-2:])

            # Add eventual padding tokens that were added while concatenating
            max_tokens += batch.max_tokens + (
                max_input_length - batch.max_input_length
            ) * len(batch)

            start_index = end_index

        first_past_kvs = batches[0].past_key_values
        _, num_heads, padded_sequence_length, head_dim = first_past_kvs[0][1].shape

        padded_past_values_shape = (
            total_batch_size,
            num_heads,
            max_input_length - 1,
            head_dim,
        )

        if batches[0].keys_head_dim_last:
            padded_past_keys_shape = padded_past_values_shape
        else:
            # seq_length is last for BLOOM
            padded_past_keys_shape = (
                total_batch_size,
                num_heads,
                head_dim,
                max_input_length - 1,
            )

        # Iterate over attention layers
        # Concatenate past key values layer by layer to allow incremental garbage collection
        for j in range(len(first_past_kvs)):
            padded_past_keys = first_past_kvs[j][0].new_zeros(padded_past_keys_shape)
            start_index = 0
            for batch in batches:
                past_keys = batch.past_key_values[j][0]
                # Clear reference to the original tensor
                batch.past_key_values[j][0] = None

                # Slicing end index for this batch
                end_index = start_index + len(batch)
                # We slice the keys to remove the padding from previous batches
                past_seq_len = batch.max_input_length - 1
                if batch.keys_head_dim_last:
                    padded_past_keys[
                        start_index:end_index, :, -past_seq_len:, :
                    ] = past_keys[:, :, -past_seq_len:, :]
                else:
                    # BLOOM case
                    padded_past_keys[
                        start_index:end_index, :, :, -past_seq_len:
                    ] = past_keys[:, :, :, -past_seq_len:]
                del past_keys

                start_index = end_index

            padded_past_values = first_past_kvs[j][1].new_zeros(
                padded_past_values_shape
            )
            start_index = 0
            for batch in batches:
                past_values = batch.past_key_values[j][1]
                # Clear reference to the original tensor
                batch.past_key_values[j][1] = None

                # Slicing end index for this batch
                end_index = start_index + len(batch)
                # We slice the past values to remove the padding from previous batches
                past_seq_len = batch.max_input_length - 1
                padded_past_values[
                    start_index:end_index, :, -past_seq_len:, :
                ] = past_values[:, :, -past_seq_len:, :]
                del past_values

                # Update values
                start_index = end_index

            past_key_values.append([padded_past_keys, padded_past_values])

        return cls(
            batch_id=batches[0].batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            pixel_values=pixel_values,
            image_hidden_states=image_hidden_states,
            image_attention_mask=image_attention_mask,
            past_key_values=past_key_values,
            all_input_ids=all_input_ids,
            input_lengths=input_lengths,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            next_token_choosers=next_token_choosers,
            stopping_criterias=stopping_criterias,
            max_input_length=max_input_length,
            padding_right_offset=padding_right_offset,
            keys_head_dim_last=batches[0].keys_head_dim_last,
            max_tokens=max_tokens,
        )

    def __len__(self):
        return len(self.requests)


class IdeficsCausalLM(Model):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
        from text_generation_server.models.custom_modeling.idefics_modeling import IdeficsForVisionText2Text

        if torch.cuda.is_available():
            device = torch.device("cuda")
            dtype = torch.float16 if dtype is None else dtype
        else:
            if quantize:
                raise ValueError("quantization is not available on CPU")

            device = torch.device("cpu")
            dtype = torch.float32

        tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )
        self.processor = AutoProcessor.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )
        model = IdeficsForVisionText2Text.from_pretrained(
            model_id,
            revision=revision,
            torch_dtype=dtype,
            device_map="auto"
            if torch.cuda.is_available() and torch.cuda.device_count() > 1
            else None,
            load_in_8bit=quantize == "bitsandbytes",
            trust_remote_code=trust_remote_code,
        )
        if torch.cuda.is_available() and torch.cuda.device_count() == 1:
            model = model.cuda()

        if tokenizer.pad_token_id is None:
            if model.config.pad_token_id is not None:
                tokenizer.pad_token_id = model.config.pad_token_id
            elif model.config.eos_token_id is not None:
                tokenizer.pad_token_id = model.config.eos_token_id
            elif tokenizer.eos_token_id is not None:
                tokenizer.pad_token_id = tokenizer.eos_token_id
            else:
                tokenizer.add_special_tokens({"pad_token": "<unk>"})

        super(IdeficsCausalLM, self).__init__(
            model=model,
            tokenizer=tokenizer,
            requires_padding=True,
            dtype=dtype,
            device=device,
        )

    @property
    def batch_type(self) -> Type[IdeficsCausalLMBatch]:
        return IdeficsCausalLMBatch

    def decode(self, generated_ids: List[int]) -> str:
        return self.tokenizer.decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )

    def forward(
        self,
        input_ids,
        attention_mask,
        position_ids,
        pixel_values,
        image_hidden_states,
        image_attention_mask,
        past_key_values: Optional = None,
    ) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]:
        # Model Forward
        kwargs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "image_hidden_states": image_hidden_states,
            "image_attention_mask": image_attention_mask,
            "past_key_values": past_key_values,
            "use_cache": True,
            "return_dict": True,
        }
        if self.has_position_ids:
            kwargs["position_ids"] = position_ids

        outputs = self.model.forward(**kwargs)
        return outputs.logits, outputs.past_key_values, outputs.image_hidden_states

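    # Note (indicative): unlike the text-only CausalLM path, forward also returns
    # `image_hidden_states`; generate_token below stores them on the batch and passes them
    # back on subsequent calls, so the image features are presumably not recomputed at
    # every decoding step.
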
    @tracer.start_as_current_span("generate_token")
    def generate_token(
        self, batch: IdeficsCausalLMBatch
    ) -> Tuple[List[Generation], Optional[IdeficsCausalLMBatch]]:
        # slice the attention mask to the correct shape
        attention_mask = batch.attention_mask[:, : -batch.padding_right_offset]
        if batch.input_ids.size(1) == 1:
            # THIS is a hack: when calling idefics.generate, the first time, we need the whole image_attention_mask (size bs x max_seq_len x max_num_images),
            # but on subsequent calls we only need the last attention mask along the `max_seq_len` dimension.
            # This is due to the nature of IDEFICS: it's an encoder-decoder, and so when decoding, only the currently generated
            # token needs to attend to the encoder hidden states (i.e. the vision encoder).
            # Also see seq2seq_lm.Seq2SeqLM.generate_token which has roughly the same logic
            image_attention_mask = batch.image_attention_mask[:, -(batch.padding_right_offset + 1)].unsqueeze(1)
        else:
            image_attention_mask = batch.image_attention_mask[:, : -batch.padding_right_offset]

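        # Shape sketch (illustrative, with bs = batch size and n = max number of images):
        # the single-token decode case above keeps only the last valid column, i.e. a
        # (bs, 1, n) mask, while the prefill case keeps the full (bs, seq_len, n) mask.
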
        logits, past, image_hidden_states = self.forward(
            input_ids=batch.input_ids,
            attention_mask=attention_mask,
            position_ids=batch.position_ids,
            pixel_values=batch.pixel_values,
            image_hidden_states=batch.image_hidden_states,
            image_attention_mask=image_attention_mask,
            past_key_values=batch.past_key_values,
        )
        # Hardcoded remove image tokens
        logits[:, 32000:32001] = torch.finfo(logits.dtype).min

        # Results
        generations: List[Generation] = []
        stopped = True

        # Zipped iterator
        iterator = zip(
            batch.requests,
            batch.input_lengths,
            batch.prefix_offsets,
            batch.read_offsets,
            logits,
            batch.next_token_choosers,
            batch.stopping_criterias,
            batch.all_input_ids,
        )

        # For each member of the batch
        for i, (
            request,
            input_length,
            prefix_offset,
            read_offset,
            logits,
            next_token_chooser,
            stopping_criteria,
            all_input_ids,
        ) in enumerate(iterator):
            # Select next token
            next_token_id, logprobs = next_token_chooser(
                all_input_ids.view(1, -1), logits[-1:, :]
            )

            # Append next token to all tokens
            all_input_ids = torch.cat([all_input_ids, next_token_id])
            new_input_length = input_length + 1

            # Generated token
            next_token_logprob = logprobs[-1, next_token_id]
            next_token_id_squeezed = next_token_id.squeeze()
            next_token_text, prefix_offset, read_offset = self.decode_token(
                all_input_ids[:, 0], prefix_offset, read_offset
            )

            # Evaluate stopping criteria
            stop, reason = stopping_criteria(
                next_token_id_squeezed,
                next_token_text,
            )

            if not stop:
                stopped = False

            # Shard generations
            # All generations will be appended in the rust sharded client
            if i % self.world_size == self.rank:
                if stop:
                    # Decode generated tokens
                    output_text = self.decode(
                        all_input_ids[-stopping_criteria.current_tokens :, 0]
                    )
                    # Get seed
                    if isinstance(next_token_chooser.choice, Sampling):
                        seed = next_token_chooser.choice.seed
                    else:
                        seed = None

                    generated_text = GeneratedText(
                        output_text, stopping_criteria.current_tokens, reason, seed
                    )
                else:
                    generated_text = None

                # Prefill
                if stopping_criteria.current_tokens == 1 and request.prefill_logprobs:
                    # Remove generated token to only have prefill and add nan for first prompt token
                    prefill_logprobs = [float("nan")] + torch.log_softmax(
                        logits, -1
                    ).gather(1, all_input_ids[1:]).squeeze(1)[
                        -new_input_length:-1
                    ].tolist()
                    prefill_token_ids = all_input_ids[-new_input_length:-1]
                    prefill_texts = self.tokenizer.batch_decode(
                        prefill_token_ids,
                        clean_up_tokenization_spaces=False,
                        skip_special_tokens=False,
                    )
                    prefill_tokens = PrefillTokens(
                        prefill_token_ids, prefill_logprobs, prefill_texts
                    )
                else:
                    prefill_tokens = None

                generation = Generation(
                    request.id,
                    prefill_tokens,
                    next_token_id_squeezed,
                    next_token_logprob,
                    next_token_text,
                    next_token_id_squeezed.item() in self.all_special_ids,
                    generated_text,
                )

                generations.append(generation)

            # Update values
            batch.input_ids[i, 0] = next_token_id
            batch.all_input_ids[i] = all_input_ids
            batch.input_lengths[i] = new_input_length
            batch.prefix_offsets[i] = prefix_offset
            batch.read_offsets[i] = read_offset
            batch.max_input_length = max(batch.max_input_length, new_input_length)

        # We finished all generations in the batch; there is no next batch
        if stopped:
            return generations, None

        # Slice unused values from prefill
        batch.input_ids = batch.input_ids[:, :1]

        # Update attention_mask as we added a new token to input_ids
        batch.attention_mask[:, -batch.padding_right_offset] = 1
        batch.image_attention_mask[:, -batch.padding_right_offset, :] = batch.image_attention_mask[:, -(batch.padding_right_offset + 1), :]
        # Decrease right offset
        batch.padding_right_offset -= 1

        # Update position_ids
        batch.position_ids = batch.position_ids[:, -1:] + 1

        # Update past key values
        batch.past_key_values = past
        batch.image_hidden_states = image_hidden_states

        return generations, batch
@ -14,7 +14,7 @@ from text_generation_server.interceptor import ExceptionInterceptor
from text_generation_server.models import Model, get_model
from text_generation_server.pb import generate_pb2_grpc, generate_pb2
from text_generation_server.tracing import UDSOpenTelemetryAioServerInterceptor
from text_generation_server.models.idefics_causal_lm import IdeficsCausalLMBatch

class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
    def __init__(self, model: Model, cache: Cache, server_urls: List[str]):
@ -26,6 +26,7 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
        # Force inference mode for the lifetime of TextGenerationService
        self._inference_mode_raii_guard = torch._C._InferenceMode(True)


    async def Info(self, request, context):
        return self.model.info

@ -54,9 +55,14 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
        return generate_pb2.FilterBatchResponse(batch=filtered_batch.to_pb())

    async def Warmup(self, request, context):
        if self.model.batch_type == IdeficsCausalLMBatch:  # Hack: I would rather use kwargs in the `from_pb` call
            batch = self.model.batch_type.from_pb(
                request.batch, self.model.tokenizer, self.model.processor, self.model.dtype, self.model.device
            )
        else:
            batch = self.model.batch_type.from_pb(
                request.batch, self.model.tokenizer, self.model.dtype, self.model.device
            )
        max_supported_total_tokens = self.model.warmup(batch)

        return generate_pb2.WarmupResponse(
@ -64,9 +70,14 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
        )

    async def Prefill(self, request, context):
        if self.model.batch_type == IdeficsCausalLMBatch:  # Hack: I would rather use kwargs in the `from_pb` call
            batch = self.model.batch_type.from_pb(
                request.batch, self.model.tokenizer, self.model.processor, self.model.dtype, self.model.device
            )
        else:
            batch = self.model.batch_type.from_pb(
                request.batch, self.model.tokenizer, self.model.dtype, self.model.device
            )

        generations, next_batch = self.model.generate_token(batch)
        self.cache.set(next_batch)
|
@ -51,7 +51,31 @@ def load_layer_norm_no_bias(cls, prefix, weights, eps):
|
|||||||
ln.bias = None
|
ln.bias = None
|
||||||
return ln
|
return ln
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def load_conv2d(cls, prefix, weights, in_channels, out_channels, kernel_size, stride):
|
||||||
|
weight = weights.get_tensor(f"{prefix}.weight")
|
||||||
|
bias = weights.get_tensor(f"{prefix}.bias")
|
||||||
|
with init_empty_weights():
|
||||||
|
conv2d = cls(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride)
|
||||||
|
|
||||||
|
conv2d.weight = nn.Parameter(weight)
|
||||||
|
conv2d.bias = nn.Parameter(bias)
|
||||||
|
return conv2d
|
||||||
|
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def load_conv2d_no_bias(cls, prefix, weights, in_channels, out_channels, kernel_size, stride):
|
||||||
|
weight = weights.get_tensor(f"{prefix}.weight")
|
||||||
|
with init_empty_weights():
|
||||||
|
conv2d = cls(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride)
|
||||||
|
|
||||||
|
conv2d.weight = nn.Parameter(weight)
|
||||||
|
conv2d.bias = None
|
||||||
|
return conv2d
|
||||||
|
|
||||||
|
|
||||||
|
torch.nn.Conv2d.load = load_conv2d
|
||||||
|
torch.nn.Conv2d.load_no_bias = load_conv2d_no_bias
|
||||||
torch.nn.LayerNorm.load = load_layer_norm
|
torch.nn.LayerNorm.load = load_layer_norm
|
||||||
torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias
|
torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias
|
||||||
|
|
||||||
|
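# Usage sketch (illustrative only; the prefix and channel sizes below are hypothetical and
# not taken from this diff): with the monkey-patched helpers above, a convolution such as a
# vision patch embedding can be built straight from a Weights instance, e.g.
#
#     conv = torch.nn.Conv2d.load(
#         prefix="model.vision_model.embeddings.patch_embedding",  # hypothetical weight prefix
#         weights=weights,       # a text_generation_server Weights object
#         in_channels=3,
#         out_channels=1280,     # hypothetical hidden size
#         kernel_size=14,
#         stride=14,
#     )
#
# alongside the existing torch.nn.LayerNorm.load / load_no_bias helpers.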