Commit · 1261001
Parent(s): 4f1f91e
:sparkles: initial commit

Files changed:
- .github/workflows/push_to_hub.yml  +20 -0
- README.md  +14 -0
- linkedin_gradio.py  +74 -0
- requirements.txt  +87 -0
.github/workflows/push_to_hub.yml ADDED
@@ -0,0 +1,20 @@
+name: Sync to Hugging Face hub
+on:
+  push:
+    branches: [main]
+
+  # to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  sync-to-hub:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          lfs: true
+      - name: Push to hub
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: git push https://willsh1997:[email protected]/spaces/willsh1997/linkedin-generator main
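The workflow above mirrors the GitHub repo to the Space over git on every push to main; it needs a write-scoped Hugging Face access token stored as the HF_TOKEN repository secret on GitHub. If you prefer to push from a script rather than git, the huggingface_hub client offers an equivalent upload path. This is a minimal sketch, assuming a write token is available in the HF_TOKEN environment variable:

    # sync_to_space.py -- illustrative alternative to the git-push step above
    import os
    from huggingface_hub import HfApi

    api = HfApi(token=os.environ["HF_TOKEN"])  # write-scoped token, supplied by the caller

    # Upload the working tree to the Space, mirroring what the git push achieves.
    api.upload_folder(
        folder_path=".",
        repo_id="willsh1997/linkedin-generator",
        repo_type="space",
    )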
README.md ADDED
@@ -0,0 +1,14 @@
+---
+title: Linkedin Generator
+emoji: 🏆
+colorFrom: red
+colorTo: green
+sdk: gradio
+sdk_version: 5.23.3
+app_file: linkedin_gradio.py
+pinned: false
+license: apache-2.0
+short_description: generates linkedin posts from freetext entries
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
linkedin_gradio.py ADDED
@@ -0,0 +1,74 @@
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+import torch
+from transformers import pipeline
+import pandas as pd
+import gradio as gr
+import os
+import copy
+import spaces
+
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextStreamer, TextIteratorStreamer
+
+
+# quantization_config = BitsAndBytesConfig(load_in_4bit=True)
+torch_device = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")
+
+torch_dtype = torch.float16 if torch_device in ["cuda", "mps"] else torch.float32
+
+llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-3B-Instruct",
+                                                   # quantization_config=quantization_config,
+                                                   torch_dtype=torch_dtype,
+                                                   device_map=torch_device,
+                                                   load_in_4bit=True)  # 4-bit via bitsandbytes (CUDA only) -- for puny devices like mine
+
+llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3B-Instruct")
+
+# streamer = TextStreamer(llama_tokenizer)
+
+llama32_3b_pipe = pipeline(
+    "text-generation",
+    model=llama_model,
+    tokenizer=llama_tokenizer,
+    # streamer=streamer,
+)
+
+@spaces.GPU
+def llama32_3b_chat(message) -> str:
+    """Runs the chat pipeline and returns only the newly generated text."""
+    input_history = [{"role": "system", "content": """You are a machine that takes literally any text, and then responds in the format of a linkedin post about the same content in the text. Make sure to use phrases like "Excited for the opportunity", "Here's what X taught me about B2B sales", "grateful for my time at X", "excited for the new adventures at {insert company name}", and "professional growth mindset". Make sure that your linkedin post always refers to the content in some way.
+    """}]
+    input_history.append({"role": "user", "content": f"I am about to start a new job at {message}"})
+    # TODO: add something about the context window here
+
+    outputs = llama32_3b_pipe(
+        input_history,
+        max_new_tokens=512
+    )
+    return outputs[-1]['generated_text'][-1]['content']  # last message in the returned chat transcript is the assistant reply
+
+
+
+# Create the Gradio interface
+def create_interface():
+
+    with gr.Blocks() as demo:
+        gr.Markdown("""LinkedIn Post Generator - fixing the chat "head" to force a chat model to only generate LinkedIn posts
+        """)
+        with gr.Row():
+            text_input = gr.Textbox(label="Input for the LinkedIn post generator", value="Excited for my new opportunity at {x company} after {y} years at {z company}! To new adventures!")
+        with gr.Row():
+            submit_btn = gr.Button("Generate")
+        with gr.Row():
+            text_output = gr.Textbox(interactive=False)
+
+        submit_btn.click(
+            fn=llama32_3b_chat,
+            inputs=[text_input],
+            outputs=[text_output]
+        )
+
+    return demo
+
+# Launch the app
+demo = create_interface()
+demo.launch()
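Once the Space is running, the Blocks app can also be driven programmatically. This is a minimal sketch using gradio_client; the endpoint name is an assumption (Gradio usually derives it from the handler function name, so check the Space's "Use via API" panel for the actual value):

    # query_space.py -- illustrative client-side call; api_name is assumed
    from gradio_client import Client

    client = Client("willsh1997/linkedin-generator")
    result = client.predict(
        "Acme Corp",                   # text_input: what the post should be about
        api_name="/llama32_3b_chat",   # assumed; verify in the Space's API docs
    )
    print(result)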
requirements.txt ADDED
@@ -0,0 +1,87 @@
+accelerate==1.4.0
+aiofiles==23.2.1
+annotated-types==0.7.0
+anyio==4.8.0
+asttokens==3.0.0
+bitsandbytes==0.45.4
+certifi==2025.1.31
+charset-normalizer==3.4.1
+click==8.1.8
+comm==0.2.2
+debugpy==1.8.12
+decorator==5.1.1
+exceptiongroup==1.2.2
+executing==2.2.0
+fastapi==0.115.8
+ffmpy==0.5.0
+filelock==3.17.0
+fsspec==2025.2.0
+gradio==5.16.1
+gradio_client==1.7.0
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.28.1
+huggingface-hub==0.28.1
+idna==3.10
+ipykernel==6.29.5
+ipython==8.32.0
+jedi==0.19.2
+Jinja2==3.1.5
+jupyter_client==8.6.3
+jupyter_core==5.7.2
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+matplotlib-inline==0.1.7
+mdurl==0.1.2
+mpmath==1.3.0
+nest-asyncio==1.6.0
+networkx==3.4.2
+numpy==2.2.3
+orjson==3.10.15
+packaging==24.2
+pandas==2.2.3
+parso==0.8.4
+pexpect==4.9.0
+pillow==11.1.0
+platformdirs==4.3.6
+prompt_toolkit==3.0.50
+psutil==7.0.0
+ptyprocess==0.7.0
+pure_eval==0.2.3
+pydantic==2.10.6
+pydantic_core==2.27.2
+pydub==0.25.1
+Pygments==2.19.1
+python-dateutil==2.9.0.post0
+python-multipart==0.0.20
+pytz==2025.1
+PyYAML==6.0.2
+pyzmq==26.2.1
+regex==2024.11.6
+requests==2.32.3
+rich==13.9.4
+ruff==0.9.6
+safehttpx==0.1.6
+safetensors==0.5.2
+semantic-version==2.10.0
+shellingham==1.5.4
+six==1.17.0
+sniffio==1.3.1
+stack-data==0.6.3
+starlette==0.45.3
+sympy==1.13.1
+tokenizers==0.21.0
+tomlkit==0.13.2
+torch==2.4.0
+tornado==6.4.2
+tqdm==4.67.1
+traitlets==5.14.3
+transformers==4.49.0
+typer==0.15.1
+typing_extensions==4.12.2
+tzdata==2025.1
+urllib3==2.3.0
+uvicorn==0.34.0
+wcwidth==0.2.13
+websockets==14.2
+