Roman190928 committed
Commit c18214e · verified · 1 Parent(s): ee69716

Update app.py

Files changed (1):
  1. app.py +7 -8
app.py CHANGED
@@ -21,13 +21,14 @@ pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
+from PIL import Image
 def safe_infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_steps):
-    # check if prompt contains any banned words
-    if any(word in prompt.lower() for word in targets):
+    clean = ''.join(c for c in prompt.lower() if c.isalnum() or c.isspace())
+    if any(word in clean for word in targets):
         print("Found at least one banned word!")
-        return "Refused due to safety.", seed
+        blank = Image.new("RGB", (width, height), (0, 0, 0))
+        return blank, seed
 
-    # otherwise run normal inference
     return infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_steps)
 
 # @spaces.GPU #[uncomment to use ZeroGPU]
@@ -130,7 +131,7 @@ with gr.Blocks(css=css) as demo:
                 guidance_scale = gr.Slider(
                     label="Guidance scale",
                     minimum=0.0,
-                    maximum=10.0,
+                    maximum=20.0,
                     step=0.1,
                     value=3.6, # Replace with defaults that work for your model
                 )
@@ -138,7 +139,7 @@ with gr.Blocks(css=css) as demo:
                 num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
-                    maximum=50,
+                    maximum=80,
                     step=1,
                     value=15, # Replace with defaults that work for your model
                 )
@@ -146,8 +147,6 @@ with gr.Blocks(css=css) as demo:
         gr.Examples(examples=examples, inputs=[prompt])
     gr.on(
         triggers=[run_button.click, prompt.submit],
-        if any(word in prompt for word in targets):
-            print("Found at least one!")
         fn=safe_infer,
         inputs=[
             prompt,
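
For reference, here is a minimal standalone sketch of how the updated safe_infer guard behaves. The banned-word list targets is defined elsewhere in app.py and is not part of this diff, so the placeholder list and the helper names is_blocked and blocked_output below are hypothetical, for illustration only:

from PIL import Image

# Hypothetical stand-in for the targets list defined elsewhere in app.py.
targets = ["forbiddenword"]

def is_blocked(prompt):
    # Mirror the commit's normalization: lowercase, drop every character that is
    # not alphanumeric or whitespace, then do a substring check per banned word.
    clean = ''.join(c for c in prompt.lower() if c.isalnum() or c.isspace())
    return any(word in clean for word in targets)

def blocked_output(width, height, seed):
    # Same (image, seed) shape as the normal infer() path, but a solid black
    # PIL image instead of a generated one.
    blank = Image.new("RGB", (width, height), (0, 0, 0))
    return blank, seed

print(is_blocked("a scenic mountain lake"))      # False
print(is_blocked("a ForBidden-Word example"))    # True: punctuation is stripped before matching
image, seed = blocked_output(512, 512, seed=0)
print(image.size, seed)                          # (512, 512) 0

Two consequences of this approach are visible in the diff: the substring check also matches banned words embedded inside longer words, and both branches of safe_infer now return the same (image, seed) shape, unlike the old branch that returned the string "Refused due to safety.".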
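
The two lines deleted in the last hunk sat inside the gr.on(...) argument list, where an if statement is a syntax error; after this commit the filtering happens only inside safe_infer. As a sketch of the pattern, here is a small self-contained Gradio demo of the same wiring, with hypothetical components (an echo callback and a Textbox result) standing in for the ones in app.py:

import gradio as gr

def echo(prompt):
    # Stand-in callback; in app.py this role is played by safe_infer.
    return f"You asked for: {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Result")

    # gr.on() takes keyword arguments (triggers, fn, inputs, outputs); an if
    # statement cannot appear inside this call, which is why the two deleted
    # lines were invalid and the banned-word check moved into the callback.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=echo,
        inputs=[prompt],
        outputs=[result],
    )

if __name__ == "__main__":
    demo.launch()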