ishandutta committed
Commit b02d315 · verified
1 Parent(s): ee0ed55

Create rag_pipeline.py

Files changed (1)
  1. rag_pipeline.py +545 -0
rag_pipeline.py ADDED
@@ -0,0 +1,545 @@
+ """
+ SCRIPT 4/5: rag_pipeline.py - Complete RAG Pipeline Integration for Shoe Search
+
+ Colab - https://colab.research.google.com/drive/1rq-ywjykHBw7xPXCmd3DmZdK6T9bhDtA?usp=sharing
+
+ This script integrates all three phases of the RAG pipeline:
+ 1. RETRIEVAL: Vector search and data management (from retriever.py)
+ 2. AUGMENTATION: Context enhancement and prompt engineering (from augmenter.py)
+ 3. GENERATION: LLM setup and response generation (from generator.py)
+
+ Key Concepts:
+ - RAG (Retrieval-Augmented Generation): A technique that combines information retrieval
+   with language generation to provide accurate, contextual responses
+ - Pipeline Integration: Connecting multiple AI components in sequence
+ - End-to-End Processing: Complete workflow from query to final response
+ - Multi-modal Search: Supporting both text and image queries
+
+ Required Dependencies:
+ - All dependencies from retriever.py, augmenter.py, and generator.py
+
+ Commands to run:
+ # Complete RAG pipeline with text query
+ python rag_pipeline.py --query "recommend running shoes for men"
+
+ # Complete RAG pipeline with image query
+ python rag_pipeline.py --query "hf_shoe_images/shoe_0000.jpg"
+
+ # RAG pipeline with OpenAI model (Requires API key)
+ python rag_pipeline.py --query "comfortable sneakers" --model-provider openai --openai-api-key YOUR_KEY
+
+ # RAG pipeline with detailed step tracking
+ python rag_pipeline.py --query "blue shoes" --detailed-steps
+
+ # Setup database and run pipeline
+ python rag_pipeline.py --setup-db --query "recommend me men's casual shoes"
+
+ # Pipeline without LLM (retrieval only)
+ python rag_pipeline.py --query "recommend me men's running shoes" --no-llm
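+
+ # Programmatic usage (a minimal sketch, not part of the CLI; assumes a database already
+ # built with --setup-db and the default database/table names defined below)
+ from retriever import MyntraShoesEnhanced
+ from rag_pipeline import run_complete_shoes_rag_pipeline
+
+ rag_result = run_complete_shoes_rag_pipeline(
+     database="myntra_shoes_db",
+     table_name="myntra_shoes_table",
+     schema=MyntraShoesEnhanced,
+     search_query="recommend running shoes for men",
+     limit=3,
+     use_llm=True,
+ )
+ print(rag_result["response"])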
+ """
+
+ import argparse
+ from typing import Any, Dict, List, Optional
+
+ from openai import OpenAI
+ from PIL import Image
+
+ from augmenter import QueryType, SimpleShoePrompts
+ from generator import (
+     generate_shoes_rag_response,
+     get_available_models,
+     setup_openai_client,
+     setup_qwen_model,
+ )
+
+ # Import components from other modules
+ from retriever import MyntraShoesEnhanced, create_shoes_table_from_hf, run_shoes_search
+
+
+ def run_complete_shoes_rag_pipeline(
+     database: str,
+     table_name: str,
+     schema: Any,
+     search_query: Any,  # Can be text string or image path/PIL Image
+     limit: int = 3,
+     use_llm: bool = True,
+     use_advanced_prompts: bool = True,
+     search_type: str = "auto",
+     model_provider: str = "qwen",
+     model_name: str = "Qwen/Qwen2.5-0.5B-Instruct",
+     openai_api_key: Optional[str] = None,
+ ) -> Dict[str, Any]:
+     """Run complete RAG pipeline integrating Retrieval, Augmentation, and Generation."""
+
+     # SECTION 1: RETRIEVAL - Get relevant shoes from vector database
+     print("🔍 RETRIEVAL: Searching for relevant shoes...")
+     results, actual_search_type = run_shoes_search(
+         database, table_name, schema, search_query, limit, search_type=search_type
+     )
+
+     if not results:
+         return {
+             "query": search_query,
+             "results": [],
+             "response": "No results found",
+             "search_type": actual_search_type,
+         }
+
+     if not use_llm:
+         return {
+             "query": search_query,
+             "results": results,
+             "response": None,
+             "search_type": actual_search_type,
+         }
+
+     # SECTION 2: AUGMENTATION - Process and enhance context with prompt engineering
+     try:
+         print("📝 AUGMENTATION: Enhancing context with prompt engineering...")
+
+         # Set up prompt manager and analyze query
+         prompt_manager = SimpleShoePrompts()
+
+         # For image search, use appropriate query text
+         if actual_search_type == "image":
+             query_text = "similar shoes based on the provided image"
+             print(f" └─ Image search - using search query type")
+         else:
+             query_text = str(search_query)
+             query_type = prompt_manager.classify_query(query_text)
+             print(f" └─ Text query classified as: {query_type.value}")
+
+         # Format context and generate enhanced prompt
+         enhanced_prompt = prompt_manager.generate_prompt(
+             query_text, results, actual_search_type
+         )
+         print(f" └─ Context formatted with {len(results)} retrieved shoes")
+
+         # SECTION 3: GENERATION - Setup LLM and generate response
+         print("🤖 GENERATION: Setting up LLM and generating response...")
+
+         tokenizer, model, openai_client = None, None, None
+
+         if model_provider == "openai":
+             if not openai_api_key:
+                 raise ValueError("OpenAI API key is required for OpenAI models")
+             openai_client = setup_openai_client(openai_api_key)
+             print(f" └─ OpenAI client setup with model: {model_name}")
+         else:
+             tokenizer, model = setup_qwen_model(model_name)
+             print(f" └─ Qwen model loaded: {model_name}")
+
+         # Generate final response using augmented context
+         response = generate_shoes_rag_response(
+             query=query_text,
+             retrieved_shoes=results,
+             model_provider=model_provider,
+             model_name=model_name,
+             openai_client=openai_client,
+             tokenizer=tokenizer,
+             model=model,
+             max_tokens=200,
+             use_advanced_prompts=use_advanced_prompts,
+         )
+
+         # Add prompt analysis
+         if actual_search_type == "image":
+             final_query_type = QueryType.SEARCH.value
+         else:
+             final_query_type = query_type.value
+
+         prompt_analysis = {
+             "query_type": final_query_type,
+             "num_results": len(results),
+             "search_type": actual_search_type,
+         }
+
+         return {
+             "query": search_query,
+             "results": results,
+             "response": response,
+             "prompt_analysis": prompt_analysis,
+             "search_type": actual_search_type,
+         }
+     except Exception as e:
+         print(f"LLM generation failed: {e}")
+         return {
+             "query": search_query,
+             "results": results,
+             "response": "LLM unavailable - showing search results only",
+             "search_type": actual_search_type,
+         }
+
+
+ def run_complete_shoes_rag_pipeline_with_details(
+     database: str,
+     table_name: str,
+     schema: Any,
+     search_query: Any,  # Can be text string or image path/PIL Image
+     limit: int = 3,
+     use_llm: bool = True,
+     use_advanced_prompts: bool = True,
+     search_type: str = "auto",
+     model_provider: str = "qwen",
+     model_name: str = "Qwen/Qwen2.5-0.5B-Instruct",
+     openai_api_key: Optional[str] = None,
+ ) -> Dict[str, Any]:
+     """Run complete RAG pipeline with detailed step tracking."""
+
+     # Initialize step details
+     retrieval_details = ""
+     augmentation_details = ""
+     generation_details = ""
+
+     # SECTION 1: RETRIEVAL - Get relevant shoes from vector database
+     retrieval_details += "🔍 RETRIEVAL PHASE\n"
+     retrieval_details += "=" * 50 + "\n"
+     retrieval_details += f"🎯 Query Type: {search_type}\n"
+     retrieval_details += f"🔍 Searching vector database...\n"
+
+     results, actual_search_type = run_shoes_search(
+         database, table_name, schema, search_query, limit, search_type=search_type
+     )
+
+     retrieval_details += f"✅ Search completed!\n"
+     retrieval_details += f"📊 Search Type Detected: {actual_search_type}\n"
+     retrieval_details += f"📈 Results Found: {len(results)}\n\n"
+
+     if results:
+         retrieval_details += "🎯 Retrieved Products:\n"
+         for i, result in enumerate(results, 1):
+             retrieval_details += f" {i}. {result.get('product_type', 'Shoe')} for {result.get('gender', 'Unisex')}\n"
+             retrieval_details += f" Color: {result.get('color', 'N/A')}\n"
+             retrieval_details += f" Pattern: {result.get('pattern', 'N/A')}\n"
+             if result.get("description"):
+                 # Show full description without truncation
+                 retrieval_details += f" Description: {result['description']}\n"
+             retrieval_details += "\n"
+     else:
+         retrieval_details += "❌ No results found\n"
+         return {
+             "query": search_query,
+             "results": [],
+             "response": "No results found",
+             "search_type": actual_search_type,
+             "retrieval_details": retrieval_details,
+             "augmentation_details": "⏭️ Skipped - No results to process",
+             "generation_details": "⏭️ Skipped - No results to process",
+         }
+
+     if not use_llm:
+         return {
+             "query": search_query,
+             "results": results,
+             "response": None,
+             "search_type": actual_search_type,
+             "retrieval_details": retrieval_details,
+             "augmentation_details": "⏭️ Skipped - LLM disabled",
+             "generation_details": "⏭️ Skipped - LLM disabled",
+         }
+
+     # SECTION 2: AUGMENTATION - Process and enhance context with prompt engineering
+     try:
+         augmentation_details += "📝 AUGMENTATION PHASE\n"
+         augmentation_details += "=" * 50 + "\n"
+
+         # Set up prompt manager and analyze query
+         prompt_manager = SimpleShoePrompts()
+
+         # For image search, use appropriate query text
+         if actual_search_type == "image":
+             query_text = "similar shoes based on the provided image"
+             augmentation_details += f"🖼️ Image Search Detected\n"
+             augmentation_details += f"🔄 Query Text: '{query_text}'\n"
+         else:
+             query_text = str(search_query)
+             query_type = prompt_manager.classify_query(query_text)
+             augmentation_details += f"📝 Text Query: '{query_text}'\n"
+             augmentation_details += f"🎯 Query Classification: {query_type.value}\n"
+
+         # Format context and generate enhanced prompt
+         enhanced_prompt = prompt_manager.generate_prompt(
+             query_text, results, actual_search_type
+         )
+
+         augmentation_details += f"📊 Context Processing:\n"
+         augmentation_details += f" • Products formatted: {len(results)}\n"
+         augmentation_details += (
+             f" • Prompt strategy: {'Advanced' if use_advanced_prompts else 'Basic'}\n"
+         )
+         augmentation_details += (
+             f" • Prompt length: {len(enhanced_prompt)} characters\n\n"
+         )
+
+         # Show the full prompt instead of preview
+         augmentation_details += f"🔍 Full Prompt:\n{enhanced_prompt}\n\n"
+
+         # SECTION 3: GENERATION - Setup LLM and generate response
+         generation_details += "🤖 GENERATION PHASE\n"
+         generation_details += "=" * 50 + "\n"
+         generation_details += f"🏭 Model Provider: {model_provider}\n"
+         generation_details += f"🎯 Model Name: {model_name}\n"
+
+         tokenizer, model, openai_client = None, None, None
+
+         if model_provider == "openai":
+             if not openai_api_key:
+                 raise ValueError("OpenAI API key is required for OpenAI models")
+             openai_client = setup_openai_client(openai_api_key)
+             generation_details += f"✅ OpenAI client initialized\n"
+             generation_details += f"🔑 API key: {'*' * (len(openai_api_key) - 8) + openai_api_key[-4:] if len(openai_api_key) > 8 else '****'}\n"
+         else:
+             tokenizer, model = setup_qwen_model(model_name)
+             generation_details += f"✅ Qwen model loaded\n"
+             generation_details += f"💾 Model size: {model_name}\n"
+
+         generation_details += f"⚙️ Generation settings:\n"
+         generation_details += f" • Max tokens: 200\n"
+         generation_details += f" • Temperature: 0.1 (low for consistency)\n"
+         generation_details += f" • Advanced prompts: {use_advanced_prompts}\n\n"
+
+         generation_details += f"🔄 Generating response...\n"
+
+         # Generate final response using augmented context
+         response = generate_shoes_rag_response(
+             query=query_text,
+             retrieved_shoes=results,
+             model_provider=model_provider,
+             model_name=model_name,
+             openai_client=openai_client,
+             tokenizer=tokenizer,
+             model=model,
+             max_tokens=200,
+             use_advanced_prompts=use_advanced_prompts,
+         )
+
+         generation_details += f"✅ Response generated!\n"
+         generation_details += f"📏 Response length: {len(response)} characters\n"
+         generation_details += f"📝 Full Response:\n{response}\n"
+
+         # Add prompt analysis
+         if actual_search_type == "image":
+             final_query_type = QueryType.SEARCH.value
+         else:
+             final_query_type = query_type.value
+
+         prompt_analysis = {
+             "query_type": final_query_type,
+             "num_results": len(results),
+             "search_type": actual_search_type,
+         }
+
+         return {
+             "query": search_query,
+             "results": results,
+             "response": response,
+             "prompt_analysis": prompt_analysis,
+             "search_type": actual_search_type,
+             "retrieval_details": retrieval_details,
+             "augmentation_details": augmentation_details,
+             "generation_details": generation_details,
+         }
+     except Exception as e:
+         error_msg = f"❌ LLM generation failed: {str(e)}"
+         generation_details += error_msg
+         return {
+             "query": search_query,
+             "results": results,
+             "response": "LLM unavailable - showing search results only",
+             "search_type": actual_search_type,
+             "retrieval_details": retrieval_details,
+             "augmentation_details": augmentation_details,
+             "generation_details": generation_details,
+         }
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(
+         description="Complete RAG Pipeline for Shoe Search"
+     )
+     parser.add_argument(
+         "--query", type=str, help="Search query (text) or image file path"
+     )
+     parser.add_argument(
+         "--search-type",
+         choices=["auto", "text", "image"],
+         default="auto",
+         help="Force search type (default: auto-detect)",
+     )
+     parser.add_argument(
+         "--limit", type=int, default=3, help="Number of results to retrieve"
+     )
+     parser.add_argument(
+         "--database", type=str, default="myntra_shoes_db", help="Database path"
+     )
+     parser.add_argument(
+         "--table-name", type=str, default="myntra_shoes_table", help="Table name"
+     )
+
+     # Model configuration
+     parser.add_argument(
+         "--model-provider",
+         choices=["qwen", "openai"],
+         default="qwen",
+         help="Model provider to use",
+     )
+     parser.add_argument("--model-name", type=str, help="Model name to use")
+     parser.add_argument(
+         "--openai-api-key", type=str, help="OpenAI API key (required for OpenAI models)"
+     )
+     parser.add_argument(
+         "--use-advanced-prompts",
+         action="store_true",
+         default=True,
+         help="Use advanced prompt engineering",
+     )
+     parser.add_argument(
+         "--basic-prompts",
+         action="store_true",
+         help="Use basic prompts instead of advanced",
+     )
+
+     # Pipeline options
+     parser.add_argument(
+         "--no-llm", action="store_true", help="Run retrieval only, skip LLM generation"
+     )
+     parser.add_argument(
+         "--detailed-steps",
+         action="store_true",
+         help="Show detailed step-by-step breakdown",
+     )
+     parser.add_argument(
+         "--setup-db",
+         action="store_true",
+         help="Setup database from HuggingFace dataset",
+     )
+     parser.add_argument(
+         "--sample-size", type=int, default=500, help="Sample size for database setup"
+     )
+
+     args = parser.parse_args()
+
+     # Setup database if requested
+     if args.setup_db:
+         print("🔄 Setting up database from HuggingFace dataset...")
+         create_shoes_table_from_hf(
+             database=args.database,
+             table_name=args.table_name,
+             sample_size=args.sample_size,
+             save_images=True,
+         )
+         print("✅ Database setup complete!")
+         if not args.query:
+             exit(0)
+
+     # Validate query
+     if not args.query:
+         print("❌ Please provide a query using --query")
+         print("\nExample usage:")
+         print(" # Setup database first")
+         print(" python rag_pipeline.py --setup-db")
+         print(" # Complete RAG pipeline with text query")
+         print(" python rag_pipeline.py --query 'recommend running shoes for men'")
+         print(" # RAG pipeline with image query")
+         print(" python rag_pipeline.py --query 'path/to/shoe.jpg' --search-type image")
+         print(" # RAG pipeline with OpenAI")
+         print(
+             " python rag_pipeline.py --query 'comfortable sneakers' --model-provider openai --openai-api-key YOUR_KEY"
+         )
+         print(" # Detailed step tracking")
+         print(" python rag_pipeline.py --query 'blue shoes' --detailed-steps")
+         exit(1)
+
+     # Set default model names based on provider
+     available_models = get_available_models()
+     if not args.model_name:
+         args.model_name = available_models[args.model_provider][0]
+
+     # Handle basic prompts flag
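+     # (with these store_true defaults --use-advanced-prompts is always True, so the
+     # expression below effectively evaluates to `not args.basic_prompts`)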
+     use_advanced_prompts = (
+         not args.basic_prompts if args.basic_prompts else args.use_advanced_prompts
+     )
+
+     # Validate OpenAI setup
+     if args.model_provider == "openai":
+         if not args.openai_api_key:
+             print(
+                 "❌ OpenAI API key is required for OpenAI models. Use --openai-api-key"
+             )
+             exit(1)
+
+     print("🚀 Starting Complete RAG Pipeline...")
+     print("=" * 60)
+     print(f"Query: {args.query}")
+     print(f"Search Type: {args.search_type}")
+     print(f"Model Provider: {args.model_provider}")
+     print(f"Model Name: {args.model_name}")
+     print(f"Use LLM: {not args.no_llm}")
+     print(f"Advanced Prompts: {use_advanced_prompts}")
+
+     # Run pipeline
+     if args.detailed_steps:
+         rag_result = run_complete_shoes_rag_pipeline_with_details(
+             database=args.database,
+             table_name=args.table_name,
+             schema=MyntraShoesEnhanced,
+             search_query=args.query,
+             limit=args.limit,
+             use_llm=not args.no_llm,
+             use_advanced_prompts=use_advanced_prompts,
+             search_type=args.search_type,
+             model_provider=args.model_provider,
+             model_name=args.model_name,
+             openai_api_key=args.openai_api_key,
+         )
+
+         # Display detailed results
+         print("\n" + "=" * 60)
+         print("📊 RAG PIPELINE DETAILED RESULTS")
+         print("=" * 60)
+
+         print("\n" + rag_result.get("retrieval_details", "No retrieval details"))
+         print("\n" + rag_result.get("augmentation_details", "No augmentation details"))
+         print("\n" + rag_result.get("generation_details", "No generation details"))
+
+     else:
+         rag_result = run_complete_shoes_rag_pipeline(
+             database=args.database,
+             table_name=args.table_name,
+             schema=MyntraShoesEnhanced,
+             search_query=args.query,
+             limit=args.limit,
+             use_llm=not args.no_llm,
+             use_advanced_prompts=use_advanced_prompts,
+             search_type=args.search_type,
+             model_provider=args.model_provider,
+             model_name=args.model_name,
+             openai_api_key=args.openai_api_key,
+         )
+
+         # Display results
+         print("\n" + "=" * 60)
+         print("📊 RAG PIPELINE RESULTS")
+         print("=" * 60)
+         print(f"Query: {rag_result['query']}")
+         print(f"Search Type: {rag_result['search_type']}")
+         if rag_result.get("prompt_analysis"):
+             print(f"Query Type: {rag_result['prompt_analysis']['query_type']}")
+             print(f"Results Found: {rag_result['prompt_analysis']['num_results']}")
+
+         if rag_result.get("response"):
+             print(f"\n💬 RAG Response:")
+             print(rag_result["response"])
+
+         print(f"\n👟 Retrieved Shoes:")
+         for result in rag_result["results"]:
+             print(
+                 f"- {result['product_type']} ({result['gender']}) - {result['color']} - {result['pattern']}"
+             )
+             if rag_result["search_type"] == "image":
+                 print(f" 📁 Image saved: {result['image_path']}")
+
+         if rag_result["search_type"] == "image":
+             print(f"\n🖼️ Search results images saved in: shoe_search_output/")
+
+     print("\n✅ RAG Pipeline Complete!")