OpenAI Chat Completion Client

Source: vllm-project/vllm.

This example sends a multi-turn chat conversation to a vLLM OpenAI-compatible API server (listening at http://localhost:8000/v1) using the official openai Python client; the API key is only a placeholder.

from openai import OpenAI

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

client = OpenAI(
    # defaults to os.environ.get("OPENAI_API_KEY")
    api_key=openai_api_key,
    base_url=openai_api_base,
)

models = client.models.list()
model = models.data[0].id

chat_completion = client.chat.completions.create(
    messages=[{
        "role": "system",
        "content": "You are a helpful assistant."
    }, {
        "role": "user",
        "content": "Who won the world series in 2020?"
    }, {
        "role": "assistant",
        "content": "The Los Angeles Dodgers won the World Series in 2020."
    }, {
        "role": "user",
        "content": "Where was it played?"
    }],
    model=model,
)

print("Chat completion results:")
print(chat_completion)
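
The call above prints the full response object. If you only want the assistant's reply text, or want tokens as they are generated, the same client can be reused as in the sketch below. This is a minimal sketch that assumes the client and model variables from the example; stream=True and the choices/delta fields are standard OpenAI Python SDK behavior, which vLLM's server also supports.

# Extract just the assistant's reply text from the response above.
print(chat_completion.choices[0].message.content)

# Streaming variant: the server sends the reply incrementally as chunks.
stream = client.chat.completions.create(
    messages=[{"role": "user", "content": "Where was it played?"}],
    model=model,
    stream=True,
)
for chunk in stream:
    # Each chunk carries a small delta of generated text; the content
    # field may be None for chunks that carry no new text.
    delta = chunk.choices[0].delta.content
    if delta is not None:
        print(delta, end="", flush=True)
print()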