OpenAI Completion Client

Source: vllm-project/vllm.
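This example assumes a vLLM OpenAI-compatible server is already running at `http://localhost:8000`, for example one started with `vllm serve <model>` (or the `vllm.entrypoints.openai.api_server` module in older releases).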

```python
from openai import OpenAI

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

client = OpenAI(
    # defaults to os.environ.get("OPENAI_API_KEY")
    api_key=openai_api_key,
    base_url=openai_api_base,
)

models = client.models.list()
model = models.data[0].id

# Completion API
stream = False
completion = client.completions.create(
    model=model,
    prompt="A robot may not injure a human being",
    echo=False,
    n=2,
    stream=stream,
    logprobs=3)

print("Completion results:")
if stream:
    for c in completion:
        print(c)
else:
    print(completion)
```