# Stream a chat completion from a locally running Ollama model and print
# the reply to stdout as it is generated.
import ollama

# Single-turn conversation: one user message asking for a self-introduction.
conversation = [
    {
        'role': 'user',
        'content': 'Hello, could you make a presentation of your self',
    }
]

# stream=True makes ollama.chat return an iterator of partial-response
# chunks instead of one complete message.
response_stream = ollama.chat(
    model='llama3:8b',
    messages=conversation,
    stream=True,
)

# Emit each partial chunk immediately; flush=True keeps output real-time
# rather than line-buffered.
for part in response_stream:
    print(part['message']['content'], end='', flush=True)