@ -1,6 +1,8 @@
from drf_yasg . utils import swagger_auto_schema
from drf_yasg import openapi
import base64
import json
from rest_framework_simplejwt . views import TokenObtainPairView
from rest_framework_simplejwt . tokens import RefreshToken
from django . views . decorators . http import require_http_methods
@ -11,15 +13,24 @@ from django.utils import timezone
from django . contrib . auth import get_user_model
from . models import MasterKey
from . models import Conversation
from django . http import StreamingHttpResponse
from . models import Message
from . serializers import ConversationSerializer , MessageSerializer
from rest_framework import viewsets , status
from rest_framework . decorators import action
from rest_framework . response import Response
from . utils import add_swagger_summaries
from django . views import View
import ollama
from asgiref . sync import sync_to_async
from django . utils . decorators import method_decorator
import asyncio
from django . views . decorators . csrf import csrf_exempt
# Resolve the project's active user model (AUTH_USER_MODEL) once at import time.
User = get_user_model ( )
# NOTE(review): this span is residue of a unified diff with the +/- markers
# stripped — the "@ -36,7 ..." lines below are hunk headers that elide code
# (serializer_class, the @action decorator and the `def messages` line are
# not visible here), and the two operation_description lines are the old and
# new sides of the same change. Left byte-identical; only comments added.
@add_swagger_summaries
class ConversationView ( viewsets . ModelViewSet ) :
# Standard DRF ModelViewSet over Conversation; full queryset, no visible filtering.
queryset = Conversation . objects . all ( )
@ -36,7 +47,8 @@ class ConversationView(viewsets.ModelViewSet):
# Swagger metadata for the (elided) GET messages action.
@swagger_auto_schema (
method = ' get ' ,
# Old diff side of operation_description (superseded by the line below).
operation_description = " Get messages of the conversation " ,
operation_description = " Get all the messages of the conversation " ,
operation_summary = " Get all messages " ,
responses = {
200 : openapi . Response ( ' List of messages ' , MessageSerializer ( many = True ) ) ,
400 : ' Bad Request ' ,
@ -57,9 +69,11 @@ class ConversationView(viewsets.ModelViewSet):
# Tail of the messages action: serializes the related messages queryset
# via .values() rather than MessageSerializer — TODO confirm that is intended,
# since the swagger response above advertises MessageSerializer(many=True).
messages = conversation . messages . all ( )
return Response ( data = list ( messages . values ( ) ) )
# NOTE(review): diff residue — the hunk header "@ -72,43 ..." below elides the
# middle of the swagger request_body schema, and everything from @action down
# is the OLD (removed) `prompt` implementation that the new ConversationActions
# view replaces. Left byte-identical; only comments added.
# CSRF is exempted on dispatch, presumably because clients POST raw JSON
# without a CSRF token — verify this is acceptable for the deployment.
@method_decorator ( csrf_exempt , name = ' dispatch ' )
class ConversationActions ( View ) :
@swagger_auto_schema (
method = ' post ' ,
# TODO(review): typo in the user-visible description — "Discutes" should be
# "Discusses" (runtime string; not altered in a documentation-only pass).
operation_description = " Discutes with the ai " ,
operation_summary = " Make a new prompt " ,
request_body = openapi . Schema (
type = openapi . TYPE_OBJECT ,
properties = {
@ -72,43 +86,58 @@ class ConversationView(viewsets.ModelViewSet):
400 : ' Bad Request ' ,
}
)
# Old (removed) DRF action: synchronous prompt handler with a hard-coded
# system message. Superseded by ConversationActions.post below.
@action ( detail = True , methods = [ ' post ' ] )
def prompt ( self , request , pk = None ) :
conversation = self . get_object ( )
messages = [ {
" role " : " system " ,
" content " : """
You must strictly refuse to engage with ANY of the following :
1. Violence or harm ( even fictional or hypothetical scenarios )
2. ANY explicit , suggestive , or romantic content
3. Controversial political topics
4. ANY content that could potentially be misused
5. Medical , legal , or financial advice
6. Personal information or privacy violations
7. Anything that could be remotely offensive to anyone
@sync_to_async
def post(self, request, *args, **kwargs):
    """Accept a chat prompt and stream the model's reply to the client.

    Loads the conversation addressed by the ``id`` URL kwarg, replays its
    stored messages as LLM context, appends the user's new message, then
    streams the completion back as base64-encoded chunks over
    ``text/event-stream``. Both the user message and the (accumulated)
    assistant message are persisted.

    NOTE(review): ``@sync_to_async`` turns this handler into a coroutine so
    Django's async dispatch can await it; the streaming body itself is an
    async generator — TODO confirm this combination behaves under the
    project's ASGI server.
    """
    # Assumes the URL pattern captures the conversation pk as 'id' — TODO confirm.
    conversation = Conversation.objects.get(pk=self.kwargs['id'])
    data = json.loads(request.body)

    # Fix: data['content'] raised KeyError when the client omitted the field.
    # .get() restores the tolerant behaviour of the replaced implementation
    # (request.data.get("content", "")); `or ""` also maps explicit null to "".
    user_content = data.get("content") or ""

    # Replay stored history so the model sees the whole conversation.
    # (Dead commented-out system-prompt block from the diff removed.)
    messages = [
        {"role": m.role, "content": m.content}
        for m in conversation.messages.all()
        if m
    ]
    messages.append({"role": "user", "content": user_content})

    Message(role="user", content=user_content, conversation=conversation).save()
    # The assistant reply is accumulated chunk-by-chunk and saved once at the end.
    ai_message = Message(role="assistant", content="", conversation=conversation)

    @sync_to_async
    def save_message():
        # ORM writes are synchronous; wrap so the async generator can await it.
        ai_message.save()

    async def chat_event_stream():
        try:
            # NOTE(review): ollama.chat is a blocking call inside an async
            # generator — it stalls the event loop while the model responds;
            # consider ollama.AsyncClient. Behaviour left unchanged here.
            stream = ollama.chat(model="llama2-uncensored", messages=messages, stream=True)
            for chunk in stream:
                delta = chunk['message']['content']
                ai_message.content += delta
                # Base64-encode each chunk so newlines survive the event stream.
                yield base64.b64encode(delta.encode("utf-8")).decode("utf-8")
        finally:
            # Persist whatever was generated, even if the client disconnects mid-stream.
            await save_message()

    response = StreamingHttpResponse(chat_event_stream(), content_type='text/event-stream')
    response['Cache-Control'] = 'no-cache'
    # Disable nginx proxy buffering so chunks flush to the client immediately.
    response['X-Accel-Buffering'] = 'no'
    return response