@@ -5,9 +5,9 @@ import time
 import wget
 import stat


 class Llm:
     def __init__(self, config):
         self.interpreter = config["interpreter"]
         config.pop("interpreter", None)
@@ -20,7 +20,6 @@ class Llm:
         self.llm = self.interpreter.llm.completions

     def install(self, service_directory):
         if platform.system() == "Darwin":  # Check if the system is MacOS
             result = subprocess.run(
                 ["xcode-select", "-p"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
@@ -30,7 +29,9 @@ class Llm:
                     "Llamafile requires Mac users to have Xcode installed. You can install Xcode from https://developer.apple.com/xcode/ .\n\nAlternatively, you can use `LM Studio`, `Jan.ai`, or `Ollama` to manage local language models. Learn more at https://docs.openinterpreter.com/guides/running-locally ."
                 )
                 time.sleep(3)
-                raise Exception("Xcode is not installed. Please install Xcode and try again.")
+                raise Exception(
+                    "Xcode is not installed. Please install Xcode and try again."
+                )

         # Define the path to the models directory
         models_dir = os.path.join(service_directory, "models")
@@ -52,8 +53,6 @@ class Llm:
             url = "https://huggingface.co/jartine/phi-2-llamafile/resolve/main/phi-2.Q4_K_M.llamafile"
             wget.download(url, llamafile_path)

         # Make the new llamafile executable
         if platform.system() != "Windows":
             st = os.stat(llamafile_path)
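(Note on this hunk: it cuts off right after `os.stat`; given the `import stat` at the top of the file, the next unshown line presumably ORs the execute bit into the file mode. A sketch of that idiom, with a hypothetical path standing in for the `llamafile_path` built earlier in `install`:

import os
import stat

llamafile_path = "models/phi-2.Q4_K_M.llamafile"  # hypothetical; the real path comes from models_dir
st = os.stat(llamafile_path)
# Add the execute bit to the existing mode rather than replacing the mode outright.
os.chmod(llamafile_path, st.st_mode | stat.S_IEXEC)
)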
@@ -63,11 +62,15 @@ class Llm:
         if os.path.exists(llamafile_path):
             try:
                 # Test if the llamafile is executable
-                subprocess.check_call([llamafile_path])
+                subprocess.check_call([f'"{llamafile_path}"'], shell=True)
             except subprocess.CalledProcessError:
-                print("The llamafile is not executable. Please check the file permissions.")
+                print(
+                    "The llamafile is not executable. Please check the file permissions."
+                )
                 raise
-            subprocess.Popen([llamafile_path, "-ngl", "9999"])
+            subprocess.Popen(
+                f'"{llamafile_path}" ' + " ".join(["-ngl", "9999"]), shell=True
+            )
         else:
             error_message = "The llamafile does not exist or is corrupted. Please ensure it has been downloaded correctly or try again."
             print(error_message)
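(Note on the diff as a whole: the behavioral change is the switch from list-style subprocess invocations to `shell=True` with the path wrapped in double quotes, so a llamafile stored under a path containing spaces still launches. Manual quoting guards against spaces but not against other shell metacharacters; `shlex.quote` from the standard library escapes both. A sketch of that alternative, using a hypothetical path; this is one possible hardening, not what the patch does:

import shlex
import subprocess

llamafile_path = "/Users/me/My Models/phi-2.Q4_K_M.llamafile"  # hypothetical path with spaces
# shlex.quote wraps the path safely for the shell, covering spaces and metacharacters.
subprocess.Popen(f"{shlex.quote(llamafile_path)} -ngl 9999", shell=True)
)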