From 1a79475c1148f2d199507a43fea7aa02094b8e2f Mon Sep 17 00:00:00 2001 From: Kye Date: Fri, 13 Oct 2023 18:55:22 -0400 Subject: [PATCH] clean up --- example.py | 4 +- logs/SEP26.tt | 246 - logs/sep16_intepreter.txt | 40325 ---------------- openai_model.py | 9 + swarms/__init__.py | 29 +- swarms/agents/aot.py | 12 +- swarms/models/__init__.py | 7 +- swarms/models/openai_llm.py | 598 + swarms/models/openai_models.py | 958 + ...{battle_royal_swarm.py => battle_royal.py} | 35 +- swarms/tools/autogpt.py | 45 +- swarms/tools/code_intepretor.py | 73 - swarms/workers/worker.py | 4 +- 13 files changed, 1647 insertions(+), 40698 deletions(-) delete mode 100644 logs/SEP26.tt delete mode 100644 logs/sep16_intepreter.txt create mode 100644 openai_model.py create mode 100644 swarms/models/openai_llm.py create mode 100644 swarms/models/openai_models.py rename swarms/swarms/{battle_royal_swarm.py => battle_royal.py} (67%) delete mode 100644 swarms/tools/code_intepretor.py diff --git a/example.py b/example.py index d38f653c..cb72330b 100644 --- a/example.py +++ b/example.py @@ -2,14 +2,14 @@ from langchain.llms import OpenAIChat from swarms import Worker llm = OpenAIChat( - openai_api_key="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", + openai_api_key="", temperature=0.5, ) node = Worker( llm=llm, ai_name="Optimus Prime", - openai_api_key="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", + openai_api_key="", ai_role="Worker in a swarm", external_tools=None, human_in_the_loop=False, diff --git a/logs/SEP26.tt b/logs/SEP26.tt deleted file mode 100644 index e6121751..00000000 --- a/logs/SEP26.tt +++ /dev/null @@ -1,246 +0,0 @@ -defalt@owl swarms % /usr/local/bin/python3 /Users/defalt/Desktop/Athena/research/swarms/example_godmode.py - - - _________ __ __ _____ __________ _____ _________ - / _____// \ / \ / _ \ \______ \ / \ / _____/ - \_____ \ \ \/\/ // /_\ \ | _/ / \ / \ \_____ \ - / \ \ // | \| | \/ Y \ / \ -/_______ / \__/\ / \____|__ /|____|_ /\____|__ //_______ / - \/ \/ \/ \/ \/ \/ - - -/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/scipy/__init__.py:155: UserWarning: A NumPy version >=1.18.5 and <1.26.0 is required for this version of SciPy (detected version 1.26.0 - warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}" -/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.7.0) is available. It's recommended that you update to the latest version using `pip install -U deeplake`. - warnings.warn( - +-----------------------------------------------+ - | | - | | - | Add new LLMs: Give Feedback / Get Help | - | https://github.com/BerriAI/litellm/issues/new | - | | - | | - +-----------------------------------------------+ -Starting new HTTPS connection (1): raw.githubusercontent.com:443 -https://raw.githubusercontent.com:443 "GET /BerriAI/litellm/main/model_prices_and_context_window.json HTTP/1.1" 200 1207 -https://spire.bugout.dev:443 "POST /humbug/reports HTTP/1.1" 200 0 -Starting new HTTPS connection (1): pypi.org:443 -https://pypi.org:443 "GET /pypi/open-interpreter/json HTTP/1.1" 200 30961 -2023-09-26 09:57:03.526982: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. -To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. -Falling back to TensorFlow client; we recommended you install the Cloud TPU client directly with pip install cloud-tpu-client. -Creating converter from 7 to 5 -Creating converter from 5 to 7 -Creating converter from 7 to 5 -Creating converter from 5 to 7 -Popen(['git', 'version'], cwd=/Users/defalt/Desktop/Athena/research/swarms, universal_newlines=False, shell=None, istream=None) -Popen(['git', 'version'], cwd=/Users/defalt/Desktop/Athena/research/swarms, universal_newlines=False, shell=None, istream=None) -Trying paths: ['/Users/defalt/.docker/config.json', '/Users/defalt/.dockercfg'] -Found file at path: /Users/defalt/.docker/config.json -Found 'auths' section -Auth data for 916723593639.dkr.ecr.us-east-1.amazonaws.com is absent. Client might be using a credentials store instead. -Auth data for https://index.docker.io/v1/ is absent. Client might be using a credentials store instead. -Auth data for public.ecr.aws is absent. Client might be using a credentials store instead. -Found 'credsStore' section -Embeddings is not implemented for FAISS -Embeddings is not implemented for FAISS -Embeddings is not implemented for FAISS -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[1318]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -Converted retries value: 2 -> Retry(total=2, connect=None, read=None, redirect=None, status=None) -api_version=None data='{"input": [[1318]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -api_version=None data='{"input": [[1318]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -Converted retries value: 2 -> Retry(total=2, connect=None, read=None, redirect=None, status=None) -Converted retries value: 2 -> Retry(total=2, connect=None, read=None, redirect=None, status=None) -Starting new HTTPS connection (1): api.openai.com:443 -Starting new HTTPS connection (1): api.openai.com:443 -Starting new HTTPS connection (1): api.openai.com:443 -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=17 request_id=60006235d989ec97bfce0be8d4395c99 response_code=200 -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=30 request_id=eddff53cda7fcc270cb89549d04e2393 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "system", "content": "You are Optimus Prime, Worker in a swarm\\nYour decisions must always be made independently without seeking user assistance.\\nPlay to your strengths as an LLM and pursue simple strategies with no legal complications.\\nIf you have completed all your tasks, make sure to use the \\"finish\\" command.\\n\\nGOALS:\\n\\n1. What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\\n\\n\\nConstraints:\\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\\n3. No user assistance\\n4. Exclusively use the commands listed in double quotes e.g. \\"command name\\"\\n\\nCommands:\\n1. write_file: Write file to disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}, \\"text\\": {\\"title\\": \\"Text\\", \\"description\\": \\"text to write to file\\", \\"type\\": \\"string\\"}, \\"append\\": {\\"title\\": \\"Append\\", \\"description\\": \\"Whether to append to an existing file.\\", \\"default\\": false, \\"type\\": \\"boolean\\"}}\\n2. read_file: Read file from disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}}\\n3. process_csv: process_csv(llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str - Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded., args json schema: {\\"llm\\": {\\"title\\": \\"Llm\\"}, \\"csv_file_path\\": {\\"title\\": \\"Csv File Path\\", \\"type\\": \\"string\\"}, \\"instructions\\": {\\"title\\": \\"Instructions\\", \\"type\\": \\"string\\"}, \\"output_path\\": {\\"title\\": \\"Output Path\\", \\"type\\": \\"string\\"}}\\n4. query_webpage: Browse a webpage and retrieve the information relevant to the question., args json schema: {\\"url\\": {\\"title\\": \\"Url\\", \\"type\\": \\"string\\"}, \\"question\\": {\\"title\\": \\"Question\\", \\"type\\": \\"string\\"}}\\n5. human: You can ask a human for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for the human., args json schema: {\\"query\\": {\\"title\\": \\"Query\\", \\"type\\": \\"string\\"}}\\n6. compile: compile(task: str) - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.\\n\\n This provides a natural-language interface to your computer\'s general-purpose capabilities:\\n\\n Create and edit photos, videos, PDFs, etc.\\n Control a Chrome browser to perform research\\n Plot, clean, and analyze large datasets\\n ...etc.\\n \\u26a0\\ufe0f Note: You\'ll be asked to approve code before it\'s run., args json schema: {\\"task\\": {\\"title\\": \\"Task\\", \\"type\\": \\"string\\"}}\\n7. VQAinference: VQAinference(self, inputs) - Answer Question About The Image, VQA Multi-Modal Worker agent\\n description=\\"useful when you need an answer for a question based on an image. \\"\\n \\"like: what is the background color of the last image, how many cats in this figure, what is in this figure. \\"\\n \\"The input to this tool should be a comma separated string of two, representing the image_path and the question\\",, args json schema: {\\"self\\": {\\"title\\": \\"Self\\"}, \\"inputs\\": {\\"title\\": \\"Inputs\\"}}\\n8. finish: use this to signal that you have finished all your objectives, args: \\"response\\": \\"final response to let people know you have finished your objectives\\"\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\nYou should only respond in JSON format as described below \\nResponse Format: \\n{\\n \\"thoughts\\": {\\n \\"text\\": \\"thought\\",\\n \\"reasoning\\": \\"reasoning\\",\\n \\"plan\\": \\"- short bulleted\\\\n- list that conveys\\\\n- long-term plan\\",\\n \\"criticism\\": \\"constructive self-criticism\\",\\n \\"speak\\": \\"thoughts summary to say to user\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"command name\\",\\n \\"args\\": {\\n \\"arg name\\": \\"value\\"\\n }\\n }\\n} \\nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Sep 26 09:57:06 2023"}, {"role": "system", "content": "This reminds you of these events from your past:\\n[]\\n\\n"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "model": "gpt-4", "max_tokens": null, "stream": false, "n": 1, "temperature": 0.5}' message='Post details' -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "system", "content": "You are Optimus Prime, Worker in a swarm\\nYour decisions must always be made independently without seeking user assistance.\\nPlay to your strengths as an LLM and pursue simple strategies with no legal complications.\\nIf you have completed all your tasks, make sure to use the \\"finish\\" command.\\n\\nGOALS:\\n\\n1. What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\\n\\n\\nConstraints:\\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\\n3. No user assistance\\n4. Exclusively use the commands listed in double quotes e.g. \\"command name\\"\\n\\nCommands:\\n1. write_file: Write file to disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}, \\"text\\": {\\"title\\": \\"Text\\", \\"description\\": \\"text to write to file\\", \\"type\\": \\"string\\"}, \\"append\\": {\\"title\\": \\"Append\\", \\"description\\": \\"Whether to append to an existing file.\\", \\"default\\": false, \\"type\\": \\"boolean\\"}}\\n2. read_file: Read file from disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}}\\n3. process_csv: process_csv(llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str - Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded., args json schema: {\\"llm\\": {\\"title\\": \\"Llm\\"}, \\"csv_file_path\\": {\\"title\\": \\"Csv File Path\\", \\"type\\": \\"string\\"}, \\"instructions\\": {\\"title\\": \\"Instructions\\", \\"type\\": \\"string\\"}, \\"output_path\\": {\\"title\\": \\"Output Path\\", \\"type\\": \\"string\\"}}\\n4. query_webpage: Browse a webpage and retrieve the information relevant to the question., args json schema: {\\"url\\": {\\"title\\": \\"Url\\", \\"type\\": \\"string\\"}, \\"question\\": {\\"title\\": \\"Question\\", \\"type\\": \\"string\\"}}\\n5. human: You can ask a human for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for the human., args json schema: {\\"query\\": {\\"title\\": \\"Query\\", \\"type\\": \\"string\\"}}\\n6. compile: compile(task: str) - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.\\n\\n This provides a natural-language interface to your computer\'s general-purpose capabilities:\\n\\n Create and edit photos, videos, PDFs, etc.\\n Control a Chrome browser to perform research\\n Plot, clean, and analyze large datasets\\n ...etc.\\n \\u26a0\\ufe0f Note: You\'ll be asked to approve code before it\'s run., args json schema: {\\"task\\": {\\"title\\": \\"Task\\", \\"type\\": \\"string\\"}}\\n7. VQAinference: VQAinference(self, inputs) - Answer Question About The Image, VQA Multi-Modal Worker agent\\n description=\\"useful when you need an answer for a question based on an image. \\"\\n \\"like: what is the background color of the last image, how many cats in this figure, what is in this figure. \\"\\n \\"The input to this tool should be a comma separated string of two, representing the image_path and the question\\",, args json schema: {\\"self\\": {\\"title\\": \\"Self\\"}, \\"inputs\\": {\\"title\\": \\"Inputs\\"}}\\n8. finish: use this to signal that you have finished all your objectives, args: \\"response\\": \\"final response to let people know you have finished your objectives\\"\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\nYou should only respond in JSON format as described below \\nResponse Format: \\n{\\n \\"thoughts\\": {\\n \\"text\\": \\"thought\\",\\n \\"reasoning\\": \\"reasoning\\",\\n \\"plan\\": \\"- short bulleted\\\\n- list that conveys\\\\n- long-term plan\\",\\n \\"criticism\\": \\"constructive self-criticism\\",\\n \\"speak\\": \\"thoughts summary to say to user\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"command name\\",\\n \\"args\\": {\\n \\"arg name\\": \\"value\\"\\n }\\n }\\n} \\nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Sep 26 09:57:06 2023"}, {"role": "system", "content": "This reminds you of these events from your past:\\n[]\\n\\n"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "model": "gpt-4", "max_tokens": null, "stream": false, "n": 1, "temperature": 0.5}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=51 request_id=f8af3b3392526dd22ae9dcdde0db839d response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "system", "content": "You are Optimus Prime, Worker in a swarm\\nYour decisions must always be made independently without seeking user assistance.\\nPlay to your strengths as an LLM and pursue simple strategies with no legal complications.\\nIf you have completed all your tasks, make sure to use the \\"finish\\" command.\\n\\nGOALS:\\n\\n1. What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\\n\\n\\nConstraints:\\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\\n3. No user assistance\\n4. Exclusively use the commands listed in double quotes e.g. \\"command name\\"\\n\\nCommands:\\n1. write_file: Write file to disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}, \\"text\\": {\\"title\\": \\"Text\\", \\"description\\": \\"text to write to file\\", \\"type\\": \\"string\\"}, \\"append\\": {\\"title\\": \\"Append\\", \\"description\\": \\"Whether to append to an existing file.\\", \\"default\\": false, \\"type\\": \\"boolean\\"}}\\n2. read_file: Read file from disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}}\\n3. process_csv: process_csv(llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str - Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded., args json schema: {\\"llm\\": {\\"title\\": \\"Llm\\"}, \\"csv_file_path\\": {\\"title\\": \\"Csv File Path\\", \\"type\\": \\"string\\"}, \\"instructions\\": {\\"title\\": \\"Instructions\\", \\"type\\": \\"string\\"}, \\"output_path\\": {\\"title\\": \\"Output Path\\", \\"type\\": \\"string\\"}}\\n4. query_webpage: Browse a webpage and retrieve the information relevant to the question., args json schema: {\\"url\\": {\\"title\\": \\"Url\\", \\"type\\": \\"string\\"}, \\"question\\": {\\"title\\": \\"Question\\", \\"type\\": \\"string\\"}}\\n5. human: You can ask a human for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for the human., args json schema: {\\"query\\": {\\"title\\": \\"Query\\", \\"type\\": \\"string\\"}}\\n6. compile: compile(task: str) - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.\\n\\n This provides a natural-language interface to your computer\'s general-purpose capabilities:\\n\\n Create and edit photos, videos, PDFs, etc.\\n Control a Chrome browser to perform research\\n Plot, clean, and analyze large datasets\\n ...etc.\\n \\u26a0\\ufe0f Note: You\'ll be asked to approve code before it\'s run., args json schema: {\\"task\\": {\\"title\\": \\"Task\\", \\"type\\": \\"string\\"}}\\n7. VQAinference: VQAinference(self, inputs) - Answer Question About The Image, VQA Multi-Modal Worker agent\\n description=\\"useful when you need an answer for a question based on an image. \\"\\n \\"like: what is the background color of the last image, how many cats in this figure, what is in this figure. \\"\\n \\"The input to this tool should be a comma separated string of two, representing the image_path and the question\\",, args json schema: {\\"self\\": {\\"title\\": \\"Self\\"}, \\"inputs\\": {\\"title\\": \\"Inputs\\"}}\\n8. finish: use this to signal that you have finished all your objectives, args: \\"response\\": \\"final response to let people know you have finished your objectives\\"\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\nYou should only respond in JSON format as described below \\nResponse Format: \\n{\\n \\"thoughts\\": {\\n \\"text\\": \\"thought\\",\\n \\"reasoning\\": \\"reasoning\\",\\n \\"plan\\": \\"- short bulleted\\\\n- list that conveys\\\\n- long-term plan\\",\\n \\"criticism\\": \\"constructive self-criticism\\",\\n \\"speak\\": \\"thoughts summary to say to user\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"command name\\",\\n \\"args\\": {\\n \\"arg name\\": \\"value\\"\\n }\\n }\\n} \\nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Sep 26 09:57:06 2023"}, {"role": "system", "content": "This reminds you of these events from your past:\\n[]\\n\\n"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "model": "gpt-4", "max_tokens": null, "stream": false, "n": 1, "temperature": 0.5}' message='Post details' -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=29743 request_id=7586a691aee960b5bffc6118bb49b3f2 response_code=200 -{ - "thoughts": { - "text": "I need to find the winning Boston Marathon times for the past 5 years. To do this, I can use the 'query_webpage' command to search the internet for this information.", - "reasoning": "The Boston Marathon is a popular event and the results are likely to be published on multiple websites. Therefore, using the 'query_webpage' command is a reasonable approach.", - "plan": "- Use 'query_webpage' command to find the winning times for the Boston Marathon for the past 5 years (2018-2022).\n- Save the data to a file.\n- Use 'process_csv' command to generate a table from the saved data.", - "criticism": "None", - "speak": "I am going to look up the winning times for the Boston Marathon for the past 5 years." - }, - "command": { - "name": "query_webpage", - "args": { - "url": "https://www.boston.com/sports/boston-marathon/2022/04/18/boston-marathon-2022-results", - "question": "What were the winning Boston Marathon times for the past 5 years?" - } - } -} -/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain_experimental/autonomous_agents/autogpt/agent.py:119: RuntimeWarning: coroutine 'async_load_playwright' was never awaited - observation = ( -RuntimeWarning: Enable tracemalloc to get the object allocation traceback -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[72803, 18321, 25, 341, 262, 330, 61665, 82, 794, 341, 286, 330, 1342, 794, 330, 40, 1205, 311, 1505, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 13, 2057, 656, 420, 11, 358, 649, 1005, 279, 364, 1663, 27050, 2964, 6, 3290, 311, 2778, 279, 7757, 369, 420, 2038, 10560, 286, 330, 20489, 287, 794, 330, 791, 10406, 51273, 374, 264, 5526, 1567, 323, 279, 3135, 527, 4461, 311, 387, 4756, 389, 5361, 13335, 13, 15636, 11, 1701, 279, 364, 1663, 27050, 2964, 6, 3290, 374, 264, 13579, 5603, 10560, 286, 330, 10609, 794, 6660, 5560, 364, 1663, 27050, 2964, 6, 3290, 311, 1505, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 320, 679, 23, 12, 2366, 17, 73441, 77, 12, 10467, 279, 828, 311, 264, 1052, 7255, 77, 12, 5560, 364, 4734, 14347, 6, 3290, 311, 7068, 264, 2007, 505, 279, 6924, 828, 10560, 286, 330, 38096, 42914, 794, 330, 4155, 761, 286, 330, 82, 23635, 794, 330, 40, 1097, 2133, 311, 1427, 709, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 10246, 262, 1173, 262, 330, 5749, 794, 341, 286, 330, 609, 794, 330, 1663, 27050, 2964, 761, 286, 330, 2164, 794, 341, 310, 330, 1103, 794, 330, 2485, 1129, 2185, 960, 9611, 916, 2754, 3476, 3554, 9611, 71098, 24893, 14, 2366, 17, 14, 2371, 14, 972, 3554, 9611, 71098, 24893, 12, 2366, 17, 65870, 761, 310, 330, 7998, 794, 330, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 48469, 286, 457, 262, 457, 92, 720, 2122, 25, 7498, 3319, 27050, 2964, 6052, 25, 4703, 25, 2684, 374, 912, 1510, 1567, 6471, 304, 4617, 364, 68137, 26321, 12, 15, 62, 17, 6, 2637, 45643, 11, 2897, 25, 5473, 1103, 1232, 364, 2485, 1129, 2185, 960, 9611, 916, 2754, 3476, 3554, 9611, 71098, 24893, 14, 2366, 17, 14, 2371, 14, 972, 3554, 9611, 71098, 24893, 12, 2366, 17, 65870, 518, 364, 7998, 1232, 364, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 30, 8439, 220]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=38 request_id=043c667cc08452c8d6911705c2ddab5b response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[58, 35075, 2097, 15413, 1151, 35, 25296, 902, 1828, 3290, 311, 1005, 11, 323, 6013, 1701, 279, 3645, 5300, 3485, 17898, 5217, 37335, 68525, 3187, 5725, 705, 87868, 808, 15413, 1151, 36802, 77, 262, 330, 61665, 82, 794, 29252, 77, 286, 330, 1342, 794, 330, 40, 1205, 311, 1505, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 13, 2057, 656, 420, 11, 358, 649, 1005, 279, 28251, 1663, 27050, 2964, 10379, 3290, 311, 2778, 279, 7757, 369, 420, 2038, 10684, 59, 77, 286, 330, 20489, 287, 794, 330, 791, 10406, 51273, 374, 264, 5526, 1567, 323, 279, 3135, 527, 4461, 311, 387, 4756, 389, 5361, 13335, 13, 15636, 11, 1701, 279, 28251, 1663, 27050, 2964, 10379, 3290, 374, 264, 13579, 5603, 10684, 59, 77, 286, 330, 10609, 794, 6660, 5560, 28251, 1663, 27050, 2964, 10379, 3290, 311, 1505, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 320, 679, 23, 12, 2366, 17, 570, 3505, 77, 12, 10467, 279, 828, 311, 264, 1052, 13, 3505, 77, 12, 5560, 28251, 4734, 14347, 10379, 3290, 311, 7068, 264, 2007, 505, 279, 6924, 828, 10684, 59, 77, 286, 330, 38096, 42914, 794, 330, 4155, 498, 59, 77, 286, 330, 82, 23635, 794, 330, 40, 1097, 2133, 311, 1427, 709, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 44043, 77, 262, 2529, 59, 77, 262, 330, 5749, 794, 29252, 77, 286, 330, 609, 794, 330, 1663, 27050, 2964, 498, 59, 77, 286, 330, 2164, 794, 29252, 77, 310, 330, 1103, 794, 330, 2485, 1129, 2185, 960, 9611, 916, 2754, 3476, 3554, 9611, 71098, 24893, 14, 2366, 17, 14, 2371, 14, 972, 3554, 9611, 71098, 24893, 12, 2366, 17, 65870, 498, 59, 77, 310, 330, 7998, 794, 330, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 7673, 59, 77, 286, 52400, 77, 262, 52400, 77, 17266, 5217, 37335, 68525, 3187, 5725, 705, 744, 2097, 15413, 429, 4153, 3319, 27050, 2964, 6052, 25, 4703, 25, 2684, 374, 912, 1510, 1567, 6471, 304, 4617, 364, 68137, 26321, 12, 15, 62, 17, 6, 2637, 45643, 11, 2897, 25, 5473, 1103, 1232, 364, 2485, 1129, 2185, 960, 9611, 916, 2754, 3476, 3554, 9611, 71098, 24893, 14, 2366, 17, 14, 2371, 14, 972, 3554, 9611, 71098, 24893, 12, 2366, 17, 65870, 518, 364, 7998, 1232, 364, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 30, 8439, 498, 5217, 37335, 1185, 5525, 60]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=30701 request_id=ccc67fbe4bf509e8d55c57efd5d23610 response_code=200 -{ - "thoughts": { - "text": "To find the winning Boston Marathon times for the past 5 years, I need to query a webpage that contains this information.", - "reasoning": "The Boston Marathon is a major annual event with significant media coverage, so the results should be readily available online. I can use the 'query_webpage' command to retrieve this information.", - "plan": "- Use 'query_webpage' to find the winning times for the Boston Marathon for the past 5 years.\n- Extract the relevant information and save it to a file.\n- Process the saved file to generate the requested table.", - "criticism": "This plan assumes that the information is available in a format that can be easily extracted and processed. If the information is presented in a less structured format, additional steps may be required.", - "speak": "I'm going to find the winning Boston Marathon times for the past 5 years by querying a webpage with this information." - }, - "command": { - "name": "query_webpage", - "args": { - "url": "https://www.baa.org/races/boston-marathon/results/champions", - "question": "What were the winning Boston Marathon times for the past 5 years (ending in 2022)?" - } - } -} -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[72803, 18321, 25, 341, 262, 330, 61665, 82, 794, 341, 286, 330, 1342, 794, 330, 1271, 1505, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 11, 358, 1205, 311, 3319, 264, 45710, 430, 5727, 420, 2038, 10560, 286, 330, 20489, 287, 794, 330, 791, 10406, 51273, 374, 264, 3682, 9974, 1567, 449, 5199, 3772, 10401, 11, 779, 279, 3135, 1288, 387, 31368, 2561, 2930, 13, 358, 649, 1005, 279, 364, 1663, 27050, 2964, 6, 3290, 311, 17622, 420, 2038, 10560, 286, 330, 10609, 794, 6660, 5560, 364, 1663, 27050, 2964, 6, 311, 1505, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 7255, 77, 12, 23673, 279, 9959, 2038, 323, 3665, 433, 311, 264, 1052, 7255, 77, 12, 8773, 279, 6924, 1052, 311, 7068, 279, 11472, 2007, 10560, 286, 330, 38096, 42914, 794, 330, 2028, 3197, 22204, 430, 279, 2038, 374, 2561, 304, 264, 3645, 430, 649, 387, 6847, 28532, 323, 15590, 13, 1442, 279, 2038, 374, 10666, 304, 264, 2753, 34030, 3645, 11, 5217, 7504, 1253, 387, 2631, 10560, 286, 330, 82, 23635, 794, 330, 40, 2846, 2133, 311, 1505, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 555, 82198, 264, 45710, 449, 420, 2038, 10246, 262, 1173, 262, 330, 5749, 794, 341, 286, 330, 609, 794, 330, 1663, 27050, 2964, 761, 286, 330, 2164, 794, 341, 310, 330, 1103, 794, 330, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 761, 310, 330, 7998, 794, 330, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 12106, 702, 286, 457, 262, 457, 92, 720, 2122, 25, 7498, 3319, 27050, 2964, 6052, 25, 4703, 25, 2684, 374, 912, 1510, 1567, 6471, 304, 4617, 364, 68137, 26321, 12, 15, 62, 15, 6, 2637, 45643, 11, 2897, 25, 5473, 1103, 1232, 364, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 518, 364, 7998, 1232, 364, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 12106, 8439, 220]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=29 request_id=f68c1235fc86e1da9e0eab521610ce41 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "system", "content": "You are Optimus Prime, Worker in a swarm\\nYour decisions must always be made independently without seeking user assistance.\\nPlay to your strengths as an LLM and pursue simple strategies with no legal complications.\\nIf you have completed all your tasks, make sure to use the \\"finish\\" command.\\n\\nGOALS:\\n\\n1. What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\\n\\n\\nConstraints:\\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\\n3. No user assistance\\n4. Exclusively use the commands listed in double quotes e.g. \\"command name\\"\\n\\nCommands:\\n1. write_file: Write file to disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}, \\"text\\": {\\"title\\": \\"Text\\", \\"description\\": \\"text to write to file\\", \\"type\\": \\"string\\"}, \\"append\\": {\\"title\\": \\"Append\\", \\"description\\": \\"Whether to append to an existing file.\\", \\"default\\": false, \\"type\\": \\"boolean\\"}}\\n2. read_file: Read file from disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}}\\n3. process_csv: process_csv(llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str - Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded., args json schema: {\\"llm\\": {\\"title\\": \\"Llm\\"}, \\"csv_file_path\\": {\\"title\\": \\"Csv File Path\\", \\"type\\": \\"string\\"}, \\"instructions\\": {\\"title\\": \\"Instructions\\", \\"type\\": \\"string\\"}, \\"output_path\\": {\\"title\\": \\"Output Path\\", \\"type\\": \\"string\\"}}\\n4. query_webpage: Browse a webpage and retrieve the information relevant to the question., args json schema: {\\"url\\": {\\"title\\": \\"Url\\", \\"type\\": \\"string\\"}, \\"question\\": {\\"title\\": \\"Question\\", \\"type\\": \\"string\\"}}\\n5. human: You can ask a human for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for the human., args json schema: {\\"query\\": {\\"title\\": \\"Query\\", \\"type\\": \\"string\\"}}\\n6. compile: compile(task: str) - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.\\n\\n This provides a natural-language interface to your computer\'s general-purpose capabilities:\\n\\n Create and edit photos, videos, PDFs, etc.\\n Control a Chrome browser to perform research\\n Plot, clean, and analyze large datasets\\n ...etc.\\n \\u26a0\\ufe0f Note: You\'ll be asked to approve code before it\'s run., args json schema: {\\"task\\": {\\"title\\": \\"Task\\", \\"type\\": \\"string\\"}}\\n7. VQAinference: VQAinference(self, inputs) - Answer Question About The Image, VQA Multi-Modal Worker agent\\n description=\\"useful when you need an answer for a question based on an image. \\"\\n \\"like: what is the background color of the last image, how many cats in this figure, what is in this figure. \\"\\n \\"The input to this tool should be a comma separated string of two, representing the image_path and the question\\",, args json schema: {\\"self\\": {\\"title\\": \\"Self\\"}, \\"inputs\\": {\\"title\\": \\"Inputs\\"}}\\n8. finish: use this to signal that you have finished all your objectives, args: \\"response\\": \\"final response to let people know you have finished your objectives\\"\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\nYou should only respond in JSON format as described below \\nResponse Format: \\n{\\n \\"thoughts\\": {\\n \\"text\\": \\"thought\\",\\n \\"reasoning\\": \\"reasoning\\",\\n \\"plan\\": \\"- short bulleted\\\\n- list that conveys\\\\n- long-term plan\\",\\n \\"criticism\\": \\"constructive self-criticism\\",\\n \\"speak\\": \\"thoughts summary to say to user\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"command name\\",\\n \\"args\\": {\\n \\"arg name\\": \\"value\\"\\n }\\n }\\n} \\nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Sep 26 09:57:37 2023"}, {"role": "system", "content": "This reminds you of these events from your past:\\n[\'Assistant Reply: {\\\\n \\"thoughts\\": {\\\\n \\"text\\": \\"I need to find the winning Boston Marathon times for the past 5 years. To do this, I can use the \\\\\'query_webpage\\\\\' command to search the internet for this information.\\",\\\\n \\"reasoning\\": \\"The Boston Marathon is a popular event and the results are likely to be published on multiple websites. Therefore, using the \\\\\'query_webpage\\\\\' command is a reasonable approach.\\",\\\\n \\"plan\\": \\"- Use \\\\\'query_webpage\\\\\' command to find the winning times for the Boston Marathon for the past 5 years (2018-2022).\\\\\\\\n- Save the data to a file.\\\\\\\\n- Use \\\\\'process_csv\\\\\' command to generate a table from the saved data.\\",\\\\n \\"criticism\\": \\"None\\",\\\\n \\"speak\\": \\"I am going to look up the winning times for the Boston Marathon for the past 5 years.\\"\\\\n },\\\\n \\"command\\": {\\\\n \\"name\\": \\"query_webpage\\",\\\\n \\"args\\": {\\\\n \\"url\\": \\"https://www.boston.com/sports/boston-marathon/2022/04/18/boston-marathon-2022-results\\",\\\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years?\\"\\\\n }\\\\n }\\\\n} \\\\nResult: Command query_webpage returned: Error: There is no current event loop in thread \\\\\'ThreadPoolExecutor-0_2\\\\\'., RuntimeError, args: {\\\\\'url\\\\\': \\\\\'https://www.boston.com/sports/boston-marathon/2022/04/18/boston-marathon-2022-results\\\\\', \\\\\'question\\\\\': \\\\\'What were the winning Boston Marathon times for the past 5 years?\\\\\'} \']\\n\\n"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\\n \\"thoughts\\": {\\n \\"text\\": \\"I need to find the winning Boston Marathon times for the past 5 years. To do this, I can use the \'query_webpage\' command to search the internet for this information.\\",\\n \\"reasoning\\": \\"The Boston Marathon is a popular event and the results are likely to be published on multiple websites. Therefore, using the \'query_webpage\' command is a reasonable approach.\\",\\n \\"plan\\": \\"- Use \'query_webpage\' command to find the winning times for the Boston Marathon for the past 5 years (2018-2022).\\\\n- Save the data to a file.\\\\n- Use \'process_csv\' command to generate a table from the saved data.\\",\\n \\"criticism\\": \\"None\\",\\n \\"speak\\": \\"I am going to look up the winning times for the Boston Marathon for the past 5 years.\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"query_webpage\\",\\n \\"args\\": {\\n \\"url\\": \\"https://www.boston.com/sports/boston-marathon/2022/04/18/boston-marathon-2022-results\\",\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years?\\"\\n }\\n }\\n}"}, {"role": "system", "content": "Command query_webpage returned: Error: There is no current event loop in thread \'ThreadPoolExecutor-0_2\'., RuntimeError, args: {\'url\': \'https://www.boston.com/sports/boston-marathon/2022/04/18/boston-marathon-2022-results\', \'question\': \'What were the winning Boston Marathon times for the past 5 years?\'}"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "model": "gpt-4", "max_tokens": null, "stream": false, "n": 1, "temperature": 0.5}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=34 request_id=ae4ba2060e50dddb22555a59604a48be response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[58, 35075, 2097, 15413, 1151, 35, 25296, 902, 1828, 3290, 311, 1005, 11, 323, 6013, 1701, 279, 3645, 5300, 3485, 17898, 5217, 37335, 68525, 3187, 5725, 705, 87868, 808, 15413, 1151, 36802, 77, 262, 330, 61665, 82, 794, 29252, 77, 286, 330, 1342, 794, 330, 1271, 1505, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 11, 358, 1205, 311, 3319, 264, 45710, 430, 5727, 420, 2038, 10684, 59, 77, 286, 330, 20489, 287, 794, 330, 791, 10406, 51273, 374, 264, 3682, 9974, 1567, 449, 5199, 3772, 10401, 11, 779, 279, 3135, 1288, 387, 31368, 2561, 2930, 13, 358, 649, 1005, 279, 28251, 1663, 27050, 2964, 10379, 3290, 311, 17622, 420, 2038, 10684, 59, 77, 286, 330, 10609, 794, 6660, 5560, 28251, 1663, 27050, 2964, 10379, 311, 1505, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 13, 3505, 77, 12, 23673, 279, 9959, 2038, 323, 3665, 433, 311, 264, 1052, 13, 3505, 77, 12, 8773, 279, 6924, 1052, 311, 7068, 279, 11472, 2007, 10684, 59, 77, 286, 330, 38096, 42914, 794, 330, 2028, 3197, 22204, 430, 279, 2038, 374, 2561, 304, 264, 3645, 430, 649, 387, 6847, 28532, 323, 15590, 13, 1442, 279, 2038, 374, 10666, 304, 264, 2753, 34030, 3645, 11, 5217, 7504, 1253, 387, 2631, 10684, 59, 77, 286, 330, 82, 23635, 794, 330, 40, 10379, 76, 2133, 311, 1505, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 555, 82198, 264, 45710, 449, 420, 2038, 44043, 77, 262, 2529, 59, 77, 262, 330, 5749, 794, 29252, 77, 286, 330, 609, 794, 330, 1663, 27050, 2964, 498, 59, 77, 286, 330, 2164, 794, 29252, 77, 310, 330, 1103, 794, 330, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 498, 59, 77, 310, 330, 7998, 794, 330, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 8, 7673, 59, 77, 286, 52400, 77, 262, 52400, 77, 17266, 5217, 37335, 68525, 3187, 5725, 705, 744, 2097, 15413, 429, 4153, 3319, 27050, 2964, 6052, 25, 4703, 25, 2684, 374, 912, 1510, 1567, 6471, 304, 4617, 364, 68137, 26321, 12, 15, 62, 15, 6, 2637, 45643, 11, 2897, 25, 5473, 1103, 1232, 364, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 518, 364, 7998, 1232, 364, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 12106, 8439, 498, 5217, 37335, 1185, 5525, 60]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=42 request_id=64bca45925a5a4015ea9df1c9266d6ea response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "system", "content": "You are Optimus Prime, Worker in a swarm\\nYour decisions must always be made independently without seeking user assistance.\\nPlay to your strengths as an LLM and pursue simple strategies with no legal complications.\\nIf you have completed all your tasks, make sure to use the \\"finish\\" command.\\n\\nGOALS:\\n\\n1. What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\\n\\n\\nConstraints:\\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\\n3. No user assistance\\n4. Exclusively use the commands listed in double quotes e.g. \\"command name\\"\\n\\nCommands:\\n1. write_file: Write file to disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}, \\"text\\": {\\"title\\": \\"Text\\", \\"description\\": \\"text to write to file\\", \\"type\\": \\"string\\"}, \\"append\\": {\\"title\\": \\"Append\\", \\"description\\": \\"Whether to append to an existing file.\\", \\"default\\": false, \\"type\\": \\"boolean\\"}}\\n2. read_file: Read file from disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}}\\n3. process_csv: process_csv(llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str - Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded., args json schema: {\\"llm\\": {\\"title\\": \\"Llm\\"}, \\"csv_file_path\\": {\\"title\\": \\"Csv File Path\\", \\"type\\": \\"string\\"}, \\"instructions\\": {\\"title\\": \\"Instructions\\", \\"type\\": \\"string\\"}, \\"output_path\\": {\\"title\\": \\"Output Path\\", \\"type\\": \\"string\\"}}\\n4. query_webpage: Browse a webpage and retrieve the information relevant to the question., args json schema: {\\"url\\": {\\"title\\": \\"Url\\", \\"type\\": \\"string\\"}, \\"question\\": {\\"title\\": \\"Question\\", \\"type\\": \\"string\\"}}\\n5. human: You can ask a human for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for the human., args json schema: {\\"query\\": {\\"title\\": \\"Query\\", \\"type\\": \\"string\\"}}\\n6. compile: compile(task: str) - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.\\n\\n This provides a natural-language interface to your computer\'s general-purpose capabilities:\\n\\n Create and edit photos, videos, PDFs, etc.\\n Control a Chrome browser to perform research\\n Plot, clean, and analyze large datasets\\n ...etc.\\n \\u26a0\\ufe0f Note: You\'ll be asked to approve code before it\'s run., args json schema: {\\"task\\": {\\"title\\": \\"Task\\", \\"type\\": \\"string\\"}}\\n7. VQAinference: VQAinference(self, inputs) - Answer Question About The Image, VQA Multi-Modal Worker agent\\n description=\\"useful when you need an answer for a question based on an image. \\"\\n \\"like: what is the background color of the last image, how many cats in this figure, what is in this figure. \\"\\n \\"The input to this tool should be a comma separated string of two, representing the image_path and the question\\",, args json schema: {\\"self\\": {\\"title\\": \\"Self\\"}, \\"inputs\\": {\\"title\\": \\"Inputs\\"}}\\n8. finish: use this to signal that you have finished all your objectives, args: \\"response\\": \\"final response to let people know you have finished your objectives\\"\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\nYou should only respond in JSON format as described below \\nResponse Format: \\n{\\n \\"thoughts\\": {\\n \\"text\\": \\"thought\\",\\n \\"reasoning\\": \\"reasoning\\",\\n \\"plan\\": \\"- short bulleted\\\\n- list that conveys\\\\n- long-term plan\\",\\n \\"criticism\\": \\"constructive self-criticism\\",\\n \\"speak\\": \\"thoughts summary to say to user\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"command name\\",\\n \\"args\\": {\\n \\"arg name\\": \\"value\\"\\n }\\n }\\n} \\nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Sep 26 09:57:38 2023"}, {"role": "system", "content": "This reminds you of these events from your past:\\n[\'Assistant Reply: {\\\\n \\"thoughts\\": {\\\\n \\"text\\": \\"To find the winning Boston Marathon times for the past 5 years, I need to query a webpage that contains this information.\\",\\\\n \\"reasoning\\": \\"The Boston Marathon is a major annual event with significant media coverage, so the results should be readily available online. I can use the \\\\\'query_webpage\\\\\' command to retrieve this information.\\",\\\\n \\"plan\\": \\"- Use \\\\\'query_webpage\\\\\' to find the winning times for the Boston Marathon for the past 5 years.\\\\\\\\n- Extract the relevant information and save it to a file.\\\\\\\\n- Process the saved file to generate the requested table.\\",\\\\n \\"criticism\\": \\"This plan assumes that the information is available in a format that can be easily extracted and processed. If the information is presented in a less structured format, additional steps may be required.\\",\\\\n \\"speak\\": \\"I\\\\\'m going to find the winning Boston Marathon times for the past 5 years by querying a webpage with this information.\\"\\\\n },\\\\n \\"command\\": {\\\\n \\"name\\": \\"query_webpage\\",\\\\n \\"args\\": {\\\\n \\"url\\": \\"https://www.baa.org/races/boston-marathon/results/champions\\",\\\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\\"\\\\n }\\\\n }\\\\n} \\\\nResult: Command query_webpage returned: Error: There is no current event loop in thread \\\\\'ThreadPoolExecutor-0_0\\\\\'., RuntimeError, args: {\\\\\'url\\\\\': \\\\\'https://www.baa.org/races/boston-marathon/results/champions\\\\\', \\\\\'question\\\\\': \\\\\'What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\\\\\'} \']\\n\\n"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\\n \\"thoughts\\": {\\n \\"text\\": \\"To find the winning Boston Marathon times for the past 5 years, I need to query a webpage that contains this information.\\",\\n \\"reasoning\\": \\"The Boston Marathon is a major annual event with significant media coverage, so the results should be readily available online. I can use the \'query_webpage\' command to retrieve this information.\\",\\n \\"plan\\": \\"- Use \'query_webpage\' to find the winning times for the Boston Marathon for the past 5 years.\\\\n- Extract the relevant information and save it to a file.\\\\n- Process the saved file to generate the requested table.\\",\\n \\"criticism\\": \\"This plan assumes that the information is available in a format that can be easily extracted and processed. If the information is presented in a less structured format, additional steps may be required.\\",\\n \\"speak\\": \\"I\'m going to find the winning Boston Marathon times for the past 5 years by querying a webpage with this information.\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"query_webpage\\",\\n \\"args\\": {\\n \\"url\\": \\"https://www.baa.org/races/boston-marathon/results/champions\\",\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\\"\\n }\\n }\\n}"}, {"role": "system", "content": "Command query_webpage returned: Error: There is no current event loop in thread \'ThreadPoolExecutor-0_0\'., RuntimeError, args: {\'url\': \'https://www.baa.org/races/boston-marathon/results/champions\', \'question\': \'What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\'}"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "model": "gpt-4", "max_tokens": null, "stream": false, "n": 1, "temperature": 0.5}' message='Post details' -^CError in sys.excepthook: -Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/humbug/report.py", line 505, in _hook - self.error_report(error=exception_instance, tags=tags, publish=publish) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/humbug/report.py", line 247, in error_report - traceback.format_exception( -TypeError: format_exception() got an unexpected keyword argument 'etype' - -Original exception was: -Traceback (most recent call last): - File "/Users/defalt/Desktop/Athena/research/swarms/example_godmode.py", line 1, in - from swarms import GodMode - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/__init__.py", line 25, in - from swarms.swarms.dialogue_simulator import DialogueSimulator - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/swarms/__init__.py", line 6, in - from swarms.swarms.god_mode import GodMode - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/swarms/god_mode.py", line 55, in - god_mode.print_responses(task) - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/swarms/god_mode.py", line 36, in print_responses - responses = self.run_all(task) - ^^^^^^^^^^^^^^^^^^ - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/swarms/god_mode.py", line 26, in run_all - with ThreadPoolExecutor() as executor: - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py", line 647, in __exit__ - self.shutdown(wait=True) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/thread.py", line 235, in shutdown - t.join() - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/threading.py", line 1112, in join - self._wait_for_tstate_lock() - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/threading.py", line 1132, in _wait_for_tstate_lock - if lock.acquire(block, timeout): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -KeyboardInterrupt -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=39632 request_id=1c05913c3e03ea1306c880d30d75800a response_code=200 -{ - "thoughts": { - "text": "To gather the required information, I'll use the 'query_webpage' command to browse a webpage that contains the winning Boston Marathon times for the past 5 years (ending in 2022). This command allows me to retrieve the relevant information to the question.", - "reasoning": "I need to find a reliable source that contains the historical data of the Boston Marathon winners. The official Boston Marathon website or a reputable sports news website can be a potential source. Using the 'query_webpage' command, I can browse these websites and extract the required information.", - "plan": "- Use the 'query_webpage' command to find the Boston Marathon winning times for the past 5 years.\n- Once I have the information, store it in a file for future reference.\n- Generate a table of the year, name, country of origin, and times.", - "criticism": "I need to ensure that the source I'm using is reliable and up-to-date. Also, I must accurately extract the required information to avoid any errors in the final output.", - "speak": "I'm going to use the 'query_webpage' command to browse a webpage that contains the winning Boston Marathon times for the past 5 years. I'll gather the information and then store it in a file for future reference." - }, - "command": { - "name": "query_webpage", - "args": { - "url": "https://www.baa.org/races/boston-marathon/results/champions", - "question": "What were the winning Boston Marathon times for the past 5 years (ending in 2022)?" - } - } -} -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[72803, 18321, 25, 341, 262, 330, 61665, 82, 794, 341, 286, 330, 1342, 794, 330, 1271, 9762, 279, 2631, 2038, 11, 358, 3358, 1005, 279, 364, 1663, 27050, 2964, 6, 3290, 311, 27100, 264, 45710, 430, 5727, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 570, 1115, 3290, 6276, 757, 311, 17622, 279, 9959, 2038, 311, 279, 3488, 10560, 286, 330, 20489, 287, 794, 330, 40, 1205, 311, 1505, 264, 15062, 2592, 430, 5727, 279, 13970, 828, 315, 279, 10406, 51273, 26526, 13, 578, 4033, 10406, 51273, 3997, 477, 264, 56940, 10034, 3754, 3997, 649, 387, 264, 4754, 2592, 13, 12362, 279, 364, 1663, 27050, 2964, 6, 3290, 11, 358, 649, 27100, 1521, 13335, 323, 8819, 279, 2631, 2038, 10560, 286, 330, 10609, 794, 6660, 5560, 279, 364, 1663, 27050, 2964, 6, 3290, 311, 1505, 279, 10406, 51273, 11230, 3115, 369, 279, 3347, 220, 20, 1667, 7255, 77, 12, 9843, 358, 617, 279, 2038, 11, 3637, 433, 304, 264, 1052, 369, 3938, 5905, 7255, 77, 12, 20400, 264, 2007, 315, 279, 1060, 11, 836, 11, 3224, 315, 6371, 11, 323, 3115, 10560, 286, 330, 38096, 42914, 794, 330, 40, 1205, 311, 6106, 430, 279, 2592, 358, 2846, 1701, 374, 15062, 323, 709, 4791, 18920, 13, 7429, 11, 358, 2011, 30357, 8819, 279, 2631, 2038, 311, 5766, 904, 6103, 304, 279, 1620, 2612, 10560, 286, 330, 82, 23635, 794, 330, 40, 2846, 2133, 311, 1005, 279, 364, 1663, 27050, 2964, 6, 3290, 311, 27100, 264, 45710, 430, 5727, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 13, 358, 3358, 9762, 279, 2038, 323, 1243, 3637, 433, 304, 264, 1052, 369, 3938, 5905, 10246, 262, 1173, 262, 330, 5749, 794, 341, 286, 330, 609, 794, 330, 1663, 27050, 2964, 761, 286, 330, 2164, 794, 341, 310, 330, 1103, 794, 330, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 761, 310, 330, 7998, 794, 330, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 12106, 702, 286, 457, 262, 457, 92, 720, 2122, 25, 7498, 3319, 27050, 2964, 6052, 25, 4703, 25, 2684, 374, 912, 1510, 1567, 6471, 304, 4617, 364, 68137, 26321, 12, 15, 62, 16, 6, 2637, 45643, 11, 2897, 25, 5473, 1103, 1232, 364, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 518, 364, 7998, 1232, 364, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 12106, 8439, 220]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=34 request_id=f4d2d419abcc16d238f480f029c7379f response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[58, 35075, 2097, 15413, 1151, 35, 25296, 902, 1828, 3290, 311, 1005, 11, 323, 6013, 1701, 279, 3645, 5300, 3485, 17898, 5217, 37335, 68525, 3187, 5725, 705, 87868, 808, 15413, 1151, 36802, 77, 262, 330, 61665, 82, 794, 29252, 77, 286, 330, 1342, 794, 330, 1271, 9762, 279, 2631, 2038, 11, 358, 10379, 657, 1005, 279, 28251, 1663, 27050, 2964, 10379, 3290, 311, 27100, 264, 45710, 430, 5727, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 570, 1115, 3290, 6276, 757, 311, 17622, 279, 9959, 2038, 311, 279, 3488, 10684, 59, 77, 286, 330, 20489, 287, 794, 330, 40, 1205, 311, 1505, 264, 15062, 2592, 430, 5727, 279, 13970, 828, 315, 279, 10406, 51273, 26526, 13, 578, 4033, 10406, 51273, 3997, 477, 264, 56940, 10034, 3754, 3997, 649, 387, 264, 4754, 2592, 13, 12362, 279, 28251, 1663, 27050, 2964, 10379, 3290, 11, 358, 649, 27100, 1521, 13335, 323, 8819, 279, 2631, 2038, 10684, 59, 77, 286, 330, 10609, 794, 6660, 5560, 279, 28251, 1663, 27050, 2964, 10379, 3290, 311, 1505, 279, 10406, 51273, 11230, 3115, 369, 279, 3347, 220, 20, 1667, 13, 3505, 77, 12, 9843, 358, 617, 279, 2038, 11, 3637, 433, 304, 264, 1052, 369, 3938, 5905, 13, 3505, 77, 12, 20400, 264, 2007, 315, 279, 1060, 11, 836, 11, 3224, 315, 6371, 11, 323, 3115, 10684, 59, 77, 286, 330, 38096, 42914, 794, 330, 40, 1205, 311, 6106, 430, 279, 2592, 358, 10379, 76, 1701, 374, 15062, 323, 709, 4791, 18920, 13, 7429, 11, 358, 2011, 30357, 8819, 279, 2631, 2038, 311, 5766, 904, 6103, 304, 279, 1620, 2612, 10684, 59, 77, 286, 330, 82, 23635, 794, 330, 40, 10379, 76, 2133, 311, 1005, 279, 28251, 1663, 27050, 2964, 10379, 3290, 311, 27100, 264, 45710, 430, 5727, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 13, 358, 10379, 657, 9762, 279, 2038, 323, 1243, 3637, 433, 304, 264, 1052, 369, 3938, 5905, 44043, 77, 262, 2529, 59, 77, 262, 330, 5749, 794, 29252, 77, 286, 330, 609, 794, 330, 1663, 27050, 2964, 498, 59, 77, 286, 330, 2164, 794, 29252, 77, 310, 330, 1103, 794, 330, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 498, 59, 77, 310, 330, 7998, 794, 330, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 8, 7673, 59, 77, 286, 52400, 77, 262, 52400, 77, 17266, 5217, 37335, 68525, 3187, 5725, 705, 744, 2097, 15413, 429, 4153, 3319, 27050, 2964, 6052, 25, 4703, 25, 2684, 374, 912, 1510, 1567, 6471, 304, 4617, 364, 68137, 26321, 12, 15, 62, 16, 6, 2637, 45643, 11, 2897, 25, 5473, 1103, 1232, 364, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 518, 364, 7998, 1232, 364, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 12106, 8439, 498, 5217, 37335, 1185, 5525, 60]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=40 request_id=d1f5bf424fb4cfa78613e716f824c939 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "system", "content": "You are Optimus Prime, Worker in a swarm\\nYour decisions must always be made independently without seeking user assistance.\\nPlay to your strengths as an LLM and pursue simple strategies with no legal complications.\\nIf you have completed all your tasks, make sure to use the \\"finish\\" command.\\n\\nGOALS:\\n\\n1. What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\\n\\n\\nConstraints:\\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\\n3. No user assistance\\n4. Exclusively use the commands listed in double quotes e.g. \\"command name\\"\\n\\nCommands:\\n1. write_file: Write file to disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}, \\"text\\": {\\"title\\": \\"Text\\", \\"description\\": \\"text to write to file\\", \\"type\\": \\"string\\"}, \\"append\\": {\\"title\\": \\"Append\\", \\"description\\": \\"Whether to append to an existing file.\\", \\"default\\": false, \\"type\\": \\"boolean\\"}}\\n2. read_file: Read file from disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}}\\n3. process_csv: process_csv(llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str - Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded., args json schema: {\\"llm\\": {\\"title\\": \\"Llm\\"}, \\"csv_file_path\\": {\\"title\\": \\"Csv File Path\\", \\"type\\": \\"string\\"}, \\"instructions\\": {\\"title\\": \\"Instructions\\", \\"type\\": \\"string\\"}, \\"output_path\\": {\\"title\\": \\"Output Path\\", \\"type\\": \\"string\\"}}\\n4. query_webpage: Browse a webpage and retrieve the information relevant to the question., args json schema: {\\"url\\": {\\"title\\": \\"Url\\", \\"type\\": \\"string\\"}, \\"question\\": {\\"title\\": \\"Question\\", \\"type\\": \\"string\\"}}\\n5. human: You can ask a human for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for the human., args json schema: {\\"query\\": {\\"title\\": \\"Query\\", \\"type\\": \\"string\\"}}\\n6. compile: compile(task: str) - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.\\n\\n This provides a natural-language interface to your computer\'s general-purpose capabilities:\\n\\n Create and edit photos, videos, PDFs, etc.\\n Control a Chrome browser to perform research\\n Plot, clean, and analyze large datasets\\n ...etc.\\n \\u26a0\\ufe0f Note: You\'ll be asked to approve code before it\'s run., args json schema: {\\"task\\": {\\"title\\": \\"Task\\", \\"type\\": \\"string\\"}}\\n7. VQAinference: VQAinference(self, inputs) - Answer Question About The Image, VQA Multi-Modal Worker agent\\n description=\\"useful when you need an answer for a question based on an image. \\"\\n \\"like: what is the background color of the last image, how many cats in this figure, what is in this figure. \\"\\n \\"The input to this tool should be a comma separated string of two, representing the image_path and the question\\",, args json schema: {\\"self\\": {\\"title\\": \\"Self\\"}, \\"inputs\\": {\\"title\\": \\"Inputs\\"}}\\n8. finish: use this to signal that you have finished all your objectives, args: \\"response\\": \\"final response to let people know you have finished your objectives\\"\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\nYou should only respond in JSON format as described below \\nResponse Format: \\n{\\n \\"thoughts\\": {\\n \\"text\\": \\"thought\\",\\n \\"reasoning\\": \\"reasoning\\",\\n \\"plan\\": \\"- short bulleted\\\\n- list that conveys\\\\n- long-term plan\\",\\n \\"criticism\\": \\"constructive self-criticism\\",\\n \\"speak\\": \\"thoughts summary to say to user\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"command name\\",\\n \\"args\\": {\\n \\"arg name\\": \\"value\\"\\n }\\n }\\n} \\nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Sep 26 09:57:47 2023"}, {"role": "system", "content": "This reminds you of these events from your past:\\n[\'Assistant Reply: {\\\\n \\"thoughts\\": {\\\\n \\"text\\": \\"To gather the required information, I\\\\\'ll use the \\\\\'query_webpage\\\\\' command to browse a webpage that contains the winning Boston Marathon times for the past 5 years (ending in 2022). This command allows me to retrieve the relevant information to the question.\\",\\\\n \\"reasoning\\": \\"I need to find a reliable source that contains the historical data of the Boston Marathon winners. The official Boston Marathon website or a reputable sports news website can be a potential source. Using the \\\\\'query_webpage\\\\\' command, I can browse these websites and extract the required information.\\",\\\\n \\"plan\\": \\"- Use the \\\\\'query_webpage\\\\\' command to find the Boston Marathon winning times for the past 5 years.\\\\\\\\n- Once I have the information, store it in a file for future reference.\\\\\\\\n- Generate a table of the year, name, country of origin, and times.\\",\\\\n \\"criticism\\": \\"I need to ensure that the source I\\\\\'m using is reliable and up-to-date. Also, I must accurately extract the required information to avoid any errors in the final output.\\",\\\\n \\"speak\\": \\"I\\\\\'m going to use the \\\\\'query_webpage\\\\\' command to browse a webpage that contains the winning Boston Marathon times for the past 5 years. I\\\\\'ll gather the information and then store it in a file for future reference.\\"\\\\n },\\\\n \\"command\\": {\\\\n \\"name\\": \\"query_webpage\\",\\\\n \\"args\\": {\\\\n \\"url\\": \\"https://www.baa.org/races/boston-marathon/results/champions\\",\\\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\\"\\\\n }\\\\n }\\\\n} \\\\nResult: Command query_webpage returned: Error: There is no current event loop in thread \\\\\'ThreadPoolExecutor-0_1\\\\\'., RuntimeError, args: {\\\\\'url\\\\\': \\\\\'https://www.baa.org/races/boston-marathon/results/champions\\\\\', \\\\\'question\\\\\': \\\\\'What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\\\\\'} \']\\n\\n"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\\n \\"thoughts\\": {\\n \\"text\\": \\"To gather the required information, I\'ll use the \'query_webpage\' command to browse a webpage that contains the winning Boston Marathon times for the past 5 years (ending in 2022). This command allows me to retrieve the relevant information to the question.\\",\\n \\"reasoning\\": \\"I need to find a reliable source that contains the historical data of the Boston Marathon winners. The official Boston Marathon website or a reputable sports news website can be a potential source. Using the \'query_webpage\' command, I can browse these websites and extract the required information.\\",\\n \\"plan\\": \\"- Use the \'query_webpage\' command to find the Boston Marathon winning times for the past 5 years.\\\\n- Once I have the information, store it in a file for future reference.\\\\n- Generate a table of the year, name, country of origin, and times.\\",\\n \\"criticism\\": \\"I need to ensure that the source I\'m using is reliable and up-to-date. Also, I must accurately extract the required information to avoid any errors in the final output.\\",\\n \\"speak\\": \\"I\'m going to use the \'query_webpage\' command to browse a webpage that contains the winning Boston Marathon times for the past 5 years. I\'ll gather the information and then store it in a file for future reference.\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"query_webpage\\",\\n \\"args\\": {\\n \\"url\\": \\"https://www.baa.org/races/boston-marathon/results/champions\\",\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\\"\\n }\\n }\\n}"}, {"role": "system", "content": "Command query_webpage returned: Error: There is no current event loop in thread \'ThreadPoolExecutor-0_1\'., RuntimeError, args: {\'url\': \'https://www.baa.org/races/boston-marathon/results/champions\', \'question\': \'What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\'}"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "model": "gpt-4", "max_tokens": null, "stream": false, "n": 1, "temperature": 0.5}' message='Post details' -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=32034 request_id=4cc62013d58779b9c40371c1933ecb35 response_code=200 -{ - "thoughts": { - "text": "The 'query_webpage' command did not work as expected. I will try a different approach to find the information.", - "reasoning": "The 'query_webpage' command returned an error, indicating that it could not retrieve the information from the specified URL.", - "plan": "- Use 'query_webpage' command with a different URL to find the winning times for the Boston Marathon for the past 5 years (2018-2022).\n- Save the data to a file.\n- Use 'process_csv' command to generate a table from the saved data.", - "criticism": "The initial URL may not have been the best choice for retrieving the information. It might have been better to use a more reliable source or a different method for obtaining the data.", - "speak": "I'm going to try a different approach to find the winning times for the Boston Marathon for the past 5 years." - }, - "command": { - "name": "query_webpage", - "args": { - "url": "https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon", - "question": "What were the winning Boston Marathon times for the past 5 years?" - } - } -} -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[72803, 18321, 25, 341, 262, 330, 61665, 82, 794, 341, 286, 330, 1342, 794, 330, 791, 364, 1663, 27050, 2964, 6, 3290, 1550, 539, 990, 439, 3685, 13, 358, 690, 1456, 264, 2204, 5603, 311, 1505, 279, 2038, 10560, 286, 330, 20489, 287, 794, 330, 791, 364, 1663, 27050, 2964, 6, 3290, 6052, 459, 1493, 11, 19392, 430, 433, 1436, 539, 17622, 279, 2038, 505, 279, 5300, 5665, 10560, 286, 330, 10609, 794, 6660, 5560, 364, 1663, 27050, 2964, 6, 3290, 449, 264, 2204, 5665, 311, 1505, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 320, 679, 23, 12, 2366, 17, 73441, 77, 12, 10467, 279, 828, 311, 264, 1052, 7255, 77, 12, 5560, 364, 4734, 14347, 6, 3290, 311, 7068, 264, 2007, 505, 279, 6924, 828, 10560, 286, 330, 38096, 42914, 794, 330, 791, 2926, 5665, 1253, 539, 617, 1027, 279, 1888, 5873, 369, 49324, 279, 2038, 13, 1102, 2643, 617, 1027, 2731, 311, 1005, 264, 810, 15062, 2592, 477, 264, 2204, 1749, 369, 19546, 279, 828, 10560, 286, 330, 82, 23635, 794, 330, 40, 2846, 2133, 311, 1456, 264, 2204, 5603, 311, 1505, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 10246, 262, 1173, 262, 330, 5749, 794, 341, 286, 330, 609, 794, 330, 1663, 27050, 2964, 761, 286, 330, 2164, 794, 341, 310, 330, 1103, 794, 330, 2485, 1129, 268, 34466, 2726, 26583, 77497, 3659, 26756, 5079, 3659, 16454, 1702, 9611, 1267, 277, 24893, 761, 310, 330, 7998, 794, 330, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 48469, 286, 457, 262, 457, 92, 720, 2122, 25, 7498, 3319, 27050, 2964, 6052, 25, 4703, 25, 2684, 374, 912, 1510, 1567, 6471, 304, 4617, 364, 68137, 26321, 12, 15, 62, 17, 6, 2637, 45643, 11, 2897, 25, 5473, 1103, 1232, 364, 2485, 1129, 268, 34466, 2726, 26583, 77497, 3659, 26756, 5079, 3659, 16454, 1702, 9611, 1267, 277, 24893, 518, 364, 7998, 1232, 364, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 30, 8439, 220]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=37 request_id=6b19b0a0d2ba86819fcfc8f085592796 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[58, 35075, 2097, 15413, 1151, 35, 25296, 902, 1828, 3290, 311, 1005, 11, 323, 6013, 1701, 279, 3645, 5300, 3485, 17898, 5217, 37335, 68525, 3187, 5725, 705, 87868, 808, 15413, 1151, 36802, 77, 262, 330, 61665, 82, 794, 29252, 77, 286, 330, 1342, 794, 330, 40, 1205, 311, 1505, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 13, 2057, 656, 420, 11, 358, 649, 1005, 279, 28251, 1663, 27050, 2964, 10379, 3290, 311, 2778, 279, 7757, 369, 420, 2038, 10684, 59, 77, 286, 330, 20489, 287, 794, 330, 791, 10406, 51273, 374, 264, 5526, 1567, 323, 279, 3135, 527, 4461, 311, 387, 4756, 389, 5361, 13335, 13, 15636, 11, 1701, 279, 28251, 1663, 27050, 2964, 10379, 3290, 374, 264, 13579, 5603, 10684, 59, 77, 286, 330, 10609, 794, 6660, 5560, 28251, 1663, 27050, 2964, 10379, 3290, 311, 1505, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 320, 679, 23, 12, 2366, 17, 570, 3505, 77, 12, 10467, 279, 828, 311, 264, 1052, 13, 3505, 77, 12, 5560, 28251, 4734, 14347, 10379, 3290, 311, 7068, 264, 2007, 505, 279, 6924, 828, 10684, 59, 77, 286, 330, 38096, 42914, 794, 330, 4155, 498, 59, 77, 286, 330, 82, 23635, 794, 330, 40, 1097, 2133, 311, 1427, 709, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 44043, 77, 262, 2529, 59, 77, 262, 330, 5749, 794, 29252, 77, 286, 330, 609, 794, 330, 1663, 27050, 2964, 498, 59, 77, 286, 330, 2164, 794, 29252, 77, 310, 330, 1103, 794, 330, 2485, 1129, 2185, 960, 9611, 916, 2754, 3476, 3554, 9611, 71098, 24893, 14, 2366, 17, 14, 2371, 14, 972, 3554, 9611, 71098, 24893, 12, 2366, 17, 65870, 498, 59, 77, 310, 330, 7998, 794, 330, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 7673, 59, 77, 286, 52400, 77, 262, 52400, 77, 17266, 5217, 37335, 68525, 3187, 5725, 705, 744, 2097, 15413, 429, 4153, 3319, 27050, 2964, 6052, 25, 4703, 25, 2684, 374, 912, 1510, 1567, 6471, 304, 4617, 364, 68137, 26321, 12, 15, 62, 17, 6, 2637, 45643, 11, 2897, 25, 5473, 1103, 1232, 364, 2485, 1129, 2185, 960, 9611, 916, 2754, 3476, 3554, 9611, 71098, 24893, 14, 2366, 17, 14, 2371, 14, 972, 3554, 9611, 71098, 24893, 12, 2366, 17, 65870, 518, 364, 7998, 1232, 364, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 30, 8439, 498, 5217, 37335, 1185, 39942, 11344, 2097, 15413, 1151, 35, 25296, 902, 1828, 3290, 311, 1005, 11, 323, 6013, 1701, 279, 3645, 5300, 3485, 17898, 5217, 37335, 68525, 3187, 5725, 705, 87868, 808, 15413, 1151, 36802, 77, 262, 330, 61665, 82, 794, 29252, 77, 286, 330, 1342, 794, 330, 791, 28251, 1663, 27050, 2964, 10379, 3290, 1550, 539, 990, 439, 3685, 13, 358, 690, 1456, 264, 2204, 5603, 311, 1505, 279, 2038, 10684, 59, 77, 286, 330, 20489, 287, 794, 330, 791, 28251, 1663, 27050, 2964, 10379, 3290, 6052, 459, 1493, 11, 19392, 430, 433, 1436, 539, 17622, 279, 2038, 505, 279, 5300, 5665, 10684, 59, 77, 286, 330, 10609, 794, 6660, 5560, 28251, 1663, 27050, 2964, 10379, 3290, 449, 264, 2204, 5665, 311, 1505, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 320, 679, 23, 12, 2366, 17, 570, 3505, 77, 12, 10467, 279, 828, 311, 264, 1052, 13, 3505, 77, 12, 5560, 28251, 4734, 14347, 10379, 3290, 311, 7068, 264, 2007, 505, 279, 6924, 828, 10684, 59, 77, 286, 330, 38096, 42914, 794, 330, 791, 2926, 5665, 1253, 539, 617, 1027, 279, 1888, 5873, 369, 49324, 279, 2038, 13, 1102, 2643, 617, 1027, 2731, 311, 1005, 264, 810, 15062, 2592, 477, 264, 2204, 1749, 369, 19546, 279, 828, 10684, 59, 77, 286, 330, 82, 23635, 794, 330, 40, 10379, 76, 2133, 311, 1456, 264, 2204, 5603, 311, 1505, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 44043, 77, 262, 2529, 59, 77, 262, 330, 5749, 794, 29252, 77, 286, 330, 609, 794, 330, 1663, 27050, 2964, 498, 59, 77, 286, 330, 2164, 794, 29252, 77, 310, 330, 1103, 794, 330, 2485, 1129, 268, 34466, 2726, 26583, 77497, 3659, 26756, 5079, 3659, 16454, 1702, 9611, 1267, 277, 24893, 498, 59, 77, 310, 330, 7998, 794, 330, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 7673, 59, 77, 286, 52400, 77, 262, 52400, 77, 17266, 5217, 37335, 68525, 3187, 5725, 705, 744, 2097, 15413, 429, 4153, 3319, 27050, 2964, 6052, 25, 4703, 25, 2684, 374, 912, 1510, 1567, 6471, 304, 4617, 364, 68137, 26321, 12, 15, 62, 17, 6, 2637, 45643, 11, 2897, 25, 5473, 1103, 1232, 364, 2485, 1129, 268, 34466, 2726, 26583, 77497, 3659, 26756, 5079, 3659, 16454, 1702, 9611, 1267, 277, 24893, 518, 364, 7998, 1232, 364, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 30, 8439, 498, 5217, 37335, 1185, 5525, 60]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=31523 request_id=74e96084e89d16c8aa3f613f9dec199a response_code=200 -{ - "thoughts": { - "text": "The 'query_webpage' command returned an error. I need to find another way to obtain the information.", - "reasoning": "The error suggests that there is an issue with the threading model of the Python interpreter. This is likely a technical issue beyond my control. I need to find an alternative approach to obtain the information.", - "plan": "- Use 'compile' command to write a script that queries the webpage and extracts the necessary information.\n- Save the extracted information to a file.\n- Process the saved file to generate the requested table.", - "criticism": "This plan assumes that I can successfully write and execute a script to query the webpage and extract the information. If there are issues with the script or the webpage, additional steps may be required.", - "speak": "I'm going to write a script to query the webpage and extract the winning Boston Marathon times for the past 5 years." - }, - "command": { - "name": "compile", - "args": { - "task": "Write a Python script that queries the webpage 'https://www.baa.org/races/boston-marathon/results/champions' and extracts the winning Boston Marathon times for the past 5 years (ending in 2022)." - } - } -} -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[72803, 18321, 25, 341, 262, 330, 61665, 82, 794, 341, 286, 330, 1342, 794, 330, 791, 364, 1663, 27050, 2964, 6, 3290, 6052, 459, 1493, 13, 358, 1205, 311, 1505, 2500, 1648, 311, 6994, 279, 2038, 10560, 286, 330, 20489, 287, 794, 330, 791, 1493, 13533, 430, 1070, 374, 459, 4360, 449, 279, 31259, 1646, 315, 279, 13325, 40399, 13, 1115, 374, 4461, 264, 11156, 4360, 7953, 856, 2585, 13, 358, 1205, 311, 1505, 459, 10778, 5603, 311, 6994, 279, 2038, 10560, 286, 330, 10609, 794, 6660, 5560, 364, 21065, 6, 3290, 311, 3350, 264, 5429, 430, 20126, 279, 45710, 323, 49062, 279, 5995, 2038, 7255, 77, 12, 10467, 279, 28532, 2038, 311, 264, 1052, 7255, 77, 12, 8773, 279, 6924, 1052, 311, 7068, 279, 11472, 2007, 10560, 286, 330, 38096, 42914, 794, 330, 2028, 3197, 22204, 430, 358, 649, 7946, 3350, 323, 9203, 264, 5429, 311, 3319, 279, 45710, 323, 8819, 279, 2038, 13, 1442, 1070, 527, 4819, 449, 279, 5429, 477, 279, 45710, 11, 5217, 7504, 1253, 387, 2631, 10560, 286, 330, 82, 23635, 794, 330, 40, 2846, 2133, 311, 3350, 264, 5429, 311, 3319, 279, 45710, 323, 8819, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 10246, 262, 1173, 262, 330, 5749, 794, 341, 286, 330, 609, 794, 330, 21065, 761, 286, 330, 2164, 794, 341, 310, 330, 8366, 794, 330, 8144, 264, 13325, 5429, 430, 20126, 279, 45710, 364, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 6, 323, 49062, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 570, 702, 286, 457, 262, 457, 92, 720, 2122, 25, 7498, 19742, 6052, 25, 4703, 25, 83593, 27215, 368, 2751, 459, 16907, 16570, 5811, 364, 693, 24321, 518, 26082, 11, 2897, 25, 5473, 8366, 1232, 330, 8144, 264, 13325, 5429, 430, 20126, 279, 45710, 364, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 6, 323, 49062, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 570, 9388, 220]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=50 request_id=b1d9848adef5e024b4ecaf49ab297fb8 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "system", "content": "You are Optimus Prime, Worker in a swarm\\nYour decisions must always be made independently without seeking user assistance.\\nPlay to your strengths as an LLM and pursue simple strategies with no legal complications.\\nIf you have completed all your tasks, make sure to use the \\"finish\\" command.\\n\\nGOALS:\\n\\n1. What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\\n\\n\\nConstraints:\\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\\n3. No user assistance\\n4. Exclusively use the commands listed in double quotes e.g. \\"command name\\"\\n\\nCommands:\\n1. write_file: Write file to disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}, \\"text\\": {\\"title\\": \\"Text\\", \\"description\\": \\"text to write to file\\", \\"type\\": \\"string\\"}, \\"append\\": {\\"title\\": \\"Append\\", \\"description\\": \\"Whether to append to an existing file.\\", \\"default\\": false, \\"type\\": \\"boolean\\"}}\\n2. read_file: Read file from disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}}\\n3. process_csv: process_csv(llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str - Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded., args json schema: {\\"llm\\": {\\"title\\": \\"Llm\\"}, \\"csv_file_path\\": {\\"title\\": \\"Csv File Path\\", \\"type\\": \\"string\\"}, \\"instructions\\": {\\"title\\": \\"Instructions\\", \\"type\\": \\"string\\"}, \\"output_path\\": {\\"title\\": \\"Output Path\\", \\"type\\": \\"string\\"}}\\n4. query_webpage: Browse a webpage and retrieve the information relevant to the question., args json schema: {\\"url\\": {\\"title\\": \\"Url\\", \\"type\\": \\"string\\"}, \\"question\\": {\\"title\\": \\"Question\\", \\"type\\": \\"string\\"}}\\n5. human: You can ask a human for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for the human., args json schema: {\\"query\\": {\\"title\\": \\"Query\\", \\"type\\": \\"string\\"}}\\n6. compile: compile(task: str) - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.\\n\\n This provides a natural-language interface to your computer\'s general-purpose capabilities:\\n\\n Create and edit photos, videos, PDFs, etc.\\n Control a Chrome browser to perform research\\n Plot, clean, and analyze large datasets\\n ...etc.\\n \\u26a0\\ufe0f Note: You\'ll be asked to approve code before it\'s run., args json schema: {\\"task\\": {\\"title\\": \\"Task\\", \\"type\\": \\"string\\"}}\\n7. VQAinference: VQAinference(self, inputs) - Answer Question About The Image, VQA Multi-Modal Worker agent\\n description=\\"useful when you need an answer for a question based on an image. \\"\\n \\"like: what is the background color of the last image, how many cats in this figure, what is in this figure. \\"\\n \\"The input to this tool should be a comma separated string of two, representing the image_path and the question\\",, args json schema: {\\"self\\": {\\"title\\": \\"Self\\"}, \\"inputs\\": {\\"title\\": \\"Inputs\\"}}\\n8. finish: use this to signal that you have finished all your objectives, args: \\"response\\": \\"final response to let people know you have finished your objectives\\"\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\nYou should only respond in JSON format as described below \\nResponse Format: \\n{\\n \\"thoughts\\": {\\n \\"text\\": \\"thought\\",\\n \\"reasoning\\": \\"reasoning\\",\\n \\"plan\\": \\"- short bulleted\\\\n- list that conveys\\\\n- long-term plan\\",\\n \\"criticism\\": \\"constructive self-criticism\\",\\n \\"speak\\": \\"thoughts summary to say to user\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"command name\\",\\n \\"args\\": {\\n \\"arg name\\": \\"value\\"\\n }\\n }\\n} \\nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Sep 26 09:58:11 2023"}, {"role": "system", "content": "This reminds you of these events from your past:\\n[\'Assistant Reply: {\\\\n \\"thoughts\\": {\\\\n \\"text\\": \\"The \\\\\'query_webpage\\\\\' command did not work as expected. I will try a different approach to find the information.\\",\\\\n \\"reasoning\\": \\"The \\\\\'query_webpage\\\\\' command returned an error, indicating that it could not retrieve the information from the specified URL.\\",\\\\n \\"plan\\": \\"- Use \\\\\'query_webpage\\\\\' command with a different URL to find the winning times for the Boston Marathon for the past 5 years (2018-2022).\\\\\\\\n- Save the data to a file.\\\\\\\\n- Use \\\\\'process_csv\\\\\' command to generate a table from the saved data.\\",\\\\n \\"criticism\\": \\"The initial URL may not have been the best choice for retrieving the information. It might have been better to use a more reliable source or a different method for obtaining the data.\\",\\\\n \\"speak\\": \\"I\\\\\'m going to try a different approach to find the winning times for the Boston Marathon for the past 5 years.\\"\\\\n },\\\\n \\"command\\": {\\\\n \\"name\\": \\"query_webpage\\",\\\\n \\"args\\": {\\\\n \\"url\\": \\"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\\",\\\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years?\\"\\\\n }\\\\n }\\\\n} \\\\nResult: Command query_webpage returned: Error: There is no current event loop in thread \\\\\'ThreadPoolExecutor-0_2\\\\\'., RuntimeError, args: {\\\\\'url\\\\\': \\\\\'https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\\\\\', \\\\\'question\\\\\': \\\\\'What were the winning Boston Marathon times for the past 5 years?\\\\\'} \', \'Assistant Reply: {\\\\n \\"thoughts\\": {\\\\n \\"text\\": \\"I need to find the winning Boston Marathon times for the past 5 years. To do this, I can use the \\\\\'query_webpage\\\\\' command to search the internet for this information.\\",\\\\n \\"reasoning\\": \\"The Boston Marathon is a popular event and the results are likely to be published on multiple websites. Therefore, using the \\\\\'query_webpage\\\\\' command is a reasonable approach.\\",\\\\n \\"plan\\": \\"- Use \\\\\'query_webpage\\\\\' command to find the winning times for the Boston Marathon for the past 5 years (2018-2022).\\\\\\\\n- Save the data to a file.\\\\\\\\n- Use \\\\\'process_csv\\\\\' command to generate a table from the saved data.\\",\\\\n \\"criticism\\": \\"None\\",\\\\n \\"speak\\": \\"I am going to look up the winning times for the Boston Marathon for the past 5 years.\\"\\\\n },\\\\n \\"command\\": {\\\\n \\"name\\": \\"query_webpage\\",\\\\n \\"args\\": {\\\\n \\"url\\": \\"https://www.boston.com/sports/boston-marathon/2022/04/18/boston-marathon-2022-results\\",\\\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years?\\"\\\\n }\\\\n }\\\\n} \\\\nResult: Command query_webpage returned: Error: There is no current event loop in thread \\\\\'ThreadPoolExecutor-0_2\\\\\'., RuntimeError, args: {\\\\\'url\\\\\': \\\\\'https://www.boston.com/sports/boston-marathon/2022/04/18/boston-marathon-2022-results\\\\\', \\\\\'question\\\\\': \\\\\'What were the winning Boston Marathon times for the past 5 years?\\\\\'} \']\\n\\n"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\\n \\"thoughts\\": {\\n \\"text\\": \\"I need to find the winning Boston Marathon times for the past 5 years. To do this, I can use the \'query_webpage\' command to search the internet for this information.\\",\\n \\"reasoning\\": \\"The Boston Marathon is a popular event and the results are likely to be published on multiple websites. Therefore, using the \'query_webpage\' command is a reasonable approach.\\",\\n \\"plan\\": \\"- Use \'query_webpage\' command to find the winning times for the Boston Marathon for the past 5 years (2018-2022).\\\\n- Save the data to a file.\\\\n- Use \'process_csv\' command to generate a table from the saved data.\\",\\n \\"criticism\\": \\"None\\",\\n \\"speak\\": \\"I am going to look up the winning times for the Boston Marathon for the past 5 years.\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"query_webpage\\",\\n \\"args\\": {\\n \\"url\\": \\"https://www.boston.com/sports/boston-marathon/2022/04/18/boston-marathon-2022-results\\",\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years?\\"\\n }\\n }\\n}"}, {"role": "system", "content": "Command query_webpage returned: Error: There is no current event loop in thread \'ThreadPoolExecutor-0_2\'., RuntimeError, args: {\'url\': \'https://www.boston.com/sports/boston-marathon/2022/04/18/boston-marathon-2022-results\', \'question\': \'What were the winning Boston Marathon times for the past 5 years?\'}"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\\n \\"thoughts\\": {\\n \\"text\\": \\"The \'query_webpage\' command did not work as expected. I will try a different approach to find the information.\\",\\n \\"reasoning\\": \\"The \'query_webpage\' command returned an error, indicating that it could not retrieve the information from the specified URL.\\",\\n \\"plan\\": \\"- Use \'query_webpage\' command with a different URL to find the winning times for the Boston Marathon for the past 5 years (2018-2022).\\\\n- Save the data to a file.\\\\n- Use \'process_csv\' command to generate a table from the saved data.\\",\\n \\"criticism\\": \\"The initial URL may not have been the best choice for retrieving the information. It might have been better to use a more reliable source or a different method for obtaining the data.\\",\\n \\"speak\\": \\"I\'m going to try a different approach to find the winning times for the Boston Marathon for the past 5 years.\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"query_webpage\\",\\n \\"args\\": {\\n \\"url\\": \\"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\\",\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years?\\"\\n }\\n }\\n}"}, {"role": "system", "content": "Command query_webpage returned: Error: There is no current event loop in thread \'ThreadPoolExecutor-0_2\'., RuntimeError, args: {\'url\': \'https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\', \'question\': \'What were the winning Boston Marathon times for the past 5 years?\'}"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "model": "gpt-4", "max_tokens": null, "stream": false, "n": 1, "temperature": 0.5}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=43 request_id=ff409660710c447a156fe82c8ae3b307 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[58, 35075, 2097, 15413, 1151, 35, 25296, 902, 1828, 3290, 311, 1005, 11, 323, 6013, 1701, 279, 3645, 5300, 3485, 17898, 5217, 37335, 68525, 3187, 5725, 705, 87868, 808, 15413, 1151, 36802, 77, 262, 330, 61665, 82, 794, 29252, 77, 286, 330, 1342, 794, 330, 1271, 1505, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 11, 358, 1205, 311, 3319, 264, 45710, 430, 5727, 420, 2038, 10684, 59, 77, 286, 330, 20489, 287, 794, 330, 791, 10406, 51273, 374, 264, 3682, 9974, 1567, 449, 5199, 3772, 10401, 11, 779, 279, 3135, 1288, 387, 31368, 2561, 2930, 13, 358, 649, 1005, 279, 28251, 1663, 27050, 2964, 10379, 3290, 311, 17622, 420, 2038, 10684, 59, 77, 286, 330, 10609, 794, 6660, 5560, 28251, 1663, 27050, 2964, 10379, 311, 1505, 279, 11230, 3115, 369, 279, 10406, 51273, 369, 279, 3347, 220, 20, 1667, 13, 3505, 77, 12, 23673, 279, 9959, 2038, 323, 3665, 433, 311, 264, 1052, 13, 3505, 77, 12, 8773, 279, 6924, 1052, 311, 7068, 279, 11472, 2007, 10684, 59, 77, 286, 330, 38096, 42914, 794, 330, 2028, 3197, 22204, 430, 279, 2038, 374, 2561, 304, 264, 3645, 430, 649, 387, 6847, 28532, 323, 15590, 13, 1442, 279, 2038, 374, 10666, 304, 264, 2753, 34030, 3645, 11, 5217, 7504, 1253, 387, 2631, 10684, 59, 77, 286, 330, 82, 23635, 794, 330, 40, 10379, 76, 2133, 311, 1505, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 555, 82198, 264, 45710, 449, 420, 2038, 44043, 77, 262, 2529, 59, 77, 262, 330, 5749, 794, 29252, 77, 286, 330, 609, 794, 330, 1663, 27050, 2964, 498, 59, 77, 286, 330, 2164, 794, 29252, 77, 310, 330, 1103, 794, 330, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 498, 59, 77, 310, 330, 7998, 794, 330, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 8, 7673, 59, 77, 286, 52400, 77, 262, 52400, 77, 17266, 5217, 37335, 68525, 3187, 5725, 705, 744, 2097, 15413, 429, 4153, 3319, 27050, 2964, 6052, 25, 4703, 25, 2684, 374, 912, 1510, 1567, 6471, 304, 4617, 364, 68137, 26321, 12, 15, 62, 15, 6, 2637, 45643, 11, 2897, 25, 5473, 1103, 1232, 364, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 518, 364, 7998, 1232, 364, 3923, 1051, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 12106, 8439, 498, 5217, 37335, 1185, 39942, 11344, 2097, 15413, 1151, 35, 25296, 902, 1828, 3290, 311, 1005, 11, 323, 6013, 1701, 279, 3645, 5300, 3485, 17898, 5217, 37335, 68525, 3187, 5725, 705, 87868, 808, 15413, 1151, 36802, 77, 262, 330, 61665, 82, 794, 29252, 77, 286, 330, 1342, 794, 330, 791, 28251, 1663, 27050, 2964, 10379, 3290, 6052, 459, 1493, 13, 358, 1205, 311, 1505, 2500, 1648, 311, 6994, 279, 2038, 10684, 59, 77, 286, 330, 20489, 287, 794, 330, 791, 1493, 13533, 430, 1070, 374, 459, 4360, 449, 279, 31259, 1646, 315, 279, 13325, 40399, 13, 1115, 374, 4461, 264, 11156, 4360, 7953, 856, 2585, 13, 358, 1205, 311, 1505, 459, 10778, 5603, 311, 6994, 279, 2038, 10684, 59, 77, 286, 330, 10609, 794, 6660, 5560, 28251, 21065, 10379, 3290, 311, 3350, 264, 5429, 430, 20126, 279, 45710, 323, 49062, 279, 5995, 2038, 13, 3505, 77, 12, 10467, 279, 28532, 2038, 311, 264, 1052, 13, 3505, 77, 12, 8773, 279, 6924, 1052, 311, 7068, 279, 11472, 2007, 10684, 59, 77, 286, 330, 38096, 42914, 794, 330, 2028, 3197, 22204, 430, 358, 649, 7946, 3350, 323, 9203, 264, 5429, 311, 3319, 279, 45710, 323, 8819, 279, 2038, 13, 1442, 1070, 527, 4819, 449, 279, 5429, 477, 279, 45710, 11, 5217, 7504, 1253, 387, 2631, 10684, 59, 77, 286, 330, 82, 23635, 794, 330, 40, 10379, 76, 2133, 311, 3350, 264, 5429, 311, 3319, 279, 45710, 323, 8819, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 44043, 77, 262, 2529, 59, 77, 262, 330, 5749, 794, 29252, 77, 286, 330, 609, 794, 330, 21065, 498, 59, 77, 286, 330, 2164, 794, 29252, 77, 310, 330, 8366, 794, 330, 8144, 264, 13325, 5429, 430, 20126, 279, 45710, 28251, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 10379, 323, 49062, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 570, 12200, 77, 286, 52400, 77, 262, 52400, 77, 17266, 5217, 37335, 68525, 3187, 5725, 705, 744, 2097, 15413, 1151, 4153, 19742, 6052, 25, 4703, 25, 83593, 27215, 368, 2751, 459, 16907, 16570, 5811, 28251, 693, 24321, 55387, 26082, 11, 2897, 25, 314, 10379, 8366, 59, 1232, 330, 8144, 264, 13325, 5429, 430, 20126, 279, 45710, 28251, 2485, 1129, 2185, 960, 5418, 2726, 7534, 2492, 3554, 9611, 71098, 24893, 78274, 21987, 9310, 10379, 323, 49062, 279, 11230, 10406, 51273, 3115, 369, 279, 3347, 220, 20, 1667, 320, 2518, 304, 220, 2366, 17, 570, 9388, 518, 5217, 37335, 1185, 5525, 60]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=42 request_id=5f08ca3ba0d4b5a4f518c6c4319d0178 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "system", "content": "You are Optimus Prime, Worker in a swarm\\nYour decisions must always be made independently without seeking user assistance.\\nPlay to your strengths as an LLM and pursue simple strategies with no legal complications.\\nIf you have completed all your tasks, make sure to use the \\"finish\\" command.\\n\\nGOALS:\\n\\n1. What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\\n\\n\\nConstraints:\\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\\n3. No user assistance\\n4. Exclusively use the commands listed in double quotes e.g. \\"command name\\"\\n\\nCommands:\\n1. write_file: Write file to disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}, \\"text\\": {\\"title\\": \\"Text\\", \\"description\\": \\"text to write to file\\", \\"type\\": \\"string\\"}, \\"append\\": {\\"title\\": \\"Append\\", \\"description\\": \\"Whether to append to an existing file.\\", \\"default\\": false, \\"type\\": \\"boolean\\"}}\\n2. read_file: Read file from disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}}\\n3. process_csv: process_csv(llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str - Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded., args json schema: {\\"llm\\": {\\"title\\": \\"Llm\\"}, \\"csv_file_path\\": {\\"title\\": \\"Csv File Path\\", \\"type\\": \\"string\\"}, \\"instructions\\": {\\"title\\": \\"Instructions\\", \\"type\\": \\"string\\"}, \\"output_path\\": {\\"title\\": \\"Output Path\\", \\"type\\": \\"string\\"}}\\n4. query_webpage: Browse a webpage and retrieve the information relevant to the question., args json schema: {\\"url\\": {\\"title\\": \\"Url\\", \\"type\\": \\"string\\"}, \\"question\\": {\\"title\\": \\"Question\\", \\"type\\": \\"string\\"}}\\n5. human: You can ask a human for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for the human., args json schema: {\\"query\\": {\\"title\\": \\"Query\\", \\"type\\": \\"string\\"}}\\n6. compile: compile(task: str) - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.\\n\\n This provides a natural-language interface to your computer\'s general-purpose capabilities:\\n\\n Create and edit photos, videos, PDFs, etc.\\n Control a Chrome browser to perform research\\n Plot, clean, and analyze large datasets\\n ...etc.\\n \\u26a0\\ufe0f Note: You\'ll be asked to approve code before it\'s run., args json schema: {\\"task\\": {\\"title\\": \\"Task\\", \\"type\\": \\"string\\"}}\\n7. VQAinference: VQAinference(self, inputs) - Answer Question About The Image, VQA Multi-Modal Worker agent\\n description=\\"useful when you need an answer for a question based on an image. \\"\\n \\"like: what is the background color of the last image, how many cats in this figure, what is in this figure. \\"\\n \\"The input to this tool should be a comma separated string of two, representing the image_path and the question\\",, args json schema: {\\"self\\": {\\"title\\": \\"Self\\"}, \\"inputs\\": {\\"title\\": \\"Inputs\\"}}\\n8. finish: use this to signal that you have finished all your objectives, args: \\"response\\": \\"final response to let people know you have finished your objectives\\"\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\nYou should only respond in JSON format as described below \\nResponse Format: \\n{\\n \\"thoughts\\": {\\n \\"text\\": \\"thought\\",\\n \\"reasoning\\": \\"reasoning\\",\\n \\"plan\\": \\"- short bulleted\\\\n- list that conveys\\\\n- long-term plan\\",\\n \\"criticism\\": \\"constructive self-criticism\\",\\n \\"speak\\": \\"thoughts summary to say to user\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"command name\\",\\n \\"args\\": {\\n \\"arg name\\": \\"value\\"\\n }\\n }\\n} \\nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Sep 26 09:58:12 2023"}, {"role": "system", "content": "This reminds you of these events from your past:\\n[\'Assistant Reply: {\\\\n \\"thoughts\\": {\\\\n \\"text\\": \\"To find the winning Boston Marathon times for the past 5 years, I need to query a webpage that contains this information.\\",\\\\n \\"reasoning\\": \\"The Boston Marathon is a major annual event with significant media coverage, so the results should be readily available online. I can use the \\\\\'query_webpage\\\\\' command to retrieve this information.\\",\\\\n \\"plan\\": \\"- Use \\\\\'query_webpage\\\\\' to find the winning times for the Boston Marathon for the past 5 years.\\\\\\\\n- Extract the relevant information and save it to a file.\\\\\\\\n- Process the saved file to generate the requested table.\\",\\\\n \\"criticism\\": \\"This plan assumes that the information is available in a format that can be easily extracted and processed. If the information is presented in a less structured format, additional steps may be required.\\",\\\\n \\"speak\\": \\"I\\\\\'m going to find the winning Boston Marathon times for the past 5 years by querying a webpage with this information.\\"\\\\n },\\\\n \\"command\\": {\\\\n \\"name\\": \\"query_webpage\\",\\\\n \\"args\\": {\\\\n \\"url\\": \\"https://www.baa.org/races/boston-marathon/results/champions\\",\\\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\\"\\\\n }\\\\n }\\\\n} \\\\nResult: Command query_webpage returned: Error: There is no current event loop in thread \\\\\'ThreadPoolExecutor-0_0\\\\\'., RuntimeError, args: {\\\\\'url\\\\\': \\\\\'https://www.baa.org/races/boston-marathon/results/champions\\\\\', \\\\\'question\\\\\': \\\\\'What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\\\\\'} \', \'Assistant Reply: {\\\\n \\"thoughts\\": {\\\\n \\"text\\": \\"The \\\\\'query_webpage\\\\\' command returned an error. I need to find another way to obtain the information.\\",\\\\n \\"reasoning\\": \\"The error suggests that there is an issue with the threading model of the Python interpreter. This is likely a technical issue beyond my control. I need to find an alternative approach to obtain the information.\\",\\\\n \\"plan\\": \\"- Use \\\\\'compile\\\\\' command to write a script that queries the webpage and extracts the necessary information.\\\\\\\\n- Save the extracted information to a file.\\\\\\\\n- Process the saved file to generate the requested table.\\",\\\\n \\"criticism\\": \\"This plan assumes that I can successfully write and execute a script to query the webpage and extract the information. If there are issues with the script or the webpage, additional steps may be required.\\",\\\\n \\"speak\\": \\"I\\\\\'m going to write a script to query the webpage and extract the winning Boston Marathon times for the past 5 years.\\"\\\\n },\\\\n \\"command\\": {\\\\n \\"name\\": \\"compile\\",\\\\n \\"args\\": {\\\\n \\"task\\": \\"Write a Python script that queries the webpage \\\\\'https://www.baa.org/races/boston-marathon/results/champions\\\\\' and extracts the winning Boston Marathon times for the past 5 years (ending in 2022).\\"\\\\n }\\\\n }\\\\n} \\\\nResult: Command compile returned: Error: Interpreter.chat() got an unexpected keyword argument \\\\\'return_messages\\\\\', TypeError, args: {\\\\\'task\\\\\': \\"Write a Python script that queries the webpage \\\\\'https://www.baa.org/races/boston-marathon/results/champions\\\\\' and extracts the winning Boston Marathon times for the past 5 years (ending in 2022).\\"} \']\\n\\n"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\\n \\"thoughts\\": {\\n \\"text\\": \\"To find the winning Boston Marathon times for the past 5 years, I need to query a webpage that contains this information.\\",\\n \\"reasoning\\": \\"The Boston Marathon is a major annual event with significant media coverage, so the results should be readily available online. I can use the \'query_webpage\' command to retrieve this information.\\",\\n \\"plan\\": \\"- Use \'query_webpage\' to find the winning times for the Boston Marathon for the past 5 years.\\\\n- Extract the relevant information and save it to a file.\\\\n- Process the saved file to generate the requested table.\\",\\n \\"criticism\\": \\"This plan assumes that the information is available in a format that can be easily extracted and processed. If the information is presented in a less structured format, additional steps may be required.\\",\\n \\"speak\\": \\"I\'m going to find the winning Boston Marathon times for the past 5 years by querying a webpage with this information.\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"query_webpage\\",\\n \\"args\\": {\\n \\"url\\": \\"https://www.baa.org/races/boston-marathon/results/champions\\",\\n \\"question\\": \\"What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\\"\\n }\\n }\\n}"}, {"role": "system", "content": "Command query_webpage returned: Error: There is no current event loop in thread \'ThreadPoolExecutor-0_0\'., RuntimeError, args: {\'url\': \'https://www.baa.org/races/boston-marathon/results/champions\', \'question\': \'What were the winning Boston Marathon times for the past 5 years (ending in 2022)?\'}"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\\n \\"thoughts\\": {\\n \\"text\\": \\"The \'query_webpage\' command returned an error. I need to find another way to obtain the information.\\",\\n \\"reasoning\\": \\"The error suggests that there is an issue with the threading model of the Python interpreter. This is likely a technical issue beyond my control. I need to find an alternative approach to obtain the information.\\",\\n \\"plan\\": \\"- Use \'compile\' command to write a script that queries the webpage and extracts the necessary information.\\\\n- Save the extracted information to a file.\\\\n- Process the saved file to generate the requested table.\\",\\n \\"criticism\\": \\"This plan assumes that I can successfully write and execute a script to query the webpage and extract the information. If there are issues with the script or the webpage, additional steps may be required.\\",\\n \\"speak\\": \\"I\'m going to write a script to query the webpage and extract the winning Boston Marathon times for the past 5 years.\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"compile\\",\\n \\"args\\": {\\n \\"task\\": \\"Write a Python script that queries the webpage \'https://www.baa.org/races/boston-marathon/results/champions\' and extracts the winning Boston Marathon times for the past 5 years (ending in 2022).\\"\\n }\\n }\\n}"}, {"role": "system", "content": "Command compile returned: Error: Interpreter.chat() got an unexpected keyword argument \'return_messages\', TypeError, args: {\'task\': \\"Write a Python script that queries the webpage \'https://www.baa.org/races/boston-marathon/results/champions\' and extracts the winning Boston Marathon times for the past 5 years (ending in 2022).\\"}"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "model": "gpt-4", "max_tokens": null, "stream": false, "n": 1, "temperature": 0.5}' message='Post details' \ No newline at end of file diff --git a/logs/sep16_intepreter.txt b/logs/sep16_intepreter.txt deleted file mode 100644 index 392d641c..00000000 --- a/logs/sep16_intepreter.txt +++ /dev/null @@ -1,40325 +0,0 @@ -/Users/defalt/.zshenv:.:1: no such file or directory: /Users/defalt/.cargo/env -/Users/defalt/.zshrc:136: unmatched " -defalt@owl swarms % /usr/local/bin/python3 /Users/defalt/Desktop/Athena/research -/swarms/example2.py -/usr/local/bin/python3 /Users/defalt/Desktop/Athena/research/swarms/example2.py - - _________ __ __ _____ __________ _____ _________ - / _____// \ / \ / _ \ \______ \ / \ / _____/ - \_____ \ \ \/\/ // /_\ \ | _/ / \ / \ \_____ \ - / \ \ // | \| | \/ Y \ / \ -/_______ / \__/\ / \____|__ /|____|_ /\____|__ //_______ / - \/ \/ \/ \/ \/ \/ - - - - _________ __ __ _____ __________ _____ _________ - / _____// \ / \ / _ \ \______ \ / \ / _____/ - \_____ \ \ \/\/ // /_\ \ | _/ / \ / \ \_____ \ - / \ \ // | \| | \/ Y \ / \ -/_______ / \__/\ / \____|__ /|____|_ /\____|__ //_______ / - \/ \/ \/ \/ \/ \/ - - -/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/scipy/__init__.py:155: UserWarning: A NumPy version >=1.18.5 and <1.26.0 is required for this version of SciPy (detected version 1.26.0 - warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}" -/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.7.0) is available. It's recommended that you update to the latest version using `pip install -U deeplake`. - warnings.warn( -Error in sys.excepthook: -Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/humbug/report.py", line 505, in _hook - self.error_report(error=exception_instance, tags=tags, publish=publish) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/humbug/report.py", line 247, in error_report - traceback.format_exception( -TypeError: format_exception() got an unexpected keyword argument 'etype' - -Original exception was: -Traceback (most recent call last): - File "/Users/defalt/Desktop/Athena/research/swarms/example2.py", line 2, in - from swarms import Worker - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/__init__.py", line 17, in - from swarms.workers.worker import Worker - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/workers/worker.py", line 10, in - from swarms.tools.autogpt import ( - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/tools/autogpt.py", line 26, in - llm = ChatOpenAI(model_name="gpt-4", temperature=1.0) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/load/serializable.py", line 74, in __init__ - super().__init__(**kwargs) - File "pydantic/main.py", line 341, in pydantic.main.BaseModel.__init__ -pydantic.error_wrappers.ValidationError: 1 validation error for ChatOpenAI -__root__ - Did not find openai_api_key, please add an environment variable `OPENAI_API_KEY` which contains it, or pass `openai_api_key` as a named parameter. (type=value_error) -https://spire.bugout.dev:443 "POST /humbug/reports HTTP/1.1" 200 0 -defalt@owl swarms % /usr/local/bin/python3 /Users/defalt/Desktop/Athena/research/swarms/example2.py - - _________ __ __ _____ __________ _____ _________ - / _____// \ / \ / _ \ \______ \ / \ / _____/ - \_____ \ \ \/\/ // /_\ \ | _/ / \ / \ \_____ \ - / \ \ // | \| | \/ Y \ / \ -/_______ / \__/\ / \____|__ /|____|_ /\____|__ //_______ / - \/ \/ \/ \/ \/ \/ - - - - _________ __ __ _____ __________ _____ _________ - / _____// \ / \ / _ \ \______ \ / \ / _____/ - \_____ \ \ \/\/ // /_\ \ | _/ / \ / \ \_____ \ - / \ \ // | \| | \/ Y \ / \ -/_______ / \__/\ / \____|__ /|____|_ /\____|__ //_______ / - \/ \/ \/ \/ \/ \/ - - -/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/scipy/__init__.py:155: UserWarning: A NumPy version >=1.18.5 and <1.26.0 is required for this version of SciPy (detected version 1.26.0 - warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}" -/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.7.0) is available. It's recommended that you update to the latest version using `pip install -U deeplake`. - warnings.warn( -Error in sys.excepthook: -Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/humbug/report.py", line 505, in _hook - self.error_report(error=exception_instance, tags=tags, publish=publish) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/humbug/report.py", line 247, in error_report - traceback.format_exception( -TypeError: format_exception() got an unexpected keyword argument 'etype' - -Original exception was: -Traceback (most recent call last): - File "/Users/defalt/Desktop/Athena/research/swarms/example2.py", line 2, in - from swarms import Worker - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/__init__.py", line 17, in - from swarms.workers.worker import Worker - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/workers/worker.py", line 10, in - from swarms.tools.autogpt import ( - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/tools/autogpt.py", line 26, in - llm = ChatOpenAI(model_name="gpt-4", temperature=1.0) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/load/serializable.py", line 74, in __init__ - super().__init__(**kwargs) - File "pydantic/main.py", line 341, in pydantic.main.BaseModel.__init__ -pydantic.error_wrappers.ValidationError: 1 validation error for ChatOpenAI -__root__ - Did not find openai_api_key, please add an environment variable `OPENAI_API_KEY` which contains it, or pass `openai_api_key` as a named parameter. (type=value_error) -https://spire.bugout.dev:443 "POST /humbug/reports HTTP/1.1" 200 0 -defalt@owl swarms % /usr/local/bin/python3 /Users/defalt/Desktop/Athena/research/swarms/example2.py - - _________ __ __ _____ __________ _____ _________ - / _____// \ / \ / _ \ \______ \ / \ / _____/ - \_____ \ \ \/\/ // /_\ \ | _/ / \ / \ \_____ \ - / \ \ // | \| | \/ Y \ / \ -/_______ / \__/\ / \____|__ /|____|_ /\____|__ //_______ / - \/ \/ \/ \/ \/ \/ - - - - _________ __ __ _____ __________ _____ _________ - / _____// \ / \ / _ \ \______ \ / \ / _____/ - \_____ \ \ \/\/ // /_\ \ | _/ / \ / \ \_____ \ - / \ \ // | \| | \/ Y \ / \ -/_______ / \__/\ / \____|__ /|____|_ /\____|__ //_______ / - \/ \/ \/ \/ \/ \/ - - -/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/scipy/__init__.py:155: UserWarning: A NumPy version >=1.18.5 and <1.26.0 is required for this version of SciPy (detected version 1.26.0 - warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}" -/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.7.0) is available. It's recommended that you update to the latest version using `pip install -U deeplake`. - warnings.warn( -https://spire.bugout.dev:443 "POST /humbug/reports HTTP/1.1" 200 0 - - - - - - - - - - - - - - - - - +-----------------------------------------------+ - | | - | | - | Add new LLMs: Give Feedback / Get Help | - | https://github.com/BerriAI/litellm/issues/new | - | | - | | - +-----------------------------------------------+ - - - - - - - - - - - - - - - - -Starting new HTTPS connection (1): raw.githubusercontent.com:443 -https://raw.githubusercontent.com:443 "GET /BerriAI/litellm/main/model_prices_and_context_window.json HTTP/1.1" 200 1207 -Starting new HTTPS connection (1): pypi.org:443 -https://pypi.org:443 "GET /pypi/open-interpreter/json HTTP/1.1" 200 30961 -2023-09-25 18:05:47.359346: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. -To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. -Falling back to TensorFlow client; we recommended you install the Cloud TPU client directly with pip install cloud-tpu-client. -Creating converter from 7 to 5 -Creating converter from 5 to 7 -Creating converter from 7 to 5 -Creating converter from 5 to 7 -Popen(['git', 'version'], cwd=/Users/defalt/Desktop/Athena/research/swarms, universal_newlines=False, shell=None, istream=None) -Popen(['git', 'version'], cwd=/Users/defalt/Desktop/Athena/research/swarms, universal_newlines=False, shell=None, istream=None) -Trying paths: ['/Users/defalt/.docker/config.json', '/Users/defalt/.dockercfg'] -Found file at path: /Users/defalt/.docker/config.json -Found 'auths' section -Auth data for 916723593639.dkr.ecr.us-east-1.amazonaws.com is absent. Client might be using a credentials store instead. -Auth data for https://index.docker.io/v1/ is absent. Client might be using a credentials store instead. -Auth data for public.ecr.aws is absent. Client might be using a credentials store instead. -Found 'credsStore' section -Embeddings is not implemented for FAISS -Embeddings is not implemented for FAISS -Embeddings is not implemented for FAISS -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/embeddings -api_version=None data='{"input": [[1318]], "model": "text-embedding-ada-002", "encoding_format": "base64"}' message='Post details' -Converted retries value: 2 -> Retry(total=2, connect=None, read=None, redirect=None, status=None) -Starting new HTTPS connection (1): api.openai.com:443 -https://api.openai.com:443 "POST /v1/embeddings HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/embeddings processing_ms=56 request_id=9d974d5e20d12b5bcd4127c3c991cd33 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "system", "content": "You are Optimus Prime, Worker in a swarm\\nYour decisions must always be made independently without seeking user assistance.\\nPlay to your strengths as an LLM and pursue simple strategies with no legal complications.\\nIf you have completed all your tasks, make sure to use the \\"finish\\" command.\\n\\nGOALS:\\n\\n1. What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\\n\\n\\nConstraints:\\n1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\\n2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\\n3. No user assistance\\n4. Exclusively use the commands listed in double quotes e.g. \\"command name\\"\\n\\nCommands:\\n1. write_file: Write file to disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}, \\"text\\": {\\"title\\": \\"Text\\", \\"description\\": \\"text to write to file\\", \\"type\\": \\"string\\"}, \\"append\\": {\\"title\\": \\"Append\\", \\"description\\": \\"Whether to append to an existing file.\\", \\"default\\": false, \\"type\\": \\"boolean\\"}}\\n2. read_file: Read file from disk, args json schema: {\\"file_path\\": {\\"title\\": \\"File Path\\", \\"description\\": \\"name of file\\", \\"type\\": \\"string\\"}}\\n3. process_csv: process_csv(llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str - Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded., args json schema: {\\"llm\\": {\\"title\\": \\"Llm\\"}, \\"csv_file_path\\": {\\"title\\": \\"Csv File Path\\", \\"type\\": \\"string\\"}, \\"instructions\\": {\\"title\\": \\"Instructions\\", \\"type\\": \\"string\\"}, \\"output_path\\": {\\"title\\": \\"Output Path\\", \\"type\\": \\"string\\"}}\\n4. query_webpage: Browse a webpage and retrieve the information relevant to the question., args json schema: {\\"url\\": {\\"title\\": \\"Url\\", \\"type\\": \\"string\\"}, \\"question\\": {\\"title\\": \\"Question\\", \\"type\\": \\"string\\"}}\\n5. human: You can ask a human for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for the human., args json schema: {\\"query\\": {\\"title\\": \\"Query\\", \\"type\\": \\"string\\"}}\\n6. compile: compile(task: str) - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.\\n\\n This provides a natural-language interface to your computer\'s general-purpose capabilities:\\n\\n Create and edit photos, videos, PDFs, etc.\\n Control a Chrome browser to perform research\\n Plot, clean, and analyze large datasets\\n ...etc.\\n \\u26a0\\ufe0f Note: You\'ll be asked to approve code before it\'s run., args json schema: {\\"task\\": {\\"title\\": \\"Task\\", \\"type\\": \\"string\\"}}\\n7. VQAinference: VQAinference(self, inputs) - Answer Question About The Image, VQA Multi-Modal Worker agent\\n description=\\"useful when you need an answer for a question based on an image. \\"\\n \\"like: what is the background color of the last image, how many cats in this figure, what is in this figure. \\"\\n \\"The input to this tool should be a comma separated string of two, representing the image_path and the question\\",, args json schema: {\\"self\\": {\\"title\\": \\"Self\\"}, \\"inputs\\": {\\"title\\": \\"Inputs\\"}}\\n8. finish: use this to signal that you have finished all your objectives, args: \\"response\\": \\"final response to let people know you have finished your objectives\\"\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\nYou should only respond in JSON format as described below \\nResponse Format: \\n{\\n \\"thoughts\\": {\\n \\"text\\": \\"thought\\",\\n \\"reasoning\\": \\"reasoning\\",\\n \\"plan\\": \\"- short bulleted\\\\n- list that conveys\\\\n- long-term plan\\",\\n \\"criticism\\": \\"constructive self-criticism\\",\\n \\"speak\\": \\"thoughts summary to say to user\\"\\n },\\n \\"command\\": {\\n \\"name\\": \\"command name\\",\\n \\"args\\": {\\n \\"arg name\\": \\"value\\"\\n }\\n }\\n} \\nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Mon Sep 25 18:05:53 2023"}, {"role": "system", "content": "This reminds you of these events from your past:\\n[]\\n\\n"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "model": "gpt-4", "max_tokens": null, "stream": false, "n": 1, "temperature": 0.5}' message='Post details' -/usr/local/bin/python3 /Users/defalt/Desktop/Athena/research/swarms/example2.py -^CError in sys.excepthook: -Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/humbug/report.py", line 505, in _hook - self.error_report(error=exception_instance, tags=tags, publish=publish) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/humbug/report.py", line 247, in error_report - traceback.format_exception( -TypeError: format_exception() got an unexpected keyword argument 'etype' - -Original exception was: -Traceback (most recent call last): - File "/Users/defalt/Desktop/Athena/research/swarms/example2.py", line 62, in - results = debate.run(task, max_iters=4) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Users/defalt/Desktop/Athena/research/swarms/example2.py", line 27, in run - response = speaker.run(task) - ^^^^^^^^^^^^^^^^^ - File "/Users/defalt/Desktop/Athena/research/swarms/swarms/workers/worker.py", line 222, in run - result = self.agent.run([task]) - ^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain_experimental/autonomous_agents/autogpt/agent.py", line 93, in run - assistant_reply = self.chain.run( - ^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chains/base.py", line 480, in run - return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[ - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chains/base.py", line 282, in __call__ - raise e - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chains/base.py", line 276, in __call__ - self._call(inputs, run_manager=run_manager) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chains/llm.py", line 91, in _call - response = self.generate([inputs], run_manager=run_manager) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chains/llm.py", line 101, in generate - return self.llm.generate_prompt( - ^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chat_models/base.py", line 414, in generate_prompt - return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chat_models/base.py", line 309, in generate - raise e - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chat_models/base.py", line 299, in generate - self._generate_with_cache( - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chat_models/base.py", line 446, in _generate_with_cache - return self._generate( - ^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chat_models/openai.py", line 339, in _generate - response = self.completion_with_retry( - ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chat_models/openai.py", line 278, in completion_with_retry - return _completion_with_retry(**kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/tenacity/__init__.py", line 289, in wrapped_f - return self(f, *args, **kw) - ^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/tenacity/__init__.py", line 379, in __call__ - do = self.iter(retry_state=retry_state) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/tenacity/__init__.py", line 314, in iter - return fut.result() - ^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py", line 449, in result - return self.__get_result() - ^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py", line 401, in __get_result - raise self._exception - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/tenacity/__init__.py", line 382, in __call__ - result = fn(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/chat_models/openai.py", line 276, in _completion_with_retry - return self.client.create(**kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py", line 25, in create - return super().create(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py", line 153, in create - response, _, api_key = requestor.request( - ^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py", line 288, in request - result = self.request_raw( - ^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py", line 596, in request_raw - result = _thread_context.session.request( - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/requests/sessions.py", line 587, in request - resp = self.send(prep, **send_kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/requests/sessions.py", line 701, in send - r = adapter.send(request, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/requests/adapters.py", line 489, in send - resp = conn.urlopen( - ^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 703, in urlopen - httplib_response = self._make_request( - ^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 449, in _make_request - six.raise_from(e, None) - File "", line 3, in raise_from - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 444, in _make_request - httplib_response = conn.getresponse() - ^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/http/client.py", line 1374, in getresponse - response.begin() - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/http/client.py", line 318, in begin - version, status, reason = self._read_status() - ^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/http/client.py", line 279, in _read_status - line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1") - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/socket.py", line 705, in readinto - return self._sock.recv_into(b) - ^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ssl.py", line 1278, in recv_into - return self.read(nbytes, buffer) - ^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ssl.py", line 1134, in read - return self._sslobj.read(len, buffer) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -KeyboardInterrupt - -defalt@owl swarms % git add . -g% -defalt@owl swarms % git commit -m 'omni modal worker' -[master d6b62b1] omni modal worker - 5 files changed, 539 insertions(+), 3 deletions(-) - rename swarms/{workers/multi_modal_worker.py => agents/multi_modal_agent.py} (92%) - rename swarms/{workers/omni_worker.py => agents/omni_modal_agent.py} (97%) - create mode 100644 swarms/agents/profitpilot.py -defalt@owl swarms % git push -Enumerating objects: 22, done. -Counting objects: 100% (22/22), done. -Delta compression using up to 8 threads -Compressing objects: 100% (13/13), done. -Writing objects: 100% (13/13), 7.53 KiB | 1.88 MiB/s, done. -Total 13 (delta 9), reused 0 (delta 0), pack-reused 0 -remote: Resolving deltas: 100% (9/9), completed with 9 local objects. -To https://github.com/kyegomez/swarms.git - e479c53..d6b62b1 master -> master -defalt@owl swarms % git add . -g% -defalt@owl swarms % git commit -m 'agents' -g[master 358d1ea] agents - 6 files changed, 19 insertions(+), 16 deletions(-) - rename swarms/{workers => agents}/neural_architecture_search_worker.py (99%) -defalt@owl swarms % git push -Enumerating objects: 20, done. -Counting objects: 100% (20/20), done. -Delta compression using up to 8 threads -Compressing objects: 100% (12/12), done. -Writing objects: 100% (12/12), 18.53 KiB | 3.09 MiB/s, done. -Total 12 (delta 7), reused 0 (delta 0), pack-reused 0 -remote: Resolving deltas: 100% (7/7), completed with 7 local objects. -To https://github.com/kyegomez/swarms.git - d6b62b1..358d1ea master -> master -defalt@owl swarms % git add . -defalt@owl swarms % git commit -m 'clean up' -[master bb1cb08] clean up - 2 files changed, 4 insertions(+), 4 deletions(-) -defalt@owl swarms % git push -Enumerating objects: 11, done. -Counting objects: 100% (11/11), done. -Delta compression using up to 8 threads -Compressing objects: 100% (6/6), done. -Writing objects: 100% (6/6), 509 bytes | 127.00 KiB/s, done. -Total 6 (delta 5), reused 0 (delta 0), pack-reused 0 -remote: Resolving deltas: 100% (5/5), completed with 5 local objects. -To https://github.com/kyegomez/swarms.git - 358d1ea..bb1cb08 master -> master -defalt@owl swarms % git add . -defalt@owl swarms % git commit -m 'cleanup' -[master 0e1aa78] cleanup - 1 file changed, 2 insertions(+), 1 deletion(-) -defalt@owl swarms % git push -Enumerating objects: 9, done. -Counting objects: 100% (9/9), done. -Delta compression using up to 8 threads -Compressing objects: 100% (5/5), done. -Writing objects: 100% (5/5), 417 bytes | 139.00 KiB/s, done. -Total 5 (delta 4), reused 0 (delta 0), pack-reused 0 -remote: Resolving deltas: 100% (4/4), completed with 4 local objects. -To https://github.com/kyegomez/swarms.git - bb1cb08..0e1aa78 master -> master -defalt@owl swarms % git add . -defalt@owl swarms % git commit m 'clean up' -error: pathspec 'm' did not match any file(s) known to git -error: pathspec 'clean up' did not match any file(s) known to git -defalt@owl swarms % git add . -defalt@owl swarms % git commit -m 'clean up' -[master 0807058] clean up - 2 files changed, 3 insertions(+), 1 deletion(-) -defalt@owl swarms % git push -Enumerating objects: 9, done. -Counting objects: 100% (9/9), done. -Delta compression using up to 8 threads -Compressing objects: 100% (5/5), done. -Writing objects: 100% (5/5), 450 bytes | 150.00 KiB/s, done. -Total 5 (delta 4), reused 0 (delta 0), pack-reused 0 -remote: Resolving deltas: 100% (4/4), completed with 4 local objects. -To https://github.com/kyegomez/swarms.git - 0e1aa78..0807058 master -> master -defalt@owl swarms % git add . -defalt@owl swarms % git commit -m 'clean up' -[master e6127ac] clean up - 4 files changed, 11 insertions(+), 12 deletions(-) -defalt@owl swarms % git push -Enumerating objects: 15, done. -Counting objects: 100% (15/15), done. -Delta compression using up to 8 threads -Compressing objects: 100% (8/8), done. -Writing objects: 100% (8/8), 674 bytes | 168.00 KiB/s, done. -Total 8 (delta 7), reused 0 (delta 0), pack-reused 0 -remote: Resolving deltas: 100% (7/7), completed with 7 local objects. -To https://github.com/kyegomez/swarms.git - 0807058..e6127ac master -> master -defalt@owl swarms % intepreter -zsh: command not found: intepreter -defalt@owl swarms % interpreter - +-----------------------------------------------+ - | | - | | - | Add new LLMs: Give Feedback / Get Help | - | https://github.com/BerriAI/litellm/issues/new | - | | - | | - +-----------------------------------------------+ - -▌ Model set to GPT-4 - -Open Interpreter will require approval before running code. Use interpreter -y to bypass this. - -Press CTRL-C to exit. - -> create a model to analyze soil data using pytorch - - Certainly, to build a model using Pytorch to analyze soil data, we would first need to inspect the data and understand its characteristics before proceeding. But before we begin our steps, could you - please provide the dataset for the soil data or a description of it including the different features it has? - - In general terms, we may outline the main steps for this task as follows: - - Plan: - - 1 Obtain the soil data. Ask the user to provide the soil data file or download it from a specified source. - 2 Load and explore the data. This step includes understanding the structure of the data and the types of variables included. We would usually generate some descriptive statistics and visualizations to - better understand the data. - 3 Preprocess the data. Depending on previous step, this could include handling missing values, transforming variables, etc. - 4 Split the data into training and testing sets. This is a common practice in machine learning to assess the performance of the model. - 5 Create a deep learning model using PyTorch. At this step, we’ll need to define our model architecture. - 6 Train the model on the training data. - 7 Evaluate the model on the test data. - 8 Fine-tune the model if necessary. - - Please note that Pytorch will be used as the main library for creating and training the model. If you don't have it already installed, we'll need to install it. Please confirm if you already have it - installed, and provide the data set. - -> you find the dataset, you create the model indenpently - - Alright, so since we don't have a specific soil dataset in mind, we can consider a public soil dataset available on UCI's Machine Learning Repository to carry out this assignment. The - "Qualitative_Bankruptcy" dataset is a simple and understandable one to start with. - - Let's plan this out: - - Plan: - - 1 Install necessary packages: PyTorch, NumPy, pandas, and sklearn if they are not already installed. - 2 Download the dataset from the UCI's Machine Learning Repository. - 3 Load and prepare the data: This includes reading the data into a pandas dataframe, encoding categorical variables if necessary, and scaling numerical variables. - 4 Split the data into training and testing sets - 5 Create the PyTorch model. We will make a simple Feed Forward Neural Network. - 6 Train the Neural Network on the training data. - 7 Evaluate the model on the test data. - - Let's start with installing necessary libraries. Would you like me to install the necessary packages? If so, please provide admin permission for installation, and also verify if you already have any of - the mentioned libraries installed. - -> yes - - - pip install torch numpy pandas scikit-learn - - - Would you like to run this code? (y/n) - - y - - - pip install torch numpy pandas scikit-learn - - - Output truncated. Showing the last 2000 characters. - - hon3.10/site-packages (from torch) (3.7.1) - Requirement already satisfied: typing-extensions in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from torch) (4.5.0) - Requirement already satisfied: sympy in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from torch) (1.12) - Requirement already satisfied: networkx in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from torch) (2.8.4) - Requirement already satisfied: jinja2 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from torch) (3.1.2) - Requirement already satisfied: python-dateutil>=2.8.1 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from pandas) (2.8.2) - Requirement already satisfied: pytz>=2020.1 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from pandas) (2022.1) - Requirement already satisfied: scipy>=1.3.2 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from scikit-learn) (1.8.0) - Requirement already satisfied: joblib>=1.0.0 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from scikit-learn) (1.1.0) - Requirement already satisfied: threadpoolctl>=2.0.0 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from scikit-learn) (3.1.0) - Requirement already satisfied: six>=1.5 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from python-dateutil>=2.8.1->pandas) (1.16.0) - Requirement already satisfied: MarkupSafe>=2.0 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from jinja2->torch) (2.1.1) - Requirement already satisfied: mpmath>=0.19 in /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages (from sympy->torch) (1.3.0) - A new release of pip is available: 23.1.2 -> 23.2.1 - To update, run: pip install --upgrade pip - - - Great, the necessary libraries have been installed successfully. The next step is to download the soil dataset. - - Plan: - - 1 Download the "Qualitative_Bankruptcy" dataset from UCI's Machine Learning repository. - 2 Load the data as a pandas dataframe. - 3 Explore the data and perform necessary preprocessing steps. - 4 Split the data into training and testing sets. - 5 Create the PyTorch model. - 6 Train the model - 7 Evaluate the model on the test set. - - As per the plan, we will now go ahead and download the dataset. - - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - data.head() - - - Would you like to run this code? (y/n) - - y - - - - import pandas as pd - - import pandas as pd - - - import pandas as pd - - - import pandas as pd - - # Download the dataset - - import pandas as pd - - # Download the dataset - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - - import pandas as pd - - # Download the dataset - url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt' - data = pd.read_csv(url, header=None) - - # Save the dataset locally - data.to_csv('qualitative_bankruptcy.csv', index=False) - - # Display the first few rows of the dataset - data.head() - - - Output truncated. Showing the last 2000 characters. - - ^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/pandas/io/common.py", line 713, in get_handle - ioargs = _get_filepath_or_buffer( - ^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/pandas/io/common.py", line 363, in _get_filepath_or_buffer - with urlopen(req_info) as req: - ^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/pandas/io/common.py", line 265, in urlopen - return urllib.request.urlopen(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/urllib/request.py", line 216, in urlopen - return opener.open(url, data, timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/urllib/request.py", line 519, in open - response = self._open(req, data) - ^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/urllib/request.py", line 536, in _open - result = self._call_chain(self.handle_open, protocol, protocol + - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/urllib/request.py", line 496, in _call_chain - result = func(*args) - ^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/urllib/request.py", line 1391, in https_open - return self.do_open(http.client.HTTPSConnection, req, - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/urllib/request.py", line 1351, in do_open - raise URLError(err) - urllib.error.URLError: - - - It seems there was an SSL error when attempting to download the data, probably due to a certificate verification failure. To troubleshoot this, we'll download the data in a more roundabout way. - - I will now: - - 1 Download the data using the 'wget' command. - 2 Load the data into a pandas dataframe. - 3 Display the first few rows of data. - - Let's proceed with these steps. - - - - wget https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt -O Qualitative_Bankruptcy.data.txt - - - Would you like to run this code? (y/n) - - y - - - - - wget https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt -O Qualitative_Bankruptcy.data.txt - - - --2023-09-25 23:14:42-- https://archive.ics.uci.edu/ml/machine-learning-databases/qualitative-bankruptcy/Qualitative_Bankruptcy.data.txt - Resolving archive.ics.uci.edu (archive.ics.uci.edu)... 128.195.10.252 - Connecting to archive.ics.uci.edu (archive.ics.uci.edu)|128.195.10.252|:443... connected. - HTTP request sent, awaiting response... 404 Not Found - 2023-09-25 23:14:42 ERROR 404: Not Found. - An error occurred on line 11 - Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/core/respond.py", line 121, in respond - for line in code_interpreter.run(code): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/code_interpreters/subprocess_code_interpreter.py", line 103, in run - output = self.output_queue.get(timeout=0.3) # Waits for 0.3 seconds - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/queue.py", line 180, in get - self.not_empty.wait(remaining) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/threading.py", line 324, in wait - gotit = waiter.acquire(True, timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - KeyboardInterrupt - -> create a model first and then select a dataset from pytorch - - Sure! To build a PyTorch model, we'll first create a hypothetical structure. We can then use a built-in dataset from PyTorch. For this example, let's use the FashionMNIST dataset. - - Here is a step-by-step plan: - - Plan: - - 1 Specify the model architecture (For instance, a simple feed-forward network). - 2 Load the dataset from PyTorch's built-in datasets. - 3 Split the dataset into training and test sets. - 4 Train the model using the training data. - 5 Evaluate the model using the test data. - - Let's get started by specifying a simple model architecture using PyTorch. - - - - import torch - from torch import nn, optim - - # Define the model structure - class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(1, 10, kernel_size=5) - self.conv2 = nn.Conv2d(10, 20, kernel_size=5) - self.conv2_drop = nn.Dropout2d() - self.fc1 = nn.Linear(320, 50) - self.fc2 = nn.Linear(50, 10) - - def forward(self, x): - x = nn.functional.relu(nn.functional.max_pool2d(self.conv1(x), 2)) - x = nn.functional.relu(nn.functional.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) - x = x.view(-1, 320) - x = nn.functional.relu(self.fc1(x)) - x = nn.functional.dropout(x, training=self.training) - x = self.fc2(x) - return nn.functional.log_softmax(x, dim=1) - - # Instanciate the model - model = Net() - print(model) - - - Would you like to run this code? (y/n) - - y - - - - import torch - - import torch - from torch import nn, optim - - import torch - from torch import nn, optim - - import torch - from torch import nn, optim - - import torch - from torch import nn, optim - - import torch - from torch import nn, optim - - # Define the model structure - class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(1, 10, kernel_size=5) - self.conv2 = nn.Conv2d(10, 20, kernel_size=5) - self.conv2_drop = nn.Dropout2d() - self.fc1 = nn.Linear(320, 50) - self.fc2 = nn.Linear(50, 10) - - def forward(self, x): - x = nn.functional.relu(nn.functional.max_pool2d(self.conv1(x), 2)) - x = nn.functional.relu(nn.functional.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) - x = x.view(-1, 320) - x = nn.functional.relu(self.fc1(x)) - x = nn.functional.dropout(x, training=self.training) - x = self.fc2(x) - return nn.functional.log_softmax(x, dim=1) - - # Instanciate the model - model = Net() - print(model) - - - KeyboardInterrupt - KeyboardInterrupt - Net( - (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1)) - (conv2): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1)) - (conv2_drop): Dropout2d(p=0.5, inplace=False) - (fc1): Linear(in_features=320, out_features=50, bias=True) - (fc2): Linear(in_features=50, out_features=10, bias=True) - ) - - - The model is created successfully. This model is a simple convolutional neural network, which is often used for image related tasks. - - Our created Net model contains two convolutional layers (conv1 and conv2 with dropout), followed by two full connected layers (fc1 and fc2). - - Now, let's fetch the built-in FashionMNIST dataset from torchvision and pre-process it. - - Plan for the next steps: - - 1 Load the FashionMNIST dataset from PyTorch's built-in datasets. - 2 Preprocess the data. - 3 Split the dataset into training and testing set. - 4 Train the model. - 5 Evaluate the model. - - Let's get started with the first 2 steps. We will download and preprocess the FashionMNIST dataset. - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Would you like to run this code? (y/n) - - y - - - - import torch - - import torch - from torchvision import datasets, transforms - - import torch - from torchvision import datasets, transforms - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - - import torch - from torchvision import datasets, transforms - - # Define a transform to normalize the data - transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) - - # Download and load the training data - trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) - - testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) - testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) - - - Output truncated. Showing the last 2000 characters. - - 02 [00:05<00:03, 486579.75it/s] - 67%|██████▋ | 2949120/4422102 [00:05<00:02, 552588.02it/s] - 68%|██████▊ | 3014656/4422102 [00:05<00:02, 528567.36it/s] - 70%|██████▉ | 3080192/4422102 [00:05<00:02, 506041.03it/s] - 72%|███████▏ | 3178496/4422102 [00:05<00:02, 563098.88it/s] - 73%|███████▎ | 3244032/4422102 [00:05<00:02, 534399.96it/s] - 75%|███████▍ | 3309568/4422102 [00:05<00:02, 514477.24it/s] - 77%|███████▋ | 3407872/4422102 [00:06<00:01, 563181.33it/s] - 79%|███████▊ | 3473408/4422102 [00:06<00:01, 526526.13it/s] - 81%|████████ | 3571712/4422102 [00:06<00:01, 588404.74it/s] - 82%|████████▏ | 3637248/4422102 [00:06<00:01, 559076.45it/s] - 84%|████████▍ | 3735552/4422102 [00:06<00:01, 605619.42it/s] - 86%|████████▌ | 3801088/4422102 [00:06<00:01, 564461.46it/s] - 88%|████████▊ | 3899392/4422102 [00:06<00:00, 603119.83it/s] - 90%|████████▉ | 3964928/4422102 [00:07<00:00, 612781.06it/s] - 92%|█████████▏| 4063232/4422102 [00:07<00:00, 602438.14it/s] - 93%|█████████▎| 4128768/4422102 [00:07<00:00, 434414.41it/s] - 95%|█████████▍| 4194304/4422102 [00:07<00:00, 366068.66it/s] - 96%|█████████▋| 4259840/4422102 [00:07<00:00, 381062.66it/s] - 98%|█████████▊| 4325376/4422102 [00:08<00:00, 343513.61it/s] - 99%|█████████▉| 4390912/4422102 [00:08<00:00, 364858.35it/s] - 100%|██████████| 4422102/4422102 [00:08<00:00, 525415.52it/s] - Extracting /Users/defalt/.pytorch/F_MNIST_data/FashionMNIST/raw/t10k-images-idx3-ubyte.gz to /Users/defalt/.pytorch/F_MNIST_data/FashionMNIST/raw - Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz - Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz to /Users/defalt/.pytorch/F_MNIST_data/FashionMNIST/raw/t10k-labels-idx1-ubyte.gz - Extracting /Users/defalt/.pytorch/F_MNIST_data/FashionMNIST/raw/t10k-labels-idx1-ubyte.gz to /Users/defalt/.pytorch/F_MNIST_data/FashionMNIST/raw - 0%| | 0/5148 [00:00 - - from torch.autograd import Variable -Traceback (most recent call last): - - from torch.autograd import Variable - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/core/core.py", line 93, in _streaming_chat - - from torch.autograd import Variable - yield from self._respond() - - from torch.autograd import Variable - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/core/core.py", line 109, in _respond - - from torch.autograd import Variable - yield from respond(self) - - from torch.autograd import Variable -RuntimeError: generator ignored GeneratorExit - - from torch.autograd import Variable - - from torch.autograd import Variable - - from torch.autograd import Variable - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - steps = 0 - - for e in range(epochs): - running_loss = 0 - for images, labels in iter(trainloader): - steps += 1 - # Wrap images and labels in Variables so we can calculate gradients - inputs = Variable(images) - targets = Variable(labels) - optimizer.zero_grad() - - output = model.forward(inputs) - loss = loss_func(output, targets) - loss.backward() - optimizer.step() - - running_loss += loss.item() - - if steps % print_every == 0: - print('Epoch: {}/{}... '.format(e+1, epochs), 'Loss: {:.4f}'.format(running_loss/print_every)) - - # reset running loss - running_loss = 0 - - -> keep going - - Okay! We have trained our model, but the training loss alone is not a good indication of its efficacy. We should also evaluate its performance on unseen data. - - Next up: Evaluating the model. - - Our plan is this: - - 1 We will not update the weights while evaluating the model. We can ensure this in PyTorch by using torch.no_grad(). - 2 We will calculate the percentage of correct predictions on the test data and print it. - - Let's proceed with these steps: - - - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Would you like to run this code? (y/n) - - y - - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - - # Evaluation on the test set - - # Evaluation on the test set - model.eval() - - # Evaluation on the test set - model.eval() - correct = 0 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - och: 1/3... Loss: 2.2822 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - s: 2.2616 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - s: 2.2616 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - s: 2.2616 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - s: 2.2616 - - # Evaluation on the test set - model.eval() - correct = 0 - total = 0 - - with torch.no_grad(): - for images, labels in testloader: - outputs = model(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - - print('Accuracy of the network on test images: %d%%' % (100 * correct / total)) - - - Output truncated. Showing the last 2000 characters. - - s: 2.2616 - Epoch: 1/3... Loss: 2.2220 - Epoch: 1/3... Loss: 2.1515 - Epoch: 1/3... Loss: 2.0478 - Epoch: 1/3... Loss: 1.9226 - Epoch: 1/3... Loss: 1.7371 - Epoch: 1/3... Loss: 1.6033 - File "", line 20, in - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/dataloader.py", line 634, in __next__ - data = self._next_data() - ^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/dataloader.py", line 678, in _next_data - data = self._dataset_fetcher.fetch(index) # may raise StopIteration - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/fetch.py", line 51, in fetch - data = [self.dataset for idx in possibly_batched_index] - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/fetch.py", line 51, in - data = [self.dataset for idx in possibly_batched_index] - ~~~~~~~~~~~~^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torchvision/datasets/mnist.py", line 142, in __getitem__ - img = Image.fromarray(img.numpy(), mode="L") - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/PIL/Image.py", line 3072, in fromarray - strides = arr.get("strides", None) - ^^^^^^^^^^^^^^^^^^^^^^^^ - KeyboardInterrupt - Net( - (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1)) - (conv2): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1)) - (conv2_drop): Dropout2d(p=0.5, inplace=False) - (fc1): Linear(in_features=320, out_features=50, bias=True) - (fc2): Linear(in_features=50, out_features=10, bias=True) - ) - Accuracy of the network on test images: 62% - - - The accuracy of our model on the test images is 62%. This accuracy might not be very high, but please keep in mind that the dataset's inherent complexity and architectural simplicity of the model - can factor into this. With more training and a more complex model (such as a deeper network or other types of layers), the accuracy is likely to improve. - - Next steps: - - 1 You can iterate on this model, trying different architectures and parameters. - 2 You could add more epochs to the training process. - 3 You could use different types of layers in your model. - - Would you like to proceed with any of these options? Or is there anything else you'd like to do? - -> yes - - Great! To improve the model, you could try doing one of the following: - - 1 Increase the number of epochs: This will give the model more opportunity to learn from the dataset. - 2 Modify the model architecture: You can add more layers, or change the types of layers. For example, you might want to add another convolutional layer, or increase the number of nodes in the - layers. - 3 Change the optimizer or amend the learning rate: Different optimization algorithms can yield different results, so you might want to experiment with others like Adam or RMSProp. - - Which of these would you like to proceed with? - -> 2 - - Alright! Let's modify the structure of our convolutional neural network (CNN). - - How about we add another convolutional layer to the network to allow it to potentially learn more complex features? The new model will have 3 Convolutional Layers and two Fully Connected layers. - - Here's what that would look like: - - 1 Convolutional Layer 1: Input channels = 1, Output channels = 10, Kernel size = 5 - 2 Convolutional Layer 2: Input channels = 10, Output channels = 20, Kernel size = 5 - 3 Dropout Layer - 4 Convolutional Layer 3: Input channels = 20, Output channels = 30, Kernel size = 5 - 5 Fully Connected Layer 1: Input features = 30, Output features = 60 - 6 Fully Connected Layer 2: Input features = 60, Output features = 10 - - We maintain the Dropout technique as it helps prevent overfitting. Let's define this new network structure now. - - - - # Define the model structure - class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(1, 10, kernel_size=5) - self.conv2 = nn.Conv2d(10, 20, kernel_size=5) - self.conv3 = nn.Conv2d(20, 30, kernel_size=5) - self.conv2_drop = nn.Dropout2d() - self.fc1 = nn.Linear(30, 60) - self.fc2 = nn.Linear(60, 10) - - def forward(self, x): - x = nn.functional.relu(nn.functional.max_pool2d(self.conv1(x), 2)) - x = nn.functional.relu(nn.functional.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) - x = nn.functional.relu(nn.functional.max_pool2d(self.conv2_drop(self.conv3(x)), 2)) - x = x.view(-1, 30) - x = nn.functional.relu(self.fc1(x)) - x = nn.functional.dropout(x, training=self.training) - x = self.fc2(x) - return nn.functional.log_softmax(x, dim=1) - - # Instanciate the model - model = Net() - print(model) - - - Would you like to run this code? (y/n) - - y - - - - - - - # Define the model structure - class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(1, 10, kernel_size=5) - self.conv2 = nn.Conv2d(10, 20, kernel_size=5) - self.conv3 = nn.Conv2d(20, 30, kernel_size=5) - self.conv2_drop = nn.Dropout2d() - self.fc1 = nn.Linear(30, 60) - self.fc2 = nn.Linear(60, 10) - - def forward(self, x): - x = nn.functional.relu(nn.functional.max_pool2d(self.conv1(x), 2)) - x = nn.functional.relu(nn.functional.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) - x = nn.functional.relu(nn.functional.max_pool2d(self.conv2_drop(self.conv3(x)), 2)) - x = x.view(-1, 30) - x = nn.functional.relu(self.fc1(x)) - x = nn.functional.dropout(x, training=self.training) - x = self.fc2(x) - return nn.functional.log_softmax(x, dim=1) - - # Instanciate the model - model = Net() - print(model) - - - Net( - (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1)) - (conv2): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1)) - (conv3): Conv2d(20, 30, kernel_size=(5, 5), stride=(1, 1)) - (conv2_drop): Dropout2d(p=0.5, inplace=False) - (fc1): Linear(in_features=30, out_features=60, bias=True) - (fc2): Linear(in_features=60, out_features=10, bias=True) - ) - - - Our new model has been defined successfully. We've added an additional convolutional layer (conv3) and modified the fully connected layers (fc1 and fc2) to handle the output from the third - convolutional layer. - - The next steps would be: - - 1 Train this new model with the training data - 2 Then evaluate it on our test data - - Do you want to proceed with these steps now, or would you like to make further adjustments? - -> 1 - - Excellent! Let's proceed with training this updated model on our dataset. - - Now, we'll follow these steps: - - 1 As before, we'll use the negative log likelihood loss (NLLLoss) as our loss function and the Stochastic Gradient Descent (SGD) as our optimizer. - 2 We'll train our model for a few epochs and print out the training loss for each epoch. - - Given the size of the network and the dataset, the training process may take some time. Let's proceed. - - - - - - - - - - - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - steps = 0 - - for e in range(epochs): - running_loss = 0 - for images, labels in iter(trainloader): - steps += 1 - # Wrap images and labels in Variables so we can calculate gradients - inputs = Variable(images) - targets = Variable(labels) - optimizer.zero_grad() - - output = model.forward(inputs) - loss = loss_func(output, targets) - loss.backward() - optimizer.step() - - running_loss += loss.item() - - if steps % print_every == 0: - print('Epoch: {}/{}... '.format(e+1, epochs), 'Loss: {:.4f}'.format(running_loss/print_every)) - - # reset running loss - running_loss = 0 - - - Would you like to run this code? (y/n) - - y - - - # Specify the loss function - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the loss function - loss_func = nn.NLLLoss() - - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - steps = 0 - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - steps = 0 - - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - steps = 0 - - for e in range(epochs): - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - steps = 0 - - for e in range(epochs): - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - steps = 0 - - for e in range(epochs): - running_loss = 0 -Failed to parse 'content'. - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - steps = 0 - - for e in range(epochs): - running_loss = 0 - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - steps = 0 - - for e in range(epochs): - - # Specify the loss function - loss_func = nn.NLLLoss() - - # Specify the optimizer - optimizer = optim.SGD(model.parameters(), lr=0.01) - - # Define a training loop - epochs = 3 - print_every = 50 - steps = 0 - - for e in range(epochs): - running_loss = 0 - for images, labels in iter(trainloader): - steps += 1 - # Wrap images and labels in Variables so we can calculate gradients - inputs = Variable(images) - targets = Variable(labels) - optimizer.zero_grad() - - output = model.forward(inputs) - loss = loss_func(output, targets) - loss.backward() - optimizer.step() - - running_loss += loss.item() - - if steps % print_every == 0: - print('Epoch: {}/{}... '.format(e+1, epochs), 'Loss: {:.4f}'.format(running_loss/print_every)) - - # reset running loss - running_loss = 0 - - - File "", line 28, in - File "", line 28, in forward - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl - return forward_call(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/nn/modules/conv.py", line 463, in forward - return self._conv_forward(input, self.weight, self.bias) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/nn/modules/conv.py", line 459, in _conv_forward - return F.conv2d(input, weight, bias, self.stride, - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - RuntimeError: Calculated padded input size per channel: (4 x 4). Kernel size: (5 x 5). Kernel size can't be greater than actual input size - -> continue -^C> c^C^C -defalt@owl swarms % interpreter - - - - - - - - +-----------------------------------------------+ - | | - | | - | Add new LLMs: Give Feedback / Get Help | - | https://github.com/BerriAI/litellm/issues/new | - | | - | | - +-----------------------------------------------+ - - - - - - - - - -▌ Model set to GPT-4 - -Open Interpreter will require approval before running code. Use interpreter -y to bypass this. - -Press CTRL-C to exit. - -> create a satellite image detection model using pytorch, create the model and select the dataset - - Before starting with our task, let's first plan our steps so we can perform the whole task efficiently and without any errors: - - 1 Import necessary libraries. - 2 Download and load the dataset. - 3 Preprocess the dataset. - 4 Create the Convolutional Neural Network (CNN) model. - 5 Train the model. - 6 Evaluate the model. - - So first, we'll start by importing necessary libraries including PyTorch and torchvision for handling and processing dataset respectively. - - - - !pip install torch torchvision - import torch - import torchvision - import torchvision.transforms as transforms - - print('Libraries imported successfully.') - - - Would you like to run this code? (y/n) - - y - - - !pip install torch torchvision - import torch - import torchvision - import torchvision.transforms as transforms - - print('Libraries imported successfully.') - - - Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/code_interpreters/subprocess_code_interpreter.py", line 63, in run - code = self.preprocess_code(code) - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/code_interpreters/languages/python.py", line 12, in preprocess_code - return preprocess_python(code) - ^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/code_interpreters/languages/python.py", line 36, in preprocess_python - code = add_active_line_prints(code) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/code_interpreters/languages/python.py", line 57, in add_active_line_prints - tree = ast.parse(code) - ^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ast.py", line 50, in parse - return compile(source, filename, mode, flags, - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "", line 2 - !pip install torch torchvision - ^ - SyntaxError: invalid syntax - - - It seems there was an error running the previous block of code because it's not possible to run pip install commands in this python code execution environment. However, you can manually install - these libraries in your local environment by running the following command: - - - pip install torch torchvision - - - After that, we will proceed with the task. Please notify me once you done with the installation of these packages so that I can move forward with the execution of following steps. - - Moving ahead with the plan, we need to select and load the dataset. There are several datasets available for satellite image detection. A common one is the NWPU-RESISC45 dataset, and another one is - the EuroSAT dataset. Could you please specify which dataset you would like to use, or if there's another specific dataset you have in mind? - -> you decide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - - Alright, we will use the EuroSAT dataset for this task. It contains 27000 labeled and geo-referenced images corresponding to different land use and land cover categories. This dataset will provide - us extensive data to train our model. - - As we are unable to execute the code here due to some restrictions of this environment, you will need to execute the following code snippets in your local environment. - - The first step of the plan here is: - - • Download and load the EuroSAT dataset - - Here's how you can do it: - - - import torch - from torchvision.datasets import ImageFolder - from torchvision import transforms - - # Define a transform to normalize the data - transform = transforms.Compose([ - transforms.Resize((64,64)), # resize images to 64x64 pixels - transforms.ToTensor(), # convert to tensor - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # Normalize the images - ]) - - # Download and load training dataset - dataset = ImageFolder(root='', transform=transform) - - # Create a dataloader - dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True) - - print('Dataset loaded successfully.') - - - Please replace '' with the path to where you have the EuroSAT data stored. - - Upon successful execution of this code block, our dataset will be loaded and transformed as per our needs. - - Once done, let me know so we can proceed to the next step which is preprocessing the data. - -> you decide - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - - As our environment has a restriction in place that prevents me from executing the code. However, one generally pre-processes the data as suggested in the previous block. - - After successful preprocessing of the dataset, the next step in our plan is to create our Convolutional Neural Network (CNN) model. Here is a simple CNN model structure that you can use. Here is an - example for a CNN model you could start with: - - - import torch.nn as nn - import torch.nn.functional as F - - class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 13 * 13, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 13 * 13) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - net = Net() - - - And then we define a loss function and optimizer: - - - import torch.optim as optim - - criterion = nn.CrossEntropyLoss() - optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - - - Please replace nn.Conv2d(3, 6, 5),nn.Conv2d(6, 16, 5),nn.Linear(16 * 13 * 13, 120), nn.Linear(120, 84),nn.Linear(84, 10) with the respective shapes of convolution, pooling and fully connected - layers as per your dataset and problem requirements. - - Next, we'll train the model. Please confirm once you've created the model so we can proceed. - -> you decide - - Okay, Once the CNN model is ready, now we will train the model on our dataset. Here's the code that you can use to train the model. This code below runs the training loop for 2 epochs, but you may - choose to run for more epochs depending on your requirements: - - - for epoch in range(2): # loop over the dataset multiple times - - running_loss = 0.0 - for i, data in enumerate(dataloader, 0): - # get the inputs; data is a list of [inputs, labels] - inputs, labels = data - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(inputs) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 2000 == 1999: # print every 2000 mini-batches - print('[%d, %5d] loss: %.3f' % - (epoch + 1, i + 1, running_loss / 2000)) - running_loss = 0.0 - - print('Finished Training') - - - After completing this step, inform me so we can do the final step which is Evaluating the model. - -> run it - - - I'm sorry for the confusion, but I'm an AI model solely capable of text-based interactions designed to assist users in a text-based environment. I can provide you with code snippets, guidance, - instructions and even detailed plans to solve the listed problem, but I can't directly execute elaborate coding tasks in this environment. - - You can execute the given code in your local Python environment where you have PyT - -> Create an image that analyzes satelite data using pytorch to create the model and huggingface for the datasets - - - Incorporating Hugging Face datasets and PyTorch for creating a model to analyze satellite image data would typically involve the following steps - -> create an time series model that analyzes tabular finanical data using pytorch to create the model and for the dataset -Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 703, in urlopen - httplib_response = self._make_request( - ^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 386, in _make_request - self._validate_conn(conn) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 1042, in _validate_conn - conn.connect() - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connection.py", line 414, in connect - self.sock = ssl_wrap_socket( - ^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/util/ssl_.py", line 449, in ssl_wrap_socket - ssl_sock = _ssl_wrap_socket_impl( - ^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/util/ssl_.py", line 493, in _ssl_wrap_socket_impl - return ssl_context.wrap_socket(sock, server_hostname=server_hostname) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ssl.py", line 517, in wrap_socket - return self.sslsocket_class._create( - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ssl.py", line 1075, in _create - self.do_handshake() - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ssl.py", line 1346, in do_handshake - self._sslobj.do_handshake() -ConnectionResetError: [Errno 54] Connection reset by peer - -During handling of the above exception, another exception occurred: - -Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/requests/adapters.py", line 489, in send - resp = conn.urlopen( - ^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 787, in urlopen - retries = retries.increment( - ^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/util/retry.py", line 550, in increment - raise six.reraise(type(error), error, _stacktrace) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/packages/six.py", line 769, in reraise - raise value.with_traceback(tb) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 703, in urlopen - httplib_response = self._make_request( - ^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 386, in _make_request - self._validate_conn(conn) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 1042, in _validate_conn - conn.connect() - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connection.py", line 414, in connect - self.sock = ssl_wrap_socket( - ^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/util/ssl_.py", line 449, in ssl_wrap_socket - ssl_sock = _ssl_wrap_socket_impl( - ^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/util/ssl_.py", line 493, in _ssl_wrap_socket_impl - return ssl_context.wrap_socket(sock, server_hostname=server_hostname) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ssl.py", line 517, in wrap_socket - return self.sslsocket_class._create( - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ssl.py", line 1075, in _create - self.do_handshake() - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ssl.py", line 1346, in do_handshake - self._sslobj.do_handshake() -urllib3.exceptions.ProtocolError: ('Connection aborted.', ConnectionResetError(54, 'Connection reset by peer')) - -During handling of the above exception, another exception occurred: - -Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py", line 596, in request_raw - result = _thread_context.session.request( - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/requests/sessions.py", line 587, in request - resp = self.send(prep, **send_kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/requests/sessions.py", line 701, in send - r = adapter.send(request, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/requests/adapters.py", line 547, in send - raise ConnectionError(err, request=request) -requests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(54, 'Connection reset by peer')) - -The above exception was the direct cause of the following exception: - -Traceback (most recent call last): - File "/Library/Frameworks/Python.framework/Versions/3.11/bin/interpreter", line 8, in - sys.exit(cli()) - ^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/core/core.py", line 21, in cli - cli(self) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/cli/cli.py", line 145, in cli - interpreter.chat() - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/core/core.py", line 65, in chat - for _ in self._streaming_chat(message=message, display=display): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/core/core.py", line 86, in _streaming_chat - yield from terminal_interface(self, message) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/terminal_interface/terminal_interface.py", line 50, in terminal_interface - for chunk in interpreter.chat(message, display=False, stream=True): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/core/core.py", line 93, in _streaming_chat - yield from self._respond() - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/core/core.py", line 109, in _respond - yield from respond(self) - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/core/respond.py", line 56, in respond - for chunk in interpreter._llm(messages_for_llm): - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/interpreter/llm/setup_openai_coding_llm.py", line 79, in coding_llm - response = litellm.completion(**params) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py", line 671, in wrapper - raise e - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py", line 630, in wrapper - result = original_function(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py", line 53, in wrapper - result = future.result(timeout=local_timeout_duration) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py", line 456, in result - return self.__get_result() - ^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py", line 401, in __get_result - raise self._exception - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py", line 42, in async_func - return func(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py", line 1120, in completion - raise exception_type( - ^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py", line 2670, in exception_type - raise e - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py", line 2107, in exception_type - raise original_exception - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py", line 400, in completion - raise e - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py", line 382, in completion - response = openai.ChatCompletion.create( - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py", line 25, in create - return super().create(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py", line 153, in create - response, _, api_key = requestor.request( - ^^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py", line 288, in request - result = self.request_raw( - ^^^^^^^^^^^^^^^^^ - File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py", line 609, in request_raw - raise error.APIConnectionError( -openai.error.APIConnectionError: Error communicating with OpenAI: ('Connection aborted.', ConnectionResetError(54, 'Connection reset by peer')) -defalt@owl swarms % \ No newline at end of file diff --git a/openai_model.py b/openai_model.py new file mode 100644 index 00000000..19ede0ee --- /dev/null +++ b/openai_model.py @@ -0,0 +1,9 @@ +from swarms.models.openai_models import OpenAIChat + +openai = OpenAIChat( + openai_api_key="", + verbose=False +) + +chat = openai("Are quantum fields everywhere?") +print(chat) diff --git a/swarms/__init__.py b/swarms/__init__.py index b9d8ccd4..c8f810cc 100644 --- a/swarms/__init__.py +++ b/swarms/__init__.py @@ -1,3 +1,19 @@ +# disable warnings +import warnings + +warnings.filterwarnings("ignore", category=UserWarning) + +# disable tensorflow warnings +import os + +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" + +# disable logging for +# import logging + +# logging.getLogger("requests").setLevel(logging.WARNING) +# logging.getLogger("urllib3").setLevel(logging.WARNING) + # swarms from swarms import agents from swarms.swarms.orchestrate import Orchestrator @@ -9,16 +25,3 @@ from swarms import workers from swarms.logo import logo2 print(logo2) - -# worker - -# boss -# from swarms.boss.boss_node import Boss - -# models - -# structs - -# swarms - -# agents diff --git a/swarms/agents/aot.py b/swarms/agents/aot.py index 48857406..771fb746 100644 --- a/swarms/agents/aot.py +++ b/swarms/agents/aot.py @@ -2,7 +2,7 @@ import logging import os import time -import openai +import openai_model logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" @@ -22,7 +22,7 @@ class OpenAI: if api_key == "" or api_key is None: api_key = os.environ.get("OPENAI_API_KEY", "") if api_key != "": - openai.api_key = api_key + openai_model.api_key = api_key else: raise Exception("Please provide OpenAI API key") @@ -32,7 +32,7 @@ class OpenAI: ) # if not set, use the default base path of "https://api.openai.com/v1" if api_base != "": # e.g. https://api.openai.com/v1/ or your custom url - openai.api_base = api_base + openai_model.api_base = api_base print(f"Using custom api_base {api_base}") if api_model == "" or api_model is None: @@ -52,14 +52,14 @@ class OpenAI: try: if self.use_chat_api: messages = [{"role": "user", "content": prompt}] - response = openai.ChatCompletion.create( + response = openai_model.ChatCompletion.create( model=self.api_model, messages=messages, max_tokens=max_tokens, temperature=temperature, ) else: - response = openai.Completion.create( + response = openai_model.Completion.create( engine=self.api_model, prompt=prompt, n=k, @@ -72,7 +72,7 @@ class OpenAI: "\n" + "-----------" + "\n" + "Prompt : " + prompt + "\n" ) return response - except openai.error.RateLimitError as e: + except openai_model.error.RateLimitError as e: sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30) print( f"{str(e)}, sleep for {sleep_duratoin}s, set it by env OPENAI_RATE_TIMEOUT" diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py index 2cd61d89..2a955ecc 100644 --- a/swarms/models/__init__.py +++ b/swarms/models/__init__.py @@ -1,9 +1,4 @@ -# prompts from swarms.models.anthropic import Anthropic - -# from swarms.models.palm import GooglePalm from swarms.models.petals import Petals - -# from swarms.models.chat_openai import OpenAIChat -from swarms.prompts.debate import * from swarms.models.mistral import Mistral +from swarms.models.openai_llm import OpenAIModel diff --git a/swarms/models/openai_llm.py b/swarms/models/openai_llm.py new file mode 100644 index 00000000..e0a8ae62 --- /dev/null +++ b/swarms/models/openai_llm.py @@ -0,0 +1,598 @@ +import importlib.util +import json +import os +from dataclasses import dataclass +from typing import Dict + +from huggingface_hub import hf_hub_download, list_spaces +from transformers.tools.base import ( + TASK_MAPPING, + TOOL_CONFIG_FILE, + Tool, + load_tool, + supports_remote, +) +from transformers.tools.prompts import CHAT_MESSAGE_PROMPT, download_prompt +from transformers.tools.python_interpreter import evaluate +from transformers.utils import is_offline_mode, is_openai_available, logging + +# utils +logger = logging.get_logger(__name__) + + +if is_openai_available(): + import openai + +else: + StoppingCriteria = object + +_tools_are_initialized = False + + +BASE_PYTHON_TOOLS = { + "print": print, + "range": range, + "float": float, + "int": int, + "bool": bool, + "str": str, +} + + +@dataclass +class PreTool: + task: str + description: str + repo_id: str + + +HUGGINGFACE_DEFAULT_TOOLS = {} + + +HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB = [ + "image-transformation", + "text-download", + "text-to-image", + "text-to-video", +] + + +def get_remote_tools(organization="huggingface-tools"): + if is_offline_mode(): + logger.info("You are in offline mode, so remote tools are not available.") + return {} + + spaces = list_spaces(author=organization) + tools = {} + for space_info in spaces: + repo_id = space_info.id + resolved_config_file = hf_hub_download( + repo_id, TOOL_CONFIG_FILE, repo_type="space" + ) + with open(resolved_config_file, encoding="utf-8") as reader: + config = json.load(reader) + + task = repo_id.split("/")[-1] + tools[config["name"]] = PreTool( + task=task, description=config["description"], repo_id=repo_id + ) + + return tools + + +def _setup_default_tools(): + global HUGGINGFACE_DEFAULT_TOOLS + global _tools_are_initialized + + if _tools_are_initialized: + return + + main_module = importlib.import_module("transformers") + tools_module = main_module.tools + + remote_tools = get_remote_tools() + for task_name, tool_class_name in TASK_MAPPING.items(): + tool_class = getattr(tools_module, tool_class_name) + description = tool_class.description + HUGGINGFACE_DEFAULT_TOOLS[tool_class.name] = PreTool( + task=task_name, description=description, repo_id=None + ) + + if not is_offline_mode(): + for task_name in HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB: + found = False + for tool_name, tool in remote_tools.items(): + if tool.task == task_name: + HUGGINGFACE_DEFAULT_TOOLS[tool_name] = tool + found = True + break + + if not found: + raise ValueError(f"{task_name} is not implemented on the Hub.") + + _tools_are_initialized = True + + +def resolve_tools(code, toolbox, remote=False, cached_tools=None): + if cached_tools is None: + resolved_tools = BASE_PYTHON_TOOLS.copy() + else: + resolved_tools = cached_tools + for name, tool in toolbox.items(): + if name not in code or name in resolved_tools: + continue + + if isinstance(tool, Tool): + resolved_tools[name] = tool + else: + task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id + _remote = remote and supports_remote(task_or_repo_id) + resolved_tools[name] = load_tool(task_or_repo_id, remote=_remote) + + return resolved_tools + + +def get_tool_creation_code(code, toolbox, remote=False): + code_lines = ["from transformers import load_tool", ""] + for name, tool in toolbox.items(): + if name not in code or isinstance(tool, Tool): + continue + + task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id + line = f'{name} = load_tool("{task_or_repo_id}"' + if remote: + line += ", remote=True" + line += ")" + code_lines.append(line) + + return "\n".join(code_lines) + "\n" + + +def clean_code_for_chat(result): + lines = result.split("\n") + idx = 0 + while idx < len(lines) and not lines[idx].lstrip().startswith("```"): + idx += 1 + explanation = "\n".join(lines[:idx]).strip() + if idx == len(lines): + return explanation, None + + idx += 1 + start_idx = idx + while not lines[idx].lstrip().startswith("```"): + idx += 1 + code = "\n".join(lines[start_idx:idx]).strip() + + return explanation, code + + +def clean_code_for_run(result): + result = f"I will use the following {result}" + explanation, code = result.split("Answer:") + explanation = explanation.strip() + code = code.strip() + + code_lines = code.split("\n") + if code_lines[0] in ["```", "```py", "```python"]: + code_lines = code_lines[1:] + if code_lines[-1] == "```": + code_lines = code_lines[:-1] + code = "\n".join(code_lines) + + return explanation, code + + +class Agent: + """ + Base class for all agents which contains the main API methods. + + Args: + chat_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `chat` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `chat_prompt_template.txt` in this repo in this case. + run_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `run` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `run_prompt_template.txt` in this repo in this case. + additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): + Any additional tools to include on top of the default ones. If you pass along a tool with the same name as + one of the default tools, that default tool will be overridden. + """ + + def __init__( + self, chat_prompt_template=None, run_prompt_template=None, additional_tools=None + ): + _setup_default_tools() + + agent_name = self.__class__.__name__ + self.chat_prompt_template = download_prompt( + chat_prompt_template, agent_name, mode="chat" + ) + self.run_prompt_template = download_prompt( + run_prompt_template, agent_name, mode="run" + ) + self._toolbox = HUGGINGFACE_DEFAULT_TOOLS.copy() + self.log = print + if additional_tools is not None: + if isinstance(additional_tools, (list, tuple)): + additional_tools = {t.name: t for t in additional_tools} + elif not isinstance(additional_tools, dict): + additional_tools = {additional_tools.name: additional_tools} + + replacements = { + name: tool + for name, tool in additional_tools.items() + if name in HUGGINGFACE_DEFAULT_TOOLS + } + self._toolbox.update(additional_tools) + if len(replacements) > 1: + names = "\n".join([f"- {n}: {t}" for n, t in replacements.items()]) + logger.warning( + f"The following tools have been replaced by the ones provided in `additional_tools`:\n{names}." + ) + elif len(replacements) == 1: + name = list(replacements.keys())[0] + logger.warning( + f"{name} has been replaced by {replacements[name]} as provided in `additional_tools`." + ) + + self.prepare_for_new_chat() + + @property + def toolbox(self) -> Dict[str, Tool]: + """Get all tool currently available to the agent""" + return self._toolbox + + def format_prompt(self, task, chat_mode=False): + description = "\n".join( + [f"- {name}: {tool.description}" for name, tool in self.toolbox.items()] + ) + if chat_mode: + if self.chat_history is None: + prompt = self.chat_prompt_template.replace("<>", description) + else: + prompt = self.chat_history + prompt += CHAT_MESSAGE_PROMPT.replace("<>", task) + else: + prompt = self.run_prompt_template.replace("<>", description) + prompt = prompt.replace("<>", task) + return prompt + + def set_stream(self, streamer): + """ + Set the function use to stream results (which is `print` by default). + + Args: + streamer (`callable`): The function to call when streaming results from the LLM. + """ + self.log = streamer + + def chat(self, task, *, return_code=False, remote=False, **kwargs): + """ + Sends a new request to the agent in a chat. Will use the previous ones in its history. + + Args: + task (`str`): The task to perform + return_code (`bool`, *optional*, defaults to `False`): + Whether to just return code and not evaluate it. + remote (`bool`, *optional*, defaults to `False`): + Whether or not to use remote tools (inference endpoints) instead of local ones. + kwargs (additional keyword arguments, *optional*): + Any keyword argument to send to the agent when evaluating the code. + + Example: + + ```py + from transformers import HfAgent + + agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") + agent.chat("Draw me a picture of rivers and lakes") + + agent.chat("Transform the picture so that there is a rock in there") + ``` + """ + prompt = self.format_prompt(task, chat_mode=True) + result = self.generate_one(prompt, stop=["Human:", "====="]) + self.chat_history = prompt + result.strip() + "\n" + explanation, code = clean_code_for_chat(result) + + self.log(f"==Explanation from the agent==\n{explanation}") + + if code is not None: + self.log(f"\n\n==Code generated by the agent==\n{code}") + if not return_code: + self.log("\n\n==Result==") + self.cached_tools = resolve_tools( + code, self.toolbox, remote=remote, cached_tools=self.cached_tools + ) + self.chat_state.update(kwargs) + return evaluate( + code, self.cached_tools, self.chat_state, chat_mode=True + ) + else: + tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) + return f"{tool_code}\n{code}" + + def prepare_for_new_chat(self): + """ + Clears the history of prior calls to [`~Agent.chat`]. + """ + self.chat_history = None + self.chat_state = {} + self.cached_tools = None + + def run(self, task, *, return_code=False, remote=False, **kwargs): + """ + Sends a request to the agent. + + Args: + task (`str`): The task to perform + return_code (`bool`, *optional*, defaults to `False`): + Whether to just return code and not evaluate it. + remote (`bool`, *optional*, defaults to `False`): + Whether or not to use remote tools (inference endpoints) instead of local ones. + kwargs (additional keyword arguments, *optional*): + Any keyword argument to send to the agent when evaluating the code. + + Example: + + ```py + from transformers import HfAgent + + agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") + agent.run("Draw me a picture of rivers and lakes") + ``` + """ + prompt = self.format_prompt(task) + result = self.generate_one(prompt, stop=["Task:"]) + explanation, code = clean_code_for_run(result) + + self.log(f"==Explanation from the agent==\n{explanation}") + + self.log(f"\n\n==Code generated by the agent==\n{code}") + if not return_code: + self.log("\n\n==Result==") + self.cached_tools = resolve_tools( + code, self.toolbox, remote=remote, cached_tools=self.cached_tools + ) + return evaluate(code, self.cached_tools, state=kwargs.copy()) + else: + tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) + return f"{tool_code}\n{code}" + + def generate_one(self, prompt, stop): + # This is the method to implement in your custom agent. + raise NotImplementedError + + def generate_many(self, prompts, stop): + # Override if you have a way to do batch generation faster than one by one + return [self.generate_one(prompt, stop) for prompt in prompts] + + +class OpenAIModel(Agent): + """ + Agent that uses the openai API to generate code. + + + + The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like + `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version. + + + + Args: + model (`str`, *optional*, defaults to `"text-davinci-003"`): + The name of the OpenAI model to use. + api_key (`str`, *optional*): + The API key to use. If unset, will look for the environment variable `"OPENAI_API_KEY"`. + chat_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `chat` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `chat_prompt_template.txt` in this repo in this case. + run_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `run` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `run_prompt_template.txt` in this repo in this case. + additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): + Any additional tools to include on top of the default ones. If you pass along a tool with the same name as + one of the default tools, that default tool will be overridden. + + Example: + + ```py + from swarms.models import OpenAI + + agent = OpenAiAgent(model="text-davinci-003", api_key=xxx) + agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!") + ``` + """ + + def __init__( + self, + model="text-davinci-003", + api_key=None, + chat_prompt_template=None, + run_prompt_template=None, + additional_tools=None, + ): + if not is_openai_available(): + raise ImportError( + "Using `OpenAiAgent` requires `openai`: `pip install openai`." + ) + + if api_key is None: + api_key = os.environ.get("OPENAI_API_KEY", None) + if api_key is None: + raise ValueError( + "You need an openai key to use `OpenAIAgent`. You can get one here: Get one here " + "https://openai.com/api/`. If you have one, set it in your env with `os.environ['OPENAI_API_KEY'] = " + "xxx." + ) + else: + openai.api_key = api_key + self.model = model + super().__init__( + chat_prompt_template=chat_prompt_template, + run_prompt_template=run_prompt_template, + additional_tools=additional_tools, + ) + + def generate_many(self, prompts, stop): + if "gpt" in self.model: + return [self._chat_generate(prompt, stop) for prompt in prompts] + else: + return self._completion_generate(prompts, stop) + + def generate_one(self, prompt, stop): + if "gpt" in self.model: + return self._chat_generate(prompt, stop) + else: + return self._completion_generate([prompt], stop)[0] + + def _chat_generate(self, prompt, stop): + result = openai.ChatCompletion.create( + model=self.model, + messages=[{"role": "user", "content": prompt}], + temperature=0, + stop=stop, + ) + return result["choices"][0]["message"]["content"] + + def _completion_generate(self, prompts, stop): + result = openai.Completion.create( + model=self.model, + prompt=prompts, + temperature=0, + stop=stop, + max_tokens=200, + ) + return [answer["text"] for answer in result["choices"]] + + +class AzureOpenAI(Agent): + """ + Agent that uses Azure OpenAI to generate code. See the [official + documentation](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) to learn how to deploy an openAI + model on Azure + + + + The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like + `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version. + + + + Args: + deployment_id (`str`): + The name of the deployed Azure openAI model to use. + api_key (`str`, *optional*): + The API key to use. If unset, will look for the environment variable `"AZURE_OPENAI_API_KEY"`. + resource_name (`str`, *optional*): + The name of your Azure OpenAI Resource. If unset, will look for the environment variable + `"AZURE_OPENAI_RESOURCE_NAME"`. + api_version (`str`, *optional*, default to `"2022-12-01"`): + The API version to use for this agent. + is_chat_mode (`bool`, *optional*): + Whether you are using a completion model or a chat model (see note above, chat models won't be as + efficient). Will default to `gpt` being in the `deployment_id` or not. + chat_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `chat` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `chat_prompt_template.txt` in this repo in this case. + run_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `run` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `run_prompt_template.txt` in this repo in this case. + additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): + Any additional tools to include on top of the default ones. If you pass along a tool with the same name as + one of the default tools, that default tool will be overridden. + + Example: + + ```py + from transformers import AzureOpenAiAgent + + agent = AzureAiAgent(deployment_id="Davinci-003", api_key=xxx, resource_name=yyy) + agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!") + ``` + """ + + def __init__( + self, + deployment_id, + api_key=None, + resource_name=None, + api_version="2022-12-01", + is_chat_model=None, + chat_prompt_template=None, + run_prompt_template=None, + additional_tools=None, + ): + if not is_openai_available(): + raise ImportError( + "Using `OpenAiAgent` requires `openai`: `pip install openai`." + ) + + self.deployment_id = deployment_id + openai.api_type = "azure" + if api_key is None: + api_key = os.environ.get("AZURE_OPENAI_API_KEY", None) + if api_key is None: + raise ValueError( + "You need an Azure openAI key to use `AzureOpenAIAgent`. If you have one, set it in your env with " + "`os.environ['AZURE_OPENAI_API_KEY'] = xxx." + ) + else: + openai.api_key = api_key + if resource_name is None: + resource_name = os.environ.get("AZURE_OPENAI_RESOURCE_NAME", None) + if resource_name is None: + raise ValueError( + "You need a resource_name to use `AzureOpenAIAgent`. If you have one, set it in your env with " + "`os.environ['AZURE_OPENAI_RESOURCE_NAME'] = xxx." + ) + else: + openai.api_base = f"https://{resource_name}.openai.azure.com" + openai.api_version = api_version + + if is_chat_model is None: + is_chat_model = "gpt" in deployment_id.lower() + self.is_chat_model = is_chat_model + + super().__init__( + chat_prompt_template=chat_prompt_template, + run_prompt_template=run_prompt_template, + additional_tools=additional_tools, + ) + + def generate_many(self, prompts, stop): + if self.is_chat_model: + return [self._chat_generate(prompt, stop) for prompt in prompts] + else: + return self._completion_generate(prompts, stop) + + def generate_one(self, prompt, stop): + if self.is_chat_model: + return self._chat_generate(prompt, stop) + else: + return self._completion_generate([prompt], stop)[0] + + def _chat_generate(self, prompt, stop): + result = openai.ChatCompletion.create( + engine=self.deployment_id, + messages=[{"role": "user", "content": prompt}], + temperature=0, + stop=stop, + ) + return result["choices"][0]["message"]["content"] + + def _completion_generate(self, prompts, stop): + result = openai.Completion.create( + engine=self.deployment_id, + prompt=prompts, + temperature=0, + stop=stop, + max_tokens=200, + ) + return [answer["text"] for answer in result["choices"]] diff --git a/swarms/models/openai_models.py b/swarms/models/openai_models.py new file mode 100644 index 00000000..26a74df0 --- /dev/null +++ b/swarms/models/openai_models.py @@ -0,0 +1,958 @@ +from __future__ import annotations + +import logging +import sys +import warnings +from typing import ( + AbstractSet, + Any, + AsyncIterator, + Callable, + Collection, + Dict, + Iterator, + List, + Literal, + Mapping, + Optional, + Set, + Tuple, + Union, +) + +from langchain.callbacks.manager import ( + AsyncCallbackManagerForLLMRun, + CallbackManagerForLLMRun, +) +from langchain.llms.base import BaseLLM, create_base_retry_decorator +from langchain.pydantic_v1 import Field, root_validator +from langchain.schema import Generation, LLMResult +from langchain.schema.output import GenerationChunk +from langchain.utils import get_from_dict_or_env, get_pydantic_field_names +from langchain.utils.utils import build_extra_kwargs + +logger = logging.getLogger(__name__) + + +def update_token_usage( + keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any] +) -> None: + """Update token usage.""" + _keys_to_use = keys.intersection(response["usage"]) + for _key in _keys_to_use: + if _key not in token_usage: + token_usage[_key] = response["usage"][_key] + else: + token_usage[_key] += response["usage"][_key] + + +def _stream_response_to_generation_chunk( + stream_response: Dict[str, Any], +) -> GenerationChunk: + """Convert a stream response to a generation chunk.""" + return GenerationChunk( + text=stream_response["choices"][0]["text"], + generation_info=dict( + finish_reason=stream_response["choices"][0].get("finish_reason", None), + logprobs=stream_response["choices"][0].get("logprobs", None), + ), + ) + + +def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None: + """Update response from the stream response.""" + response["choices"][0]["text"] += stream_response["choices"][0]["text"] + response["choices"][0]["finish_reason"] = stream_response["choices"][0].get( + "finish_reason", None + ) + response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"] + + +def _streaming_response_template() -> Dict[str, Any]: + return { + "choices": [ + { + "text": "", + "finish_reason": None, + "logprobs": None, + } + ] + } + + +def _create_retry_decorator( + llm: Union[BaseOpenAI, OpenAIChat], + run_manager: Optional[ + Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] + ] = None, +) -> Callable[[Any], Any]: + import openai + + errors = [ + openai.error.Timeout, + openai.error.APIError, + openai.error.APIConnectionError, + openai.error.RateLimitError, + openai.error.ServiceUnavailableError, + ] + return create_base_retry_decorator( + error_types=errors, max_retries=llm.max_retries, run_manager=run_manager + ) + + +def completion_with_retry( + llm: Union[BaseOpenAI, OpenAIChat], + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, +) -> Any: + """Use tenacity to retry the completion call.""" + retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) + + @retry_decorator + def _completion_with_retry(**kwargs: Any) -> Any: + return llm.client.create(**kwargs) + + return _completion_with_retry(**kwargs) + + +async def acompletion_with_retry( + llm: Union[BaseOpenAI, OpenAIChat], + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, +) -> Any: + """Use tenacity to retry the async completion call.""" + retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) + + @retry_decorator + async def _completion_with_retry(**kwargs: Any) -> Any: + # Use OpenAI's async api https://github.com/openai/openai-python#async-api + return await llm.client.acreate(**kwargs) + + return await _completion_with_retry(**kwargs) + + +class BaseOpenAI(BaseLLM): + """Base OpenAI large language model class.""" + + @property + def lc_secrets(self) -> Dict[str, str]: + return {"openai_api_key": "OPENAI_API_KEY"} + + @property + def lc_attributes(self) -> Dict[str, Any]: + attributes: Dict[str, Any] = {} + if self.openai_api_base != "": + attributes["openai_api_base"] = self.openai_api_base + + if self.openai_organization != "": + attributes["openai_organization"] = self.openai_organization + + if self.openai_proxy != "": + attributes["openai_proxy"] = self.openai_proxy + + return attributes + + @classmethod + def is_lc_serializable(cls) -> bool: + return True + + client: Any = None #: :meta private: + model_name: str = Field(default="text-davinci-003", alias="model") + """Model name to use.""" + temperature: float = 0.7 + """What sampling temperature to use.""" + max_tokens: int = 256 + """The maximum number of tokens to generate in the completion. + -1 returns as many tokens as possible given the prompt and + the models maximal context size.""" + top_p: float = 1 + """Total probability mass of tokens to consider at each step.""" + frequency_penalty: float = 0 + """Penalizes repeated tokens according to frequency.""" + presence_penalty: float = 0 + """Penalizes repeated tokens.""" + n: int = 1 + """How many completions to generate for each prompt.""" + best_of: int = 1 + """Generates best_of completions server-side and returns the "best".""" + model_kwargs: Dict[str, Any] = Field(default_factory=dict) + """Holds any model parameters valid for `create` call not explicitly specified.""" + openai_api_key: Optional[str] = None + openai_api_base: Optional[str] = None + openai_organization: Optional[str] = None + # to support explicit proxy for OpenAI + openai_proxy: Optional[str] = None + batch_size: int = 20 + """Batch size to use when passing multiple documents to generate.""" + request_timeout: Optional[Union[float, Tuple[float, float]]] = None + """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" + logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) + """Adjust the probability of specific tokens being generated.""" + max_retries: int = 6 + """Maximum number of retries to make when generating.""" + streaming: bool = False + """Whether to stream the results or not.""" + allowed_special: Union[Literal["all"], AbstractSet[str]] = set() + """Set of special tokens that are allowed。""" + disallowed_special: Union[Literal["all"], Collection[str]] = "all" + """Set of special tokens that are not allowed。""" + tiktoken_model_name: Optional[str] = None + """The model name to pass to tiktoken when using this class. + Tiktoken is used to count the number of tokens in documents to constrain + them to be under a certain limit. By default, when set to None, this will + be the same as the embedding model name. However, there are some cases + where you may want to use this Embedding class with a model name not + supported by tiktoken. This can include when using Azure embeddings or + when using one of the many model providers that expose an OpenAI-like + API but with different models. In those cases, in order to avoid erroring + when tiktoken is called, you can specify a model name to use here.""" + + def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore + """Initialize the OpenAI object.""" + model_name = data.get("model_name", "") + return super().__new__(cls) + + class Config: + """Configuration for this pydantic object.""" + + allow_population_by_field_name = True + + @root_validator(pre=True) + def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: + """Build extra kwargs from additional params that were passed in.""" + all_required_field_names = get_pydantic_field_names(cls) + extra = values.get("model_kwargs", {}) + values["model_kwargs"] = build_extra_kwargs( + extra, values, all_required_field_names + ) + return values + + @root_validator() + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + values["openai_api_key"] = get_from_dict_or_env( + values, "openai_api_key", "OPENAI_API_KEY" + ) + values["openai_api_base"] = get_from_dict_or_env( + values, + "openai_api_base", + "OPENAI_API_BASE", + default="", + ) + values["openai_proxy"] = get_from_dict_or_env( + values, + "openai_proxy", + "OPENAI_PROXY", + default="", + ) + values["openai_organization"] = get_from_dict_or_env( + values, + "openai_organization", + "OPENAI_ORGANIZATION", + default="", + ) + try: + import openai + + values["client"] = openai.Completion + except ImportError: + raise ImportError( + "Could not import openai python package. " + "Please install it with `pip install openai`." + ) + if values["streaming"] and values["n"] > 1: + raise ValueError("Cannot stream results when n > 1.") + if values["streaming"] and values["best_of"] > 1: + raise ValueError("Cannot stream results when best_of > 1.") + return values + + @property + def _default_params(self) -> Dict[str, Any]: + """Get the default parameters for calling OpenAI API.""" + normal_params = { + "temperature": self.temperature, + "max_tokens": self.max_tokens, + "top_p": self.top_p, + "frequency_penalty": self.frequency_penalty, + "presence_penalty": self.presence_penalty, + "n": self.n, + "request_timeout": self.request_timeout, + "logit_bias": self.logit_bias, + } + + # Azure gpt-35-turbo doesn't support best_of + # don't specify best_of if it is 1 + if self.best_of > 1: + normal_params["best_of"] = self.best_of + + return {**normal_params, **self.model_kwargs} + + def _stream( + self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[GenerationChunk]: + params = {**self._invocation_params, **kwargs, "stream": True} + self.get_sub_prompts(params, [prompt], stop) # this mutates params + for stream_resp in completion_with_retry( + self, prompt=prompt, run_manager=run_manager, **params + ): + chunk = _stream_response_to_generation_chunk(stream_resp) + yield chunk + if run_manager: + run_manager.on_llm_new_token( + chunk.text, + chunk=chunk, + verbose=self.verbose, + logprobs=chunk.generation_info["logprobs"] + if chunk.generation_info + else None, + ) + + async def _astream( + self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[GenerationChunk]: + params = {**self._invocation_params, **kwargs, "stream": True} + self.get_sub_prompts(params, [prompt], stop) # this mutate params + async for stream_resp in await acompletion_with_retry( + self, prompt=prompt, run_manager=run_manager, **params + ): + chunk = _stream_response_to_generation_chunk(stream_resp) + yield chunk + if run_manager: + await run_manager.on_llm_new_token( + chunk.text, + chunk=chunk, + verbose=self.verbose, + logprobs=chunk.generation_info["logprobs"] + if chunk.generation_info + else None, + ) + + def _generate( + self, + prompts: List[str], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> LLMResult: + """Call out to OpenAI's endpoint with k unique prompts. + + Args: + prompts: The prompts to pass into the model. + stop: Optional list of stop words to use when generating. + + Returns: + The full LLM output. + + Example: + .. code-block:: python + + response = openai.generate(["Tell me a joke."]) + """ + # TODO: write a unit test for this + params = self._invocation_params + params = {**params, **kwargs} + sub_prompts = self.get_sub_prompts(params, prompts, stop) + choices = [] + token_usage: Dict[str, int] = {} + # Get the token usage from the response. + # Includes prompt, completion, and total tokens used. + _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} + for _prompts in sub_prompts: + if self.streaming: + if len(_prompts) > 1: + raise ValueError("Cannot stream results with multiple prompts.") + + generation: Optional[GenerationChunk] = None + for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs): + if generation is None: + generation = chunk + else: + generation += chunk + assert generation is not None + choices.append( + { + "text": generation.text, + "finish_reason": generation.generation_info.get("finish_reason") + if generation.generation_info + else None, + "logprobs": generation.generation_info.get("logprobs") + if generation.generation_info + else None, + } + ) + else: + response = completion_with_retry( + self, prompt=_prompts, run_manager=run_manager, **params + ) + choices.extend(response["choices"]) + update_token_usage(_keys, response, token_usage) + return self.create_llm_result(choices, prompts, token_usage) + + async def _agenerate( + self, + prompts: List[str], + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> LLMResult: + """Call out to OpenAI's endpoint async with k unique prompts.""" + params = self._invocation_params + params = {**params, **kwargs} + sub_prompts = self.get_sub_prompts(params, prompts, stop) + choices = [] + token_usage: Dict[str, int] = {} + # Get the token usage from the response. + # Includes prompt, completion, and total tokens used. + _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} + for _prompts in sub_prompts: + if self.streaming: + if len(_prompts) > 1: + raise ValueError("Cannot stream results with multiple prompts.") + + generation: Optional[GenerationChunk] = None + async for chunk in self._astream( + _prompts[0], stop, run_manager, **kwargs + ): + if generation is None: + generation = chunk + else: + generation += chunk + assert generation is not None + choices.append( + { + "text": generation.text, + "finish_reason": generation.generation_info.get("finish_reason") + if generation.generation_info + else None, + "logprobs": generation.generation_info.get("logprobs") + if generation.generation_info + else None, + } + ) + else: + response = await acompletion_with_retry( + self, prompt=_prompts, run_manager=run_manager, **params + ) + choices.extend(response["choices"]) + update_token_usage(_keys, response, token_usage) + return self.create_llm_result(choices, prompts, token_usage) + + def get_sub_prompts( + self, + params: Dict[str, Any], + prompts: List[str], + stop: Optional[List[str]] = None, + ) -> List[List[str]]: + """Get the sub prompts for llm call.""" + if stop is not None: + if "stop" in params: + raise ValueError("`stop` found in both the input and default params.") + params["stop"] = stop + if params["max_tokens"] == -1: + if len(prompts) != 1: + raise ValueError( + "max_tokens set to -1 not supported for multiple inputs." + ) + params["max_tokens"] = self.max_tokens_for_prompt(prompts[0]) + sub_prompts = [ + prompts[i : i + self.batch_size] + for i in range(0, len(prompts), self.batch_size) + ] + return sub_prompts + + def create_llm_result( + self, choices: Any, prompts: List[str], token_usage: Dict[str, int] + ) -> LLMResult: + """Create the LLMResult from the choices and prompts.""" + generations = [] + for i, _ in enumerate(prompts): + sub_choices = choices[i * self.n : (i + 1) * self.n] + generations.append( + [ + Generation( + text=choice["text"], + generation_info=dict( + finish_reason=choice.get("finish_reason"), + logprobs=choice.get("logprobs"), + ), + ) + for choice in sub_choices + ] + ) + llm_output = {"token_usage": token_usage, "model_name": self.model_name} + return LLMResult(generations=generations, llm_output=llm_output) + + @property + def _invocation_params(self) -> Dict[str, Any]: + """Get the parameters used to invoke the model.""" + openai_creds: Dict[str, Any] = { + "api_key": self.openai_api_key, + "api_base": self.openai_api_base, + "organization": self.openai_organization, + } + if self.openai_proxy: + import openai + + openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501 + return {**openai_creds, **self._default_params} + + @property + def _identifying_params(self) -> Mapping[str, Any]: + """Get the identifying parameters.""" + return {**{"model_name": self.model_name}, **self._default_params} + + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "openai" + + def get_token_ids(self, text: str) -> List[int]: + """Get the token IDs using the tiktoken package.""" + # tiktoken NOT supported for Python < 3.8 + if sys.version_info[1] < 8: + return super().get_num_tokens(text) + try: + import tiktoken + except ImportError: + raise ImportError( + "Could not import tiktoken python package. " + "This is needed in order to calculate get_num_tokens. " + "Please install it with `pip install tiktoken`." + ) + + model_name = self.tiktoken_model_name or self.model_name + try: + enc = tiktoken.encoding_for_model(model_name) + except KeyError: + logger.warning("Warning: model not found. Using cl100k_base encoding.") + model = "cl100k_base" + enc = tiktoken.get_encoding(model) + + return enc.encode( + text, + allowed_special=self.allowed_special, + disallowed_special=self.disallowed_special, + ) + + @staticmethod + def modelname_to_contextsize(modelname: str) -> int: + """Calculate the maximum number of tokens possible to generate for a model. + + Args: + modelname: The modelname we want to know the context size for. + + Returns: + The maximum context size + + Example: + .. code-block:: python + + max_tokens = openai.modelname_to_contextsize("text-davinci-003") + """ + model_token_mapping = { + "gpt-4": 8192, + "gpt-4-0314": 8192, + "gpt-4-0613": 8192, + "gpt-4-32k": 32768, + "gpt-4-32k-0314": 32768, + "gpt-4-32k-0613": 32768, + "gpt-3.5-turbo": 4096, + "gpt-3.5-turbo-0301": 4096, + "gpt-3.5-turbo-0613": 4096, + "gpt-3.5-turbo-16k": 16385, + "gpt-3.5-turbo-16k-0613": 16385, + "gpt-3.5-turbo-instruct": 4096, + "text-ada-001": 2049, + "ada": 2049, + "text-babbage-001": 2040, + "babbage": 2049, + "text-curie-001": 2049, + "curie": 2049, + "davinci": 2049, + "text-davinci-003": 4097, + "text-davinci-002": 4097, + "code-davinci-002": 8001, + "code-davinci-001": 8001, + "code-cushman-002": 2048, + "code-cushman-001": 2048, + } + + # handling finetuned models + if "ft-" in modelname: + modelname = modelname.split(":")[0] + + context_size = model_token_mapping.get(modelname, None) + + if context_size is None: + raise ValueError( + f"Unknown model: {modelname}. Please provide a valid OpenAI model name." + "Known models are: " + ", ".join(model_token_mapping.keys()) + ) + + return context_size + + @property + def max_context_size(self) -> int: + """Get max context size for this model.""" + return self.modelname_to_contextsize(self.model_name) + + def max_tokens_for_prompt(self, prompt: str) -> int: + """Calculate the maximum number of tokens possible to generate for a prompt. + + Args: + prompt: The prompt to pass into the model. + + Returns: + The maximum number of tokens to generate for a prompt. + + Example: + .. code-block:: python + + max_tokens = openai.max_token_for_prompt("Tell me a joke.") + """ + num_tokens = self.get_num_tokens(prompt) + return self.max_context_size - num_tokens + + +class OpenAI(BaseOpenAI): + """OpenAI large language models. + + To use, you should have the ``openai`` python package installed, and the + environment variable ``OPENAI_API_KEY`` set with your API key. + + Any parameters that are valid to be passed to the openai.create call can be passed + in, even if not explicitly saved on this class. + + Example: + .. code-block:: python + + from langchain.llms import OpenAI + openai = OpenAI(model_name="text-davinci-003") + """ + + @property + def _invocation_params(self) -> Dict[str, Any]: + return {**{"model": self.model_name}, **super()._invocation_params} + + +class AzureOpenAI(BaseOpenAI): + """Azure-specific OpenAI large language models. + + To use, you should have the ``openai`` python package installed, and the + environment variable ``OPENAI_API_KEY`` set with your API key. + + Any parameters that are valid to be passed to the openai.create call can be passed + in, even if not explicitly saved on this class. + + Example: + .. code-block:: python + + from langchain.llms import AzureOpenAI + openai = AzureOpenAI(model_name="text-davinci-003") + """ + + deployment_name: str = "" + """Deployment name to use.""" + openai_api_type: str = "" + openai_api_version: str = "" + + @root_validator() + def validate_azure_settings(cls, values: Dict) -> Dict: + values["openai_api_version"] = get_from_dict_or_env( + values, + "openai_api_version", + "OPENAI_API_VERSION", + ) + values["openai_api_type"] = get_from_dict_or_env( + values, "openai_api_type", "OPENAI_API_TYPE", "azure" + ) + return values + + @property + def _identifying_params(self) -> Mapping[str, Any]: + return { + **{"deployment_name": self.deployment_name}, + **super()._identifying_params, + } + + @property + def _invocation_params(self) -> Dict[str, Any]: + openai_params = { + "engine": self.deployment_name, + "api_type": self.openai_api_type, + "api_version": self.openai_api_version, + } + return {**openai_params, **super()._invocation_params} + + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "azure" + + @property + def lc_attributes(self) -> Dict[str, Any]: + return { + "openai_api_type": self.openai_api_type, + "openai_api_version": self.openai_api_version, + } + + +class OpenAIChat(BaseLLM): + """OpenAI Chat large language models. + + To use, you should have the ``openai`` python package installed, and the + environment variable ``OPENAI_API_KEY`` set with your API key. + + Any parameters that are valid to be passed to the openai.create call can be passed + in, even if not explicitly saved on this class. + + Example: + .. code-block:: python + + from langchain.llms import OpenAIChat + openaichat = OpenAIChat(model_name="gpt-3.5-turbo") + """ + + client: Any #: :meta private: + model_name: str = "gpt-3.5-turbo" + """Model name to use.""" + model_kwargs: Dict[str, Any] = Field(default_factory=dict) + """Holds any model parameters valid for `create` call not explicitly specified.""" + openai_api_key: Optional[str] = None + openai_api_base: Optional[str] = None + # to support explicit proxy for OpenAI + openai_proxy: Optional[str] = None + max_retries: int = 6 + """Maximum number of retries to make when generating.""" + prefix_messages: List = Field(default_factory=list) + """Series of messages for Chat input.""" + streaming: bool = False + """Whether to stream the results or not.""" + allowed_special: Union[Literal["all"], AbstractSet[str]] = set() + """Set of special tokens that are allowed。""" + disallowed_special: Union[Literal["all"], Collection[str]] = "all" + """Set of special tokens that are not allowed。""" + + @root_validator(pre=True) + def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: + """Build extra kwargs from additional params that were passed in.""" + all_required_field_names = {field.alias for field in cls.__fields__.values()} + + extra = values.get("model_kwargs", {}) + for field_name in list(values): + if field_name not in all_required_field_names: + if field_name in extra: + raise ValueError(f"Found {field_name} supplied twice.") + extra[field_name] = values.pop(field_name) + values["model_kwargs"] = extra + return values + + @root_validator() + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + openai_api_key = get_from_dict_or_env( + values, "openai_api_key", "OPENAI_API_KEY" + ) + openai_api_base = get_from_dict_or_env( + values, + "openai_api_base", + "OPENAI_API_BASE", + default="", + ) + openai_proxy = get_from_dict_or_env( + values, + "openai_proxy", + "OPENAI_PROXY", + default="", + ) + openai_organization = get_from_dict_or_env( + values, "openai_organization", "OPENAI_ORGANIZATION", default="" + ) + try: + import openai + + openai.api_key = openai_api_key + if openai_api_base: + openai.api_base = openai_api_base + if openai_organization: + openai.organization = openai_organization + if openai_proxy: + openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501 + except ImportError: + raise ImportError( + "Could not import openai python package. " + "Please install it with `pip install openai`." + ) + try: + values["client"] = openai.ChatCompletion + except AttributeError: + raise ValueError( + "`openai` has no `ChatCompletion` attribute, this is likely " + "due to an old version of the openai package. Try upgrading it " + "with `pip install --upgrade openai`." + ) + return values + + @property + def _default_params(self) -> Dict[str, Any]: + """Get the default parameters for calling OpenAI API.""" + return self.model_kwargs + + def _get_chat_params( + self, prompts: List[str], stop: Optional[List[str]] = None + ) -> Tuple: + if len(prompts) > 1: + raise ValueError( + f"OpenAIChat currently only supports single prompt, got {prompts}" + ) + messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}] + params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params} + if stop is not None: + if "stop" in params: + raise ValueError("`stop` found in both the input and default params.") + params["stop"] = stop + if params.get("max_tokens") == -1: + # for ChatGPT api, omitting max_tokens is equivalent to having no limit + del params["max_tokens"] + return messages, params + + def _stream( + self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[GenerationChunk]: + messages, params = self._get_chat_params([prompt], stop) + params = {**params, **kwargs, "stream": True} + for stream_resp in completion_with_retry( + self, messages=messages, run_manager=run_manager, **params + ): + token = stream_resp["choices"][0]["delta"].get("content", "") + chunk = GenerationChunk(text=token) + yield chunk + if run_manager: + run_manager.on_llm_new_token(token, chunk=chunk) + + async def _astream( + self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[GenerationChunk]: + messages, params = self._get_chat_params([prompt], stop) + params = {**params, **kwargs, "stream": True} + async for stream_resp in await acompletion_with_retry( + self, messages=messages, run_manager=run_manager, **params + ): + token = stream_resp["choices"][0]["delta"].get("content", "") + chunk = GenerationChunk(text=token) + yield chunk + if run_manager: + await run_manager.on_llm_new_token(token, chunk=chunk) + + def _generate( + self, + prompts: List[str], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> LLMResult: + if self.streaming: + generation: Optional[GenerationChunk] = None + for chunk in self._stream(prompts[0], stop, run_manager, **kwargs): + if generation is None: + generation = chunk + else: + generation += chunk + assert generation is not None + return LLMResult(generations=[[generation]]) + + messages, params = self._get_chat_params(prompts, stop) + params = {**params, **kwargs} + full_response = completion_with_retry( + self, messages=messages, run_manager=run_manager, **params + ) + llm_output = { + "token_usage": full_response["usage"], + "model_name": self.model_name, + } + return LLMResult( + generations=[ + [Generation(text=full_response["choices"][0]["message"]["content"])] + ], + llm_output=llm_output, + ) + + async def _agenerate( + self, + prompts: List[str], + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> LLMResult: + if self.streaming: + generation: Optional[GenerationChunk] = None + async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs): + if generation is None: + generation = chunk + else: + generation += chunk + assert generation is not None + return LLMResult(generations=[[generation]]) + + messages, params = self._get_chat_params(prompts, stop) + params = {**params, **kwargs} + full_response = await acompletion_with_retry( + self, messages=messages, run_manager=run_manager, **params + ) + llm_output = { + "token_usage": full_response["usage"], + "model_name": self.model_name, + } + return LLMResult( + generations=[ + [Generation(text=full_response["choices"][0]["message"]["content"])] + ], + llm_output=llm_output, + ) + + @property + def _identifying_params(self) -> Mapping[str, Any]: + """Get the identifying parameters.""" + return {**{"model_name": self.model_name}, **self._default_params} + + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "openai-chat" + + def get_token_ids(self, text: str) -> List[int]: + """Get the token IDs using the tiktoken package.""" + # tiktoken NOT supported for Python < 3.8 + if sys.version_info[1] < 8: + return super().get_token_ids(text) + try: + import tiktoken + except ImportError: + raise ImportError( + "Could not import tiktoken python package. " + "This is needed in order to calculate get_num_tokens. " + "Please install it with `pip install tiktoken`." + ) + + enc = tiktoken.encoding_for_model(self.model_name) + return enc.encode( + text, + allowed_special=self.allowed_special, + disallowed_special=self.disallowed_special, + ) diff --git a/swarms/swarms/battle_royal_swarm.py b/swarms/swarms/battle_royal.py similarity index 67% rename from swarms/swarms/battle_royal_swarm.py rename to swarms/swarms/battle_royal.py index ff1ebf5a..2a02186e 100644 --- a/swarms/swarms/battle_royal_swarm.py +++ b/swarms/swarms/battle_royal.py @@ -20,12 +20,40 @@ from swarms.workers.worker import Worker class BattleRoyalSwarm: - def __init__(self, human_evaluator=None): - self.workers = [Worker() for _ in range(100)] + """ + Battle Royal Swarm + + Parameters: + - `human_evaluator` (function): Function to evaluate and score two solutions. + - `num_workers` (int): Number of workers in the swarm. + - `num_teams` (int): Number of teams in the swarm. + + Example: + + # User evaluator function to evaluate and score two solutions + def human_evaluator(solution1, solution2): + # Placeholder; in a real-world application, the user would input scores here + score1 = int(input(f"Score for solution 1 - '{solution1}': ")) + score2 = int(input(f"Score for solution 2 - '{solution2}': ")) + return score1, score2 + + # Example usage + swarm = BattleRoyalSwarm(human_evaluator) + swarm.broadcast_question("What is the capital of France?") + + """ + + def __init__( + self, + human_evaluator=None, + num_workers: int = 100, + ): + self.workers = [Worker() for _ in range(num_workers)] self.teams = self.form_teams() self.human_evaluator = human_evaluator def form_teams(self): + """Form teams of 1, 3 or 4 workers.""" teams = [] unassigned_workers = self.workers.copy() while unassigned_workers: @@ -40,6 +68,7 @@ class BattleRoyalSwarm: return teams def broadcast_question(self, question: str): + """Broadcast a question to the swarm.""" responses = {} for worker in self.workers: response = worker.run(question) @@ -57,12 +86,14 @@ class BattleRoyalSwarm: print(f"Worker {winner.id} won over Worker {loser.id}") def communicate(self, sender: Worker, reciever: Worker, message: str): + """Communicate a message from one worker to another.""" if sender.is_within_proximity(reciever) or any( team in sender.teams for team in reciever.teams ): pass def clash(self, worker1: Worker, worker2: Worker, question: str): + """Clash two workers and return the winner.""" solution1 = worker1.run(question) solution2 = worker2.run(question) score1, score2 = self.human_evaluator(solution1, solution2) diff --git a/swarms/tools/autogpt.py b/swarms/tools/autogpt.py index 1755d259..22267fce 100644 --- a/swarms/tools/autogpt.py +++ b/swarms/tools/autogpt.py @@ -3,7 +3,6 @@ import os from contextlib import contextmanager from typing import Optional -import interpreter import pandas as pd import torch from langchain.agents import tool @@ -147,28 +146,28 @@ class WebpageQATool(BaseTool): raise NotImplementedError -@tool -def compile(task: str): - """ - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. - You can chat with Open Interpreter through a ChatGPT-like interface in your terminal - by running $ interpreter after installing. - - This provides a natural-language interface to your computer's general-purpose capabilities: - - Create and edit photos, videos, PDFs, etc. - Control a Chrome browser to perform research - Plot, clean, and analyze large datasets - ...etc. - ⚠️ Note: You'll be asked to approve code before it's run. - """ - task = interpreter.chat(task, return_messages=True) - interpreter.chat() - interpreter.reset(task) - - os.environ["INTERPRETER_CLI_AUTO_RUN"] = True - os.environ["INTERPRETER_CLI_FAST_MODE"] = True - os.environ["INTERPRETER_CLI_DEBUG"] = True +# @tool +# def compile(task: str): +# """ +# Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. +# You can chat with Open Interpreter through a ChatGPT-like interface in your terminal +# by running $ interpreter after installing. + +# This provides a natural-language interface to your computer's general-purpose capabilities: + +# Create and edit photos, videos, PDFs, etc. +# Control a Chrome browser to perform research +# Plot, clean, and analyze large datasets +# ...etc. +# ⚠️ Note: You'll be asked to approve code before it's run. +# """ +# task = interpreter.chat(task, return_messages=True) +# interpreter.chat() +# interpreter.reset(task) + +# os.environ["INTERPRETER_CLI_AUTO_RUN"] = True +# os.environ["INTERPRETER_CLI_FAST_MODE"] = True +# os.environ["INTERPRETER_CLI_DEBUG"] = True # mm model workers diff --git a/swarms/tools/code_intepretor.py b/swarms/tools/code_intepretor.py deleted file mode 100644 index 05dc6a0b..00000000 --- a/swarms/tools/code_intepretor.py +++ /dev/null @@ -1,73 +0,0 @@ -# props to shroominic -from swarms.tools.base import Tool, ToolException -from typing import Any, List -from codeinterpreterapi import CodeInterpreterSession, File, ToolException - - -class CodeInterpreter(Tool): - def __init__(self, name: str, description: str): - super().__init__(name, description, self.run) - - def run(self, user_request: str, file_paths: List[str] = []) -> Any: - # create a session - session = CodeInterpreterSession() - session.start() - - # create files from paths - files = [File.from_path(file_path) for file_path in file_paths] - - try: - # generate a response based on user input - response = session.generate_response(user_request, files=files) - - # output the response (text + image) - print("AI: ", response.content) - for file in response.files: - file.show_image() - except Exception as e: - raise ToolException(f"Error running CodeInterpreter: {e}") - finally: - # terminate the session - session.stop() - - async def arun(self, user_request: str, file_paths: List[str] = []) -> Any: - # create a session - session = CodeInterpreterSession() - await session.astart() - - # create files from paths - files = [File.from_path(file_path) for file_path in file_paths] - - try: - # generate a response based on user input - response = await session.generate_response(user_request, files=files) - - # output the response (text + image) - print("AI: ", response.content) - for file in response.files: - file.show_image() - except Exception as e: - raise ToolException(f"Error running CodeInterpreter: {e}") - finally: - # terminate the session - await session.astop() - - -""" - -tool = CodeInterpreter("Code Interpreter", "A tool to interpret code and generate useful outputs.") -tool.run("Plot the bitcoin chart of 2023 YTD") - -# Or with file inputs -tool.run("Analyze this dataset and plot something interesting about it.", ["examples/assets/iris.csv"]) - - - -import asyncio - -tool = CodeInterpreter("Code Interpreter", "A tool to interpret code and generate useful outputs.") -asyncio.run(tool.arun("Plot the bitcoin chart of 2023 YTD")) - -# Or with file inputs -asyncio.run(tool.arun("Analyze this dataset and plot something interesting about it.", ["examples/assets/iris.csv"])) -""" diff --git a/swarms/workers/worker.py b/swarms/workers/worker.py index df247c11..127826ab 100644 --- a/swarms/workers/worker.py +++ b/swarms/workers/worker.py @@ -14,7 +14,7 @@ from swarms.tools.autogpt import ( ReadFileTool, WebpageQATool, WriteFileTool, - compile, + # compile, load_qa_with_sources_chain, process_csv, ) @@ -138,7 +138,7 @@ class Worker: process_csv, query_website_tool, HumanInputRun(), - compile, + # compile, # VQAinference, ] if external_tools is not None: