From 0277c2b97650b6c59130aba318254ee468ca465e Mon Sep 17 00:00:00 2001
From: David Marx
Date: Sun, 11 Dec 2022 13:04:18 -0500
Subject: [PATCH 1/3] add draft a/b test notebook

---
 nbs/A_B.ipynb | 580 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 580 insertions(+)
 create mode 100644 nbs/A_B.ipynb

diff --git a/nbs/A_B.ipynb b/nbs/A_B.ipynb
new file mode 100644
index 00000000..ac1acb39
--- /dev/null
+++ b/nbs/A_B.ipynb
@@ -0,0 +1,580 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Stability.AI A/B Testing Notebook\n",
+ "\n",
+ "by David Marx (@DigThatData)\n",
+ "\n",
+ "## Usage Instructions\n",
+ "\n",
+ "1. Complete the input fields below as appropriate to specify output and config file locations.\n",
+ "2. Specify your experiment in yaml\n",
+ "    - See the `%%writefile` cell for an example. You can either modify the values in that cell to write a new `test_config.yaml` to parameterize your experiments, or upload an appropriate yaml file.\n",
+ "    - **`defaults`**: settings that will be used across test cases.\n",
+ "    - **`combinatorial_parameters`**: permuted to generate randomized settings which will be shared across a given sample of images shown to the user (see the sketch at the end of these instructions).\n",
+ "    - **`differentiators`**: specifies the experiment names and what settings are specific to each experimental test case.\n",
+ "        - Parameterize your experiment with two or more test cases. The current example uses three.\n",
+ "        - Each test case (each top-level entry below `differentiators`) will be assigned its own API client, so you can use settings like `engine` or `grpc_host` as differentiating attributes.\n",
+ "3. Running the \"Load a random sample\" cell will:\n",
+ "    1. log the results from the previous sample\n",
+ "    2. generate a visualization of the recorded experiment outcomes\n",
+ "    3. load a new random set of images to compare.\n",
+ "    - The ordering in which the respective test cases are displayed is randomized each time the cell is executed (i.e. with each new set of images).\n",
+ "    - Push the button below an image to pick it as your favorite.\n",
+ "        - Click again to deselect if you change your mind.\n",
+ "        - The notebook does not currently constrain the user to only select one option, but that's how we recommend you use it.\n",
+ "    - When you're satisfied with your selection, execute the cell again to log your feedback and generate a new set of images.\n",
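+ "\n",
+ "### How `combinatorial_parameters` get expanded (illustrative sketch)\n",
+ "\n",
+ "Each random sample draws one combination of the combinatorial parameter values; conceptually the setup cells below do something like the following. This is a minimal sketch with made-up values -- the notebook handles the real expansion, shuffling, and per-test-case clients for you.\n",
+ "\n",
+ "```python\n",
+ "from itertools import product\n",
+ "import random\n",
+ "\n",
+ "combinatorial_parameters = {  # hypothetical values, for illustration only\n",
+ "    'prompt': ['a painting of a fox', 'a photo of a fox'],\n",
+ "    'cfg_scale': [7, 12],\n",
+ "    'steps': [40, 60],\n",
+ "}\n",
+ "\n",
+ "experiments = list(product(*combinatorial_parameters.values()))  # 2 * 2 * 2 = 8 combinations\n",
+ "random.shuffle(experiments)\n",
+ "\n",
+ "# one combination becomes the settings shared by every test case within a single sample\n",
+ "kwargs_exp = dict(zip(combinatorial_parameters.keys(), experiments[0]))\n",
+ "```"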
+ ], + "metadata": { + "id": "i0RWQRJAkdJe" + } + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "cellView": "form", + "id": "MlSp8emhkXXg" + }, + "outputs": [], + "source": [ + "%%capture\n", + "\n", + "########################\n", + "# install dependencies #\n", + "########################\n", + "\n", + "try:\n", + " import stability_sdk\n", + "except ImportError:\n", + " # to do: requirements file\n", + " !pip install stability-sdk\n", + " !pip install omegaconf panel loguru\n", + "\n", + "###########\n", + "# imports #\n", + "###########\n", + "\n", + "# python stdlib\n", + "from collections import Counter\n", + "import copy\n", + "import importlib.metadata\n", + "import io\n", + "from itertools import product\n", + "import json\n", + "import os\n", + "from pathlib import Path\n", + "import random\n", + "import time\n", + "import warnings\n", + "\n", + "# google colab stdlib\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "from PIL import Image\n", + "from scipy.stats import beta\n", + "\n", + "# external deps\n", + "from loguru import logger\n", + "from omegaconf import OmegaConf\n", + "import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation\n", + "\n", + "########################\n", + "# user workspace setup #\n", + "########################\n", + "\n", + "notebook_user_id = 'digthatdata' # @param {type: 'string'}\n", + "if not notebook_user_id:\n", + " raise ValueError(\"\\n> New fone, who dis?\\nPlease identify yourself.\")\n", + "\n", + "explog_fname = \"outcomes.log\"\n", + "\n", + "proj_root_str = '${active_project}'\n", + "mount_gdrive = True # @param {type:'boolean'}\n", + "if mount_gdrive:\n", + " from google.colab import drive\n", + " drive.mount('/content/drive')\n", + " proj_root_str = '/content/drive/MyDrive/AI/StabilityAbTesting/${active_project}'\n", + "\n", + "\n", + "project_name = 'abtest1' # @param {type:'string'}\n", + "if not project_name.strip():\n", + " project_name = str(time.time())\n", + "\n", + "experiments_configfile_name = 'test_config.yaml' # @param {type:'string'}\n", + "\n", + "if not Path(experiments_configfile_name).exists():\n", + " warnings.warn(\n", + " f\"Experiment config file {experiments_configfile_name} \"\n", + " \"not detected. You may need to upload the experiment config \"\n", + " \"to this workspace. Alternatively, you can create one by running \"\n", + " \"the `%%writefile` cell below\"\n", + " )\n", + "\n", + "# @markdown if gdrive is mounted and `local_config_priority` is selected, the notebook will check for a\n", + "# @markdown local experiment config file. 
If it finds one, it will load the \n",
+ "# @markdown experiment from the local file and copy the config to the project folder on gdrive\n",
+ "# @markdown -- overwriting the remote experiment config if it exists.\n",
+ "\n",
+ "# @markdown if `local_config_priority` is not selected, the notebook will only\n",
+ "# @markdown look for an experiment config in the project folder (which will be on the gdrive if that's been mounted).\n",
+ "\n",
+ "\n",
+ "# @markdown The intention here is to facilitate modifying experiment parameters from the notebook's `%%writefile` cell mid-experiment.\n",
+ "\n",
+ "local_config_priority = True # @param {type:'boolean'}\n",
+ "\n",
+ "#################################\n",
+ "# Config for notebook workspace #\n",
+ "#################################\n",
+ "\n",
+ "workspace_cfg = OmegaConf.create({\n",
+ "    'active_project':project_name,\n",
+ "    'project_root':proj_root_str,\n",
+ "    'gdrive_mounted':mount_gdrive,\n",
+ "    'notebook_user_id':notebook_user_id,\n",
+ "    'exp_cfg_fname': experiments_configfile_name,\n",
+ "    'explog_fname':explog_fname,\n",
+ "    'local_config_priority':local_config_priority,\n",
+ "})\n",
+ "\n",
+ "with open('config.yaml','w') as fp:\n",
+ "    OmegaConf.save(config=workspace_cfg, f=fp)\n",
+ "\n",
+ "#############################\n",
+ "# misc setup                #\n",
+ "# - global variables (yuck) #\n",
+ "# - function definitions    #\n",
+ "#############################\n",
+ "\n",
+ "exp_cfg_fpath = Path(workspace_cfg.project_root) / workspace_cfg.exp_cfg_fname\n",
+ "\n",
+ "# to do -> mirror this to the notebook workspace\n",
+ "running_score = Counter()\n",
+ "\n",
+ "# this stuff could come from the local config, e.g. to increment\n",
+ "# sample number without needing a prefix\n",
+ "SAMPLE_IDX = 0\n",
+ "RANDOM_PREFIX = str(time.time())\n",
+ "\n",
+ "# would be nice if there was a way to query the API for its version\n",
+ "SDK_VERSION = importlib.metadata.version(\"stability_sdk\") # if installing from local repo, use git commit hash instead (or as suffix?)\n",
+ "\n",
+ "import pandas as pd\n",
+ "\n",
+ "def barchart(running_score):\n",
+ "    df = pd.Series(running_score)\n",
+ "    df.sort_index().plot.barh() # why doesn't sort_index() do what I need it to?\n",
+ "    plt.show()\n",
+ "\n",
+ "def posterior_plot(running_score, alpha=0.95):\n",
+ "    barchart(running_score)\n",
+ "    # try:\n",
+ "    #     posterior_plot_binary(running_score, alpha)\n",
+ "    # except NotImplementedError:\n",
+ "    #     barchart(running_score)\n",
+ "\n",
+ "\n",
+ "def posterior_plot_binary(running_score, alpha=0.95):\n",
+ "    \"\"\"\n",
+ "    Plots a MAP estimate for a binomial probability.\n",
+ "    I.e. counts are used as the parameters for a beta\n",
+ "    PDF, and a credible interval about the median is\n",
+ "    estimated. Really we want the interval to be around\n",
+ "    the mode, but this was easier to code and it's close\n",
+ "    enough.\n",
+ "\n",
+ "    alpha: significance level of CI\n",
+ "    \"\"\"\n",
+ "    if len(running_score) > 2:\n",
+ "        raise NotImplementedError\n",
+ "\n",
+ "    fig, ax = plt.subplots(1, 1)\n",
+ "\n",
+ "    a,b = list(running_score.values())\n",
+ "    # Jeffreys prior\n",
+ "    a+=0.5\n",
+ "    b+=0.5\n",
+ "    x = np.linspace(\n",
+ "        #beta.ppf(0.01, a, b),\n",
+ "        #beta.ppf(0.99, a, b),\n",
+ "        0, 1,\n",
+ "        100)\n",
+ "    ax.plot(x, beta.pdf(x, a=a, b=b),\n",
+ "            'r-', label='beta pdf')\n",
+ "    ax.set_xlim(0,1)\n",
+ "    ax.get_yaxis().set_visible(False)\n",
+ "    plt.title(f\"MAP estimate for likelihood that {list(running_score.keys())[0]} is preferred\")\n",
+ "\n",
+ "    mu = a / (a+b)\n",
+ "    print(f\"mu: {mu}\")\n",
+ "    median = beta.median(a=a,b=b)\n",
+ "    print(f\"median: {median}\")\n",
+ "    plt.vlines(x=mu, ymin=0, ymax=beta.pdf(mu, a, b), linestyles='dashed', color='blue')\n",
+ "    mode = mu\n",
+ "    if (a>1) and (b>1):\n",
+ "        mode = (a-1) / (a+b-2)  # mode of Beta(a, b) = (a-1)/(a+b-2), defined for a, b > 1\n",
+ "    print(f\"mode: {mode}\")\n",
+ "    plt.vlines(x=mode, ymin=0, ymax=beta.pdf(mode, a, b), linestyles='dashed')\n",
+ "    # note: this is an interval around the median instead of the mode, but it's close enough for our purposes.\n",
+ "    lwr, upr = beta.interval(a=a,b=b, alpha=alpha)\n",
+ "    plt.vlines(x=lwr, ymin=0, ymax=beta.pdf(lwr, a, b), linestyles='dashed')\n",
+ "    plt.vlines(x=upr, ymin=0, ymax=beta.pdf(upr, a, b), linestyles='dashed')\n",
+ "    xs2 = np.linspace(lwr, upr, 100)\n",
+ "    plt.fill_between(xs2, beta.pdf(xs2, a=a, b=b), color='r', alpha=0.4)\n",
+ "\n",
+ "    plt.show()\n",
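+ "\n",
+ "# Illustrative usage of the posterior helpers (hypothetical counts -- the scoring cell\n",
+ "# below maintains `running_score` and calls `posterior_plot` for you):\n",
+ "#\n",
+ "#   demo_scores = Counter({'test_case_A': 12, 'test_case_B': 7})\n",
+ "#   posterior_plot_binary(demo_scores, alpha=0.95)  # Beta(12.5, 7.5) posterior with a 95% interval\n",
+ "#   posterior_plot(demo_scores)  # currently just draws the bar chart"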
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "%%writefile test_config.yaml\n",
+ "\n",
+ "### settings that will be used across test cases.\n",
+ "defaults:\n",
+ "  grpc_host: grpc.stability.ai:443\n",
+ "  # If API key not provided in test_config.yaml, user prompted with getpass\n",
+ "  key:\n",
+ "\n",
+ "### randomly permuted to produce settings shared across test cases for a generated set of images\n",
+ "combinatorial_parameters:\n",
+ "  prompt:\n",
+ "    - mom's spaghetti, knees weak, arms sweaty. but for real, mom's spaghetti is delicious\n",
+ "    - prompt with an optional middle part. {middle} this is the end of the prompt.\n",
+ "  cfg_scale:\n",
+ "    - 7\n",
+ "    - 9\n",
+ "    - 12\n",
+ "    - 15\n",
+ "  steps:\n",
+ "    - 40\n",
+ "    - 50\n",
+ "    - 60\n",
+ "\n",
+ "## specifies the experiment names and what settings are specific to each experimental test case.\n",
+ "# not a fan of this name. maybe call this section \"experiments\"?\n",
+ "differentiators:\n",
+ "  test_case_A:\n",
+ "    engine: stable-diffusion-512-v2-0\n",
+ "    prompt_chunks:\n",
+ "      middle: ''\n",
+ "  test_case_B:\n",
+ "    engine: stable-diffusion-512-v2-1\n",
+ "    prompt_chunks:\n",
+ "      middle: this is the optional middle of the prompt. it only goes with test_case_B.\n",
+ "  test_case_C:\n",
+ "    engine: stable-diffusion-v1-5\n",
+ "    # If using prompt chunks, all test_cases need at least a prompt_chunks dict with the same keys and\n",
+ "    # empty strings as values. If empty strings aren't specified, you'll get \"None\" as the filler chunk.\n",
+ "    prompt_chunks:\n",
+ "      middle: ''\n",
+ "      # Don't do this, results in `middle:\"None\"`\n",
+ "      # middle:\n"
+ ],
+ "metadata": {
+ "id": "GX85BLyFrGlJ"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @markdown ## Load Experiments\n",
+ "\n",
+ "from omegaconf import OmegaConf\n",
+ "import getpass\n",
+ "from stability_sdk import client\n",
+ "\n",
+ "import panel as pn\n",
+ "pn.extension()\n",
+ "\n",
+ "\n",
+ "exp_cfg_fpath_out = Path(workspace_cfg.project_root) / workspace_cfg.exp_cfg_fname\n",
+ "exp_cfg_fpath = exp_cfg_fpath_out\n",
+ "if workspace_cfg.local_config_priority:\n",
+ "    exp_cfg_fpath = Path(workspace_cfg.exp_cfg_fname)\n",
+ "if exp_cfg_fpath.exists():\n",
+ "    cfg = OmegaConf.load(exp_cfg_fpath)\n",
+ "elif exp_cfg_fpath_out.exists():\n",
+ "    cfg = OmegaConf.load(exp_cfg_fpath_out)\n",
+ "else:\n",
+ "    raise RuntimeError(\n",
+ "        f\"Experiment config file {workspace_cfg.exp_cfg_fname} not found. \"\n",
+ "        \"Make sure you've saved or uploaded the file, \"\n",
+ "        \"and that it's named correctly.\"\n",
+ "    )\n",
+ "\n",
+ "Path(workspace_cfg.project_root).mkdir(parents=True, exist_ok=True)\n",
+ "with exp_cfg_fpath_out.open('w') as fp:\n",
+ "    OmegaConf.save(config=cfg, f=fp)\n",
+ "\n",
+ "\n",
+ "########################\n",
+ "# propagate invariants #\n",
+ "########################\n",
+ "\n",
+ "test_case_names = list(cfg.differentiators.keys())\n",
+ "invariant_attr_names = list(cfg.defaults.keys())\n",
+ "for test_case in cfg.differentiators:\n",
+ "    for param in cfg.defaults:\n",
+ "        if param not in cfg.differentiators[test_case]:\n",
+ "            cfg.differentiators[test_case][param] = cfg.defaults[param]\n",
+ "\n",
+ "\n",
+ "#####################################\n",
+ "# request from user if not provided #\n",
+ "#####################################\n",
+ "\n",
+ "required_attributes = [\n",
+ "    'grpc_host',\n",
+ "    #'api_key'\n",
+ "    'key',\n",
+ "]\n",
+ "\n",
+ "for test_case in cfg.differentiators:\n",
+ "    for attr in required_attributes:\n",
+ "        if not cfg.differentiators[test_case].get(attr):\n",
+ "            cfg.differentiators[test_case][attr] = getpass.getpass(f\"[{test_case}] {attr}: \")\n",
+ "\n",
+ "##########################################\n",
+ "# Build a client for each differentiator #\n",
+ "##########################################\n",
+ "\n",
+ "# Doing this because it's likely the differentiators are different engines or grpc endpoints\n",
+ "clients = {}\n",
+ "for test_case in cfg.differentiators:\n",
+ "    kargs = {}\n",
+ "    for arg in ['host','key','engine']:\n",
+ "        if arg in cfg.differentiators[test_case]:\n",
+ "            kargs[arg] = cfg.differentiators[test_case][arg]\n",
+ "    # the config carries the endpoint as `grpc_host`, but the SDK client expects `host`\n",
+ "    if 'grpc_host' in cfg.differentiators[test_case]:\n",
+ "        kargs['host'] = cfg.differentiators[test_case]['grpc_host']\n",
+ "    clients[test_case] = client.StabilityInference(**kargs)\n",
+ "\n",
+ "#######################################\n",
+ "# precompute combinations and shuffle #\n",
+ "#######################################\n",
+ "\n",
+ "gen = product(*cfg.combinatorial_parameters.values())\n",
+ "experiments = list(gen)\n",
+ "random.shuffle(experiments)\n",
+ "\n",
+ "items = []  # initialized empty so the scoring cell can log outcomes at the top of its next run\n",
+ "\n",
+ "\n",
+ "for test_case in cfg.differentiators:\n",
+ "    running_score[test_case]+=0\n",
+ "\n",
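+ "\n",
+ "# Illustrative: each entry of `experiments` is one tuple of combinatorial values, in the same\n",
+ "# order as cfg.combinatorial_parameters.keys(). With the example config above, one entry could be\n",
+ "# (prompt, cfg_scale, steps):\n",
+ "#   (\"prompt with an optional middle part. {middle} this is the end of the prompt.\", 9, 50)\n",
+ "# The scoring cell below turns the sampled tuple back into keyword arguments with dict(zip(keys, rec))."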
+ ],
+ "metadata": {
+ "id": "xl_t3CjZlWvq",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @markdown # Load a random sample to score preference\n",
+ "\n",
+ "SAMPLE_IDX += 1\n",
+ "\n",
+ "##########################\n",
+ "# Log experiment outcome #\n",
+ "##########################\n",
+ "\n",
+ "save_images = False # @param {type:'boolean'}\n",
+ "save_favorite_only = False # @param {type:'boolean'}\n",
+ "\n",
+ "\n",
+ "# to do: make this not a closure.\n",
+ "def log_items(items):\n",
+ "    #for (img, test_case, kwargs_gen, is_preference) in items: # to do: dictify\n",
+ "    recs = []\n",
+ "    for item in items:\n",
+ "        # assign image a filename\n",
+ "        img_fname = f\"{RANDOM_PREFIX}_{SAMPLE_IDX}_{item['test_case']}.png\"\n",
+ "        #rec = copy.deepcopy(item)\n",
+ "        rec = item\n",
+ "        img_fpath = Path(workspace_cfg.project_root) / img_fname\n",
+ "        # save image\n",
+ "        img = rec.pop('img')\n",
+ "        save_im = False\n",
+ "        if save_images or save_favorite_only:\n",
+ "            save_im = True\n",
+ "        if save_favorite_only and not rec['is_preference']:\n",
+ "            save_im = False\n",
+ "        if save_im:\n",
+ "            print(img_fpath)\n",
+ "            rec['img_fpath'] = str(img_fpath)\n",
+ "            img.save(img_fpath)\n",
+ "        # update outcome\n",
+ "        rec['is_preference'] = rec['button'].value\n",
+ "        if rec['is_preference']:\n",
+ "            running_score[rec['test_case']] += 1\n",
+ "        rec.pop('button')\n",
+ "        # log outcome\n",
+ "        recs.append(rec)\n",
+ "    outfile = Path(workspace_cfg.project_root) / explog_fname\n",
+ "    #with open(outfile, 'a') as f:\n",
+ "    with outfile.open('a') as f:\n",
+ "        json.dump(recs, f)\n",
+ "        f.write('\\n')\n",
+ "    logger.debug(running_score)\n",
+ "\n",
+ "if items:\n",
+ "    try:\n",
+ "        log_items(items)\n",
+ "        posterior_plot(running_score)\n",
+ "    except KeyError:\n",
+ "        # skip logging if the previous sample's items are malformed\n",
+ "        pass\n",
+ "\n",
+ "\n",
+ "SEED = random.randrange(0, 4294967295)\n",
+ "\n",
+ "blind_test = False # @param {type: \"boolean\"}\n",
+ "\n",
+ "def item_to_ux(item):\n",
+ "    img = item['img']\n",
+ "    test_case = item['test_case']\n",
+ "    kwargs = item['kwargs']\n",
+ "\n",
+ "    output = [f\"# {test_case}\"]\n",
+ "    if blind_test:\n",
+ "        output = []\n",
+ "    output += [img]\n",
+ "    if not blind_test:\n",
+ "        output += [f\"{kwargs}\"]\n",
+ "    else:\n",
+ "        output += [f\"{kwargs_exp}\"]\n",
+ "    toggle = pn.widgets.Toggle(name='Favorite', button_type='success')\n",
+ "    output.append(toggle)\n",
+ "    item['button'] = toggle\n",
+ "    item['is_preference'] = toggle.value\n",
+ "    return pn.Column(*output)\n",
+ "\n",
+ "\n",
+ "non_generation_arguments = ['grpc_host', 'engine', 'key']\n",
+ "\n",
+ "rec = random.choice(experiments)\n",
+ "\n",
+ "keys = cfg.combinatorial_parameters.keys()\n",
+ "kwargs_exp = dict(zip(keys, rec))\n",
+ "kwargs_exp['seed'] = SEED\n",
+ "\n",
+ "items = []\n",
+ "for test_case, api in clients.items():\n",
+ "    logger.debug(f\"requesting image for {test_case}\")\n",
+ "    kwargs_test = copy.deepcopy(kwargs_exp)\n",
+ "    kwargs_diff = cfg.differentiators[test_case]\n",
+ "    kwargs_test.update(kwargs_diff)\n",
+ "    kwargs_test.pop('key')\n",
+ "    kwargs_gen = copy.deepcopy(kwargs_test)\n",
+ "    for key in non_generation_arguments:\n",
+ "        if key in kwargs_gen:\n",
+ "            kwargs_gen.pop(key)\n",
+ "    #########\n",
+ "    # handle prompt_chunks\n",
+ "    chunks = kwargs_gen.pop('prompt_chunks', {})\n",
+ "    if '{' in kwargs_gen['prompt']:\n",
+ "        kwargs_gen['prompt'] = kwargs_gen['prompt'].format(**chunks)\n",
+ "    #########\n",
+ "\n",
+ "    answers = api.generate(**kwargs_gen)\n",
+ "    for resp in answers:\n",
+ "        for artifact in resp.artifacts:\n",
+ "            if artifact.finish_reason == generation.FILTER:\n",
+ "                warnings.warn(\n",
+ "                    \"Your request activated the API's safety filters and could not be processed. \"\n",
+ "                    \"Please modify the prompt and try again.\")\n",
+ "            if artifact.type == generation.ARTIFACT_IMAGE:\n",
+ "                img = Image.open(io.BytesIO(artifact.binary))\n",
+ "                img = img.resize([512, 512])\n",
+ "                items.append({\n",
+ "                    'img':img,\n",
+ "                    'test_case':test_case,\n",
+ "                    'kwargs':kwargs_gen,\n",
+ "                    'is_preference':False,\n",
+ "                    # additional metadata\n",
+ "                    'SDK_VERSION':SDK_VERSION,\n",
+ "                    'timestamp':time.time(),\n",
+ "                    'user_id': workspace_cfg.notebook_user_id,\n",
+ "                    'project_name':workspace_cfg.active_project,\n",
+ "                })\n",
+ "\n",
+ "random.shuffle(items)\n",
+ "pn.Row(*[item_to_ux(it) for it in items])"
+ ],
+ "metadata": {
+ "id": "ogPWQhWm7BXN",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# To do:\n",
+ "#\n",
+ "# - fix `save_favorite_only` behavior\n",
+ "#\n",
+ "# - Make notebook more robust to errors, resuming\n",
+ "# - read outcomes.log if it exists, use it to populate running_score, SAMPLE_IDX, etc. \n",
+ "# - move notebook to VCS\n",
+ "# - log time taken for generation\n",
+ "# - maybe writing images to gdrive is slower than writing them locally?\n",
+ "# - log time taken by user to choose\n",
+ "# - log incidence of safety mitigation trigger\n",
+ "# - log incidence of errors and conditions that emitted them\n",
+ "# - record \"pass\" - i.e. when user regenerates without selecting a favorite\n",
+ "# - optionally use different seed for samples\n",
+ "# - optionally request multiple samples per test case\n",
+ "# - use plotly (or some such) to permit user to interactively facet histogram on combinatorial options\n",
+ "# - e.g. to see if there are specific prompts for which one or other test case is preferred\n",
+ "# - check old checkins project for a potential solution here\n",
+ "# - https://medium.com/plotly/introducing-jupyterdash-811f1f57c02e\n",
+ "# - https://github.com/dmarx/checkin/blob/master/checkin/datavis%20sandbox.ipynb\n",
+ "\n",
+ "# Additional ratings\n",
+ "# - aesthetic preference\n",
+ "# - relevance to prompt\n",
+ "# - least weird / most devoid of artifacts\n",
+ "# - best fit to style\n",
+ "# - best fit to content\n",
+ "# - best foreground\n",
+ "# - best background\n",
+ "# - best composition\n",
+ "# - best small details\n",
+ "# - best texture\n",
+ "# - most photorealistic\n",
+ "# - most artistic"
+ ],
+ "metadata": {
+ "id": "rvzAMAgPMhuy"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+}
\ No newline at end of file

From 6da0fab73d12f1b027218b0d319c9d2e27b0edef Mon Sep 17 00:00:00 2001
From: David Marx
Date: Sun, 11 Dec 2022 13:08:51 -0500
Subject: [PATCH 2/3] tidying

---
 nbs/A_B.ipynb | 48 ++----------------------------------------------
 1 file changed, 2 insertions(+), 46 deletions(-)

diff --git a/nbs/A_B.ipynb b/nbs/A_B.ipynb
index ac1acb39..126440f4 100644
--- a/nbs/A_B.ipynb
+++ b/nbs/A_B.ipynb
@@ -47,7 +47,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 10,
+ "execution_count": null,
 "metadata": {
 "cellView": "form",
 "id": "MlSp8emhkXXg"
@@ -531,50 +531,6 @@
 },
 "execution_count": null,
 "outputs": []
- },
- {
- "cell_type": "code",
- "source": [
- "# To do:\n",
- "#\n",
- "# - fix `save_favorite_only` behavior\n",
- "#\n",
- "# - Make notebook more robust to errors, resuming\n",
- "# - read outcomes.log if it exists, use it to populate running_score, SAMPLE_IDX, etc. 
\n", - "# - move notebook to VCS\n", - "# - log time taken for generation\n", - "# - maybe writing images to gdrive is slower than writing them locally?\n", - "# - log time taken by user to choose\n", - "# - log incidence of safety mitigation trigger\n", - "# - log incidence of errors and conditions that emitted them\n", - "# - record \"pass\" - i.e. when user regenerates without selecting a favorite\n", - "# - optionally use different seed for samples\n", - "# - optionally request multiple samples per test case\n", - "# - use plotly (or some such) to permit user to interactively facet histogram on combinatorial options\n", - "# - e.g. to see if there are specific prompts for which one or other test case is preferred\n", - "# - check old checkins project for a potential solution here\n", - "# - https://medium.com/plotly/introducing-jupyterdash-811f1f57c02e\n", - "# - https://github.com/dmarx/checkin/blob/master/checkin/datavis%20sandbox.ipynb\n", - "\n", - "# Additional ratings\n", - "# - aesthetic preference\n", - "# - relevance to prompt\n", - "# - least weird / most devoid of artifacts\n", - "# - best fit to style\n", - "# - best fit to content\n", - "# - best foreground\n", - "# - best background\n", - "# - best composition\n", - "# - best small details\n", - "# - best texture\n", - "# - most photorealistic\n", - "# - most artistic" - ], - "metadata": { - "id": "rvzAMAgPMhuy" - }, - "execution_count": null, - "outputs": [] } ] -} \ No newline at end of file +} From b178807e5f385061ffaf23180078c2713eef3d65 Mon Sep 17 00:00:00 2001 From: David Marx Date: Sun, 11 Dec 2022 13:27:24 -0500 Subject: [PATCH 3/3] reduce narcissism --- nbs/A_B.ipynb | 2 -- 1 file changed, 2 deletions(-) diff --git a/nbs/A_B.ipynb b/nbs/A_B.ipynb index 126440f4..1e334539 100644 --- a/nbs/A_B.ipynb +++ b/nbs/A_B.ipynb @@ -19,8 +19,6 @@ "source": [ "# Stability.AI A/B Testing Notebook\n", "\n", - "by David Marx (@DigThatData)\n", - "\n", "## Usage Instructions\n", "\n", "1. Complete the input fields below as appropriate to specify output and config file locations.\n",