diff --git a/src/Chat/Talent.hbs b/src/Chat/Talent.hbs
new file mode 100644
index 0000000..0440d48
--- /dev/null
+++ b/src/Chat/Talent.hbs
@@ -0,0 +1,39 @@
+
diff --git a/src/Dialogs/Talent.hbs b/src/Dialogs/Talent.hbs
new file mode 100644
index 0000000..1cc5294
--- /dev/null
+++ b/src/Dialogs/Talent.hbs
@@ -0,0 +1,4 @@
+
diff --git a/src/EditableInput.hbs b/src/EditableInput.hbs
index 4a7f721..21ed573 100644
--- a/src/EditableInput.hbs
+++ b/src/EditableInput.hbs
@@ -1,8 +1,8 @@
{{#if (eq type "checkbox")}}
-
+
{{else}}
-
+
{{/if}}
{{#if placeholder}}
diff --git a/src/ItemSheets/Talent.hbs b/src/ItemSheets/Talent.hbs
new file mode 100644
index 0000000..19339ad
--- /dev/null
+++ b/src/ItemSheets/Talent.hbs
@@ -0,0 +1,55 @@
+
+
+

+
+
+ {{>editable-input type="text" name="name" value=item.name placeholder=(localize "DSA41.name")}}
+
+
+
{{localize "DSA41.talente.label_kategorie"}}
+
+ {{>editable-input type="text" name="system.behinderung" value=item.system.behinderung placeholder=(localize "DSA41.talente.label_behinderung")}}
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/lang/de.json b/src/lang/de.json
index 05b7c19..0fec8d0 100644
--- a/src/lang/de.json
+++ b/src/lang/de.json
@@ -3,7 +3,8 @@
"Item": {
"Gegenstand": "Gegenstand",
"Ruestung": "Rüstung",
- "Bewaffnung": "Bewaffnung"
+ "Bewaffnung": "Bewaffnung",
+ "Talent": "Talent"
}
},
@@ -15,6 +16,18 @@
"weight": "Gewicht",
"price": "Preis",
+ "chat": {
+ "result": "Ergebnis",
+ "success": "Geschafft",
+ "failure": "Fehlgeschlagen",
+
+ "attribute": "Eigenschaft",
+ "value": "Wert",
+ "roll": "Wurf",
+
+ "talentwert_short": "TaW"
+ },
+
"roll_types": {
"courage": "Mut",
"cleverness": "Klugheit",
@@ -25,6 +38,8 @@
"constitution": "Konstitution",
"strength": "Körperkraft",
+ "talent": "Talent",
+
"attacke": "Attacke",
"parade": "Parade",
"trefferpunkte": "Trefferpunkte",
@@ -73,6 +88,8 @@
"label": "Talent",
"label_eigenschaften": "Eigenschaften",
"label_talentwert": "Talentwert",
+ "label_kategorie": "Kategorie",
+ "label_behinderung": "Behinderung",
"kampf": {
"label": "Kampf Talente",
@@ -116,24 +133,24 @@
"label": "Körperliche Talente",
"name": {
- "akrobatik": "Akrobatik",
- "athletik": "Athletik",
- "fliegen": "Fliegen",
- "gaukeleien": "Gaukeleien",
- "klettern": "Klettern",
- "koerperbeherrschung": "Körperbeherrschung",
- "reiten": "Reiten",
- "schleichen": "Schleichen",
- "schwimmen": "Schwimmen",
- "selbstbeherrschung": "Selbstbeherrschung",
- "sich_verstecken": "Sich Verstecken",
- "singen": "Singen",
- "sinnenschärfe": "Sinnenschärfe",
- "skifahren": "Skifahren",
- "stimmen_imitieren": "Stimmen Imitieren",
- "tanzen": "Tanzen",
- "taschendiebstahl": "Taschendiebstahl",
- "zechen": "Zechen"
+ "Akrobatik": "Akrobatik",
+ "Athletik": "Athletik",
+ "Fliegen": "Fliegen",
+ "Gaukeleien": "Gaukeleien",
+ "Klettern": "Klettern",
+ "Körperbeherrschung": "Körperbeherrschung",
+ "Reiten": "Reiten",
+ "Schleichen": "Schleichen",
+ "Schwimmen": "Schwimmen",
+ "Selbstbeherrschung": "Selbstbeherrschung",
+ "Sich Verstecken": "Sich Verstecken",
+ "Singen": "Singen",
+ "Sinnenschärfe": "Sinnenschärfe",
+ "Skifahren": "Skifahren",
+ "Stimmen Imitieren": "Stimmen Imitieren",
+ "Tanzen": "Tanzen",
+ "Taschendiebstahl": "Taschendiebstahl",
+ "Zechen": "Zechen"
}
},
@@ -141,16 +158,16 @@
"label": "Gesellschaftliche Talente",
"name": {
- "betoeren": "Betören",
- "etikette": "Etikette",
- "gassenwissen": "Gassenwissen",
- "lehren": "Lehren",
- "menschenkenntnis": "Menschenkenntnis",
- "schauspielerei": "Schauspielerei",
- "schriftlicher_ausdruck": "Schriftlicher Ausdruck",
- "sich_verkleiden": "Sich Verkleiden",
- "ueberreden": "Überreden",
- "ueberzeugen": "Überzeugen"
+ "Betören": "Betören",
+ "Etikette": "Etikette",
+ "Gassenwissen": "Gassenwissen",
+ "Lehren": "Lehren",
+ "Menschenkenntnis": "Menschenkenntnis",
+ "Schauspielerei": "Schauspielerei",
+ "Schriftlicher Ausdruck": "Schriftlicher Ausdruck",
+ "Sich Verkleiden": "Sich Verkleiden",
+ "Überreden": "Überreden",
+ "Überzeugen": "Überzeugen"
}
},
@@ -158,13 +175,13 @@
"label": "Natur-Talente",
"name": {
- "faehrtensuchen": "Fährtensuchen",
- "fallenstellen": "Fallenstellen",
- "fesseln": "Fesseln/Entfesseln",
- "fischen": "Fischen/Angeln",
- "orientierung": "Orientierung",
- "wettervorhersage": "Wettervorhersage",
- "wildnisleben": "Wildnisleben"
+ "Fährtensuchen": "Fährtensuchen",
+ "Fallenstellen": "Fallenstellen",
+ "Fesseln/Entfesseln": "Fesseln/Entfesseln",
+ "Fischen/Angeln": "Fischen/Angeln",
+ "Orientierung": "Orientierung",
+ "Wettervorhersage": "Wettervorhersage",
+ "Wildnisleben": "Wildnisleben"
}
},
@@ -172,29 +189,29 @@
"label": "Wissenstalente",
"name": {
- "anatomie": "Anatomie",
- "baukunst": "Baukunst",
- "brett_kartenspiel": "Brett-/Kartenspiel",
- "geographie": "Geographie",
- "geschichtswissen": "Geschichtswissen",
- "gesteinskunde": "Gesteinskunde",
- "goetter_kulte": "Götter/Kulte",
- "heraldik": "Heraldik",
- "huettenkunde": "Hüttenkunde",
- "kriegskunst": "Kriegskunst",
- "kryptographie": "Kryptographie",
- "magiekunde": "Magiekunde",
- "mechanik": "Mechanik",
- "pflanzenkunde": "Pflanzenkunde",
- "philosophie": "Philosophie",
- "rechnen": "Rechnen",
- "rechtskunde": "Rechtskunde",
- "sagen_legenden": "Sagen/Legenden",
- "schaetzen": "Schätzen",
- "sprachenkunde": "Sprachenkunde",
- "staatskunst": "Staatskunst",
- "sternkunde": "Sternenkunde",
- "tierkunde": "Tierkunde"
+ "Anatomie": "Anatomie",
+ "Baukunst": "Baukunst",
+ "Brett-/Kartenspiel": "Brett-/Kartenspiel",
+ "Geographie": "Geographie",
+ "Geschichtswissen": "Geschichtswissen",
+ "Gesteinskunde": "Gesteinskunde",
+ "Götter/Kulte": "Götter/Kulte",
+ "Heraldik": "Heraldik",
+ "Hüttenkunde": "Hüttenkunde",
+ "Kriegskunst": "Kriegskunst",
+ "Kryptographie": "Kryptographie",
+ "Magiekunde": "Magiekunde",
+ "Mechanik": "Mechanik",
+ "Pflanzenkunde": "Pflanzenkunde",
+ "Philosophie": "Philosophie",
+ "Rechnen": "Rechnen",
+ "Rechtskunde": "Rechtskunde",
+ "Sagen/Legenden": "Sagen/Legenden",
+ "Schätzen": "Schätzen",
+ "Sprachenkunde": "Sprachenkunde",
+ "Staatskunst": "Staatskunst",
+ "Sternenkunde": "Sternenkunde",
+ "Tierkunde": "Tierkunde"
}
},
@@ -212,53 +229,53 @@
"label": "Handwerkstalente",
"name": {
- "abrichten": "Abrichten",
- "ackerbau": "Ackerbau",
- "alchimie": "Alchimie",
- "bergbau": "Bergbau",
- "bogenbau": "Bogenbau",
- "boote_fahren": "Boote Fahren",
- "brauer": "Brauer",
- "drucker": "Drucker",
- "fahrzeug_lenken": "Fahrzeug Lenken",
- "falschspiel": "Falschspiel",
- "feinmechanik": "Feinmechanik",
- "feuersteinbearbeitung": "Feuersteinbearbeitung",
- "fleischer": "Fleischer",
- "gerber": "Gerber/Kürschner",
- "glaskunst": "Glaskunst",
- "grobschmied": "Grobschmied",
- "handel": "Handel",
- "hauswirtschaft": "Hauswirtschaft",
- "heilkunde_gift": "Heilkunde Gift",
- "heilkunde_krankheiten": "Heilkunde Krankheiten",
- "heilkunde_seele": "Heilkunde Seele",
- "heilkunde_wunden": "Heilkunde Wunden",
- "holzbearbeitung": "Holzbearbeitung",
- "instrumentenbauer": "Instrumentenbauer",
- "kartographie": "Kartographie",
- "kochen": "Kochen",
- "kristallzucht": "Kristallzucht",
- "lederarbeiten": "Lederarbeiten",
- "malen_zeichnen": "Malen/Zeichnen",
- "maurer": "Maurer",
- "metallguss": "Metallguss",
- "musizieren": "Musizieren",
- "schloesser_knacken": "Schlösser Knacken",
- "schnapps_brennen": "Schnapps Brennen",
- "schneidern": "Schneidern",
- "seefahrt": "Seefahrt",
- "seiler": "Seiler",
- "steinmetz": "Steinmetz",
- "juwelier": "Steinschneider/Juwelier",
- "stellmacher": "Stellmacher",
- "stoffe_faerben": "Stoffe Faerben",
- "taetowieren": "Tätowieren",
- "toepfern": "Töpfern",
- "viehzucht": "Viehzucht",
- "webkunst": "Webkunst",
- "winzer": "Winzer",
- "zimmermann": "Zimmermann"
+ "Abrichten": "Abrichten",
+ "Ackerbau": "Ackerbau",
+ "Alchimie": "Alchimie",
+ "Bergbau": "Bergbau",
+ "Bogenbau": "Bogenbau",
+ "Boote Fahren": "Boote Fahren",
+ "Brauer": "Brauer",
+ "Drucker": "Drucker",
+ "Fahrzeug Lenken": "Fahrzeug Lenken",
+ "Falschspiel": "Falschspiel",
+ "Feinmechanik": "Feinmechanik",
+ "Feuersteinbearbeitung": "Feuersteinbearbeitung",
+ "Fleischer": "Fleischer",
+ "Gerber/Kürschner": "Gerber/Kürschner",
+ "Glaskunst": "Glaskunst",
+ "Grobschmied": "Grobschmied",
+ "Handel": "Handel",
+ "Hauswirtschaft": "Hauswirtschaft",
+ "Heilkunde Gift": "Heilkunde Gift",
+ "Heilkunde Krankheiten": "Heilkunde Krankheiten",
+ "Heilkunde Seele": "Heilkunde Seele",
+ "Heilkunde Wunden": "Heilkunde Wunden",
+ "Holzbearbeitung": "Holzbearbeitung",
+ "Instrumentenbauer": "Instrumentenbauer",
+ "Kartographie": "Kartographie",
+ "Kochen": "Kochen",
+ "Kristallzucht": "Kristallzucht",
+ "Lederarbeiten": "Lederarbeiten",
+ "Malen/Zeichnen": "Malen/Zeichnen",
+ "Maurer": "Maurer",
+ "Metallguss": "Metallguss",
+ "Musizieren": "Musizieren",
+ "Schlösser Knacken": "Schlösser Knacken",
+ "Schnapps Brennen": "Schnapps Brennen",
+ "Schneidern": "Schneidern",
+ "Seefahrt": "Seefahrt",
+ "Seiler": "Seiler",
+ "Steinmetz": "Steinmetz",
+ "Steinschneider/Juwelier": "Steinschneider/Juwelier",
+ "Stellmacher": "Stellmacher",
+ "Stoffe Faerben": "Stoffe Faerben",
+ "Tätowieren": "Tätowieren",
+ "Töpfern": "Töpfern",
+ "Viehzucht": "Viehzucht",
+ "Webkunst": "Webkunst",
+ "Winzer": "Winzer",
+ "Zimmermann": "Zimmermann"
}
}
},
diff --git a/src/main.css b/src/main.css
index a989ade..dbba2a1 100644
--- a/src/main.css
+++ b/src/main.css
@@ -42,6 +42,11 @@
align-items: center;
}
+ & .fit-content {
+ width: fit-content;
+ height: fit-content;
+ }
+
& .row {
display: flex;
flex-direction: row;
@@ -117,14 +122,14 @@
&.editable-number {
text-align: center;
}
+ }
- & .placeholder {
- font-size: 0.8em;
- border-top: 1px solid;
- overflow: hidden;
- white-space: nowrap;
- text-overflow: ellipsis;
- }
+ & .placeholder {
+ font-size: 0.8em;
+ border-top: 1px solid;
+ overflow: hidden;
+ white-space: nowrap;
+ text-overflow: ellipsis;
}
& .character-image {
@@ -153,10 +158,15 @@
grid-column: 1;
}
+ &.die-type {
+ width: 2em;
+ height: 2em;
+ }
+
&.die-courage { fill: #b22319; }
&.die-cleverness { fill: #8158a3; }
&.die-intuition { fill: #388834; }
- &.die-charisma { fill: #0c0c0c; }
+ &.die-charisma { fill: #d96600; }
&.die-dexterity { fill: #d4b366; }
&.die-agility { fill: #678ec3; }
&.die-constitution { fill: #a3a3a3; }
@@ -164,10 +174,10 @@
&.die-attacke { fill: #b22319; }
&.die-parade { fill: #388834; }
- &.die-trefferpunkte { fill: #0c0c0c; }
+ &.die-trefferpunkte { fill: #a2a0ee; }
&.die-fernkampf-attacke { fill: #388834; }
- &.die-fernkampf-trefferpunkte { fill: #0c0c0c; }
+ &.die-fernkampf-trefferpunkte { fill: #a2a0ee; }
}
& .tabs {
@@ -194,7 +204,7 @@
& .list {
display: grid;
- background: #252830;
+ border-radius: 5px 5px 5px 5px;
box-shadow: 0 0 6px rgba(0, 0, 0, 0.45);
& .item-image {
@@ -226,11 +236,14 @@
align-items: center;
+ background: #252830;
+
padding: 0.25rem;
border-bottom: 1px dotted;
&:last-child {
border: none;
+ border-radius: 0px 0px 5px 5px;
}
}
@@ -277,18 +290,14 @@
}
&[data-tab="tab2"] {
- & .Kampftalente {
- display: grid;
- grid-template-columns: minmax(0, max-content) repeat(5, minmax(0, 1fr));
- align-items: center;
- gap: 0.5rem;
+ grid-template-columns: minmax(0, max-content) repeat(2, minmax(0, 1fr)) min-content;
+
+ & > * {
+ grid-column: 1 / -1;
}
- & .Talente {
- display: grid;
- grid-template-columns: minmax(0, max-content) repeat(2, minmax(0, 1fr));
- align-items: center;
- gap: 0.5rem;
+ & .Kampftalente {
+ grid-template-columns: minmax(0, max-content) repeat(5, minmax(0, 1fr));
}
}
@@ -322,3 +331,21 @@
padding-top: 0.5rem;
}
}
+
+.talent_chat_message {
+ & .info {
+ display: grid;
+ grid-template-columns: repeat(4, minmax(min-content, 1fr));
+ text-wrap: nowrap;
+
+ & > * {
+ display: grid;
+ grid-column: 1 / -1;
+ grid-template-columns: subgrid;
+
+ & > *:not(:first-child) {
+ text-align: center;
+ }
+ }
+ }
+}
diff --git a/src/main.mjs b/src/main.mjs
index 711a82b..72bb443 100644
--- a/src/main.mjs
+++ b/src/main.mjs
@@ -4,6 +4,10 @@ const { ActorSheetV2, ItemSheetV2 } = foundry.applications.sheets;
const { ApplicationV2, HandlebarsApplicationMixin } = foundry.applications.api;
const { OperatorTerm, NumericTerm } = foundry.dice.terms;
+Hooks.once("i18nInit", async function() {
+ game.i18n._fallback = foundry.utils.mergeObject(await game.i18n._getTranslations("de"), game.i18n._fallback);
+});
+
Hooks.once("init", async function() {
CONFIG.Combat.initiative.formula = "1d6 + @computed.ini_basiswert[INI-Basiswert]";
@@ -13,6 +17,7 @@ Hooks.once("init", async function() {
CONFIG.Item.dataModels.Gegenstand = DSA41_GegenstandData;
CONFIG.Item.dataModels.Ruestung = DSA41_RuestungData;
CONFIG.Item.dataModels.Bewaffnung = DSA41_BewaffnungData;
+ CONFIG.Item.dataModels.Talent = DSA41_TalentData;
DocumentSheetConfig.unregisterSheet(Actor, "core", ActorSheet);
DocumentSheetConfig.registerSheet(Actor, "dsa41", DSA41_ActorSheet, {
@@ -29,9 +34,19 @@ Hooks.once("init", async function() {
"Gegenstand",
"Ruestung",
"Bewaffnung",
+ "Talent",
]
});
+ Handlebars.registerHelper({
+ maybeLocalize: (value, options) => {
+ const prefix = options.hash.prefix ? options.hash.prefix.string : null;
+ if (prefix)
+ return game.i18n.localize(prefix + "." + value);
+ return value;
+ }
+ });
+
await loadTemplates({
"editable-input": "systems/dsa-4th-edition/src/EditableInput.hbs",
@@ -44,10 +59,7 @@ Hooks.once("init", async function() {
"fernkampf_attacke_tooltip": "systems/dsa-4th-edition/src/Tooltips/FernkampfAttacke.hbs",
"fernkampf_trefferpunkte_tooltip": "systems/dsa-4th-edition/src/Tooltips/FernkampfTrefferpunkte.hbs",
- "trefferpunkte_dialog": "systems/dsa-4th-edition/src/Dialog/Trefferpunkte.hbs",
-
- "fernkampf_angriff_dialog": "systems/dsa-4th-edition/src/Dialogs/FernkampfAngriff.hbs",
- "fernkampf_trefferpunkte_dialog": "systems/dsa-4th-edition/src/Dialogs/FernkampfTrefferpunkte.hbs",
+ "talent_chat": "systems/dsa-4th-edition/src/Chat/Talent.hbs",
});
});
@@ -119,8 +131,56 @@ class CombatTalentField extends SchemaField {
}
}
-class DSA41_CharacterDocument extends Actor {
+
+class DSA41_CharacterDocument extends Actor {
+ static async create(data, operation) {
+ const actor = await super.create(data, operation);
+
+ if (data.type === "Player") {
+ const talente_compendium = game.packs.get("dsa-4th-edition.talente");
+ const talente = await talente_compendium.getDocuments({ name__in: [
+ // Basis Körperliche Talente
+ "Athletik",
+ "Klettern",
+ "Körperbeherrschung",
+ "Schleichen",
+ "Schwimmen",
+ "Selbstbeherrschung",
+ "Sich Verstecken",
+ "Singen",
+ "Sinnenschärfe",
+ "Tanzen",
+ "Zechen",
+
+ // Basis Gesellschaftliche Talente
+ "Menschenkenntnis",
+ "Überreden",
+
+ // Basis Natur Talente
+ "Fährtensuchen",
+ "Orientierung",
+ "Wildnisleben",
+
+ // Basis Wissens Talente
+ "Götter/Kulte",
+ "Rechnen",
+ "Sagen/Legenden",
+
+ // Basis Handwerks Talente
+ "Heilkunde Wunden",
+ "Holzbearbeitung",
+ "Kochen",
+ "Lederarbeiten",
+ "Malen/Zeichnen",
+ "Schneidern",
+ ]});
+
+ await actor.createEmbeddedDocuments("Item", talente);
+ }
+
+ return actor;
+ }
}
class DSA41_CharacterData extends TypeDataModel {
@@ -170,134 +230,6 @@ class DSA41_CharacterData extends TypeDataModel {
zweihand_hiebwaffen: new CombatTalentField(),
zweihandschwerter: new CombatTalentField(),
}),
-
- talente: new SchemaField({
- koerperliche: new SchemaField({
- akrobatik: new NumberField({ integer: true, initial: 0 }),
- athletik: new NumberField({ integer: true, initial: 0 }),
- fliegen: new NumberField({ integer: true, initial: 0 }),
- gaukeleien: new NumberField({ integer: true, initial: 0 }),
- klettern: new NumberField({ integer: true, initial: 0 }),
- koerperbeherrschung: new NumberField({ integer: true, initial: 0 }),
- reiten: new NumberField({ integer: true, initial: 0 }),
- schleichen: new NumberField({ integer: true, initial: 0 }),
- schwimmen: new NumberField({ integer: true, initial: 0 }),
- selbstbeherrschung: new NumberField({ integer: true, initial: 0 }),
- sich_verstecken: new NumberField({ integer: true, initial: 0 }),
- singen: new NumberField({ integer: true, initial: 0 }),
- sinnenschärfe: new NumberField({ integer: true, initial: 0 }),
- skifahren: new NumberField({ integer: true, initial: 0 }),
- stimmen_imitieren: new NumberField({ integer: true, initial: 0 }),
- tanzen: new NumberField({ integer: true, initial: 0 }),
- taschendiebstahl: new NumberField({ integer: true, initial: 0 }),
- zechen: new NumberField({ integer: true, initial: 0 }),
- }),
-
- gesellschaftliche: new SchemaField({
- betoeren: new NumberField({ integer: true, initial: 0 }),
- etikette: new NumberField({ integer: true, initial: 0 }),
- gassenwissen: new NumberField({ integer: true, initial: 0 }),
- lehren: new NumberField({ integer: true, initial: 0 }),
- menschenkenntnis: new NumberField({ integer: true, initial: 0 }),
- schauspielerei: new NumberField({ integer: true, initial: 0 }),
- schriftlicher_ausdruck: new NumberField({ integer: true, initial: 0 }),
- sich_verkleiden: new NumberField({ integer: true, initial: 0 }),
- ueberreden: new NumberField({ integer: true, initial: 0 }),
- ueberzeugen: new NumberField({ integer: true, initial: 0 }),
- }),
-
- natur: new SchemaField({
- faehrtensuchen: new NumberField({ integer: true, initial: 0 }),
- fallenstellen: new NumberField({ integer: true, initial: 0 }),
- fesseln: new NumberField({ integer: true, initial: 0 }),
- fischen: new NumberField({ integer: true, initial: 0 }),
- orientierung: new NumberField({ integer: true, initial: 0 }),
- wettervorhersage: new NumberField({ integer: true, initial: 0 }),
- wildnisleben: new NumberField({ integer: true, initial: 0 }),
- }),
-
- wissens: new SchemaField({
- anatomie: new NumberField({ integer: true, initial: 0 }),
- baukunst: new NumberField({ integer: true, initial: 0 }),
- brett_kartenspiel: new NumberField({ integer: true, initial: 0 }),
- geographie: new NumberField({ integer: true, initial: 0 }),
- geschichtswissen: new NumberField({ integer: true, initial: 0 }),
- gesteinskunde: new NumberField({ integer: true, initial: 0 }),
- goetter_kulte: new NumberField({ integer: true, initial: 0 }),
- heraldik: new NumberField({ integer: true, initial: 0 }),
- huettenkunde: new NumberField({ integer: true, initial: 0 }),
- kriegskunst: new NumberField({ integer: true, initial: 0 }),
- kryptographie: new NumberField({ integer: true, initial: 0 }),
- magiekunde: new NumberField({ integer: true, initial: 0 }),
- mechanik: new NumberField({ integer: true, initial: 0 }),
- pflanzenkunde: new NumberField({ integer: true, initial: 0 }),
- philosophie: new NumberField({ integer: true, initial: 0 }),
- rechnen: new NumberField({ integer: true, initial: 0 }),
- rechtskunde: new NumberField({ integer: true, initial: 0 }),
- sagen_legenden: new NumberField({ integer: true, initial: 0 }),
- schaetzen: new NumberField({ integer: true, initial: 0 }),
- sprachenkunde: new NumberField({ integer: true, initial: 0 }),
- staatskunst: new NumberField({ integer: true, initial: 0 }),
- sternkunde: new NumberField({ integer: true, initial: 0 }),
- tierkunde: new NumberField({ integer: true, initial: 0 }),
- }),
-
- sprachen: new SchemaField({
- lesen_schreiben: new NumberField({ integer: true, initial: 0 }),
- muttersprache: new NumberField({ integer: true, initial: 0 }),
- fremdsprache: new NumberField({ integer: true, initial: 0 }),
- }),
-
- handwerks: new SchemaField({
- abrichten: new NumberField({ integer: true, initial: 0 }),
- ackerbau: new NumberField({ integer: true, initial: 0 }),
- alchimie: new NumberField({ integer: true, initial: 0 }),
- bergbau: new NumberField({ integer: true, initial: 0 }),
- bogenbau: new NumberField({ integer: true, initial: 0 }),
- boote_fahren: new NumberField({ integer: true, initial: 0 }),
- brauer: new NumberField({ integer: true, initial: 0 }),
- drucker: new NumberField({ integer: true, initial: 0 }),
- fahrzeug_lenken: new NumberField({ integer: true, initial: 0 }),
- falschspiel: new NumberField({ integer: true, initial: 0 }),
- feinmechanik: new NumberField({ integer: true, initial: 0 }),
- feuersteinbearbeitung: new NumberField({ integer: true, initial: 0 }),
- fleischer: new NumberField({ integer: true, initial: 0 }),
- gerber: new NumberField({ integer: true, initial: 0 }),
- glaskunst: new NumberField({ integer: true, initial: 0 }),
- grobschmied: new NumberField({ integer: true, initial: 0 }),
- handel: new NumberField({ integer: true, initial: 0 }),
- hauswirtschaft: new NumberField({ integer: true, initial: 0 }),
- heilkunde_gift: new NumberField({ integer: true, initial: 0 }),
- heilkunde_krankheiten: new NumberField({ integer: true, initial: 0 }),
- heilkunde_seele: new NumberField({ integer: true, initial: 0 }),
- heilkunde_wunden: new NumberField({ integer: true, initial: 0 }),
- holzbearbeitung: new NumberField({ integer: true, initial: 0 }),
- instrumentenbauer: new NumberField({ integer: true, initial: 0 }),
- kartographie: new NumberField({ integer: true, initial: 0 }),
- kochen: new NumberField({ integer: true, initial: 0 }),
- kristallzucht: new NumberField({ integer: true, initial: 0 }),
- lederarbeiten: new NumberField({ integer: true, initial: 0 }),
- malen_zeichnen: new NumberField({ integer: true, initial: 0 }),
- maurer: new NumberField({ integer: true, initial: 0 }),
- metallguss: new NumberField({ integer: true, initial: 0 }),
- musizieren: new NumberField({ integer: true, initial: 0 }),
- schloesser_knacken: new NumberField({ integer: true, initial: 0 }),
- schnapps_brennen: new NumberField({ integer: true, initial: 0 }),
- schneidern: new NumberField({ integer: true, initial: 0 }),
- seefahrt: new NumberField({ integer: true, initial: 0 }),
- seiler: new NumberField({ integer: true, initial: 0 }),
- steinmetz: new NumberField({ integer: true, initial: 0 }),
- juwelier: new NumberField({ integer: true, initial: 0 }),
- stellmacher: new NumberField({ integer: true, initial: 0 }),
- stoffe_faerben: new NumberField({ integer: true, initial: 0 }),
- taetowieren: new NumberField({ integer: true, initial: 0 }),
- toepfern: new NumberField({ integer: true, initial: 0 }),
- viehzucht: new NumberField({ integer: true, initial: 0 }),
- webkunst: new NumberField({ integer: true, initial: 0 }),
- winzer: new NumberField({ integer: true, initial: 0 }),
- zimmermann: new NumberField({ integer: true, initial: 0 }),
- }),
- }),
}
}
@@ -460,6 +392,15 @@ class DSA41_CharacterData extends TypeDataModel {
computed.trefferpunkte = get_minified_formula(item.system.fernkampfwaffe.basis);
computed.trefferpunkte_display = computed.trefferpunkte.replace(/[\+\-]/, (op) => "
" + op);
}
+
+ const talente = this.parent.items.filter((x) => x.type === "Talent").sort((a, b) => a.name.localeCompare(b.name));
+ this.talente = {
+ koerperliche: talente.filter((x) => x.system.kategorie === "koerperliche"),
+ gesellschaftliche: talente.filter((x) => x.system.kategorie === "gesellschaftliche"),
+ natur: talente.filter((x) => x.system.kategorie === "natur"),
+ wissens: talente.filter((x) => x.system.kategorie === "wissens"),
+ handwerks: talente.filter((x) => x.system.kategorie === "handwerks"),
+ };
}
}
@@ -572,6 +513,21 @@ class DSA41_BewaffnungData extends TypeDataModel {
}
}
+class DSA41_TalentData extends TypeDataModel {
+ static defineSchema() {
+ return {
+ kategorie: new StringField(),
+ behinderung: new StringField(),
+
+ attribute1: new StringField(),
+ attribute2: new StringField(),
+ attribute3: new StringField(),
+
+ talentwert: new NumberField({ integer: true, initial: 0 }),
+ };
+ }
+}
+
function DSA41_ApplicationMixin(BaseApplication) {
class DSA41_Application extends HandlebarsApplicationMixin(BaseApplication) {
static DEFAULT_OPTIONS= {
@@ -591,6 +547,7 @@ function DSA41_ApplicationMixin(BaseApplication) {
class DSA41_Dialog extends DSA41_ApplicationMixin(ApplicationV2) {
static PARTS = {
Eigenschaft: { template: "systems/dsa-4th-edition/src/Dialogs/Attribute.hbs" },
+ Talent: { template: "systems/dsa-4th-edition/src/Dialogs/Talent.hbs" },
Attacke: { template: "systems/dsa-4th-edition/src/Dialogs/Attacke.hbs" },
Parade: { template: "systems/dsa-4th-edition/src/Dialogs/Parade.hbs" },
@@ -672,8 +629,8 @@ class DSA41_ActorSheet extends DSA41_ApplicationMixin(ActorSheetV2) {
position: { width: "800", height: "650" },
actions: {
"roll": async function(event, target) {
- var roll_formula = event.target.closest("[data-roll]").dataset.roll;
- const roll_type = event.target.closest("[data-roll-type]").dataset.rollType;
+ var roll_formula = event.target.closest("[data-roll]")?.dataset.roll;
+ const roll_type = event.target.closest("[data-roll-type]")?.dataset.rollType;
const success_value = event.target.closest("[data-success-value]")?.dataset.successValue;
const item_id = event.target.closest("[data-item-id]")?.dataset.itemId;
const item = this.document.items.get(item_id);
@@ -702,10 +659,59 @@ class DSA41_ActorSheet extends DSA41_ApplicationMixin(ActorSheetV2) {
return;
}
- if (roll_type == "attacke") {
- const item = this.document.system.computed.kampf.waffen[item_id];
+ if (roll_type == "talent") {
const title = game.i18n.localize("DSA41.roll_types." + roll_type) + ": " + item.name;
- const data = await DSA41_Dialog.wait("Attacke", { window: { title: title }, item: item });
+ const data = await DSA41_Dialog.wait("Talent", { window: {title: title}, item: item });
+ const talentwert = item.system.talentwert + data.modifikator;
+
+ const roll1 = (await new Roll("1d20").evaluate()).total;
+ const roll2 = (await new Roll("1d20").evaluate()).total;
+ const roll3 = (await new Roll("1d20").evaluate()).total;
+
+ const attribute1 = this.document.system.computed.attributes[item.system.attribute1];
+ const attribute2 = this.document.system.computed.attributes[item.system.attribute2];
+ const attribute3 = this.document.system.computed.attributes[item.system.attribute3];
+
+ const needed_taw_roll1 = Math.max(roll1 - attribute1, 0);
+ const needed_taw_roll2 = Math.max(roll2 - attribute2, 0);
+ const needed_taw_roll3 = Math.max(roll3 - attribute3, 0);
+
+ const leftover_taw = talentwert - needed_taw_roll1 - needed_taw_roll2 - needed_taw_roll3;
+
+ const context = {
+ talent: item,
+ modifikator: data.modifikator,
+
+ attribute1: { type: item.system.attribute1, value: attribute1 },
+ attribute2: { type: item.system.attribute2, value: attribute2 },
+ attribute3: { type: item.system.attribute3, value: attribute3 },
+
+ roll1: roll1,
+ roll2: roll2,
+ roll3: roll3,
+
+ needed_taw_roll1: -needed_taw_roll1,
+ needed_taw_roll2: -needed_taw_roll2,
+ needed_taw_roll3: -needed_taw_roll3,
+
+ leftover_taw: leftover_taw,
+ };
+
+ const message = await ChatMessage.create(
+ {
+ content: await renderTemplate("talent_chat", context),
+ speaker: { actor: this.actor },
+ sound: CONFIG.sounds.dice,
+ },
+ );
+
+ return;
+ }
+
+ if (roll_type == "attacke") {
+ const item = this.document.system.computed.kampf.waffen[item_id];
+ const title = game.i18n.localize("DSA41.roll_types." + roll_type) + ": " + item.name;
+ const data = await DSA41_Dialog.wait("Attacke", { window: { title: title }, item: item });
let flavor = game.i18n.localize("DSA41.roll_types." + roll_type);
if (typeof success_value !== 'undefined') {
@@ -836,6 +842,18 @@ class DSA41_ActorSheet extends DSA41_ApplicationMixin(ActorSheetV2) {
});
}
+ // allow changing embedded item fields
+ async _onChangeForm(formConfig, event) {
+ const item_id = event.target.closest("[data-item-id]")?.dataset.itemId;
+ const data_name = event.target.dataset.name;
+ if (!item_id || !data_name) return super._onChangeForm(formConfig, event);
+
+ event.stopImmediatePropagation();
+ const item = await this.actor.items.get(item_id);
+ const value = event.target.value;
+ item.update({ [data_name]: value });
+ }
+
tabGroups = { primary: "tab1" };
}
@@ -844,6 +862,7 @@ class DSA41_ItemSheetV2 extends DSA41_ApplicationMixin(ItemSheetV2) {
Bewaffnung: { template: "systems/dsa-4th-edition/src/ItemSheets/Bewaffnung.hbs" },
Gegenstand: { template: "systems/dsa-4th-edition/src/ItemSheets/Gegenstand.hbs" },
Ruestung: { template: "systems/dsa-4th-edition/src/ItemSheets/Ruestung.hbs" },
+ Talent: { template: "systems/dsa-4th-edition/src/ItemSheets/Talent.hbs" },
};
static DEFAULT_OPTIONS = {
diff --git a/system.json b/system.json
index bec4451..8c786ca 100644
--- a/system.json
+++ b/system.json
@@ -5,21 +5,39 @@
"esmodules": ["src/main.mjs"],
"styles": ["src/main.css"],
+ "documentTypes": {
+ "Actor": {
+ "Player": {}
+ },
+ "Item": {
+ "Gegenstand": {},
+ "Ruestung": {},
+ "Bewaffnung": {},
+
+ "Talent": {}
+ }
+ },
+
+ "packs": [
+ {
+ "system": "dsa-4th-edition",
+ "path": "packs/talente",
+ "type": "Item",
+ "name": "talente",
+ "label": "Talente"
+ }
+ ],
+
"languages": [
{
"lang": "de",
"name": "German (Deutsch)",
"path": "src/lang/de.json"
- },
- {
- "lang": "en",
- "name": "English",
- "path": "src/lang/en.json"
}
],
"compatibility": {
- "minimum": "12",
+ "minimum": "12",
"verified": "12"
},
diff --git a/template.json b/template.json
deleted file mode 100644
index cd6ada5..0000000
--- a/template.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "Actor": {
- "types": [
- "Player"
- ]
- },
- "Item": {
- "types": [
- "Gegenstand",
- "Ruestung",
- "Bewaffnung"
- ]
- }
-}
\ No newline at end of file
diff --git a/zig/compendium_creator.zig b/zig/compendium_creator.zig
new file mode 100644
index 0000000..80abe12
--- /dev/null
+++ b/zig/compendium_creator.zig
@@ -0,0 +1,160 @@
+const std = @import("std");
+const leveldb = @import("leveldb");
+const foundry = @import("foundry.zig");
+const system = @import("system.zig");
+
+const talente: system.ItemCompendium = .{ .entries = &.{
+ // Körperliche Talente
+ .{ .Folder = .{ .name = "Körperliche Talente", .entries = &.{
+ .{ .Talent = .{ .name = "Akrobatik", .system = .{ .kategorie = .koerperliche, .attribute1 = .MU, .attribute2 = .GE, .attribute3 = .KK, .behinderung = "@BE * 2" } } },
+ .{ .Talent = .{ .name = "Athletik", .system = .{ .kategorie = .koerperliche, .attribute1 = .GE, .attribute2 = .KO, .attribute3 = .KK, .behinderung = "@BE * 2" } } },
+ .{ .Talent = .{ .name = "Fliegen", .system = .{ .kategorie = .koerperliche, .attribute1 = .MU, .attribute2 = .IN, .attribute3 = .GE, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Gaukeleien", .system = .{ .kategorie = .koerperliche, .attribute1 = .MU, .attribute2 = .CH, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Klettern", .system = .{ .kategorie = .koerperliche, .attribute1 = .MU, .attribute2 = .GE, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Körperbeherrschung", .system = .{ .kategorie = .koerperliche, .attribute1 = .MU, .attribute2 = .IN, .attribute3 = .GE, .behinderung = "@BE * 2" } } },
+ .{ .Talent = .{ .name = "Reiten", .system = .{ .kategorie = .koerperliche, .attribute1 = .CH, .attribute2 = .GE, .attribute3 = .KK, .behinderung = "@BE - 2" } } },
+ .{ .Talent = .{ .name = "Schleichen", .system = .{ .kategorie = .koerperliche, .attribute1 = .MU, .attribute2 = .IN, .attribute3 = .GE, .behinderung = "@BE" } } },
+ .{ .Talent = .{ .name = "Schwimmen", .system = .{ .kategorie = .koerperliche, .attribute1 = .GE, .attribute2 = .KO, .attribute3 = .KK, .behinderung = "@BE * 2" } } },
+ .{ .Talent = .{ .name = "Selbstbeherrschung", .system = .{ .kategorie = .koerperliche, .attribute1 = .MU, .attribute2 = .KO, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Sich Verstecken", .system = .{ .kategorie = .koerperliche, .attribute1 = .MU, .attribute2 = .IN, .attribute3 = .GE, .behinderung = "@BE * 2" } } },
+ .{ .Talent = .{ .name = "Singen", .system = .{ .kategorie = .koerperliche, .attribute1 = .IN, .attribute2 = .CH, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Singen", .system = .{ .kategorie = .koerperliche, .attribute1 = .IN, .attribute2 = .CH, .attribute3 = .KO, .behinderung = "" } } }, // FIXME: duplicate of "Singen" above (attribute3 CH vs KO) — both will be written to the compendium; keep one
+ .{ .Talent = .{ .name = "Sinnenschärfe", .system = .{ .kategorie = .koerperliche, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Sinnenschärfe", .system = .{ .kategorie = .koerperliche, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .FF, .behinderung = "" } } }, // FIXME: duplicate of "Sinnenschärfe" above (attribute3 IN vs FF) — both will be written to the compendium; keep one
+ .{ .Talent = .{ .name = "Skifahren", .system = .{ .kategorie = .koerperliche, .attribute1 = .GE, .attribute2 = .GE, .attribute3 = .KO, .behinderung = "@BE - 2" } } },
+ .{ .Talent = .{ .name = "Stimmen Imitieren", .system = .{ .kategorie = .koerperliche, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "@BE - 4" } } },
+ .{ .Talent = .{ .name = "Tanzen", .system = .{ .kategorie = .koerperliche, .attribute1 = .CH, .attribute2 = .GE, .attribute3 = .GE, .behinderung = "@BE * 2" } } },
+ .{ .Talent = .{ .name = "Taschendiebstahl", .system = .{ .kategorie = .koerperliche, .attribute1 = .MU, .attribute2 = .IN, .attribute3 = .FF, .behinderung = "@BE * 2" } } },
+ .{ .Talent = .{ .name = "Zechen", .system = .{ .kategorie = .koerperliche, .attribute1 = .IN, .attribute2 = .KO, .attribute3 = .KK, .behinderung = "" } } },
+ }}},
+
+ // Gesellschaftliche Talente
+ .{ .Folder = .{ .name = "Gesellschaftliche Talente", .entries = &.{
+ .{ .Talent = .{ .name = "Betören", .system = .{ .kategorie = .gesellschaftliche, .attribute1 = .IN, .attribute2 = .CH, .attribute3 = .CH, .behinderung = "@BE - 2" } } },
+ .{ .Talent = .{ .name = "Etikette", .system = .{ .kategorie = .gesellschaftliche, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "@BE - 2" } } },
+ .{ .Talent = .{ .name = "Gassenwissen", .system = .{ .kategorie = .gesellschaftliche, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "@BE - 4" } } },
+ .{ .Talent = .{ .name = "Lehren", .system = .{ .kategorie = .gesellschaftliche, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Menschenkenntnis", .system = .{ .kategorie = .gesellschaftliche, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Schauspielerei", .system = .{ .kategorie = .gesellschaftliche, .attribute1 = .MU, .attribute2 = .KL, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Schriftlicher Ausdruck", .system = .{ .kategorie = .gesellschaftliche, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Sich Verkleiden", .system = .{ .kategorie = .gesellschaftliche, .attribute1 = .MU, .attribute2 = .CH, .attribute3 = .GE, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Überreden", .system = .{ .kategorie = .gesellschaftliche, .attribute1 = .MU, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Überzeugen", .system = .{ .kategorie = .gesellschaftliche, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "" } } },
+ }}},
+
+ // Natur Talente
+ .{ .Folder = .{ .name = "Natur-Talente", .entries = &.{
+ .{ .Talent = .{ .name = "Fährtensuchen", .system = .{ .kategorie = .natur, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Fährtensuchen", .system = .{ .kategorie = .natur, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .KO, .behinderung = "" } } }, // FIXME: duplicate of "Fährtensuchen" above (attribute3 IN vs KO) — both will be written to the compendium; keep one
+ .{ .Talent = .{ .name = "Fallenstellen", .system = .{ .kategorie = .natur, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Fesseln/Entfesseln", .system = .{ .kategorie = .natur, .attribute1 = .FF, .attribute2 = .GE, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Fischen/Angeln", .system = .{ .kategorie = .natur, .attribute1 = .IN, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Orientierung", .system = .{ .kategorie = .natur, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Wettervorhersage", .system = .{ .kategorie = .natur, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Wildnisleben", .system = .{ .kategorie = .natur, .attribute1 = .IN, .attribute2 = .GE, .attribute3 = .KO, .behinderung = "" } } },
+ }}},
+
+ // Wissens Talente
+ .{ .Folder = .{ .name = "Wissens Talente", .entries = &.{
+ .{ .Talent = .{ .name = "Anatomie", .system = .{ .kategorie = .wissens, .attribute1 = .MU, .attribute2 = .KL, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Baukunst", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Brett-/Kartenspiel", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Geographie", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Geschichtswissen", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Gesteinskunde", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Götter/Kulte", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Heraldik", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Hüttenkunde", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .KO, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Kriegskunst", .system = .{ .kategorie = .wissens, .attribute1 = .MU, .attribute2 = .KL, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Kryptographie", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Magiekunde", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Mechanik", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Pflanzenkunde", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Philosophie", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Rechnen", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Rechtskunde", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Sagen/Legenden", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Schätzen", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Sprachenkunde", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Staatskunst", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Sternkunde", .system = .{ .kategorie = .wissens, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Tierkunde", .system = .{ .kategorie = .wissens, .attribute1 = .MU, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ }}},
+
+ // Handwerks Talente
+ .{ .Folder = .{ .name = "Handwerks Talente", .entries = &.{
+ .{ .Talent = .{ .name = "Abrichten", .system = .{ .kategorie = .handwerks, .attribute1 = .MU, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Ackerbau", .system = .{ .kategorie = .handwerks, .attribute1 = .IN, .attribute2 = .FF, .attribute3 = .KO, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Alchimie", .system = .{ .kategorie = .handwerks, .attribute1 = .MU, .attribute2 = .KL, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Bergbau", .system = .{ .kategorie = .handwerks, .attribute1 = .IN, .attribute2 = .KO, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Bogenbau", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Boote Fahren", .system = .{ .kategorie = .handwerks, .attribute1 = .GE, .attribute2 = .KO, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Brauer", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Drucker", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Fahrzeug Lenken", .system = .{ .kategorie = .handwerks, .attribute1 = .IN, .attribute2 = .CH, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Falschspiel", .system = .{ .kategorie = .handwerks, .attribute1 = .MU, .attribute2 = .CH, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Feinmechanik", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Feuersteinbearbeitung", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Fleischer", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Gerber/Kürschner", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KO, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Glaskunst", .system = .{ .kategorie = .handwerks, .attribute1 = .FF, .attribute2 = .FF, .attribute3 = .KO, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Grobschmied", .system = .{ .kategorie = .handwerks, .attribute1 = .FF, .attribute2 = .KO, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Handel", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Hauswirtschaft", .system = .{ .kategorie = .handwerks, .attribute1 = .IN, .attribute2 = .CH, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Heilkunde Gift", .system = .{ .kategorie = .handwerks, .attribute1 = .MU, .attribute2 = .KL, .attribute3 = .IN, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Heilkunde Krankheiten", .system = .{ .kategorie = .handwerks, .attribute1 = .MU, .attribute2 = .KL, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Heilkunde Seele", .system = .{ .kategorie = .handwerks, .attribute1 = .IN, .attribute2 = .CH, .attribute3 = .CH, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Heilkunde Wunden", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .CH, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Holzbearbeitung", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Instrumentenbauer", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Kartographie", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .KL, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Kochen", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Kristallzucht", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Lederarbeiten", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Malen/Zeichnen", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Maurer", .system = .{ .kategorie = .handwerks, .attribute1 = .FF, .attribute2 = .GE, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Metallguss", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Musizieren", .system = .{ .kategorie = .handwerks, .attribute1 = .IN, .attribute2 = .CH, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Schlösser Knacken", .system = .{ .kategorie = .handwerks, .attribute1 = .IN, .attribute2 = .FF, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Schnaps Brennen", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Schneidern", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Seefahrt", .system = .{ .kategorie = .handwerks, .attribute1 = .FF, .attribute2 = .GE, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Seiler", .system = .{ .kategorie = .handwerks, .attribute1 = .FF, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Steinmetz", .system = .{ .kategorie = .handwerks, .attribute1 = .FF, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Steinschneider/Juwelier", .system = .{ .kategorie = .handwerks, .attribute1 = .IN, .attribute2 = .FF, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Stellmacher", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Stoffe Färben", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Tätowieren", .system = .{ .kategorie = .handwerks, .attribute1 = .IN, .attribute2 = .FF, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Töpfern", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .FF, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Viehzucht", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .IN, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Webkunst", .system = .{ .kategorie = .handwerks, .attribute1 = .FF, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Winzer", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ .{ .Talent = .{ .name = "Zimmermann", .system = .{ .kategorie = .handwerks, .attribute1 = .KL, .attribute2 = .FF, .attribute3 = .KK, .behinderung = "" } } },
+ }}},
+}};
+
+pub fn main() !void {
+ const progress = std.Progress.start(.{ .root_name = "Building compendiums", .estimated_total_items = 1 });
+ defer progress.end();
+
+ try std.fs.cwd().makePath("packs");
+ try talente.serialize("packs/talente"); progress.completeOne();
+}
+
+fn print_contents(path: [:0]const u8) !void {
+ var diagnostic: leveldb.Diagnostic = null;
+ const db = leveldb.open(.{ .path = path, .diagnostic = &diagnostic }) catch |err| {
+ std.log.err("leveldb.open failed: {s}", .{ diagnostic.? });
+ return err;
+ };
+ defer db.close();
+
+ const iter = db.iterator(.{});
+ defer iter.destroy();
+
+ iter.seek_to_first();
+ while(iter.is_valid()) {
+ defer iter.next();
+ std.log.debug("key: '{s}' value: '{s}'", .{ iter.key(), iter.value() });
+ }
+}
diff --git a/zig/foundry.zig b/zig/foundry.zig
new file mode 100644
index 0000000..4edcf91
--- /dev/null
+++ b/zig/foundry.zig
@@ -0,0 +1,164 @@
+const std = @import("std");
+const leveldb = @import("leveldb");
+const system = @import("system.zig");
+
+const String = []const u8;
+
+const CORE_VERSION = "12.331";
+const SYSTEM_NAME = system.SYSTEM_NAME;
+const SYSTEM_VERSION = system.SYSTEM_VERSION;
+
+// Entry must be a tagged union with one tag being
+// Folder: struct {
+// name: String,
+// entries: []const Entry,
+// },
+// and the other tags being the respective Foundry type name
+pub fn Compendium(base_type: BaseType, Entry: type) type {
+ if (std.meta.activeTag(@typeInfo(Entry)) != .Union) @compileError("Entry must be a tagged union.");
+ if (@typeInfo(Entry).Union.tag_type == null) @compileError("Entry must be a tagged union.");
+
+ return struct {
+ entries: []const Entry = &.{},
+
+ pub fn serialize(self: @This(), path: [:0]const u8) !void {
+ try leveldb.destroy(.{ .path = path });
+
+ var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+ defer arena.deinit();
+
+ var diagnostic: leveldb.Diagnostic = null;
+ const db = leveldb.open(.{ .path = path, .diagnostic = &diagnostic, .options = .{ .create_if_missing = true, .compression = .Snappy } }) catch |err| {
+ std.log.err("leveldb.open failed: {s}", .{ diagnostic.? });
+ return err;
+ };
+ defer db.close();
+
+ for (self.entries) |entry| {
+ defer _ = arena.reset(.retain_capacity);
+ try serialize_entry(arena.allocator(), db, entry, null);
+ }
+ }
+
+ fn serialize_entry(allocator: std.mem.Allocator, db: leveldb, entry: Entry, folder: ?String) !void{
+ switch (entry) {
+ .Folder => |_folder| {
+ const foundry_folder: Folder(base_type) = .{
+ .name = _folder.name,
+ ._id = &random_id(),
+ .folder = folder,
+ };
+
+ const key = try std.fmt.allocPrintZ(allocator, "!folders!{s}", .{ foundry_folder._id.? });
+ const value = try std.json.stringifyAlloc(allocator, foundry_folder, .{});
+ try db.put(.{ .key = key, .value = value });
+
+ for (_folder.entries) |folder_entry| {
+ try serialize_entry(allocator, db, folder_entry, foundry_folder._id);
+ }
+ },
+ inline else => |item| {
+ std.debug.assert(item.folder == null);
+
+ var foundry_item = item;
+ foundry_item.folder = folder;
+ if (foundry_item._id == null)
+ foundry_item._id = &random_id();
+
+ const key = try std.fmt.allocPrintZ(allocator, "!{s}!{s}", .{ base_type.to_compendium_type(), foundry_item._id.? });
+ const value = try std.json.stringifyAlloc(allocator, foundry_item, .{});
+ try db.put(.{ .key = key, .value = value });
+ }
+ }
+ }
+ };
+}
+
+const BaseType = enum {
+ Item,
+ Actor,
+
+ fn to_compendium_type(self: @This()) String {
+ return switch (self) {
+ .Item => "items",
+ .Actor => "actors",
+ };
+ }
+};
+
+fn Folder(base_type: BaseType) type {
+ return struct {
+ _id: ?String = null,
+ name: String,
+ @"type": BaseType = base_type,
+
+ description: String = "",
+
+ folder: ?String = null,
+
+ sorting: enum { a, m } = .a,
+ sort: u64 = 0,
+
+ color: ?String = null,
+
+ flags: struct {} = .{},
+ _stats: DocumentStats = .{},
+ };
+}
+
+pub fn Item(comptime typename: String, comptime T: type) type {
+ return struct {
+ const Type: BaseType = .Item;
+
+ _id: ?String = null,
+ name: String,
+ @"type": String = typename,
+ img: String = "icons/svg/item-bag.svg",
+
+ system: T,
+
+ effects: []u0 = &.{},
+ folder: ?String = null,
+ sort: u64 = 0,
+
+ ownership: struct {
+ default: u8 = 0,
+ } = .{},
+
+ flags: struct {} = .{},
+ _stats: DocumentStats = .{},
+ };
+}
+
+pub const DocumentStats = struct {
+ coreVersion: String = CORE_VERSION,
+ systemId: String = SYSTEM_NAME,
+ systemVersion: String = SYSTEM_VERSION,
+
+ createdTime: ?u64 = null,
+ modifiedTime: ?u64 = null,
+ lastModifiedBy: ?String = null,
+
+ compendiumSource: ?String = null,
+ duplicateSource: ?String = null,
+};
+
+inline fn random_char(alphabet: []const u8) u8 {
+ return alphabet[std.crypto.random.uintLessThan(u8, alphabet.len)];
+}
+
+var id_set = std.BufSet.init(std.heap.c_allocator);
+
+fn random_id() [16]u8 {
+ const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
+
+ var result: [16]u8 = undefined;
+ inline for (&result) |*c| c.* = random_char(alphabet);
+
+ if (id_set.contains(&result)) {
+ return random_id();
+ }
+
+ id_set.insert(&result) catch unreachable;
+ return result;
+}
diff --git a/zig/libs/leveldb/build.zig b/zig/libs/leveldb/build.zig
new file mode 100644
index 0000000..9f1bbb0
--- /dev/null
+++ b/zig/libs/leveldb/build.zig
@@ -0,0 +1,81 @@
+const std = @import("std");
+
+pub fn build(b: *std.Build) void {
+ const target = b.standardTargetOptions (.{});
+ const optimize = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseFast });
+
+ const leveldb_static_lib = build_leveldb(b, target, optimize);
+
+ const module = b.addModule("leveldb", .{
+ .root_source_file = b.path("src/leveldb.zig"),
+ .target = target,
+ .optimize = optimize,
+ });
+ module.linkLibrary(leveldb_static_lib);
+}
+
+fn build_leveldb(b: *std.Build, target: std.Build.ResolvedTarget, optimize: std.builtin.OptimizeMode) *std.Build.Step.Compile {
+ const source = b.dependency("leveldb", .{});
+ const static_lib = b.addStaticLibrary(.{
+ .name = "leveldb",
+ .target = target,
+ .optimize = optimize,
+ });
+ static_lib.linkLibCpp();
+ static_lib.addIncludePath(source.path(""));
+ static_lib.addIncludePath(source.path("include"));
+ static_lib.installHeadersDirectory(source.path("include"), "", .{});
+
+ if (target.result.os.tag == .windows) {
+ static_lib.defineCMacro("LEVELDB_PLATFORM_WINDOWS", "1");
+ static_lib.addCSourceFile(.{ .file = source.path("util/env_windows.cc") });
+ } else {
+ static_lib.defineCMacro("LEVELDB_PLATFORM_POSIX", "1");
+ static_lib.addCSourceFile(.{ .file = source.path("util/env_posix.cc") });
+ }
+
+ static_lib.addCSourceFiles(.{
+ .root = source.path(""),
+ .files = &.{
+ "db/builder.cc",
+ "db/c.cc",
+ "db/db_impl.cc",
+ "db/db_iter.cc",
+ "db/dbformat.cc",
+ "db/dumpfile.cc",
+ "db/filename.cc",
+ "db/log_reader.cc",
+ "db/log_writer.cc",
+ "db/memtable.cc",
+ "db/repair.cc",
+ "db/table_cache.cc",
+ "db/version_edit.cc",
+ "db/version_set.cc",
+ "db/write_batch.cc",
+ "table/block_builder.cc",
+ "table/block.cc",
+ "table/filter_block.cc",
+ "table/format.cc",
+ "table/iterator.cc",
+ "table/merger.cc",
+ "table/table_builder.cc",
+ "table/table.cc",
+ "table/two_level_iterator.cc",
+ "util/arena.cc",
+ "util/bloom.cc",
+ "util/cache.cc",
+ "util/coding.cc",
+ "util/comparator.cc",
+ "util/crc32c.cc",
+ "util/env.cc",
+ "util/filter_policy.cc",
+ "util/hash.cc",
+ "util/logging.cc",
+ "util/options.cc",
+ "util/status.cc",
+ },
+ });
+
+ b.installArtifact(static_lib);
+ return static_lib;
+}
diff --git a/zig/libs/leveldb/build.zig.zon b/zig/libs/leveldb/build.zig.zon
new file mode 100644
index 0000000..9268d6a
--- /dev/null
+++ b/zig/libs/leveldb/build.zig.zon
@@ -0,0 +1,15 @@
+.{
+ .name = "leveldb",
+ .version = "1.0.0",
+ .paths = .{
+ "build.zig",
+ "build.zig.zon",
+ "src", // was "leveldb.zig" — file does not exist; module root is src/leveldb.zig (see build.zig); consider also listing "libs" for the vendored leveldb
+ },
+ .dependencies = .{
+ .leveldb = .{
+ .path = "libs/leveldb"
+ },
+ },
+
+}
diff --git a/zig/libs/leveldb/libs/leveldb/.clang-format b/zig/libs/leveldb/libs/leveldb/.clang-format
new file mode 100644
index 0000000..f493f75
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/.clang-format
@@ -0,0 +1,18 @@
+# Run manually to reformat a file:
+# clang-format -i --style=file
+# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file
+BasedOnStyle: Google
+DerivePointerAlignment: false
+
+# Public headers are in a different location in the internal Google repository.
+# Order them so that when imported to the authoritative repository they will be
+# in correct alphabetical order.
+IncludeCategories:
+ - Regex: '^(<|"(benchmarks|db|helpers)/)'
+ Priority: 1
+ - Regex: '^"(leveldb)/'
+ Priority: 2
+ - Regex: '^(<|"(issues|port|table|third_party|util)/)'
+ Priority: 3
+ - Regex: '.*'
+ Priority: 4
diff --git a/zig/libs/leveldb/libs/leveldb/.github/workflows/build.yml b/zig/libs/leveldb/libs/leveldb/.github/workflows/build.yml
new file mode 100644
index 0000000..d28902e
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/.github/workflows/build.yml
@@ -0,0 +1,102 @@
+# Copyright 2021 The LevelDB Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+name: ci
+on: [push, pull_request]
+
+permissions:
+ contents: read
+
+jobs:
+ build-and-test:
+ name: >-
+ CI
+ ${{ matrix.os }}
+ ${{ matrix.compiler }}
+ ${{ matrix.optimized && 'release' || 'debug' }}
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ compiler: [clang, gcc, msvc]
+ os: [ubuntu-latest, macos-latest, windows-latest]
+ optimized: [true, false]
+ exclude:
+ # MSVC only works on Windows.
+ - os: ubuntu-latest
+ compiler: msvc
+ - os: macos-latest
+ compiler: msvc
+ # Not testing with GCC on macOS.
+ - os: macos-latest
+ compiler: gcc
+ # Only testing with MSVC on Windows.
+ - os: windows-latest
+ compiler: clang
+ - os: windows-latest
+ compiler: gcc
+ include:
+ - compiler: clang
+ CC: clang
+ CXX: clang++
+ - compiler: gcc
+ CC: gcc
+ CXX: g++
+ - compiler: msvc
+ CC:
+ CXX:
+
+ env:
+ CMAKE_BUILD_DIR: ${{ github.workspace }}/build
+ CMAKE_BUILD_TYPE: ${{ matrix.optimized && 'RelWithDebInfo' || 'Debug' }}
+ CC: ${{ matrix.CC }}
+ CXX: ${{ matrix.CXX }}
+ BINARY_SUFFIX: ${{ startsWith(matrix.os, 'windows') && '.exe' || '' }}
+ BINARY_PATH: >-
+ ${{ format(
+ startsWith(matrix.os, 'windows') && '{0}\build\{1}\' || '{0}/build/',
+ github.workspace,
+ matrix.optimized && 'RelWithDebInfo' || 'Debug') }}
+
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ submodules: true
+
+ - name: Install dependencies on Linux
+ if: ${{ runner.os == 'Linux' }}
+ # libgoogle-perftools-dev is temporarily removed from the package list
+ # because it is currently broken on GitHub's Ubuntu 22.04.
+ run: |
+ sudo apt-get update
+ sudo apt-get install libkyotocabinet-dev libsnappy-dev libsqlite3-dev
+
+ - name: Generate build config
+ run: >-
+ cmake -S "${{ github.workspace }}" -B "${{ env.CMAKE_BUILD_DIR }}"
+ -DCMAKE_BUILD_TYPE=${{ env.CMAKE_BUILD_TYPE }}
+ -DCMAKE_INSTALL_PREFIX=${{ runner.temp }}/install_test/
+
+ - name: Build
+ run: >-
+ cmake --build "${{ env.CMAKE_BUILD_DIR }}"
+ --config "${{ env.CMAKE_BUILD_TYPE }}"
+
+ - name: Run Tests
+ working-directory: ${{ github.workspace }}/build
+ run: ctest -C "${{ env.CMAKE_BUILD_TYPE }}" --verbose
+
+ - name: Run LevelDB Benchmarks
+ run: ${{ env.BINARY_PATH }}db_bench${{ env.BINARY_SUFFIX }}
+
+ - name: Run SQLite Benchmarks
+ if: ${{ runner.os != 'Windows' }}
+ run: ${{ env.BINARY_PATH }}db_bench_sqlite3${{ env.BINARY_SUFFIX }}
+
+ - name: Run Kyoto Cabinet Benchmarks
+ if: ${{ runner.os == 'Linux' && matrix.compiler == 'clang' }}
+ run: ${{ env.BINARY_PATH }}db_bench_tree_db${{ env.BINARY_SUFFIX }}
+
+ - name: Test CMake installation
+ run: cmake --build "${{ env.CMAKE_BUILD_DIR }}" --target install
diff --git a/zig/libs/leveldb/libs/leveldb/.gitignore b/zig/libs/leveldb/libs/leveldb/.gitignore
new file mode 100644
index 0000000..c4b2425
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/.gitignore
@@ -0,0 +1,8 @@
+# Editors.
+*.sw*
+.vscode
+.DS_Store
+
+# Build directory.
+build/
+out/
diff --git a/zig/libs/leveldb/libs/leveldb/.gitmodules b/zig/libs/leveldb/libs/leveldb/.gitmodules
new file mode 100644
index 0000000..6e6d3f0
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/.gitmodules
@@ -0,0 +1,6 @@
+[submodule "third_party/googletest"]
+ path = third_party/googletest
+ url = https://github.com/google/googletest.git
+[submodule "third_party/benchmark"]
+ path = third_party/benchmark
+ url = https://github.com/google/benchmark
diff --git a/zig/libs/leveldb/libs/leveldb/AUTHORS b/zig/libs/leveldb/libs/leveldb/AUTHORS
new file mode 100644
index 0000000..2439d7a
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/AUTHORS
@@ -0,0 +1,12 @@
+# Names should be added to this file like so:
+# Name or Organization
+
+Google Inc.
+
+# Initial version authors:
+Jeffrey Dean
+Sanjay Ghemawat
+
+# Partial list of contributors:
+Kevin Regan
+Johan Bilien
diff --git a/zig/libs/leveldb/libs/leveldb/CMakeLists.txt b/zig/libs/leveldb/libs/leveldb/CMakeLists.txt
new file mode 100644
index 0000000..fda9e01
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/CMakeLists.txt
@@ -0,0 +1,519 @@
+# Copyright 2017 The LevelDB Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+cmake_minimum_required(VERSION 3.9)
+# Keep the version below in sync with the one in db.h
+project(leveldb VERSION 1.23.0 LANGUAGES C CXX)
+
+# C standard can be overridden when this is used as a sub-project.
+if(NOT CMAKE_C_STANDARD)
+ # This project can use C11, but will gracefully decay down to C89.
+ set(CMAKE_C_STANDARD 11)
+ set(CMAKE_C_STANDARD_REQUIRED OFF)
+ set(CMAKE_C_EXTENSIONS OFF)
+endif(NOT CMAKE_C_STANDARD)
+
+# C++ standard can be overridden when this is used as a sub-project.
+if(NOT CMAKE_CXX_STANDARD)
+ # This project requires C++11.
+ set(CMAKE_CXX_STANDARD 11)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+ set(CMAKE_CXX_EXTENSIONS OFF)
+endif(NOT CMAKE_CXX_STANDARD)
+
+if (WIN32)
+ set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS)
+ # TODO(cmumford): Make UNICODE configurable for Windows.
+ add_definitions(-D_UNICODE -DUNICODE)
+else (WIN32)
+ set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_POSIX)
+endif (WIN32)
+
+option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON)
+option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON)
+option(LEVELDB_INSTALL "Install LevelDB's header and library" ON)
+
+include(CheckIncludeFile)
+check_include_file("unistd.h" HAVE_UNISTD_H)
+
+include(CheckLibraryExists)
+check_library_exists(crc32c crc32c_value "" HAVE_CRC32C)
+check_library_exists(snappy snappy_compress "" HAVE_SNAPPY)
+check_library_exists(zstd zstd_compress "" HAVE_ZSTD)
+check_library_exists(tcmalloc malloc "" HAVE_TCMALLOC)
+
+include(CheckCXXSymbolExists)
+# Using check_cxx_symbol_exists() instead of check_c_symbol_exists() because
+# we're including the header from C++, and feature detection should use the same
+# compiler language that the project will use later. Principles aside, some
+# versions of glibc do not expose fdatasync() in <unistd.h> in standard C mode
+# (-std=c11), but do expose the function in standard C++ mode (-std=c++11).
+check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
+check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC)
+check_cxx_symbol_exists(O_CLOEXEC "fcntl.h" HAVE_O_CLOEXEC)
+
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # Disable C++ exceptions.
+ string(REGEX REPLACE "/EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHs-c-")
+ add_definitions(-D_HAS_EXCEPTIONS=0)
+
+ # Disable RTTI.
+ string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # Enable strict prototype warnings for C code in clang and gcc.
+ if(NOT CMAKE_C_FLAGS MATCHES "-Wstrict-prototypes")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes")
+ endif(NOT CMAKE_C_FLAGS MATCHES "-Wstrict-prototypes")
+
+ # Disable C++ exceptions.
+ string(REGEX REPLACE "-fexceptions" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
+
+ # Disable RTTI.
+ string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+# Test whether -Wthread-safety is available. See
+# https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+include(CheckCXXCompilerFlag)
+check_cxx_compiler_flag(-Wthread-safety HAVE_CLANG_THREAD_SAFETY)
+
+# Used by googletest.
+check_cxx_compiler_flag(-Wno-missing-field-initializers
+ LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+
+include(CheckCXXSourceCompiles)
+
+# Test whether C++17 __has_include is available.
+check_cxx_source_compiles("
+#if defined(__has_include) && __has_include(<string>)
+#include <string>
+#endif
+int main() { std::string str; return 0; }
+" HAVE_CXX17_HAS_INCLUDE)
+
+set(LEVELDB_PUBLIC_INCLUDE_DIR "include/leveldb")
+set(LEVELDB_PORT_CONFIG_DIR "include/port")
+
+configure_file(
+ "port/port_config.h.in"
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+)
+
+include_directories(
+ "${PROJECT_BINARY_DIR}/include"
+ "."
+)
+
+if(BUILD_SHARED_LIBS)
+ # Only export LEVELDB_EXPORT symbols from the shared library.
+ add_compile_options(-fvisibility=hidden)
+endif(BUILD_SHARED_LIBS)
+
+# Must be included before CMAKE_INSTALL_INCLUDEDIR is used.
+include(GNUInstallDirs)
+
+add_library(leveldb "")
+target_sources(leveldb
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "db/builder.cc"
+ "db/builder.h"
+ "db/c.cc"
+ "db/db_impl.cc"
+ "db/db_impl.h"
+ "db/db_iter.cc"
+ "db/db_iter.h"
+ "db/dbformat.cc"
+ "db/dbformat.h"
+ "db/dumpfile.cc"
+ "db/filename.cc"
+ "db/filename.h"
+ "db/log_format.h"
+ "db/log_reader.cc"
+ "db/log_reader.h"
+ "db/log_writer.cc"
+ "db/log_writer.h"
+ "db/memtable.cc"
+ "db/memtable.h"
+ "db/repair.cc"
+ "db/skiplist.h"
+ "db/snapshot.h"
+ "db/table_cache.cc"
+ "db/table_cache.h"
+ "db/version_edit.cc"
+ "db/version_edit.h"
+ "db/version_set.cc"
+ "db/version_set.h"
+ "db/write_batch_internal.h"
+ "db/write_batch.cc"
+ "port/port_stdcxx.h"
+ "port/port.h"
+ "port/thread_annotations.h"
+ "table/block_builder.cc"
+ "table/block_builder.h"
+ "table/block.cc"
+ "table/block.h"
+ "table/filter_block.cc"
+ "table/filter_block.h"
+ "table/format.cc"
+ "table/format.h"
+ "table/iterator_wrapper.h"
+ "table/iterator.cc"
+ "table/merger.cc"
+ "table/merger.h"
+ "table/table_builder.cc"
+ "table/table.cc"
+ "table/two_level_iterator.cc"
+ "table/two_level_iterator.h"
+ "util/arena.cc"
+ "util/arena.h"
+ "util/bloom.cc"
+ "util/cache.cc"
+ "util/coding.cc"
+ "util/coding.h"
+ "util/comparator.cc"
+ "util/crc32c.cc"
+ "util/crc32c.h"
+ "util/env.cc"
+ "util/filter_policy.cc"
+ "util/hash.cc"
+ "util/hash.h"
+ "util/logging.cc"
+ "util/logging.h"
+ "util/mutexlock.h"
+ "util/no_destructor.h"
+ "util/options.cc"
+ "util/random.h"
+ "util/status.cc"
+
+ # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
+  $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
+)
+
+if (WIN32)
+ target_sources(leveldb
+ PRIVATE
+ "util/env_windows.cc"
+ "util/windows_logger.h"
+ )
+else (WIN32)
+ target_sources(leveldb
+ PRIVATE
+ "util/env_posix.cc"
+ "util/posix_logger.h"
+ )
+endif (WIN32)
+
+# MemEnv is not part of the interface and could be pulled to a separate library.
+target_sources(leveldb
+ PRIVATE
+ "helpers/memenv/memenv.cc"
+ "helpers/memenv/memenv.h"
+)
+
+target_include_directories(leveldb
+ PUBLIC
+    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
+    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+)
+
+set_target_properties(leveldb
+ PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})
+
+target_compile_definitions(leveldb
+ PRIVATE
+ # Used by include/export.h when building shared libraries.
+ LEVELDB_COMPILE_LIBRARY
+ # Used by port/port.h.
+ ${LEVELDB_PLATFORM_NAME}=1
+)
+if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions(leveldb
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+if(BUILD_SHARED_LIBS)
+ target_compile_definitions(leveldb
+ PUBLIC
+ # Used by include/export.h.
+ LEVELDB_SHARED_LIBRARY
+ )
+endif(BUILD_SHARED_LIBS)
+
+if(HAVE_CLANG_THREAD_SAFETY)
+ target_compile_options(leveldb
+ PUBLIC
+ -Werror -Wthread-safety)
+endif(HAVE_CLANG_THREAD_SAFETY)
+
+if(HAVE_CRC32C)
+ target_link_libraries(leveldb crc32c)
+endif(HAVE_CRC32C)
+if(HAVE_SNAPPY)
+ target_link_libraries(leveldb snappy)
+endif(HAVE_SNAPPY)
+if(HAVE_ZSTD)
+ target_link_libraries(leveldb zstd)
+endif(HAVE_ZSTD)
+if(HAVE_TCMALLOC)
+ target_link_libraries(leveldb tcmalloc)
+endif(HAVE_TCMALLOC)
+
+# Needed by port_stdcxx.h
+find_package(Threads REQUIRED)
+target_link_libraries(leveldb Threads::Threads)
+
+add_executable(leveldbutil
+ "db/leveldbutil.cc"
+)
+target_link_libraries(leveldbutil leveldb)
+
+if(LEVELDB_BUILD_TESTS)
+ enable_testing()
+
+ # Prevent overriding the parent project's compiler/linker settings on Windows.
+ set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
+ set(install_gtest OFF)
+ set(install_gmock OFF)
+ set(build_gmock ON)
+
+ # This project is tested using GoogleTest.
+ add_subdirectory("third_party/googletest")
+
+ # GoogleTest triggers a missing field initializers warning.
+ if(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+ set_property(TARGET gtest
+ APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
+ set_property(TARGET gmock
+ APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
+ endif(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+
+ add_executable(leveldb_tests "")
+ target_sources(leveldb_tests
+ PRIVATE
+ # "db/fault_injection_test.cc"
+ # "issues/issue178_test.cc"
+ # "issues/issue200_test.cc"
+ # "issues/issue320_test.cc"
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ # "util/env_test.cc"
+ "util/status_test.cc"
+ "util/no_destructor_test.cc"
+ "util/testutil.cc"
+ "util/testutil.h"
+ )
+ if(NOT BUILD_SHARED_LIBS)
+ target_sources(leveldb_tests
+ PRIVATE
+ "db/autocompact_test.cc"
+ "db/corruption_test.cc"
+ "db/db_test.cc"
+ "db/dbformat_test.cc"
+ "db/filename_test.cc"
+ "db/log_test.cc"
+ "db/recovery_test.cc"
+ "db/skiplist_test.cc"
+ "db/version_edit_test.cc"
+ "db/version_set_test.cc"
+ "db/write_batch_test.cc"
+ "helpers/memenv/memenv_test.cc"
+ "table/filter_block_test.cc"
+ "table/table_test.cc"
+ "util/arena_test.cc"
+ "util/bloom_test.cc"
+ "util/cache_test.cc"
+ "util/coding_test.cc"
+ "util/crc32c_test.cc"
+ "util/hash_test.cc"
+ "util/logging_test.cc"
+ )
+ endif(NOT BUILD_SHARED_LIBS)
+ target_link_libraries(leveldb_tests leveldb gmock gtest gtest_main)
+ target_compile_definitions(leveldb_tests
+ PRIVATE
+ ${LEVELDB_PLATFORM_NAME}=1
+ )
+ if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions(leveldb_tests
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+ endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+ add_test(NAME "leveldb_tests" COMMAND "leveldb_tests")
+
+ function(leveldb_test test_file)
+ get_filename_component(test_target_name "${test_file}" NAME_WE)
+
+ add_executable("${test_target_name}" "")
+ target_sources("${test_target_name}"
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "util/testutil.cc"
+ "util/testutil.h"
+
+ "${test_file}"
+ )
+ target_link_libraries("${test_target_name}" leveldb gmock gtest)
+ target_compile_definitions("${test_target_name}"
+ PRIVATE
+ ${LEVELDB_PLATFORM_NAME}=1
+ )
+ if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions("${test_target_name}"
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+ endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+ add_test(NAME "${test_target_name}" COMMAND "${test_target_name}")
+ endfunction(leveldb_test)
+
+ leveldb_test("db/c_test.c")
+
+ if(NOT BUILD_SHARED_LIBS)
+ # TODO(costan): This test also uses
+ # "util/env_{posix|windows}_test_helper.h"
+ if (WIN32)
+ leveldb_test("util/env_windows_test.cc")
+ else (WIN32)
+ leveldb_test("util/env_posix_test.cc")
+ endif (WIN32)
+ endif(NOT BUILD_SHARED_LIBS)
+endif(LEVELDB_BUILD_TESTS)
+
+if(LEVELDB_BUILD_BENCHMARKS)
+ # This project uses Google benchmark for benchmarking.
+ set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
+ set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE)
+ add_subdirectory("third_party/benchmark")
+
+ function(leveldb_benchmark bench_file)
+ get_filename_component(bench_target_name "${bench_file}" NAME_WE)
+
+ add_executable("${bench_target_name}" "")
+ target_sources("${bench_target_name}"
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "util/histogram.cc"
+ "util/histogram.h"
+ "util/testutil.cc"
+ "util/testutil.h"
+
+ "${bench_file}"
+ )
+ target_link_libraries("${bench_target_name}" leveldb gmock gtest benchmark)
+ target_compile_definitions("${bench_target_name}"
+ PRIVATE
+ ${LEVELDB_PLATFORM_NAME}=1
+ )
+ if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions("${bench_target_name}"
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+ endif(NOT HAVE_CXX17_HAS_INCLUDE)
+ endfunction(leveldb_benchmark)
+
+ if(NOT BUILD_SHARED_LIBS)
+ leveldb_benchmark("benchmarks/db_bench.cc")
+ endif(NOT BUILD_SHARED_LIBS)
+
+ check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3)
+ if(HAVE_SQLITE3)
+ leveldb_benchmark("benchmarks/db_bench_sqlite3.cc")
+ target_link_libraries(db_bench_sqlite3 sqlite3)
+ endif(HAVE_SQLITE3)
+
+ # check_library_exists is insufficient here because the library names have
+ # different manglings when compiled with clang or gcc, at least when installed
+ # with Homebrew on Mac.
+ set(OLD_CMAKE_REQURED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
+ list(APPEND CMAKE_REQUIRED_LIBRARIES kyotocabinet)
+ check_cxx_source_compiles("
+#include <kcpolydb.h>
+
+int main() {
+ kyotocabinet::TreeDB* db = new kyotocabinet::TreeDB();
+ delete db;
+ return 0;
+}
+ " HAVE_KYOTOCABINET)
+ set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQURED_LIBRARIES})
+ if(HAVE_KYOTOCABINET)
+ leveldb_benchmark("benchmarks/db_bench_tree_db.cc")
+ target_link_libraries(db_bench_tree_db kyotocabinet)
+ endif(HAVE_KYOTOCABINET)
+endif(LEVELDB_BUILD_BENCHMARKS)
+
+if(LEVELDB_INSTALL)
+ install(TARGETS leveldb
+ EXPORT leveldbTargets
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ )
+ install(
+ FILES
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
+ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/leveldb"
+ )
+
+ include(CMakePackageConfigHelpers)
+ configure_package_config_file(
+ "cmake/${PROJECT_NAME}Config.cmake.in"
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}Config.cmake"
+ INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+ )
+ write_basic_package_version_file(
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}ConfigVersion.cmake"
+ COMPATIBILITY SameMajorVersion
+ )
+ install(
+ EXPORT leveldbTargets
+ NAMESPACE leveldb::
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+ )
+ install(
+ FILES
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}Config.cmake"
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}ConfigVersion.cmake"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+ )
+endif(LEVELDB_INSTALL)
diff --git a/zig/libs/leveldb/libs/leveldb/CONTRIBUTING.md b/zig/libs/leveldb/libs/leveldb/CONTRIBUTING.md
new file mode 100644
index 0000000..3cf27bb
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code Reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+See [the README](README.md#contributing-to-the-leveldb-project) for areas
+where we are likely to accept external contributions.
+
+## Community Guidelines
+
+This project follows [Google's Open Source Community
+Guidelines](https://opensource.google/conduct/).
\ No newline at end of file
diff --git a/zig/libs/leveldb/libs/leveldb/LICENSE b/zig/libs/leveldb/libs/leveldb/LICENSE
new file mode 100644
index 0000000..8e80208
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/zig/libs/leveldb/libs/leveldb/NEWS b/zig/libs/leveldb/libs/leveldb/NEWS
new file mode 100644
index 0000000..3fd9924
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/NEWS
@@ -0,0 +1,17 @@
+Release 1.2 2011-05-16
+----------------------
+
+Fixes for larger databases (tested up to one billion 100-byte entries,
+i.e., ~100GB).
+
+(1) Place hard limit on number of level-0 files. This fixes errors
+of the form "too many open files".
+
+(2) Fixed memtable management. Before the fix, a heavy write burst
+could cause unbounded memory usage.
+
+A fix for a logging bug where the reader would incorrectly complain
+about corruption.
+
+Allow public access to WriteBatch contents so that users can easily
+wrap a DB.
diff --git a/zig/libs/leveldb/libs/leveldb/README.md b/zig/libs/leveldb/libs/leveldb/README.md
new file mode 100644
index 0000000..a5e5416
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/README.md
@@ -0,0 +1,246 @@
+LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.
+
+> **This repository is receiving very limited maintenance. We will only review the following types of changes.**
+>
+> * Fixes for critical bugs, such as data loss or memory corruption
+> * Changes absolutely needed by internally supported leveldb clients. These typically fix breakage introduced by a language/standard library/OS update
+
+[![ci](https://github.com/google/leveldb/actions/workflows/build.yml/badge.svg)](https://github.com/google/leveldb/actions/workflows/build.yml)
+
+Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
+
+# Features
+
+ * Keys and values are arbitrary byte arrays.
+ * Data is stored sorted by key.
+ * Callers can provide a custom comparison function to override the sort order.
+ * The basic operations are `Put(key,value)`, `Get(key)`, `Delete(key)`.
+ * Multiple changes can be made in one atomic batch.
+ * Users can create a transient snapshot to get a consistent view of data.
+ * Forward and backward iteration is supported over the data.
+ * Data is automatically compressed using the [Snappy compression library](https://google.github.io/snappy/), but [Zstd compression](https://facebook.github.io/zstd/) is also supported.
+ * External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
+
+# Documentation
+
+ [LevelDB library documentation](https://github.com/google/leveldb/blob/main/doc/index.md) is online and bundled with the source code.
+
+# Limitations
+
+ * This is not a SQL database. It does not have a relational data model, it does not support SQL queries, and it has no support for indexes.
+ * Only a single process (possibly multi-threaded) can access a particular database at a time.
+ * There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library.
+
+# Getting the Source
+
+```bash
+git clone --recurse-submodules https://github.com/google/leveldb.git
+```
+
+# Building
+
+This project supports [CMake](https://cmake.org/) out of the box.
+
+### Build for POSIX
+
+Quick start:
+
+```bash
+mkdir -p build && cd build
+cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build .
+```
+
+### Building for Windows
+
+First generate the Visual Studio 2017 project/solution files:
+
+```cmd
+mkdir build
+cd build
+cmake -G "Visual Studio 15" ..
+```
+The default will build for x86. For 64-bit run:
+
+```cmd
+cmake -G "Visual Studio 15 Win64" ..
+```
+
+To compile the Windows solution from the command-line:
+
+```cmd
+devenv /build Debug leveldb.sln
+```
+
+or open leveldb.sln in Visual Studio and build from within.
+
+Please see the CMake documentation and `CMakeLists.txt` for more advanced usage.
+
+# Contributing to the leveldb Project
+
+> **This repository is receiving very limited maintenance. We will only review the following types of changes.**
+>
+> * Bug fixes
+> * Changes absolutely needed by internally supported leveldb clients. These typically fix breakage introduced by a language/standard library/OS update
+
+The leveldb project welcomes contributions. leveldb's primary goal is to be
+a reliable and fast key/value store. Changes that are in line with the
+features/limitations outlined above, and meet the requirements below,
+will be considered.
+
+Contribution requirements:
+
+1. **Tested platforms only**. We _generally_ will only accept changes for
+ platforms that are compiled and tested. This means POSIX (for Linux and
+ macOS) or Windows. Very small changes will sometimes be accepted, but
+ consider that more of an exception than the rule.
+
+2. **Stable API**. We strive very hard to maintain a stable API. Changes that
+ require changes for projects using leveldb _might_ be rejected without
+ sufficient benefit to the project.
+
+3. **Tests**: All changes must be accompanied by a new (or changed) test, or
+ a sufficient explanation as to why a new (or changed) test is not required.
+
+4. **Consistent Style**: This project conforms to the
+ [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
+ To ensure your changes are properly formatted please run:
+
+ ```
+  clang-format -i --style=file <file>
+ ```
+
+We are unlikely to accept contributions to the build configuration files, such
+as `CMakeLists.txt`. We are focused on maintaining a build configuration that
+allows us to test that the project works in a few supported configurations
+inside Google. We are not currently interested in supporting other requirements,
+such as different operating systems, compilers, or build systems.
+
+## Submitting a Pull Request
+
+Before any pull request will be accepted the author must first sign a
+Contributor License Agreement (CLA) at https://cla.developers.google.com/.
+
+In order to keep the commit timeline linear
+[squash](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#Squashing-Commits)
+your changes down to a single commit and [rebase](https://git-scm.com/docs/git-rebase)
+on google/leveldb/main. This keeps the commit timeline linear and more easily sync'ed
+with the internal repository at Google. More information at GitHub's
+[About Git rebase](https://help.github.com/articles/about-git-rebase/) page.
+
+# Performance
+
+Here is a performance report (with explanations) from the run of the
+included db_bench program. The results are somewhat noisy, but should
+be enough to get a ballpark performance estimate.
+
+## Setup
+
+We use a database with a million entries. Each entry has a 16 byte
+key, and a 100 byte value. Values used by the benchmark compress to
+about half their original size.
+
+ LevelDB: version 1.1
+ Date: Sun May 1 12:11:26 2011
+ CPU: 4 x Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz
+ CPUCache: 4096 KB
+ Keys: 16 bytes each
+ Values: 100 bytes each (50 bytes after compression)
+ Entries: 1000000
+ Raw Size: 110.6 MB (estimated)
+ File Size: 62.9 MB (estimated)
+
+## Write performance
+
+The "fill" benchmarks create a brand new database, in either
+sequential, or random order. The "fillsync" benchmark flushes data
+from the operating system to the disk after every operation; the other
+write operations leave the data sitting in the operating system buffer
+cache for a while. The "overwrite" benchmark does random writes that
+update existing keys in the database.
+
+ fillseq : 1.765 micros/op; 62.7 MB/s
+ fillsync : 268.409 micros/op; 0.4 MB/s (10000 ops)
+ fillrandom : 2.460 micros/op; 45.0 MB/s
+ overwrite : 2.380 micros/op; 46.5 MB/s
+
+Each "op" above corresponds to a write of a single key/value pair.
+I.e., a random write benchmark goes at approximately 400,000 writes per second.
+
+Each "fillsync" operation costs much less (0.3 millisecond)
+than a disk seek (typically 10 milliseconds). We suspect that this is
+because the hard disk itself is buffering the update in its memory and
+responding before the data has been written to the platter. This may
+or may not be safe based on whether or not the hard disk has enough
+power to save its memory in the event of a power failure.
+
+## Read performance
+
+We list the performance of reading sequentially in both the forward
+and reverse direction, and also the performance of a random lookup.
+Note that the database created by the benchmark is quite small.
+Therefore the report characterizes the performance of leveldb when the
+working set fits in memory. The cost of reading a piece of data that
+is not present in the operating system buffer cache will be dominated
+by the one or two disk seeks needed to fetch the data from disk.
+Write performance will be mostly unaffected by whether or not the
+working set fits in memory.
+
+ readrandom : 16.677 micros/op; (approximately 60,000 reads per second)
+ readseq : 0.476 micros/op; 232.3 MB/s
+ readreverse : 0.724 micros/op; 152.9 MB/s
+
+LevelDB compacts its underlying storage data in the background to
+improve read performance. The results listed above were done
+immediately after a lot of random writes. The results after
+compactions (which are usually triggered automatically) are better.
+
+ readrandom : 11.602 micros/op; (approximately 85,000 reads per second)
+ readseq : 0.423 micros/op; 261.8 MB/s
+ readreverse : 0.663 micros/op; 166.9 MB/s
+
+Some of the high cost of reads comes from repeated decompression of blocks
+read from disk. If we supply enough cache to the leveldb so it can hold the
+uncompressed blocks in memory, the read performance improves again:
+
+ readrandom : 9.775 micros/op; (approximately 100,000 reads per second before compaction)
+ readrandom : 5.215 micros/op; (approximately 190,000 reads per second after compaction)
+
+## Repository contents
+
+See [doc/index.md](doc/index.md) for more explanation. See
+[doc/impl.md](doc/impl.md) for a brief overview of the implementation.
+
+The public interface is in include/leveldb/*.h. Callers should not include or
+rely on the details of any other header files in this package. Those
+internal APIs may be changed without warning.
+
+Guide to header files:
+
+* **include/leveldb/db.h**: Main interface to the DB: Start here.
+
+* **include/leveldb/options.h**: Control over the behavior of an entire database,
+and also control over the behavior of individual reads and writes.
+
+* **include/leveldb/comparator.h**: Abstraction for user-specified comparison function.
+If you want just bytewise comparison of keys, you can use the default
+comparator, but clients can write their own comparator implementations if they
+want custom ordering (e.g. to handle different character encodings, etc.).
+
+* **include/leveldb/iterator.h**: Interface for iterating over data. You can get
+an iterator from a DB object.
+
+* **include/leveldb/write_batch.h**: Interface for atomically applying multiple
+updates to a database.
+
+* **include/leveldb/slice.h**: A simple module for maintaining a pointer and a
+length into some other byte array.
+
+* **include/leveldb/status.h**: Status is returned from many of the public interfaces
+and is used to report success and various kinds of errors.
+
+* **include/leveldb/env.h**:
+Abstraction of the OS environment. A posix implementation of this interface is
+in util/env_posix.cc.
+
+* **include/leveldb/table.h, include/leveldb/table_builder.h**: Lower-level modules that most
+clients probably won't use directly.
diff --git a/zig/libs/leveldb/libs/leveldb/TODO b/zig/libs/leveldb/libs/leveldb/TODO
new file mode 100644
index 0000000..e603c07
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/TODO
@@ -0,0 +1,14 @@
+ss
+- Stats
+
+db
+- Maybe implement DB::BulkDeleteForRange(start_key, end_key)
+ that would blow away files whose ranges are entirely contained
+ within [start_key..end_key]? For Chrome, deletion of obsolete
+ object stores, etc. can be done in the background anyway, so
+ probably not that important.
+- There have been requests for MultiGet.
+
+After a range is completely deleted, what gets rid of the
+corresponding files if we do no future changes to that range. Make
+the conditions for triggering compactions fire in more situations?
diff --git a/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench.cc b/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench.cc
new file mode 100644
index 0000000..8e3f4e7
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench.cc
@@ -0,0 +1,1138 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <sys/types.h>
+
+#include <atomic>
+#include <cstdio>
+#include <cstdlib>
+
+#include "leveldb/cache.h"
+#include "leveldb/comparator.h"
+#include "leveldb/db.h"
+#include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
+#include "leveldb/write_batch.h"
+#include "port/port.h"
+#include "util/crc32c.h"
+#include "util/histogram.h"
+#include "util/mutexlock.h"
+#include "util/random.h"
+#include "util/testutil.h"
+
+// Comma-separated list of operations to run in the specified order
+// Actual benchmarks:
+// fillseq -- write N values in sequential key order in async mode
+// fillrandom -- write N values in random key order in async mode
+// overwrite -- overwrite N values in random key order in async mode
+// fillsync -- write N/100 values in random key order in sync mode
+// fill100K -- write N/1000 100K values in random order in async mode
+// deleteseq -- delete N keys in sequential order
+// deleterandom -- delete N keys in random order
+// readseq -- read N times sequentially
+// readreverse -- read N times in reverse order
+// readrandom -- read N times in random order
+// readmissing -- read N missing keys in random order
+// readhot -- read N times in random order from 1% section of DB
+// seekrandom -- N random seeks
+// seekordered -- N ordered seeks
+// open -- cost of opening a DB
+// crc32c -- repeated crc32c of 4K of data
+// Meta operations:
+// compact -- Compact the entire DB
+// stats -- Print DB stats
+// sstables -- Print sstable info
+// heapprofile -- Dump a heap profile (if supported by this port)
+static const char* FLAGS_benchmarks =
+ "fillseq,"
+ "fillsync,"
+ "fillrandom,"
+ "overwrite,"
+ "readrandom,"
+ "readrandom," // Extra run to allow previous compactions to quiesce
+ "readseq,"
+ "readreverse,"
+ "compact,"
+ "readrandom,"
+ "readseq,"
+ "readreverse,"
+ "fill100K,"
+ "crc32c,"
+ "snappycomp,"
+ "snappyuncomp,"
+ "zstdcomp,"
+ "zstduncomp,";
+
+// Number of key/values to place in database
+static int FLAGS_num = 1000000;
+
+// Number of read operations to do. If negative, do FLAGS_num reads.
+static int FLAGS_reads = -1;
+
+// Number of concurrent threads to run.
+static int FLAGS_threads = 1;
+
+// Size of each value
+static int FLAGS_value_size = 100;
+
+// Arrange to generate values that shrink to this fraction of
+// their original size after compression
+static double FLAGS_compression_ratio = 0.5;
+
+// Print histogram of operation timings
+static bool FLAGS_histogram = false;
+
+// Count the number of string comparisons performed
+static bool FLAGS_comparisons = false;
+
+// Number of bytes to buffer in memtable before compacting
+// (initialized to default value by "main")
+static int FLAGS_write_buffer_size = 0;
+
+// Number of bytes written to each file.
+// (initialized to default value by "main")
+static int FLAGS_max_file_size = 0;
+
+// Approximate size of user data packed per block (before compression.
+// (initialized to default value by "main")
+static int FLAGS_block_size = 0;
+
+// Number of bytes to use as a cache of uncompressed data.
+// Negative means use default settings.
+static int FLAGS_cache_size = -1;
+
+// Maximum number of files to keep open at the same time (use default if == 0)
+static int FLAGS_open_files = 0;
+
+// Bloom filter bits per key.
+// Negative means use default settings.
+static int FLAGS_bloom_bits = -1;
+
+// Common key prefix length.
+static int FLAGS_key_prefix = 0;
+
+// If true, do not destroy the existing database. If you set this
+// flag and also specify a benchmark that wants a fresh database, that
+// benchmark will fail.
+static bool FLAGS_use_existing_db = false;
+
+// If true, reuse existing log/MANIFEST files when re-opening a database.
+static bool FLAGS_reuse_logs = false;
+
+// If true, use compression.
+static bool FLAGS_compression = true;
+
+// Use the db with the following name.
+static const char* FLAGS_db = nullptr;
+
+// ZSTD compression level to try out
+static int FLAGS_zstd_compression_level = 1;
+
+namespace leveldb {
+
+namespace {
+leveldb::Env* g_env = nullptr;
+
+class CountComparator : public Comparator {
+ public:
+ CountComparator(const Comparator* wrapped) : wrapped_(wrapped) {}
+ ~CountComparator() override {}
+ int Compare(const Slice& a, const Slice& b) const override {
+ count_.fetch_add(1, std::memory_order_relaxed);
+ return wrapped_->Compare(a, b);
+ }
+ const char* Name() const override { return wrapped_->Name(); }
+ void FindShortestSeparator(std::string* start,
+ const Slice& limit) const override {
+ wrapped_->FindShortestSeparator(start, limit);
+ }
+
+ void FindShortSuccessor(std::string* key) const override {
+ return wrapped_->FindShortSuccessor(key);
+ }
+
+ size_t comparisons() const { return count_.load(std::memory_order_relaxed); }
+
+ void reset() { count_.store(0, std::memory_order_relaxed); }
+
+ private:
+ mutable std::atomic<size_t> count_{0};
+ const Comparator* const wrapped_;
+};
+
+// Helper for quickly generating random data.
+class RandomGenerator {
+ private:
+ std::string data_;
+ int pos_;
+
+ public:
+ RandomGenerator() {
+ // We use a limited amount of data over and over again and ensure
+ // that it is larger than the compression window (32KB), and also
+ // large enough to serve all typical value sizes we want to write.
+ Random rnd(301);
+ std::string piece;
+ while (data_.size() < 1048576) {
+ // Add a short fragment that is as compressible as specified
+ // by FLAGS_compression_ratio.
+ test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
+ data_.append(piece);
+ }
+ pos_ = 0;
+ }
+
+ Slice Generate(size_t len) {
+ if (pos_ + len > data_.size()) {
+ pos_ = 0;
+ assert(len < data_.size());
+ }
+ pos_ += len;
+ return Slice(data_.data() + pos_ - len, len);
+ }
+};
+
+class KeyBuffer {
+ public:
+ KeyBuffer() {
+ assert(FLAGS_key_prefix < sizeof(buffer_));
+ memset(buffer_, 'a', FLAGS_key_prefix);
+ }
+ KeyBuffer& operator=(KeyBuffer& other) = delete;
+ KeyBuffer(KeyBuffer& other) = delete;
+
+ void Set(int k) {
+ std::snprintf(buffer_ + FLAGS_key_prefix,
+ sizeof(buffer_) - FLAGS_key_prefix, "%016d", k);
+ }
+
+ Slice slice() const { return Slice(buffer_, FLAGS_key_prefix + 16); }
+
+ private:
+ char buffer_[1024];
+};
+
+#if defined(__linux)
+static Slice TrimSpace(Slice s) {
+ size_t start = 0;
+ while (start < s.size() && isspace(s[start])) {
+ start++;
+ }
+ size_t limit = s.size();
+ while (limit > start && isspace(s[limit - 1])) {
+ limit--;
+ }
+ return Slice(s.data() + start, limit - start);
+}
+#endif
+
+static void AppendWithSpace(std::string* str, Slice msg) {
+ if (msg.empty()) return;
+ if (!str->empty()) {
+ str->push_back(' ');
+ }
+ str->append(msg.data(), msg.size());
+}
+
+class Stats {
+ private:
+ double start_;
+ double finish_;
+ double seconds_;
+ int done_;
+ int next_report_;
+ int64_t bytes_;
+ double last_op_finish_;
+ Histogram hist_;
+ std::string message_;
+
+ public:
+ Stats() { Start(); }
+
+ void Start() {
+ next_report_ = 100;
+ hist_.Clear();
+ done_ = 0;
+ bytes_ = 0;
+ seconds_ = 0;
+ message_.clear();
+ start_ = finish_ = last_op_finish_ = g_env->NowMicros();
+ }
+
+ void Merge(const Stats& other) {
+ hist_.Merge(other.hist_);
+ done_ += other.done_;
+ bytes_ += other.bytes_;
+ seconds_ += other.seconds_;
+ if (other.start_ < start_) start_ = other.start_;
+ if (other.finish_ > finish_) finish_ = other.finish_;
+
+ // Just keep the messages from one thread
+ if (message_.empty()) message_ = other.message_;
+ }
+
+ void Stop() {
+ finish_ = g_env->NowMicros();
+ seconds_ = (finish_ - start_) * 1e-6;
+ }
+
+ void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }
+
+ void FinishedSingleOp() {
+ if (FLAGS_histogram) {
+ double now = g_env->NowMicros();
+ double micros = now - last_op_finish_;
+ hist_.Add(micros);
+ if (micros > 20000) {
+ std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
+ std::fflush(stderr);
+ }
+ last_op_finish_ = now;
+ }
+
+ done_++;
+ if (done_ >= next_report_) {
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
+ std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
+ std::fflush(stderr);
+ }
+ }
+
+ void AddBytes(int64_t n) { bytes_ += n; }
+
+ void Report(const Slice& name) {
+ // Pretend at least one op was done in case we are running a benchmark
+ // that does not call FinishedSingleOp().
+ if (done_ < 1) done_ = 1;
+
+ std::string extra;
+ if (bytes_ > 0) {
+ // Rate is computed on actual elapsed time, not the sum of per-thread
+ // elapsed times.
+ double elapsed = (finish_ - start_) * 1e-6;
+ char rate[100];
+ std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
+ (bytes_ / 1048576.0) / elapsed);
+ extra = rate;
+ }
+ AppendWithSpace(&extra, message_);
+
+ std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+ name.ToString().c_str(), seconds_ * 1e6 / done_,
+ (extra.empty() ? "" : " "), extra.c_str());
+ if (FLAGS_histogram) {
+ std::fprintf(stdout, "Microseconds per op:\n%s\n",
+ hist_.ToString().c_str());
+ }
+ std::fflush(stdout);
+ }
+};
+
+// State shared by all concurrent executions of the same benchmark.
+struct SharedState {
+ port::Mutex mu;
+ port::CondVar cv GUARDED_BY(mu);
+ int total GUARDED_BY(mu);
+
+ // Each thread goes through the following states:
+ // (1) initializing
+ // (2) waiting for others to be initialized
+ // (3) running
+ // (4) done
+
+ int num_initialized GUARDED_BY(mu);
+ int num_done GUARDED_BY(mu);
+ bool start GUARDED_BY(mu);
+
+ SharedState(int total)
+ : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {}
+};
+
+// Per-thread state for concurrent executions of the same benchmark.
+struct ThreadState {
+ int tid; // 0..n-1 when running in n threads
+ Random rand; // Has different seeds for different threads
+ Stats stats;
+ SharedState* shared;
+
+ ThreadState(int index, int seed) : tid(index), rand(seed), shared(nullptr) {}
+};
+
+void Compress(
+ ThreadState* thread, std::string name,
+ std::function<bool(const char*, size_t, std::string*)> compress_func) {
+ RandomGenerator gen;
+ Slice input = gen.Generate(Options().block_size);
+ int64_t bytes = 0;
+ int64_t produced = 0;
+ bool ok = true;
+ std::string compressed;
+ while (ok && bytes < 1024 * 1048576) { // Compress 1G
+ ok = compress_func(input.data(), input.size(), &compressed);
+ produced += compressed.size();
+ bytes += input.size();
+ thread->stats.FinishedSingleOp();
+ }
+
+ if (!ok) {
+ thread->stats.AddMessage("(" + name + " failure)");
+ } else {
+ char buf[100];
+ std::snprintf(buf, sizeof(buf), "(output: %.1f%%)",
+ (produced * 100.0) / bytes);
+ thread->stats.AddMessage(buf);
+ thread->stats.AddBytes(bytes);
+ }
+}
+
+void Uncompress(
+ ThreadState* thread, std::string name,
+ std::function<bool(const char*, size_t, std::string*)> compress_func,
+ std::function<bool(const char*, size_t, char*)> uncompress_func) {
+ RandomGenerator gen;
+ Slice input = gen.Generate(Options().block_size);
+ std::string compressed;
+ bool ok = compress_func(input.data(), input.size(), &compressed);
+ int64_t bytes = 0;
+ char* uncompressed = new char[input.size()];
+ while (ok && bytes < 1024 * 1048576) { // Compress 1G
+ ok = uncompress_func(compressed.data(), compressed.size(), uncompressed);
+ bytes += input.size();
+ thread->stats.FinishedSingleOp();
+ }
+ delete[] uncompressed;
+
+ if (!ok) {
+ thread->stats.AddMessage("(" + name + " failure)");
+ } else {
+ thread->stats.AddBytes(bytes);
+ }
+}
+
+} // namespace
+
+class Benchmark {
+ private:
+ Cache* cache_;
+ const FilterPolicy* filter_policy_;
+ DB* db_;
+ int num_;
+ int value_size_;
+ int entries_per_batch_;
+ WriteOptions write_options_;
+ int reads_;
+ int heap_counter_;
+ CountComparator count_comparator_;
+ int total_thread_count_;
+
+ void PrintHeader() {
+ const int kKeySize = 16 + FLAGS_key_prefix;
+ PrintEnvironment();
+ std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
+ std::fprintf(
+ stdout, "Values: %d bytes each (%d bytes after compression)\n",
+ FLAGS_value_size,
+ static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
+ std::fprintf(stdout, "Entries: %d\n", num_);
+ std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
+ std::fprintf(
+ stdout, "FileSize: %.1f MB (estimated)\n",
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
+ PrintWarnings();
+ std::fprintf(stdout, "------------------------------------------------\n");
+ }
+
+ void PrintWarnings() {
+#if defined(__GNUC__) && !defined(__OPTIMIZE__)
+ std::fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
+#endif
+#ifndef NDEBUG
+ std::fprintf(
+ stdout,
+ "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+#endif
+
+ // See if snappy is working by attempting to compress a compressible string
+ const char text[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy";
+ std::string compressed;
+ if (!port::Snappy_Compress(text, sizeof(text), &compressed)) {
+ std::fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
+ } else if (compressed.size() >= sizeof(text)) {
+ std::fprintf(stdout, "WARNING: Snappy compression is not effective\n");
+ }
+ }
+
+ void PrintEnvironment() {
+ std::fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion,
+ kMinorVersion);
+
+#if defined(__linux)
+ time_t now = time(nullptr);
+ std::fprintf(stderr, "Date: %s",
+ ctime(&now)); // ctime() adds newline
+
+ FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
+ if (cpuinfo != nullptr) {
+ char line[1000];
+ int num_cpus = 0;
+ std::string cpu_type;
+ std::string cache_size;
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
+ const char* sep = strchr(line, ':');
+ if (sep == nullptr) {
+ continue;
+ }
+ Slice key = TrimSpace(Slice(line, sep - 1 - line));
+ Slice val = TrimSpace(Slice(sep + 1));
+ if (key == "model name") {
+ ++num_cpus;
+ cpu_type = val.ToString();
+ } else if (key == "cache size") {
+ cache_size = val.ToString();
+ }
+ }
+ std::fclose(cpuinfo);
+ std::fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
+ std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
+ }
+#endif
+ }
+
+ public:
+ Benchmark()
+ : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
+ filter_policy_(FLAGS_bloom_bits >= 0
+ ? NewBloomFilterPolicy(FLAGS_bloom_bits)
+ : nullptr),
+ db_(nullptr),
+ num_(FLAGS_num),
+ value_size_(FLAGS_value_size),
+ entries_per_batch_(1),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ heap_counter_(0),
+ count_comparator_(BytewiseComparator()),
+ total_thread_count_(0) {
+ std::vector<std::string> files;
+ g_env->GetChildren(FLAGS_db, &files);
+ for (size_t i = 0; i < files.size(); i++) {
+ if (Slice(files[i]).starts_with("heap-")) {
+ g_env->RemoveFile(std::string(FLAGS_db) + "/" + files[i]);
+ }
+ }
+ if (!FLAGS_use_existing_db) {
+ DestroyDB(FLAGS_db, Options());
+ }
+ }
+
+ ~Benchmark() {
+ delete db_;
+ delete cache_;
+ delete filter_policy_;
+ }
+
+ void Run() {
+ PrintHeader();
+ Open();
+
+ const char* benchmarks = FLAGS_benchmarks;
+ while (benchmarks != nullptr) {
+ const char* sep = strchr(benchmarks, ',');
+ Slice name;
+ if (sep == nullptr) {
+ name = benchmarks;
+ benchmarks = nullptr;
+ } else {
+ name = Slice(benchmarks, sep - benchmarks);
+ benchmarks = sep + 1;
+ }
+
+ // Reset parameters that may be overridden below
+ num_ = FLAGS_num;
+ reads_ = (FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads);
+ value_size_ = FLAGS_value_size;
+ entries_per_batch_ = 1;
+ write_options_ = WriteOptions();
+
+ void (Benchmark::*method)(ThreadState*) = nullptr;
+ bool fresh_db = false;
+ int num_threads = FLAGS_threads;
+
+ if (name == Slice("open")) {
+ method = &Benchmark::OpenBench;
+ num_ /= 10000;
+ if (num_ < 1) num_ = 1;
+ } else if (name == Slice("fillseq")) {
+ fresh_db = true;
+ method = &Benchmark::WriteSeq;
+ } else if (name == Slice("fillbatch")) {
+ fresh_db = true;
+ entries_per_batch_ = 1000;
+ method = &Benchmark::WriteSeq;
+ } else if (name == Slice("fillrandom")) {
+ fresh_db = true;
+ method = &Benchmark::WriteRandom;
+ } else if (name == Slice("overwrite")) {
+ fresh_db = false;
+ method = &Benchmark::WriteRandom;
+ } else if (name == Slice("fillsync")) {
+ fresh_db = true;
+ num_ /= 1000;
+ write_options_.sync = true;
+ method = &Benchmark::WriteRandom;
+ } else if (name == Slice("fill100K")) {
+ fresh_db = true;
+ num_ /= 1000;
+ value_size_ = 100 * 1000;
+ method = &Benchmark::WriteRandom;
+ } else if (name == Slice("readseq")) {
+ method = &Benchmark::ReadSequential;
+ } else if (name == Slice("readreverse")) {
+ method = &Benchmark::ReadReverse;
+ } else if (name == Slice("readrandom")) {
+ method = &Benchmark::ReadRandom;
+ } else if (name == Slice("readmissing")) {
+ method = &Benchmark::ReadMissing;
+ } else if (name == Slice("seekrandom")) {
+ method = &Benchmark::SeekRandom;
+ } else if (name == Slice("seekordered")) {
+ method = &Benchmark::SeekOrdered;
+ } else if (name == Slice("readhot")) {
+ method = &Benchmark::ReadHot;
+ } else if (name == Slice("readrandomsmall")) {
+ reads_ /= 1000;
+ method = &Benchmark::ReadRandom;
+ } else if (name == Slice("deleteseq")) {
+ method = &Benchmark::DeleteSeq;
+ } else if (name == Slice("deleterandom")) {
+ method = &Benchmark::DeleteRandom;
+ } else if (name == Slice("readwhilewriting")) {
+ num_threads++; // Add extra thread for writing
+ method = &Benchmark::ReadWhileWriting;
+ } else if (name == Slice("compact")) {
+ method = &Benchmark::Compact;
+ } else if (name == Slice("crc32c")) {
+ method = &Benchmark::Crc32c;
+ } else if (name == Slice("snappycomp")) {
+ method = &Benchmark::SnappyCompress;
+ } else if (name == Slice("snappyuncomp")) {
+ method = &Benchmark::SnappyUncompress;
+ } else if (name == Slice("zstdcomp")) {
+ method = &Benchmark::ZstdCompress;
+ } else if (name == Slice("zstduncomp")) {
+ method = &Benchmark::ZstdUncompress;
+ } else if (name == Slice("heapprofile")) {
+ HeapProfile();
+ } else if (name == Slice("stats")) {
+ PrintStats("leveldb.stats");
+ } else if (name == Slice("sstables")) {
+ PrintStats("leveldb.sstables");
+ } else {
+ if (!name.empty()) { // No error message for empty name
+ std::fprintf(stderr, "unknown benchmark '%s'\n",
+ name.ToString().c_str());
+ }
+ }
+
+ if (fresh_db) {
+ if (FLAGS_use_existing_db) {
+ std::fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
+ name.ToString().c_str());
+ method = nullptr;
+ } else {
+ delete db_;
+ db_ = nullptr;
+ DestroyDB(FLAGS_db, Options());
+ Open();
+ }
+ }
+
+ if (method != nullptr) {
+ RunBenchmark(num_threads, name, method);
+ }
+ }
+ }
+
+ private:
+ struct ThreadArg {
+ Benchmark* bm;
+ SharedState* shared;
+ ThreadState* thread;
+ void (Benchmark::*method)(ThreadState*);
+ };
+
+ static void ThreadBody(void* v) {
+ ThreadArg* arg = reinterpret_cast<ThreadArg*>(v);
+ SharedState* shared = arg->shared;
+ ThreadState* thread = arg->thread;
+ {
+ MutexLock l(&shared->mu);
+ shared->num_initialized++;
+ if (shared->num_initialized >= shared->total) {
+ shared->cv.SignalAll();
+ }
+ while (!shared->start) {
+ shared->cv.Wait();
+ }
+ }
+
+ thread->stats.Start();
+ (arg->bm->*(arg->method))(thread);
+ thread->stats.Stop();
+
+ {
+ MutexLock l(&shared->mu);
+ shared->num_done++;
+ if (shared->num_done >= shared->total) {
+ shared->cv.SignalAll();
+ }
+ }
+ }
+
+ void RunBenchmark(int n, Slice name,
+ void (Benchmark::*method)(ThreadState*)) {
+ SharedState shared(n);
+
+ ThreadArg* arg = new ThreadArg[n];
+ for (int i = 0; i < n; i++) {
+ arg[i].bm = this;
+ arg[i].method = method;
+ arg[i].shared = &shared;
+ ++total_thread_count_;
+ // Seed the thread's random state deterministically based upon thread
+ // creation across all benchmarks. This ensures that the seeds are unique
+ // but reproducible when rerunning the same set of benchmarks.
+ arg[i].thread = new ThreadState(i, /*seed=*/1000 + total_thread_count_);
+ arg[i].thread->shared = &shared;
+ g_env->StartThread(ThreadBody, &arg[i]);
+ }
+
+ shared.mu.Lock();
+ while (shared.num_initialized < n) {
+ shared.cv.Wait();
+ }
+
+ shared.start = true;
+ shared.cv.SignalAll();
+ while (shared.num_done < n) {
+ shared.cv.Wait();
+ }
+ shared.mu.Unlock();
+
+ for (int i = 1; i < n; i++) {
+ arg[0].thread->stats.Merge(arg[i].thread->stats);
+ }
+ arg[0].thread->stats.Report(name);
+ if (FLAGS_comparisons) {
+ fprintf(stdout, "Comparisons: %zu\n", count_comparator_.comparisons());
+ count_comparator_.reset();
+ fflush(stdout);
+ }
+
+ for (int i = 0; i < n; i++) {
+ delete arg[i].thread;
+ }
+ delete[] arg;
+ }
+
+ void Crc32c(ThreadState* thread) {
+ // Checksum about 500MB of data total
+ const int size = 4096;
+ const char* label = "(4K per op)";
+ std::string data(size, 'x');
+ int64_t bytes = 0;
+ uint32_t crc = 0;
+ while (bytes < 500 * 1048576) {
+ crc = crc32c::Value(data.data(), size);
+ thread->stats.FinishedSingleOp();
+ bytes += size;
+ }
+ // Print so result is not dead
+ std::fprintf(stderr, "... crc=0x%x\r", static_cast<unsigned int>(crc));
+
+ thread->stats.AddBytes(bytes);
+ thread->stats.AddMessage(label);
+ }
+
+ void SnappyCompress(ThreadState* thread) {
+ Compress(thread, "snappy", &port::Snappy_Compress);
+ }
+
+ void SnappyUncompress(ThreadState* thread) {
+ Uncompress(thread, "snappy", &port::Snappy_Compress,
+ &port::Snappy_Uncompress);
+ }
+
+ void ZstdCompress(ThreadState* thread) {
+ Compress(thread, "zstd",
+ [](const char* input, size_t length, std::string* output) {
+ return port::Zstd_Compress(FLAGS_zstd_compression_level, input,
+ length, output);
+ });
+ }
+
+ void ZstdUncompress(ThreadState* thread) {
+ Uncompress(
+ thread, "zstd",
+ [](const char* input, size_t length, std::string* output) {
+ return port::Zstd_Compress(FLAGS_zstd_compression_level, input,
+ length, output);
+ },
+ &port::Zstd_Uncompress);
+ }
+
+ void Open() {
+ assert(db_ == nullptr);
+ Options options;
+ options.env = g_env;
+ options.create_if_missing = !FLAGS_use_existing_db;
+ options.block_cache = cache_;
+ options.write_buffer_size = FLAGS_write_buffer_size;
+ options.max_file_size = FLAGS_max_file_size;
+ options.block_size = FLAGS_block_size;
+ if (FLAGS_comparisons) {
+ options.comparator = &count_comparator_;
+ }
+ options.max_open_files = FLAGS_open_files;
+ options.filter_policy = filter_policy_;
+ options.reuse_logs = FLAGS_reuse_logs;
+ options.compression =
+ FLAGS_compression ? kSnappyCompression : kNoCompression;
+ Status s = DB::Open(options, FLAGS_db, &db_);
+ if (!s.ok()) {
+ std::fprintf(stderr, "open error: %s\n", s.ToString().c_str());
+ std::exit(1);
+ }
+ }
+
+ void OpenBench(ThreadState* thread) {
+ for (int i = 0; i < num_; i++) {
+ delete db_;
+ Open();
+ thread->stats.FinishedSingleOp();
+ }
+ }
+
+ void WriteSeq(ThreadState* thread) { DoWrite(thread, true); }
+
+ void WriteRandom(ThreadState* thread) { DoWrite(thread, false); }
+
+ void DoWrite(ThreadState* thread, bool seq) {
+ if (num_ != FLAGS_num) {
+ char msg[100];
+ std::snprintf(msg, sizeof(msg), "(%d ops)", num_);
+ thread->stats.AddMessage(msg);
+ }
+
+ RandomGenerator gen;
+ WriteBatch batch;
+ Status s;
+ int64_t bytes = 0;
+ KeyBuffer key;
+ for (int i = 0; i < num_; i += entries_per_batch_) {
+ batch.Clear();
+ for (int j = 0; j < entries_per_batch_; j++) {
+ const int k = seq ? i + j : thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ batch.Put(key.slice(), gen.Generate(value_size_));
+ bytes += value_size_ + key.slice().size();
+ thread->stats.FinishedSingleOp();
+ }
+ s = db_->Write(write_options_, &batch);
+ if (!s.ok()) {
+ std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
+ std::exit(1);
+ }
+ }
+ thread->stats.AddBytes(bytes);
+ }
+
+ void ReadSequential(ThreadState* thread) {
+ Iterator* iter = db_->NewIterator(ReadOptions());
+ int i = 0;
+ int64_t bytes = 0;
+ for (iter->SeekToFirst(); i < reads_ && iter->Valid(); iter->Next()) {
+ bytes += iter->key().size() + iter->value().size();
+ thread->stats.FinishedSingleOp();
+ ++i;
+ }
+ delete iter;
+ thread->stats.AddBytes(bytes);
+ }
+
+ void ReadReverse(ThreadState* thread) {
+ Iterator* iter = db_->NewIterator(ReadOptions());
+ int i = 0;
+ int64_t bytes = 0;
+ for (iter->SeekToLast(); i < reads_ && iter->Valid(); iter->Prev()) {
+ bytes += iter->key().size() + iter->value().size();
+ thread->stats.FinishedSingleOp();
+ ++i;
+ }
+ delete iter;
+ thread->stats.AddBytes(bytes);
+ }
+
+ void ReadRandom(ThreadState* thread) {
+ ReadOptions options;
+ std::string value;
+ int found = 0;
+ KeyBuffer key;
+ for (int i = 0; i < reads_; i++) {
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ if (db_->Get(options, key.slice(), &value).ok()) {
+ found++;
+ }
+ thread->stats.FinishedSingleOp();
+ }
+ char msg[100];
+ std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
+ thread->stats.AddMessage(msg);
+ }
+
+ void ReadMissing(ThreadState* thread) {
+ ReadOptions options;
+ std::string value;
+ KeyBuffer key;
+ for (int i = 0; i < reads_; i++) {
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ Slice s = Slice(key.slice().data(), key.slice().size() - 1);
+ db_->Get(options, s, &value);
+ thread->stats.FinishedSingleOp();
+ }
+ }
+
+ void ReadHot(ThreadState* thread) {
+ ReadOptions options;
+ std::string value;
+ const int range = (FLAGS_num + 99) / 100;
+ KeyBuffer key;
+ for (int i = 0; i < reads_; i++) {
+ const int k = thread->rand.Uniform(range);
+ key.Set(k);
+ db_->Get(options, key.slice(), &value);
+ thread->stats.FinishedSingleOp();
+ }
+ }
+
+ void SeekRandom(ThreadState* thread) {
+ ReadOptions options;
+ int found = 0;
+ KeyBuffer key;
+ for (int i = 0; i < reads_; i++) {
+ Iterator* iter = db_->NewIterator(options);
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ iter->Seek(key.slice());
+ if (iter->Valid() && iter->key() == key.slice()) found++;
+ delete iter;
+ thread->stats.FinishedSingleOp();
+ }
+ char msg[100];
+ snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
+ thread->stats.AddMessage(msg);
+ }
+
+ void SeekOrdered(ThreadState* thread) {
+ ReadOptions options;
+ Iterator* iter = db_->NewIterator(options);
+ int found = 0;
+ int k = 0;
+ KeyBuffer key;
+ for (int i = 0; i < reads_; i++) {
+ k = (k + (thread->rand.Uniform(100))) % FLAGS_num;
+ key.Set(k);
+ iter->Seek(key.slice());
+ if (iter->Valid() && iter->key() == key.slice()) found++;
+ thread->stats.FinishedSingleOp();
+ }
+ delete iter;
+ char msg[100];
+ std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
+ thread->stats.AddMessage(msg);
+ }
+
+ void DoDelete(ThreadState* thread, bool seq) {
+ RandomGenerator gen;
+ WriteBatch batch;
+ Status s;
+ KeyBuffer key;
+ for (int i = 0; i < num_; i += entries_per_batch_) {
+ batch.Clear();
+ for (int j = 0; j < entries_per_batch_; j++) {
+ const int k = seq ? i + j : (thread->rand.Uniform(FLAGS_num));
+ key.Set(k);
+ batch.Delete(key.slice());
+ thread->stats.FinishedSingleOp();
+ }
+ s = db_->Write(write_options_, &batch);
+ if (!s.ok()) {
+ std::fprintf(stderr, "del error: %s\n", s.ToString().c_str());
+ std::exit(1);
+ }
+ }
+ }
+
+ void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
+
+ void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
+
+ void ReadWhileWriting(ThreadState* thread) {
+ if (thread->tid > 0) {
+ ReadRandom(thread);
+ } else {
+ // Special thread that keeps writing until other threads are done.
+ RandomGenerator gen;
+ KeyBuffer key;
+ while (true) {
+ {
+ MutexLock l(&thread->shared->mu);
+ if (thread->shared->num_done + 1 >= thread->shared->num_initialized) {
+ // Other threads have finished
+ break;
+ }
+ }
+
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ Status s =
+ db_->Put(write_options_, key.slice(), gen.Generate(value_size_));
+ if (!s.ok()) {
+ std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
+ std::exit(1);
+ }
+ }
+
+ // Do not count any of the preceding work/delay in stats.
+ thread->stats.Start();
+ }
+ }
+
+ void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); }
+
+ void PrintStats(const char* key) {
+ std::string stats;
+ if (!db_->GetProperty(key, &stats)) {
+ stats = "(failed)";
+ }
+ std::fprintf(stdout, "\n%s\n", stats.c_str());
+ }
+
+ static void WriteToFile(void* arg, const char* buf, int n) {
+ reinterpret_cast<WritableFile*>(arg)->Append(Slice(buf, n));
+ }
+
+ void HeapProfile() {
+ char fname[100];
+ std::snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db,
+ ++heap_counter_);
+ WritableFile* file;
+ Status s = g_env->NewWritableFile(fname, &file);
+ if (!s.ok()) {
+ std::fprintf(stderr, "%s\n", s.ToString().c_str());
+ return;
+ }
+ bool ok = port::GetHeapProfile(WriteToFile, file);
+ delete file;
+ if (!ok) {
+ std::fprintf(stderr, "heap profiling not supported\n");
+ g_env->RemoveFile(fname);
+ }
+ }
+};
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ FLAGS_write_buffer_size = leveldb::Options().write_buffer_size;
+ FLAGS_max_file_size = leveldb::Options().max_file_size;
+ FLAGS_block_size = leveldb::Options().block_size;
+ FLAGS_open_files = leveldb::Options().max_open_files;
+ std::string default_db_path;
+
+ for (int i = 1; i < argc; i++) {
+ double d;
+ int n;
+ char junk;
+ if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
+ FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
+ } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) {
+ FLAGS_compression_ratio = d;
+ } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_histogram = n;
+ } else if (sscanf(argv[i], "--comparisons=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_comparisons = n;
+ } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_use_existing_db = n;
+ } else if (sscanf(argv[i], "--reuse_logs=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_reuse_logs = n;
+ } else if (sscanf(argv[i], "--compression=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_compression = n;
+ } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
+ FLAGS_num = n;
+ } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
+ FLAGS_reads = n;
+ } else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) {
+ FLAGS_threads = n;
+ } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
+ FLAGS_value_size = n;
+ } else if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
+ FLAGS_write_buffer_size = n;
+ } else if (sscanf(argv[i], "--max_file_size=%d%c", &n, &junk) == 1) {
+ FLAGS_max_file_size = n;
+ } else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) {
+ FLAGS_block_size = n;
+ } else if (sscanf(argv[i], "--key_prefix=%d%c", &n, &junk) == 1) {
+ FLAGS_key_prefix = n;
+ } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
+ FLAGS_cache_size = n;
+ } else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
+ FLAGS_bloom_bits = n;
+ } else if (sscanf(argv[i], "--open_files=%d%c", &n, &junk) == 1) {
+ FLAGS_open_files = n;
+ } else if (strncmp(argv[i], "--db=", 5) == 0) {
+ FLAGS_db = argv[i] + 5;
+ } else {
+ std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
+ std::exit(1);
+ }
+ }
+
+ leveldb::g_env = leveldb::Env::Default();
+
+ // Choose a location for the test database if none given with --db=
+ if (FLAGS_db == nullptr) {
+ leveldb::g_env->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
+ }
+
+ leveldb::Benchmark benchmark;
+ benchmark.Run();
+ return 0;
+}
diff --git a/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench_log.cc b/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench_log.cc
new file mode 100644
index 0000000..a1845bf
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench_log.cc
@@ -0,0 +1,92 @@
+// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include
+#include
+#include
+
+#include "gtest/gtest.h"
+#include "benchmark/benchmark.h"
+#include "db/version_set.h"
+#include "leveldb/comparator.h"
+#include "leveldb/db.h"
+#include "leveldb/env.h"
+#include "leveldb/options.h"
+#include "port/port.h"
+#include "util/mutexlock.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+namespace {
+
+// Builds a fixed-width (16-digit, zero-padded) decimal key so that
+// lexicographic ordering of keys matches numeric ordering.
+std::string MakeKey(unsigned int num) {
+ char buf[30];
+ std::snprintf(buf, sizeof(buf), "%016u", num);
+ return std::string(buf);
+}
+
+// Benchmarks VersionSet::LogAndApply: builds a level-2 base version with
+// state.range(0) dummy files, then measures one add+remove edit per
+// iteration. Results are reported both via google-benchmark and a manual
+// stderr summary timed with Env::NowMicros.
+void BM_LogAndApply(benchmark::State& state) {
+ const int num_base_files = state.range(0);
+
+ std::string dbname = testing::TempDir() + "leveldb_test_benchmark";
+ DestroyDB(dbname, Options());
+
+ // Open once with create_if_missing so the on-disk structures exist,
+ // then close; the benchmark drives VersionSet directly, not DB.
+ DB* db = nullptr;
+ Options opts;
+ opts.create_if_missing = true;
+ Status s = DB::Open(opts, dbname, &db);
+ ASSERT_LEVELDB_OK(s);
+ ASSERT_TRUE(db != nullptr);
+
+ delete db;
+ db = nullptr;
+
+ Env* env = Env::Default();
+
+ // LogAndApply requires the caller to hold this mutex.
+ port::Mutex mu;
+ MutexLock l(&mu);
+
+ InternalKeyComparator cmp(BytewiseComparator());
+ Options options;
+ VersionSet vset(dbname, &options, nullptr, &cmp);
+ bool save_manifest;
+ ASSERT_LEVELDB_OK(vset.Recover(&save_manifest));
+ // Seed the version with num_base_files tiny files on level 2 so the
+ // per-iteration edits operate against a realistically sized version.
+ VersionEdit vbase;
+ uint64_t fnum = 1;
+ for (int i = 0; i < num_base_files; i++) {
+ InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+ InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
+ vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
+ }
+ ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu));
+
+ uint64_t start_micros = env->NowMicros();
+
+ // Timed region: each iteration removes the previous file and adds a
+ // fresh one, forcing a manifest write per LogAndApply call.
+ for (auto st : state) {
+ VersionEdit vedit;
+ vedit.RemoveFile(2, fnum);
+ InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+ InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
+ vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
+ vset.LogAndApply(&vedit, &mu);
+ }
+
+ uint64_t stop_micros = env->NowMicros();
+ unsigned int us = stop_micros - start_micros;
+ char buf[16];
+ std::snprintf(buf, sizeof(buf), "%d", num_base_files);
+ std::fprintf(stderr,
+ "BM_LogAndApply/%-6s %8" PRIu64
+ " iters : %9u us (%7.0f us / iter)\n",
+ buf, state.iterations(), us, ((float)us) / state.iterations());
+}
+
+BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000);
+
+} // namespace
+
+} // namespace leveldb
+
+BENCHMARK_MAIN();
diff --git a/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench_sqlite3.cc b/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench_sqlite3.cc
new file mode 100644
index 0000000..c9be652
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench_sqlite3.cc
@@ -0,0 +1,726 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include
+
+#include
+#include
+
+#include "util/histogram.h"
+#include "util/random.h"
+#include "util/testutil.h"
+
+// Comma-separated list of operations to run in the specified order
+// Actual benchmarks:
+//
+// fillseq -- write N values in sequential key order in async mode
+// fillseqsync -- write N/100 values in sequential key order in sync mode
+// fillseqbatch -- batch write N values in sequential key order in async mode
+// fillrandom -- write N values in random key order in async mode
+// fillrandsync -- write N/100 values in random key order in sync mode
+// fillrandbatch -- batch write N values in random key order in async mode
+// overwrite -- overwrite N values in random key order in async mode
+// fillrand100K -- write N/1000 100K values in random order in async mode
+// fillseq100K -- write N/1000 100K values in sequential order in async mode
+// readseq -- read N times sequentially
+// readrandom -- read N times in random order
+// readrand100K -- read N/1000 100K values in sequential order in async mode
+static const char* FLAGS_benchmarks =
+ "fillseq,"
+ "fillseqsync,"
+ "fillseqbatch,"
+ "fillrandom,"
+ "fillrandsync,"
+ "fillrandbatch,"
+ "overwrite,"
+ "overwritebatch,"
+ "readrandom,"
+ "readseq,"
+ "fillrand100K,"
+ "fillseq100K,"
+ "readseq,"
+ "readrand100K,";
+
+// Number of key/values to place in database
+static int FLAGS_num = 1000000;
+
+// Number of read operations to do. If negative, do FLAGS_num reads.
+static int FLAGS_reads = -1;
+
+// Size of each value
+static int FLAGS_value_size = 100;
+
+// Print histogram of operation timings
+static bool FLAGS_histogram = false;
+
+// Arrange to generate values that shrink to this fraction of
+// their original size after compression
+static double FLAGS_compression_ratio = 0.5;
+
+// Page size. Default 1 KB.
+static int FLAGS_page_size = 1024;
+
+// Number of pages.
+// Default cache size = FLAGS_page_size * FLAGS_num_pages = 4 MB.
+static int FLAGS_num_pages = 4096;
+
+// If true, do not destroy the existing database. If you set this
+// flag and also specify a benchmark that wants a fresh database, that
+// benchmark will fail.
+static bool FLAGS_use_existing_db = false;
+
+// If true, the SQLite table has ROWIDs.
+static bool FLAGS_use_rowids = false;
+
+// If true, we allow batch writes to occur
+static bool FLAGS_transaction = true;
+
+// If true, we enable Write-Ahead Logging
+static bool FLAGS_WAL_enabled = true;
+
+// Use the db with the following name.
+static const char* FLAGS_db = nullptr;
+
+// Aborts the benchmark if an sqlite3_exec() call failed, printing and
+// freeing the error message that sqlite3 allocated for us.
+inline static void ExecErrorCheck(int status, char* err_msg) {
+ if (status != SQLITE_OK) {
+ std::fprintf(stderr, "SQL error: %s\n", err_msg);
+ sqlite3_free(err_msg);
+ std::exit(1);
+ }
+}
+
+// Aborts unless sqlite3_step() completed its statement (SQLITE_DONE).
+inline static void StepErrorCheck(int status) {
+ if (status != SQLITE_DONE) {
+ std::fprintf(stderr, "SQL step error: status = %d\n", status);
+ std::exit(1);
+ }
+}
+
+// Aborts on any non-OK sqlite3 status (prepare/bind/reset/finalize...).
+inline static void ErrorCheck(int status) {
+ if (status != SQLITE_OK) {
+ std::fprintf(stderr, "sqlite3 error: status = %d\n", status);
+ std::exit(1);
+ }
+}
+
+// Forces a full WAL checkpoint so all buffered writes reach the main
+// database file; no-op when WAL mode is disabled.
+inline static void WalCheckpoint(sqlite3* db_) {
+ // Flush all writes to disk
+ if (FLAGS_WAL_enabled) {
+ sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
+ nullptr);
+ }
+}
+
+namespace leveldb {
+
+// Helper for quickly generating random data.
+namespace {
+// Produces value payloads by handing out slices of one pre-built ~1 MB
+// buffer of compressible data (cheap compared to generating fresh
+// random bytes for every operation).
+class RandomGenerator {
+ private:
+ std::string data_;
+ int pos_;
+
+ public:
+ RandomGenerator() {
+ // We use a limited amount of data over and over again and ensure
+ // that it is larger than the compression window (32KB), and also
+ // large enough to serve all typical value sizes we want to write.
+ Random rnd(301);
+ std::string piece;
+ while (data_.size() < 1048576) {
+ // Add a short fragment that is as compressible as specified
+ // by FLAGS_compression_ratio.
+ test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
+ data_.append(piece);
+ }
+ pos_ = 0;
+ }
+
+ // Returns a len-byte slice into the shared buffer, wrapping the cursor
+ // back to the start when it would run past the end. The slice points
+ // into data_, so it is only valid until the buffer wraps around again.
+ Slice Generate(int len) {
+ if (pos_ + len > data_.size()) {
+ pos_ = 0;
+ assert(len < data_.size());
+ }
+ pos_ += len;
+ return Slice(data_.data() + pos_ - len, len);
+ }
+};
+
+// Returns s with leading and trailing whitespace removed (used to clean
+// up fields parsed from /proc/cpuinfo).
+static Slice TrimSpace(Slice s) {
+ int start = 0;
+ while (start < s.size() && isspace(s[start])) {
+ start++;
+ }
+ int limit = s.size();
+ while (limit > start && isspace(s[limit - 1])) {
+ limit--;
+ }
+ return Slice(s.data() + start, limit - start);
+}
+
+} // namespace
+
+class Benchmark {
+ private:
+ sqlite3* db_;
+ int db_num_;
+ int num_;
+ int reads_;
+ double start_;
+ double last_op_finish_;
+ int64_t bytes_;
+ std::string message_;
+ Histogram hist_;
+ RandomGenerator gen_;
+ Random rand_;
+
+ // State kept for progress messages
+ int done_;
+ int next_report_; // When to report next
+
+ // Prints the benchmark configuration (key/value sizes, entry count,
+ // estimated raw data volume) followed by build-mode warnings.
+ void PrintHeader() {
+ const int kKeySize = 16;
+ PrintEnvironment();
+ std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
+ std::fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
+ std::fprintf(stdout, "Entries: %d\n", num_);
+ // static_cast<double> forces floating-point math so the MB estimate
+ // neither overflows nor truncates; the template argument had been
+ // stripped (bare "static_cast(...)" does not compile) and is restored.
+ std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
+ ((static_cast<double>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
+ PrintWarnings();
+ std::fprintf(stdout, "------------------------------------------------\n");
+ }
+
+ // Warns when the binary was built without optimization or with
+ // assertions enabled, since either skews benchmark numbers.
+ void PrintWarnings() {
+#if defined(__GNUC__) && !defined(__OPTIMIZE__)
+ std::fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
+#endif
+#ifndef NDEBUG
+ std::fprintf(
+ stdout,
+ "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+#endif
+ }
+
+ // Logs the SQLite version plus (on Linux) the date and CPU model/cache
+ // info parsed from /proc/cpuinfo, so benchmark output is self-describing.
+ void PrintEnvironment() {
+ std::fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION);
+
+#if defined(__linux)
+ time_t now = time(nullptr);
+ std::fprintf(stderr, "Date: %s",
+ ctime(&now)); // ctime() adds newline
+
+ FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
+ if (cpuinfo != nullptr) {
+ char line[1000];
+ int num_cpus = 0;
+ std::string cpu_type;
+ std::string cache_size;
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
+ const char* sep = strchr(line, ':');
+ if (sep == nullptr) {
+ continue;
+ }
+ // Length sep-1-line drops the character just before ':' (typically
+ // padding); TrimSpace strips whatever whitespace remains.
+ Slice key = TrimSpace(Slice(line, sep - 1 - line));
+ Slice val = TrimSpace(Slice(sep + 1));
+ if (key == "model name") {
+ ++num_cpus;
+ cpu_type = val.ToString();
+ } else if (key == "cache size") {
+ cache_size = val.ToString();
+ }
+ }
+ std::fclose(cpuinfo);
+ std::fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
+ std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
+ }
+#endif
+ }
+
+ // Resets all per-benchmark state: wall-clock start, byte counter,
+ // status message, latency histogram, and progress reporting.
+ void Start() {
+ start_ = Env::Default()->NowMicros() * 1e-6;
+ bytes_ = 0;
+ message_.clear();
+ last_op_finish_ = start_;
+ hist_.Clear();
+ done_ = 0;
+ next_report_ = 100;
+ }
+
+ // Records one completed operation: an optional histogram sample (with a
+ // stderr note for outliers > 20ms) and a throttled progress line whose
+ // reporting interval grows with the number of ops completed.
+ void FinishedSingleOp() {
+ if (FLAGS_histogram) {
+ double now = Env::Default()->NowMicros() * 1e-6;
+ double micros = (now - last_op_finish_) * 1e6;
+ hist_.Add(micros);
+ if (micros > 20000) {
+ std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
+ std::fflush(stderr);
+ }
+ last_op_finish_ = now;
+ }
+
+ done_++;
+ if (done_ >= next_report_) {
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
+ std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
+ std::fflush(stderr);
+ }
+ }
+
+ // Finalizes timing for the named benchmark and prints the
+ // "name : micros/op" summary, an optional MB/s rate prefix on
+ // message_, and the latency histogram when requested.
+ void Stop(const Slice& name) {
+ double finish = Env::Default()->NowMicros() * 1e-6;
+
+ // Pretend at least one op was done in case we are running a benchmark
+ // that does not call FinishedSingleOp().
+ if (done_ < 1) done_ = 1;
+
+ if (bytes_ > 0) {
+ char rate[100];
+ std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
+ (bytes_ / 1048576.0) / (finish - start_));
+ if (!message_.empty()) {
+ message_ = std::string(rate) + " " + message_;
+ } else {
+ message_ = rate;
+ }
+ }
+
+ std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+ name.ToString().c_str(), (finish - start_) * 1e6 / done_,
+ (message_.empty() ? "" : " "), message_.c_str());
+ if (FLAGS_histogram) {
+ std::fprintf(stdout, "Microseconds per op:\n%s\n",
+ hist_.ToString().c_str());
+ }
+ std::fflush(stdout);
+ }
+
+ public:
+ enum Order { SEQUENTIAL, RANDOM };
+ enum DBState { FRESH, EXISTING };
+
+ // Initializes counters/RNG and, unless --use_existing_db was given,
+ // removes stale dbbench_sqlite3* files from the test directory so each
+ // run starts from a clean slate.
+ Benchmark()
+ : db_(nullptr),
+ db_num_(0),
+ num_(FLAGS_num),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ bytes_(0),
+ rand_(301) {
+ // GetChildren fills a vector of file names; the element type had been
+ // stripped in transit ("std::vector files;" does not compile) and is
+ // restored to std::vector<std::string> here.
+ std::vector<std::string> files;
+ std::string test_dir;
+ Env::Default()->GetTestDirectory(&test_dir);
+ Env::Default()->GetChildren(test_dir, &files);
+ if (!FLAGS_use_existing_db) {
+ // size_t index avoids the signed/unsigned comparison with size().
+ for (size_t i = 0; i < files.size(); i++) {
+ if (Slice(files[i]).starts_with("dbbench_sqlite3")) {
+ std::string file_name(test_dir);
+ file_name += "/";
+ file_name += files[i];
+ Env::Default()->RemoveFile(file_name.c_str());
+ }
+ }
+ }
+ }
+
+ // Closes the most recently opened database handle; ErrorCheck aborts
+ // on failure. If Open() was never called db_ is null, which SQLite
+ // documents as a harmless no-op for sqlite3_close().
+ ~Benchmark() {
+ int status = sqlite3_close(db_);
+ ErrorCheck(status);
+ }
+
+ // Splits FLAGS_benchmarks on commas and dispatches each named benchmark
+ // in order. Unknown names produce a warning; empty names (e.g. from a
+ // trailing comma) are skipped silently. Write benchmarks are followed
+ // by a WAL checkpoint so their data is fully flushed before timing the
+ // next phase.
+ void Run() {
+ PrintHeader();
+ Open();
+
+ const char* benchmarks = FLAGS_benchmarks;
+ while (benchmarks != nullptr) {
+ const char* sep = strchr(benchmarks, ',');
+ Slice name;
+ if (sep == nullptr) {
+ name = benchmarks;
+ benchmarks = nullptr;
+ } else {
+ name = Slice(benchmarks, sep - benchmarks);
+ benchmarks = sep + 1;
+ }
+
+ bytes_ = 0;
+ Start();
+
+ bool known = true;
+ // *sync variants flip write_sync, which Write() translates into
+ // PRAGMA synchronous = FULL; they also run only num_/100 entries.
+ bool write_sync = false;
+ if (name == Slice("fillseq")) {
+ Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1);
+ WalCheckpoint(db_);
+ } else if (name == Slice("fillseqbatch")) {
+ Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1000);
+ WalCheckpoint(db_);
+ } else if (name == Slice("fillrandom")) {
+ Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1);
+ WalCheckpoint(db_);
+ } else if (name == Slice("fillrandbatch")) {
+ Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1000);
+ WalCheckpoint(db_);
+ } else if (name == Slice("overwrite")) {
+ Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1);
+ WalCheckpoint(db_);
+ } else if (name == Slice("overwritebatch")) {
+ Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1000);
+ WalCheckpoint(db_);
+ } else if (name == Slice("fillrandsync")) {
+ write_sync = true;
+ Write(write_sync, RANDOM, FRESH, num_ / 100, FLAGS_value_size, 1);
+ WalCheckpoint(db_);
+ } else if (name == Slice("fillseqsync")) {
+ write_sync = true;
+ Write(write_sync, SEQUENTIAL, FRESH, num_ / 100, FLAGS_value_size, 1);
+ WalCheckpoint(db_);
+ } else if (name == Slice("fillrand100K")) {
+ Write(write_sync, RANDOM, FRESH, num_ / 1000, 100 * 1000, 1);
+ WalCheckpoint(db_);
+ } else if (name == Slice("fillseq100K")) {
+ Write(write_sync, SEQUENTIAL, FRESH, num_ / 1000, 100 * 1000, 1);
+ WalCheckpoint(db_);
+ } else if (name == Slice("readseq")) {
+ ReadSequential();
+ } else if (name == Slice("readrandom")) {
+ Read(RANDOM, 1);
+ } else if (name == Slice("readrand100K")) {
+ // Temporarily scale reads_ down for the large-value read pass,
+ // then restore it for subsequent benchmarks.
+ int n = reads_;
+ reads_ /= 1000;
+ Read(RANDOM, 1);
+ reads_ = n;
+ } else {
+ known = false;
+ if (name != Slice()) { // No error message for empty name
+ std::fprintf(stderr, "unknown benchmark '%s'\n",
+ name.ToString().c_str());
+ }
+ }
+ if (known) {
+ Stop(name);
+ }
+ }
+ }
+
+ // Opens a fresh sqlite3 database file and applies the configured
+ // PRAGMAs (cache size, page size, optional WAL, exclusive locking),
+ // then creates the test table. The file name embeds db_num_, so every
+ // call (one per FRESH benchmark) targets a brand-new file.
+ void Open() {
+ assert(db_ == nullptr);
+
+ int status;
+ char file_name[100];
+ char* err_msg = nullptr;
+ db_num_++;
+
+ // Open database
+ std::string tmp_dir;
+ Env::Default()->GetTestDirectory(&tmp_dir);
+ std::snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
+ tmp_dir.c_str(), db_num_);
+ status = sqlite3_open(file_name, &db_);
+ if (status) {
+ std::fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
+ std::exit(1);
+ }
+
+ // Change SQLite cache size
+ char cache_size[100];
+ std::snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
+ FLAGS_num_pages);
+ status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg);
+ ExecErrorCheck(status, err_msg);
+
+ // FLAGS_page_size is defaulted to 1024
+ if (FLAGS_page_size != 1024) {
+ char page_size[100];
+ std::snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
+ FLAGS_page_size);
+ status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg);
+ ExecErrorCheck(status, err_msg);
+ }
+
+ // Change journal mode to WAL if WAL enabled flag is on
+ if (FLAGS_WAL_enabled) {
+ std::string WAL_stmt = "PRAGMA journal_mode = WAL";
+
+ // LevelDB's default cache size is a combined 4 MB
+ std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
+ status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
+ ExecErrorCheck(status, err_msg);
+ status =
+ sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg);
+ ExecErrorCheck(status, err_msg);
+ }
+
+ // Change locking mode to exclusive and create tables/index for database
+ std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
+ std::string create_stmt =
+ "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
+ if (!FLAGS_use_rowids) create_stmt += " WITHOUT ROWID";
+ std::string stmt_array[] = {locking_stmt, create_stmt};
+ int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
+ for (int i = 0; i < stmt_array_length; i++) {
+ status =
+ sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg);
+ ExecErrorCheck(status, err_msg);
+ }
+ }
+
+ // Writes num_entries key/value pairs via a prepared REPLACE statement.
+ // FRESH reopens a brand-new database file and restarts the timer;
+ // entries_per_batch > 1 wraps each batch in a transaction (when
+ // FLAGS_transaction allows); write_sync selects PRAGMA synchronous
+ // FULL vs OFF.
+ void Write(bool write_sync, Order order, DBState state, int num_entries,
+ int value_size, int entries_per_batch) {
+ // Create new database if state == FRESH
+ if (state == FRESH) {
+ if (FLAGS_use_existing_db) {
+ message_ = "skipping (--use_existing_db is true)";
+ return;
+ }
+ sqlite3_close(db_);
+ db_ = nullptr;
+ Open();
+ Start();
+ }
+
+ if (num_entries != num_) {
+ char msg[100];
+ std::snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
+ message_ = msg;
+ }
+
+ char* err_msg = nullptr;
+ int status;
+
+ sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt;
+ std::string replace_str = "REPLACE INTO test (key, value) VALUES (?, ?)";
+ std::string begin_trans_str = "BEGIN TRANSACTION;";
+ std::string end_trans_str = "END TRANSACTION;";
+
+ // Check for synchronous flag in options
+ std::string sync_stmt =
+ (write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF";
+ status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
+ ExecErrorCheck(status, err_msg);
+
+ // Preparing sqlite3 statements
+ status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt,
+ nullptr);
+ ErrorCheck(status);
+ status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
+ &begin_trans_stmt, nullptr);
+ ErrorCheck(status);
+ status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+ nullptr);
+ ErrorCheck(status);
+
+ bool transaction = (entries_per_batch > 1);
+ for (int i = 0; i < num_entries; i += entries_per_batch) {
+ // Begin write transaction
+ if (FLAGS_transaction && transaction) {
+ status = sqlite3_step(begin_trans_stmt);
+ StepErrorCheck(status);
+ status = sqlite3_reset(begin_trans_stmt);
+ ErrorCheck(status);
+ }
+
+ // Create and execute SQL statements
+ for (int j = 0; j < entries_per_batch; j++) {
+ // value points into gen_'s shared buffer; it is bound with
+ // SQLITE_STATIC and consumed by sqlite3_step before the next
+ // Generate call, so it stays valid for the statement's lifetime.
+ const char* value = gen_.Generate(value_size).data();
+
+ // Create values for key-value pair
+ const int k =
+ (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
+ char key[100];
+ std::snprintf(key, sizeof(key), "%016d", k);
+
+ // Bind KV values into replace_stmt; length 16 matches the "%016d"
+ // fixed-width key format above.
+ status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
+ ErrorCheck(status);
+ status = sqlite3_bind_blob(replace_stmt, 2, value, value_size,
+ SQLITE_STATIC);
+ ErrorCheck(status);
+
+ // Execute replace_stmt
+ bytes_ += value_size + strlen(key);
+ status = sqlite3_step(replace_stmt);
+ StepErrorCheck(status);
+
+ // Reset SQLite statement for another use
+ status = sqlite3_clear_bindings(replace_stmt);
+ ErrorCheck(status);
+ status = sqlite3_reset(replace_stmt);
+ ErrorCheck(status);
+
+ FinishedSingleOp();
+ }
+
+ // End write transaction
+ if (FLAGS_transaction && transaction) {
+ status = sqlite3_step(end_trans_stmt);
+ StepErrorCheck(status);
+ status = sqlite3_reset(end_trans_stmt);
+ ErrorCheck(status);
+ }
+ }
+
+ status = sqlite3_finalize(replace_stmt);
+ ErrorCheck(status);
+ status = sqlite3_finalize(begin_trans_stmt);
+ ErrorCheck(status);
+ status = sqlite3_finalize(end_trans_stmt);
+ ErrorCheck(status);
+ }
+
+ // Performs reads_ point lookups via a prepared SELECT; batches of more
+ // than one lookup are wrapped in a transaction when allowed.
+ // NOTE(review): random keys are drawn modulo reads_, not the number of
+ // entries written, so when --reads differs from --num the key range
+ // read does not match the key range written -- confirm intended.
+ void Read(Order order, int entries_per_batch) {
+ int status;
+ sqlite3_stmt *read_stmt, *begin_trans_stmt, *end_trans_stmt;
+
+ std::string read_str = "SELECT * FROM test WHERE key = ?";
+ std::string begin_trans_str = "BEGIN TRANSACTION;";
+ std::string end_trans_str = "END TRANSACTION;";
+
+ // Preparing sqlite3 statements
+ status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
+ &begin_trans_stmt, nullptr);
+ ErrorCheck(status);
+ status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+ nullptr);
+ ErrorCheck(status);
+ status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
+ ErrorCheck(status);
+
+ bool transaction = (entries_per_batch > 1);
+ for (int i = 0; i < reads_; i += entries_per_batch) {
+ // Begin read transaction
+ if (FLAGS_transaction && transaction) {
+ status = sqlite3_step(begin_trans_stmt);
+ StepErrorCheck(status);
+ status = sqlite3_reset(begin_trans_stmt);
+ ErrorCheck(status);
+ }
+
+ // Create and execute SQL statements
+ for (int j = 0; j < entries_per_batch; j++) {
+ // Create key value
+ char key[100];
+ int k = (order == SEQUENTIAL) ? i + j : (rand_.Next() % reads_);
+ std::snprintf(key, sizeof(key), "%016d", k);
+
+ // Bind key value into read_stmt
+ status = sqlite3_bind_blob(read_stmt, 1, key, 16, SQLITE_STATIC);
+ ErrorCheck(status);
+
+ // Execute read statement: drain all result rows, then check that
+ // the statement finished cleanly (SQLITE_DONE).
+ while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {
+ }
+ StepErrorCheck(status);
+
+ // Reset SQLite statement for another use
+ status = sqlite3_clear_bindings(read_stmt);
+ ErrorCheck(status);
+ status = sqlite3_reset(read_stmt);
+ ErrorCheck(status);
+ FinishedSingleOp();
+ }
+
+ // End read transaction
+ if (FLAGS_transaction && transaction) {
+ status = sqlite3_step(end_trans_stmt);
+ StepErrorCheck(status);
+ status = sqlite3_reset(end_trans_stmt);
+ ErrorCheck(status);
+ }
+ }
+
+ status = sqlite3_finalize(read_stmt);
+ ErrorCheck(status);
+ status = sqlite3_finalize(begin_trans_stmt);
+ ErrorCheck(status);
+ status = sqlite3_finalize(end_trans_stmt);
+ ErrorCheck(status);
+ }
+
+ // Scans up to reads_ rows in key order, accumulating their sizes into
+ // bytes_ for the MB/s report.
+ // NOTE(review): sqlite3 column indexes are 0-based, so for a two-column
+ // table the valid indexes are 0 and 1; index 2 is out of range and
+ // sqlite3_column_bytes returns 0 for it, meaning bytes_ effectively
+ // counts only column 1 -- confirm whether (0, 1) was intended.
+ void ReadSequential() {
+ int status;
+ sqlite3_stmt* pStmt;
+ std::string read_str = "SELECT * FROM test ORDER BY key";
+
+ status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
+ ErrorCheck(status);
+ for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) {
+ bytes_ += sqlite3_column_bytes(pStmt, 1) + sqlite3_column_bytes(pStmt, 2);
+ FinishedSingleOp();
+ }
+
+ status = sqlite3_finalize(pStmt);
+ ErrorCheck(status);
+ }
+};
+
+} // namespace leveldb
+
+// Parses command-line flags, chooses a default database location when
+// --db= is absent, and runs the configured benchmarks. The "%d%c"
+// sscanf pattern accepts a flag only when the number is followed by
+// nothing (exactly one conversion), rejecting trailing garbage.
+int main(int argc, char** argv) {
+ std::string default_db_path;
+ for (int i = 1; i < argc; i++) {
+ double d;
+ int n;
+ char junk;
+ if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
+ FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
+ } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_histogram = n;
+ } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) {
+ FLAGS_compression_ratio = d;
+ } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_use_existing_db = n;
+ } else if (sscanf(argv[i], "--use_rowids=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_use_rowids = n;
+ } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
+ FLAGS_num = n;
+ } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
+ FLAGS_reads = n;
+ } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
+ FLAGS_value_size = n;
+ } else if (leveldb::Slice(argv[i]) == leveldb::Slice("--no_transaction")) {
+ FLAGS_transaction = false;
+ } else if (sscanf(argv[i], "--page_size=%d%c", &n, &junk) == 1) {
+ FLAGS_page_size = n;
+ } else if (sscanf(argv[i], "--num_pages=%d%c", &n, &junk) == 1) {
+ FLAGS_num_pages = n;
+ } else if (sscanf(argv[i], "--WAL_enabled=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_WAL_enabled = n;
+ } else if (strncmp(argv[i], "--db=", 5) == 0) {
+ FLAGS_db = argv[i] + 5;
+ } else {
+ // Any unrecognized argument is fatal rather than silently ignored.
+ std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
+ std::exit(1);
+ }
+ }
+
+ // Choose a location for the test database if none given with --db=
+ if (FLAGS_db == nullptr) {
+ leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
+ }
+
+ leveldb::Benchmark benchmark;
+ benchmark.Run();
+ return 0;
+}
diff --git a/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench_tree_db.cc b/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench_tree_db.cc
new file mode 100644
index 0000000..533600b
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/benchmarks/db_bench_tree_db.cc
@@ -0,0 +1,531 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include
+
+#include
+#include
+
+#include "util/histogram.h"
+#include "util/random.h"
+#include "util/testutil.h"
+
+// Comma-separated list of operations to run in the specified order
+// Actual benchmarks:
+//
+// fillseq -- write N values in sequential key order in async mode
+// fillrandom -- write N values in random key order in async mode
+// overwrite -- overwrite N values in random key order in async mode
+// fillseqsync -- write N/100 values in sequential key order in sync mode
+// fillrandsync -- write N/100 values in random key order in sync mode
+// fillrand100K -- write N/1000 100K values in random order in async mode
+// fillseq100K -- write N/1000 100K values in seq order in async mode
+// readseq -- read N times sequentially
+// readseq100K -- read N/1000 100K values in sequential order in async mode
+// readrand100K -- read N/1000 100K values in sequential order in async mode
+// readrandom -- read N times in random order
+static const char* FLAGS_benchmarks =
+ "fillseq,"
+ "fillseqsync,"
+ "fillrandsync,"
+ "fillrandom,"
+ "overwrite,"
+ "readrandom,"
+ "readseq,"
+ "fillrand100K,"
+ "fillseq100K,"
+ "readseq100K,"
+ "readrand100K,";
+
+// Number of key/values to place in database
+static int FLAGS_num = 1000000;
+
+// Number of read operations to do. If negative, do FLAGS_num reads.
+static int FLAGS_reads = -1;
+
+// Size of each value
+static int FLAGS_value_size = 100;
+
+// Arrange to generate values that shrink to this fraction of
+// their original size after compression
+static double FLAGS_compression_ratio = 0.5;
+
+// Print histogram of operation timings
+static bool FLAGS_histogram = false;
+
+// Cache size. Default 4 MB
+static int FLAGS_cache_size = 4194304;
+
+// Page size. Default 1 KB
+static int FLAGS_page_size = 1024;
+
+// If true, do not destroy the existing database. If you set this
+// flag and also specify a benchmark that wants a fresh database, that
+// benchmark will fail.
+static bool FLAGS_use_existing_db = false;
+
+// Compression flag. If true, compression is on. If false, compression
+// is off.
+static bool FLAGS_compression = true;
+
+// Use the db with the following name.
+static const char* FLAGS_db = nullptr;
+
+// Flushes pending Kyoto Cabinet writes to disk; logs (but does not
+// abort on) synchronization failures.
+inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
+ // Synchronize will flush writes to disk
+ if (!db_->synchronize()) {
+ std::fprintf(stderr, "synchronize error: %s\n", db_->error().name());
+ }
+}
+
+namespace leveldb {
+
+// Helper for quickly generating random data.
+namespace {
+// Produces value payloads by handing out slices of one pre-built ~1 MB
+// buffer of compressible data (same helper as the other db_bench tools).
+class RandomGenerator {
+ private:
+ std::string data_;
+ int pos_;
+
+ public:
+ RandomGenerator() {
+ // We use a limited amount of data over and over again and ensure
+ // that it is larger than the compression window (32KB), and also
+ // large enough to serve all typical value sizes we want to write.
+ Random rnd(301);
+ std::string piece;
+ while (data_.size() < 1048576) {
+ // Add a short fragment that is as compressible as specified
+ // by FLAGS_compression_ratio.
+ test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
+ data_.append(piece);
+ }
+ pos_ = 0;
+ }
+
+ // Returns a len-byte slice into the shared buffer, wrapping the cursor
+ // back to the start when it would run past the end.
+ Slice Generate(int len) {
+ if (pos_ + len > data_.size()) {
+ pos_ = 0;
+ assert(len < data_.size());
+ }
+ pos_ += len;
+ return Slice(data_.data() + pos_ - len, len);
+ }
+};
+
+// Returns s with leading and trailing whitespace removed (used to clean
+// up fields parsed from /proc/cpuinfo).
+static Slice TrimSpace(Slice s) {
+ int start = 0;
+ while (start < s.size() && isspace(s[start])) {
+ start++;
+ }
+ int limit = s.size();
+ while (limit > start && isspace(s[limit - 1])) {
+ limit--;
+ }
+ return Slice(s.data() + start, limit - start);
+}
+
+} // namespace
+
+class Benchmark {
+ private:
+ kyotocabinet::TreeDB* db_;
+ int db_num_;
+ int num_;
+ int reads_;
+ double start_;
+ double last_op_finish_;
+ int64_t bytes_;
+ std::string message_;
+ Histogram hist_;
+ RandomGenerator gen_;
+ Random rand_;
+ kyotocabinet::LZOCompressor comp_;
+
+ // State kept for progress messages
+ int done_;
+ int next_report_; // When to report next
+
+ // Prints benchmark configuration: key/value sizes (raw and after the
+ // expected FLAGS_compression_ratio shrink), entry count, and estimated
+ // raw/file sizes, followed by build-mode warnings.
+ void PrintHeader() {
+ const int kKeySize = 16;
+ PrintEnvironment();
+ std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
+ // static_cast<int> rounds the compressed-size estimate (+0.5) to the
+ // nearest integer; static_cast<double> below keeps the MB math in
+ // floating point. Both template arguments had been stripped (bare
+ // "static_cast(...)" does not compile) and are restored here.
+ std::fprintf(
+ stdout, "Values: %d bytes each (%d bytes after compression)\n",
+ FLAGS_value_size,
+ static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
+ std::fprintf(stdout, "Entries: %d\n", num_);
+ std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
+ ((static_cast<double>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
+ std::fprintf(
+ stdout, "FileSize: %.1f MB (estimated)\n",
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
+ PrintWarnings();
+ std::fprintf(stdout, "------------------------------------------------\n");
+ }
+
+ // Warns when the binary was built without optimization or with
+ // assertions enabled, since either skews benchmark numbers.
+ void PrintWarnings() {
+#if defined(__GNUC__) && !defined(__OPTIMIZE__)
+ std::fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
+#endif
+#ifndef NDEBUG
+ std::fprintf(
+ stdout,
+ "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+#endif
+ }
+
+ // Logs the Kyoto Cabinet version plus (on Linux) the date and CPU
+ // model/cache info parsed from /proc/cpuinfo.
+ void PrintEnvironment() {
+ std::fprintf(
+ stderr, "Kyoto Cabinet: version %s, lib ver %d, lib rev %d\n",
+ kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV);
+
+#if defined(__linux)
+ time_t now = time(nullptr);
+ std::fprintf(stderr, "Date: %s",
+ ctime(&now)); // ctime() adds newline
+
+ FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
+ if (cpuinfo != nullptr) {
+ char line[1000];
+ int num_cpus = 0;
+ std::string cpu_type;
+ std::string cache_size;
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
+ const char* sep = strchr(line, ':');
+ if (sep == nullptr) {
+ continue;
+ }
+ // Length sep-1-line drops the character just before ':' (typically
+ // padding); TrimSpace strips whatever whitespace remains.
+ Slice key = TrimSpace(Slice(line, sep - 1 - line));
+ Slice val = TrimSpace(Slice(sep + 1));
+ if (key == "model name") {
+ ++num_cpus;
+ cpu_type = val.ToString();
+ } else if (key == "cache size") {
+ cache_size = val.ToString();
+ }
+ }
+ std::fclose(cpuinfo);
+ std::fprintf(stderr, "CPU: %d * %s\n", num_cpus,
+ cpu_type.c_str());
+ std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
+ }
+#endif
+ }
+
+ // Resets all per-benchmark state: wall-clock start, byte counter,
+ // status message, latency histogram, and progress reporting.
+ void Start() {
+ start_ = Env::Default()->NowMicros() * 1e-6;
+ bytes_ = 0;
+ message_.clear();
+ last_op_finish_ = start_;
+ hist_.Clear();
+ done_ = 0;
+ next_report_ = 100;
+ }
+
+ // Records one completed operation: an optional histogram sample (with a
+ // stderr note for outliers > 20ms) and a throttled progress line whose
+ // reporting interval grows with the number of ops completed.
+ void FinishedSingleOp() {
+ if (FLAGS_histogram) {
+ double now = Env::Default()->NowMicros() * 1e-6;
+ double micros = (now - last_op_finish_) * 1e6;
+ hist_.Add(micros);
+ if (micros > 20000) {
+ std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
+ std::fflush(stderr);
+ }
+ last_op_finish_ = now;
+ }
+
+ done_++;
+ if (done_ >= next_report_) {
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
+ std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
+ std::fflush(stderr);
+ }
+ }
+
+ // Finalizes timing for the named benchmark and prints the
+ // "name : micros/op" summary, an optional MB/s rate prefix on
+ // message_, and the latency histogram when requested.
+ void Stop(const Slice& name) {
+ double finish = Env::Default()->NowMicros() * 1e-6;
+
+ // Pretend at least one op was done in case we are running a benchmark
+ // that does not call FinishedSingleOp().
+ if (done_ < 1) done_ = 1;
+
+ if (bytes_ > 0) {
+ char rate[100];
+ std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
+ (bytes_ / 1048576.0) / (finish - start_));
+ if (!message_.empty()) {
+ message_ = std::string(rate) + " " + message_;
+ } else {
+ message_ = rate;
+ }
+ }
+
+ std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+ name.ToString().c_str(), (finish - start_) * 1e6 / done_,
+ (message_.empty() ? "" : " "), message_.c_str());
+ if (FLAGS_histogram) {
+ std::fprintf(stdout, "Microseconds per op:\n%s\n",
+ hist_.ToString().c_str());
+ }
+ std::fflush(stdout);
+ }
+
+ public:
+ enum Order { SEQUENTIAL, RANDOM };
+ enum DBState { FRESH, EXISTING };
+
+ Benchmark()
+ : db_(nullptr),
+ num_(FLAGS_num),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ bytes_(0),
+ rand_(301) {
+ std::vector<std::string> files;
+ std::string test_dir;
+ Env::Default()->GetTestDirectory(&test_dir);
+ Env::Default()->GetChildren(test_dir.c_str(), &files);
+ if (!FLAGS_use_existing_db) {
+ for (int i = 0; i < files.size(); i++) {
+ if (Slice(files[i]).starts_with("dbbench_polyDB")) {
+ std::string file_name(test_dir);
+ file_name += "/";
+ file_name += files[i];
+ Env::Default()->RemoveFile(file_name.c_str());
+ }
+ }
+ }
+ }
+
+ ~Benchmark() {
+ if (!db_->close()) {
+ std::fprintf(stderr, "close error: %s\n", db_->error().name());
+ }
+ }
+
+ void Run() {
+ PrintHeader();
+ Open(false);
+
+ const char* benchmarks = FLAGS_benchmarks;
+ while (benchmarks != nullptr) {
+ const char* sep = strchr(benchmarks, ',');
+ Slice name;
+ if (sep == nullptr) {
+ name = benchmarks;
+ benchmarks = nullptr;
+ } else {
+ name = Slice(benchmarks, sep - benchmarks);
+ benchmarks = sep + 1;
+ }
+
+ Start();
+
+ bool known = true;
+ bool write_sync = false;
+ if (name == Slice("fillseq")) {
+ Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1);
+ DBSynchronize(db_);
+ } else if (name == Slice("fillrandom")) {
+ Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1);
+ DBSynchronize(db_);
+ } else if (name == Slice("overwrite")) {
+ Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1);
+ DBSynchronize(db_);
+ } else if (name == Slice("fillrandsync")) {
+ write_sync = true;
+ Write(write_sync, RANDOM, FRESH, num_ / 100, FLAGS_value_size, 1);
+ DBSynchronize(db_);
+ } else if (name == Slice("fillseqsync")) {
+ write_sync = true;
+ Write(write_sync, SEQUENTIAL, FRESH, num_ / 100, FLAGS_value_size, 1);
+ DBSynchronize(db_);
+ } else if (name == Slice("fillrand100K")) {
+ Write(write_sync, RANDOM, FRESH, num_ / 1000, 100 * 1000, 1);
+ DBSynchronize(db_);
+ } else if (name == Slice("fillseq100K")) {
+ Write(write_sync, SEQUENTIAL, FRESH, num_ / 1000, 100 * 1000, 1);
+ DBSynchronize(db_);
+ } else if (name == Slice("readseq")) {
+ ReadSequential();
+ } else if (name == Slice("readrandom")) {
+ ReadRandom();
+ } else if (name == Slice("readrand100K")) {
+ int n = reads_;
+ reads_ /= 1000;
+ ReadRandom();
+ reads_ = n;
+ } else if (name == Slice("readseq100K")) {
+ int n = reads_;
+ reads_ /= 1000;
+ ReadSequential();
+ reads_ = n;
+ } else {
+ known = false;
+ if (name != Slice()) { // No error message for empty name
+ std::fprintf(stderr, "unknown benchmark '%s'\n",
+ name.ToString().c_str());
+ }
+ }
+ if (known) {
+ Stop(name);
+ }
+ }
+ }
+
+ private:
+ void Open(bool sync) {
+ assert(db_ == nullptr);
+
+ // Initialize db_
+ db_ = new kyotocabinet::TreeDB();
+ char file_name[100];
+ db_num_++;
+ std::string test_dir;
+ Env::Default()->GetTestDirectory(&test_dir);
+ std::snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
+ test_dir.c_str(), db_num_);
+
+ // Create tuning options and open the database
+ int open_options =
+ kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
+ int tune_options =
+ kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR;
+ if (FLAGS_compression) {
+ tune_options |= kyotocabinet::TreeDB::TCOMPRESS;
+ db_->tune_compressor(&comp_);
+ }
+ db_->tune_options(tune_options);
+ db_->tune_page_cache(FLAGS_cache_size);
+ db_->tune_page(FLAGS_page_size);
+ db_->tune_map(256LL << 20);
+ if (sync) {
+ open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
+ }
+ if (!db_->open(file_name, open_options)) {
+ std::fprintf(stderr, "open error: %s\n", db_->error().name());
+ }
+ }
+
+ void Write(bool sync, Order order, DBState state, int num_entries,
+ int value_size, int entries_per_batch) {
+ // Create new database if state == FRESH
+ if (state == FRESH) {
+ if (FLAGS_use_existing_db) {
+ message_ = "skipping (--use_existing_db is true)";
+ return;
+ }
+ delete db_;
+ db_ = nullptr;
+ Open(sync);
+ Start(); // Do not count time taken to destroy/open
+ }
+
+ if (num_entries != num_) {
+ char msg[100];
+ std::snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
+ message_ = msg;
+ }
+
+ // Write to database
+ for (int i = 0; i < num_entries; i++) {
+ const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
+ char key[100];
+ std::snprintf(key, sizeof(key), "%016d", k);
+ bytes_ += value_size + strlen(key);
+ std::string cpp_key = key;
+ if (!db_->set(cpp_key, gen_.Generate(value_size).ToString())) {
+ std::fprintf(stderr, "set error: %s\n", db_->error().name());
+ }
+ FinishedSingleOp();
+ }
+ }
+
+ void ReadSequential() {
+ kyotocabinet::DB::Cursor* cur = db_->cursor();
+ cur->jump();
+ std::string ckey, cvalue;
+ while (cur->get(&ckey, &cvalue, true)) {
+ bytes_ += ckey.size() + cvalue.size();
+ FinishedSingleOp();
+ }
+ delete cur;
+ }
+
+ void ReadRandom() {
+ std::string value;
+ for (int i = 0; i < reads_; i++) {
+ char key[100];
+ const int k = rand_.Next() % reads_;
+ std::snprintf(key, sizeof(key), "%016d", k);
+ db_->get(key, &value);
+ FinishedSingleOp();
+ }
+ }
+};
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ std::string default_db_path;
+ for (int i = 1; i < argc; i++) {
+ double d;
+ int n;
+ char junk;
+ if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
+ FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
+ } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) {
+ FLAGS_compression_ratio = d;
+ } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_histogram = n;
+ } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
+ FLAGS_num = n;
+ } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
+ FLAGS_reads = n;
+ } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
+ FLAGS_value_size = n;
+ } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
+ FLAGS_cache_size = n;
+ } else if (sscanf(argv[i], "--page_size=%d%c", &n, &junk) == 1) {
+ FLAGS_page_size = n;
+ } else if (sscanf(argv[i], "--compression=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_compression = (n == 1) ? true : false;
+ } else if (strncmp(argv[i], "--db=", 5) == 0) {
+ FLAGS_db = argv[i] + 5;
+ } else {
+ std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
+ std::exit(1);
+ }
+ }
+
+ // Choose a location for the test database if none given with --db=
+ if (FLAGS_db == nullptr) {
+ leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
+ }
+
+ leveldb::Benchmark benchmark;
+ benchmark.Run();
+ return 0;
+}
diff --git a/zig/libs/leveldb/libs/leveldb/cmake/leveldbConfig.cmake.in b/zig/libs/leveldb/libs/leveldb/cmake/leveldbConfig.cmake.in
new file mode 100644
index 0000000..2572728
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/cmake/leveldbConfig.cmake.in
@@ -0,0 +1,9 @@
+# Copyright 2019 The LevelDB Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+@PACKAGE_INIT@
+
+include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
+
+check_required_components(leveldb)
\ No newline at end of file
diff --git a/zig/libs/leveldb/libs/leveldb/db/autocompact_test.cc b/zig/libs/leveldb/libs/leveldb/db/autocompact_test.cc
new file mode 100644
index 0000000..69341e3
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/db/autocompact_test.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2013 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "gtest/gtest.h"
+#include "db/db_impl.h"
+#include "leveldb/cache.h"
+#include "leveldb/db.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+class AutoCompactTest : public testing::Test {
+ public:
+ AutoCompactTest() {
+ dbname_ = testing::TempDir() + "autocompact_test";
+ tiny_cache_ = NewLRUCache(100);
+ options_.block_cache = tiny_cache_;
+ DestroyDB(dbname_, options_);
+ options_.create_if_missing = true;
+ options_.compression = kNoCompression;
+ EXPECT_LEVELDB_OK(DB::Open(options_, dbname_, &db_));
+ }
+
+ ~AutoCompactTest() {
+ delete db_;
+ DestroyDB(dbname_, Options());
+ delete tiny_cache_;
+ }
+
+ std::string Key(int i) {
+ char buf[100];
+ std::snprintf(buf, sizeof(buf), "key%06d", i);
+ return std::string(buf);
+ }
+
+ uint64_t Size(const Slice& start, const Slice& limit) {
+ Range r(start, limit);
+ uint64_t size;
+ db_->GetApproximateSizes(&r, 1, &size);
+ return size;
+ }
+
+ void DoReads(int n);
+
+ private:
+ std::string dbname_;
+ Cache* tiny_cache_;
+ Options options_;
+ DB* db_;
+};
+
+static const int kValueSize = 200 * 1024;
+static const int kTotalSize = 100 * 1024 * 1024;
+static const int kCount = kTotalSize / kValueSize;
+
+// Read through the first n keys repeatedly and check that they get
+// compacted (verified by checking the size of the key space).
+void AutoCompactTest::DoReads(int n) {
+ std::string value(kValueSize, 'x');
+ DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+
+ // Fill database
+ for (int i = 0; i < kCount; i++) {
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), Key(i), value));
+ }
+ ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
+
+ // Delete everything
+ for (int i = 0; i < kCount; i++) {
+ ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), Key(i)));
+ }
+ ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
+
+ // Get initial measurement of the space we will be reading.
+ const int64_t initial_size = Size(Key(0), Key(n));
+ const int64_t initial_other_size = Size(Key(n), Key(kCount));
+
+ // Read until size drops significantly.
+ std::string limit_key = Key(n);
+ for (int read = 0; true; read++) {
+ ASSERT_LT(read, 100) << "Taking too long to compact";
+ Iterator* iter = db_->NewIterator(ReadOptions());
+ for (iter->SeekToFirst();
+ iter->Valid() && iter->key().ToString() < limit_key; iter->Next()) {
+ // Drop data
+ }
+ delete iter;
+ // Wait a little bit to allow any triggered compactions to complete.
+ Env::Default()->SleepForMicroseconds(1000000);
+ uint64_t size = Size(Key(0), Key(n));
+ std::fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
+ size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
+ if (size <= initial_size / 10) {
+ break;
+ }
+ }
+
+ // Verify that the size of the key space not touched by the reads
+ // is pretty much unchanged.
+ const int64_t final_other_size = Size(Key(n), Key(kCount));
+ ASSERT_LE(final_other_size, initial_other_size + 1048576);
+ ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
+}
+
+TEST_F(AutoCompactTest, ReadAll) { DoReads(kCount); }
+
+TEST_F(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
+
+} // namespace leveldb
diff --git a/zig/libs/leveldb/libs/leveldb/db/builder.cc b/zig/libs/leveldb/libs/leveldb/db/builder.cc
new file mode 100644
index 0000000..e6329e0
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/db/builder.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/builder.h"
+
+#include "db/dbformat.h"
+#include "db/filename.h"
+#include "db/table_cache.h"
+#include "db/version_edit.h"
+#include "leveldb/db.h"
+#include "leveldb/env.h"
+#include "leveldb/iterator.h"
+
+namespace leveldb {
+
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+ TableCache* table_cache, Iterator* iter, FileMetaData* meta) {
+ Status s;
+ meta->file_size = 0;
+ iter->SeekToFirst();
+
+ std::string fname = TableFileName(dbname, meta->number);
+ if (iter->Valid()) {
+ WritableFile* file;
+ s = env->NewWritableFile(fname, &file);
+ if (!s.ok()) {
+ return s;
+ }
+
+ TableBuilder* builder = new TableBuilder(options, file);
+ meta->smallest.DecodeFrom(iter->key());
+ Slice key;
+ for (; iter->Valid(); iter->Next()) {
+ key = iter->key();
+ builder->Add(key, iter->value());
+ }
+ if (!key.empty()) {
+ meta->largest.DecodeFrom(key);
+ }
+
+ // Finish and check for builder errors
+ s = builder->Finish();
+ if (s.ok()) {
+ meta->file_size = builder->FileSize();
+ assert(meta->file_size > 0);
+ }
+ delete builder;
+
+ // Finish and check for file errors
+ if (s.ok()) {
+ s = file->Sync();
+ }
+ if (s.ok()) {
+ s = file->Close();
+ }
+ delete file;
+ file = nullptr;
+
+ if (s.ok()) {
+ // Verify that the table is usable
+ Iterator* it = table_cache->NewIterator(ReadOptions(), meta->number,
+ meta->file_size);
+ s = it->status();
+ delete it;
+ }
+ }
+
+ // Check for input iterator errors
+ if (!iter->status().ok()) {
+ s = iter->status();
+ }
+
+ if (s.ok() && meta->file_size > 0) {
+ // Keep it
+ } else {
+ env->RemoveFile(fname);
+ }
+ return s;
+}
+
+} // namespace leveldb
diff --git a/zig/libs/leveldb/libs/leveldb/db/builder.h b/zig/libs/leveldb/libs/leveldb/db/builder.h
new file mode 100644
index 0000000..7bd0b80
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/db/builder.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_DB_BUILDER_H_
+#define STORAGE_LEVELDB_DB_BUILDER_H_
+
+#include "leveldb/status.h"
+
+namespace leveldb {
+
+struct Options;
+struct FileMetaData;
+
+class Env;
+class Iterator;
+class TableCache;
+class VersionEdit;
+
+// Build a Table file from the contents of *iter. The generated file
+// will be named according to meta->number. On success, the rest of
+// *meta will be filled with metadata about the generated table.
+// If no data is present in *iter, meta->file_size will be set to
+// zero, and no Table file will be produced.
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+ TableCache* table_cache, Iterator* iter, FileMetaData* meta);
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_DB_BUILDER_H_
diff --git a/zig/libs/leveldb/libs/leveldb/db/c.cc b/zig/libs/leveldb/libs/leveldb/db/c.cc
new file mode 100644
index 0000000..8bdde38
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/db/c.cc
@@ -0,0 +1,565 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "leveldb/c.h"
+
+#include <string.h>
+
+#include <cstdint>
+#include <cstdlib>
+
+#include "leveldb/cache.h"
+#include "leveldb/comparator.h"
+#include "leveldb/db.h"
+#include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
+#include "leveldb/iterator.h"
+#include "leveldb/options.h"
+#include "leveldb/status.h"
+#include "leveldb/write_batch.h"
+
+using leveldb::Cache;
+using leveldb::Comparator;
+using leveldb::CompressionType;
+using leveldb::DB;
+using leveldb::Env;
+using leveldb::FileLock;
+using leveldb::FilterPolicy;
+using leveldb::Iterator;
+using leveldb::kMajorVersion;
+using leveldb::kMinorVersion;
+using leveldb::Logger;
+using leveldb::NewBloomFilterPolicy;
+using leveldb::NewLRUCache;
+using leveldb::Options;
+using leveldb::RandomAccessFile;
+using leveldb::Range;
+using leveldb::ReadOptions;
+using leveldb::SequentialFile;
+using leveldb::Slice;
+using leveldb::Snapshot;
+using leveldb::Status;
+using leveldb::WritableFile;
+using leveldb::WriteBatch;
+using leveldb::WriteOptions;
+
+extern "C" {
+
+struct leveldb_t {
+ DB* rep;
+};
+struct leveldb_iterator_t {
+ Iterator* rep;
+};
+struct leveldb_writebatch_t {
+ WriteBatch rep;
+};
+struct leveldb_snapshot_t {
+ const Snapshot* rep;
+};
+struct leveldb_readoptions_t {
+ ReadOptions rep;
+};
+struct leveldb_writeoptions_t {
+ WriteOptions rep;
+};
+struct leveldb_options_t {
+ Options rep;
+};
+struct leveldb_cache_t {
+ Cache* rep;
+};
+struct leveldb_seqfile_t {
+ SequentialFile* rep;
+};
+struct leveldb_randomfile_t {
+ RandomAccessFile* rep;
+};
+struct leveldb_writablefile_t {
+ WritableFile* rep;
+};
+struct leveldb_logger_t {
+ Logger* rep;
+};
+struct leveldb_filelock_t {
+ FileLock* rep;
+};
+
+struct leveldb_comparator_t : public Comparator {
+ ~leveldb_comparator_t() override { (*destructor_)(state_); }
+
+ int Compare(const Slice& a, const Slice& b) const override {
+ return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
+ }
+
+ const char* Name() const override { return (*name_)(state_); }
+
+ // No-ops since the C binding does not support key shortening methods.
+ void FindShortestSeparator(std::string*, const Slice&) const override {}
+ void FindShortSuccessor(std::string* key) const override {}
+
+ void* state_;
+ void (*destructor_)(void*);
+ int (*compare_)(void*, const char* a, size_t alen, const char* b,
+ size_t blen);
+ const char* (*name_)(void*);
+};
+
+struct leveldb_filterpolicy_t : public FilterPolicy {
+ ~leveldb_filterpolicy_t() override { (*destructor_)(state_); }
+
+ const char* Name() const override { return (*name_)(state_); }
+
+ void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
+ std::vector<const char*> key_pointers(n);
+ std::vector<size_t> key_sizes(n);
+ for (int i = 0; i < n; i++) {
+ key_pointers[i] = keys[i].data();
+ key_sizes[i] = keys[i].size();
+ }
+ size_t len;
+ char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len);
+ dst->append(filter, len);
+ std::free(filter);
+ }
+
+ bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
+ return (*key_match_)(state_, key.data(), key.size(), filter.data(),
+ filter.size());
+ }
+
+ void* state_;
+ void (*destructor_)(void*);
+ const char* (*name_)(void*);
+ char* (*create_)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length);
+ uint8_t (*key_match_)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length);
+};
+
+struct leveldb_env_t {
+ Env* rep;
+ bool is_default;
+};
+
+static bool SaveError(char** errptr, const Status& s) {
+ assert(errptr != nullptr);
+ if (s.ok()) {
+ return false;
+ } else if (*errptr == nullptr) {
+ *errptr = strdup(s.ToString().c_str());
+ } else {
+ // TODO(sanjay): Merge with existing error?
+ std::free(*errptr);
+ *errptr = strdup(s.ToString().c_str());
+ }
+ return true;
+}
+
+static char* CopyString(const std::string& str) {
+ char* result =
+ reinterpret_cast<char*>(std::malloc(sizeof(char) * str.size()));
+ std::memcpy(result, str.data(), sizeof(char) * str.size());
+ return result;
+}
+
+leveldb_t* leveldb_open(const leveldb_options_t* options, const char* name,
+ char** errptr) {
+ DB* db;
+ if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
+ return nullptr;
+ }
+ leveldb_t* result = new leveldb_t;
+ result->rep = db;
+ return result;
+}
+
+void leveldb_close(leveldb_t* db) {
+ delete db->rep;
+ delete db;
+}
+
+void leveldb_put(leveldb_t* db, const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, const char* val, size_t vallen,
+ char** errptr) {
+ SaveError(errptr,
+ db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
+}
+
+void leveldb_delete(leveldb_t* db, const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, char** errptr) {
+ SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
+}
+
+void leveldb_write(leveldb_t* db, const leveldb_writeoptions_t* options,
+ leveldb_writebatch_t* batch, char** errptr) {
+ SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
+}
+
+char* leveldb_get(leveldb_t* db, const leveldb_readoptions_t* options,
+ const char* key, size_t keylen, size_t* vallen,
+ char** errptr) {
+ char* result = nullptr;
+ std::string tmp;
+ Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
+ if (s.ok()) {
+ *vallen = tmp.size();
+ result = CopyString(tmp);
+ } else {
+ *vallen = 0;
+ if (!s.IsNotFound()) {
+ SaveError(errptr, s);
+ }
+ }
+ return result;
+}
+
+leveldb_iterator_t* leveldb_create_iterator(
+ leveldb_t* db, const leveldb_readoptions_t* options) {
+ leveldb_iterator_t* result = new leveldb_iterator_t;
+ result->rep = db->rep->NewIterator(options->rep);
+ return result;
+}
+
+const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db) {
+ leveldb_snapshot_t* result = new leveldb_snapshot_t;
+ result->rep = db->rep->GetSnapshot();
+ return result;
+}
+
+void leveldb_release_snapshot(leveldb_t* db,
+ const leveldb_snapshot_t* snapshot) {
+ db->rep->ReleaseSnapshot(snapshot->rep);
+ delete snapshot;
+}
+
+char* leveldb_property_value(leveldb_t* db, const char* propname) {
+ std::string tmp;
+ if (db->rep->GetProperty(Slice(propname), &tmp)) {
+ // We use strdup() since we expect human readable output.
+ return strdup(tmp.c_str());
+ } else {
+ return nullptr;
+ }
+}
+
+void leveldb_approximate_sizes(leveldb_t* db, int num_ranges,
+ const char* const* range_start_key,
+ const size_t* range_start_key_len,
+ const char* const* range_limit_key,
+ const size_t* range_limit_key_len,
+ uint64_t* sizes) {
+ Range* ranges = new Range[num_ranges];
+ for (int i = 0; i < num_ranges; i++) {
+ ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
+ ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
+ }
+ db->rep->GetApproximateSizes(ranges, num_ranges, sizes);
+ delete[] ranges;
+}
+
+void leveldb_compact_range(leveldb_t* db, const char* start_key,
+ size_t start_key_len, const char* limit_key,
+ size_t limit_key_len) {
+ Slice a, b;
+ db->rep->CompactRange(
+ // Pass null Slice if corresponding "const char*" is null
+ (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
+ (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
+}
+
+void leveldb_destroy_db(const leveldb_options_t* options, const char* name,
+ char** errptr) {
+ SaveError(errptr, DestroyDB(name, options->rep));
+}
+
+void leveldb_repair_db(const leveldb_options_t* options, const char* name,
+ char** errptr) {
+ SaveError(errptr, RepairDB(name, options->rep));
+}
+
+void leveldb_iter_destroy(leveldb_iterator_t* iter) {
+ delete iter->rep;
+ delete iter;
+}
+
+uint8_t leveldb_iter_valid(const leveldb_iterator_t* iter) {
+ return iter->rep->Valid();
+}
+
+void leveldb_iter_seek_to_first(leveldb_iterator_t* iter) {
+ iter->rep->SeekToFirst();
+}
+
+void leveldb_iter_seek_to_last(leveldb_iterator_t* iter) {
+ iter->rep->SeekToLast();
+}
+
+void leveldb_iter_seek(leveldb_iterator_t* iter, const char* k, size_t klen) {
+ iter->rep->Seek(Slice(k, klen));
+}
+
+void leveldb_iter_next(leveldb_iterator_t* iter) { iter->rep->Next(); }
+
+void leveldb_iter_prev(leveldb_iterator_t* iter) { iter->rep->Prev(); }
+
+const char* leveldb_iter_key(const leveldb_iterator_t* iter, size_t* klen) {
+ Slice s = iter->rep->key();
+ *klen = s.size();
+ return s.data();
+}
+
+const char* leveldb_iter_value(const leveldb_iterator_t* iter, size_t* vlen) {
+ Slice s = iter->rep->value();
+ *vlen = s.size();
+ return s.data();
+}
+
+void leveldb_iter_get_error(const leveldb_iterator_t* iter, char** errptr) {
+ SaveError(errptr, iter->rep->status());
+}
+
+leveldb_writebatch_t* leveldb_writebatch_create() {
+ return new leveldb_writebatch_t;
+}
+
+void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { delete b; }
+
+void leveldb_writebatch_clear(leveldb_writebatch_t* b) { b->rep.Clear(); }
+
+void leveldb_writebatch_put(leveldb_writebatch_t* b, const char* key,
+ size_t klen, const char* val, size_t vlen) {
+ b->rep.Put(Slice(key, klen), Slice(val, vlen));
+}
+
+void leveldb_writebatch_delete(leveldb_writebatch_t* b, const char* key,
+ size_t klen) {
+ b->rep.Delete(Slice(key, klen));
+}
+
+void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state,
+ void (*put)(void*, const char* k, size_t klen,
+ const char* v, size_t vlen),
+ void (*deleted)(void*, const char* k,
+ size_t klen)) {
+ class H : public WriteBatch::Handler {
+ public:
+ void* state_;
+ void (*put_)(void*, const char* k, size_t klen, const char* v, size_t vlen);
+ void (*deleted_)(void*, const char* k, size_t klen);
+ void Put(const Slice& key, const Slice& value) override {
+ (*put_)(state_, key.data(), key.size(), value.data(), value.size());
+ }
+ void Delete(const Slice& key) override {
+ (*deleted_)(state_, key.data(), key.size());
+ }
+ };
+ H handler;
+ handler.state_ = state;
+ handler.put_ = put;
+ handler.deleted_ = deleted;
+ b->rep.Iterate(&handler);
+}
+
+void leveldb_writebatch_append(leveldb_writebatch_t* destination,
+ const leveldb_writebatch_t* source) {
+ destination->rep.Append(source->rep);
+}
+
+leveldb_options_t* leveldb_options_create() { return new leveldb_options_t; }
+
+void leveldb_options_destroy(leveldb_options_t* options) { delete options; }
+
+void leveldb_options_set_comparator(leveldb_options_t* opt,
+ leveldb_comparator_t* cmp) {
+ opt->rep.comparator = cmp;
+}
+
+void leveldb_options_set_filter_policy(leveldb_options_t* opt,
+ leveldb_filterpolicy_t* policy) {
+ opt->rep.filter_policy = policy;
+}
+
+void leveldb_options_set_create_if_missing(leveldb_options_t* opt, uint8_t v) {
+ opt->rep.create_if_missing = v;
+}
+
+void leveldb_options_set_error_if_exists(leveldb_options_t* opt, uint8_t v) {
+ opt->rep.error_if_exists = v;
+}
+
+void leveldb_options_set_paranoid_checks(leveldb_options_t* opt, uint8_t v) {
+ opt->rep.paranoid_checks = v;
+}
+
+void leveldb_options_set_env(leveldb_options_t* opt, leveldb_env_t* env) {
+ opt->rep.env = (env ? env->rep : nullptr);
+}
+
+void leveldb_options_set_info_log(leveldb_options_t* opt, leveldb_logger_t* l) {
+ opt->rep.info_log = (l ? l->rep : nullptr);
+}
+
+void leveldb_options_set_write_buffer_size(leveldb_options_t* opt, size_t s) {
+ opt->rep.write_buffer_size = s;
+}
+
+void leveldb_options_set_max_open_files(leveldb_options_t* opt, int n) {
+ opt->rep.max_open_files = n;
+}
+
+void leveldb_options_set_cache(leveldb_options_t* opt, leveldb_cache_t* c) {
+ opt->rep.block_cache = c->rep;
+}
+
+void leveldb_options_set_block_size(leveldb_options_t* opt, size_t s) {
+ opt->rep.block_size = s;
+}
+
+void leveldb_options_set_block_restart_interval(leveldb_options_t* opt, int n) {
+ opt->rep.block_restart_interval = n;
+}
+
+void leveldb_options_set_max_file_size(leveldb_options_t* opt, size_t s) {
+ opt->rep.max_file_size = s;
+}
+
+void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
+ opt->rep.compression = static_cast<CompressionType>(t);
+}
+
+leveldb_comparator_t* leveldb_comparator_create(
+ void* state, void (*destructor)(void*),
+ int (*compare)(void*, const char* a, size_t alen, const char* b,
+ size_t blen),
+ const char* (*name)(void*)) {
+ leveldb_comparator_t* result = new leveldb_comparator_t;
+ result->state_ = state;
+ result->destructor_ = destructor;
+ result->compare_ = compare;
+ result->name_ = name;
+ return result;
+}
+
+void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { delete cmp; }
+
+leveldb_filterpolicy_t* leveldb_filterpolicy_create(
+ void* state, void (*destructor)(void*),
+ char* (*create_filter)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length),
+ uint8_t (*key_may_match)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length),
+ const char* (*name)(void*)) {
+ leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t;
+ result->state_ = state;
+ result->destructor_ = destructor;
+ result->create_ = create_filter;
+ result->key_match_ = key_may_match;
+ result->name_ = name;
+ return result;
+}
+
+void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t* filter) {
+ delete filter;
+}
+
+leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
+ // Make a leveldb_filterpolicy_t, but override all of its methods so
+ // they delegate to a NewBloomFilterPolicy() instead of user
+ // supplied C functions.
+ struct Wrapper : public leveldb_filterpolicy_t {
+ static void DoNothing(void*) {}
+
+ ~Wrapper() { delete rep_; }
+ const char* Name() const { return rep_->Name(); }
+ void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+ return rep_->CreateFilter(keys, n, dst);
+ }
+ bool KeyMayMatch(const Slice& key, const Slice& filter) const {
+ return rep_->KeyMayMatch(key, filter);
+ }
+
+ const FilterPolicy* rep_;
+ };
+ Wrapper* wrapper = new Wrapper;
+ wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
+ wrapper->state_ = nullptr;
+ wrapper->destructor_ = &Wrapper::DoNothing;
+ return wrapper;
+}
+
+leveldb_readoptions_t* leveldb_readoptions_create() {
+ return new leveldb_readoptions_t;
+}
+
+void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; }
+
+void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt,
+ uint8_t v) {
+ opt->rep.verify_checksums = v;
+}
+
+void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt, uint8_t v) {
+ opt->rep.fill_cache = v;
+}
+
+void leveldb_readoptions_set_snapshot(leveldb_readoptions_t* opt,
+ const leveldb_snapshot_t* snap) {
+ opt->rep.snapshot = (snap ? snap->rep : nullptr);
+}
+
+leveldb_writeoptions_t* leveldb_writeoptions_create() {
+ return new leveldb_writeoptions_t;
+}
+
+void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; }
+
+void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt, uint8_t v) {
+ opt->rep.sync = v;
+}
+
+leveldb_cache_t* leveldb_cache_create_lru(size_t capacity) {
+ leveldb_cache_t* c = new leveldb_cache_t;
+ c->rep = NewLRUCache(capacity);
+ return c;
+}
+
+void leveldb_cache_destroy(leveldb_cache_t* cache) {
+ delete cache->rep;
+ delete cache;
+}
+
+leveldb_env_t* leveldb_create_default_env() {
+ leveldb_env_t* result = new leveldb_env_t;
+ result->rep = Env::Default();
+ result->is_default = true;
+ return result;
+}
+
+void leveldb_env_destroy(leveldb_env_t* env) {
+ if (!env->is_default) delete env->rep;
+ delete env;
+}
+
+char* leveldb_env_get_test_directory(leveldb_env_t* env) {
+ std::string result;
+ if (!env->rep->GetTestDirectory(&result).ok()) {
+ return nullptr;
+ }
+
+ char* buffer = static_cast<char*>(std::malloc(result.size() + 1));
+ std::memcpy(buffer, result.data(), result.size());
+ buffer[result.size()] = '\0';
+ return buffer;
+}
+
+void leveldb_free(void* ptr) { std::free(ptr); }
+
+int leveldb_major_version() { return kMajorVersion; }
+
+int leveldb_minor_version() { return kMinorVersion; }
+
+} // end extern "C"
diff --git a/zig/libs/leveldb/libs/leveldb/db/c_test.c b/zig/libs/leveldb/libs/leveldb/db/c_test.c
new file mode 100644
index 0000000..16c77ee
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/db/c_test.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style license that can be
+ found in the LICENSE file. See the AUTHORS file for names of contributors. */
+
+#include "leveldb/c.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+const char* phase = "";
+
+static void StartPhase(const char* name) {
+ fprintf(stderr, "=== Test %s\n", name);
+ phase = name;
+}
+
+#define CheckNoError(err) \
+ if ((err) != NULL) { \
+ fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, (err)); \
+ abort(); \
+ }
+
+#define CheckCondition(cond) \
+ if (!(cond)) { \
+ fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, #cond); \
+ abort(); \
+ }
+
+static void CheckEqual(const char* expected, const char* v, size_t n) {
+ if (expected == NULL && v == NULL) {
+ // ok
+ } else if (expected != NULL && v != NULL && n == strlen(expected) &&
+ memcmp(expected, v, n) == 0) {
+ // ok
+ return;
+ } else {
+ fprintf(stderr, "%s: expected '%s', got '%s'\n",
+ phase,
+ (expected ? expected : "(null)"),
+ (v ? v : "(null)"));
+ abort();
+ }
+}
+
+static void Free(char** ptr) {
+ if (*ptr) {
+ free(*ptr);
+ *ptr = NULL;
+ }
+}
+
+static void CheckGet(
+ leveldb_t* db,
+ const leveldb_readoptions_t* options,
+ const char* key,
+ const char* expected) {
+ char* err = NULL;
+ size_t val_len;
+ char* val;
+ val = leveldb_get(db, options, key, strlen(key), &val_len, &err);
+ CheckNoError(err);
+ CheckEqual(expected, val, val_len);
+ Free(&val);
+}
+
+static void CheckIter(leveldb_iterator_t* iter,
+ const char* key, const char* val) {
+ size_t len;
+ const char* str;
+ str = leveldb_iter_key(iter, &len);
+ CheckEqual(key, str, len);
+ str = leveldb_iter_value(iter, &len);
+ CheckEqual(val, str, len);
+}
+
+// Callback from leveldb_writebatch_iterate()
+static void CheckPut(void* ptr,
+ const char* k, size_t klen,
+ const char* v, size_t vlen) {
+ int* state = (int*) ptr;
+ CheckCondition(*state < 2);
+ switch (*state) {
+ case 0:
+ CheckEqual("bar", k, klen);
+ CheckEqual("b", v, vlen);
+ break;
+ case 1:
+ CheckEqual("box", k, klen);
+ CheckEqual("c", v, vlen);
+ break;
+ }
+ (*state)++;
+}
+
+// Callback from leveldb_writebatch_iterate()
+static void CheckDel(void* ptr, const char* k, size_t klen) {
+ int* state = (int*) ptr;
+ CheckCondition(*state == 2);
+ CheckEqual("bar", k, klen);
+ (*state)++;
+}
+
+static void CmpDestroy(void* arg) { }
+
+static int CmpCompare(void* arg, const char* a, size_t alen,
+ const char* b, size_t blen) {
+ int n = (alen < blen) ? alen : blen;
+ int r = memcmp(a, b, n);
+ if (r == 0) {
+ if (alen < blen) r = -1;
+ else if (alen > blen) r = +1;
+ }
+ return r;
+}
+
+static const char* CmpName(void* arg) {
+ return "foo";
+}
+
+// Custom filter policy
+static uint8_t fake_filter_result = 1;
+static void FilterDestroy(void* arg) { }
+static const char* FilterName(void* arg) {
+ return "TestFilter";
+}
+static char* FilterCreate(
+ void* arg,
+ const char* const* key_array, const size_t* key_length_array,
+ int num_keys,
+ size_t* filter_length) {
+ *filter_length = 4;
+ char* result = malloc(4);
+ memcpy(result, "fake", 4);
+ return result;
+}
+uint8_t FilterKeyMatch(void* arg, const char* key, size_t length,
+ const char* filter, size_t filter_length) {
+ CheckCondition(filter_length == 4);
+ CheckCondition(memcmp(filter, "fake", 4) == 0);
+ return fake_filter_result;
+}
+
+int main(int argc, char** argv) {
+ leveldb_t* db;
+ leveldb_comparator_t* cmp;
+ leveldb_cache_t* cache;
+ leveldb_env_t* env;
+ leveldb_options_t* options;
+ leveldb_readoptions_t* roptions;
+ leveldb_writeoptions_t* woptions;
+ char* dbname;
+ char* err = NULL;
+ int run = -1;
+
+ CheckCondition(leveldb_major_version() >= 1);
+ CheckCondition(leveldb_minor_version() >= 1);
+
+ StartPhase("create_objects");
+ cmp = leveldb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName);
+ env = leveldb_create_default_env();
+ cache = leveldb_cache_create_lru(100000);
+ dbname = leveldb_env_get_test_directory(env);
+ CheckCondition(dbname != NULL);
+
+ options = leveldb_options_create();
+ leveldb_options_set_comparator(options, cmp);
+ leveldb_options_set_error_if_exists(options, 1);
+ leveldb_options_set_cache(options, cache);
+ leveldb_options_set_env(options, env);
+ leveldb_options_set_info_log(options, NULL);
+ leveldb_options_set_write_buffer_size(options, 100000);
+ leveldb_options_set_paranoid_checks(options, 1);
+ leveldb_options_set_max_open_files(options, 10);
+ leveldb_options_set_block_size(options, 1024);
+ leveldb_options_set_block_restart_interval(options, 8);
+ leveldb_options_set_max_file_size(options, 3 << 20);
+ leveldb_options_set_compression(options, leveldb_no_compression);
+
+ roptions = leveldb_readoptions_create();
+ leveldb_readoptions_set_verify_checksums(roptions, 1);
+ leveldb_readoptions_set_fill_cache(roptions, 0);
+
+ woptions = leveldb_writeoptions_create();
+ leveldb_writeoptions_set_sync(woptions, 1);
+
+ StartPhase("destroy");
+ leveldb_destroy_db(options, dbname, &err);
+ Free(&err);
+
+ StartPhase("open_error");
+ db = leveldb_open(options, dbname, &err);
+ CheckCondition(err != NULL);
+ Free(&err);
+
+ StartPhase("leveldb_free");
+ db = leveldb_open(options, dbname, &err);
+ CheckCondition(err != NULL);
+ leveldb_free(err);
+ err = NULL;
+
+ StartPhase("open");
+ leveldb_options_set_create_if_missing(options, 1);
+ db = leveldb_open(options, dbname, &err);
+ CheckNoError(err);
+ CheckGet(db, roptions, "foo", NULL);
+
+ StartPhase("put");
+ leveldb_put(db, woptions, "foo", 3, "hello", 5, &err);
+ CheckNoError(err);
+ CheckGet(db, roptions, "foo", "hello");
+
+ StartPhase("compactall");
+ leveldb_compact_range(db, NULL, 0, NULL, 0);
+ CheckGet(db, roptions, "foo", "hello");
+
+ StartPhase("compactrange");
+ leveldb_compact_range(db, "a", 1, "z", 1);
+ CheckGet(db, roptions, "foo", "hello");
+
+ StartPhase("writebatch");
+ {
+ leveldb_writebatch_t* wb = leveldb_writebatch_create();
+ leveldb_writebatch_put(wb, "foo", 3, "a", 1);
+ leveldb_writebatch_clear(wb);
+ leveldb_writebatch_put(wb, "bar", 3, "b", 1);
+ leveldb_writebatch_put(wb, "box", 3, "c", 1);
+
+ leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
+ leveldb_writebatch_delete(wb2, "bar", 3);
+ leveldb_writebatch_append(wb, wb2);
+ leveldb_writebatch_destroy(wb2);
+
+ leveldb_write(db, woptions, wb, &err);
+ CheckNoError(err);
+ CheckGet(db, roptions, "foo", "hello");
+ CheckGet(db, roptions, "bar", NULL);
+ CheckGet(db, roptions, "box", "c");
+
+ int pos = 0;
+ leveldb_writebatch_iterate(wb, &pos, CheckPut, CheckDel);
+ CheckCondition(pos == 3);
+ leveldb_writebatch_destroy(wb);
+ }
+
+ StartPhase("iter");
+ {
+ leveldb_iterator_t* iter = leveldb_create_iterator(db, roptions);
+ CheckCondition(!leveldb_iter_valid(iter));
+ leveldb_iter_seek_to_first(iter);
+ CheckCondition(leveldb_iter_valid(iter));
+ CheckIter(iter, "box", "c");
+ leveldb_iter_next(iter);
+ CheckIter(iter, "foo", "hello");
+ leveldb_iter_prev(iter);
+ CheckIter(iter, "box", "c");
+ leveldb_iter_prev(iter);
+ CheckCondition(!leveldb_iter_valid(iter));
+ leveldb_iter_seek_to_last(iter);
+ CheckIter(iter, "foo", "hello");
+ leveldb_iter_seek(iter, "b", 1);
+ CheckIter(iter, "box", "c");
+ leveldb_iter_get_error(iter, &err);
+ CheckNoError(err);
+ leveldb_iter_destroy(iter);
+ }
+
+ StartPhase("approximate_sizes");
+ {
+ int i;
+ int n = 20000;
+ char keybuf[100];
+ char valbuf[100];
+ uint64_t sizes[2];
+ const char* start[2] = { "a", "k00000000000000010000" };
+ size_t start_len[2] = { 1, 21 };
+ const char* limit[2] = { "k00000000000000010000", "z" };
+ size_t limit_len[2] = { 21, 1 };
+ leveldb_writeoptions_set_sync(woptions, 0);
+ for (i = 0; i < n; i++) {
+ snprintf(keybuf, sizeof(keybuf), "k%020d", i);
+ snprintf(valbuf, sizeof(valbuf), "v%020d", i);
+ leveldb_put(db, woptions, keybuf, strlen(keybuf), valbuf, strlen(valbuf),
+ &err);
+ CheckNoError(err);
+ }
+ leveldb_approximate_sizes(db, 2, start, start_len, limit, limit_len, sizes);
+ CheckCondition(sizes[0] > 0);
+ CheckCondition(sizes[1] > 0);
+ }
+
+ StartPhase("property");
+ {
+ char* prop = leveldb_property_value(db, "nosuchprop");
+ CheckCondition(prop == NULL);
+ prop = leveldb_property_value(db, "leveldb.stats");
+ CheckCondition(prop != NULL);
+ Free(&prop);
+ }
+
+ StartPhase("snapshot");
+ {
+ const leveldb_snapshot_t* snap;
+ snap = leveldb_create_snapshot(db);
+ leveldb_delete(db, woptions, "foo", 3, &err);
+ CheckNoError(err);
+ leveldb_readoptions_set_snapshot(roptions, snap);
+ CheckGet(db, roptions, "foo", "hello");
+ leveldb_readoptions_set_snapshot(roptions, NULL);
+ CheckGet(db, roptions, "foo", NULL);
+ leveldb_release_snapshot(db, snap);
+ }
+
+ StartPhase("repair");
+ {
+ leveldb_close(db);
+ leveldb_options_set_create_if_missing(options, 0);
+ leveldb_options_set_error_if_exists(options, 0);
+ leveldb_repair_db(options, dbname, &err);
+ CheckNoError(err);
+ db = leveldb_open(options, dbname, &err);
+ CheckNoError(err);
+ CheckGet(db, roptions, "foo", NULL);
+ CheckGet(db, roptions, "bar", NULL);
+ CheckGet(db, roptions, "box", "c");
+ leveldb_options_set_create_if_missing(options, 1);
+ leveldb_options_set_error_if_exists(options, 1);
+ }
+
+ StartPhase("filter");
+ for (run = 0; run < 2; run++) {
+ // First run uses custom filter, second run uses bloom filter
+ CheckNoError(err);
+ leveldb_filterpolicy_t* policy;
+ if (run == 0) {
+ policy = leveldb_filterpolicy_create(
+ NULL, FilterDestroy, FilterCreate, FilterKeyMatch, FilterName);
+ } else {
+ policy = leveldb_filterpolicy_create_bloom(10);
+ }
+
+ // Create new database
+ leveldb_close(db);
+ leveldb_destroy_db(options, dbname, &err);
+ leveldb_options_set_filter_policy(options, policy);
+ db = leveldb_open(options, dbname, &err);
+ CheckNoError(err);
+ leveldb_put(db, woptions, "foo", 3, "foovalue", 8, &err);
+ CheckNoError(err);
+ leveldb_put(db, woptions, "bar", 3, "barvalue", 8, &err);
+ CheckNoError(err);
+ leveldb_compact_range(db, NULL, 0, NULL, 0);
+
+ fake_filter_result = 1;
+ CheckGet(db, roptions, "foo", "foovalue");
+ CheckGet(db, roptions, "bar", "barvalue");
+ if (run == 0) {
+ // Must not find value when custom filter returns false
+ fake_filter_result = 0;
+ CheckGet(db, roptions, "foo", NULL);
+ CheckGet(db, roptions, "bar", NULL);
+ fake_filter_result = 1;
+
+ CheckGet(db, roptions, "foo", "foovalue");
+ CheckGet(db, roptions, "bar", "barvalue");
+ }
+ leveldb_options_set_filter_policy(options, NULL);
+ leveldb_filterpolicy_destroy(policy);
+ }
+
+ StartPhase("cleanup");
+ leveldb_close(db);
+ leveldb_options_destroy(options);
+ leveldb_readoptions_destroy(roptions);
+ leveldb_writeoptions_destroy(woptions);
+ leveldb_free(dbname);
+ leveldb_cache_destroy(cache);
+ leveldb_comparator_destroy(cmp);
+ leveldb_env_destroy(env);
+
+ fprintf(stderr, "PASS\n");
+ return 0;
+}
diff --git a/zig/libs/leveldb/libs/leveldb/db/corruption_test.cc b/zig/libs/leveldb/libs/leveldb/db/corruption_test.cc
new file mode 100644
index 0000000..dc7da76
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/db/corruption_test.cc
@@ -0,0 +1,362 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <sys/types.h>
+
+#include "gtest/gtest.h"
+#include "db/db_impl.h"
+#include "db/filename.h"
+#include "db/log_format.h"
+#include "db/version_set.h"
+#include "leveldb/cache.h"
+#include "leveldb/db.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
+#include "util/logging.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+static const int kValueSize = 1000;
+
+class CorruptionTest : public testing::Test {
+ public:
+ CorruptionTest()
+ : db_(nullptr),
+ dbname_("/memenv/corruption_test"),
+ tiny_cache_(NewLRUCache(100)) {
+ options_.env = &env_;
+ options_.block_cache = tiny_cache_;
+ DestroyDB(dbname_, options_);
+
+ options_.create_if_missing = true;
+ Reopen();
+ options_.create_if_missing = false;
+ }
+
+ ~CorruptionTest() {
+ delete db_;
+ delete tiny_cache_;
+ }
+
+ Status TryReopen() {
+ delete db_;
+ db_ = nullptr;
+ return DB::Open(options_, dbname_, &db_);
+ }
+
+ void Reopen() { ASSERT_LEVELDB_OK(TryReopen()); }
+
+ void RepairDB() {
+ delete db_;
+ db_ = nullptr;
+ ASSERT_LEVELDB_OK(::leveldb::RepairDB(dbname_, options_));
+ }
+
+ void Build(int n) {
+ std::string key_space, value_space;
+ WriteBatch batch;
+ for (int i = 0; i < n; i++) {
+ // if ((i % 100) == 0) std::fprintf(stderr, "@ %d of %d\n", i, n);
+ Slice key = Key(i, &key_space);
+ batch.Clear();
+ batch.Put(key, Value(i, &value_space));
+ WriteOptions options;
+ // Corrupt() doesn't work without this sync on windows; stat reports 0 for
+ // the file size.
+ if (i == n - 1) {
+ options.sync = true;
+ }
+ ASSERT_LEVELDB_OK(db_->Write(options, &batch));
+ }
+ }
+
+ void Check(int min_expected, int max_expected) {
+ int next_expected = 0;
+ int missed = 0;
+ int bad_keys = 0;
+ int bad_values = 0;
+ int correct = 0;
+ std::string value_space;
+ Iterator* iter = db_->NewIterator(ReadOptions());
+ for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ uint64_t key;
+ Slice in(iter->key());
+ if (in == "" || in == "~") {
+ // Ignore boundary keys.
+ continue;
+ }
+ if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
+ key < next_expected) {
+ bad_keys++;
+ continue;
+ }
+ missed += (key - next_expected);
+ next_expected = key + 1;
+ if (iter->value() != Value(key, &value_space)) {
+ bad_values++;
+ } else {
+ correct++;
+ }
+ }
+ delete iter;
+
+ std::fprintf(
+ stderr,
+ "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n",
+ min_expected, max_expected, correct, bad_keys, bad_values, missed);
+ ASSERT_LE(min_expected, correct);
+ ASSERT_GE(max_expected, correct);
+ }
+
+ void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
+ // Pick file to corrupt
+ std::vector filenames;
+ ASSERT_LEVELDB_OK(env_.target()->GetChildren(dbname_, &filenames));
+ uint64_t number;
+ FileType type;
+ std::string fname;
+ int picked_number = -1;
+ for (size_t i = 0; i < filenames.size(); i++) {
+ if (ParseFileName(filenames[i], &number, &type) && type == filetype &&
+ int(number) > picked_number) { // Pick latest file
+ fname = dbname_ + "/" + filenames[i];
+ picked_number = number;
+ }
+ }
+ ASSERT_TRUE(!fname.empty()) << filetype;
+
+ uint64_t file_size;
+ ASSERT_LEVELDB_OK(env_.target()->GetFileSize(fname, &file_size));
+
+ if (offset < 0) {
+ // Relative to end of file; make it absolute
+ if (-offset > file_size) {
+ offset = 0;
+ } else {
+ offset = file_size + offset;
+ }
+ }
+ if (offset > file_size) {
+ offset = file_size;
+ }
+ if (offset + bytes_to_corrupt > file_size) {
+ bytes_to_corrupt = file_size - offset;
+ }
+
+ // Do it
+ std::string contents;
+ Status s = ReadFileToString(env_.target(), fname, &contents);
+ ASSERT_TRUE(s.ok()) << s.ToString();
+ for (int i = 0; i < bytes_to_corrupt; i++) {
+ contents[i + offset] ^= 0x80;
+ }
+ s = WriteStringToFile(env_.target(), contents, fname);
+ ASSERT_TRUE(s.ok()) << s.ToString();
+ }
+
+ int Property(const std::string& name) {
+ std::string property;
+ int result;
+ if (db_->GetProperty(name, &property) &&
+ sscanf(property.c_str(), "%d", &result) == 1) {
+ return result;
+ } else {
+ return -1;
+ }
+ }
+
+ // Return the ith key
+ Slice Key(int i, std::string* storage) {
+ char buf[100];
+ std::snprintf(buf, sizeof(buf), "%016d", i);
+ storage->assign(buf, strlen(buf));
+ return Slice(*storage);
+ }
+
+ // Return the value to associate with the specified key
+ Slice Value(int k, std::string* storage) {
+ Random r(k);
+ return test::RandomString(&r, kValueSize, storage);
+ }
+
+ test::ErrorEnv env_;
+ Options options_;
+ DB* db_;
+
+ private:
+ std::string dbname_;
+ Cache* tiny_cache_;
+};
+
+TEST_F(CorruptionTest, Recovery) {
+ Build(100);
+ Check(100, 100);
+ Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
+ Corrupt(kLogFile, log::kBlockSize + 1000, 1); // Somewhere in second block
+ Reopen();
+
+ // The 64 records in the first two log blocks are completely lost.
+ Check(36, 36);
+}
+
+TEST_F(CorruptionTest, RecoverWriteError) {
+ env_.writable_file_error_ = true;
+ Status s = TryReopen();
+ ASSERT_TRUE(!s.ok());
+}
+
+TEST_F(CorruptionTest, NewFileErrorDuringWrite) {
+ // Do enough writing to force minor compaction
+ env_.writable_file_error_ = true;
+ const int num = 3 + (Options().write_buffer_size / kValueSize);
+ std::string value_storage;
+ Status s;
+ for (int i = 0; s.ok() && i < num; i++) {
+ WriteBatch batch;
+ batch.Put("a", Value(100, &value_storage));
+ s = db_->Write(WriteOptions(), &batch);
+ }
+ ASSERT_TRUE(!s.ok());
+ ASSERT_GE(env_.num_writable_file_errors_, 1);
+ env_.writable_file_error_ = false;
+ Reopen();
+}
+
+TEST_F(CorruptionTest, TableFile) {
+ Build(100);
+ DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+ dbi->TEST_CompactMemTable();
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
+ dbi->TEST_CompactRange(1, nullptr, nullptr);
+
+ Corrupt(kTableFile, 100, 1);
+ Check(90, 99);
+}
+
+TEST_F(CorruptionTest, TableFileRepair) {
+ options_.block_size = 2 * kValueSize; // Limit scope of corruption
+ options_.paranoid_checks = true;
+ Reopen();
+ Build(100);
+ DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+ dbi->TEST_CompactMemTable();
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
+ dbi->TEST_CompactRange(1, nullptr, nullptr);
+
+ Corrupt(kTableFile, 100, 1);
+ RepairDB();
+ Reopen();
+ Check(95, 99);
+}
+
+TEST_F(CorruptionTest, TableFileIndexData) {
+ Build(10000); // Enough to build multiple Tables
+ DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+ dbi->TEST_CompactMemTable();
+
+ Corrupt(kTableFile, -2000, 500);
+ Reopen();
+ Check(5000, 9999);
+}
+
+TEST_F(CorruptionTest, MissingDescriptor) {
+ Build(1000);
+ RepairDB();
+ Reopen();
+ Check(1000, 1000);
+}
+
+TEST_F(CorruptionTest, SequenceNumberRecovery) {
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v3"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v4"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v5"));
+ RepairDB();
+ Reopen();
+ std::string v;
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_EQ("v5", v);
+ // Write something. If sequence number was not recovered properly,
+ // it will be hidden by an earlier write.
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v6"));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_EQ("v6", v);
+ Reopen();
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_EQ("v6", v);
+}
+
+TEST_F(CorruptionTest, CorruptedDescriptor) {
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "hello"));
+ DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+ dbi->TEST_CompactMemTable();
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
+
+ Corrupt(kDescriptorFile, 0, 1000);
+ Status s = TryReopen();
+ ASSERT_TRUE(!s.ok());
+
+ RepairDB();
+ Reopen();
+ std::string v;
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_EQ("hello", v);
+}
+
+TEST_F(CorruptionTest, CompactionInputError) {
+ Build(10);
+ DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+ dbi->TEST_CompactMemTable();
+ const int last = config::kMaxMemCompactLevel;
+ ASSERT_EQ(1, Property("leveldb.num-files-at-level" + NumberToString(last)));
+
+ Corrupt(kTableFile, 100, 1);
+ Check(5, 9);
+
+ // Force compactions by writing lots of values
+ Build(10000);
+ Check(10000, 10000);
+}
+
+TEST_F(CorruptionTest, CompactionInputErrorParanoid) {
+ options_.paranoid_checks = true;
+ options_.write_buffer_size = 512 << 10;
+ Reopen();
+ DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+
+ // Make multiple inputs so we need to compact.
+ for (int i = 0; i < 2; i++) {
+ Build(10);
+ dbi->TEST_CompactMemTable();
+ Corrupt(kTableFile, 100, 1);
+ env_.SleepForMicroseconds(100000);
+ }
+ dbi->CompactRange(nullptr, nullptr);
+
+ // Write must fail because of corrupted table
+ std::string tmp1, tmp2;
+ Status s = db_->Put(WriteOptions(), Key(5, &tmp1), Value(5, &tmp2));
+ ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db";
+}
+
+TEST_F(CorruptionTest, UnrelatedKeys) {
+ Build(10);
+ DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+ dbi->TEST_CompactMemTable();
+ Corrupt(kTableFile, 100, 1);
+
+ std::string tmp1, tmp2;
+ ASSERT_LEVELDB_OK(
+ db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2)));
+ std::string v;
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
+ ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
+ dbi->TEST_CompactMemTable();
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
+ ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
+}
+
+} // namespace leveldb
diff --git a/zig/libs/leveldb/libs/leveldb/db/db_impl.cc b/zig/libs/leveldb/libs/leveldb/db/db_impl.cc
new file mode 100644
index 0000000..f96d245
--- /dev/null
+++ b/zig/libs/leveldb/libs/leveldb/db/db_impl.cc
@@ -0,0 +1,1578 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/db_impl.h"
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+#include <cstdio>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "db/builder.h"
+#include "db/db_iter.h"
+#include "db/dbformat.h"
+#include "db/filename.h"
+#include "db/log_reader.h"
+#include "db/log_writer.h"
+#include "db/memtable.h"
+#include "db/table_cache.h"
+#include "db/version_set.h"
+#include "db/write_batch_internal.h"
+#include "leveldb/db.h"
+#include "leveldb/env.h"
+#include "leveldb/status.h"
+#include "leveldb/table.h"
+#include "leveldb/table_builder.h"
+#include "port/port.h"
+#include "table/block.h"
+#include "table/merger.h"
+#include "table/two_level_iterator.h"
+#include "util/coding.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+
+namespace leveldb {
+
+const int kNumNonTableCacheFiles = 10;
+
+// Information kept for every waiting writer
+struct DBImpl::Writer {
+ explicit Writer(port::Mutex* mu)
+ : batch(nullptr), sync(false), done(false), cv(mu) {}
+
+ Status status;
+ WriteBatch* batch;
+ bool sync;
+ bool done;
+ port::CondVar cv;
+};
+
+struct DBImpl::CompactionState {
+ // Files produced by compaction
+ struct Output {
+ uint64_t number;
+ uint64_t file_size;
+ InternalKey smallest, largest;
+ };
+
+ Output* current_output() { return &outputs[outputs.size() - 1]; }
+
+ explicit CompactionState(Compaction* c)
+ : compaction(c),
+ smallest_snapshot(0),
+ outfile(nullptr),
+ builder(nullptr),
+ total_bytes(0) {}
+
+ Compaction* const compaction;
+
+ // Sequence numbers < smallest_snapshot are not significant since we
+ // will never have to service a snapshot below smallest_snapshot.
+ // Therefore if we have seen a sequence number S <= smallest_snapshot,
+ // we can drop all entries for the same key with sequence numbers < S.
+ SequenceNumber smallest_snapshot;
+
+ std::vector