// Copyright 2011 The Kyua Authors.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors
//   may be used to endorse or promote products derived from this software
//   without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "store/migrate.hpp"

#include <stdexcept>

#include "store/dbtypes.hpp"
#include "store/exceptions.hpp"
#include "store/layout.hpp"
#include "store/metadata.hpp"
#include "store/read_backend.hpp"
#include "store/write_backend.hpp"
#include "utils/datetime.hpp"
#include "utils/env.hpp"
#include "utils/format/macros.hpp"
#include "utils/fs/exceptions.hpp"
#include "utils/fs/operations.hpp"
#include "utils/fs/path.hpp"
#include "utils/logging/macros.hpp"
#include "utils/optional.ipp"
#include "utils/sanity.hpp"
#include "utils/stream.hpp"
#include "utils/sqlite/database.hpp"
#include "utils/sqlite/exceptions.hpp"
#include "utils/sqlite/statement.ipp"
#include "utils/text/operations.hpp"

namespace datetime = utils::datetime;
namespace fs = utils::fs;
namespace sqlite = utils::sqlite;
namespace text = utils::text;

using utils::none;
using utils::optional;


namespace {


/// Schema version at which we switched to results files.
const int first_chunked_schema_version = 3;


/// Queries the schema version of the given database.
///
/// \param file The database from which to query the schema version.
///
/// \return The schema version number.
static int
get_schema_version(const fs::path& file)
{
    sqlite::database db = store::detail::open_and_setup(
        file, sqlite::open_readonly);
    return store::metadata::fetch_latest(db).schema_version();
}
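

// What follows is an illustrative sketch only, not part of the original
// module: it shows how get_schema_version() could be combined with the
// detail::current_schema_version constant (used by migrate_schema() below)
// to test whether a migration is pending.  The helper name is hypothetical.
//
//     static bool
//     needs_migration(const fs::path& file)
//     {
//         return get_schema_version(file) <
//             store::detail::current_schema_version;
//     }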


/// Performs a single migration step.
///
/// Both action_id and old_database are little hacks to support the migration
/// from the historical database to chunked files.  We would use a more
/// generic "replacements" map, but it is not worth it.
///
/// \param file Database on which to apply the migration step.
/// \param version_from Current schema version in the database.
/// \param version_to Schema version to migrate to.
/// \param action_id If not none, replace the @ACTION_ID@ placeholder in the
///     migration file with this value.
/// \param old_database If not none, replace the @OLD_DATABASE@ placeholder
///     in the migration file with this value.
///
/// \throw error If there is a problem applying the migration.
static void
migrate_schema_step(const fs::path& file,
                    const int version_from,
                    const int version_to,
                    const optional< int64_t > action_id = none,
                    const optional< fs::path > old_database = none)
{
    LI(F("Migrating schema of %s from version %s to %s") % file % version_from
       % version_to);

    PRE(version_to == version_from + 1);

    sqlite::database db = store::detail::open_and_setup(
        file, sqlite::open_readwrite);

    const fs::path migration = store::detail::migration_file(version_from,
                                                             version_to);

    std::string migration_string;
    try {
        migration_string = utils::read_file(migration);
    } catch (const std::runtime_error& unused_e) {
        throw store::error(F("Cannot read migration file '%s'") % migration);
    }
    if (action_id) {
        migration_string = text::replace_all(migration_string, "@ACTION_ID@",
                                             F("%s") % action_id.get());
    }
    if (old_database) {
        migration_string = text::replace_all(migration_string,
                                             "@OLD_DATABASE@",
                                             old_database.get().str());
    }
    try {
        db.exec(migration_string);
    } catch (const sqlite::error& e) {
        throw store::error(F("Schema migration failed: %s") % e.what());
    }
}
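

// The installed migrate_v*_v*.sql files are not part of this module, so none
// is reproduced here.  As a purely hypothetical illustration of the textual
// substitution performed above, a chunking migration script could extract one
// action's rows from the old database with SQL along these lines:
//
//     ATTACH DATABASE '@OLD_DATABASE@' AS old_store;
//     INSERT INTO test_programs
//         SELECT * FROM old_store.test_programs
//         WHERE action_id == @ACTION_ID@;
//     DETACH DATABASE old_store;
//
// Because replace_all() performs plain text substitution with no quoting, a
// script like this relies on the database path not containing single quotes.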


/// Given a historical database, chunks it up into results files.
///
/// The given database is DELETED on success because, by then, its contents
/// will have been split into various separate files.
///
/// \param old_file Path to the old database.
static void
chunk_database(const fs::path& old_file)
{
    PRE(get_schema_version(old_file) == first_chunked_schema_version - 1);

    LI(F("Need to split %s into per-action files") % old_file);

    sqlite::database old_db = store::detail::open_and_setup(
        old_file, sqlite::open_readonly);

    sqlite::statement actions_stmt = old_db.create_statement(
        "SELECT action_id, cwd FROM actions NATURAL JOIN contexts");

    sqlite::statement start_time_stmt = old_db.create_statement(
        "SELECT test_results.start_time AS start_time "
        "FROM test_programs "
        "    JOIN test_cases "
        "    ON test_programs.test_program_id == test_cases.test_program_id "
        "    JOIN test_results "
        "    ON test_cases.test_case_id == test_results.test_case_id "
        "WHERE test_programs.action_id == :action_id "
        "ORDER BY start_time LIMIT 1");

    while (actions_stmt.step()) {
        const int64_t action_id = actions_stmt.safe_column_int64("action_id");
        const fs::path cwd(actions_stmt.safe_column_text("cwd"));

        LI(F("Extracting action %s") % action_id);

        start_time_stmt.reset();
        start_time_stmt.bind(":action_id", action_id);
        if (!start_time_stmt.step()) {
            LI(F("Skipping empty action %s") % action_id);
            continue;
        }
        const datetime::timestamp start_time = store::column_timestamp(
            start_time_stmt, "start_time");
        start_time_stmt.step_without_results();

        const fs::path new_file = store::layout::new_db_for_migration(
            cwd, start_time);
        if (fs::exists(new_file)) {
            LI(F("Skipping action because %s already exists") % new_file);
            continue;
        }

        LI(F("Creating %s for previous action %s") % new_file % action_id);

        try {
            fs::mkdir_p(new_file.branch_path(), 0755);
            sqlite::database db = store::detail::open_and_setup(
                new_file, sqlite::open_readwrite | sqlite::open_create);
            store::detail::initialize(db);
            db.close();
            migrate_schema_step(new_file,
                                first_chunked_schema_version - 1,
                                first_chunked_schema_version,
                                utils::make_optional(action_id),
                                utils::make_optional(old_file));
        } catch (...) {
            // TODO(jmmv): Handle this better.
            fs::unlink(new_file);
        }
    }

    fs::unlink(old_file);
}


}  // anonymous namespace


/// Calculates the path to a schema migration file.
///
/// \param version_from The version from which the database is being upgraded.
/// \param version_to The version to which the database is being upgraded.
///
/// \return The path to the installed migrate_vX_vY.sql file.
fs::path
store::detail::migration_file(const int version_from, const int version_to)
{
    return fs::path(utils::getenv_with_default("KYUA_STOREDIR", KYUA_STOREDIR))
        / (F("migrate_v%s_v%s.sql") % version_from % version_to);
}
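

// Illustrative usage, not part of the original code: for an upgrade step
// from version 2 to version 3, migration_file() yields a path like the one
// in the comment below, where the directory comes from KYUA_STOREDIR in the
// environment or, if unset, from the compiled-in default.
//
//     const fs::path sql = store::detail::migration_file(2, 3);
//     // sql == fs::path("<storedir>") / "migrate_v2_v3.sql"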


/// Backs up a database for schema migration purposes.
///
/// \todo We should probably use the SQLite backup API instead of doing a raw
/// file copy.  We issue our backup call with the database already open, but
/// because it is quiescent, it's OK to do so.
///
/// \param source Location of the database to be backed up.
/// \param old_version Version of the database's CURRENT schema, used to
///     determine the name of the backup file.
///
/// \throw error If there is a problem during the backup.
void
store::detail::backup_database(const fs::path& source, const int old_version)
{
    const fs::path target(F("%s.v%s.backup") % source.str() % old_version);

    LI(F("Backing up database %s to %s") % source % target);
    try {
        fs::copy(source, target);
    } catch (const fs::error& e) {
        throw store::error(e.what());
    }
}


/// Migrates the schema of a database to the current version.
///
/// The algorithm implemented here performs a migration step for every
/// intermediate version between the schema version in the database and the
/// version implemented in this file.  This should permit upgrades from
/// arbitrarily old databases.
///
/// \param file The database whose schema to upgrade.
///
/// \throw error If there is a problem with the migration.
void
store::migrate_schema(const utils::fs::path& file)
{
    const int version_from = get_schema_version(file);
    const int version_to = detail::current_schema_version;
    if (version_from == version_to) {
        throw error(F("Database already at schema version %s; migration not "
                      "needed") % version_from);
    } else if (version_from > version_to) {
        throw error(F("Database at schema version %s, which is newer than the "
                      "supported version %s") % version_from % version_to);
    }

    detail::backup_database(file, version_from);

    for (int i = version_from; i < first_chunked_schema_version - 1; ++i) {
        migrate_schema_step(file, i, i + 1);
    }
    chunk_database(file);
    INV(version_to == first_chunked_schema_version);
}
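

// Illustrative usage sketch, not part of the original module: callers reach
// the migration through the public store::migrate_schema() entry point.  A
// hypothetical standalone driver could look like this (it additionally
// needs <iostream>):
//
//     int
//     main(void)
//     {
//         try {
//             store::migrate_schema(utils::fs::path("results.db"));
//             return 0;
//         } catch (const store::error& e) {
//             std::cerr << "Migration failed: " << e.what() << '\n';
//             return 1;
//         }
//     }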